focal_method: string (lengths 13 – 60.9k characters)
test_case: string (lengths 25 – 109k characters)
/**
 * Renders the given AST into its SQL text form.
 *
 * @param root the AST node to format
 * @return the formatted SQL with any trailing newlines removed
 */
public static String formatSql(final AstNode root) {
    final StringBuilder sql = new StringBuilder();
    final Formatter formatter = new Formatter(sql);
    formatter.process(root, 0);
    // The formatter emits a trailing newline per statement; strip them all.
    return StringUtils.stripEnd(sql.toString(), "\n");
}
// Verifies formatSql renders a RIGHT OUTER JOIN with a WITHIN window clause and the join criteria.
@Test public void shouldFormatRightJoinWithWithin() { final Join join = new Join(leftAlias, ImmutableList.of(new JoinedSource( Optional.empty(), rightAlias, JoinedSource.Type.RIGHT, criteria, Optional.of(new WithinExpression(10, TimeUnit.SECONDS))))); final String expected = "`left` L\nRIGHT OUTER JOIN `right` R WITHIN 10 SECONDS ON " + "(('left.col0' = 'right.col0'))"; assertEquals(expected, SqlFormatter.formatSql(join)); }
// Produces the next unique id: waits if the current timestamp's sequence space is
// exhausted, takes the next timestamp+sequence value, masks it to its bit width,
// and ORs in the worker id occupying the high bits.
public long nextId() { waitIfNecessary(); long next = timestampAndSequence.incrementAndGet(); long timestampWithSequence = next & timestampAndSequenceMask; return workerId | timestampWithSequence; }
// Two consecutive ids from the same worker within one timestamp must differ by exactly 1.
@Test void testNextId() { IdWorker worker = new IdWorker(null); long id1 = worker.nextId(); long id2 = worker.nextId(); assertEquals(1L, id2 - id1, "increment step should be 1"); }
@Override public void setTimestamp(final Path file, final TransferStatus status) throws BackgroundException { try { if(null != status.getModified()) { // We must both set the accessed and modified time. See AttribFlags.SSH_FILEXFER_ATTR_V3_ACMODTIME // All times are represented as seconds from Jan 1, 1970 in UTC. final long atime = Timestamp.toSeconds(System.currentTimeMillis()); final long mtime = Timestamp.toSeconds(status.getModified() != null ? status.getModified() : System.currentTimeMillis()); final FileAttributes attrs = new FileAttributes.Builder().withAtimeMtime(atime / 1000, mtime / 1000).build(); session.sftp().setAttributes(file.getAbsolute(), attrs); } } catch(IOException e) { throw new SFTPExceptionMappingService().map("Cannot change timestamp of {0}", e, file); } }
// End-to-end: touch a file, set its timestamp, and verify both attribute finders report
// the second-truncated value (SFTP stores seconds); clean up the file afterwards.
@Test public void testSetTimestamp() throws Exception { final Path home = new SFTPHomeDirectoryService(session).find(); final Path test = new Path(home, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); new SFTPTouchFeature(session).touch(test, new TransferStatus()); final long modified = System.currentTimeMillis(); new SFTPTimestampFeature(session).setTimestamp(test, modified); assertEquals(TimeUnit.SECONDS.toMillis(TimeUnit.MILLISECONDS.toSeconds(modified)), new SFTPAttributesFinderFeature(session).find(test).getModificationDate()); assertEquals(TimeUnit.SECONDS.toMillis(TimeUnit.MILLISECONDS.toSeconds(modified)), new DefaultAttributesFinderFeature(session).find(test).getModificationDate()); new SFTPDeleteFeature(session).delete(Collections.<Path>singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
/**
 * Recovers a failed stage by mapping the failure through {@code fn};
 * a successful stage passes through unchanged.
 */
@Override
public ParSeqBasedCompletionStage<T> exceptionally(Function<Throwable, ? extends T> fn) {
    // recover() only fires on failure; wrap the recovered task in a new stage.
    return nextStageByComposingTask(_task.recover(t -> fn.apply(t)));
}
// The exceptionally handler must receive the exact exception the failed stage carries.
@Test public void testExceptionally() throws Exception { AtomicReference<Throwable> exception = new AtomicReference<>(); CompletionStage<String> stage = createTestFailedStage(EXCEPTION).exceptionally((t) -> { exception.set(t); return null; }); finish(stage); Assert.assertEquals(exception.get(), EXCEPTION); }
// Pure delegation: service listing always goes through the gRPC client proxy.
@Override public ListView<String> getServiceList(int pageNo, int pageSize, String groupName, AbstractSelector selector) throws NacosException { return grpcClientProxy.getServiceList(pageNo, pageSize, groupName, selector); }
// Verifies the delegate forwards getServiceList to the gRPC client exactly once with unchanged arguments.
@Test void testGetServiceList() throws NacosException { AbstractSelector selector = new ExpressionSelector(); int pageNo = 1; int pageSize = 10; String groupName = "group2"; delegate.getServiceList(pageNo, pageSize, groupName, selector); verify(mockGrpcClient, times(1)).getServiceList(pageNo, pageSize, groupName, selector); }
// Looks up a Maven artifact in a Nexus server by SHA1 hash: validates the 40-hex-char format,
// issues an XML "identify" request (optionally via configured proxy and Authorization header),
// and XPath-extracts groupId/artifactId/version plus optional artifact/pom links.
// HTTP 404 -> FileNotFoundException; any other non-200 -> IOException; XML failures are wrapped as IOException.
@Override public MavenArtifact searchSha1(String sha1) throws IOException { if (null == sha1 || !sha1.matches("^[0-9A-Fa-f]{40}$")) { throw new IllegalArgumentException("Invalid SHA1 format"); } final URL url = new URL(rootURL, String.format("identify/sha1/%s", sha1.toLowerCase())); LOGGER.debug("Searching Nexus url {}", url); // Determine if we need to use a proxy. The rules: // 1) If the proxy is set, AND the setting is set to true, use the proxy // 2) Otherwise, don't use the proxy (either the proxy isn't configured, // or proxy is specifically set to false final HttpURLConnection conn; final URLConnectionFactory factory = new URLConnectionFactory(settings); conn = factory.createHttpURLConnection(url, useProxy); conn.setDoOutput(true); final String authHeader = buildHttpAuthHeaderValue(); if (!authHeader.isEmpty()) { conn.addRequestProperty("Authorization", authHeader); } // JSON would be more elegant, but there's not currently a dependency // on JSON, so don't want to add one just for this conn.addRequestProperty("Accept", "application/xml"); conn.connect(); switch (conn.getResponseCode()) { case 200: try { final DocumentBuilder builder = XmlUtils.buildSecureDocumentBuilder(); final Document doc = builder.parse(conn.getInputStream()); final XPath xpath = XPathFactory.newInstance().newXPath(); final String groupId = xpath .evaluate( "/org.sonatype.nexus.rest.model.NexusArtifact/groupId", doc); final String artifactId = xpath.evaluate( "/org.sonatype.nexus.rest.model.NexusArtifact/artifactId", doc); final String version = xpath .evaluate( "/org.sonatype.nexus.rest.model.NexusArtifact/version", doc); final String link = xpath .evaluate( "/org.sonatype.nexus.rest.model.NexusArtifact/artifactLink", doc); final String pomLink = xpath .evaluate( "/org.sonatype.nexus.rest.model.NexusArtifact/pomLink", doc); final MavenArtifact ma = new MavenArtifact(groupId, artifactId, version); if (link != null && !link.isEmpty()) { ma.setArtifactUrl(link); } if (pomLink != null && 
!pomLink.isEmpty()) { ma.setPomUrl(pomLink); } return ma; } catch (ParserConfigurationException | IOException | SAXException | XPathExpressionException e) { // Anything else is jacked-up XML stuff that we really can't recover // from well throw new IOException(e.getMessage(), e); } case 404: throw new FileNotFoundException("Artifact not found in Nexus"); default: LOGGER.debug("Could not connect to Nexus received response code: {} {}", conn.getResponseCode(), conn.getResponseMessage()); throw new IOException("Could not connect to Nexus"); } }
// NOTE(review): @Ignore makes this test permanently skipped, so the IllegalArgumentException
// contract for malformed SHA1 input is never exercised — confirm whether the ignore is still needed.
@Test(expected = IllegalArgumentException.class) @Ignore public void testMalformedSha1() throws Exception { searcher.searchSha1("invalid"); }
// Replaces the underlying producer (eos-v2 only): accumulates the old producer's total
// blocked time plus the time spent closing it, then obtains a fresh producer with the
// same eos-v2 configuration. Throws IllegalStateException in any other processing mode.
public void resetProducer() { if (processingMode != EXACTLY_ONCE_V2) { throw new IllegalStateException("Expected eos-v2 to be enabled, but the processing mode was " + processingMode); } oldProducerTotalBlockedTime += totalBlockedTime(producer); final long start = time.nanoseconds(); close(); final long closeTime = time.nanoseconds() - start; oldProducerTotalBlockedTime += closeTime; producer = clientSupplier.getProducer(eosV2ProducerConfigs); }
// resetProducer must reject the eos-alpha mode with a descriptive IllegalStateException.
@Test public void shouldFailOnResetProducerForExactlyOnceAlpha() { final IllegalStateException thrown = assertThrows( IllegalStateException.class, () -> eosAlphaStreamsProducer.resetProducer() ); assertThat(thrown.getMessage(), is("Expected eos-v2 to be enabled, but the processing mode was EXACTLY_ONCE_ALPHA")); }
/**
 * Validates that a configuration value is a string.
 * Null is accepted (absence is not a type error); any non-String value fails.
 */
@Override
public ValidationResult validate(Object value) {
    final boolean acceptable = value == null || value instanceof String;
    if (acceptable) {
        return new ValidationResult.ValidationPassed();
    }
    return new ValidationResult.ValidationFailed("Value \"" + value + "\" is not a valid string!");
}
// Null must pass validation (missing values are not type errors).
@Test public void validateNull() { assertThat(validator.validate(null)).isInstanceOf(ValidationResult.ValidationPassed.class); }
/**
 * Drains the collected metrics into a snapshot list.
 * Fails fast while a reporter is active, since draining then would race with it.
 */
List<StatisticsEntry> takeStatistics() {
    if (reporterEnabled) {
        throw new IllegalStateException("Cannot take consistent snapshot while reporter is enabled");
    }
    var snapshot = new ArrayList<StatisticsEntry>();
    consume((metric, value) -> snapshot.add(new StatisticsEntry(metric, value)));
    return snapshot;
}
// Three GET requests with different status codes must aggregate into per-status-class entries.
@Test void statistics_are_aggregated_by_category() { testRequest("http", 300, "GET"); testRequest("http", 301, "GET"); testRequest("http", 200, "GET"); var stats = collector.takeStatistics(); assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_2XX, "read", 200, 1L); assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_3XX, "read", 301, 1L); assertStatisticsEntry(stats, "http", "GET", MetricDefinitions.RESPONSES_3XX, "read", 300, 1L); }
/**
 * Creates a new OAuth2 client after ensuring the clientId is not already taken.
 *
 * @param createReqVO creation request carrying the client fields
 * @return the generated id of the inserted client record
 */
@Override public Long createOAuth2Client(OAuth2ClientSaveReqVO createReqVO) {
    // null id = no existing record to exclude from the duplicate check
    validateClientIdExists(null, createReqVO.getClientId());
    // Insert the new client record
    OAuth2ClientDO client = BeanUtils.toBean(createReqVO, OAuth2ClientDO.class);
    oauth2ClientMapper.insert(client);
    return client.getId();
}
// Creates a client and verifies an id is generated and the persisted row matches the request.
@Test public void testCreateOAuth2Client_success() {
    // Prepare arguments
    OAuth2ClientSaveReqVO reqVO = randomPojo(OAuth2ClientSaveReqVO.class, o -> o.setLogo(randomString()))
            .setId(null); // keep id unset so the service must generate it
    // Invoke
    Long oauth2ClientId = oauth2ClientService.createOAuth2Client(reqVO);
    // Assert an id was generated
    assertNotNull(oauth2ClientId);
    // Verify the persisted record matches the request (id excluded from comparison)
    OAuth2ClientDO oAuth2Client = oauth2ClientMapper.selectById(oauth2ClientId);
    assertPojoEquals(reqVO, oAuth2Client, "id");
}
// Fetches table statistics through the table-stats cache, keyed by (database, table).
public HivePartitionStats getTableStatistics(String dbName, String tblName) { return get(tableStatsCache, DatabaseTableName.of(dbName, tblName)); }
// Verifies cached table statistics expose the expected common stats and per-column stats for db1.table1.
@Test public void testGetTableStatistics() { CachingHiveMetastore cachingHiveMetastore = new CachingHiveMetastore( metastore, executor, expireAfterWriteSec, refreshAfterWriteSec, 1000, false); HivePartitionStats statistics = cachingHiveMetastore.getTableStatistics("db1", "table1"); HiveCommonStats commonStats = statistics.getCommonStats(); Assert.assertEquals(50, commonStats.getRowNums()); Assert.assertEquals(100, commonStats.getTotalFileBytes()); HiveColumnStats columnStatistics = statistics.getColumnStats().get("col1"); Assert.assertEquals(0, columnStatistics.getTotalSizeBytes()); Assert.assertEquals(1, columnStatistics.getNumNulls()); Assert.assertEquals(2, columnStatistics.getNdv()); }
/**
 * Creates pointwise edges between the producer's partitions and the consumer job
 * vertex's subtasks, grouping consumers that read the same partition index range
 * so each range is connected exactly once.
 *
 * @param jobVertex          the consumer job vertex being connected
 * @param result             the intermediate result produced upstream
 * @param jobVertexInputInfo per-subtask input assignment (partition index ranges)
 */
private static void connectPointwise(
        ExecutionJobVertex jobVertex,
        IntermediateResult result,
        JobVertexInputInfo jobVertexInputInfo) {
    // LinkedHashMap keeps encounter order so edges are created deterministically.
    Map<IndexRange, List<Integer>> consumersByPartition = new LinkedHashMap<>();
    for (ExecutionVertexInputInfo executionVertexInputInfo :
            jobVertexInputInfo.getExecutionVertexInputInfos()) {
        int consumerIndex = executionVertexInputInfo.getSubtaskIndex();
        IndexRange range = executionVertexInputInfo.getPartitionIndexRange();
        // computeIfAbsent is the idiomatic form of the previous compute()-with-null-check.
        consumersByPartition.computeIfAbsent(range, ignore -> new ArrayList<>()).add(consumerIndex);
    }
    consumersByPartition.forEach(
            (range, subtasks) -> {
                List<ExecutionVertex> taskVertices = new ArrayList<>();
                List<IntermediateResultPartition> partitions = new ArrayList<>();
                for (int index : subtasks) {
                    taskVertices.add(jobVertex.getTaskVertices()[index]);
                }
                // Partition index ranges are inclusive on both ends.
                for (int i = range.getStartIndex(); i <= range.getEndIndex(); ++i) {
                    partitions.add(result.getPartitions()[i]);
                }
                connectInternal(
                        taskVertices,
                        partitions,
                        result.getResultType(),
                        jobVertex.getGraph().getEdgeManager());
            });
}
// Builds a 4x4 dynamic pointwise graph with explicit partition ranges (0-0, 0-0, 1-2, 3-3)
// and verifies both directions of the wiring: consumer vertex groups per partition,
// consumed partition groups per execution vertex, and that the group pairs reference each other.
@Test void testConnectPointwise() throws Exception { int upstream = 4; int downstream = 4; // use dynamic graph to specify the vertex input info ExecutionGraph eg = setupExecutionGraph(upstream, downstream, POINTWISE, true); // set partition ranges List<IndexRange> partitionRanges = Arrays.asList( new IndexRange(0, 0), new IndexRange(0, 0), new IndexRange(1, 2), new IndexRange(3, 3)); List<ExecutionVertexInputInfo> executionVertexInputInfos = new ArrayList<>(); for (int i = 0; i < downstream; i++) { executionVertexInputInfos.add( new ExecutionVertexInputInfo( // the subpartition range will not be used in edge manager, so set (0, // 0) i, partitionRanges.get(i), new IndexRange(0, 0))); } final JobVertexInputInfo jobVertexInputInfo = new JobVertexInputInfo(executionVertexInputInfos); final Iterator<ExecutionJobVertex> vertexIterator = eg.getVerticesTopologically().iterator(); final ExecutionJobVertex producer = vertexIterator.next(); final ExecutionJobVertex consumer = vertexIterator.next(); // initialize producer and consumer eg.initializeJobVertex(producer, 1L, Collections.emptyMap()); eg.initializeJobVertex( consumer, 1L, Collections.singletonMap( producer.getProducedDataSets()[0].getId(), jobVertexInputInfo)); IntermediateResult result = Objects.requireNonNull(eg.getJobVertex(producer.getJobVertexId())) .getProducedDataSets()[0]; IntermediateResultPartition partition1 = result.getPartitions()[0]; IntermediateResultPartition partition2 = result.getPartitions()[1]; IntermediateResultPartition partition3 = result.getPartitions()[2]; IntermediateResultPartition partition4 = result.getPartitions()[3]; ExecutionVertex vertex1 = consumer.getTaskVertices()[0]; ExecutionVertex vertex2 = consumer.getTaskVertices()[1]; ExecutionVertex vertex3 = consumer.getTaskVertices()[2]; ExecutionVertex vertex4 = consumer.getTaskVertices()[3]; // check consumers of the partitions ConsumerVertexGroup consumerVertexGroup1 = partition1.getConsumerVertexGroups().get(0); ConsumerVertexGroup 
consumerVertexGroup2 = partition2.getConsumerVertexGroups().get(0); ConsumerVertexGroup consumerVertexGroup3 = partition4.getConsumerVertexGroups().get(0); assertThat(consumerVertexGroup1) .containsExactlyInAnyOrder(vertex1.getID(), vertex2.getID()); assertThat(consumerVertexGroup2).containsExactlyInAnyOrder(vertex3.getID()); assertThat(partition3.getConsumerVertexGroups().get(0)).isEqualTo(consumerVertexGroup2); assertThat(consumerVertexGroup3).containsExactlyInAnyOrder(vertex4.getID()); // check inputs of the execution vertices ConsumedPartitionGroup consumedPartitionGroup1 = vertex1.getConsumedPartitionGroup(0); ConsumedPartitionGroup consumedPartitionGroup2 = vertex3.getConsumedPartitionGroup(0); ConsumedPartitionGroup consumedPartitionGroup3 = vertex4.getConsumedPartitionGroup(0); assertThat(consumedPartitionGroup1).containsExactlyInAnyOrder(partition1.getPartitionId()); assertThat(vertex2.getConsumedPartitionGroup(0)).isEqualTo(consumedPartitionGroup1); assertThat(consumedPartitionGroup2) .containsExactlyInAnyOrder( partition2.getPartitionId(), partition3.getPartitionId()); assertThat(consumedPartitionGroup3).containsExactlyInAnyOrder(partition4.getPartitionId()); // check the consumerVertexGroups and consumedPartitionGroups are properly set assertThat(consumerVertexGroup1.getConsumedPartitionGroup()) .isEqualTo(consumedPartitionGroup1); assertThat(consumedPartitionGroup1.getConsumerVertexGroup()) .isEqualTo(consumerVertexGroup1); assertThat(consumerVertexGroup2.getConsumedPartitionGroup()) .isEqualTo(consumedPartitionGroup2); assertThat(consumedPartitionGroup2.getConsumerVertexGroup()) .isEqualTo(consumerVertexGroup2); assertThat(consumerVertexGroup3.getConsumedPartitionGroup()) .isEqualTo(consumedPartitionGroup3); assertThat(consumedPartitionGroup3.getConsumerVertexGroup()) .isEqualTo(consumerVertexGroup3); }
/**
 * Plans the next batch of splits after refreshing table metadata.
 * A null position means no snapshot has been consumed yet, so an initial
 * discovery is performed; otherwise only the increment since lastPosition.
 */
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
    // Pick up the latest snapshots before planning.
    table.refresh();
    if (lastPosition == null) {
        return discoverInitialSplits();
    }
    return discoverIncrementalSplits(lastPosition);
}
// With TABLE_SCAN_THEN_INCREMENTAL on a non-empty table, the first plan must be a full scan
// ending at the latest snapshot and covering both data files; subsequent cycles are incremental.
@Test public void testTableScanThenIncrementalWithNonEmptyTable() throws Exception { appendTwoSnapshots(); ScanContext scanContext = ScanContext.builder() .startingStrategy(StreamingStartingStrategy.TABLE_SCAN_THEN_INCREMENTAL) .build(); ContinuousSplitPlannerImpl splitPlanner = new ContinuousSplitPlannerImpl(TABLE_RESOURCE.tableLoader().clone(), scanContext, null); ContinuousEnumerationResult initialResult = splitPlanner.planSplits(null); assertThat(initialResult.fromPosition()).isNull(); assertThat(initialResult.toPosition().snapshotId().longValue()) .isEqualTo(snapshot2.snapshotId()); assertThat(initialResult.toPosition().snapshotTimestampMs().longValue()) .isEqualTo(snapshot2.timestampMillis()); assertThat(initialResult.splits()).hasSize(1); IcebergSourceSplit split = Iterables.getOnlyElement(initialResult.splits()); assertThat(split.task().files()).hasSize(2); Set<String> discoveredFiles = split.task().files().stream() .map(fileScanTask -> fileScanTask.file().path().toString()) .collect(Collectors.toSet()); Set<String> expectedFiles = ImmutableSet.of(dataFile1.path().toString(), dataFile2.path().toString()); assertThat(discoveredFiles).containsExactlyInAnyOrderElementsOf(expectedFiles); IcebergEnumeratorPosition lastPosition = initialResult.toPosition(); for (int i = 0; i < 3; ++i) { lastPosition = verifyOneCycle(splitPlanner, lastPosition).lastPosition; } }
// Evaluates every child predicate in order and folds the results with the configured
// boolean operator (OR/AND/XOR/SURROGATE) via the corresponding helper, starting from a
// null accumulator. A null final result (e.g. no children) is treated as false.
@Override public boolean evaluate(Map<String, Object> values) { Boolean toReturn = null; for (KiePMMLPredicate kiePMMLPredicate : kiePMMLPredicates) { Boolean evaluation = kiePMMLPredicate.evaluate(values); switch (booleanOperator) { case OR: toReturn = orOperator(toReturn, evaluation); break; case AND: toReturn = andOperator(toReturn, evaluation); break; case XOR: toReturn = xorOperator(toReturn, evaluation); break; case SURROGATE: toReturn = surrogateOperator(toReturn, evaluation); break; default: throw new KiePMMLException("Unknown BOOLEAN_OPERATOR " + booleanOperator); } } return toReturn != null && toReturn; }
// SURROGATE semantics: the first child (IN on strings) decides when it matches; the second
// child (NOT_IN on ints) is consulted as fallback. The four input combinations pin that behavior.
@Test void evaluateCompoundPredicateSurrogate() { ARRAY_TYPE arrayType = ARRAY_TYPE.STRING; List<Object> stringValues = getObjects(arrayType, 4); KiePMMLSimpleSetPredicate kiePMMLSimpleSetPredicateString = getKiePMMLSimpleSetPredicate(SIMPLE_SET_PREDICATE_STRING_NAME, stringValues, arrayType, IN_NOTIN.IN); arrayType = ARRAY_TYPE.INT; List<Object> intValues = getObjects(arrayType, 4); KiePMMLSimpleSetPredicate kiePMMLSimpleSetPredicateInt = getKiePMMLSimpleSetPredicate(SIMPLE_SET_PREDICATE_INT_NAME, intValues, arrayType, IN_NOTIN.NOT_IN); KiePMMLCompoundPredicate kiePMMLCompoundPredicate = getKiePMMLCompoundPredicate(BOOLEAN_OPERATOR.SURROGATE, Arrays.asList(kiePMMLSimpleSetPredicateString, kiePMMLSimpleSetPredicateInt)); Map<String, Object> inputData = new HashMap<>(); inputData.put(SIMPLE_SET_PREDICATE_STRING_NAME, "NOT"); // This predicate verify the "IN" condition inputData.put(SIMPLE_SET_PREDICATE_INT_NAME, intValues.get(0)); // This predicate verify the "NOT_IN" condition assertThat(kiePMMLCompoundPredicate.evaluate(inputData)).isFalse(); inputData.put(SIMPLE_SET_PREDICATE_STRING_NAME, stringValues.get(0)); // This predicate verify the "IN" condition inputData.put(SIMPLE_SET_PREDICATE_INT_NAME, intValues.get(0)); // This predicate verify the "NOT_IN" condition assertThat(kiePMMLCompoundPredicate.evaluate(inputData)).isTrue(); inputData.put(SIMPLE_SET_PREDICATE_STRING_NAME, "NOT"); // This predicate verify the "IN" condition inputData.put(SIMPLE_SET_PREDICATE_INT_NAME, 1); // This predicate verify the "NOT_IN" condition assertThat(kiePMMLCompoundPredicate.evaluate(inputData)).isFalse(); inputData.put(SIMPLE_SET_PREDICATE_STRING_NAME, stringValues.get(0)); // This predicate verify the "IN" // condition inputData.put(SIMPLE_SET_PREDICATE_INT_NAME, 1); // This predicate verify the "NOT_IN" condition assertThat(kiePMMLCompoundPredicate.evaluate(inputData)).isTrue(); }
// Fluent setter: checkPositive rejects non-positive values and returns the validated capacity.
public RingbufferConfig setCapacity(int capacity) { this.capacity = checkPositive("capacity", capacity); return this; }
// Round-trip: a valid capacity set on the config must be readable back unchanged.
@Test public void setCapacity() { RingbufferConfig config = new RingbufferConfig(NAME); config.setCapacity(1000); assertEquals(1000, config.getCapacity()); }
/**
 * Reports whether this aspect extension can decorate the given return type:
 * Reactor's {@code Flux} and {@code Mono} (including subclasses) are supported.
 */
@Override
public boolean canHandleReturnType(Class returnType) {
    return Flux.class.isAssignableFrom(returnType)
        || Mono.class.isAssignableFrom(returnType);
}
// Both Reactor publisher types must be accepted by the aspect extension.
@Test public void testCheckTypes() { assertThat(reactorBulkheadAspectExt.canHandleReturnType(Mono.class)).isTrue(); assertThat(reactorBulkheadAspectExt.canHandleReturnType(Flux.class)).isTrue(); }
/**
 * Returns the counter's requests ordered by hit count.
 * Sorting a 0/1-element list is a no-op, so the sort call is skipped then.
 */
List<CounterRequest> getOrderedByHitsRequests() {
    final List<CounterRequest> requests = getRequests();
    if (requests.size() > 1) {
        requests.sort(COUNTER_REQUEST_BY_HITS_COMPARATOR);
    }
    return requests;
}
// Registers requests with higher, equal, and lower hit counts and verifies the
// ordered view aggregates them into four distinct request entries.
@Test public void testGetOrderedByHitsRequests() {
    counter.clear();
    counter.addRequest("test 1", 0, 0, 0, false, 1000);
    counter.addRequest("test 2", 1000, 500, 500, false, 1000); // higher hit count
    counter.addRequest("test 2", 1000, 500, 500, false, 1000);
    counter.addRequest("test 2", 1000, 500, 500, false, 1000);
    counter.addRequest("test 3", 100, 50, 50, false, 1000); // equal
    counter.addRequest("test 4", 100, 50, 50, false, 1000); // lower
    counter.addRequest("test 4", 100, 50, 50, false, 1000);
    final List<CounterRequest> requests = counter.getOrderedByHitsRequests();
    assertEquals("requests size", 4, requests.size());
}
// Simple accessor for the map name this event refers to.
@Override public String getMapName() { return mapName; }
// The event must report the map name it was constructed with.
@Test public void testGetMapName() { assertEquals("mapName", localCacheWideEventData.getMapName()); }
// Dissects an inbound archive control request for event logging: writes the common log
// header, decodes the SBE message header, then wraps the request-specific decoder matching
// the event code and appends its human-readable rendering to the builder. Unrecognized
// event codes append ": unknown command" instead of throwing.
@SuppressWarnings("MethodLength") static void dissectControlRequest( final ArchiveEventCode eventCode, final MutableDirectBuffer buffer, final int offset, final StringBuilder builder) { int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder); HEADER_DECODER.wrap(buffer, offset + encodedLength); encodedLength += MessageHeaderDecoder.ENCODED_LENGTH; switch (eventCode) { case CMD_IN_CONNECT: CONNECT_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendConnect(builder); break; case CMD_IN_CLOSE_SESSION: CLOSE_SESSION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendCloseSession(builder); break; case CMD_IN_START_RECORDING: START_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartRecording(builder); break; case CMD_IN_STOP_RECORDING: STOP_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecording(builder); break; case CMD_IN_REPLAY: REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplay(builder); break; case CMD_IN_STOP_REPLAY: STOP_REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopReplay(builder); break; case CMD_IN_LIST_RECORDINGS: LIST_RECORDINGS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordings(builder); break; case CMD_IN_LIST_RECORDINGS_FOR_URI: LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordingsForUri(builder); break; case CMD_IN_LIST_RECORDING: LIST_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, 
HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecording(builder); break; case CMD_IN_EXTEND_RECORDING: EXTEND_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendExtendRecording(builder); break; case CMD_IN_RECORDING_POSITION: RECORDING_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendRecordingPosition(builder); break; case CMD_IN_TRUNCATE_RECORDING: TRUNCATE_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendTruncateRecording(builder); break; case CMD_IN_STOP_RECORDING_SUBSCRIPTION: STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecordingSubscription(builder); break; case CMD_IN_STOP_POSITION: STOP_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopPosition(builder); break; case CMD_IN_FIND_LAST_MATCHING_RECORD: FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendFindLastMatchingRecord(builder); break; case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS: LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordingSubscriptions(builder); break; case CMD_IN_START_BOUNDED_REPLAY: BOUNDED_REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartBoundedReplay(builder); break; case CMD_IN_STOP_ALL_REPLAYS: STOP_ALL_REPLAYS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopAllReplays(builder); break; case CMD_IN_REPLICATE: 
REPLICATE_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplicate(builder); break; case CMD_IN_STOP_REPLICATION: STOP_REPLICATION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopReplication(builder); break; case CMD_IN_START_POSITION: START_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartPosition(builder); break; case CMD_IN_DETACH_SEGMENTS: DETACH_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendDetachSegments(builder); break; case CMD_IN_DELETE_DETACHED_SEGMENTS: DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendDeleteDetachedSegments(builder); break; case CMD_IN_PURGE_SEGMENTS: PURGE_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendPurgeSegments(builder); break; case CMD_IN_ATTACH_SEGMENTS: ATTACH_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendAttachSegments(builder); break; case CMD_IN_MIGRATE_SEGMENTS: MIGRATE_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendMigrateSegments(builder); break; case CMD_IN_AUTH_CONNECT: AUTH_CONNECT_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendAuthConnect(builder); break; case CMD_IN_KEEP_ALIVE: KEEP_ALIVE_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendKeepAlive(builder); break; case CMD_IN_TAGGED_REPLICATE: TAGGED_REPLICATE_REQUEST_DECODER.wrap( buffer, offset + 
encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendTaggedReplicate(builder); break; case CMD_IN_START_RECORDING2: START_RECORDING_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartRecording2(builder); break; case CMD_IN_EXTEND_RECORDING2: EXTEND_RECORDING_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendExtendRecording2(builder); break; case CMD_IN_STOP_RECORDING_BY_IDENTITY: STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecordingByIdentity(builder); break; case CMD_IN_PURGE_RECORDING: PURGE_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendPurgeRecording(builder); break; case CMD_IN_REPLICATE2: REPLICATE_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplicate2(builder); break; case CMD_IN_REQUEST_REPLAY_TOKEN: REPLAY_TOKEN_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplayToken(builder); break; default: builder.append(": unknown command"); } }
// Encodes a TaggedReplicateRequest, dissects it via CMD_IN_TAGGED_REPLICATE, and pins the
// full human-readable rendering including header timestamp and every request field.
@Test void controlRequestTaggedReplicate() { internalEncodeLogHeader(buffer, 0, 3, 6, () -> 5_500_000_000L); final TaggedReplicateRequestEncoder requestEncoder = new TaggedReplicateRequestEncoder(); requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder) .controlSessionId(1) .correlationId(-10) .srcRecordingId(9) .dstRecordingId(31) .channelTagId(4) .subscriptionTagId(7) .srcControlStreamId(15) .srcControlChannel("src") .liveDestination("alive and well"); dissectControlRequest(CMD_IN_TAGGED_REPLICATE, buffer, 0, builder); assertEquals("[5.500000000] " + CONTEXT + ": " + CMD_IN_TAGGED_REPLICATE.name() + " [3/6]:" + " controlSessionId=1" + " correlationId=-10" + " srcRecordingId=9" + " dstRecordingId=31" + " channelTagId=4" + " subscriptionTagId=7" + " srcControlStreamId=15" + " srcControlChannel=src" + " liveDestination=alive and well", builder.toString()); }
// Adds the bound product to the customer's favourites and redirects back to its page.
// Failures are logged and the same redirect is returned (best-effort: the user always
// lands on the product page regardless of the outcome).
@PostMapping("add-to-favourites") public Mono<String> addProductToFavourites(@ModelAttribute("product") Mono<Product> productMono) { return productMono .map(Product::id) .flatMap(productId -> this.favouriteProductsClient.addProductToFavourites(productId) .thenReturn("redirect:/customer/products/%d".formatted(productId)) .onErrorResume(exception -> { log.error(exception.getMessage(), exception); return Mono.just("redirect:/customer/products/%d".formatted(productId)); })); }
// Even when the favourites client fails with a bad-request error, the controller must
// still emit the product-page redirect and call the client exactly once.
@Test void addProductToFavourites_RequestIsInvalid_RedirectsToProductPage() { // given doReturn(Mono.error(new ClientBadRequestException("Возникла какая-то ошибка", null, List.of("Какая-то ошибка")))) .when(this.favouriteProductsClient).addProductToFavourites(1); // when StepVerifier.create(this.controller.addProductToFavourites( Mono.just(new Product(1, "Товар №1", "Описание товара №1")))) // then .expectNext("redirect:/customer/products/1") .verifyComplete(); verify(this.favouriteProductsClient).addProductToFavourites(1); verifyNoMoreInteractions(this.favouriteProductsClient); verifyNoInteractions(this.productReviewsClient, this.productsClient); }
// Substitutes {key} placeholders in the template content with the matching values from params
// (delegates to Hutool's StrUtil.format).
@Override public String formatNotifyTemplateContent(String content, Map<String, Object> params) { return StrUtil.format(content, params); }
// Verifies {name}/{what} placeholders are substituted from the params map.
@Test public void testFormatNotifyTemplateContent() {
    // Prepare template parameters
    Map<String, Object> params = new HashMap<>();
    params.put("name", "小红");
    params.put("what", "饭");
    // Invoke and assert the placeholder substitution
    assertEquals("小红,你好,饭吃了吗?", notifyTemplateService.formatNotifyTemplateContent("{name},你好,{what}吃了吗?", params));
}
// Blocking wrapper: synchronously waits for the async subscriber count.
@Override public long countSubscribers() { return commandExecutor.get(countSubscribersAsync()); }
// Subscriber count must track listener registration: 0 -> add -> 1 -> remove -> 0.
@Test public void testCountSubscribers() { RTopic topic1 = redisson.getTopic("topic", LongCodec.INSTANCE); assertThat(topic1.countSubscribers()).isZero(); int id = topic1.addListener(Long.class, (channel, msg) -> { }); assertThat(topic1.countSubscribers()).isOne(); topic1.removeListener(id); assertThat(topic1.countSubscribers()).isZero(); }
// Checks whether any configured Mongo index set manages the given index name
// (delegates to the collection-based overload over all index sets).
@Override public boolean isManagedIndex(String indexName) { return isManagedIndex(findAllMongoIndexSets(), indexName); }
// Exercises the Map-returning isManagedIndex overload: with one mocked index set managing
// "index1" but not "index2", the result must map each name to its managed status.
@Test public void isManagedIndexReturnsAMapOfIndices() { final IndexSetConfig indexSetConfig = mock(IndexSetConfig.class); final List<IndexSetConfig> indexSetConfigs = Collections.singletonList(indexSetConfig); final MongoIndexSet indexSet = mock(MongoIndexSet.class); when(mongoIndexSetFactory.create(indexSetConfig)).thenReturn(indexSet); when(indexSetService.findAll()).thenReturn(indexSetConfigs); when(indexSet.isManagedIndex("index1")).thenReturn(true); when(indexSet.isManagedIndex("index2")).thenReturn(false); final Map<String, Boolean> managedStatus = indexSetRegistry.isManagedIndex(ImmutableSet.of("index1", "index2")); assertThat(managedStatus) .containsEntry("index1", true) .containsEntry("index2", false); }
// Registers a migration step: validates the migration number, requires a non-null/non-empty
// description and a non-null step class, rejects duplicate numbers, then stores the step
// and returns this registry for fluent chaining.
@Override public <T extends MigrationStep> MigrationStepRegistry add(long migrationNumber, String description, Class<T> stepClass) { validate(migrationNumber); requireNonNull(description, "description can't be null"); checkArgument(!description.isEmpty(), "description can't be empty"); requireNonNull(stepClass, "MigrationStep class can't be null"); checkState(!migrations.containsKey(migrationNumber), "A migration is already registered for migration number '%s'", migrationNumber); this.migrations.put(migrationNumber, new RegisteredMigrationStep(migrationNumber, description, stepClass)); return this; }
// Verifies that add() rejects a null description with an NPE carrying the
// documented message.
@Test public void add_fails_with_NPE_if_description_is_null() { assertThatThrownBy(() -> { underTest.add(12, null, MigrationStep.class); }) .isInstanceOf(NullPointerException.class) .hasMessage("description can't be null"); }
@Override public PolicerId allocatePolicerId() { // Init step DriverHandler handler = handler(); // First step is to get MeterService MeterService meterService = handler.get(MeterService.class); // There was a problem, return none if (meterService == null) { log.warn("MeterService is null"); return PolicerId.NONE; } // Let's get the device id DeviceId deviceId = handler.data().deviceId(); // Double check correspondence between schemas if (!deviceId.uri().getScheme().equals(OF_SCHEME)) { log.warn("The device {} does not seem to be managed by OpenFlow", deviceId); return PolicerId.NONE; } // Get a new meter id MeterId meterId = meterService.allocateMeterId(deviceId); // There was a problem if (meterId == null) { log.warn("MeterService does not provide valid ids"); return PolicerId.NONE; } // Create a policer id from the meter id return getPolicerIdFromMeterId(meterId); }
// Exercises allocatePolicerId() until the fake MeterService runs out of ids:
// the first three calls yield OpenFlow-scheme policer ids (mid1, mid10, mid100),
// the fourth gets a null meter id and must return PolicerId.NONE.
@Test public void testMeterNull() { // Get device handler DriverHandler driverHandler = driverService.createHandler(ofDid); // Get policer config behavior PolicerConfigurable policerConfigurable = driverHandler.behaviour(PolicerConfigurable.class); // Get policer id PolicerId policerId = policerConfigurable.allocatePolicerId(); // this works assertThat(policerId.uri().getScheme(), is(OF_SCHEME)); String hexId = Long.toHexString((mid1.id())); assertThat(policerId.uri().getSchemeSpecificPart(), is(hexId)); // Get another policer id policerId = policerConfigurable.allocatePolicerId(); assertThat(policerId.uri().getScheme(), is(OF_SCHEME)); hexId = Long.toHexString((mid10.id())); assertThat(policerId.uri().getSchemeSpecificPart(), is(hexId)); // Get the last policer id policerId = policerConfigurable.allocatePolicerId(); assertThat(policerId.uri().getScheme(), is(OF_SCHEME)); hexId = Long.toHexString((mid100.id())); assertThat(policerId.uri().getSchemeSpecificPart(), is(hexId)); // this does not work policerId = policerConfigurable.allocatePolicerId(); // Assert that is none assertThat(policerId, is(PolicerId.NONE)); }
// Pipeline callback: requires init() to have been called first (inboundHandler set),
// then builds delegating contexts for both wrapped handlers. The inbound context
// overrides fireExceptionCaught so exceptions are routed to the outbound handler's
// exceptionCaught while it is still in the pipeline; only afterwards does it fall
// back to normal propagation. Finally both wrapped handlers get handlerAdded, the
// outbound one in a finally block so it runs even if the inbound call throws.
@Override public void handlerAdded(ChannelHandlerContext ctx) throws Exception { if (inboundHandler == null) { throw new IllegalStateException( "init() must be invoked before being added to a " + ChannelPipeline.class.getSimpleName() + " if " + CombinedChannelDuplexHandler.class.getSimpleName() + " was constructed with the default constructor."); } outboundCtx = new DelegatingChannelHandlerContext(ctx, outboundHandler); inboundCtx = new DelegatingChannelHandlerContext(ctx, inboundHandler) { @SuppressWarnings("deprecation") @Override public ChannelHandlerContext fireExceptionCaught(Throwable cause) { if (!outboundCtx.removed) { try { // We directly delegate to the ChannelOutboundHandler as this may override exceptionCaught(...) // as well outboundHandler.exceptionCaught(outboundCtx, cause); } catch (Throwable error) { if (logger.isDebugEnabled()) { logger.debug( "An exception {}" + "was thrown by a user handler's exceptionCaught() " + "method while handling the following exception:", ThrowableUtil.stackTraceToString(error), cause); } else if (logger.isWarnEnabled()) { logger.warn( "An exception '{}' [enable DEBUG level for full stacktrace] " + "was thrown by a user handler's exceptionCaught() " + "method while handling the following exception:", error, cause); } } } else { super.fireExceptionCaught(cause); } return this; } }; // The inboundCtx and outboundCtx were created and set now it's safe to call removeInboundHandler() and // removeOutboundHandler(). handlerAdded = true; try { inboundHandler.handlerAdded(inboundCtx); } finally { outboundHandler.handlerAdded(outboundCtx); } }
@Test
public void testInitNotCalledBeforeAdded() {
    // A handler built with the default constructor must be init()-ed before it
    // may join a pipeline; handlerAdded must therefore fail.
    final CombinedChannelDuplexHandler<ChannelInboundHandler, ChannelOutboundHandler> handler =
            new CombinedChannelDuplexHandler<ChannelInboundHandler, ChannelOutboundHandler>() {
            };
    assertThrows(IllegalStateException.class, () -> handler.handlerAdded(null));
}
public static Read read() {
    // Defaults: match every key ("*"), fetch in batches of 1000, and reshuffle
    // the output for parallel processing.
    AutoValue_RedisIO_Read.Builder builder = new AutoValue_RedisIO_Read.Builder();
    builder.setConnectionConfiguration(RedisConnectionConfiguration.create());
    builder.setKeyPattern("*");
    builder.setBatchSize(1000);
    builder.setOutputParallelization(true);
    return builder.build();
}
// Verifies key-pattern filtering: a matching glob ("pattern*") returns all seeded
// entries, while a non-matching glob ("foobar*") yields an empty PCollection.
@Test public void testReadWithKeyPattern() { List<KV<String, String>> data = buildIncrementalData("pattern", 10); data.forEach(kv -> client.set(kv.getKey(), kv.getValue())); PCollection<KV<String, String>> read = p.apply("Read", RedisIO.read().withEndpoint(REDIS_HOST, port).withKeyPattern("pattern*")); PAssert.that(read).containsInAnyOrder(data); PCollection<KV<String, String>> readNotMatch = p.apply( "ReadNotMatch", RedisIO.read().withEndpoint(REDIS_HOST, port).withKeyPattern("foobar*")); PAssert.thatSingleton(readNotMatch.apply(Count.globally())).isEqualTo(0L); p.run(); }
@Override public String getPath() { var fullPath = request.getRequestURI(); // it shouldn't be null, but in case it is, it's better to return empty string if (fullPath == null) { return Pac4jConstants.EMPTY_STRING; } // very strange use case if (fullPath.startsWith("//")) { fullPath = fullPath.substring(1); } val context = request.getContextPath(); // this one shouldn't be null either, but in case it is, then let's consider it is empty if (context != null) { return fullPath.substring(context.length()); } return fullPath; }
@Test public void testGetPathDoubleSlashFullpathContext() { when(request.getRequestURI()).thenReturn("/" + CTX_PATH); when(request.getContextPath()).thenReturn(CTX); WebContext context = new JEEContext(request, response); assertEquals(PATH, context.getPath()); }
static public boolean createMissingParentDirectories(File file) { File parent = file.getParentFile(); if (parent == null) { // Parent directory not specified, therefore it's a request to // create nothing. Done! ;) return true; } // File.mkdirs() creates the parent directories only if they don't // already exist; and it's okay if they do. parent.mkdirs(); return parent.exists(); }
@Test
public void createParentDirAcceptsNoParentSpecified() {
    // A bare filename has no parent component, so there is nothing to create
    // and the call must trivially succeed.
    final File noParent = new File("testing.txt");
    assertTrue(FileUtil.createMissingParentDirectories(noParent));
}
// Routes wizard-page button clicks: the languages/themes buttons open
// MainSettingsActivity via a deep-link VIEW intent, the all-settings button opens
// it directly and finishes this activity (no return to the wizard); any other
// view id is a programming error.
@Override public void onClick(View v) { final AppCompatActivity activity = (AppCompatActivity) requireActivity(); switch (v.getId()) { case R.id.go_to_languages_action: startActivity( new Intent( Intent.ACTION_VIEW, Uri.parse(requireContext().getString(R.string.deeplink_url_keyboards)), requireContext(), MainSettingsActivity.class)); break; case R.id.go_to_theme_action: startActivity( new Intent( Intent.ACTION_VIEW, Uri.parse(requireContext().getString(R.string.deeplink_url_themes)), requireContext(), MainSettingsActivity.class)); break; case R.id.go_to_all_settings_action: startActivity(new Intent(getContext(), MainSettingsActivity.class)); // not returning to this Activity any longer. activity.finish(); break; default: throw new IllegalArgumentException( "Failed to handle " + v.getId() + " in WizardPageDoneAndMoreSettingsFragment"); } }
// Verifies (via Robolectric shadows) that clicking the all-settings button fires
// an intent targeting MainSettingsActivity.
@Test public void testGoToAllSettingsOnClick() { final WizardPageDoneAndMoreSettingsFragment fragment = startFragment(); final ShadowApplication shadowApplication = Shadows.shadowOf((Application) getApplicationContext()); shadowApplication.clearNextStartedActivities(); final View clickView = fragment.getView().findViewById(R.id.go_to_all_settings_action); View.OnClickListener clickHandler = Shadows.shadowOf(clickView).getOnClickListener(); clickHandler.onClick(clickView); final Intent startIntent = shadowApplication.getNextStartedActivity(); Assert.assertNotNull(startIntent); assertChauffeurIntent( new Intent(fragment.requireContext(), MainSettingsActivity.class), startIntent); }
public static <K, V> Cache<K, V> eternal() {
    // An "eternal" cache is simply one whose byte budget is effectively unlimited.
    final long unlimitedBytes = Long.MAX_VALUE;
    return forMaximumBytes(unlimitedBytes);
}
// Verifies that clearing a ClearableCache removes only the entries it tracked
// (put/computeIfAbsent through the wrapper) while entries written directly to the
// parent cache survive.
@Test public void testClearableCacheClearing() { Cache<String, String> parent = Caches.eternal(); ClearableCache<String, String> cache = new ClearableCache<>(parent); parent.put("untracked", "untrackedValue"); parent.put("tracked", "parentValue"); cache.put("tracked", "parentValueNowTracked"); cache.computeIfAbsent("tracked2", (unused) -> "trackedValue2"); cache.clear(); assertNull(parent.peek("tracked")); assertNull(parent.peek("tracked2")); assertEquals("untrackedValue", parent.peek("untracked")); }
static IndexComponentFilter findBestComponentFilter( IndexType type, List<IndexComponentCandidate> candidates, QueryDataType converterType ) { // First look for equality filters, assuming that they are more selective than ranges IndexComponentFilter equalityComponentFilter = searchForEquality(candidates, converterType); if (equalityComponentFilter != null) { return equalityComponentFilter; } // Look for ranges filters return searchForRange(type, candidates, converterType); }
// Parameterized over index types: a range candidate is only usable on a SORTED
// index; HASH indexes must yield no filter (null).
@Test public void when_equalityAndRangeFilterPresentNoBetterChoiceAndSortedIndex_then_itIsUsed() { IndexComponentFilter bestFilter = IndexComponentFilterResolver.findBestComponentFilter( indexType, WITH_EQUALITY_AND_RANGE_FILTER_AS_BEST_CANDIDATES, QUERY_DATA_TYPE ); if (indexType == IndexType.SORTED) { assertEquals(bestFilter.getFilter(), EQUALITY_AND_RANGE_CANDIDATE.getFilter()); } else { assertNull(bestFilter); } }
// POSTs a range query to the Prometheus HTTP API (/api/v1/query_range) with
// start/end/step expressed as fractional seconds, then validates the response:
// non-200 status, unparsable/null body, non-"success" status, or a missing
// data/result section all raise IOException. Returns the per-series results.
public List<PrometheusQueryResult> queryMetric(String queryString, long startTimeMs, long endTimeMs) throws IOException { URI queryUri = URI.create(_prometheusEndpoint.toURI() + QUERY_RANGE_API_PATH); HttpPost httpPost = new HttpPost(queryUri); List<NameValuePair> data = new ArrayList<>(); data.add(new BasicNameValuePair(QUERY, queryString)); /* "start" and "end" are expected to be unix timestamp in seconds (number of seconds since the Unix epoch). They accept values with a decimal point (up to 64 bits). The samples returned are inclusive of the "end" timestamp provided. */ data.add(new BasicNameValuePair(START, String.valueOf((double) startTimeMs / SEC_TO_MS))); data.add(new BasicNameValuePair(END, String.valueOf((double) endTimeMs / SEC_TO_MS))); // step is expected to be in seconds, and accept values with a decimal point (up to 64 bits). data.add(new BasicNameValuePair(STEP, String.valueOf((double) _samplingIntervalMs / SEC_TO_MS))); httpPost.setEntity(new UrlEncodedFormEntity(data)); try (CloseableHttpResponse response = _httpClient.execute(httpPost)) { int responseCode = response.getStatusLine().getStatusCode(); HttpEntity entity = response.getEntity(); InputStream content = entity.getContent(); String responseString = IOUtils.toString(content, StandardCharsets.UTF_8); if (responseCode != HttpServletResponse.SC_OK) { throw new IOException(String.format("Received non-success response code on Prometheus API HTTP call," + " response code = %d, response body = %s", responseCode, responseString)); } PrometheusResponse prometheusResponse = GSON.fromJson(responseString, PrometheusResponse.class); if (prometheusResponse == null) { throw new IOException(String.format( "No response received from Prometheus API query, response body = %s", responseString)); } if (!SUCCESS.equals(prometheusResponse.status())) { throw new IOException(String.format( "Prometheus API query was not successful, response body = %s", responseString)); } if (prometheusResponse.data() == null || 
prometheusResponse.data().result() == null) { throw new IOException(String.format( "Response from Prometheus HTTP API is malformed, response body = %s", responseString)); } EntityUtils.consume(entity); return prometheusResponse.data().result(); } }
// Verifies that an HTTP 200 response whose body reports status "failure" is still
// rejected with an IOException by queryMetric.
@Test(expected = IOException.class) public void testFailureResponseWith200Code() throws Exception { this.serverBootstrap.registerHandler(PrometheusAdapter.QUERY_RANGE_API_PATH, new HttpRequestHandler() { @Override public void handle(HttpRequest request, HttpResponse response, HttpContext context) { response.setStatusCode(HttpServletResponse.SC_OK); response.setEntity(new StringEntity( "{\"status\": \"failure\", \"data\": {\"result\": []}}", StandardCharsets.UTF_8)); } }); HttpHost httpHost = this.start(); PrometheusAdapter prometheusAdapter = new PrometheusAdapter(this.httpclient, httpHost, SAMPLING_INTERVAL_MS); prometheusAdapter.queryMetric( "kafka_server_BrokerTopicMetrics_OneMinuteRate{name=\"BytesOutPerSec\",topic=\"\"}", START_TIME_MS, END_TIME_MS); }
@Override
public String getString(int rowIndex, int columnIndex) {
    // Aggregation result sets hold a single cell, so only (0, 0) is addressable.
    requireZeroIndex(columnIndex, "Column");
    requireZeroIndex(rowIndex, "Row");
    return _jsonObject.get("value").asText();
}

// Rejects any non-zero index with the conventional aggregation-result message.
private static void requireZeroIndex(int index, String kind) {
    if (index != 0) {
        throw new IllegalArgumentException(kind + " index must always be 0 for aggregation result sets");
    }
}
// Verifies that any column index other than 0 is rejected for aggregation results.
@Test(expectedExceptions = IllegalArgumentException.class) public void testGetStringForNonZeroColumn() { // Run the test _aggregationResultSetUnderTest.getString(0, 1); }
public List<SocketPortAllocation> getHostServerSocketPorts( String hostname ) {
    // Return a read-only view of the allocations for this host, or an empty
    // list when the host never had ports allocated; callers must not mutate it.
    final List<SocketPortAllocation> allocated = hostServerSocketPortsMap.get( hostname );
    if ( allocated == null ) {
      return Collections.emptyList();
    }
    return Collections.unmodifiableList( allocated );
}
@Test
public void getHostServerSocketPortsWithoutAllocatedPorts() {
    // No ports were ever allocated for TEST_HOST, so the result must be an
    // empty list rather than null.
    final List<SocketPortAllocation> ports = transformationMap.getHostServerSocketPorts( TEST_HOST );
    assertNotNull( ports );
    assertTrue( ports.isEmpty() );
}
// Reads Parquet footer metadata and builds per-column range statistics for the
// requested columns: block-level min/max/null-count/value-count/size stats are
// collected per column chunk, grouped by column name, and merged into one
// file-level HoodieColumnRangeMetadata per column via getColumnRangeInFile.
@Override public List<HoodieColumnRangeMetadata<Comparable>> readColumnStatsFromMetadata(HoodieStorage storage, StoragePath filePath, List<String> columnList) { ParquetMetadata metadata = readMetadata(storage, filePath); // Collect stats from all individual Parquet blocks Stream<HoodieColumnRangeMetadata<Comparable>> hoodieColumnRangeMetadataStream = metadata.getBlocks().stream().sequential().flatMap(blockMetaData -> blockMetaData.getColumns().stream() .filter(f -> columnList.contains(f.getPath().toDotString())) .map(columnChunkMetaData -> { Statistics stats = columnChunkMetaData.getStatistics(); return (HoodieColumnRangeMetadata<Comparable>) HoodieColumnRangeMetadata.<Comparable>create( filePath.getName(), columnChunkMetaData.getPath().toDotString(), convertToNativeJavaType( columnChunkMetaData.getPrimitiveType(), stats.genericGetMin()), convertToNativeJavaType( columnChunkMetaData.getPrimitiveType(), stats.genericGetMax()), // NOTE: In case when column contains only nulls Parquet won't be creating // stats for it instead returning stubbed (empty) object. In that case // we have to equate number of nulls to the value count ourselves stats.isEmpty() ? columnChunkMetaData.getValueCount() : stats.getNumNulls(), columnChunkMetaData.getValueCount(), columnChunkMetaData.getTotalSize(), columnChunkMetaData.getTotalUncompressedSize()); }) ); Map<String, List<HoodieColumnRangeMetadata<Comparable>>> columnToStatsListMap = hoodieColumnRangeMetadataStream.collect(Collectors.groupingBy(HoodieColumnRangeMetadata::getColumnName)); // Combine those into file-level statistics // NOTE: Inlining this var makes javac (1.8) upset (due to its inability to infer // expression type correctly) Stream<HoodieColumnRangeMetadata<Comparable>> stream = columnToStatsListMap.values() .stream() .map(this::getColumnRangeInFile); return stream.collect(Collectors.toList()); }
// End-to-end check: writes 1000 rows (every third data value nulled) to a real
// Parquet file, then verifies readColumnStatsFromMetadata reports correct
// min/max/null-count/total-count for the data, record-key, and partition columns.
@Test public void testReadColumnStatsFromMetadata() throws Exception { List<Pair<Pair<String, String>, Boolean>> valueList = new ArrayList<>(); String minKey = "z"; String maxKey = "0"; String minValue = "z"; String maxValue = "0"; int nullValueCount = 0; int totalCount = 1000; String partitionPath = "path1"; for (int i = 0; i < totalCount; i++) { boolean nullifyData = i % 3 == 0; String rowKey = UUID.randomUUID().toString(); String value = String.valueOf(i); valueList.add(Pair.of(Pair.of(rowKey, value), nullifyData)); minKey = (minKey.compareTo(rowKey) > 0) ? rowKey : minKey; maxKey = (maxKey.compareTo(rowKey) < 0) ? rowKey : maxKey; if (nullifyData) { nullValueCount++; } else { minValue = (minValue.compareTo(value) > 0) ? value : minValue; maxValue = (maxValue.compareTo(value) < 0) ? value : maxValue; } } String fileName = "test.parquet"; String filePath = new StoragePath(basePath, fileName).toString(); String recordKeyField = "id"; String partitionPathField = "partition"; String dataField = "data"; Schema schema = getSchema(recordKeyField, partitionPathField, dataField); BloomFilter filter = BloomFilterFactory .createBloomFilter(1000, 0.0001, 10000, BloomFilterTypeCode.SIMPLE.name()); HoodieAvroWriteSupport writeSupport = new HoodieAvroWriteSupport(new AvroSchemaConverter().convert(schema), schema, Option.of(filter), new Properties()); try (ParquetWriter writer = new ParquetWriter(new Path(filePath), writeSupport, CompressionCodecName.GZIP, 120 * 1024 * 1024, ParquetWriter.DEFAULT_PAGE_SIZE)) { valueList.forEach(entry -> { GenericRecord rec = new GenericData.Record(schema); rec.put(recordKeyField, entry.getLeft().getLeft()); rec.put(partitionPathField, partitionPath); if (entry.getRight()) { rec.put(dataField, null); } else { rec.put(dataField, entry.getLeft().getRight()); } try { writer.write(rec); } catch (IOException e) { throw new RuntimeException(e); } writeSupport.add(entry.getLeft().getLeft()); }); } List<String> columnList = new ArrayList<>(); 
columnList.add(recordKeyField); columnList.add(partitionPathField); columnList.add(dataField); List<HoodieColumnRangeMetadata<Comparable>> columnRangeMetadataList = parquetUtils.readColumnStatsFromMetadata( HoodieTestUtils.getStorage(filePath), new StoragePath(filePath), columnList) .stream() .sorted(Comparator.comparing(HoodieColumnRangeMetadata::getColumnName)) .collect(Collectors.toList()); assertEquals(3, columnRangeMetadataList.size(), "Should return column stats of 3 columns"); validateColumnRangeMetadata(columnRangeMetadataList.get(0), fileName, dataField, minValue, maxValue, nullValueCount, totalCount); validateColumnRangeMetadata(columnRangeMetadataList.get(1), fileName, recordKeyField, minKey, maxKey, 0, totalCount); validateColumnRangeMetadata(columnRangeMetadataList.get(2), fileName, partitionPathField, partitionPath, partitionPath, 0, totalCount); }
public InnerCNode getTree() throws CodegenRuntimeException {
    // Parse lazily on first access; parse/IO failures are wrapped in an
    // unchecked CodegenRuntimeException carrying the original cause.
    if (root == null) {
        try {
            parse();
        } catch (DefParserException | IOException e) {
            throw new CodegenRuntimeException("Error parsing or reading config definition." + e.getMessage(), e);
        }
    }
    return root;
}
// Disabled work-in-progress: once numeric leaf nodes carry their declared range,
// this should assert the parsed [min,max] bounds for int/long/double fields.
@Disabled //TODO: finish this! The numeric leaf nodes must contain their range. @Test void testRanges() { StringBuilder sb = new StringBuilder(); sb.append("i int range=[0,10]"); sb.append("l long range=[-1e20,0]"); sb.append("d double range=[0,1]"); DefParser parser = createParser(sb.toString()); CNode root = parser.getTree(); LeafCNode.IntegerLeaf intNode = (LeafCNode.IntegerLeaf) root.getChild("i"); }
@Override
public void onStateElection(Job job, JobState newState) {
    // Reschedule only failed jobs that are retryable and still under the limit.
    boolean notRetryable = isNotFailed(newState)
            || isJobNotFoundException(newState)
            || isProblematicExceptionAndMustNotRetry(newState)
            || maxAmountOfRetriesReached(job);
    if (notRetryable) {
        return;
    }
    // Back off before the retry and record which attempt this is.
    job.scheduleAt(now().plusSeconds(getSecondsToAdd(job)),
            String.format("Retry %d of %d", getFailureCount(job), getMaxNumberOfRetries(job)));
}
// Verifies that a failed job without a @Job retries annotation is still
// rescheduled (one extra state, ending in SCHEDULED) under the default policy.
@Test void retryFilterKeepsDefaultRetryFilterValueOf10IfRetriesOnJobAnnotationIsNotProvided() { final Job job = aJob() .<TestService>withJobDetails(ts -> ts.doWork()) .withState(new FailedState("a message", new RuntimeException("boom"))) .build(); applyDefaultJobFilter(job); int beforeVersion = job.getJobStates().size(); retryFilter.onStateElection(job, job.getJobState()); int afterVersion = job.getJobStates().size(); assertThat(afterVersion).isEqualTo(beforeVersion + 1); assertThat(job.getState()).isEqualTo(SCHEDULED); }
static ProtocolHandlerWithClassLoader load(ProtocolHandlerMetadata metadata,
                                           String narExtractionDirectory) throws IOException {
    // Build an isolated class loader over the NAR archive.
    File narFile = metadata.getArchivePath().toAbsolutePath().toFile();
    NarClassLoader classLoader = NarClassLoaderBuilder.builder()
            .narFile(narFile)
            .parentClassLoader(ProtocolHandler.class.getClassLoader())
            .extractionDirectory(narExtractionDirectory)
            .build();
    ProtocolHandlerDefinition definition = getProtocolHandlerDefinition(classLoader);
    if (StringUtils.isBlank(definition.getHandlerClass())) {
        throw new IOException("Protocol handler `" + definition.getName() + "` does NOT provide a protocol"
                + " handler implementation");
    }
    try {
        // Instantiate the declared handler class and verify it implements the SPI.
        Object handler = classLoader.loadClass(definition.getHandlerClass())
                .getDeclaredConstructor().newInstance();
        if (!(handler instanceof ProtocolHandler)) {
            throw new IOException("Class " + definition.getHandlerClass()
                    + " does not implement protocol handler interface");
        }
        return new ProtocolHandlerWithClassLoader((ProtocolHandler) handler, classLoader);
    } catch (Throwable t) {
        rethrowIOException(t);
        // Not reached in practice: rethrowIOException is expected to always throw.
        return null;
    }
}
// Verifies (with a mocked NarClassLoader) that a handler class which does not
// implement ProtocolHandler is rejected with an IOException by load().
@Test public void testLoadProtocolHandlerWrongHandlerClass() throws Exception { ProtocolHandlerDefinition def = new ProtocolHandlerDefinition(); def.setHandlerClass(Runnable.class.getName()); def.setDescription("test-protocol-handler"); String archivePath = "/path/to/protocol/handler/nar"; ProtocolHandlerMetadata metadata = new ProtocolHandlerMetadata(); metadata.setDefinition(def); metadata.setArchivePath(Paths.get(archivePath)); NarClassLoader mockLoader = mock(NarClassLoader.class); when(mockLoader.getServiceDefinition(eq(PULSAR_PROTOCOL_HANDLER_DEFINITION_FILE))) .thenReturn(ObjectMapperFactory.getYamlMapper().writer().writeValueAsString(def)); Class handlerClass = Runnable.class; when(mockLoader.loadClass(eq(Runnable.class.getName()))) .thenReturn(handlerClass); final NarClassLoaderBuilder mockedBuilder = mock(NarClassLoaderBuilder.class, RETURNS_SELF); when(mockedBuilder.build()).thenReturn(mockLoader); try (MockedStatic<NarClassLoaderBuilder> builder = Mockito.mockStatic(NarClassLoaderBuilder.class)) { builder.when(() -> NarClassLoaderBuilder.builder()).thenReturn(mockedBuilder); try { ProtocolHandlerUtils.load(metadata, ""); fail("Should not reach here"); } catch (IOException ioe) { // expected } } }
// Hands a task to a requesting child JVM. Unknown JVM ids get the "invalid JVM"
// kill task; JVMs that registered but were not yet launched get null (retry);
// launched JVMs receive their pending task exactly once — the task is removed
// from both maps so a second request for the same JVM is treated as invalid.
@Override public JvmTask getTask(JvmContext context) throws IOException { // A rough imitation of code from TaskTracker. JVMId jvmId = context.jvmId; LOG.info("JVM with ID : " + jvmId + " asked for a task"); JvmTask jvmTask = null; // TODO: Is it an authorized container to get a task? Otherwise return null. // TODO: Child.java's firstTaskID isn't really firstTaskID. Ask for update // to jobId and task-type. WrappedJvmID wJvmID = new WrappedJvmID(jvmId.getJobId(), jvmId.isMap, jvmId.getId()); // Try to look up the task. We remove it directly as we don't give // multiple tasks to a JVM if (!jvmIDToActiveAttemptMap.containsKey(wJvmID)) { LOG.info("JVM with ID: " + jvmId + " is invalid and will be killed."); jvmTask = TASK_FOR_INVALID_JVM; } else { if (!launchedJVMs.contains(wJvmID)) { jvmTask = null; LOG.info("JVM with ID: " + jvmId + " asking for task before AM launch registered. Given null task"); } else { // remove the task as it is no more needed and free up the memory. // Also we have already told the JVM to process a task, so it is no // longer pending, and further request should ask it to exit. org.apache.hadoop.mapred.Task task = jvmIDToActiveAttemptMap.remove(wJvmID); launchedJVMs.remove(wJvmID); LOG.info("JVM with ID: " + jvmId + " given task: " + task.getTaskID()); task.setEncryptedSpillKey(encryptedSpillKey); jvmTask = new JvmTask(task, false); } } return jvmTask; }
// Walks getTask through its full lifecycle: unknown JVM -> kill, registered but
// not launched -> null, launched -> task delivered once, repeat/unregistered ->
// kill; also sanity-checks JVMId.forName parsing and its malformed-input error.
@Test (timeout=5000) public void testGetTask() throws IOException { configureMocks(); startListener(false); // Verify ask before registration. //The JVM ID has not been registered yet so we should kill it. JvmContext context = new JvmContext(); context.jvmId = id; JvmTask result = listener.getTask(context); assertNotNull(result); assertTrue(result.shouldDie); // Verify ask after registration but before launch. // Don't kill, should be null. //Now put a task with the ID listener.registerPendingTask(task, wid); result = listener.getTask(context); assertNull(result); // Unregister for more testing. listener.unregister(attemptId, wid); // Verify ask after registration and launch //Now put a task with the ID listener.registerPendingTask(task, wid); listener.registerLaunchedTask(attemptId, wid); verify(hbHandler).register(attemptId); result = listener.getTask(context); assertNotNull(result); assertFalse(result.shouldDie); // Don't unregister yet for more testing. //Verify that if we call it again a second time we are told to die. result = listener.getTask(context); assertNotNull(result); assertTrue(result.shouldDie); listener.unregister(attemptId, wid); // Verify after unregistration. result = listener.getTask(context); assertNotNull(result); assertTrue(result.shouldDie); // test JVMID JVMId jvmid = JVMId.forName("jvm_001_002_m_004"); assertNotNull(jvmid); try { JVMId.forName("jvm_001_002_m_004_006"); fail(); } catch (IllegalArgumentException e) { assertThat(e.getMessage()).isEqualTo( "TaskId string : jvm_001_002_m_004_006 is not properly formed"); } }
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    // Default overload: delegate using the path delimiter as the listing delimiter.
    final String delimiter = String.valueOf(Path.DELIMITER);
    return this.list(directory, listener, delimiter);
}
// Verifies that object listing handles keys containing a '+' character (which is
// easily mangled by URL encoding) by creating, finding, and deleting such a file.
@Test public void testListFilePlusCharacter() throws Exception { final Path container = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume)); container.attributes().setRegion("us-east-1"); final Path placeholder = new GoogleStorageTouchFeature(session).touch( new Path(container, String.format("test+%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file)), new TransferStatus()); assertTrue(new GoogleStorageObjectListService(session).list(container, new DisabledListProgressListener()).contains(placeholder)); new GoogleStorageDeleteFeature(session).delete(Collections.singletonList(placeholder), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
// Map-based convenience overload: adapts the map to a key-lookup function and
// reuses the function-based parse. Note properties::get is bound eagerly, so a
// null map fails here rather than on first lookup.
public RuntimeOptionsBuilder parse(Map<String, String> properties) { return parse(properties::get); }
@Test
void should_silence_no_publish_quite_plugin() {
    // With publish-quiet enabled, parsing must not register any plugin.
    properties.put(Constants.PLUGIN_PUBLISH_QUIET_PROPERTY_NAME, "true");
    RuntimeOptions runtimeOptions = cucumberPropertiesParser.parse(properties).build();
    assertThat(runtimeOptions.plugins(), empty());
}
@Override
public Optional<SoamId> createDm(MdId mdName, MaIdShort maName, MepId mepId,
                                 DelayMeasurementCreate dmNew)
        throws CfmConfigException, SoamConfigException {
    // Resolve the device hosting this MEP and validate it can be programmed.
    DeviceId mepDeviceId = cfmMepService.getMep(mdName, maName, mepId).deviceId();
    if (mepDeviceId == null) {
        throw new CfmConfigException("Unable to create DM. MEP :" + mdName + "/"
                + maName + "/" + mepId + " does not exist");
    }
    if (deviceService.getDevice(mepDeviceId) == null) {
        throw new CfmConfigException("Device " + mepDeviceId + " from MEP :" + mdName
                + "/" + maName + "/" + mepId + " does not exist");
    }
    if (!deviceService.getDevice(mepDeviceId).is(SoamDmProgrammable.class)) {
        throw new CfmConfigException("Device " + mepDeviceId + " from MEP :" + mdName
                + "/" + maName + "/" + mepId + " does not implement SoamDmProgrammable");
    }
    log.debug("Creating new DM in MD {}, MA {}, MEP {} on Device {}",
            mdName, maName, mepId, mepDeviceId);
    // Delegate creation to the device's SOAM DM behaviour.
    return deviceService.getDevice(mepDeviceId)
            .as(SoamDmProgrammable.class)
            .createDm(mdName, maName, mepId, dmNew);
}
// Builds a driver/device WITHOUT the SoamDmProgrammable behaviour and asserts
// that createDm fails with the exact "does not implement SoamDmProgrammable"
// CfmConfigException message.
@Test public void testCreateDmNoBehavior() throws CfmConfigException, SoamConfigException { final DeviceId deviceId3 = DeviceId.deviceId("netconf:3.2.3.4:830"); final MepId mepId3 = MepId.valueOf((short) 3); Map<Class<? extends Behaviour>, Class<? extends Behaviour>> behaviours = new HashMap<>(); behaviours.put(DeviceDescriptionDiscovery.class, TestDeviceDiscoveryBehavior.class); Driver testDriver3 = new DefaultDriver( TEST_DRIVER_3, new ArrayList<Driver>(), TEST_MFR, TEST_HW_VERSION, TEST_SW_3, behaviours, new HashMap<>()); Device device3 = new DefaultDevice( ProviderId.NONE, deviceId3, Device.Type.SWITCH, TEST_MFR, TEST_HW_VERSION, TEST_SW_3, TEST_SN, new ChassisId(2), DefaultAnnotations.builder().set(AnnotationKeys.DRIVER, TEST_DRIVER_3).build()); expect(deviceService.getDevice(deviceId3)).andReturn(device3).anyTimes(); replay(deviceService); MepEntry mep3 = DefaultMepEntry.builder(mepId3, deviceId3, PortNumber.P0, Mep.MepDirection.UP_MEP, MDNAME1, MANAME1) .buildEntry(); expect(mepService.getMep(MDNAME1, MANAME1, mepId3)).andReturn(mep3).anyTimes(); replay(mepService); expect(driverService.getDriver(deviceId3)).andReturn(testDriver3).anyTimes(); replay(driverService); DelayMeasurementCreate dmCreate1 = DefaultDelayMeasurementCreate .builder(DelayMeasurementCreate.DmType.DM1DMTX, DelayMeasurementCreate.Version.Y17312011, MepId.valueOf((short) 11), Mep.Priority.PRIO3) .binsPerFdInterval((short) 4) .binsPerFdrInterval((short) 5) .binsPerIfdvInterval((short) 6) .build(); try { soamManager.createDm(MDNAME1, MANAME1, mepId3, dmCreate1); fail("Expecting exception since device does not support behavior"); } catch (CfmConfigException e) { assertEquals("Device netconf:3.2.3.4:830 from MEP :md-1/" + "ma-1-1/3 does not implement SoamDmProgrammable", e.getMessage()); } }
// Determines this instance's process id. Stateless apps get a fresh random id.
// Stateful apps must first lock the state directory (failing with a
// StreamsException if another instance holds it), then reuse the id persisted in
// the process file when it is readable; an unreadable/missing file yields a fresh
// id which is written back. Unexpected I/O errors become ProcessorStateException.
public UUID initializeProcessId() { if (!hasPersistentStores) { final UUID processId = UUID.randomUUID(); log.info("Created new process id: {}", processId); return processId; } if (!lockStateDirectory()) { log.error("Unable to obtain lock as state directory is already locked by another process"); throw new StreamsException(String.format("Unable to initialize state, this can happen if multiple instances of " + "Kafka Streams are running in the same state directory " + "(current state directory is [%s]", stateDir.getAbsolutePath())); } final File processFile = new File(stateDir, PROCESS_FILE_NAME); final ObjectMapper mapper = new ObjectMapper(); try { if (processFile.exists()) { try { final StateDirectoryProcessFile processFileData = mapper.readValue(processFile, StateDirectoryProcessFile.class); log.info("Reading UUID from process file: {}", processFileData.processId); if (processFileData.processId != null) { return processFileData.processId; } } catch (final Exception e) { log.warn("Failed to read json process file", e); } } final StateDirectoryProcessFile processFileData = new StateDirectoryProcessFile(UUID.randomUUID()); log.info("No process id found on disk, got fresh process id {}", processFileData.processId); mapper.writeValue(processFile, processFileData); return processFileData.processId; } catch (final IOException e) { log.error("Unable to read/write process file due to unexpected exception", e); throw new ProcessorStateException(e); } }
// Writes a bare UUID string (not valid JSON for the process file) and verifies
// that initializeProcessId falls back to generating a fresh, different id.
@Test public void shouldGetFreshProcessIdIfJsonUnreadable() throws Exception { final File processFile = new File(appDir, PROCESS_FILE_NAME); Files.createFile(processFile.toPath()); final UUID processId = UUID.randomUUID(); final FileOutputStream fileOutputStream = new FileOutputStream(processFile); try (final BufferedWriter writer = new BufferedWriter( new OutputStreamWriter(fileOutputStream, StandardCharsets.UTF_8))) { writer.write(processId.toString()); writer.flush(); fileOutputStream.getFD().sync(); } assertThat(directory.initializeProcessId(), not(processId)); }
// Renders the job configuration page; when no valid job is available the
// failure message is rendered as plain text instead.
public void conf() { try { requireJob(); } catch (Exception e) { renderText(e.getMessage()); return; } render(confPage()); }
// Verifies that conf() selects the JobConfPage view class for rendering.
@Test public void testConfiguration() { appController.conf(); assertEquals(JobConfPage.class, appController.getClazz()); }
private void setVariable(final Double variable) {
    // The model is immutable: swap it for a copy carrying the new variable
    // while keeping the current output unchanged.
    final CalculatorModel updated = new CalculatorModel(variable, model.getOutput());
    model = updated;
}
@Test
void testSetVariable() {
    // A single SetVariable action must store 10.0 without touching the output.
    CalculatorModel result = modelAfterExecutingActions(
            List.of(new SetVariableCalculatorAction(10.0)));
    assertEquals(10.0, result.getVariable());
    assertEquals(0, result.getOutput());
}
// Hash code consistent with equals across numeric types: integral values hash by
// their long bits and floating-point values by their double bits (Effective Java
// recipe), so e.g. (byte) 10 and 10L hash identically; null hashes to 31 and
// non-numeric values delegate to their own hashCode.
@Override public int hashCode() { if (value == null) { return 31; } // Using recommended hashing algorithm from Effective Java for longs and doubles if (isIntegral(this)) { long value = getAsNumber().longValue(); return (int) (value ^ (value >>> 32)); } if (value instanceof Number) { long value = Double.doubleToLongBits(getAsNumber().doubleValue()); return (int) (value ^ (value >>> 32)); } return value.hashCode(); }
// A byte 10 and a long 10 wrap the same integral value, so the primitives must be
// equal and must produce identical hash codes (equals/hashCode contract).
@Test
public void testByteEqualsLong() {
    JsonPrimitive p1 = new JsonPrimitive((byte) 10);
    JsonPrimitive p2 = new JsonPrimitive(10L);
    assertThat(p1).isEqualTo(p2);
    assertThat(p1.hashCode()).isEqualTo(p2.hashCode());
}
/**
 * Logs a standby-snapshot-notification cluster event into the shared ring buffer.
 * <p>
 * The event is claimed, encoded in place, then committed; if the buffer has no
 * space ({@code tryClaim} returns a non-positive index) the event is silently
 * dropped, which is the intended best-effort behavior for event logging.
 *
 * @param memberId cluster member that took the snapshot
 * @param recordingId archive recording id of the snapshot
 * @param leadershipTermId leadership term at snapshot time
 * @param termBaseLogPosition base log position of the term
 * @param logPosition log position of the snapshot
 * @param timestamp snapshot timestamp
 * @param timeUnit unit of the timestamp
 * @param serviceId clustered service id
 * @param archiveEndpoint endpoint of the archive holding the snapshot
 */
public void logStandbySnapshotNotification(
    final int memberId,
    final long recordingId,
    final long leadershipTermId,
    final long termBaseLogPosition,
    final long logPosition,
    final long timestamp,
    final TimeUnit timeUnit,
    final int serviceId,
    final String archiveEndpoint)
{
    final int length = ClusterEventEncoder.standbySnapshotNotificationLength(timeUnit, archiveEndpoint);
    final int captureLength = captureLength(length);
    final int encodedLength = encodedLength(captureLength);
    final ManyToOneRingBuffer ringBuffer = this.ringBuffer;
    final int index = ringBuffer.tryClaim(STANDBY_SNAPSHOT_NOTIFICATION.toEventCodeId(), encodedLength);
    if (index > 0)
    {
        try
        {
            ClusterEventEncoder.encodeStandbySnapshotNotification(
                (UnsafeBuffer)ringBuffer.buffer(),
                index,
                captureLength,
                length,
                memberId,
                recordingId,
                leadershipTermId,
                termBaseLogPosition,
                logPosition,
                timestamp,
                timeUnit,
                serviceId,
                archiveEndpoint);
        }
        finally
        {
            // Commit must happen even if encoding throws, otherwise the claimed
            // slot would block subsequent readers of the ring buffer.
            ringBuffer.commit(index);
        }
    }
}
// Encodes a standby-snapshot-notification event into the log buffer and verifies the
// header, the exact field layout (longs, ints, then ASCII time unit and endpoint),
// and that the dissector renders the expected human-readable message.
@Test
void logStandbySnapshotNotification()
{
    final int memberId = 222;
    final long recordingId = 9823674L;
    final long leadershipTermId = 23478L;
    final long termBaseLogPosition = 823423L;
    final long logPosition = 9827342L;
    final long timestamp = 98273423434L;
    final int serviceId = 1;
    final String archiveEndpoint = "localhost:9090";
    final int offset = 64;
    final TimeUnit timeUnit = MILLISECONDS;
    // Position the ring buffer tail so the event is written at a known offset.
    logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, offset);

    logger.logStandbySnapshotNotification(
        memberId,
        recordingId,
        leadershipTermId,
        termBaseLogPosition,
        logPosition,
        timestamp,
        timeUnit,
        serviceId,
        archiveEndpoint);

    verifyLogHeader(
        logBuffer,
        offset,
        STANDBY_SNAPSHOT_NOTIFICATION.toEventCodeId(),
        standbySnapshotNotificationLength(timeUnit, archiveEndpoint),
        standbySnapshotNotificationLength(timeUnit, archiveEndpoint));

    // Walk the encoded payload field by field in declaration order.
    final int index = encodedMsgOffset(offset) + LOG_HEADER_LENGTH;
    assertEquals(recordingId, logBuffer.getLong(index, LITTLE_ENDIAN));
    assertEquals(leadershipTermId, logBuffer.getLong(index + SIZE_OF_LONG, LITTLE_ENDIAN));
    assertEquals(termBaseLogPosition, logBuffer.getLong(index + (2 * SIZE_OF_LONG), LITTLE_ENDIAN));
    assertEquals(logPosition, logBuffer.getLong(index + (3 * SIZE_OF_LONG), LITTLE_ENDIAN));
    assertEquals(timestamp, logBuffer.getLong(index + (4 * SIZE_OF_LONG), LITTLE_ENDIAN));
    assertEquals(memberId, logBuffer.getInt(index + (5 * SIZE_OF_LONG), LITTLE_ENDIAN));
    assertEquals(serviceId, logBuffer.getInt(index + (5 * SIZE_OF_LONG) + (SIZE_OF_INT), LITTLE_ENDIAN));
    final int timeUnitIndex = index + (5 * SIZE_OF_LONG) + (2 * SIZE_OF_INT);
    assertEquals(timeUnit.name(), logBuffer.getStringAscii(timeUnitIndex));
    final int archiveEndpointIndex = timeUnitIndex + SIZE_OF_INT + timeUnit.name().length();
    assertEquals(archiveEndpoint, logBuffer.getStringAscii(archiveEndpointIndex, LITTLE_ENDIAN));

    // Round-trip: the dissector output must match the canonical message pattern.
    final StringBuilder sb = new StringBuilder();
    ClusterEventDissector.dissectStandbySnapshotNotification(
        STANDBY_SNAPSHOT_NOTIFICATION, logBuffer, encodedMsgOffset(offset), sb);

    final String expectedMessagePattern =
        "\\[[0-9]+\\.[0-9]+] CLUSTER: STANDBY_SNAPSHOT_NOTIFICATION \\[90/90]: memberId=222 " +
        "recordingId=9823674 leadershipTermId=23478 termBaseLeadershipPosition=823423 logPosition=9827342 " +
        "timestamp=98273423434 timeUnit=MILLISECONDS serviceId=1 archiveEndpoint=localhost:9090";
    assertThat(sb.toString(), Matchers.matchesPattern(expectedMessagePattern));
}
/**
 * Selects {@code count} preferred workers for the given file via consistent hashing.
 * <p>
 * Workers chosen by the hash ring that are no longer present in the caller's
 * cluster view are skipped (the view may be stale), so the returned list can
 * contain fewer than {@code count} entries.
 *
 * @param workerClusterView the caller's current view of the worker cluster
 * @param fileId the id of the file used as the hash key
 * @param count the number of workers requested
 * @return the chosen workers, at most {@code count}
 * @throws ResourceExhaustedException if the cluster or the hash ring holds
 *         fewer than {@code count} workers
 */
@Override
public List<BlockWorkerInfo> getPreferredWorkers(WorkerClusterView workerClusterView,
    String fileId, int count) throws ResourceExhaustedException {
  if (workerClusterView.size() < count) {
    throw new ResourceExhaustedException(String.format(
        "Not enough workers in the cluster %d workers in the cluster but %d required",
        workerClusterView.size(), count));
  }
  Set<WorkerIdentity> workerIdentities = workerClusterView.workerIds();
  // Refresh the ring so membership changes are reflected before hashing.
  mHashProvider.refresh(workerIdentities);
  List<WorkerIdentity> workers = mHashProvider.getMultiple(fileId, count);
  if (workers.size() != count) {
    throw new ResourceExhaustedException(String.format(
        "Found %d workers from the hash ring but %d required", workers.size(), count));
  }
  ImmutableList.Builder<BlockWorkerInfo> builder = ImmutableList.builder();
  for (WorkerIdentity worker : workers) {
    Optional<WorkerInfo> optionalWorkerInfo = workerClusterView.getWorkerById(worker);
    final WorkerInfo workerInfo;
    if (optionalWorkerInfo.isPresent()) {
      workerInfo = optionalWorkerInfo.get();
    } else {
      // the worker returned by the policy does not exist in the cluster view
      // supplied by the client.
      // this can happen when the membership changes and some callers fail to update
      // to the latest worker cluster view.
      // in this case, just skip this worker
      LOG.debug("Inconsistency between caller's view of cluster and that of "
          + "the consistent hash policy's: worker {} selected by policy does not exist in "
          + "caller's view {}. Skipping this worker.",
          worker, workerClusterView);
      continue;
    }
    BlockWorkerInfo blockWorkerInfo = new BlockWorkerInfo(
        worker, workerInfo.getAddress(), workerInfo.getCapacityBytes(),
        workerInfo.getUsedBytes(), workerInfo.getState() == WorkerState.LIVE
    );
    builder.add(blockWorkerInfo);
  }
  List<BlockWorkerInfo> infos = builder.build();
  return infos;
}
// Asks the jump-hash policy for 2 distinct workers out of a 2-worker cluster and
// verifies both are returned and differ; also verifies that asking for more workers
// than the cluster holds fails with ResourceExhaustedException.
@Test
public void getMultipleWorkers() throws Exception {
  WorkerLocationPolicy policy = WorkerLocationPolicy.Factory.create(mConf);
  assertTrue(policy instanceof JumpHashPolicy);
  // Prepare a worker list
  WorkerClusterView workers = new WorkerClusterView(Arrays.asList(
      new WorkerInfo()
          .setIdentity(WorkerIdentityTestUtils.ofLegacyId(1))
          .setAddress(new WorkerNetAddress()
              .setHost("master1").setRpcPort(29998).setDataPort(29999).setWebPort(30000))
          .setCapacityBytes(1024)
          .setUsedBytes(0),
      new WorkerInfo()
          .setIdentity(WorkerIdentityTestUtils.ofLegacyId(2))
          .setAddress(new WorkerNetAddress()
              .setHost("master2").setRpcPort(29998).setDataPort(29999).setWebPort(30000))
          .setCapacityBytes(1024)
          .setUsedBytes(0)));
  List<BlockWorkerInfo> assignedWorkers = policy.getPreferredWorkers(workers, "hdfs://a/b/c", 2);
  assertEquals(2, assignedWorkers.size());
  assertTrue(assignedWorkers.stream().allMatch(w -> contains(workers, w)));
  // The two workers should be different
  assertNotEquals(assignedWorkers.get(0).getNetAddress().getHost(),
      assignedWorkers.get(1).getNetAddress().getHost());

  assertThrows(ResourceExhaustedException.class, () -> {
    // Getting 2 out of 1 worker will result in an error
    policy.getPreferredWorkers(
        new WorkerClusterView(Arrays.asList(
            new WorkerInfo()
                .setIdentity(WorkerIdentityTestUtils.ofLegacyId(1))
                .setAddress(new WorkerNetAddress()
                    .setHost("master1").setRpcPort(29998).setDataPort(29999).setWebPort(30000))
                .setCapacityBytes(1024)
                .setUsedBytes(0))),
        "hdfs://a/b/c", 2);
  });
}
/**
 * Auto-track hook invoked when a BroadcastReceiver receives an intent.
 * Only the intent is forwarded for tracking; the {@code receiver} and
 * {@code context} parameters are intentionally unused here.
 *
 * @param receiver the receiver being invoked (unused)
 * @param context the receiving context (unused)
 * @param intent the broadcast intent to inspect
 */
public static void onBroadcastReceiver(BroadcastReceiver receiver, Context context, Intent intent) {
    onBroadcastServiceIntent(intent);
}
// Passes a null intent through the hook to confirm it is null-safe
// (no assertion needed: the test passes if no exception is thrown).
@Test
public void onBroadcastReceiver() {
    BroadcastReceiver broadcastReceiver = new BroadcastReceiver() {
        @Override
        public void onReceive(Context context, Intent intent) {

        }
    };
    PushAutoTrackHelper.onBroadcastReceiver(broadcastReceiver, mApplication, null);
}
/**
 * Loads a session by id from the cache.
 *
 * @param sessionId the id to look up
 * @return the session, or {@code null} when it is missing or has outlived
 *     the configured time-to-live
 */
@Nullable
@Override
public Session load(@NonNull String sessionId) {
  final var cached = store.getIfPresent(sessionId);
  if (cached == null) {
    return null;
  }
  // Enforce the TTL ourselves as well, in case the cache has not evicted yet.
  final var expiresAt = cached.createdAt().plus(timeToLive);
  return expiresAt.isBefore(Instant.now()) ? null : cached;
}
// Saves 100 sessions into a real Caffeine cache with a 5-minute TTL and verifies
// each can be loaded back with all of its fields intact before expiry.
@Test
void load_realCache() {
  var ttl = Duration.ofMinutes(5);
  Cache<String, Session> cache =
      Caffeine.newBuilder()
          .expireAfter(new AfterCreatedExpiry(ttl.toNanos()))
          .maximumSize(1000)
          .build();
  var sut = new CaffeineSessionRepo(cache, ttl);

  var state = "myState";
  var nonce = UUID.randomUUID().toString();
  var redirectUri = URI.create("https://example.com/callback");
  var clientId = "app";

  var sesionIds = IntStream.range(0, 100).mapToObj(Integer::toString).toList();
  sesionIds.stream()
      .map(
          i ->
              Session.create()
                  .id(i)
                  .state(state)
                  .nonce(nonce)
                  .redirectUri(redirectUri)
                  .clientId(clientId)
                  .build())
      .forEach(sut::save);

  sesionIds.forEach(
      id -> {
        // when
        var got = sut.load(id);

        // then
        assertNotNull(got);
        assertEquals(id, got.id());
        assertEquals(state, got.state());
        assertEquals(redirectUri, got.redirectUri());
        assertEquals(clientId, got.clientId());
      });
}
/**
 * Stores the value under the key with the given time-to-live, blocking until
 * the async operation completes.
 *
 * @param key the map key
 * @param value the value to store
 * @param ttl how long the entry remains before expiring
 * @return the previous value associated with the key, or {@code null} if none
 */
@Override
public V put(K key, V value, Duration ttl) {
    return get(putAsync(key, value, ttl));
}
// Puts one entry with a 60-second TTL and verifies values() reflects it.
@Test
public void testCacheValues() {
    final RMapCacheNative<String, String> map = redisson.getMapCacheNative("testRMapCacheValues");
    map.put("1234", "5678", Duration.ofSeconds(60));
    assertThat(map.values()).containsOnly("5678");
    map.destroy();
}
/**
 * Re-buckets this metric into its day-granularity counterpart, copying the
 * entity, service and labeled values while switching the time bucket to days.
 *
 * @return a new day-level metric carrying the same values
 */
@Override
public Metrics toDay() {
    final SumLabeledFunction dayMetrics = (SumLabeledFunction) createNew();
    dayMetrics.setEntityId(getEntityId());
    // Only the time bucket changes granularity; all other fields carry over.
    dayMetrics.setTimeBucket(toTimeBucketInDay());
    dayMetrics.setServiceId(getServiceId());
    dayMetrics.getValue().copyFrom(getValue());
    return dayMetrics;
}
// Accepts two labeled samples, rolls them down to a day bucket, and verifies the
// merged per-label sums survive the conversion.
@Test
public void testToDay() {
    function.accept(
        MeterEntity.newService("service-test", Layer.GENERAL),
        HTTP_CODE_COUNT_1
    );
    function.accept(
        MeterEntity.newService("service-test", Layer.GENERAL),
        HTTP_CODE_COUNT_2
    );
    function.calculate();
    final SumLabeledFunction dayFunction = (SumLabeledFunction) function.toDay();
    dayFunction.calculate();
    Assertions.assertEquals(dayFunction.getValue(), new DataTable("200,3|301,2|404,7|502,9|505,1"));
}
/**
 * Extracts the MapReduce job tracking URL from Hive log output.
 *
 * @param log the log text to search
 * @return the tracking URL captured by {@code JOBURL_PATTERN}, or empty if
 *     the log does not match
 */
static Optional<String> extractMRJobURL(String log) {
  final Matcher matcher = JOBURL_PATTERN.matcher(log);
  // matches() requires the whole log to fit the pattern (the pattern is
  // written to span the surrounding text).
  if (!matcher.matches()) {
    return Optional.empty();
  }
  return Optional.of(matcher.group(1));
}
// Feeds a realistic multi-line Hive log and verifies the tracking URL is extracted.
@Test
public void testJobURL() {
  Optional<String> jobURL =
      HiveUtils.extractMRJobURL(
          "INFO  : The url to track the job: "
              + "http://localhost:8088/proxy/application_1591195707498_0064/\n"
              + "INFO  : Starting Job = job_1591195707498_0064, "
              + "Tracking URL = http://localhost:8088/proxy/application_1591195707498_0064/\n"
              + "INFO  : Kill Command = /Users/abc/Java/lib/hadoop-2.7.7/bin/hadoop job "
              + " -kill job_1591195707498_0064");
  assertTrue(jobURL.isPresent());
  assertEquals("http://localhost:8088/proxy/application_1591195707498_0064/", jobURL.get());
}
/**
 * Appends an entry to the stream, blocking until the async operation completes.
 *
 * @param args the entry (field/value pairs and options) to append
 * @return the id assigned to the new stream message
 */
@Override
public StreamMessageId add(StreamAddArgs<K, V> args) {
    return get(addAsync(args));
}
// Adds a single entry and verifies a valid (non-negative) message id is assigned
// and the stream size grows to 1.
@Test
public void testAdd() {
    RStream<String, String> stream = redisson.getStream("test1");
    StreamMessageId s = stream.add(StreamAddArgs.entry("12", "33"));
    assertThat(s.getId0()).isNotNegative();
    assertThat(s.getId1()).isNotNegative();
    assertThat(stream.size()).isEqualTo(1);
}
/**
 * Entry point for a single formatter invocation.
 * <p>
 * Handles {@code --version} (prints and exits 0) and {@code --help} (signals
 * usage), then formats either stdin or the listed files with the options
 * derived from the command line.
 *
 * @param args command-line arguments
 * @return the process exit code
 * @throws UsageException if help was requested or arguments are unusable
 */
public int format(String... args) throws UsageException {
  final CommandLineOptions parameters = processArgs(args);
  if (parameters.version()) {
    errWriter.println(versionString());
    return 0;
  }
  if (parameters.help()) {
    throw new UsageException();
  }

  final Style style = parameters.aosp() ? Style.AOSP : Style.GOOGLE;
  final JavaFormatterOptions options =
      JavaFormatterOptions.builder()
          .style(style)
          .formatJavadoc(parameters.formatJavadoc())
          .build();

  return parameters.stdin() ? formatStdin(parameters, options) : formatFiles(parameters, options);
}
// --help must raise UsageException whose message contains the doc link, the usage
// line, a sample flag with its description, and the stdout note.
@Test
public void testUsageOutput() {
  StringWriter out = new StringWriter();
  StringWriter err = new StringWriter();
  Main main = new Main(new PrintWriter(out, true), new PrintWriter(err, true), System.in);

  try {
    main.format("--help");
    throw new AssertionError("Expected UsageException to be thrown");
  } catch (UsageException e) {

    String usage = e.getMessage();

    // Check that doc links are included.
    assertThat(usage).contains("https://github.com/google/google-java-format");
    assertThat(usage).contains("Usage: google-java-format");

    // Sanity check that a flag and description is in included.
    assertThat(usage).contains("--length");
    assertThat(usage).contains("Character length to format.");

    // Check that some of the additional text is included.
    assertThat(usage).contains("the result is sent to stdout");
  }
}
/**
 * Validates a component qualifier: it must be non-empty and no longer than
 * {@code MAX_COMPONENT_QUALIFIER_LENGTH}.
 *
 * @param qualifier the qualifier to validate
 * @return the same qualifier, for call chaining
 * @throws IllegalArgumentException if the qualifier is null, empty, or too long
 */
public static String checkComponentQualifier(String qualifier) {
    checkArgument(!isNullOrEmpty(qualifier), "Component qualifier can't be empty");
    checkArgument(qualifier.length() <= MAX_COMPONENT_QUALIFIER_LENGTH,
        "Component qualifier length (%s) is longer than the maximum authorized (%s). '%s' was provided.",
        qualifier.length(), MAX_COMPONENT_QUALIFIER_LENGTH, qualifier);
    return qualifier;
}
// A short valid qualifier passes validation and is returned unchanged.
@Test
void check_qualifier() {
  String qualifier = repeat("a", 10);

  assertThat(ComponentValidator.checkComponentQualifier(qualifier)).isEqualTo(qualifier);
}
protected Object[] copyOrCloneArrayFromLoadFile( Object[] outputRowData, Object[] readrow ) { // if readrow array is shorter than outputRowData reserved space, then we can not clone it because we have to // preserve the outputRowData reserved space. Clone, creates a new array with a new length, equals to the // readRow length and with that set we lost our outputRowData reserved space - needed for future additions. // The equals case works in both clauses, but arraycopy is up to 5 times faster for smaller arrays. if ( readrow.length <= outputRowData.length ) { System.arraycopy( readrow, 0, outputRowData, 0, readrow.length ); } else { // if readrow array is longer than outputRowData reserved space, then we can only clone it. // Copy does not work here and will return an error since we are trying to copy a bigger array into a shorter one. outputRowData = readrow.clone(); } return outputRowData; }
// A read row shorter than the output row must be copied in place, so the result
// keeps the output row's full length (5), preserving its reserved slots.
@Test
public void testCopyOrCloneArrayFromLoadFileWithSmallerSizedReadRowArray() {
  int size = 5;
  Object[] rowData = new Object[ size ];
  Object[] readrow = new Object[ size - 1 ];
  LoadFileInput loadFileInput = mock( LoadFileInput.class );
  Mockito.when( loadFileInput.copyOrCloneArrayFromLoadFile( rowData, readrow ) ).thenCallRealMethod();
  assertEquals( 5, loadFileInput.copyOrCloneArrayFromLoadFile( rowData, readrow ).length );
}
/**
 * Returns the chunk at the given index, lazily reading and caching chunks
 * sequentially up to that index on first access.
 * <p>
 * Uses a check / lock / re-check pattern: the write lock is taken only when
 * the requested index has not been read yet, and the condition is re-checked
 * under the lock so concurrent readers fill each slot exactly once.
 *
 * @param index zero-based chunk index
 * @return the cached chunk, or {@code null} if the index is past the last chunk
 * @throws IOException if reading a chunk from the underlying source fails
 */
@Nullable
public DataBuffer readChunk(int index) throws IOException {
  if (index >= mDataBuffers.length) {
    return null;
  }
  if (index >= mBufferCount.get()) {
    try (LockResource ignored = new LockResource(mBufferLocks.writeLock())) {
      // Re-check under the lock: another thread may have filled the slots already.
      while (index >= mBufferCount.get()) {
        DataBuffer buffer = readChunk();
        mDataBuffers[mBufferCount.get()] = buffer;
        mBufferCount.incrementAndGet();
      }
    }
  }
  return mDataBuffers[index];
}
// Indices at or past the chunk count must return null without triggering any reads.
@Test
public void testOutOfBound() throws Exception {
  int chunkNum = BLOCK_SIZE / CHUNK_SIZE + 1;
  for (int i = chunkNum; i < chunkNum + 10; i++) {
    DataBuffer buffer = mDataReader.readChunk(i);
    Assert.assertEquals(0, mDataReader.getReadChunkNum());
    Assert.assertNull(buffer);
  }
}
/**
 * Ensures a user with the given attributes and roles exists, creating or
 * updating as needed.
 * <p>
 * Delegates to the full overload with a final {@code false} argument —
 * presumably "not a service account" or similar flag; confirm against the
 * overload's signature.
 *
 * @return the user's id, or {@code null} if nothing was saved
 */
@Nullable
public String ensureUser(String userName, String password, String firstName, String lastName, String email,
                         Set<String> expectedRoles) {
    return ensureUser(userName, password, firstName, lastName, email, expectedRoles, false);
}
// When the user does not exist yet, ensureUser creates it with the given
// attributes, self-edit permissions, the requested roles and the UTC timezone.
@Test
public void ensureUser() throws Exception {
    final Permissions permissions = new Permissions(ImmutableSet.of());
    when(userService.load("test-user")).thenReturn(null);
    when(userService.create()).thenReturn(newUser(permissions));
    when(userService.save(any(User.class))).thenReturn("new-id");

    assertThat(migrationHelpers.ensureUser("test-user", "pass",
            "Test", "User", "test@example.com",
            ImmutableSet.of("54e3deadbeefdeadbeef0001", "54e3deadbeefdeadbeef0002")))
            .isEqualTo("new-id");

    final ArgumentCaptor<User> userArg = ArgumentCaptor.forClass(User.class);
    verify(userService, times(1)).save(userArg.capture());

    assertThat(userArg.getValue()).satisfies(user -> {
        assertThat(user.getName()).describedAs("user name").isEqualTo("test-user");
        assertThat(user.getFullName()).describedAs("user full-name").isEqualTo("Test User");
        assertThat(user.getHashedPassword()).describedAs("user hashed password").isNotBlank();
        assertThat(user.getEmail()).describedAs("user email").isEqualTo("test@example.com");
        assertThat(user.isReadOnly()).describedAs("user is read-only").isFalse();
        assertThat(user.getPermissions()).describedAs("user permissions")
                .containsOnlyElementsOf(permissions.userSelfEditPermissions("test-user"));
        assertThat(user.getRoleIds()).describedAs("user roles").containsOnly(
                "54e3deadbeefdeadbeef0001", "54e3deadbeefdeadbeef0002"
        );
        assertThat(user.getTimeZone()).describedAs("user timezone").isEqualTo(DateTimeZone.UTC);
    });
}
/**
 * Initializes the catalog by validating the configuration and delegating to
 * the wrapped session catalog.
 *
 * @param name catalog name
 * @param props catalog configuration properties; must not be null
 * @throws IllegalArgumentException if {@code props} is null
 */
@Override
public void initialize(String name, Map<String, String> props) {
  Preconditions.checkArgument(props != null, "Invalid configuration: null");
  sessionCatalog.initialize(name, props);
}
// A non-positive REST page size must be rejected at initialization with a clear message.
@Test
public void testInvalidPageSize() {
  RESTCatalogAdapter adapter = Mockito.spy(new RESTCatalogAdapter(backendCatalog));
  RESTCatalog catalog =
      new RESTCatalog(SessionCatalog.SessionContext.createEmpty(), (config) -> adapter);
  assertThatThrownBy(
          () ->
              catalog.initialize(
                  "test", ImmutableMap.of(RESTSessionCatalog.REST_PAGE_SIZE, "-1")))
      .isInstanceOf(IllegalArgumentException.class)
      .hasMessage(
          String.format(
              "Invalid value for %s, must be a positive integer",
              RESTSessionCatalog.REST_PAGE_SIZE));
}
/**
 * Authenticates against the WebDAV server.
 * <p>
 * Registers NTLM/SPNEGO credentials (splitting a {@code DOMAIN\\user} username
 * if present) and Basic/Digest/Kerberos credentials, optionally enabling
 * preemptive Basic auth. Credentials are then verified with a HEAD request on
 * the home path; several failure statuses trigger fallbacks (PROPFIND listing
 * when HEAD is unsupported, retry without preemptive auth on 400).
 *
 * @param prompt callback for interactive login (unused directly here)
 * @param cancel callback checked between retries so the user can abort
 * @throws BackgroundException on unrecoverable authentication or IO failure
 */
@Override
public void login(final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException {
    final Credentials credentials = host.getCredentials();
    if(host.getProtocol().isPasswordConfigurable()) {
        final String domain, username;
        if(credentials.getUsername().contains("\\")) {
            // Split a DOMAIN\user style username for NTLM.
            domain = StringUtils.substringBefore(credentials.getUsername(), "\\");
            username = StringUtils.substringAfter(credentials.getUsername(), "\\");
        }
        else {
            username = credentials.getUsername();
            domain = new HostPreferences(host).getProperty("webdav.ntlm.domain");
        }
        for(String scheme : Arrays.asList(AuthSchemes.NTLM, AuthSchemes.SPNEGO)) {
            client.setCredentials(
                    new AuthScope(AuthScope.ANY_HOST, AuthScope.ANY_PORT, AuthScope.ANY_REALM, scheme),
                    new NTCredentials(username, credentials.getPassword(),
                            preferences.getProperty("webdav.ntlm.workstation"), domain)
            );
        }
        for(String scheme : Arrays.asList(AuthSchemes.BASIC, AuthSchemes.DIGEST, AuthSchemes.KERBEROS)) {
            client.setCredentials(
                    new AuthScope(AuthScope.ANY_HOST, AuthScope.ANY_PORT, AuthScope.ANY_REALM, scheme),
                    new UsernamePasswordCredentials(username, credentials.getPassword()));
        }
        if(preferences.getBoolean("webdav.basic.preemptive")) {
            client.enablePreemptiveAuthentication(host.getHostname(),
                    host.getPort(),
                    host.getPort(),
                    Charset.forName(preferences.getProperty("http.credentials.charset"))
            );
        }
        else {
            client.disablePreemptiveAuthentication();
        }
    }
    if(credentials.isPassed()) {
        // Authentication already succeeded earlier in this session; skip the probe.
        if(log.isWarnEnabled()) {
            log.warn(String.format("Skip verifying credentials with previous successful authentication event for %s", this));
        }
        return;
    }
    try {
        final Path home = new DelegatingHomeFeature(new WorkdirHomeFeature(host), new DefaultPathHomeFeature(host)).find();
        final HttpHead head = new HttpHead(new DAVPathEncoder().encode(home));
        try {
            client.execute(head, new MicrosoftIISFeaturesResponseHandler(capabilities));
        }
        catch(SardineException e) {
            switch(e.getStatusCode()) {
                case HttpStatus.SC_NOT_FOUND:
                    // Home path missing is not an auth failure; credentials were accepted.
                    if(log.isWarnEnabled()) {
                        log.warn(String.format("Ignore failure %s", e));
                    }
                    break;
                case HttpStatus.SC_NOT_IMPLEMENTED:
                case HttpStatus.SC_FORBIDDEN:
                case HttpStatus.SC_UNSUPPORTED_MEDIA_TYPE:
                case HttpStatus.SC_METHOD_NOT_ALLOWED:
                    if(log.isWarnEnabled()) {
                        log.warn(String.format("Failed HEAD request to %s with %s. Retry with PROPFIND.",
                                host, e.getResponsePhrase()));
                    }
                    cancel.verify();
                    // Possibly only HEAD requests are not allowed
                    final ListService list = this.getFeature(ListService.class);
                    list.list(home, new DisabledListProgressListener() {
                        @Override
                        public void chunk(final Path parent, final AttributedList<Path> list) throws ListCanceledException {
                            try {
                                cancel.verify();
                            }
                            catch(ConnectionCanceledException e) {
                                throw new ListCanceledException(list, e);
                            }
                        }
                    });
                    break;
                case HttpStatus.SC_BAD_REQUEST:
                    // Some servers reject preemptive Basic auth; retry without it once.
                    if(preferences.getBoolean("webdav.basic.preemptive")) {
                        if(log.isWarnEnabled()) {
                            log.warn(String.format("Disable preemptive authentication for %s due to failure %s",
                                    host, e.getResponsePhrase()));
                        }
                        cancel.verify();
                        client.disablePreemptiveAuthentication();
                        client.execute(head, new MicrosoftIISFeaturesResponseHandler(capabilities));
                    }
                    else {
                        throw new DAVExceptionMappingService().map(e);
                    }
                    break;
                default:
                    throw new DAVExceptionMappingService().map(e);
            }
        }
    }
    catch(SardineException e) {
        throw new DAVExceptionMappingService().map(e);
    }
    catch(IOException e) {
        throw new HttpExceptionMappingService().map(e);
    }
}
// Connecting to a port nothing listens on must surface a ConnectionRefusedException
// with the generic "Connection failed" message.
@Test(expected = ConnectionRefusedException.class)
public void testConnectRefused() throws Exception {
    final Host host = new Host(new DAVSSLProtocol(), "localhost", 2121);
    final DAVSession session = new DAVSession(host, new CertificateStoreX509TrustManager(new DisabledCertificateTrustCallback(), new DefaultTrustManagerHostnameCallback(host), new DefaultCertificateStore()),
            new CertificateStoreX509KeyManager(new DisabledCertificateIdentityCallback(), host, new DefaultCertificateStore()));
    try {
        session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
        session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
    }
    catch(ConnectionRefusedException e) {
        assertEquals("Connection failed", e.getMessage());
        throw e;
    }
}
/**
 * Decides whether a Triple/gRPC method needs its parameters and return value
 * wrapped (true) or can use raw protobuf serialization (false).
 * <p>
 * Generic invocations and echo always need wrapping. Otherwise the decision
 * is driven by counting protobuf, plain-Java and StreamObserver parameters,
 * and by the method's RPC type, rejecting unsupported mixtures with
 * {@link IllegalStateException}.
 *
 * @param methodDescriptor descriptor of the candidate method
 * @param parameterClasses declared parameter types
 * @param returnClass declared return type
 * @return {@code true} if the call must be wrapped
 * @throws IllegalStateException for inconsistent or unsupported signatures
 */
static boolean needWrap(MethodDescriptor methodDescriptor, Class<?>[] parameterClasses, Class<?> returnClass) {
    String methodName = methodDescriptor.getMethodName();
    // generic call must be wrapped
    if (CommonConstants.$INVOKE.equals(methodName) || CommonConstants.$INVOKE_ASYNC.equals(methodName)) {
        return true;
    }
    // echo must be wrapped
    if ($ECHO.equals(methodName)) {
        return true;
    }
    boolean returnClassProtobuf = isProtobufClass(returnClass);
    // Response foo() — with no params, only the return type decides.
    if (parameterClasses.length == 0) {
        return !returnClassProtobuf;
    }
    int protobufParameterCount = 0;
    int javaParameterCount = 0;
    int streamParameterCount = 0;
    boolean secondParameterStream = false;
    // count normal and protobuf param
    for (int i = 0; i < parameterClasses.length; i++) {
        Class<?> parameterClass = parameterClasses[i];
        if (isProtobufClass(parameterClass)) {
            protobufParameterCount++;
        } else {
            if (isStreamType(parameterClass)) {
                if (i == 1) {
                    secondParameterStream = true;
                }
                streamParameterCount++;
            } else {
                javaParameterCount++;
            }
        }
    }
    // more than one stream param
    if (streamParameterCount > 1) {
        throw new IllegalStateException("method params error: more than one Stream params. method=" + methodName);
    }
    // protobuf only support one param
    if (protobufParameterCount >= 2) {
        throw new IllegalStateException("method params error: more than one protobuf params. method=" + methodName);
    }
    // server stream support one normal param and one stream param
    if (streamParameterCount == 1) {
        if (javaParameterCount + protobufParameterCount > 1) {
            throw new IllegalStateException(
                    "method params error: server stream does not support more than one normal param."
                            + " method=" + methodName);
        }
        // server stream: void foo(Request, StreamObserver<Response>)
        if (!secondParameterStream) {
            throw new IllegalStateException(
                    "method params error: server stream's second param must be StreamObserver."
                            + " method=" + methodName);
        }
    }
    if (methodDescriptor.getRpcType() != MethodDescriptor.RpcType.UNARY) {
        if (MethodDescriptor.RpcType.SERVER_STREAM == methodDescriptor.getRpcType()) {
            if (!secondParameterStream) {
                throw new IllegalStateException(
                        "method params error:server stream's second param must be StreamObserver."
                                + " method=" + methodName);
            }
        }
        // param type must be consistent
        if (returnClassProtobuf) {
            if (javaParameterCount > 0) {
                throw new IllegalStateException(
                        "method params error: both normal and protobuf param found. method=" + methodName);
            }
        } else {
            if (protobufParameterCount > 0) {
                throw new IllegalStateException("method params error method=" + methodName);
            }
        }
    } else {
        if (streamParameterCount > 0) {
            throw new IllegalStateException(
                    "method params error: unary method should not contain any StreamObserver."
                            + " method=" + methodName);
        }
        if (protobufParameterCount > 0 && returnClassProtobuf) {
            return false;
        }
        // handler reactor or rxjava only consider gen by proto
        if (isMono(returnClass) || isRx(returnClass)) {
            return false;
        }
        if (protobufParameterCount <= 0 && !returnClassProtobuf) {
            return true;
        }
        // handle grpc stub only consider gen by proto
        if (GRPC_ASYNC_RETURN_CLASS.equalsIgnoreCase(returnClass.getName()) && protobufParameterCount == 1) {
            return false;
        }
        // handle dubbo generated method
        if (TRI_ASYNC_RETURN_CLASS.equalsIgnoreCase(returnClass.getName())) {
            // Inspect the CompletableFuture's type argument to decide.
            Class<?> actualReturnClass =
                    (Class<?>) ((ParameterizedType) methodDescriptor.getMethod().getGenericReturnType())
                            .getActualTypeArguments()[0];
            boolean actualReturnClassProtobuf = isProtobufClass(actualReturnClass);
            if (actualReturnClassProtobuf && protobufParameterCount == 1) {
                return false;
            }
            if (!actualReturnClassProtobuf && protobufParameterCount == 0) {
                return true;
            }
        }
        // todo remove this in future
        boolean ignore = checkNeedIgnore(returnClass);
        if (ignore) {
            return protobufParameterCount != 1;
        }
        throw new IllegalStateException("method params error method=" + methodName);
    }
    // java param should be wrapped
    return javaParameterCount > 0;
}
// A server-stream method with a protobuf request needs no wrapping, while the
// variant taking a plain Object request does.
@Test
void testIsServerStream() throws NoSuchMethodException {
    Method method =
            DescriptorService.class.getMethod("sayHelloServerStream", HelloReply.class, StreamObserver.class);
    ReflectionMethodDescriptor descriptor = new ReflectionMethodDescriptor(method);
    Assertions.assertFalse(needWrap(descriptor));

    Method method2 =
            DescriptorService.class.getMethod("sayHelloServerStream2", Object.class, StreamObserver.class);
    ReflectionMethodDescriptor descriptor2 = new ReflectionMethodDescriptor(method2);
    Assertions.assertTrue(needWrap(descriptor2));
}
/**
 * Classifies a broker response to an AssignReplicasToDirs request as a
 * human-readable global error, checking failure modes from most to least
 * severe.
 *
 * @param response the (possibly absent) client response
 * @return a short error label, or {@link Optional#empty()} if the response is
 *     usable and carries no error code
 */
static Optional<String> globalResponseError(Optional<ClientResponse> response) {
    if (!response.isPresent()) {
        return Optional.of("Timeout");
    }
    if (response.get().authenticationException() != null) {
        return Optional.of("AuthenticationException");
    }
    if (response.get().wasTimedOut()) {
        // Fixed typo in the diagnostic label: was "Disonnected[Timeout]".
        return Optional.of("Disconnected[Timeout]");
    }
    if (response.get().wasDisconnected()) {
        return Optional.of("Disconnected");
    }
    if (response.get().versionMismatch() != null) {
        return Optional.of("UnsupportedVersionException");
    }
    if (response.get().responseBody() == null) {
        return Optional.of("EmptyResponse");
    }
    if (!(response.get().responseBody() instanceof AssignReplicasToDirsResponse)) {
        return Optional.of("ClassCastException");
    }
    AssignReplicasToDirsResponseData data =
        ((AssignReplicasToDirsResponse) response.get().responseBody()).data();
    Errors error = Errors.forCode(data.errorCode());
    if (error != Errors.NONE) {
        return Optional.of("Response-level error: " + error.name());
    }
    return Optional.empty();
}
// A connected, non-timed-out response with a null body classifies as "EmptyResponse".
@Test
public void testGlobalResponseErrorEmptyResponse() {
    assertEquals(Optional.of("EmptyResponse"),
        AssignmentsManager.globalResponseError(Optional.of(
            new ClientResponse(null, null, "", 0, 0, false, false, null, null, null))));
}
/**
 * Encodes a state-change event: the standard log header, then the member id,
 * then the "FROM -> TO" state-name payload via the trailing-state-change helper.
 *
 * @param encodingBuffer destination buffer
 * @param offset start offset in the buffer
 * @param captureLength number of payload bytes actually captured
 * @param length full (uncaptured) payload length
 * @param memberId cluster member whose state changed
 * @param from previous state
 * @param to new state
 * @return total number of bytes encoded
 */
static <E extends Enum<E>> int encodeStateChange(
    final UnsafeBuffer encodingBuffer,
    final int offset,
    final int captureLength,
    final int length,
    final int memberId,
    final E from,
    final E to)
{
    int encodedLength = encodeLogHeader(encodingBuffer, offset, captureLength, length);

    encodingBuffer.putInt(offset + encodedLength, memberId, LITTLE_ENDIAN);
    encodedLength += SIZE_OF_INT;

    return encodeTrailingStateChange(encodingBuffer, offset, encodedLength, captureLength, from, to);
}
// Encodes an ACTIVE -> CLOSED transition and verifies the returned length, the
// header fields, the member id, and the "FROM -> TO" ASCII payload.
@Test
void testEncodeStateChange()
{
    final int offset = 24;
    final ConsensusModule.State from = ConsensusModule.State.ACTIVE;
    final ConsensusModule.State to = ConsensusModule.State.CLOSED;
    final int memberId = 42;
    final String payload = from.name() + STATE_SEPARATOR + to.name();
    final int length = payload.length() + SIZE_OF_INT * 2;
    final int captureLength = captureLength(length);

    final int encodedLength = encodeStateChange(buffer, offset, captureLength, length, memberId, from, to);

    assertEquals(encodedLength(stateChangeLength(from, to)), encodedLength);
    assertEquals(captureLength, buffer.getInt(offset, LITTLE_ENDIAN));
    assertEquals(length, buffer.getInt(offset + SIZE_OF_INT, LITTLE_ENDIAN));
    // Timestamp slot in the header must have been populated (non-zero).
    assertNotEquals(0, buffer.getLong(offset + SIZE_OF_INT * 2, LITTLE_ENDIAN));
    assertEquals(memberId, buffer.getInt(offset + LOG_HEADER_LENGTH));
    assertEquals(payload, buffer.getStringAscii(offset + LOG_HEADER_LENGTH + SIZE_OF_INT));
}
/**
 * Tests whether the bytes form valid Base64 content, ignoring any run of
 * trailing {@code '.'} characters (used by some systems as alternative padding).
 *
 * @param arrayOctet the bytes to test
 * @return {@code true} if every byte before the trailing periods is a valid
 *     Base64 octet (an empty or all-period array is valid)
 */
public static boolean isBase64IgnoreTrailingPeriods(byte[] arrayOctet) {
    // Locate the last byte that is not part of the trailing '.' run.
    int lastIndex = arrayOctet.length - 1;
    while (lastIndex >= 0 && arrayOctet[lastIndex] == '.') {
        lastIndex--;
    }
    // Every byte up to that point must be a valid Base64 octet.
    for (int i = 0; i <= lastIndex; i++) {
        if (!isBase64(arrayOctet[i])) {
            return false;
        }
    }
    return true;
}
// Valid Base64 stays valid with up to 10 appended periods; strings with whitespace
// or invalid characters stay invalid regardless of appended periods.
@Test
public void testBase64IgnoreTrailingPeriods() {
    for (final String s : BASE64_STRINGS) {
        String testStr = s;
        for (int i = 0; i < 10; i++) {
            assertTrue(Base64Utils.isBase64IgnoreTrailingPeriods(testStr.getBytes(StandardCharsets.UTF_8)));
            testStr = testStr + ".";
        }
    }
    for (final String s : BASE64_STRINGS_WITH_WHITE_SPACE) {
        String testStr = s;
        for (int i = 0; i < 2; i++) {
            assertFalse(Base64Utils.isBase64IgnoreTrailingPeriods(testStr.getBytes(StandardCharsets.UTF_8)));
            testStr = testStr + ".";
        }
    }
    for (final String s : NON_BASE64_STRINGS) {
        String testStr = s;
        for (int i = 0; i < 2; i++) {
            assertFalse(Base64Utils.isBase64IgnoreTrailingPeriods(testStr.getBytes(StandardCharsets.UTF_8)));
            testStr = testStr + ".";
        }
    }
}
/**
 * Resolves this worker's identity, in priority order:
 * <ol>
 *   <li>an explicitly configured UUID property;</li>
 *   <li>the first non-comment line of the identity file;</li>
 *   <li>a freshly generated UUID, which is then persisted (best effort) to the
 *       identity file and the file made read-only.</li>
 * </ol>
 *
 * @return the resolved worker identity
 * @throws RuntimeException if the identity file exists but cannot be read
 *     (safer to abort startup than adopt a new identity)
 */
@Override
public WorkerIdentity get() {
  // Look at configurations first
  if (mConf.isSetByUser(PropertyKey.WORKER_IDENTITY_UUID)) {
    String uuidStr = mConf.getString(PropertyKey.WORKER_IDENTITY_UUID);
    final WorkerIdentity workerIdentity = WorkerIdentity.ParserV1.INSTANCE.fromUUID(uuidStr);
    LOG.debug("Loaded worker identity from configuration: {}", workerIdentity);
    return workerIdentity;
  }
  // Try loading from the identity file
  String filePathStr = mConf.getString(PropertyKey.WORKER_IDENTITY_UUID_FILE_PATH);
  final Path idFile = Paths.get(filePathStr);
  try (BufferedReader reader = Files.newBufferedReader(idFile)) {
    // Comment lines (#) and blank lines are ignored.
    List<String> nonCommentLines = reader.lines()
        .filter(line -> !line.startsWith("#"))
        .filter(line -> !line.trim().isEmpty())
        .collect(Collectors.toList());
    if (nonCommentLines.size() > 0) {
      if (nonCommentLines.size() > 1) {
        LOG.warn("Multiple worker identities configured in {}, only the first one will be used",
            idFile);
      }
      String uuidStr = nonCommentLines.get(0);
      final WorkerIdentity workerIdentity = WorkerIdentity.ParserV1.INSTANCE.fromUUID(uuidStr);
      LOG.debug("Loaded worker identity from file {}: {}", idFile, workerIdentity);
      return workerIdentity;
    }
  } catch (FileNotFoundException | NoSuchFileException ignored) {
    // if not existent, proceed to auto generate one
    LOG.debug("Worker identity file {} not found", idFile);
  } catch (IOException e) {
    // in case of other IO error, better stop worker from starting up than use a new identity
    throw new RuntimeException(
        String.format("Failed to read worker identity from identity file %s", idFile), e);
  }
  // No identity is supplied by the user
  // Assume this is the first time the worker starts up, and generate a new one
  LOG.debug("Auto generating new worker identity as no identity is supplied by the user");
  UUID generatedId = mUUIDGenerator.get();
  WorkerIdentity identity = WorkerIdentity.ParserV1.INSTANCE.fromUUID(generatedId);
  LOG.debug("Generated worker identity as {}", identity);
  // Persist best-effort: failure only costs identity stability across restarts.
  try (BufferedWriter writer = Files.newBufferedWriter(idFile, StandardCharsets.UTF_8,
      StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) {
    writer.write("# Worker identity automatically generated at ");
    writer.write(OffsetDateTime.now().format(DateTimeFormatter.RFC_1123_DATE_TIME));
    writer.newLine();
    writer.write(generatedId.toString());
    writer.newLine();
  } catch (Exception e) {
    LOG.warn("Failed to persist automatically generated worker identity ({}) to {}, "
        + "this worker will lose its identity after restart", identity, idFile, e);
  }
  try {
    // set the file to be read-only
    Set<PosixFilePermission> permSet = Files.getPosixFilePermissions(idFile);
    Set<PosixFilePermission> nonWritablePermSet = Sets.filter(permSet,
        perm -> perm != PosixFilePermission.OWNER_WRITE
            && perm != PosixFilePermission.GROUP_WRITE
            && perm != PosixFilePermission.OTHERS_WRITE);
    Files.setPosixFilePermissions(idFile, nonWritablePermSet);
  } catch (Exception e) {
    LOG.warn("Failed to set identity file to be read-only", e);
  }
  return identity;
}
@Test public void readFromWorkingDirIfIdFilePathNotExplicitlySetButFileExists() throws Exception { AlluxioProperties props = new AlluxioProperties(); props.put(PropertyKey.WORKER_IDENTITY_UUID_FILE_PATH, mUuidFilePath.toString(), Source.DEFAULT); try (BufferedWriter fout = Files.newBufferedWriter(mUuidFilePath, StandardCharsets.UTF_8, StandardOpenOption.CREATE_NEW, StandardOpenOption.WRITE)) { fout.write(mReferenceUuid.toString()); fout.newLine(); } AlluxioConfiguration conf = new InstancedConfiguration(props); WorkerIdentityProvider provider = new WorkerIdentityProvider(conf); WorkerIdentity identity = provider.get(); assertEquals(mReferenceUuid, WorkerIdentity.ParserV1.INSTANCE.toUUID(identity)); }
@Override public ResultSet executeQuery(String sql) throws SQLException { StatementResult result = executeInternal(sql); if (!result.isQueryResult()) { result.close(); throw new SQLException(String.format("Statement[%s] is not a query.", sql)); } currentResults = new FlinkResultSet(this, result); hasResults = true; return currentResults; }
@Test public void testCloseNonQuery() throws Exception { CompletableFuture<Void> closedFuture = new CompletableFuture<>(); try (FlinkConnection connection = new FlinkConnection(new TestingExecutor(closedFuture))) { try (Statement statement = connection.createStatement()) { assertThatThrownBy(() -> statement.executeQuery("INSERT")) .hasMessage(String.format("Statement[%s] is not a query.", "INSERT")); closedFuture.get(10, TimeUnit.SECONDS); } } }
public static long parseTimestamp(String value) { try { return parseLong(value); } catch (NumberFormatException e) { // Sometimes Pinot returns float point string in a field that we expect timestamp. // This can happen because of JsonParser, Pinot Query Engine, etc. // In this case we may still go through by reading a float value try { return parseDouble(value).longValue(); } catch (Exception ignoredEx) { return TimestampUtils.toMillisSinceEpoch(value); } } }
@Test public void testParseTimestamp() { long epoch = parseTimestamp("1.672528152E12"); assertEquals(epoch, 1672528152000L); long epoch2 = parseTimestamp("1652374863000"); assertEquals(epoch2, 1652374863000L); long epoch3 = parseTimestamp("2022-05-12 10:48:06.5"); assertEquals(epoch3, 1652370486500L); }
@Override public Collection<RedisServer> masters() { List<Map<String, String>> masters = connection.sync(StringCodec.INSTANCE, RedisCommands.SENTINEL_MASTERS); return toRedisServersList(masters); }
@Test public void testMasters() { Collection<RedisServer> masters = connection.masters(); assertThat(masters).hasSize(1); }
public HealthCheckResponse checkHealth() { final Map<String, HealthCheckResponseDetail> results = DEFAULT_CHECKS.stream() .collect(Collectors.toMap( Check::getName, check -> check.check(this) )); final boolean allHealthy = results.values().stream() .allMatch(HealthCheckResponseDetail::getIsHealthy); final State serverState = commandRunner.checkServerState(); return new HealthCheckResponse(allHealthy, results, Optional.of(serverState.toString())); }
@Test public void shouldReturnHealthyIfKafkaCheckFailsWithAuthorizationException() { // Given: givenDescribeTopicsThrows(KsqlTopicAuthorizationException.class); // When: final HealthCheckResponse response = healthCheckAgent.checkHealth(); // Then: assertThat(response.getDetails().get(KAFKA_CHECK_NAME).getIsHealthy(), is(true)); assertThat(response.getIsHealthy(), is(true)); }
@Override public String version() { return AppInfoParser.getVersion(); }
@Test public void testInsertHeaderVersionRetrievedFromAppInfoParser() { assertEquals(AppInfoParser.getVersion(), xform.version()); }
@Override public CompletableFuture<Map<String, BrokerLookupData>> filterAsync(Map<String, BrokerLookupData> brokers, ServiceUnitId serviceUnit, LoadManagerContext context) { ServiceConfiguration conf = context.brokerConfiguration(); if (!conf.isPreferLaterVersions() || brokers.isEmpty()) { return CompletableFuture.completedFuture(brokers); } Version latestVersion; try { latestVersion = getLatestVersionNumber(brokers); if (log.isDebugEnabled()) { log.debug("Latest broker version found was [{}]", latestVersion); } } catch (Exception ex) { log.warn("Disabling PreferLaterVersions feature; reason: " + ex.getMessage()); return FutureUtil.failedFuture( new BrokerFilterBadVersionException("Cannot determine newest broker version: " + ex.getMessage())); } int numBrokersLatestVersion = 0; int numBrokersOlderVersion = 0; Iterator<Map.Entry<String, BrokerLookupData>> brokerIterator = brokers.entrySet().iterator(); while (brokerIterator.hasNext()) { Map.Entry<String, BrokerLookupData> next = brokerIterator.next(); String brokerId = next.getKey(); String version = next.getValue().brokerVersion(); Version brokerVersionVersion = Version.valueOf(version); if (brokerVersionVersion.equals(latestVersion)) { log.debug("Broker [{}] is running the latest version ([{}])", brokerId, version); numBrokersLatestVersion++; } else { log.info("Broker [{}] is running an older version ([{}]); latest version is [{}]", brokerId, version, latestVersion); numBrokersOlderVersion++; brokerIterator.remove(); } } if (numBrokersOlderVersion == 0) { log.info("All {} brokers are running the latest version [{}]", numBrokersLatestVersion, latestVersion); } return CompletableFuture.completedFuture(brokers); }
@Test public void testFilterEmptyBrokerList() throws BrokerFilterException, ExecutionException, InterruptedException { BrokerVersionFilter brokerVersionFilter = new BrokerVersionFilter(); Map<String, BrokerLookupData> result = brokerVersionFilter.filterAsync(new HashMap<>(), null, getContext()).get(); assertTrue(result.isEmpty()); }
public String readStringVar() { // TODO return ""; }
@Test void assertReadStringVar() { assertThat(new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8).readStringVar(), is("")); }
public String getCallbackUri(String secretString) { String cbid = cbidGenerator.generate(secretString); if (!isValidPortNumber(callbackPort)) { throw new AssertionError("Invalid callbackPort number specified"); } HostAndPort hostAndPort = callbackPort == 80 ? HostAndPort.fromHost(callbackAddress) : HostAndPort.fromParts(callbackAddress, callbackPort); // check if the specified address is raw IP or domain if (InetAddresses.isInetAddress(callbackAddress)) { return CbidProcessor.addCbidToUrl(cbid, hostAndPort); } else if (InternetDomainName.isValid(callbackAddress)) { return CbidProcessor.addCbidToSubdomain(cbid, hostAndPort); } // Should never reach here throw new AssertionError("Unrecognized address format, should be Ip address or valid domain"); }
@Test public void getCallbackUri_validDomainAddress_returnsUriWithCbidInSubdomain() { client = new TcsClient(VALID_DOMAIN, VALID_PORT, VALID_URL, httpClient); String url = client.getCallbackUri(SECRET); String expectedUriString = String.format("%s.%s:%d", CBID, VALID_DOMAIN, VALID_PORT); assertThat(url).isEqualTo(expectedUriString); }
public static Schema getTableSchema(TableId tableId) { ValuesTable table = globalTables.get(tableId); Preconditions.checkNotNull(table, tableId + " is not existed"); Schema.Builder builder = Schema.newBuilder(); for (Column column : table.columns) { builder.physicalColumn(column.getName(), column.getType()); } return builder.primaryKey(table.primaryKeys).build(); }
@Test public void testValuesMetadataAccessor() { Schema schema = Schema.newBuilder() .physicalColumn("col1", new CharType()) .physicalColumn("col2", new CharType()) .primaryKey("col1") .build(); Assert.assertEquals(2, metadataAccessor.listNamespaces().size()); Assert.assertEquals(2, metadataAccessor.listSchemas(null).size()); Assert.assertEquals(1, metadataAccessor.listTables(null, "default").size()); Assert.assertEquals(schema, metadataAccessor.getTableSchema(table1)); }
public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context) throws IOException, SAXException, TikaException { if (stream == null) { throw new NullPointerException("null stream"); } Throwable t; boolean alive = false; ForkClient client = acquireClient(); try { ContentHandler tee = (handler instanceof AbstractRecursiveParserWrapperHandler) ? handler : new TeeContentHandler(handler, new MetadataContentHandler(metadata)); t = client.call("parse", stream, tee, metadata, context); alive = true; } catch (TikaException te) { // Problem occurred on our side alive = true; throw te; } catch (IOException e) { // Problem occurred on the other side throw new TikaException("Failed to communicate with a forked parser process." + " The process has most likely crashed due to some error" + " like running out of memory. A new process will be" + " started for the next parsing request.", e); } finally { releaseClient(client, alive); } if (t instanceof IOException) { throw (IOException) t; } else if (t instanceof SAXException) { throw (SAXException) t; } else if (t instanceof TikaException) { throw (TikaException) t; } else if (t != null) { throw new TikaException("Unexpected error in forked server process", t); } }
@Test public void testRecursiveParserWrapperWithProxyingContentHandlersAndMetadata() throws Exception { Parser parser = new AutoDetectParser(); RecursiveParserWrapper wrapper = new RecursiveParserWrapper(parser); BufferingHandler handler = new BufferingHandler(new SBContentHandlerFactory()); try (ForkParser fork = new ForkParser(ForkParserTest.class.getClassLoader(), wrapper); InputStream is = getResourceAsStream("/test-documents/basic_embedded.xml")) { Metadata metadata = new Metadata(); ParseContext context = new ParseContext(); fork.parse(is, handler, metadata, context); } List<Metadata> metadataList = handler.getMetadataList(); List<ContentHandler> contentHandlers = handler.getContentHandlers(); Metadata m0 = metadataList.get(0); String content0 = contentHandlers.get(0).toString(); assertEquals("Nikolai Lobachevsky", m0.get(TikaCoreProperties.CREATOR)); assertContains("main_content", content0); assertContains("embed1.xml", content0); Metadata m1 = metadataList.get(1); String content1 = contentHandlers.get(1).toString(); assertEquals("embeddedAuthor", m1.get(TikaCoreProperties.CREATOR)); assertContains("some_embedded_content", content1); assertEquals("/embed1.xml", m1.get(TikaCoreProperties.EMBEDDED_RESOURCE_PATH)); }
@Override public GlobalStatusResponseProto convert2Proto(GlobalStatusResponse globalStatusResponse) { final short typeCode = globalStatusResponse.getTypeCode(); final AbstractMessageProto abstractMessage = AbstractMessageProto.newBuilder().setMessageType( MessageTypeProto.forNumber(typeCode)).build(); final String msg = globalStatusResponse.getMsg(); final AbstractResultMessageProto abstractResultMessageProto = AbstractResultMessageProto.newBuilder().setMsg( msg == null ? "" : msg).setResultCode(ResultCodeProto.valueOf(globalStatusResponse.getResultCode().name())) .setAbstractMessage(abstractMessage).build(); AbstractTransactionResponseProto abstractTransactionResponseProto = AbstractTransactionResponseProto .newBuilder().setAbstractResultMessage(abstractResultMessageProto).setTransactionExceptionCode( TransactionExceptionCodeProto.valueOf(globalStatusResponse.getTransactionExceptionCode().name())) .build(); AbstractGlobalEndResponseProto abstractGlobalEndResponseProto = AbstractGlobalEndResponseProto.newBuilder() .setAbstractTransactionResponse(abstractTransactionResponseProto).setGlobalStatus( GlobalStatusProto.valueOf(globalStatusResponse.getGlobalStatus().name())).build(); GlobalStatusResponseProto result = GlobalStatusResponseProto.newBuilder().setAbstractGlobalEndResponse( abstractGlobalEndResponseProto).build(); return result; }
@Test public void convert2Proto() { GlobalStatusResponse globalStatusResponse = new GlobalStatusResponse(); globalStatusResponse.setGlobalStatus(GlobalStatus.AsyncCommitting); globalStatusResponse.setMsg("msg"); globalStatusResponse.setResultCode(ResultCode.Failed); globalStatusResponse.setTransactionExceptionCode(TransactionExceptionCode.BranchRegisterFailed); GlobalStatusResponseConvertor convertor = new GlobalStatusResponseConvertor(); GlobalStatusResponseProto proto = convertor.convert2Proto( globalStatusResponse); GlobalStatusResponse real = convertor.convert2Model(proto); assertThat((real.getTypeCode())).isEqualTo(globalStatusResponse.getTypeCode()); assertThat((real.getMsg())).isEqualTo(globalStatusResponse.getMsg()); assertThat((real.getResultCode())).isEqualTo(globalStatusResponse.getResultCode()); assertThat((real.getTransactionExceptionCode())).isEqualTo( globalStatusResponse.getTransactionExceptionCode()); }
public void deleteGroup(String groupName) { Iterator<PipelineConfigs> iterator = this.iterator(); while (iterator.hasNext()) { PipelineConfigs currentGroup = iterator.next(); if (currentGroup.isNamed(groupName)) { if (!currentGroup.isEmpty()) { throw new UnprocessableEntityException("Failed to delete group " + groupName + " because it was not empty."); } iterator.remove(); break; } } }
@Test public void shouldDeleteGroupWhenEmpty() { PipelineConfigs group = createGroup("group", new PipelineConfig[]{}); PipelineGroups groups = new PipelineGroups(group); groups.deleteGroup("group"); assertThat(groups.size(), is(0)); }
@Override public final void finish() throws Exception { finished = true; finishProcessing(currentTransaction()); }
@Test void testNoTransactionAfterSinkFunctionFinish() throws Exception { harness.open(); harness.processElement("42", 0); harness.snapshot(0, 1); harness.processElement("43", 2); harness.snapshot(1, 3); harness.processElement("44", 4); // do not expect new input after finish() sinkFunction.finish(); harness.snapshot(2, 5); harness.notifyOfCompletedCheckpoint(1); // make sure the previous empty transaction will not be pre-committed harness.snapshot(3, 6); assertThatThrownBy(() -> harness.processElement("45", 7)) .isInstanceOf(NullPointerException.class); // Checkpoint2 has not complete assertExactlyOnce(Arrays.asList("42", "43")); // transaction for checkpoint2 assertThat(tmpDirectory.listFiles()).hasSize(1); }
static BlockLocation[] fixBlockLocations(BlockLocation[] locations, long start, long len, long fileOffsetInHar) { // offset 1 past last byte of desired range long end = start + len; for (BlockLocation location : locations) { // offset of part block relative to beginning of desired file // (may be negative if file starts in this part block) long harBlockStart = location.getOffset() - fileOffsetInHar; // offset 1 past last byte of har block relative to beginning of // desired file long harBlockEnd = harBlockStart + location.getLength(); if (start > harBlockStart) { // desired range starts after beginning of this har block // fix offset to beginning of relevant range (relative to desired file) location.setOffset(start); // fix length to relevant portion of har block location.setLength(location.getLength() - (start - harBlockStart)); } else { // desired range includes beginning of this har block location.setOffset(harBlockStart); } if (harBlockEnd > end) { // range ends before end of this har block // fix length to remove irrelevant portion at the end location.setLength(location.getLength() - (harBlockEnd - end)); } } return locations; }
// Exercises all clipping combinations of fixBlockLocations: range starting before/inside
// a block crossed with ending after/inside it, for start == 0 and start == 3, plus a
// multi-block regression case from MAPREDUCE-1752.
@Test
public void testFixBlockLocations() {
    // do some tests where start == 0
    {
        // case 1: range starts before current har block and ends after
        BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
        HarFileSystem.fixBlockLocations(b, 0, 20, 5);
        assertThat(b[0].getOffset()).isEqualTo(5);
        assertThat(b[0].getLength()).isEqualTo(10);
    }
    {
        // case 2: range starts in current har block and ends after
        BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
        HarFileSystem.fixBlockLocations(b, 0, 20, 15);
        assertThat(b[0].getOffset()).isZero();
        assertThat(b[0].getLength()).isEqualTo(5);
    }
    {
        // case 3: range starts before current har block and ends in
        // current har block
        BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
        HarFileSystem.fixBlockLocations(b, 0, 10, 5);
        assertThat(b[0].getOffset()).isEqualTo(5);
        assertThat(b[0].getLength()).isEqualTo(5);
    }
    {
        // case 4: range starts and ends in current har block
        BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
        HarFileSystem.fixBlockLocations(b, 0, 6, 12);
        assertThat(b[0].getOffset()).isZero();
        assertThat(b[0].getLength()).isEqualTo(6);
    }
    // now try a range where start == 3
    {
        // case 5: range starts before current har block and ends after
        BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
        HarFileSystem.fixBlockLocations(b, 3, 20, 5);
        assertThat(b[0].getOffset()).isEqualTo(5);
        assertThat(b[0].getLength()).isEqualTo(10);
    }
    {
        // case 6: range starts in current har block and ends after
        BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
        HarFileSystem.fixBlockLocations(b, 3, 20, 15);
        assertThat(b[0].getOffset()).isEqualTo(3);
        assertThat(b[0].getLength()).isEqualTo(2);
    }
    {
        // case 7: range starts before current har block and ends in
        // current har block
        BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
        HarFileSystem.fixBlockLocations(b, 3, 7, 5);
        assertThat(b[0].getOffset()).isEqualTo(5);
        assertThat(b[0].getLength()).isEqualTo(5);
    }
    {
        // case 8: range starts and ends in current har block
        BlockLocation[] b = { new BlockLocation(null, null, 10, 10) };
        HarFileSystem.fixBlockLocations(b, 3, 3, 12);
        assertThat(b[0].getOffset()).isEqualTo(3);
        assertThat(b[0].getLength()).isEqualTo(3);
    }
    // test case from JIRA MAPREDUCE-1752
    {
        BlockLocation[] b = { new BlockLocation(null, null, 512, 512),
                new BlockLocation(null, null, 1024, 512) };
        HarFileSystem.fixBlockLocations(b, 0, 512, 896);
        assertThat(b[0].getOffset()).isZero();
        assertThat(b[0].getLength()).isEqualTo(128);
        assertThat(b[1].getOffset()).isEqualTo(128);
        assertThat(b[1].getLength()).isEqualTo(384);
    }
}
public void set(PropertyKey key, Object value) { set(key, value, Source.RUNTIME); }
@Test public void setValidation() { assertThrows(IllegalArgumentException.class, () -> mConfiguration.set(PropertyKey.MASTER_KEYTAB_KEY_FILE, "/file/not/exist")); }
public static Thread daemonThread(Runnable r, Class<?> context, String description) { return daemonThread(r, "hollow", context, description); }
@Test public void nullName() { try { daemonThread(() -> {}, null); fail("expected an exception"); } catch (NullPointerException e) { assertEquals("name required", e.getMessage()); } }
public int randomNum() { int value = random.nextInt(); if (value < 0) { value = Math.abs(value); if (value < 0) value = 0; } return value; }
@Test public void testRandomNum() { int randomNum = pullAPIWrapper.randomNum(); assertTrue(randomNum > 0); }
public void sort(Comparator<E> comparator) { ArrayList<E> array = new ArrayList<>(size); Iterator<E> iterator = iterator(); while (iterator.hasNext()) { E e = iterator.next(); iterator.remove(); array.add(e); } array.sort(comparator); for (E e : array) { add(e); } }
@Test public void testSort() { ImplicitLinkedHashCollection<TestElement> coll = new ImplicitLinkedHashCollection<>(); coll.add(new TestElement(3, 3)); coll.add(new TestElement(1, 1)); coll.add(new TestElement(10, 10)); coll.add(new TestElement(9, 9)); coll.add(new TestElement(2, 2)); coll.add(new TestElement(4, 4)); coll.add(new TestElement(0, 0)); coll.add(new TestElement(30, 30)); coll.add(new TestElement(20, 20)); coll.add(new TestElement(11, 11)); coll.add(new TestElement(15, 15)); coll.add(new TestElement(5, 5)); expectTraversal(coll.iterator(), 3, 1, 10, 9, 2, 4, 0, 30, 20, 11, 15, 5); coll.sort(TestElementComparator.INSTANCE); expectTraversal(coll.iterator(), 0, 1, 2, 3, 4, 5, 9, 10, 11, 15, 20, 30); coll.sort(TestElementComparator.INSTANCE); expectTraversal(coll.iterator(), 0, 1, 2, 3, 4, 5, 9, 10, 11, 15, 20, 30); coll.sort(ReverseTestElementComparator.INSTANCE); expectTraversal(coll.iterator(), 30, 20, 15, 11, 10, 9, 5, 4, 3, 2, 1, 0); }
public static void validateSecret(byte[] secret) throws OtpInfoException { if (secret.length != SECRET_LENGTH && secret.length != SECRET_FULL_LENGTH) { throw new OtpInfoException(String.format("Invalid Yandex secret length: %d bytes", secret.length)); } // Secrets originating from a QR code do not have a checksum, so we assume those are valid if (secret.length == SECRET_LENGTH) { return; } char originalChecksum = (char) ((secret[secret.length - 2] & 0x0F) << 8 | secret[secret.length - 1] & 0xff); char accum = 0; int accumBits = 0; int inputTotalBitsAvailable = secret.length * 8 - 12; int inputIndex = 0; int inputBitsAvailable = 8; while (inputTotalBitsAvailable > 0) { int requiredBits = 13 - accumBits; if (inputTotalBitsAvailable < requiredBits) { requiredBits = inputTotalBitsAvailable; } while (requiredBits > 0) { int curInput = (secret[inputIndex] & (1 << inputBitsAvailable) - 1) & 0xff; int bitsToRead = Math.min(requiredBits, inputBitsAvailable); curInput >>= inputBitsAvailable - bitsToRead; accum = (char) (accum << bitsToRead | curInput); inputTotalBitsAvailable -= bitsToRead; requiredBits -= bitsToRead; inputBitsAvailable -= bitsToRead; accumBits += bitsToRead; if (inputBitsAvailable == 0) { inputIndex += 1; inputBitsAvailable = 8; } } if (accumBits == 13) { accum ^= 0b1_1000_1111_0011; } accumBits = 16 - getNumberOfLeadingZeros(accum); } if (accum != originalChecksum) { throw new OtpInfoException("Yandex secret checksum invalid"); } }
@Test public void testYandexSecretValidation() { assertThrows(OtpInfoException.class, () -> YandexInfo.validateSecret(getBase32Vector(2))); assertThrows(OtpInfoException.class, () -> YandexInfo.validateSecret(getBase32Vector(3))); }
protected List<OUT> executeOnCollections(RuntimeContext ctx, ExecutionConfig executionConfig) throws Exception { @SuppressWarnings("unchecked") InputFormat<OUT, InputSplit> inputFormat = (InputFormat<OUT, InputSplit>) this.formatWrapper.getUserCodeObject(); // configure the input format inputFormat.configure(this.parameters); // open the input format if (inputFormat instanceof RichInputFormat) { ((RichInputFormat) inputFormat).setRuntimeContext(ctx); ((RichInputFormat) inputFormat).openInputFormat(); } List<OUT> result = new ArrayList<OUT>(); // splits InputSplit[] splits = inputFormat.createInputSplits(1); TypeSerializer<OUT> serializer = getOperatorInfo() .getOutputType() .createSerializer(executionConfig.getSerializerConfig()); for (InputSplit split : splits) { inputFormat.open(split); while (!inputFormat.reachedEnd()) { OUT next = inputFormat.nextRecord(serializer.createInstance()); if (next != null) { result.add(serializer.copy(next)); } } inputFormat.close(); } // close the input format if (inputFormat instanceof RichInputFormat) { ((RichInputFormat) inputFormat).closeInputFormat(); } return result; }
@Test void testDataSourcePlain() throws Exception { TestNonRichInputFormat in = new TestNonRichInputFormat(); GenericDataSourceBase<String, TestNonRichInputFormat> source = new GenericDataSourceBase<>( in, new OperatorInformation<>(BasicTypeInfo.STRING_TYPE_INFO), "testSource"); ExecutionConfig executionConfig = new ExecutionConfig(); executionConfig.disableObjectReuse(); List<String> resultMutableSafe = source.executeOnCollections(null, executionConfig); in.reset(); executionConfig.enableObjectReuse(); List<String> resultRegular = source.executeOnCollections(null, executionConfig); assertThat(resultMutableSafe).isEqualTo(asList(TestIOData.NAMES)); assertThat(resultRegular).isEqualTo(asList(TestIOData.NAMES)); }
public DoubleArrayAsIterable usingExactEquality() { return new DoubleArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject()); }
@Test public void usingExactEquality_contains_otherTypes() { // Expected value is Float assertThat(array(1.0, 2.0, 3.0)).usingExactEquality().contains(2.0f); // Expected value is Integer assertThat(array(1.0, 2.0, 3.0)).usingExactEquality().contains(2); assertThat(array(1.0, Integer.MAX_VALUE, 3.0)).usingExactEquality().contains(Integer.MAX_VALUE); // Expected value is Long - supported up to +/- 2^53 assertThat(array(1.0, 2.0, 3.0)).usingExactEquality().contains(2L); assertThat(array(1.0, 1L << 53, 3.0)).usingExactEquality().contains(1L << 53); }
@Override public final boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof MapStoreConfig that)) { return false; } return enabled == that.enabled && writeCoalescing == that.writeCoalescing && writeDelaySeconds == that.writeDelaySeconds && writeBatchSize == that.writeBatchSize && Objects.equals(implementation, that.implementation) && Objects.equals(className, that.className) && Objects.equals(factoryImplementation, that.factoryImplementation) && Objects.equals(factoryClassName, that.factoryClassName) && properties.equals(that.properties) && initialLoadMode == that.initialLoadMode && offload == that.offload; }
@Test public void testEquals() { assertNotEquals(defaultCfg, cfgNotEnabled); assertNotEquals(defaultCfg, cfgNotWriteCoalescing); assertNotEquals(defaultCfg, cfgNonDefaultWriteDelaySeconds); assertNotEquals(defaultCfg, cfgNonDefaultWriteBatchSize); // class name branches assertNotEquals(defaultCfg, cfgNonNullClassName); assertNotEquals(cfgNonNullClassName, cfgNonNullOtherClassName); assertNotEquals(cfgNonNullClassName, defaultCfg); // factory class name branches assertNotEquals(defaultCfg, cfgNonNullFactoryClassName); assertNotEquals(cfgNonNullFactoryClassName, cfgNonNullOtherFactoryClassName); assertNotEquals(cfgNonNullFactoryClassName, defaultCfg); // implementation assertNotEquals(defaultCfg, cfgNonNullImplementation); assertNotEquals(cfgNonNullImplementation, cfgNonNullOtherImplementation); assertNotEquals(cfgNonNullImplementation, defaultCfg); // factory implementation assertNotEquals(defaultCfg, cfgNonNullFactoryImplementation); assertNotEquals(cfgNonNullFactoryImplementation, cfgNonNullOtherFactoryImplementation); assertNotEquals(cfgNonNullFactoryImplementation, defaultCfg); assertNotEquals(defaultCfg, cfgWithProperties); assertNotEquals(defaultCfg, cfgEagerMode); }
public static <T> T visit(final Schema start, final SchemaVisitor<T> visitor) { // Set of Visited Schemas IdentityHashMap<Schema, Schema> visited = new IdentityHashMap<>(); // Stack that contains the Schemas to process and afterVisitNonTerminal // functions. // Deque<Either<Schema, Supplier<SchemaVisitorAction>>> // Using Either<...> has a cost we want to avoid... Deque<Object> dq = new ArrayDeque<>(); dq.push(start); Object current; while ((current = dq.poll()) != null) { if (current instanceof Supplier) { // We are executing a non-terminal post visit. SchemaVisitor.SchemaVisitorAction action = ((Supplier<SchemaVisitor.SchemaVisitorAction>) current).get(); switch (action) { case CONTINUE: break; case SKIP_SIBLINGS: while (dq.peek() instanceof Schema) { dq.remove(); } break; case TERMINATE: return visitor.get(); case SKIP_SUBTREE: default: throw new UnsupportedOperationException("Invalid action " + action); } } else { Schema schema = (Schema) current; boolean terminate; if (visited.containsKey(schema)) { terminate = visitTerminal(visitor, schema, dq); } else { Schema.Type type = schema.getType(); switch (type) { case ARRAY: terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getElementType())); visited.put(schema, schema); break; case RECORD: terminate = visitNonTerminal(visitor, schema, dq, () -> schema.getFields().stream().map(Field::schema) .collect(Collectors.toCollection(ArrayDeque::new)).descendingIterator()); visited.put(schema, schema); break; case UNION: terminate = visitNonTerminal(visitor, schema, dq, schema.getTypes()); visited.put(schema, schema); break; case MAP: terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getValueType())); visited.put(schema, schema); break; default: terminate = visitTerminal(visitor, schema, dq); break; } } if (terminate) { return visitor.get(); } } } return visitor.get(); }
@Test public void testVisit1() { String s1 = "{\"type\": \"record\", \"name\": \"t1\", \"fields\": [" + "{\"name\": \"f1\", \"type\": \"int\"}" + "]}"; Assert.assertEquals("t1.", Schemas.visit(new Schema.Parser().parse(s1), new TestVisitor())); }
public Future<KafkaVersionChange> reconcile() { return getVersionFromController() .compose(i -> getPods()) .compose(this::detectToAndFromVersions) .compose(i -> prepareVersionChange()); }
@Test public void testDowngradeWithAllVersionsAndRecovery(VertxTestContext context) { String oldKafkaVersion = KafkaVersionTestUtils.LATEST_KAFKA_VERSION; String oldInterBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; String oldLogMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; String kafkaVersion = KafkaVersionTestUtils.PREVIOUS_KAFKA_VERSION; String interBrokerProtocolVersion = KafkaVersionTestUtils.PREVIOUS_PROTOCOL_VERSION; String logMessageFormatVersion = KafkaVersionTestUtils.PREVIOUS_FORMAT_VERSION; VersionChangeCreator vcc = mockVersionChangeCreator( mockKafka(kafkaVersion, interBrokerProtocolVersion, logMessageFormatVersion), mockNewCluster( null, mockSps(kafkaVersion), mockMixedPods(oldKafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion, kafkaVersion, oldInterBrokerProtocolVersion, oldLogMessageFormatVersion) ) ); Checkpoint async = context.checkpoint(); vcc.reconcile().onComplete(context.succeeding(c -> context.verify(() -> { assertThat(c.from(), is(VERSIONS.version(oldKafkaVersion))); assertThat(c.to(), is(VERSIONS.version(kafkaVersion))); assertThat(c.interBrokerProtocolVersion(), nullValue()); assertThat(c.logMessageFormatVersion(), nullValue()); async.flag(); }))); }
@Override public void checkTopicAccess( final KsqlSecurityContext securityContext, final String topicName, final AclOperation operation ) { final Set<AclOperation> authorizedOperations = securityContext.getServiceContext() .getTopicClient().describeTopic(topicName).authorizedOperations(); // Kakfa 2.2 or lower do not support authorizedOperations(). In case of running on a // unsupported broker version, then the authorizeOperation will be null. if (authorizedOperations != null && !authorizedOperations.contains(operation)) { // This error message is similar to what Kafka throws when it cannot access the topic // due to an authorization error. I used this message to keep a consistent message. throw new KsqlTopicAuthorizationException(operation, Collections.singleton(topicName)); } }
@Test public void shouldDenyIfAuthorizedOperationsDoesNotContainREAD() { // Given: givenTopicPermissions(TOPIC_1, Collections.singleton(AclOperation.WRITE)); // When: final Exception e = assertThrows( KsqlTopicAuthorizationException.class, () -> accessValidator.checkTopicAccess(securityContext, TOPIC_NAME_1, AclOperation.READ) ); // Then: assertThat(e.getMessage(), containsString(String.format( "Authorization denied to Read on topic(s): [%s]", TOPIC_1.name() ))); }
@Override public void abort(OutputBufferId bufferId) { checkArgument(bufferId.getId() == outputBufferId.getId(), "Invalid bufferId"); destroy(); }
@Test public void testAbort() { SpoolingOutputBuffer buffer = createSpoolingOutputBuffer(); // add three pages into a file for (int i = 0; i < 3; i++) { addPage(buffer, createPage(i)); } // add two page in memory for (int i = 3; i < 5; i++) { addPage(buffer, createPage(i)); } try { buffer.abort(INVALID_BUFFER_ID); } catch (IllegalArgumentException e) { assertEquals(e.getMessage(), "Invalid bufferId"); } compareTotalBuffered(buffer, 5); buffer.abort(BUFFER_ID); compareTotalBuffered(buffer, 0); }
public int setClosed() throws ClosedChannelException { while (true) { int curBits = status.get(); if ((curBits & STATUS_CLOSED_MASK) != 0) { throw new ClosedChannelException(); } if (status.compareAndSet(curBits, curBits | STATUS_CLOSED_MASK)) { return curBits & (~STATUS_CLOSED_MASK); } } }
@Test public void testSetClosed() throws ClosedChannelException { CloseableReferenceCount clr = new CloseableReferenceCount(); assertTrue("Reference count should be open", clr.isOpen()); clr.setClosed(); assertFalse("Reference count should be closed", clr.isOpen()); }
public static JoinParams create( final ColumnName keyColName, final LogicalSchema leftSchema, final LogicalSchema rightSchema ) { final boolean appendKey = neitherContain(keyColName, leftSchema, rightSchema); return new JoinParams( new KsqlValueJoiner(leftSchema.value().size(), rightSchema.value().size(), appendKey ? 1 : 0 ), createSchema(keyColName, leftSchema, rightSchema) ); }
@Test public void shouldThrowOnKeyTypeMismatch() { // Given: final LogicalSchema intKeySchema = LogicalSchema.builder() .keyColumn(ColumnName.of("BOB"), SqlTypes.INTEGER) .valueColumn(ColumnName.of("BLUE"), SqlTypes.STRING) .valueColumn(ColumnName.of("GREEN"), SqlTypes.INTEGER) .build() .withPseudoAndKeyColsInValue(false); // When: final Exception e = assertThrows( KsqlException.class, () -> JoinParamsFactory.create(ColumnName.of("BOB"), intKeySchema, RIGHT_SCHEMA) ); // Then: assertThat(e.getMessage(), containsString( "Invalid join. Key types differ: INTEGER vs STRING")); }
/**
 * Returns {@code true} if the given topic is on the fixed deny-list of topics
 * that producers are not allowed to send to.
 */
public static boolean isNotAllowedSendTopic(String topic) { return NOT_ALLOWED_SEND_TOPIC_SET.contains(topic); }
@Test
public void testIsNotAllowedSendTopic() {
    // every topic on the deny-list must be rejected
    for (String denied : TopicValidator.getNotAllowedSendTopicSet()) {
        assertThat(TopicValidator.isNotAllowedSendTopic(denied)).isTrue();
    }

    // an arbitrary user topic must be allowed
    assertThat(TopicValidator.isNotAllowedSendTopic("test_allowed_send_topic")).isFalse();
}
/**
 * Converts an infix predicate string into operator-suffixed (reverse-polish)
 * token order using a shunting-yard-style stack.
 * NOTE(review): despite the name "toPrefix", operands are emitted before their
 * operators (see how operators are popped to output) — i.e. postfix order;
 * confirm intent against callers. Likewise, isOperand(token) appears to be
 * true for operators/parentheses (")" is handled inside that branch) — the
 * helper seems misnamed; verify its definition.
 */
public List<String> toPrefix(String in) {
    List<String> tokens = buildTokens(alignINClause(in));
    List<String> output = new ArrayList<>();
    // operator stack, implemented as a List with push/pop at the tail
    List<String> stack = new ArrayList<>();
    for (String token : tokens) {
        if (isOperand(token)) {
            if (token.equals(")")) {
                // closing parenthesis: pop operators to output until an "(" is on top
                while (openParanthesesFound(stack)) {
                    output.add(stack.remove(stack.size() - 1));
                }
                if (!stack.isEmpty()) { // temporarily fix for issue #189
                    // discard the matching "(" itself
                    stack.remove(stack.size() - 1);
                }
            } else {
                // pop operators of higher-or-equal precedence before pushing this one
                while (openParanthesesFound(stack) && !hasHigherPrecedence(token, stack.get(stack.size() - 1))) {
                    output.add(stack.remove(stack.size() - 1));
                }
                stack.add(token);
            }
        } else {
            // plain value/identifier token goes straight to output
            output.add(token);
        }
    }
    // flush any operators still pending on the stack
    while (!stack.isEmpty()) {
        output.add(stack.remove(stack.size() - 1));
    }
    return output;
}
@Test
public void testTwoInnerParenthesis() {
    // nested/redundant parentheses must not leak into the output token list
    final String query = "a and b AND ( ( ( a > c AND b > d ) OR ( x = y ) ) ) OR t > u";

    final List<String> expected = Arrays.asList(
            "a", "b", "and",
            "a", "c", ">",
            "b", "d", ">", "AND",
            "x", "y", "=", "OR", "AND",
            "t", "u", ">", "OR");

    assertEquals(expected, parser.toPrefix(query));
}
/**
 * Selects namespace bundles to unload from brokers whose resource usage
 * exceeds the historical cluster average by more than the configured
 * threshold. Results are accumulated in {@code selectedBundlesCache}
 * (broker -> bundle), which is cleared on every invocation.
 * NOTE(review): usage values appear to be fractions in [0, 1] (they are
 * multiplied by 100 for logging) — confirm against getBrokerAvgUsage.
 */
@Override
public synchronized Multimap<String, String> findBundlesForUnloading(final LoadData loadData,
                                                                     final ServiceConfiguration conf) {
    selectedBundlesCache.clear();
    // shedding threshold as a fraction, e.g. 10% -> 0.10
    final double threshold = conf.getLoadBalancerBrokerThresholdShedderPercentage() / 100.0;
    final Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();
    // minimum throughput (bytes/s) worth shedding; configured in MB/s
    final double minThroughputThreshold = conf.getLoadBalancerBundleUnloadMinThroughputThreshold() * MB;
    final double avgUsage = getBrokerAvgUsage(loadData, conf.getLoadBalancerHistoryResourcePercentage(), conf);
    if (avgUsage == 0) {
        // no usage data yet — nothing sensible to shed
        log.warn("average max resource usage is 0");
        return selectedBundlesCache;
    }
    loadData.getBrokerData().forEach((broker, brokerData) -> {
        final LocalBrokerData localData = brokerData.getLocalData();
        final double currentUsage = brokerAvgResourceUsage.getOrDefault(broker, 0.0);
        if (currentUsage < avgUsage + threshold) {
            // broker is within the tolerated band above average — skip it
            if (log.isDebugEnabled()) {
                log.debug("[{}] broker is not overloaded, ignoring at this point ({})", broker,
                        localData.printResourceUsage());
            }
            return;
        }
        // fraction of this broker's traffic to offload: the excess over
        // (average + threshold), plus a small extra margin
        double percentOfTrafficToOffload =
                currentUsage - avgUsage - threshold + ADDITIONAL_THRESHOLD_PERCENT_MARGIN;
        double brokerCurrentThroughput = localData.getMsgThroughputIn() + localData.getMsgThroughputOut();
        double minimumThroughputToOffload = brokerCurrentThroughput * percentOfTrafficToOffload;
        if (minimumThroughputToOffload < minThroughputThreshold) {
            // the shed amount would be too small to be worth a bundle unload
            if (log.isDebugEnabled()) {
                log.debug("[{}] broker is planning to shed throughput {} MByte/s less than "
                        + "minimumThroughputThreshold {} MByte/s, skipping bundle unload ({})",
                        broker, minimumThroughputToOffload / MB, minThroughputThreshold / MB,
                        localData.printResourceUsage());
            }
            return;
        }
        log.info(
                "Attempting to shed load on {}, which has max resource usage above avgUsage and threshold {}%"
                        + " > {}% + {}% -- Offloading at least {} MByte/s of traffic,"
                        + " left throughput {} MByte/s ({})",
                broker, 100 * currentUsage, 100 * avgUsage, 100 * threshold,
                minimumThroughputToOffload / MB,
                (brokerCurrentThroughput - minimumThroughputToOffload) / MB,
                localData.printResourceUsage());
        if (localData.getBundles().size() > 1) {
            // normal case: pick individual bundles adding up to the target throughput
            filterAndSelectBundle(loadData, recentlyUnloadedBundles, broker, localData,
                    minimumThroughputToOffload);
        } else if (localData.getBundles().size() == 1) {
            // a single bundle cannot be split off — shedding would not help
            log.warn(
                    "HIGH USAGE WARNING : Sole namespace bundle {} is overloading broker {}. "
                            + "No Load Shedding will be done on this broker",
                    localData.getBundles().iterator().next(), broker);
        } else {
            log.warn("Broker {} is overloaded despite having no bundles", broker);
        }
    });
    if (selectedBundlesCache.isEmpty() && conf.isLowerBoundarySheddingEnabled()) {
        // nothing exceeded the upper threshold; optionally rebalance from the low end
        tryLowerBoundaryShedding(loadData, conf);
    }
    return selectedBundlesCache;
}
@Test
public void testBrokerWithSingleBundle() {
    // A broker saturating both bandwidth directions but hosting only one bundle.
    LocalBrokerData overloadedBroker = new LocalBrokerData();
    overloadedBroker.setBandwidthIn(new ResourceUsage(999, 1000));
    overloadedBroker.setBandwidthOut(new ResourceUsage(999, 1000));
    overloadedBroker.setBundles(Sets.newHashSet("bundle-1"));

    TimeAverageMessageData shortTermData = new TimeAverageMessageData();
    shortTermData.setMsgThroughputIn(1000);
    shortTermData.setMsgThroughputOut(1000);
    BundleData soleBundle = new BundleData();
    soleBundle.setShortTermData(shortTermData);

    LoadData loadData = new LoadData();
    loadData.getBundleData().put("bundle-1", soleBundle);
    loadData.getBrokerData().put("broker-1", new BrokerData(overloadedBroker));

    // a sole-bundle broker must never be selected for shedding
    assertTrue(thresholdShedder.findBundlesForUnloading(loadData, conf).isEmpty());
}
/**
 * Builds a Jetty client-side {@link SslContextFactory.Client} from the
 * "listeners.https."-prefixed settings of the given config (all-or-nothing
 * prefix resolution), configuring key store, trust store, algorithms and
 * endpoint identification.
 */
public static SslContextFactory.Client createClientSideSslContextFactory(AbstractConfig config) {
    final Map<String, Object> sslConfigValues = config.valuesWithPrefixAllOrNothing("listeners.https.");

    final SslContextFactory.Client factory = new SslContextFactory.Client();
    configureSslContextFactoryKeyStore(factory, sslConfigValues);
    configureSslContextFactoryTrustStore(factory, sslConfigValues);
    configureSslContextFactoryAlgorithms(factory, sslConfigValues);
    configureSslContextFactoryEndpointIdentification(factory, sslConfigValues);
    return factory;
}
@Test
public void testCreateClientSideSslContextFactoryDefaultValues() {
    // configure only non-default-bearing properties; the factory should fall
    // back to the SslConfigs defaults for everything asserted below
    Map<String, String> props = new HashMap<>();
    props.put("ssl.keystore.location", "/path/to/keystore");
    props.put("ssl.keystore.password", "123456");
    props.put("ssl.key.password", "123456");
    props.put("ssl.truststore.location", "/path/to/truststore");
    props.put("ssl.truststore.password", "123456");
    props.put("ssl.provider", "SunJSSE");
    props.put("ssl.cipher.suites", "SSL_RSA_WITH_RC4_128_SHA,SSL_RSA_WITH_RC4_128_MD5");
    props.put("ssl.secure.random.implementation", "SHA1PRNG");

    RestServerConfig config = RestServerConfig.forPublic(null, props);
    SslContextFactory.Client ssl = SSLUtils.createClientSideSslContextFactory(config);

    assertEquals(SslConfigs.DEFAULT_SSL_KEYSTORE_TYPE, ssl.getKeyStoreType());
    assertEquals(SslConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE, ssl.getTrustStoreType());
    assertEquals(SslConfigs.DEFAULT_SSL_PROTOCOL, ssl.getProtocol());
    String[] expectedProtocols = SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS.split("\\s*,\\s*");
    assertArrayEquals(expectedProtocols, ssl.getIncludeProtocols());
    assertEquals(SslConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM, ssl.getKeyManagerFactoryAlgorithm());
    assertEquals(SslConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM, ssl.getTrustManagerFactoryAlgorithm());
}
/**
 * Encodes an image-removal event: log header, then sessionId, streamId and id,
 * with the (possibly truncated) channel string written last.
 */
static void encodeImageRemoval(
    final UnsafeBuffer encodingBuffer,
    final int offset,
    final int captureLength,
    final int length,
    final String channel,
    final int sessionId,
    final int streamId,
    final long id)
{
    // absolute write position, starting just past the log header
    int index = offset + encodeLogHeader(encodingBuffer, offset, captureLength, length);

    encodingBuffer.putInt(index, sessionId, LITTLE_ENDIAN);
    index += SIZE_OF_INT;

    encodingBuffer.putInt(index, streamId, LITTLE_ENDIAN);
    index += SIZE_OF_INT;

    encodingBuffer.putLong(index, id, LITTLE_ENDIAN);
    index += SIZE_OF_LONG;

    // channel gets whatever capture space remains after the fixed-size fields
    encodeTrailingString(
        encodingBuffer, index, captureLength - SIZE_OF_INT * 2 - SIZE_OF_LONG, channel);
}
@Test
void encodeImageRemovalShouldWriteChannelLast() {
    final int offset = 0;
    final String channel = "aeron:udp?endpoint=224.10.9.8";
    final int sessionId = 13;
    final int streamId = 42;
    final long id = Long.MAX_VALUE;
    final int captureLength = 3 * SIZE_OF_INT + SIZE_OF_LONG + channel.length();

    encodeImageRemoval(buffer, offset, captureLength, captureLength, channel, sessionId, streamId, id);

    // log header: capture length, total length, and a non-zero timestamp
    assertEquals(captureLength, buffer.getInt(offset, LITTLE_ENDIAN));
    assertEquals(captureLength, buffer.getInt(offset + SIZE_OF_INT, LITTLE_ENDIAN));
    assertNotEquals(0, buffer.getLong(offset + SIZE_OF_INT * 2, LITTLE_ENDIAN));

    // payload: sessionId, streamId, id, then the channel string last
    final int payload = offset + LOG_HEADER_LENGTH;
    assertEquals(sessionId, buffer.getInt(payload, LITTLE_ENDIAN));
    assertEquals(streamId, buffer.getInt(payload + SIZE_OF_INT, LITTLE_ENDIAN));
    assertEquals(id, buffer.getLong(payload + SIZE_OF_INT * 2, LITTLE_ENDIAN));
    assertEquals(channel,
        buffer.getStringAscii(payload + SIZE_OF_INT * 2 + SIZE_OF_LONG, LITTLE_ENDIAN));
}