focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@SuppressWarnings({"OverlyComplexMethod", "JavadocReference", "NestedTryStatement"}) void reserveMemory(long size) throws MemoryReservationException { long availableOrReserved = tryReserveMemory(size); // optimist! if (availableOrReserved >= size) { return; } // no luck throw new MemoryReservationException( String.format( "Could not allocate %d bytes, only %d bytes are remaining. This usually indicates " + "that you are requesting more memory than you have reserved. " + "However, when running an old JVM version it can also be caused by slow garbage collection. " + "Try to upgrade to Java 8u72 or higher if running on an old Java version.", size, availableOrReserved)); }
// Reserving more than the budget's remaining memory must fail with MemoryReservationException.
@Test void testReserveMemoryOverLimitFails() { UnsafeMemoryBudget budget = createUnsafeMemoryBudget(); assertThatExceptionOfType(MemoryReservationException.class) .isThrownBy(() -> budget.reserveMemory(120L)); }
/** Returns the {@code recurrenceId} held by this instance. */
public final RecurrenceId getRecurrenceId() {
    return this.recurrenceId;
}
// Fixed typo in the test-method name ("Recurrencee" -> "Recurrence"); JUnit discovers
// tests via @Test, so the rename is safe for the runner.
@Test
public final void testGetRecurrenceId() {
    // Expected value mirrors the fixture data used to build jobMetaData.
    final RecurrenceId expected = new RecurrenceId("Fraud Detection", "17/07/16 16:27:25");
    Assert.assertEquals(expected, jobMetaData.getRecurrenceId());
}
// Delegates the total-document count to the wrapped MongoDB collection.
public long getCount() { return delegate.countDocuments(); }
// Verifies count() tracks inserts/removals and that the filtered count(DBObject) overloads work.
@Test void getCount() { final var collection = jacksonCollection("simple", Simple.class); assertThat(collection.count()).isEqualTo(0); collection.insert(new Simple(null, "foo")); assertThat(collection.count()).isEqualTo(1); collection.insert(new Simple(null, "bar")); assertThat(collection.count()).isEqualTo(2); assertThat(collection.count((DBObject) new BasicDBObject(Map.of("name", Map.of("$in", List.of("foo", "bar")))))) .isEqualTo(2); assertThat(collection.count((DBObject) new BasicDBObject("name", "bar"))).isOne(); collection.remove(DBQuery.empty()); assertThat(collection.count()).isEqualTo(0); }
/**
 * Builds consumer {@link Properties} from the raw options, then layers the
 * key- and value-serde settings on top (key first, then value).
 */
static Properties resolveConsumerProperties(Map<String, String> options, Object keySchema, Object valueSchema) {
    Properties consumerProperties = from(options);
    withSerdeConsumerProperties(true, options, keySchema, consumerProperties);
    withSerdeConsumerProperties(false, options, valueSchema, consumerProperties);
    return consumerProperties;
}
// Unknown key/value formats must be silently ignored, producing empty consumer properties.
@Test public void when_consumerProperties_formatIsUnknown_then_itIsIgnored() { // key Map<String, String> keyOptions = Map.of(OPTION_KEY_FORMAT, UNKNOWN_FORMAT); assertThat(resolveConsumerProperties(keyOptions)).isEmpty(); // value Map<String, String> valueOptions = Map.of( OPTION_KEY_FORMAT, UNKNOWN_FORMAT, OPTION_VALUE_FORMAT, UNKNOWN_FORMAT ); assertThat(resolveConsumerProperties(valueOptions)).isEmpty(); }
// Convenience overload: delegates to the full variant with a fresh, empty visited set.
public void parseStepParameter( Map<String, Map<String, Object>> allStepOutputData, Map<String, Parameter> workflowParams, Map<String, Parameter> stepParams, Parameter param, String stepId) { parseStepParameter( allStepOutputData, workflowParams, stepParams, param, stepId, new HashSet<>()); }
// A step parameter expression referencing another step param ("step1__foo") must be evaluated.
@Test public void testParseStepParameter() { StringParameter bar = StringParameter.builder().name("bar").expression("step1__foo + '-1';").build(); paramEvaluator.parseStepParameter( Collections.emptyMap(), Collections.emptyMap(), Collections.singletonMap( "foo", StringParameter.builder().evaluatedResult("123").evaluatedTime(123L).build()), bar, "step1"); assertEquals("123-1", bar.getEvaluatedResult()); }
public ValidationResult validateMessagesAndAssignOffsets(PrimitiveRef.LongRef offsetCounter, MetricsRecorder metricsRecorder, BufferSupplier bufferSupplier) { if (sourceCompressionType == CompressionType.NONE && targetCompression.type() == CompressionType.NONE) { // check the magic value if (!records.hasMatchingMagic(toMagic)) return convertAndAssignOffsetsNonCompressed(offsetCounter, metricsRecorder); else // Do in-place validation, offset assignment and maybe set timestamp return assignOffsetsNonCompressed(offsetCounter, metricsRecorder); } else return validateMessagesAndAssignOffsetsCompressed(offsetCounter, metricsRecorder, bufferSupplier); }
// Offsets must be preserved when down-converting compressed V2 records to V1 during validation.
@Test public void testOffsetAssignmentAfterDownConversionV2ToV1Compressed() { long offset = 1234567; long now = System.currentTimeMillis(); Compression compression = Compression.gzip().build(); MemoryRecords records = createRecords(RecordBatch.MAGIC_VALUE_V2, now, compression); checkOffsets(records, 0); checkOffsets(new LogValidator( records, new TopicPartition("topic", 0), time, CompressionType.GZIP, compression, false, RecordBatch.MAGIC_VALUE_V1, TimestampType.CREATE_TIME, 5000L, 5000L, RecordBatch.NO_PARTITION_LEADER_EPOCH, AppendOrigin.CLIENT, MetadataVersion.latestTesting() ).validateMessagesAndAssignOffsets( PrimitiveRef.ofLong(offset), metricsRecorder, RequestLocal.withThreadConfinedCaching().bufferSupplier() ).validatedRecords, offset); }
// Dispatches the AST node to the configured rewriter with the given context.
public AstNode rewrite(final AstNode node, final C context) { return rewriter.process(node, context); }
// A query's window expression must be passed through the plugin rewriter and replaced in the result.
@Test public void shouldRewriteQueryWithWindow() { // Given: final WindowExpression window = mock(WindowExpression.class); final WindowExpression rewrittenWindow = mock(WindowExpression.class); final Query query = givenQuery(Optional.of(window), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty()); when(mockRewriter.apply(window, context)).thenReturn(rewrittenWindow); // When: final AstNode rewritten = rewriter.rewrite(query, context); // Then: assertThat(rewritten, equalTo(new Query( location, rewrittenSelect, rewrittenRelation, Optional.of(rewrittenWindow), Optional.empty(), Optional.empty(), Optional.empty(), Optional.empty(), Optional.of(refinementInfo), false, optionalInt)) ); }
// Lifecycle hook: loads the thread-local settings when the component starts.
@Override public void start() { threadLocalSettings.load(); }
// start() must call load() on the settings exactly once and do nothing else.
@Test public void start_calls_ThreadLocalSettings_load() { underTest.start(); verify(threadLocalSettings).load(); verifyNoMoreInteractions(threadLocalSettings); }
// Synchronous wrapper: blocks on the async capacity update and returns its result.
@Override public boolean trySetCapacity(int capacity) { return get(trySetCapacityAsync(capacity)); }
// After clearExpire() the previously set TTL must not remove the queue contents.
@Test public void testClearExpire() throws InterruptedException { RBoundedBlockingQueue<Integer> queue = redisson.getBoundedBlockingQueue("queue1"); queue.trySetCapacity(10); queue.add(1); queue.add(2); queue.expireAt(System.currentTimeMillis() + 100); queue.clearExpire(); Thread.sleep(500); assertThat(queue).containsExactly(1, 2); }
// Completes the callback with NotFoundException for unknown connectors; otherwise hands the
// request off to the worker, funneling any synchronous failure into the callback as well.
@Override public void connectorOffsets(String connName, Callback<ConnectorOffsets> cb) { ClusterConfigState configSnapshot = configBackingStore.snapshot(); try { if (!configSnapshot.contains(connName)) { cb.onCompletion(new NotFoundException("Connector " + connName + " not found"), null); return; } // The worker asynchronously processes the request and completes the passed callback when done worker.connectorOffsets(connName, configSnapshot.connectorConfig(connName), cb); } catch (Throwable t) { cb.onCompletion(t, null); } }
// Requesting offsets for an unknown connector must surface NotFoundException via the callback.
@Test public void testConnectorOffsetsConnectorNotFound() { when(configStore.snapshot()).thenReturn(SNAPSHOT); AbstractHerder herder = testHerder(); FutureCallback<ConnectorOffsets> cb = new FutureCallback<>(); herder.connectorOffsets("unknown-connector", cb); ExecutionException e = assertThrows(ExecutionException.class, () -> cb.get(1000, TimeUnit.MILLISECONDS)); assertEquals(NotFoundException.class, e.getCause().getClass()); }
/** Returns the stored device key id, or {@code null} when none is configured. */
public DeviceKeyId deviceKeyId() {
    final String raw = get(DEVICE_KEY_ID, null);
    if (raw == null) {
        return null;
    }
    return DeviceKeyId.deviceKeyId(raw);
}
// Round-trips setting and clearing the device key id through the config accessor.
@Test public void testSetDeviceKeyId() { // change device key id SW_BDC.deviceKeyId(DEVICE_KEY_ID_NEW); assertEquals("Incorrect deviceKeyId", DEVICE_KEY_ID_NEW, SW_BDC.deviceKeyId()); // clear device key id SW_BDC.deviceKeyId(null); assertEquals("Incorrect deviceKeyId", null, SW_BDC.deviceKeyId()); }
/**
 * Renders as {@code AfterAll.of(t1, t2, ...)}. Uses a plain StringBuilder join
 * instead of Guava's Joiner — no third-party dependency needed for this.
 */
@Override
public String toString() {
    StringBuilder builder = new StringBuilder("AfterAll.of(");
    String separator = "";
    for (Object subTrigger : subTriggers) {
        builder.append(separator).append(subTrigger);
        separator = ", ";
    }
    builder.append(")");
    return builder.toString();
}
// toString() must list the sub-triggers comma-separated inside "AfterAll.of(...)".
@Test public void testToString() { Trigger trigger = AfterAll.of(StubTrigger.named("t1"), StubTrigger.named("t2")); assertEquals("AfterAll.of(t1, t2)", trigger.toString()); }
// Normalizes the URL with the second flag defaulted to false — see the two-arg overload
// for that flag's meaning.
public static String normalize(String url) { return normalize(url, false); }
// normalize() must prepend "http://" to a protocol-relative URL and convert backslashes to slashes.
@Test public void formatTest() { String url = "//www.hutool.cn//aaa/\\bbb?a=1&b=2"; String normalize = URLUtil.normalize(url); assertEquals("http://www.hutool.cn//aaa//bbb?a=1&b=2", normalize); }
/**
 * Resolves the configured member addresses (defaulting to localhost when none are
 * configured) and notifies the listener runner with all collected addresses.
 */
@Override
public Addresses loadAddresses(ClientConnectionProcessListenerRegistry listenerRunner) {
    List<String> configuredAddresses = networkConfig.getAddresses();
    if (configuredAddresses.isEmpty()) {
        // Use a local fallback list instead of mutating the list owned by the
        // network config (the previous version added "127.0.0.1" into the config itself).
        configuredAddresses = new ArrayList<>();
        configuredAddresses.add("127.0.0.1");
    }
    Addresses addresses = new Addresses();
    for (String address : configuredAddresses) {
        addresses.addAll(AddressHelper.getSocketAddresses(address, listenerRunner));
    }
    // Flatten primary + secondary for the listener notification.
    List<Address> allAddresses = new ArrayList<>(addresses.primary());
    allAddresses.addAll(addresses.secondary());
    listenerRunner.onPossibleAddressesCollected(allAddresses);
    return addresses;
}
// With no configured addresses, the provider must fall back to localhost default ports.
@Test public void whenNoAddresses() throws UnknownHostException { ClientNetworkConfig config = new ClientNetworkConfig(); DefaultAddressProvider provider = new DefaultAddressProvider(config, () -> false); Addresses addresses = provider.loadAddresses(createConnectionProcessListenerRunner()); assertPrimary(addresses, new Address("127.0.0.1", 5701)); assertSecondary(addresses, new Address("127.0.0.1", 5702), new Address("127.0.0.1", 5703)); }
// Pushes onto shutdownStack, so the last-registered function is executed first at shutdown.
public void register(Runnable shutdownFunction) { shutdownStack.push(shutdownFunction); }
// Latch chain proves hooks run in reverse registration order (LIFO): the last-registered
// hook releases the first latch, which cascades through the earlier registrations.
@Test public void testOrdering() throws InterruptedException { ShutdownHook hook = new ShutdownHook(); CountDownLatch latchOne = new CountDownLatch(1); CountDownLatch latchTwo = new CountDownLatch(1); CountDownLatch latchThree = new CountDownLatch(1); hook.register(() -> { try { latchTwo.await(); latchThree.countDown(); } catch (InterruptedException e) { throw new RuntimeException(e); } }); hook.register(() -> { try { latchOne.await(); latchTwo.countDown(); } catch (InterruptedException e) { throw new RuntimeException(e); } }); hook.register(latchOne::countDown); CompletableFuture.runAsync(hook); boolean completed = latchThree.await(5_000L, TimeUnit.MILLISECONDS); assertThat("Last latch did not complete", completed, is(true)); }
// Allocates OPPORTUNISTIC containers: updates the blacklist, registers the new resource
// requests as outstanding, then loops over the outstanding request keys (highest priority
// first) until no further allocation succeeds, honoring the per-AM-heartbeat allocation cap.
// Logic is order-sensitive (shared nodeBlackList/allocatedNodes state) — left byte-identical.
@Override public List<Container> allocateContainers(ResourceBlacklistRequest blackList, List<ResourceRequest> oppResourceReqs, ApplicationAttemptId applicationAttemptId, OpportunisticContainerContext opportContext, long rmIdentifier, String appSubmitter) throws YarnException { // Update black list. updateBlacklist(blackList, opportContext); // Add OPPORTUNISTIC requests to the outstanding ones. opportContext.addToOutstandingReqs(oppResourceReqs); Set<String> nodeBlackList = new HashSet<>(opportContext.getBlacklist()); Set<String> allocatedNodes = new HashSet<>(); List<Container> allocatedContainers = new ArrayList<>(); // Satisfy the outstanding OPPORTUNISTIC requests. boolean continueLoop = true; while (continueLoop) { continueLoop = false; List<Map<Resource, List<Allocation>>> allocations = new ArrayList<>(); for (SchedulerRequestKey schedulerKey : opportContext.getOutstandingOpReqs().descendingKeySet()) { // Allocated containers : // Key = Requested Capability, // Value = List of Containers of given cap (the actual container size // might be different than what is requested, which is why // we need the requested capability (key) to match against // the outstanding reqs) int remAllocs = -1; int maxAllocationsPerAMHeartbeat = getMaxAllocationsPerAMHeartbeat(); if (maxAllocationsPerAMHeartbeat > 0) { remAllocs = maxAllocationsPerAMHeartbeat - allocatedContainers.size() - getTotalAllocations(allocations); if (remAllocs <= 0) { LOG.info("Not allocating more containers as we have reached max " + "allocations per AM heartbeat {}", maxAllocationsPerAMHeartbeat); break; } } Map<Resource, List<Allocation>> allocation = allocate( rmIdentifier, opportContext, schedulerKey, applicationAttemptId, appSubmitter, nodeBlackList, allocatedNodes, remAllocs); if (allocation.size() > 0) { allocations.add(allocation); continueLoop = true; } } matchAllocation(allocations, allocatedContainers, opportContext); } return allocatedContainers; }
// Requests with a node label stay outstanding until a node advertising that label appears.
@Test public void testAllocationWithNodeLabels() throws Exception { ResourceBlacklistRequest blacklistRequest = ResourceBlacklistRequest.newInstance( new ArrayList<>(), new ArrayList<>()); List<ResourceRequest> reqs = Arrays.asList(ResourceRequest.newInstance(PRIORITY_NORMAL, "*", CAPABILITY_1GB, 1, true, "label", OPPORTUNISTIC_REQ)); ApplicationAttemptId appAttId = ApplicationAttemptId.newInstance( ApplicationId.newInstance(0L, 1), 1); oppCntxt.updateNodeList( Arrays.asList( RemoteNode.newInstance( NodeId.newInstance("h1", 1234), "h1:1234", "/r1"))); List<Container> containers = allocator.allocateContainers( blacklistRequest, reqs, appAttId, oppCntxt, 1L, "luser"); /* Since there is no node satisfying node label constraints, requests won't get fulfilled. */ Assert.assertEquals(0, containers.size()); Assert.assertEquals(1, oppCntxt.getOutstandingOpReqs().size()); oppCntxt.updateNodeList( Arrays.asList( RemoteNode.newInstance( NodeId.newInstance("h1", 1234), "h1:1234", "/r1", "label"))); containers = allocator.allocateContainers( blacklistRequest, reqs, appAttId, oppCntxt, 1L, "luser"); Assert.assertEquals(1, containers.size()); Assert.assertEquals(0, oppCntxt.getOutstandingOpReqs().size()); }
// Single-key convenience: delegates to the batch variant and unwraps the only element.
@Override public DeterministicKey getKey(KeyPurpose purpose) { return getKeys(purpose, 1).get(0); }
// A key obtained from the chain must be able to sign and verify a message round-trip.
@Test public void signMessage() throws Exception { ECKey key = chain.getKey(KeyChain.KeyPurpose.RECEIVE_FUNDS); key.verifyMessage("test", key.signMessage("test")); }
// Expands a shorthand ("*") projection: with an owner, only that table's visible columns
// are expanded; otherwise all no-owner projections from the bound table are used.
public static ShorthandProjectionSegment bind(final ShorthandProjectionSegment segment, final TableSegment boundTableSegment, final Map<String, TableSegmentBinderContext> tableBinderContexts) { ShorthandProjectionSegment result = copy(segment); if (segment.getOwner().isPresent()) { expandVisibleColumns(getProjectionSegmentsByTableAliasOrName(tableBinderContexts, segment.getOwner().get().getIdentifier().getValue()), result); } else { expandNoOwnerProjections(boundTableSegment, tableBinderContexts, result); } return result; }
// Binding "*" against a join table must expand to the join's derived projection columns.
@Test void assertBindWithoutOwnerForJoinTableSegment() { ShorthandProjectionSegment shorthandProjectionSegment = new ShorthandProjectionSegment(0, 0); JoinTableSegment boundTableSegment = new JoinTableSegment(); boundTableSegment.getDerivedJoinTableProjectionSegments().add(new ColumnProjectionSegment(new ColumnSegment(0, 0, new IdentifierValue("order_id")))); ShorthandProjectionSegment actual = ShorthandProjectionSegmentBinder.bind(shorthandProjectionSegment, boundTableSegment, Collections.emptyMap()); assertThat(actual.getActualProjectionSegments().size(), is(1)); ProjectionSegment visibleColumn = actual.getActualProjectionSegments().iterator().next(); assertThat(visibleColumn.getColumnLabel(), is("order_id")); assertTrue(visibleColumn.isVisible()); }
// Bounds-checks one byte at the index (may raise the replay signal) before delegating.
@Override public boolean getBoolean(int index) { checkIndex(index, 1); return buffer.getBoolean(index); }
// Reading past the replayable buffer's end must raise the replay Signal after 10 valid reads.
@Test public void testGetBoolean() { ByteBuf buf = Unpooled.buffer(10); while (buf.isWritable()) { buf.writeBoolean(true); } ReplayingDecoderByteBuf buffer = new ReplayingDecoderByteBuf(buf); boolean error; int i = 0; try { for (;;) { buffer.getBoolean(i); i++; } } catch (Signal e) { error = true; } assertTrue(error); assertEquals(10, i); buf.release(); }
// Allocates NUMA nodes for the container and persists the assignment in the NM state store;
// on a store failure the allocation is rolled back before rethrowing.
public synchronized NumaResourceAllocation allocateNumaNodes( Container container) throws ResourceHandlerException { NumaResourceAllocation allocation = allocate(container.getContainerId(), container.getResource()); if (allocation != null) { try { // Update state store. context.getNMStateStore().storeAssignedResources(container, NUMA_RESOURCE_TYPE, Arrays.asList(allocation)); } catch (IOException e) { releaseNumaResource(container.getContainerId()); throw new ResourceHandlerException(e); } } return allocation; }
// Successive allocations must rotate across NUMA nodes 0 and 1 in round-robin order.
// NOTE(review): the third and fourth allocations reuse container id ..._000003 — looks like
// a copy/paste slip (expected ..._000004); verify against the allocator's keying behavior.
@Test public void testAllocateNumaNodeWithRoundRobinFashionAssignment() throws Exception { NumaResourceAllocation nodeInfo1 = numaResourceAllocator .allocateNumaNodes(getContainer( ContainerId.fromString("container_1481156246874_0001_01_000001"), Resource.newInstance(2048, 2))); Assert.assertEquals("0", String.join(",", nodeInfo1.getMemNodes())); Assert.assertEquals("0", String.join(",", nodeInfo1.getCpuNodes())); NumaResourceAllocation nodeInfo2 = numaResourceAllocator .allocateNumaNodes(getContainer( ContainerId.fromString("container_1481156246874_0001_01_000002"), Resource.newInstance(2048, 2))); Assert.assertEquals("1", String.join(",", nodeInfo2.getMemNodes())); Assert.assertEquals("1", String.join(",", nodeInfo2.getCpuNodes())); NumaResourceAllocation nodeInfo3 = numaResourceAllocator .allocateNumaNodes(getContainer( ContainerId.fromString("container_1481156246874_0001_01_000003"), Resource.newInstance(2048, 2))); Assert.assertEquals("0", String.join(",", nodeInfo3.getMemNodes())); Assert.assertEquals("0", String.join(",", nodeInfo3.getCpuNodes())); NumaResourceAllocation nodeInfo4 = numaResourceAllocator .allocateNumaNodes(getContainer( ContainerId.fromString("container_1481156246874_0001_01_000003"), Resource.newInstance(2048, 2))); Assert.assertEquals("1", String.join(",", nodeInfo4.getMemNodes())); Assert.assertEquals("1", String.join(",", nodeInfo4.getCpuNodes())); }
/**
 * Sends a WeChat alert either through the APP push API (token-based) or a webhook,
 * returning a failure AlertResult instead of throwing on any error.
 */
public AlertResult send(String content) {
    AlertResult alertResult = new AlertResult();
    try {
        String url;
        if (WeChatType.APP.getValue().equals(wechatParams.getSendType())) {
            String token = getToken();
            if (token == null) {
                // Fail fast instead of formatting "null" into the push URL. The previous
                // version used `assert token != null`, which is a no-op unless -ea is set.
                throw new IllegalStateException("failed to fetch WeChat access token");
            }
            url = String.format(WeChatConstants.WECHAT_PUSH_URL, wechatParams.getSendUrl(), token);
        } else {
            url = wechatParams.getWebhook();
        }
        return checkWeChatSendMsgResult(HttpUtils.post(url, content));
    } catch (Exception e) {
        // Log the full stack trace, not just the message, so failures are diagnosable.
        logger.error("send we chat alert msg exception : {}", e.getMessage(), e);
        alertResult.setMessage("send we chat alert fail");
        alertResult.setSuccess(false);
    }
    return alertResult;
}
// Ignored by default: hits the real WeChat APP API, so it needs live credentials to run.
@Ignore @Test public void testSendAPPMarkDownMsg() { WeChatAlert weChatAlert = new WeChatAlert(); AlertConfig alertConfig = new AlertConfig(); weChatConfig.put(WeChatConstants.SEND_TYPE, WeChatType.APP.getValue()); alertConfig.setType("WeChat"); alertConfig.setParam(weChatConfig); weChatAlert.setConfig(alertConfig); AlertResult alertResult = weChatAlert.send(AlertBaseConstant.ALERT_TEMPLATE_TITLE, AlertBaseConstant.ALERT_TEMPLATE_MSG); Assert.assertEquals(true, alertResult.getSuccess()); }
// Assembles a MessagesRequestSpec from simple query params, parsing the timerange keyword.
public MessagesRequestSpec simpleQueryParamsToFullRequestSpecification(final String query, final Set<String> streams, final String timerangeKeyword, final List<String> fields, final String sort, final SortSpec.Direction sortOrder, final int from, final int size) { return new MessagesRequestSpec(query, streams, timerangeParser.parseTimeRange(timerangeKeyword), sort, sortOrder, from, size, fields); }
// NOTE(review): this exercises the aggregation-spec overload (groupings/metrics),
// not the messages variant above — confirm the intended pairing.
@Test void createsProperRequestSpec() { doReturn(KeywordRange.create("last 1 day", "UTC")).when(timerangeParser).parseTimeRange("1d"); final AggregationRequestSpec aggregationRequestSpec = toTest.simpleQueryParamsToFullRequestSpecification("http_method:GET", Set.of("000000000000000000000001"), "1d", List.of("http_method", "controller"), List.of("avg:took_ms")); assertThat(aggregationRequestSpec).isEqualTo(new AggregationRequestSpec( "http_method:GET", Set.of("000000000000000000000001"), KeywordRange.create("last 1 day", "UTC"), List.of(new Grouping("http_method"), new Grouping("controller")), List.of(new Metric("avg", "took_ms")) ) ); }
// Splits the source into up to desiredNumSplits sub-sources: reuses an existing checkpoint
// when present, otherwise generates an initial one from the live Kinesis shard list.
@Override public List<KinesisSource> split(int desiredNumSplits, PipelineOptions options) throws Exception { List<KinesisSource> sources = new ArrayList<>(); KinesisReaderCheckpoint checkpoint; // in case split() is called upon existing checkpoints for further splitting: if (this.initialCheckpoint != null) { checkpoint = this.initialCheckpoint; } // in case a new checkpoint is created from scratch: else { AwsOptions awsOptions = options.as(AwsOptions.class); ClientConfiguration config = spec.getClientConfiguration(); try (KinesisClient client = buildClient(awsOptions, KinesisClient.builder(), config)) { checkpoint = generateInitCheckpoint(spec, client); } } for (KinesisReaderCheckpoint partition : checkpoint.splitInto(desiredNumSplits)) { sources.add(new KinesisSource(spec, partition)); } return sources; }
// Split count tracks the request up to the shard count (3), then caps at the shard count.
@Test public void testSplitGeneratesCorrectNumberOfSources() throws Exception { mockShards(kinesisClient, 3); KinesisSource source = sourceWithMockedKinesisClient(spec()); assertThat(source.split(1, options).size()).isEqualTo(1); assertThat(source.split(2, options).size()).isEqualTo(2); assertThat(source.split(3, options).size()).isEqualTo(3); // there are only 3 shards, no more than 3 splits can be created assertThat(source.split(4, options).size()).isEqualTo(3); }
// Preprocesses a segment in a strict order: default columns, forward index first (it may
// rewrite metadata), remaining indexes, cleanup, min/max metadata, then star-trees with a
// freshly opened writer. Metadata reloads between phases are load-bearing — left byte-identical.
public void process() throws Exception { if (_segmentMetadata.getTotalDocs() == 0) { LOGGER.info("Skip preprocessing empty segment: {}", _segmentMetadata.getName()); return; } // Segment processing has to be done with a local directory. File indexDir = new File(_indexDirURI); // This fixes the issue of temporary files not getting deleted after creating new inverted indexes. removeInvertedIndexTempFiles(indexDir); try (SegmentDirectory.Writer segmentWriter = _segmentDirectory.createWriter()) { // Update default columns according to the schema. if (_schema != null) { DefaultColumnHandler defaultColumnHandler = DefaultColumnHandlerFactory .getDefaultColumnHandler(indexDir, _segmentMetadata, _indexLoadingConfig, _schema, segmentWriter); defaultColumnHandler.updateDefaultColumns(); _segmentMetadata = new SegmentMetadataImpl(indexDir); _segmentDirectory.reloadMetadata(); } else { LOGGER.warn("Skip creating default columns for segment: {} without schema", _segmentMetadata.getName()); } // Update single-column indices, like inverted index, json index etc. List<IndexHandler> indexHandlers = new ArrayList<>(); // We cannot just create all the index handlers in a random order. // Specifically, ForwardIndexHandler needs to be executed first. This is because it modifies the segment metadata // while rewriting forward index to create a dictionary. Some other handlers (like the range one) assume that // metadata was already been modified by ForwardIndexHandler. 
IndexHandler forwardHandler = createHandler(StandardIndexes.forward()); indexHandlers.add(forwardHandler); forwardHandler.updateIndices(segmentWriter); // Now that ForwardIndexHandler.updateIndices has been updated, we can run all other indexes in any order _segmentMetadata = new SegmentMetadataImpl(indexDir); _segmentDirectory.reloadMetadata(); for (IndexType<?, ?, ?> type : IndexService.getInstance().getAllIndexes()) { if (type != StandardIndexes.forward()) { IndexHandler handler = createHandler(type); indexHandlers.add(handler); handler.updateIndices(segmentWriter); // Other IndexHandler classes may modify the segment metadata while creating a temporary forward // index to generate their respective indexes from if the forward index was disabled. This new metadata is // needed to construct other indexes like RangeIndex. _segmentMetadata = _segmentDirectory.getSegmentMetadata(); } } // Perform post-cleanup operations on the index handlers. for (IndexHandler handler : indexHandlers) { handler.postUpdateIndicesCleanup(segmentWriter); } // Add min/max value to column metadata according to the prune mode. ColumnMinMaxValueGeneratorMode columnMinMaxValueGeneratorMode = _indexLoadingConfig.getColumnMinMaxValueGeneratorMode(); if (columnMinMaxValueGeneratorMode != ColumnMinMaxValueGeneratorMode.NONE) { ColumnMinMaxValueGenerator columnMinMaxValueGenerator = new ColumnMinMaxValueGenerator(_segmentMetadata, segmentWriter, columnMinMaxValueGeneratorMode); columnMinMaxValueGenerator.addColumnMinMaxValue(); // NOTE: This step may modify the segment metadata. When adding new steps after this, un-comment the next line. // _segmentMetadata = new SegmentMetadataImpl(indexDir); } segmentWriter.save(); } // Startree creation will load the segment again, so we need to close and re-open the segment writer to make sure // that the other required indices (e.g. forward index) are up-to-date. 
try (SegmentDirectory.Writer segmentWriter = _segmentDirectory.createWriter()) { // Create/modify/remove star-trees if required. processStarTrees(indexDir); _segmentDirectory.reloadMetadata(); segmentWriter.save(); } }
// V3 single-file layout: adding every index type grows columns.psf by a measurable amount,
// and a subsequent default-config preprocess must shrink it back to the initial size.
@Test public void testV3CleanupIndices() throws Exception { constructV3Segment(); SegmentMetadataImpl segmentMetadata = new SegmentMetadataImpl(_indexDir); assertEquals(segmentMetadata.getVersion(), SegmentVersion.v3); // V3 use single file for all column indices. File segmentDirectoryPath = SegmentDirectoryPaths.segmentDirectoryFor(_indexDir, SegmentVersion.v3); File singleFileIndex = new File(segmentDirectoryPath, "columns.psf"); // There are a few indices initially. Remove them to prepare an initial state. long initFileSize = singleFileIndex.length(); try (SegmentDirectory segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader() .load(_indexDir.toURI(), new SegmentDirectoryLoaderContext.Builder().setSegmentDirectoryConfigs(_configuration).build()); SegmentPreProcessor processor = new SegmentPreProcessor(segmentDirectory, getDefaultIndexLoadingConfig(), null)) { processor.process(); } assertTrue(singleFileIndex.length() < initFileSize); initFileSize = singleFileIndex.length(); // Need to create two default columns with Bytes and JSON string for H3 and JSON index. // Other kinds of indices can all be put on column3 with String values. String strColumn = "column3"; IndexLoadingConfig indexLoadingConfig = getDefaultIndexLoadingConfig(); indexLoadingConfig.setInvertedIndexColumns(new HashSet<>(Collections.singletonList(strColumn))); indexLoadingConfig.setRangeIndexColumns(new HashSet<>(Collections.singletonList(strColumn))); indexLoadingConfig.setTextIndexColumns(new HashSet<>(Collections.singletonList(strColumn))); indexLoadingConfig.setFSTIndexColumns(new HashSet<>(Collections.singletonList(strColumn))); indexLoadingConfig.setBloomFilterConfigs(ImmutableMap.of(strColumn, new BloomFilterConfig(0.1, 1024, true))); // Create all kinds of indices. 
try (SegmentDirectory segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader() .load(_indexDir.toURI(), new SegmentDirectoryLoaderContext.Builder().setSegmentDirectoryConfigs(_configuration).build()); SegmentPreProcessor processor = new SegmentPreProcessor(segmentDirectory, indexLoadingConfig, null)) { processor.process(); } long addedLength = 0; try (SegmentDirectory segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader() .load(_indexDir.toURI(), new SegmentDirectoryLoaderContext.Builder().setSegmentDirectoryConfigs(_configuration).build()); SegmentDirectory.Reader reader = segmentDirectory.createReader()) { addedLength += reader.getIndexFor(strColumn, StandardIndexes.inverted()).size() + 8; addedLength += reader.getIndexFor(strColumn, StandardIndexes.range()).size() + 8; addedLength += reader.getIndexFor(strColumn, StandardIndexes.fst()).size() + 8; addedLength += reader.getIndexFor(strColumn, StandardIndexes.bloomFilter()).size() + 8; assertTrue(reader.hasIndexFor(strColumn, StandardIndexes.text())); } assertEquals(singleFileIndex.length(), initFileSize + addedLength); // Remove all kinds of indices, and size gets back initial size. try (SegmentDirectory segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader() .load(_indexDir.toURI(), new SegmentDirectoryLoaderContext.Builder().setSegmentDirectoryConfigs(_configuration).build()); SegmentPreProcessor processor = new SegmentPreProcessor(segmentDirectory, getDefaultIndexLoadingConfig(), null)) { processor.process(); } assertEquals(singleFileIndex.length(), initFileSize); }
// Convenience overload: delegates with null for the two unspecified intermediate arguments
// (see the five-arg variant for their meaning).
public static RpcClient createClient(String clientName, ConnectionType connectionType, Map<String, String> labels) { return createClient(clientName, connectionType, null, null, labels); }
// A connection type with no registered client mapping must cause client creation to throw.
@Test void testCreatedClientWhenConnectionTypeNotMappingThenThrowException() { assertThrows(Exception.class, () -> { RpcClientFactory.createClient("testClient", mock(ConnectionType.class), Collections.singletonMap("labelKey", "labelValue")); }); }
// Parses a delimited list of metric-type names; a lone "*" means all types.
@VisibleForTesting static EnumSet<MetricType> parseMetricTypes(String typeComponent) { final String[] split = typeComponent.split(LIST_DELIMITER); if (split.length == 1 && split[0].equals("*")) { return ALL_METRIC_TYPES; } return EnumSet.copyOf( Arrays.stream(split) .map(s -> ConfigurationUtils.convertToEnum(s, MetricType.class)) .collect(Collectors.toSet())); }
// A comma-separated list must parse into exactly the named metric types.
@Test void testParseMetricTypesMultiple() { final EnumSet<MetricType> types = DefaultMetricFilter.parseMetricTypes("meter,counter"); assertThat(types).containsExactlyInAnyOrder(MetricType.METER, MetricType.COUNTER); }
@Udf(description = "Returns the cotangent of an INT value")
public Double cot(
    @UdfParameter(
        value = "value",
        description = "The value in radians to get the cotangent of."
    ) final Integer value
) {
  // Widen to Double and delegate; a null input stays null through the Double overload.
  if (value == null) {
    return cot((Double) null);
  }
  return cot(value.doubleValue());
}
// Cotangent values below -2*pi must be correct for double, int and long overloads.
@Test public void shouldHandleLessThanNegative2Pi() { assertThat(udf.cot(-9.1), closeTo(2.9699983263892054, 0.000000000000001)); assertThat(udf.cot(-6.3), closeTo(-59.46619211372627, 0.000000000000001)); assertThat(udf.cot(-7), closeTo(-1.1475154224051356, 0.000000000000001)); assertThat(udf.cot(-7L), closeTo(-1.1475154224051356, 0.000000000000001)); }
/**
 * Translates the declarative rule descriptions into executable MappingRule objects.
 * Each rule is validated first; matcher and action are created in that order so any
 * validation/creation failure surfaces before the rule is assembled.
 */
@VisibleForTesting
List<MappingRule> getMappingRules(MappingRulesDescription rules) {
    final List<MappingRule> result = new ArrayList<>();
    for (Rule rule : rules.getRules()) {
        checkMandatoryParameters(rule);
        MappingRuleMatcher matcher = createMatcher(rule);
        MappingRuleAction action = createAction(rule);
        setFallbackToAction(rule, action);
        result.add(new MappingRule(matcher, action));
    }
    return result;
}
// A rule with no type must be rejected with a descriptive IllegalArgumentException.
// NOTE(review): uses the JUnit4 ExpectedException rule field — could migrate to assertThrows.
@Test public void testTypeUnset() { rule.setType(null); expected.expect(IllegalArgumentException.class); expected.expectMessage("Rule type is undefined"); ruleCreator.getMappingRules(description); }
/**
 * Finds the generated-key column name for the given logic table, empty when the table is
 * unknown or has no generate-key column. The previous {@code filter(...isPresent())} step
 * was redundant: {@code flatMap} over an absent Optional already yields empty.
 */
public Optional<String> findGenerateKeyColumnName(final String logicTableName) {
    return Optional.ofNullable(shardingTables.get(logicTableName)).flatMap(ShardingTable::getGenerateKeyColumn);
}
// A table without a configured generate-key column must yield an empty Optional.
@Test void assertNotFindGenerateKeyColumn() { assertFalse(createMinimumShardingRule().findGenerateKeyColumnName("sub_logic_table").isPresent()); }
// Resolves (and caches) a merge policy by class name; a null name is a configuration error.
public SplitBrainMergePolicy getMergePolicy(String className) { if (className == null) { throw new InvalidConfigurationException("Class name is mandatory!"); } return getOrPutIfAbsent(mergePolicyMap, className, policyConstructorFunction); }
// A null policy class name must be rejected with InvalidConfigurationException.
@Test public void getMergePolicy_withNullPolicy() { assertThatThrownBy(() -> mergePolicyProvider.getMergePolicy(null)) .isInstanceOf(InvalidConfigurationException.class); }
// Registers a class with an auto-assigned id: scans forward from the shared classIdGenerator
// cursor to the next unused slot, then delegates to the explicit-id variant. The cursor is
// shared mutable state, so the scan-then-register order matters — left byte-identical.
public void register(Class<?> cls) { if (!extRegistry.registeredClassIdMap.containsKey(cls)) { while (extRegistry.classIdGenerator < registeredId2ClassInfo.length && registeredId2ClassInfo[extRegistry.classIdGenerator] != null) { extRegistry.classIdGenerator++; } register(cls, extRegistry.classIdGenerator); } }
// Re-registering a class with a different id, registering unregistrable classes, and id
// collisions must all be rejected with IllegalArgumentException.
@Test public void testRegisterClass() { Fury fury = Fury.builder().withLanguage(Language.JAVA).requireClassRegistration(false).build(); ClassResolver classResolver = fury.getClassResolver(); classResolver.register(org.apache.fury.test.bean.Foo.class); Assert.assertThrows( IllegalArgumentException.class, () -> classResolver.register(org.apache.fury.test.bean.Foo.class, 100)); Assert.assertThrows( IllegalArgumentException.class, () -> classResolver.register(org.apache.fury.test.bean.Foo.createCompatibleClass1())); classResolver.register(Interface1.class, 200); Assert.assertThrows( IllegalArgumentException.class, () -> classResolver.register(Interface2.class, 200)); }
/**
 * Best-effort GZIP compression of a string using the given character encoding.
 * Returns an empty array for null/empty input. On an I/O failure the partial output is
 * still returned (preserving the original best-effort contract).
 */
public static byte[] tryCompress(String str, String encoding) {
    if (str == null || str.isEmpty()) {
        return new byte[0];
    }
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    try (GZIPOutputStream gzip = new GZIPOutputStream(out)) {
        gzip.write(str.getBytes(encoding));
    } catch (IOException e) {
        // Narrowed from catch (Exception): only IOException (incl. UnsupportedEncodingException)
        // can occur here. No logger is in scope, so the original reporting is kept.
        e.printStackTrace();
    }
    return out.toByteArray();
}
// Test: tryCompress returns an empty array for both empty and null input.
@Test void testTryCompressWithEmptyString() { assertEquals(0, IoUtils.tryCompress("", "UTF-8").length); assertEquals(0, IoUtils.tryCompress(null, "UTF-8").length); }
// Emits only ChangeStreamMutation records downstream, keyed by the incoming key; all other record types (e.g. heartbeats) are silently dropped.
@ProcessElement public void processElement( @Element KV<ByteString, ChangeStreamRecord> changeStreamRecordKV, OutputReceiver<KV<ByteString, ChangeStreamMutation>> receiver) { ChangeStreamRecord inputRecord = changeStreamRecordKV.getValue(); if (inputRecord instanceof ChangeStreamMutation) { receiver.output(KV.of(changeStreamRecordKV.getKey(), (ChangeStreamMutation) inputRecord)); } }
// Test: heartbeat records are filtered out and never emitted to the output receiver.
@Test public void shouldNotOutputHeartbeats() { Heartbeat heartbeat = mock(Heartbeat.class); doFn.processElement(KV.of(ByteString.copyFromUtf8("test"), heartbeat), outputReceiver); verify(outputReceiver, never()).output(any()); }
// Returns the deterministic seed of the active key chain while holding the key-chain-group lock; throws MissingPrivateKeyException when the seed (private material) is unavailable.
public DeterministicSeed getKeyChainSeed() { keyChainGroupLock.lock(); try { DeterministicSeed seed = keyChainGroup.getActiveKeyChain().getSeed(); if (seed == null) throw new ECKey.MissingPrivateKeyException(); return seed; } finally { keyChainGroupLock.unlock(); } }
// Test: the wallet's key chain seed passes its internal mnemonic consistency check.
@Test public void checkSeed() throws MnemonicException { wallet.getKeyChainSeed().check(); }
// Converts a BigDecimal nanosecond magnitude into a Timestamp: the sub-second part is the remainder modulo 1e9,
// and the whole-second part is rebased onto MIN_SECONDS (presumably the representable lower bound -- TODO confirm against TimestampUtils constants).
public static Timestamp toTimestamp(BigDecimal bigDecimal) { final BigDecimal nanos = bigDecimal.remainder(BigDecimal.ONE.scaleByPowerOfTen(9)); final BigDecimal seconds = bigDecimal.subtract(nanos).scaleByPowerOfTen(-9).add(MIN_SECONDS); return Timestamp.ofTimeSecondsAndNanos(seconds.longValue(), nanos.intValue()); }
// Test: an out-of-range BigDecimal input causes an IllegalArgumentException (overflow of the Timestamp range).
@Test(expected = IllegalArgumentException.class) public void testToTimestampThrowsExceptionWhenThereIsAnOverflow() { TimestampUtils.toTimestamp(new BigDecimal("315537897600000000000")); }
/**
 * Returns {@code true} only when every element of {@code c} is contained in this set,
 * short-circuiting on the first missing element.
 */
@Override
public boolean containsAll(Collection<?> c) {
    return c.stream().allMatch(this::contains);
}
// Test: containsAll over a [5,10) range set accepts all in-range members and rejects a set containing the exclusive upper bound.
@Test void testContainsAll() { RangeSet rangeSet = new RangeSet(5, 10); assertTrue(rangeSet.containsAll(mkSet(5, 6, 7, 8, 9))); assertFalse(rangeSet.containsAll(mkSet(5, 6, 10))); }
// Back-channel SAML artifact resolution endpoint: starts the resolve process, writes the response, and answers 200 OK;
// a ClassCastException during processing is mapped to 400 Bad Request.
@Operation(summary = "Resolve SAML artifact") @PostMapping(value = {"/backchannel/saml/v4/entrance/resolve_artifact", "/backchannel/saml/v4/idp/resolve_artifact"}) public ResponseEntity resolveArtifact(HttpServletRequest request, HttpServletResponse response) throws SamlParseException { try { final var artifactResolveRequest = artifactResolveService.startArtifactResolveProcess(request); artifactResponseService.generateResponse(response, artifactResolveRequest); return new ResponseEntity(HttpStatus.OK); } catch (ClassCastException ex) { return new ResponseEntity(HttpStatus.BAD_REQUEST); } }
// Test: a successful artifact resolve invokes both the resolve service and the response-generation service exactly once.
@Test void successfullResolveArtifactTest() throws SamlParseException { ArtifactResolveRequest artifactResolveRequest = new ArtifactResolveRequest(); when(artifactResolveServiceMock.startArtifactResolveProcess(any(HttpServletRequest.class))).thenReturn(artifactResolveRequest); artifactController.resolveArtifact(httpServletRequestMock, httpServletResponseMock); verify(artifactResolveServiceMock, times(1)).startArtifactResolveProcess(any(HttpServletRequest.class)); verify(artifactResponseServiceMock, times(1)).generateResponse(any(HttpServletResponse.class), any(ArtifactResolveRequest.class)); }
// KSQL UDF: joins all arguments after the first using the first argument as separator;
// requires at least two arguments, returns null for a null separator, and skips null values.
@Udf public String concatWS( @UdfParameter(description = "Separator string and values to join") final String... inputs) { if (inputs == null || inputs.length < 2) { throw new KsqlFunctionException("Function Concat_WS expects at least two input arguments."); } final String separator = inputs[0]; if (separator == null) { return null; } return Arrays.stream(inputs, 1, inputs.length) .filter(Objects::nonNull) .collect(Collectors.joining(separator)); }
// Test: a single joined value is returned unchanged (string variant and bytes-overload variant).
@Test public void shouldReturnSingleInputUnchanged() { assertThat(udf.concatWS("SEP", "singular"), is("singular")); assertThat(udf.concatWS(ByteBuffer.wrap(new byte[] {1}), ByteBuffer.wrap(new byte[] {2})), is(ByteBuffer.wrap(new byte[] {2}))); }
public void flatten() throws IOException { // for dynamic XFA forms there is no flatten as this would mean to do a rendering // from the XFA content into a static PDF. if (xfaIsDynamic()) { LOG.warn("Flatten for a dynamix XFA form is not supported"); return; } List<PDField> fields = new ArrayList<>(); for (PDField field: getFieldTree()) { fields.add(field); } flatten(fields, false); }
// Test: flattening removes all AcroForm fields; the rendered output is compared manually because rendering differs across systems.
@Test void testFlatten() throws IOException { File file = new File(OUT_DIR, "AlignmentTests-flattened.pdf"); try (PDDocument testPdf = Loader.loadPDF(new File(IN_DIR, "AlignmentTests.pdf"))) { testPdf.getDocumentCatalog().getAcroForm().flatten(); assertTrue(testPdf.getDocumentCatalog().getAcroForm().getFields().isEmpty()); testPdf.save(file); } // compare rendering if (!TestPDFToImage.doTestFile(file, IN_DIR.getAbsolutePath(), OUT_DIR.getAbsolutePath())) { // don't fail, rendering is different on different systems, result must be viewed manually System.out.println("Rendering of " + file + " failed or is not identical to expected rendering in " + IN_DIR + " directory"); } }
// Posts a multipart payload (customer id + binary body) to Confluent phone-home, optionally through a proxy.
// Returns the HTTP status code; SC_BAD_REQUEST for null/empty inputs, DEFAULT_STATUS_CODE when the request fails with IOException.
// NOTE(review): both the httpClient and the response are closed in the finally block, even when the client was passed in by the caller -- confirm callers do not reuse it.
@SuppressWarnings({"checkstyle:CyclomaticComplexity", "checkstyle:FinalParameters"}) protected static int send( final String customerId, final byte[] bytes, final HttpPost httpPost, final HttpHost proxy, CloseableHttpClient httpClient, final ResponseHandler responseHandler ) { int statusCode = DEFAULT_STATUS_CODE; if (bytes != null && bytes.length > 0 && httpPost != null && customerId != null) { // add the body to the request final MultipartEntityBuilder builder = MultipartEntityBuilder.create(); builder.setMode(HttpMultipartMode.LEGACY); builder.addTextBody("cid", customerId); builder.addBinaryBody("file", bytes, ContentType.DEFAULT_BINARY, "filename"); httpPost.setEntity(builder.build()); httpPost.addHeader("api-version", "phone-home-v1"); // set the HTTP config RequestConfig config = RequestConfig.custom() .setConnectTimeout(Timeout.ofMilliseconds(REQUEST_TIMEOUT_MS)) .setConnectionRequestTimeout(Timeout.ofMilliseconds(REQUEST_TIMEOUT_MS)) .setResponseTimeout(Timeout.ofMilliseconds(REQUEST_TIMEOUT_MS)) .build(); CloseableHttpResponse response = null; try { if (proxy != null) { log.debug("setting proxy to {}", proxy); config = RequestConfig.copy(config).setProxy(proxy).build(); httpPost.setConfig(config); final DefaultProxyRoutePlanner routePlanner = new DefaultProxyRoutePlanner(proxy); if (httpClient == null) { httpClient = HttpClientBuilder .create() .setRoutePlanner(routePlanner) .setDefaultRequestConfig(config) .build(); } } else { if (httpClient == null) { httpClient = HttpClientBuilder.create().setDefaultRequestConfig(config).build(); } } response = httpClient.execute(httpPost); if (responseHandler != null) { responseHandler.handle(response); } // send request log.debug("POST request returned {}", new StatusLine(response).toString()); statusCode = response.getCode(); } catch (IOException e) { log.error("Could not submit metrics to Confluent: {}", e.getMessage()); } finally { if (httpClient != null) { try { httpClient.close(); } catch (IOException e) { 
log.warn("could not close http client", e); } } if (response != null) { try { response.close(); } catch (IOException e) { log.warn("could not close http response", e); } } } } else { statusCode = HttpStatus.SC_BAD_REQUEST; } return statusCode; }
// Test: send() short-circuits on a null payload without ever touching the request object.
@Test public void testSubmitIgnoresNullInput() { // Given HttpPost p = mock(HttpPost.class); byte[] nullData = null; // When WebClient.send(customerId, nullData, p, null); // Then verifyNoMoreInteractions(p); }
// WebDAV append capability: unknown transfer length disables append; otherwise append is allowed exactly when the remote file already exists.
@Override public Write.Append append(final Path file, final TransferStatus status) throws BackgroundException { if(status.getLength() == TransferStatus.UNKNOWN_LENGTH) { return new Write.Append(false).withStatus(status); } return new Write.Append(status.isExists()).withStatus(status); }
// Test: append is reported for an existing zero-byte remote file; cleans up the created file afterwards.
@Test public void testAppendZeroBytes() throws Exception { final DAVUploadFeature feature = new DAVUploadFeature(session); final Path test = new DAVTouchFeature(session).touch(new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus()); assertTrue(feature.append(test, new TransferStatus().exists(true).withLength(0L).withRemote(new DAVAttributesFinderFeature(session).find(test))).append); new DAVDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
// Resolves the input number by name from the processing context (falling back to mapMissingTo), then normalizes it;
// fails fast with KiePMMLException when no input can be resolved at all.
@Override public Object evaluate(final ProcessingDTO processingDTO) { Number input = (Number) getFromPossibleSources(name, processingDTO) .orElse(mapMissingTo); if (input == null) { throw new KiePMMLException("Failed to retrieve input number for " + name); } return evaluate(input); }
// Test: a field value is normalized by linear interpolation between the first two LinearNorm points.
@Test void evaluate() { String fieldName = "fieldName"; Number input = 24; KiePMMLNormContinuous kiePMMLNormContinuous = getKiePMMLNormContinuous(fieldName, null, null); ProcessingDTO processingDTO = new ProcessingDTO(Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.singletonList(new KiePMMLNameValue(fieldName, input)), Collections.emptyList(), Collections.emptyList()); Number retrieved = (Number) kiePMMLNormContinuous.evaluate(processingDTO); Number expected = kiePMMLNormContinuous.linearNorms.get(0).getNorm() + ((input.doubleValue() - kiePMMLNormContinuous.linearNorms.get(0).getOrig()) / (kiePMMLNormContinuous.linearNorms.get(1).getOrig() - kiePMMLNormContinuous.linearNorms.get(0).getOrig())) * (kiePMMLNormContinuous.linearNorms.get(1).getNorm() - kiePMMLNormContinuous.linearNorms.get(0).getNorm()); assertThat(retrieved).isEqualTo(expected); }
/**
 * Joins the given elements with {@code separator}. Returns {@code EMPTY_STRING}
 * for an empty iterable, and the sole element itself (no copy) when there is
 * exactly one, avoiding a StringBuilder allocation.
 */
public static CharSequence join(CharSequence separator, Iterable<? extends CharSequence> elements) {
    ObjectUtil.checkNotNull(separator, "separator");
    ObjectUtil.checkNotNull(elements, "elements");

    Iterator<? extends CharSequence> it = elements.iterator();
    if (!it.hasNext()) {
        return EMPTY_STRING;
    }
    CharSequence first = it.next();
    if (!it.hasNext()) {
        // Single element: return it as-is.
        return first;
    }
    StringBuilder joined = new StringBuilder(first);
    while (it.hasNext()) {
        joined.append(separator).append(it.next());
    }
    return joined;
}
// Test: join handles empty, single, multiple, and null-containing element lists ("null" is rendered literally).
@Test public void testJoin() { assertEquals("", StringUtil.join(",", Collections.<CharSequence>emptyList()).toString()); assertEquals("a", StringUtil.join(",", Collections.singletonList("a")).toString()); assertEquals("a,b", StringUtil.join(",", Arrays.asList("a", "b")).toString()); assertEquals("a,b,c", StringUtil.join(",", Arrays.asList("a", "b", "c")).toString()); assertEquals("a,b,c,null,d", StringUtil.join(",", Arrays.asList("a", "b", "c", null, "d")).toString()); }
// Applies the TLS context for (name, port) to the connector: SSL context, enabled protocols/ciphers,
// and client-auth mode derived from need/want flags (NEED takes precedence over WANT).
@Override public void configureSsl(ConnectorSsl ssl, String name, int port) { TlsContext tlsContext = getTlsContext(name, port); SSLParameters parameters = tlsContext.parameters(); ssl.setSslContext(tlsContext.sslContext().context()); ssl.setEnabledProtocolVersions(List.of(parameters.getProtocols())); ssl.setEnabledCipherSuites(List.of(parameters.getCipherSuites())); if (parameters.getNeedClientAuth()) { ssl.setClientAuth(ConnectorSsl.ClientAuth.NEED); } else if (parameters.getWantClientAuth()) { ssl.setClientAuth(ConnectorSsl.ClientAuth.WANT); } else { ssl.setClientAuth(ConnectorSsl.ClientAuth.DISABLED); } }
// Test: the configured SSL context factory inherits the cipher suites from the provided TlsContext.
@Test void creates_sslcontextfactory_from_tlscontext() { TlsContext tlsContext = createTlsContext(); var provider = new SimpleTlsContextBasedProvider(tlsContext); DefaultConnectorSsl ssl = new DefaultConnectorSsl(); provider.configureSsl(ssl, "dummyContainerId", 8080); assertArrayEquals(tlsContext.parameters().getCipherSuites(), ssl.createSslContextFactory().getIncludeCipherSuites()); }
// Wraps an array in a Traverser; null elements are presumably skipped by ArrayTraverser (see its implementation and the accompanying test).
@Nonnull public static <T> Traverser<T> traverseArray(@Nonnull T[] array) { return new ArrayTraverser<>(array); }
// Test: null array elements are skipped during traversal; exhaustion yields null.
@Test public void when_traverseArrayWithNull_then_skipNulls() { Traverser<Integer> trav = traverseArray(new Integer[] {1, null, 2}); assertEquals(1, (int) trav.next()); assertEquals(2, (int) trav.next()); assertNull(trav.next()); }
/**
 * Builds a preview from the first text answer, truncated to {@code PREVIEW_LENGTH}
 * characters; returns an empty string when there are no answers.
 */
public String generatePreview(List<TextAnswer> reviewTextAnswers) {
    if (reviewTextAnswers.isEmpty()) {
        return "";
    }
    String content = reviewTextAnswers.get(0).getContent();
    return content.length() > PREVIEW_LENGTH
            ? content.substring(0, PREVIEW_LENGTH)
            : content;
}
// Test: an answer longer than the preview limit (150) is truncated to exactly the preview length.
@Test void 답변_내용이_미리보기_최대_글자를_넘는_경우_미리보기_길이만큼_잘라서_반환한다() { // given ReviewPreviewGenerator reviewPreviewGenerator = new ReviewPreviewGenerator(); String answer = "*".repeat(151); TextAnswer textAnswer = new TextAnswer(1, answer); // when String actual = reviewPreviewGenerator.generatePreview(List.of(textAnswer)); // then assertThat(actual).hasSize(150); }
// Coerces a JDBC result value to the requested type by dispatching on the value's runtime type
// (temporal, numeric, byte[], boolean, URL, String), falling back to a cast; an impossible cast
// surfaces as SQLFeatureNotSupportedException, and a null convert type is rejected up front.
public static Object convertValue(final Object value, final Class<?> convertType) throws SQLFeatureNotSupportedException { ShardingSpherePreconditions.checkNotNull(convertType, () -> new SQLFeatureNotSupportedException("Type can not be null")); if (null == value) { return convertNullValue(convertType); } if (value.getClass() == convertType) { return value; } if (value instanceof LocalDateTime) { return convertLocalDateTimeValue((LocalDateTime) value, convertType); } if (value instanceof Timestamp) { return convertTimestampValue((Timestamp) value, convertType); } if (URL.class.equals(convertType)) { return convertURL(value); } if (value instanceof Number) { return convertNumberValue(value, convertType); } if (value instanceof Date) { return convertDateValue((Date) value, convertType); } if (value instanceof byte[]) { return convertByteArrayValue((byte[]) value, convertType); } if (boolean.class.equals(convertType)) { return convertBooleanValue(value); } if (String.class.equals(convertType)) { return value.toString(); } try { return convertType.cast(value); } catch (final ClassCastException ignored) { throw new SQLFeatureNotSupportedException("getObject with type"); } }
// Test: converting an arbitrary object to String yields its toString() representation.
@Test void assertConvertValue() throws SQLException { Object object = new Object(); assertThat(ResultSetUtils.convertValue(object, String.class), is(object.toString())); }
// Runs the message against every stream's rules, collecting a per-stream match result;
// rules with invalid types are skipped with a warning rather than aborting the match.
public List<StreamTestMatch> testMatch(Message message) { final List<StreamTestMatch> matches = Lists.newArrayList(); for (final Stream stream : streams) { final StreamTestMatch match = new StreamTestMatch(stream); for (final StreamRule streamRule : stream.getStreamRules()) { try { final Rule rule = new Rule(stream, streamRule, stream.getMatchingType()); match.addRule(rule); } catch (InvalidStreamRuleTypeException e) { LOG.warn("Invalid stream rule type. Skipping matching for this rule. " + e.getMessage(), e); } } match.matchMessage(message); matches.add(match); } return matches; }
// Test: OR matching type -- a stream matches when at least one rule matches; per-rule match flags are reported individually.
@Test public void testOrTestMatch() throws Exception { final StreamMock stream = getStreamMock("test", Stream.MatchingType.OR); final StreamRuleMock rule1 = new StreamRuleMock(ImmutableMap.of( "_id", new ObjectId(), "field", "testfield1", "type", StreamRuleType.PRESENCE.toInteger(), "stream_id", stream.getId() )); final StreamRuleMock rule2 = new StreamRuleMock(ImmutableMap.of( "_id", new ObjectId(), "field", "testfield2", "value", "^test", "type", StreamRuleType.REGEX.toInteger(), "stream_id", stream.getId() )); stream.setStreamRules(Lists.newArrayList(rule1, rule2)); final StreamRouterEngine engine = newEngine(Lists.newArrayList(stream)); // Without testfield1 and testfield2 in the message. final Message message1 = getMessage(); final StreamRouterEngine.StreamTestMatch testMatch1 = engine.testMatch(message1).get(0); final Map<StreamRule, Boolean> matches1 = testMatch1.getMatches(); assertFalse(testMatch1.isMatched()); assertFalse(matches1.get(rule1)); assertFalse(matches1.get(rule2)); // With testfield1 but no-matching testfield2 in the message. final Message message2 = getMessage(); message2.addField("testfield1", "testvalue"); message2.addField("testfield2", "no-testvalue"); final StreamRouterEngine.StreamTestMatch testMatch2 = engine.testMatch(message2).get(0); final Map<StreamRule, Boolean> matches2 = testMatch2.getMatches(); assertTrue(testMatch2.isMatched()); assertTrue(matches2.get(rule1)); assertFalse(matches2.get(rule2)); // With testfield1 and matching testfield2 in the message. final Message message3 = getMessage(); message3.addField("testfield1", "testvalue"); message3.addField("testfield2", "testvalue2"); final StreamRouterEngine.StreamTestMatch testMatch3 = engine.testMatch(message3).get(0); final Map<StreamRule, Boolean> matches3 = testMatch3.getMatches(); assertTrue(testMatch3.isMatched()); assertTrue(matches3.get(rule1)); assertTrue(matches3.get(rule2)); }
// Maps an AnnotationStyle to its annotator implementation; JACKSON and JACKSON2 both resolve to Jackson2Annotator,
// and an unknown style is rejected with IllegalArgumentException.
public Annotator getAnnotator(AnnotationStyle style) { switch (style) { case JACKSON: case JACKSON2: return new Jackson2Annotator(generationConfig); case JSONB1: return new Jsonb1Annotator(generationConfig); case JSONB2: return new Jsonb2Annotator(generationConfig); case GSON: return new GsonAnnotator(generationConfig); case MOSHI1: return new Moshi1Annotator(generationConfig); case NONE: return new NoopAnnotator(); default: throw new IllegalArgumentException("Unrecognised annotation style: " + style); } }
// Test: the factory instantiates the requested annotator class (class-based overload).
@Test public void canCreateCorrectAnnotatorFromClass() { assertThat(factory.getAnnotator(Jackson2Annotator.class), is(instanceOf(Jackson2Annotator.class))); }
// Entry point for building a Bigtable change-stream read transform with default settings.
public static ReadChangeStream readChangeStream() { return ReadChangeStream.create(); }
// Test: validation of a change-stream read fails with IllegalArgumentException when the target table does not exist.
@Test public void testReadChangeStreamFailsValidation() { BigtableIO.ReadChangeStream readChangeStream = BigtableIO.readChangeStream() .withProjectId("project") .withInstanceId("instance") .withTableId("table"); // Validating table fails because table does not exist. thrown.expect(IllegalArgumentException.class); readChangeStream.validate(TestPipeline.testingPipelineOptions()); }
// REST: updates an index set after permission check; rejects making the default index set non-writable (409),
// validates the resulting config (400 on violation), and returns the saved config as a summary.
@PUT @Path("{id}") @Timed @ApiOperation(value = "Update index set") @AuditEvent(type = AuditEventTypes.INDEX_SET_UPDATE) @ApiResponses(value = { @ApiResponse(code = 403, message = "Unauthorized"), @ApiResponse(code = 409, message = "Mismatch of IDs in URI path and payload"), }) public IndexSetSummary update(@ApiParam(name = "id", required = true) @PathParam("id") String id, @ApiParam(name = "Index set configuration", required = true) @Valid @NotNull IndexSetUpdateRequest updateRequest) { checkPermission(RestPermissions.INDEXSETS_EDIT, id); final IndexSetConfig oldConfig = indexSetService.get(id) .orElseThrow(() -> new NotFoundException("Index set <" + id + "> not found")); final IndexSetConfig defaultIndexSet = indexSetService.getDefault(); final boolean isDefaultSet = oldConfig.equals(defaultIndexSet); if (isDefaultSet && !updateRequest.isWritable()) { throw new ClientErrorException("Default index set must be writable.", Response.Status.CONFLICT); } checkDataTieringNotNull(updateRequest.useLegacyRotation(), updateRequest.dataTieringConfig()); final IndexSetConfig indexSetConfig = updateRequest.toIndexSetConfig(id, oldConfig); final Optional<Violation> violation = indexSetValidator.validate(indexSetConfig); if (violation.isPresent()) { throw new BadRequestException(violation.get().message()); } final IndexSetConfig savedObject = indexSetService.save(indexSetConfig); return IndexSetSummary.fromIndexSetConfig(savedObject, isDefaultSet); }
// Test: update persists the modified config and returns it; interactions with the service are verified exactly.
@Test public void update() { final IndexSetConfig indexSetConfig = IndexSetConfig.create( "id", "new title", "description", true, false, "prefix", 1, 0, MessageCountRotationStrategy.class.getCanonicalName(), MessageCountRotationStrategyConfig.create(1000), NoopRetentionStrategy.class.getCanonicalName(), NoopRetentionStrategyConfig.create(1), ZonedDateTime.of(2016, 10, 10, 12, 0, 0, 0, ZoneOffset.UTC), "standard", "index-template", null, 1, false ); final IndexSetConfig updatedIndexSetConfig = indexSetConfig.toBuilder() .title("new title") .build(); when(indexSetService.get("id")).thenReturn(Optional.of(indexSetConfig)); when(indexSetService.save(indexSetConfig)).thenReturn(updatedIndexSetConfig); final IndexSetSummary summary = indexSetsResource.update("id", IndexSetUpdateRequest.fromIndexSetConfig(indexSetConfig)); verify(indexSetService, times(1)).get("id"); verify(indexSetService, times(1)).save(indexSetConfig); verify(indexSetService, times(1)).getDefault(); verifyNoMoreInteractions(indexSetService); // The real update wouldn't replace the index template name… final IndexSetConfig actual = summary.toIndexSetConfig(false).toBuilder() .indexTemplateName("index-template") .build(); assertThat(actual).isEqualTo(updatedIndexSetConfig); }
// Re-entrancy guard: a thread-local MARK ensures ready()/doBefore() run at most once per thread at a time;
// nested invocations on the same thread pass the context through untouched.
@Override public ExecuteContext before(ExecuteContext context) throws Exception { if (MARK.get() != null) { return context; } MARK.set(Boolean.TRUE); try { ready(); return doBefore(context); } finally { MARK.remove(); } }
// Test: before() returns the same context, and a second thread can also invoke before() (the mark is per-thread).
@Test public void test() throws Exception { final MarkInterceptor markInterceptor = new MarkInterceptor() { @Override protected ExecuteContext doBefore(ExecuteContext context) { return context; } @Override protected void ready() { } @Override public ExecuteContext after(ExecuteContext context) { return context; } @Override public ExecuteContext onThrow(ExecuteContext context) { return context; } }; final CountDownLatch countDownLatch = new CountDownLatch(1); final ExecuteContext executeContext = buildContext(new Object()); final ExecuteContext before = markInterceptor.before(executeContext); final Thread thread = new Thread(() -> { try { markInterceptor.before(executeContext); countDownLatch.countDown(); } catch (Exception exception) { // ignored } }); thread.start(); countDownLatch.await(); Assert.assertEquals(before, executeContext); }
// Factory for the blob-push initializer endpoint provider.
RegistryEndpointProvider<Optional<URL>> initializer() { return new Initializer(); }
// Test: an unrecognized HTTP status code from the registry is surfaced as a RegistryErrorException with a descriptive message.
@Test public void testInitializer_handleResponse_unrecognized() throws IOException, RegistryException { Mockito.when(mockResponse.getStatusCode()).thenReturn(-1); // Unrecognized try { testBlobPusher.initializer().handleResponse(mockResponse); Assert.fail("Multiple 'Location' headers should be a registry error"); } catch (RegistryErrorException ex) { MatcherAssert.assertThat( ex.getMessage(), CoreMatchers.containsString("Received unrecognized status code -1")); } }
// Caches schemas by normalized URI: the base document (URI without fragment) is resolved and cached first,
// then any '#' fragment is resolved against the base content and cached as a child schema.
public synchronized Schema create(URI id, String refFragmentPathDelimiters) { URI normalizedId = id.normalize(); if (!schemas.containsKey(normalizedId)) { URI baseId = removeFragment(id).normalize(); if (!schemas.containsKey(baseId)) { logger.debug("Reading schema: " + baseId); final JsonNode baseContent = contentResolver.resolve(baseId); schemas.put(baseId, new Schema(baseId, baseContent, null)); } final Schema baseSchema = schemas.get(baseId); if (normalizedId.toString().contains("#")) { JsonNode childContent = fragmentResolver.resolve(baseSchema.getContent(), '#' + id.getFragment(), refFragmentPathDelimiters); schemas.put(normalizedId, new Schema(normalizedId, childContent, baseSchema)); } } return schemas.get(normalizedId); }
// Test: a '#' self reference resolves to the very same cached Schema instance.
@Test public void createWithSelfRef() throws URISyntaxException { URI schemaUri = getClass().getResource("/schema/address.json").toURI(); SchemaStore schemaStore = new SchemaStore(); Schema addressSchema = schemaStore.create(schemaUri, "#/."); Schema selfRefSchema = schemaStore.create(addressSchema, "#", "#/."); assertThat(addressSchema, is(sameInstance(selfRefSchema))); }
// Builds a DynamoDB transactional delete item targeting the (identifier, deviceId) primary key in this store's table.
public TransactWriteItem buildTransactWriteItemForDeletion(final UUID identifier, final byte deviceId) { return TransactWriteItem.builder() .delete(Delete.builder() .tableName(tableName) .key(getPrimaryKey(identifier, deviceId)) .build()) .build(); }
// Test: executing the built delete item removes only the targeted device's key, leaving other devices' keys intact.
@Test void buildTransactWriteItemForDeletion() { final RepeatedUseSignedPreKeyStore<K> keys = getKeyStore(); final UUID identifier = UUID.randomUUID(); final byte deviceId2 = 2; final K retainedPreKey = generateSignedPreKey(); keys.store(identifier, Device.PRIMARY_ID, generateSignedPreKey()).join(); keys.store(identifier, deviceId2, retainedPreKey).join(); getDynamoDbClient().transactWriteItems(TransactWriteItemsRequest.builder() .transactItems(keys.buildTransactWriteItemForDeletion(identifier, Device.PRIMARY_ID)) .build()); assertEquals(Optional.empty(), keys.find(identifier, Device.PRIMARY_ID).join()); assertEquals(Optional.of(retainedPreKey), keys.find(identifier, deviceId2).join()); }
// Thin override delegating equality assertion to the superclass (kept to pin down the nullable signature).
@Override public void isEqualTo(@Nullable Object expected) { super.isEqualTo(expected); }
// Test: comparing float arrays of different lengths fails with a length mismatch in the failure keys/values.
@Test public void isEqualTo_WithoutToleranceParameter_Fail_Longer() { expectFailureWhenTestingThat(array(2.2f, 3.3f)).isEqualTo(array(2.2f, 3.3f, 4.4f)); assertFailureKeys("expected", "but was", "wrong length", "expected", "but was"); assertFailureValueIndexed("expected", 1, "3"); assertFailureValueIndexed("but was", 1, "2"); }
// File-pointer access is only supported when the underlying input is a RandomAccessFile;
// DataInputStream and unknown input types raise UnsupportedOperationException.
public long getFilePointer() throws IOException { if (input instanceof RandomAccessFile) { return ((RandomAccessFile) input).getFilePointer(); } else if (input instanceof DataInputStream) { throw new UnsupportedOperationException("Can not get file pointer for Hollow Blob Input of type DataInputStream"); } else { throw new UnsupportedOperationException("Unknown Hollow Blob Input type"); } }
// Test: stream-backed input rejects getFilePointer(); buffer-backed (shared-memory) input starts at position 0.
@Test public void testGetFilePointer() throws IOException { try (HollowBlobInput inStream = HollowBlobInput.modeBasedSelector(MemoryMode.ON_HEAP, mockBlob)) { inStream.getFilePointer(); fail(); } catch (UnsupportedOperationException e) { // pass } catch (Exception e) { fail(); } HollowBlobInput inBuffer = HollowBlobInput.modeBasedSelector(MemoryMode.SHARED_MEMORY_LAZY, mockBlob); assertEquals(0, inBuffer.getFilePointer()); // first byte is 0 }
// Derives the KafkaRebalanceState from the status' state condition type; returns null when the status
// or its state condition is absent.
public static KafkaRebalanceState rebalanceState(KafkaRebalanceStatus kafkaRebalanceStatus) { if (kafkaRebalanceStatus != null) { Condition rebalanceStateCondition = rebalanceStateCondition(kafkaRebalanceStatus); String statusString = rebalanceStateCondition != null ? rebalanceStateCondition.getType() : null; if (statusString != null) { return KafkaRebalanceState.valueOf(statusString); } } return null; }
// Test: a single valid state condition is mapped to the corresponding KafkaRebalanceState enum value.
@Test public void testValidSingleState() { KafkaRebalanceStatus kafkaRebalanceStatus = new KafkaRebalanceStatusBuilder() .withConditions( new ConditionBuilder() .withType(KafkaRebalanceState.Rebalancing.toString()) .withStatus("True") .build()) .build(); KafkaRebalanceState state = KafkaRebalanceUtils.rebalanceState(kafkaRebalanceStatus); assertThat(state, is(KafkaRebalanceState.Rebalancing)); }
// Builds one ExternalResourceDriver per configured resource: loads all driver factories via the plugin
// manager, looks up each resource's configured factory class, and instantiates the driver with a
// resource-scoped delegating configuration. Missing/unknown factories and instantiation failures are
// logged and skipped rather than failing the whole set.
@VisibleForTesting static Map<String, ExternalResourceDriver> externalResourceDriversFromConfig( Configuration config, PluginManager pluginManager) { final Set<String> resourceSet = getExternalResourceSet(config); if (resourceSet.isEmpty()) { return Collections.emptyMap(); } final Iterator<ExternalResourceDriverFactory> factoryIterator = pluginManager.load(ExternalResourceDriverFactory.class); final Map<String, ExternalResourceDriverFactory> externalResourceFactories = new HashMap<>(); factoryIterator.forEachRemaining( externalResourceDriverFactory -> externalResourceFactories.put( externalResourceDriverFactory.getClass().getName(), externalResourceDriverFactory)); final Map<String, ExternalResourceDriver> externalResourceDrivers = new HashMap<>(); for (String resourceName : resourceSet) { final ConfigOption<String> driverClassOption = key(ExternalResourceOptions .getExternalResourceDriverFactoryConfigOptionForResource( resourceName)) .stringType() .noDefaultValue(); final String driverFactoryClassName = config.get(driverClassOption); if (StringUtils.isNullOrWhitespaceOnly(driverFactoryClassName)) { LOG.warn( "Could not find driver class name for {}. Please make sure {} is configured.", resourceName, driverClassOption.key()); continue; } ExternalResourceDriverFactory externalResourceDriverFactory = externalResourceFactories.get(driverFactoryClassName); if (externalResourceDriverFactory != null) { DelegatingConfiguration delegatingConfiguration = new DelegatingConfiguration( config, ExternalResourceOptions .getExternalResourceParamConfigPrefixForResource( resourceName)); try { externalResourceDrivers.put( resourceName, externalResourceDriverFactory.createExternalResourceDriver( delegatingConfiguration)); LOG.info("Add external resources driver for {}.", resourceName); } catch (Exception e) { LOG.warn( "Could not instantiate driver with factory {} for {}. 
{}", driverFactoryClassName, resourceName, e); } } else { LOG.warn( "Could not find factory class {} for {}.", driverFactoryClassName, resourceName); } } return externalResourceDrivers; }
// Test: one configured resource plus a matching plugin factory yields exactly one driver of the expected type.
@Test public void testConstructExternalResourceDriversFromConfig() { final Configuration config = new Configuration(); final String driverFactoryClassName = TestingExternalResourceDriverFactory.class.getName(); final Map<Class<?>, Iterator<?>> plugins = new HashMap<>(); plugins.put( ExternalResourceDriverFactory.class, IteratorUtils.singletonIterator(new TestingExternalResourceDriverFactory())); final PluginManager testingPluginManager = new TestingPluginManager(plugins); config.set( ExternalResourceOptions.EXTERNAL_RESOURCE_LIST, Collections.singletonList(RESOURCE_NAME_1)); config.setString( ExternalResourceOptions.getExternalResourceDriverFactoryConfigOptionForResource( RESOURCE_NAME_1), driverFactoryClassName); final Map<String, ExternalResourceDriver> externalResourceDrivers = ExternalResourceUtils.externalResourceDriversFromConfig( config, testingPluginManager); assertThat(externalResourceDrivers.size(), is(1)); assertThat( externalResourceDrivers.get(RESOURCE_NAME_1), instanceOf(TestingExternalResourceDriver.class)); }
// Records a submission metric and wraps the callable so running/completed/duration metrics are captured during execution.
@Nonnull @Override public <T> Future<T> submit(@Nonnull Callable<T> task) { submitted.mark(); return delegate.submit(new InstrumentedCallable<>(task)); }
// Test: submitting a callable updates submitted/running before completion and completed/duration after, leaving scheduling metrics untouched.
@Test public void testSubmitCallable() throws Exception { assertThat(submitted.getCount()).isZero(); assertThat(running.getCount()).isZero(); assertThat(completed.getCount()).isZero(); assertThat(duration.getCount()).isZero(); assertThat(scheduledOnce.getCount()).isZero(); assertThat(scheduledRepetitively.getCount()).isZero(); assertThat(scheduledOverrun.getCount()).isZero(); assertThat(percentOfPeriod.getCount()).isZero(); final Object obj = new Object(); Future<Object> theFuture = instrumentedScheduledExecutor.submit(() -> { assertThat(submitted.getCount()).isEqualTo(1); assertThat(running.getCount()).isEqualTo(1); assertThat(completed.getCount()).isZero(); assertThat(duration.getCount()).isZero(); assertThat(scheduledOnce.getCount()).isZero(); assertThat(scheduledRepetitively.getCount()).isZero(); assertThat(scheduledOverrun.getCount()).isZero(); assertThat(percentOfPeriod.getCount()).isZero(); return obj; }); assertThat(theFuture.get()).isEqualTo(obj); assertThat(submitted.getCount()).isEqualTo(1); assertThat(running.getCount()).isZero(); assertThat(completed.getCount()).isEqualTo(1); assertThat(duration.getCount()).isEqualTo(1); assertThat(duration.getSnapshot().size()).isEqualTo(1); assertThat(scheduledOnce.getCount()).isZero(); assertThat(scheduledRepetitively.getCount()).isZero(); assertThat(scheduledOverrun.getCount()).isZero(); assertThat(percentOfPeriod.getCount()).isZero(); }
// KSQL UDF: formats a TIME value using a cached DateTimeFormatter keyed by pattern; null time or pattern yields null,
// and formatter errors are wrapped in KsqlFunctionException with the offending time and pattern.
@Udf(description = "Converts a TIME value into the" + " string representation of the time in the given format." + " The format pattern should be in the format expected" + " by java.time.format.DateTimeFormatter") public String formatTime( @UdfParameter( description = "TIME value.") final Time time, @UdfParameter( description = "The format pattern should be in the format expected by" + " java.time.format.DateTimeFormatter.") final String formatPattern) { if (time == null || formatPattern == null) { return null; } try { final DateTimeFormatter formatter = formatters.get(formatPattern); return LocalTime.ofNanoOfDay(TimeUnit.MILLISECONDS.toNanos(time.getTime())).format(formatter); } catch (ExecutionException | RuntimeException e) { throw new KsqlFunctionException("Failed to format time " + LocalTime.ofNanoOfDay(time.getTime() * 1000000) + " with formatter '" + formatPattern + "': " + e.getMessage(), e); } }
// Test: a null format pattern makes formatTime return null rather than throwing.
@Test public void shouldReturnNullOnNullFormat() { // When: final String result = udf.formatTime(new Time(65000), null); // Then: assertThat(result, is(nullValue())); }
// Closes immediately by delegating to the timed close with a zero-millisecond timeout.
@Override public void close() { close(Duration.ofMillis(0)); }
// Test: aborting a transaction on a closed producer raises IllegalStateException.
@Test public void shouldThrowOnAbortTransactionIfProducerIsClosed() { buildMockProducer(true); producer.close(); assertThrows(IllegalStateException.class, producer::abortTransaction); }
public static Deserializer<? extends IP> deserializer() {
    return (data, offset, length) -> {
        // Peek at the version nibble in the first byte to pick the concrete parser.
        final ByteBuffer buffer = ByteBuffer.wrap(data, offset, length);
        final byte ipVersion = (byte) (buffer.get() >> 4 & 0xf);

        if (ipVersion == 4) {
            return IPv4.deserializer().deserialize(data, offset, length);
        }
        if (ipVersion == 6) {
            return IPv6.deserializer().deserialize(data, offset, length);
        }
        throw new DeserializationException("Invalid IP version");
    };
}
@Test(expected = DeserializationException.class)
public void testBadIpVersion() throws Exception {
    // A header whose version nibble is neither 4 nor 6 must be rejected.
    final Deserializer deserializer = IP.deserializer();
    deserializer.deserialize(badHeaderBytes, 0, badHeaderLength * 4);
}
@Override
public void run()
    throws Exception {
  //init all file systems
  List<PinotFSSpec> pinotFSSpecs = _spec.getPinotFSSpecs();
  for (PinotFSSpec pinotFSSpec : pinotFSSpecs) {
    PinotFSFactory.register(pinotFSSpec.getScheme(), pinotFSSpec.getClassName(),
        new PinotConfiguration(pinotFSSpec));
  }

  //Get list of files to process
  URI inputDirURI = new URI(_spec.getInputDirURI());
  if (inputDirURI.getScheme() == null) {
    // No scheme means a local path; normalize to a file:// URI.
    inputDirURI = new File(_spec.getInputDirURI()).toURI();
  }
  PinotFS inputDirFS = PinotFSFactory.create(inputDirURI.getScheme());
  List<String> filteredFiles = SegmentGenerationUtils.listMatchedFilesWithRecursiveOption(inputDirFS, inputDirURI,
      _spec.getIncludeFileNamePattern(), _spec.getExcludeFileNamePattern(), _spec.isSearchRecursively());

  //Get outputFS for writing output pinot segments
  URI outputDirURI = new URI(_spec.getOutputDirURI());
  if (outputDirURI.getScheme() == null) {
    outputDirURI = new File(_spec.getOutputDirURI()).toURI();
  }
  PinotFS outputDirFS = PinotFSFactory.create(outputDirURI.getScheme());
  outputDirFS.mkdir(outputDirURI);

  //Get staging directory for temporary output pinot segments
  String stagingDir = _spec.getExecutionFrameworkSpec().getExtraConfigs().get(STAGING_DIR_FIELD);
  Preconditions.checkNotNull(stagingDir, "Please set config: stagingDir under 'executionFrameworkSpec.extraConfigs'");
  URI stagingDirURI = URI.create(stagingDir);
  if (stagingDirURI.getScheme() == null) {
    stagingDirURI = new File(stagingDir).toURI();
  }
  // Staging and output must live on the same file system so the final move works.
  if (!outputDirURI.getScheme().equals(stagingDirURI.getScheme())) {
    throw new RuntimeException(String
        .format("The scheme of staging directory URI [%s] and output directory URI [%s] has to be same.",
            stagingDirURI, outputDirURI));
  }
  // Always start from a clean staging area.
  if (outputDirFS.exists(stagingDirURI)) {
    LOGGER.info("Clearing out existing staging directory: [{}]", stagingDirURI);
    outputDirFS.delete(stagingDirURI, true);
  }
  outputDirFS.mkdir(stagingDirURI);
  Path stagingInputDir = new Path(stagingDirURI.toString(), "input");
  outputDirFS.mkdir(stagingInputDir.toUri());
  Path stagingSegmentTarUri = new Path(stagingDirURI.toString(), SEGMENT_TAR_SUBDIR_NAME);
  outputDirFS.mkdir(stagingSegmentTarUri.toUri());

  // numDataFiles is guaranteed to be greater than zero since listMatchedFilesWithRecursiveOption will throw
  // runtime exception if the matched files list is empty.
  int numDataFiles = filteredFiles.size();
  LOGGER.info("Creating segments with data files: {}", filteredFiles);
  if (!SegmentGenerationJobUtils.useGlobalDirectorySequenceId(_spec.getSegmentNameGeneratorSpec())) {
    // Per-directory sequence ids: group files by parent directory, sort each
    // group, and number files within their own directory.
    Map<String, List<String>> localDirIndex = new HashMap<>();
    for (String filteredFile : filteredFiles) {
      java.nio.file.Path filteredParentPath = Paths.get(filteredFile).getParent();
      if (!localDirIndex.containsKey(filteredParentPath.toString())) {
        localDirIndex.put(filteredParentPath.toString(), new ArrayList<>());
      }
      localDirIndex.get(filteredParentPath.toString()).add(filteredFile);
    }
    for (String parentPath : localDirIndex.keySet()) {
      List<String> siblingFiles = localDirIndex.get(parentPath);
      Collections.sort(siblingFiles);
      for (int i = 0; i < siblingFiles.size(); i++) {
        URI inputFileURI = SegmentGenerationUtils.getFileURI(siblingFiles.get(i),
            SegmentGenerationUtils.getDirectoryURI(parentPath));
        createInputFileUriAndSeqIdFile(inputFileURI, outputDirFS, stagingInputDir, i);
      }
    }
  } else {
    // Global sequence ids: number files across the whole input set.
    for (int i = 0; i < numDataFiles; i++) {
      URI inputFileURI = SegmentGenerationUtils.getFileURI(filteredFiles.get(i), inputDirURI);
      createInputFileUriAndSeqIdFile(inputFileURI, outputDirFS, stagingInputDir, i);
    }
  }

  try {
    // Set up the job
    Job job = Job.getInstance(getConf());
    job.setJobName(getClass().getSimpleName());
    // Our class is in the batch-ingestion-hadoop plugin, so we want to pick a class
    // that's in the main jar (the pinot-all-${PINOT_VERSION}-jar-with-dependencies.jar)
    job.setJarByClass(SegmentGenerationJobSpec.class);

    // Disable speculative execution, as otherwise two map tasks can wind up writing to the same staging file.
    job.getConfiguration().setBoolean(MRJobConfig.MAP_SPECULATIVE, false);

    // But we have to copy ourselves to HDFS, and add us to the distributed cache, so
    // that the mapper code is available.
    addMapperJarToDistributedCache(job, outputDirFS, stagingDirURI);

    org.apache.hadoop.conf.Configuration jobConf = job.getConfiguration();
    // Forward delegation tokens (secure clusters) to the submitted job, if present.
    String hadoopTokenFileLocation = System.getenv("HADOOP_TOKEN_FILE_LOCATION");
    if (hadoopTokenFileLocation != null) {
      jobConf.set("mapreduce.job.credentials.binary", hadoopTokenFileLocation);
    }

    // Cap parallelism at the number of input files; non-positive means "auto".
    int jobParallelism = _spec.getSegmentCreationJobParallelism();
    if (jobParallelism <= 0 || jobParallelism > numDataFiles) {
      jobParallelism = numDataFiles;
    }
    jobConf.setInt(JobContext.NUM_MAPS, jobParallelism);

    // Pinot plugins are necessary to launch Pinot ingestion job from every mapper.
    // In order to ensure pinot plugins would be loaded to each worker, this method
    // tars entire plugins directory and set this file into Distributed cache.
    // Then each mapper job will untar the plugin tarball, and set system properties accordingly.
    // Note that normally we'd just use Hadoop's support for putting jars on the
    // classpath via the distributed cache, but some of the plugins (e.g. the pinot-parquet
    // input format) include Hadoop classes, which can be incompatibile with the Hadoop
    // installation/jars being used to run the mapper, leading to errors such as:
    // java.lang.NoSuchMethodError: org.apache.hadoop.ipc.RPC.getServer(...
    packPluginsToDistributedCache(job, outputDirFS, stagingDirURI);

    // Add dependency jars, if we're provided with a directory containing these.
    String dependencyJarsSrcDir = _spec.getExecutionFrameworkSpec().getExtraConfigs().get(DEPS_JAR_DIR_FIELD);
    if (dependencyJarsSrcDir != null) {
      Path dependencyJarsDestPath = new Path(stagingDirURI.toString(), DEPS_JAR_SUBDIR_NAME);
      addJarsToDistributedCache(job, new File(dependencyJarsSrcDir), outputDirFS, dependencyJarsDestPath.toUri(),
          false);
    }

    // Temporarily point the spec's output at the staging tar dir while
    // serializing it for the mappers, then restore the real output dir.
    _spec.setOutputDirURI(stagingSegmentTarUri.toUri().toString());
    jobConf.set(SEGMENT_GENERATION_JOB_SPEC, new Yaml().dumpAsMap(_spec));
    _spec.setOutputDirURI(outputDirURI.toString());

    // Map-only job: each mapper reads one input-URI line and builds a segment.
    job.setMapperClass(getMapperClass());
    job.setNumReduceTasks(0);

    job.setInputFormatClass(TextInputFormat.class);
    job.setOutputFormatClass(TextOutputFormat.class);

    job.setMapOutputKeyClass(LongWritable.class);
    job.setMapOutputValueClass(Text.class);

    FileInputFormat.addInputPath(job, stagingInputDir);
    FileOutputFormat.setOutputPath(job, new Path(stagingDir, "output"));

    // Submit the job
    job.waitForCompletion(true);
    if (!job.isSuccessful()) {
      throw new RuntimeException("Job failed: " + job);
    }

    // Publish: move finished segment tarballs from staging to the output dir.
    LOGGER.info("Moving segment tars from staging directory [{}] to output directory [{}]", stagingDirURI,
        outputDirURI);
    moveFiles(outputDirFS, new Path(stagingDir, SEGMENT_TAR_SUBDIR_NAME).toUri(), outputDirURI,
        _spec.isOverwriteOutput());
  } finally {
    // Always clean up staging, even if the job failed.
    LOGGER.info("Trying to clean up staging directory: [{}]", stagingDirURI);
    outputDirFS.delete(stagingDirURI, true);
  }
}
@Test
public void testSegmentGeneration()
    throws Exception {
  // Work entirely inside a throwaway temp directory.
  File testDir = Files.createTempDirectory("testSegmentGeneration-").toFile();
  testDir.delete();
  testDir.mkdirs();

  File inputDir = new File(testDir, "input");
  inputDir.mkdirs();
  File inputFile = new File(inputDir, "input.csv");
  FileUtils.writeLines(inputFile, Lists.newArrayList("col1,col2", "value1,1", "value2,2"));

  // Pre-create two empty "segment" files so overwrite behavior can be checked.
  final String outputFilename = "myTable_OFFLINE_0.tar.gz";
  final String otherFilename = "myTable_OFFLINE_100.tar.gz";
  File outputDir = new File(testDir, "output");
  FileUtils.touch(new File(outputDir, outputFilename));
  FileUtils.touch(new File(outputDir, otherFilename));

  // Set up schema file.
  final String schemaName = "myTable";
  File schemaFile = new File(testDir, "myTable.schema");
  Schema schema = new SchemaBuilder()
      .setSchemaName(schemaName)
      .addSingleValueDimension("col1", DataType.STRING)
      .addMetric("col2", DataType.INT)
      .build();
  FileUtils.write(schemaFile, schema.toPrettyJsonString(), StandardCharsets.UTF_8);

  // Set up table config file.
  File tableConfigFile = new File(testDir, "myTable.table");
  TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE)
      .setTableName("myTable")
      .setNumReplicas(1)
      .build();
  FileUtils.write(tableConfigFile, tableConfig.toJsonString(), StandardCharsets.UTF_8);

  File stagingDir = new File(testDir, "staging");
  stagingDir.mkdir();

  // Add the staging output dir, which should cause code to fail unless we've added code to remove
  // the staging dir if it exists.
  FileUtils.touch(new File(stagingDir, "output"));

  // Set up a plugins dir, with a sub-directory. We'll use an external jar,
  // since using a class inside of Pinot to find the enclosing jar is somehow
  // finding the directory of classes vs. the actual jar, on the build server
  // (though it works fine in other configurations).
  File pluginsDir = new File(testDir, "plugins");
  File myPluginDir = new File(pluginsDir, "my-plugin");
  myPluginDir.mkdirs();
  File pluginJar = new File(StringUtils.class.getProtectionDomain().getCodeSource().getLocation().toURI());
  FileUtils.copyFile(pluginJar, new File(myPluginDir, pluginJar.getName()));

  // Set up dependency jars dir.
  // FUTURE set up jar with class that we need for reading file, so we know it's working
  File dependencyJarsDir = new File(testDir, "jars");
  dependencyJarsDir.mkdir();
  File extraJar = new File(Gson.class.getProtectionDomain().getCodeSource().getLocation().toURI());
  FileUtils.copyFile(extraJar, new File(dependencyJarsDir, extraJar.getName()));

  SegmentGenerationJobSpec jobSpec = new SegmentGenerationJobSpec();
  jobSpec.setJobType("SegmentCreation");
  jobSpec.setInputDirURI(inputDir.toURI().toString());
  jobSpec.setOutputDirURI(outputDir.toURI().toString());
  jobSpec.setOverwriteOutput(false);

  RecordReaderSpec recordReaderSpec = new RecordReaderSpec();
  recordReaderSpec.setDataFormat("csv");
  recordReaderSpec.setClassName(CSVRecordReader.class.getName());
  recordReaderSpec.setConfigClassName(CSVRecordReaderConfig.class.getName());
  jobSpec.setRecordReaderSpec(recordReaderSpec);

  TableSpec tableSpec = new TableSpec();
  tableSpec.setTableName("myTable");
  tableSpec.setSchemaURI(schemaFile.toURI().toString());
  tableSpec.setTableConfigURI(tableConfigFile.toURI().toString());
  jobSpec.setTableSpec(tableSpec);

  ExecutionFrameworkSpec efSpec = new ExecutionFrameworkSpec();
  efSpec.setName("hadoop");
  efSpec.setSegmentGenerationJobRunnerClassName(HadoopSegmentGenerationJobRunner.class.getName());
  Map<String, String> extraConfigs = new HashMap<>();
  extraConfigs.put("stagingDir", stagingDir.toURI().toString());
  extraConfigs.put("dependencyJarDir", dependencyJarsDir.getAbsolutePath());
  efSpec.setExtraConfigs(extraConfigs);
  jobSpec.setExecutionFrameworkSpec(efSpec);

  PinotFSSpec pfsSpec = new PinotFSSpec();
  pfsSpec.setScheme("file");
  pfsSpec.setClassName(LocalPinotFS.class.getName());
  jobSpec.setPinotFSSpecs(Collections.singletonList(pfsSpec));
  jobSpec.setFailOnEmptySegment(true);

  System.setProperty(PluginManager.PLUGINS_DIR_PROPERTY_NAME, pluginsDir.getAbsolutePath());

  // First run: overwrite disabled, so the pre-existing (empty) segment file must survive.
  HadoopSegmentGenerationJobRunner jobRunner = new HadoopSegmentGenerationJobRunner(jobSpec);
  jobRunner.run();
  // Staging must always be cleaned up after the run.
  Assert.assertFalse(stagingDir.exists());

  // The output directory should still have the original file in it.
  File oldSegmentFile = new File(outputDir, otherFilename);
  Assert.assertTrue(oldSegmentFile.exists());

  // The output directory should have the original file in it (since we aren't overwriting)
  File newSegmentFile = new File(outputDir, outputFilename);
  Assert.assertTrue(newSegmentFile.exists());
  Assert.assertTrue(newSegmentFile.isFile());
  Assert.assertTrue(newSegmentFile.length() == 0);

  // Now run again, but this time with overwriting of output files, and confirm we got a valid segment file.
  jobSpec.setOverwriteOutput(true);
  jobRunner = new HadoopSegmentGenerationJobRunner(jobSpec);
  jobRunner.run();
  Assert.assertFalse(stagingDir.exists());

  // The original file should still be there.
  Assert.assertTrue(oldSegmentFile.exists());

  Assert.assertTrue(newSegmentFile.exists());
  Assert.assertTrue(newSegmentFile.isFile());
  Assert.assertTrue(newSegmentFile.length() > 0);

  // FUTURE - validate contents of file?
}
/**
 * Generates the template spec for the given schema, resolving nested schemas
 * relative to the supplied location.
 */
public ClassTemplateSpec generate(DataSchema schema, DataSchemaLocation location) {
  pushCurrentLocation(location);
  final ClassTemplateSpec spec = processSchema(schema, null, null);
  popCurrentLocation();
  return spec;
}
@Test
public void testPrimitiveDataSchema() {
  final IntegerDataSchema schema = new IntegerDataSchema();
  final TemplateSpecGenerator specGenerator = new TemplateSpecGenerator(_resolver);

  // An int schema must bind to the boxed java.lang.Integer type.
  final PrimitiveTemplateSpec primitiveSpec =
      (PrimitiveTemplateSpec) specGenerator.generate(schema, _location);
  Assert.assertEquals(primitiveSpec.getBindingName(), Integer.class.getName());
}
/**
 * Atomically finds one document matching {@code filter} and applies {@code update}.
 * Removal is not supported and is rejected up front.
 */
public T findAndModify(Bson filter, Bson fields, Bson sort, boolean remove, Bson update,
                       boolean returnNew, boolean upsert) {
    if (remove) {
        throw new IllegalArgumentException("Removing objects is not supported!");
    }

    final ReturnDocument returnDoc = returnNew ? ReturnDocument.AFTER : ReturnDocument.BEFORE;
    final FindOneAndUpdateOptions updateOptions = new FindOneAndUpdateOptions()
            .projection(fields)
            .sort(sort)
            .returnDocument(returnDoc)
            .upsert(upsert);

    try {
        return delegate.findOneAndUpdate(filter, update, updateOptions);
    } catch (MongoServerException e) {
        // Surface duplicate-key failures with the dedicated error type.
        throw possiblyAsDuplicateKeyError(e);
    }
}
@Test
void findAndModifyWithBsonVariants() {
    final var simpleCollection = spy(jacksonCollection("simple", Simple.class));
    final var emptyQuery = DBQuery.empty();
    final var setName = new DBUpdate.Builder().set("name", "foo");

    simpleCollection.findAndModify(emptyQuery, setName);

    // The two-argument overload must delegate with all optional arguments defaulted.
    verify(simpleCollection)
            .findAndModify(eq(emptyQuery), isNull(), isNull(), eq(false), eq(setName), eq(false), eq(false));
}
@Override
protected List<MatchResult> match(List<String> specs) throws IOException {
  // Resolve relative specs against the current working directory.
  final String workingDir = new File(".").getAbsolutePath();
  return match(workingDir, specs);
}
@Test public void testMatchWithoutParentDirectory() throws Exception { // TODO: Java core test failing on windows, https://github.com/apache/beam/issues/20478 assumeFalse(SystemUtils.IS_OS_WINDOWS); Path pattern = LocalResourceId.fromPath(temporaryFolder.getRoot().toPath(), true /* isDirectory */) .resolve("non_existing_dir", StandardResolveOptions.RESOLVE_DIRECTORY) .resolve("*", StandardResolveOptions.RESOLVE_FILE) .getPath(); assertTrue(toFilenames(localFileSystem.match(ImmutableList.of(pattern.toString()))).isEmpty()); }
public void addKeyOccurrence( KeyOccurrence occ ) { if ( null != occ ) { String sourceFolder = occ.getSourceFolder(); if ( sourceFolder == null ) { throw new RuntimeException( "No source folder found for key: " + occ.getKey() + " in package " + occ.getMessagesPackage() ); } String messagesPackage = occ.getMessagesPackage(); // Do we have a map for the source folders? // If not, add one... // Map<String, List<KeyOccurrence>> packageOccurrences = sourcePackageOccurrences.get( sourceFolder ); if ( packageOccurrences == null ) { packageOccurrences = new HashMap<>(); sourcePackageOccurrences.put( sourceFolder, packageOccurrences ); } // Do we have a map entry for the occurrences list in the source folder? // If not, add a list for the messages package // List<KeyOccurrence> occurrences = packageOccurrences.get( messagesPackage ); if ( occurrences == null ) { occurrences = new ArrayList<>(); occurrences.add( occ ); packageOccurrences.put( messagesPackage, occurrences ); } else { int index = Collections.binarySearch( occurrences, occ ); if ( index < 0 ) { // Add it to the list, keep it sorted... // occurrences.add( -index - 1, occ ); } } } }
@Test public void testAddKeyOccurrence() throws Exception { MessagesSourceCrawler messagesSourceCrawler = new MessagesSourceCrawler( null, null, null, null ); // After adding an occurrence stating "Source Folder" and "Message Package", it should be retrieved final String thePackage = "a.b.c.Package"; KeyOccurrence keyOccurrence = new KeyOccurrence( null, "Some Source Folder", thePackage, 1, 2, null, null, null ); messagesSourceCrawler.addKeyOccurrence( keyOccurrence ); // Check results List<KeyOccurrence> messagesPackage = messagesSourceCrawler.getOccurrencesForPackage( thePackage ); assertNotNull( messagesPackage ); assertEquals( 1, messagesPackage.size() ); assertEquals( keyOccurrence, messagesPackage.get( 0 ) ); }
/**
 * Blocks indefinitely, re-polling the network client until the given future completes.
 */
public void poll(RequestFuture<?> future) {
    for (;;) {
        if (future.isDone()) {
            return;
        }
        poll(time.timer(Long.MAX_VALUE), future);
    }
}
@Test public void doNotBlockIfPollConditionIsSatisfied() { NetworkClient mockNetworkClient = mock(NetworkClient.class); ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(new LogContext(), mockNetworkClient, metadata, time, 100, 1000, Integer.MAX_VALUE); // expect poll, but with no timeout consumerClient.poll(time.timer(Long.MAX_VALUE), () -> false); verify(mockNetworkClient).poll(eq(0L), anyLong()); }
/**
 * Parses the datacache partition duration from the table properties, if present.
 *
 * @return this, for call chaining
 */
public TableProperty buildDataCachePartitionDuration() {
    final String propertyKey = PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION;
    if (properties.containsKey(propertyKey)) {
        dataCachePartitionDuration =
                TimeUtils.parseHumanReadablePeriodOrDuration(properties.get(propertyKey));
    }
    return this;
}
@Test public void testBuildDataCachePartitionDuration() throws IOException { // 1. Write objects to file File file = new File(fileName); file.createNewFile(); DataOutputStream out = new DataOutputStream(new FileOutputStream(file)); HashMap<String, String> properties = new HashMap<>(); properties.put(PropertyAnalyzer.PROPERTIES_DATACACHE_PARTITION_DURATION, "3 month"); TableProperty tableProperty = new TableProperty(properties); tableProperty.write(out); out.flush(); out.close(); // 2. Read objects from file DataInputStream in = new DataInputStream(new FileInputStream(file)); TableProperty readTableProperty = TableProperty.read(in); Assert.assertNotNull(readTableProperty.buildProperty(OperationType.OP_ALTER_TABLE_PROPERTIES)); in.close(); }
@Override
public synchronized void createSchema(ConnectorSession session, String schemaName, Map<String, Object> properties)
{
    // Fail fast on duplicate names before mutating state.
    final boolean alreadyExists = schemas.contains(schemaName);
    if (alreadyExists) {
        throw new PrestoException(ALREADY_EXISTS, format("Schema [%s] already exists", schemaName));
    }
    schemas.add(schemaName);
}
@Test
public void testCreateSchema() {
    // Only the built-in "default" schema exists initially.
    assertEquals(metadata.listSchemaNames(SESSION), ImmutableList.of("default"));

    metadata.createSchema(SESSION, "test", ImmutableMap.of());
    assertEquals(metadata.listSchemaNames(SESSION), ImmutableList.of("default", "test"));
    // The new schema starts out empty.
    assertEquals(metadata.listTables(SESSION, "test"), ImmutableList.of());

    SchemaTableName tableName = new SchemaTableName("test", "first_table");
    metadata.createTable(
            SESSION,
            new ConnectorTableMetadata(
                    tableName,
                    ImmutableList.of(),
                    ImmutableMap.of()),
            false);

    // The table is visible globally and under its own schema, but not under "default".
    assertEquals(metadata.listTables(SESSION, Optional.empty()), ImmutableList.of(tableName));
    assertEquals(metadata.listTables(SESSION, Optional.of("test")), ImmutableList.of(tableName));
    assertEquals(metadata.listTables(SESSION, Optional.of("default")), ImmutableList.of());
}
/**
 * Increments the usage counter of a tenant while it is still below the default
 * quota limit.
 *
 * @return true when exactly one row was updated
 */
public boolean incrementUsageWithDefaultQuotaLimit(TenantCapacity tenantCapacity) {
    TenantCapacityMapper mapper =
            mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.TENANT_CAPACITY);

    MapperContext mapperContext = new MapperContext();
    mapperContext.putUpdateParameter(FieldConstant.GMT_MODIFIED, tenantCapacity.getGmtModified());
    mapperContext.putWhereParameter(FieldConstant.TENANT_ID, tenantCapacity.getTenant());
    mapperContext.putWhereParameter(FieldConstant.USAGE, tenantCapacity.getQuota());

    MapperResult result = mapper.incrementUsageWithDefaultQuotaLimit(mapperContext);
    try {
        return jdbcTemplate.update(result.getSql(), result.getParamList().toArray()) == 1;
    } catch (CannotGetJdbcConnectionException e) {
        FATAL_LOG.error("[db-error]", e);
        throw e;
    }
}
@Test void testIncrementUsageWithDefaultQuotaLimit() { TenantCapacity tenantCapacity = new TenantCapacity(); Timestamp timestamp = new Timestamp(System.currentTimeMillis()); tenantCapacity.setGmtModified(timestamp); tenantCapacity.setTenant("test"); tenantCapacity.setQuota(1); when(jdbcTemplate.update(anyString(), eq(timestamp), eq("test"), eq(1))).thenReturn(1); assertTrue(service.incrementUsageWithDefaultQuotaLimit(tenantCapacity)); //mock get connection fail when(jdbcTemplate.update(anyString(), eq(timestamp), eq("test"), eq(1))).thenThrow( new CannotGetJdbcConnectionException("conn fail")); try { service.incrementUsageWithDefaultQuotaLimit(tenantCapacity); assertTrue(false); } catch (Exception e) { assertEquals("conn fail", e.getMessage()); } }
public static <T> ThrowingConsumer<StreamRecord<T>, Exception> getRecordProcessor1(
        TwoInputStreamOperator<T, ?, ?> operator) {
    // Decide whether the per-record setKeyContextElement1 call can be skipped
    // for input 1.
    boolean canOmitSetKeyContext;
    if (operator instanceof AbstractStreamOperator) {
        canOmitSetKeyContext = canOmitSetKeyContext((AbstractStreamOperator<?>) operator, 0);
    } else {
        // For non-AbstractStreamOperator implementations, only a
        // KeyContextHandler that reports no key context for input 1 may skip it.
        canOmitSetKeyContext =
                operator instanceof KeyContextHandler
                        && !((KeyContextHandler) operator).hasKeyContext1();
    }

    if (canOmitSetKeyContext) {
        // Fast path: no key context needed, call processElement1 directly.
        return operator::processElement1;
    } else if (operator instanceof AsyncStateProcessing
            && ((AsyncStateProcessing) operator).isAsyncStateProcessingEnabled()) {
        // Async-state operators supply their own record processor for input 1.
        return ((AsyncStateProcessing) operator).getRecordProcessor(1);
    } else {
        // Default path: set the key context before processing each record.
        return record -> {
            operator.setKeyContextElement1(record);
            operator.processElement1(record);
        };
    }
}
@Test
void testGetRecordProcessor1() throws Exception {
    // operator1: plain operator -> key context must be set before processing.
    TestOperator operator1 = new TestOperator();
    // operator2: reports a key context for input 1 -> same as the plain path.
    TestOperator operator2 = new TestKeyContextHandlerOperator(true, true);
    // operator3: reports no key context for input 1 -> setKeyContextElement1 skipped.
    TestOperator operator3 = new TestKeyContextHandlerOperator(false, true);

    RecordProcessorUtils.getRecordProcessor1(operator1).accept(new StreamRecord<>("test"));
    assertThat(operator1.setKeyContextElement1Called).isTrue();
    assertThat(operator1.processElement1Called).isTrue();

    RecordProcessorUtils.getRecordProcessor1(operator2).accept(new StreamRecord<>("test"));
    assertThat(operator2.setKeyContextElement1Called).isTrue();
    assertThat(operator2.processElement1Called).isTrue();

    RecordProcessorUtils.getRecordProcessor1(operator3).accept(new StreamRecord<>("test"));
    assertThat(operator3.setKeyContextElement1Called).isFalse();
    assertThat(operator3.processElement1Called).isTrue();
}
@Override public boolean match(Message msg, StreamRule rule) { return Optional.ofNullable(rule.getInverted()) // When this rule is inverted, it should never match .map(inverted -> !inverted) // If `inverted` is `null`, we always return `true` .orElse(true); }
@Test
public void matchAlwaysReturnsFalseIfInverted() throws Exception {
    // An inverted "always match" rule must never match.
    final StreamRuleMock invertedRule =
            new StreamRuleMock(Map.of("_id", "stream-rule-id", "inverted", true));

    assertThat(matcher.match(message, invertedRule)).isFalse();
}
/**
 * Returns the map store configuration associated with this map config.
 *
 * @return the {@link MapStoreConfig} held by this config
 */
public MapStoreConfig getMapStoreConfig() {
    return mapStoreConfig;
}
@Test
public void testGetMapStoreConfig() {
    MapStoreConfig storeConfig = new MapConfig().getMapStoreConfig();

    // A fresh MapConfig must expose a non-null, disabled map store configuration.
    assertNotNull(storeConfig);
    assertFalse(storeConfig.isEnabled());
}
/**
 * Returns the journal id recorded for the image described by this storage info.
 */
public long getImageJournalId() {
    return imageJournalId;
}
@Test
public void test() {
    // The journal id passed to the constructor must be surfaced by the getter.
    StorageInfo storageInfo = new StorageInfo(20, ImageFormatVersion.v2);
    Assert.assertEquals(20, storageInfo.getImageJournalId());
}
@POST
@Path("/{connector}/restart")
@Operation(summary = "Restart the specified connector")
public Response restartConnector(final @PathParam("connector") String connector,
                                 final @Context HttpHeaders headers,
                                 final @DefaultValue("false") @QueryParam("includeTasks") @Parameter(description = "Whether to also restart tasks") Boolean includeTasks,
                                 final @DefaultValue("false") @QueryParam("onlyFailed") @Parameter(description = "Whether to only restart failed tasks/connectors") Boolean onlyFailed,
                                 final @Parameter(hidden = true) @QueryParam("forward") Boolean forward) throws Throwable {
    RestartRequest restartRequest = new RestartRequest(connector, onlyFailed, includeTasks);
    String forwardingPath = "/connectors/" + connector + "/restart";
    if (restartRequest.forceRestartConnectorOnly()) {
        // For backward compatibility, just restart the connector instance and return OK with no body
        FutureCallback<Void> cb = new FutureCallback<>();
        herder.restartConnector(connector, cb);
        requestHandler.completeOrForwardRequest(cb, forwardingPath, "POST", headers, null, forward);
        return Response.noContent().build();
    }

    // In all other cases, submit the async restart request and return connector state
    FutureCallback<ConnectorStateInfo> cb = new FutureCallback<>();
    herder.restartConnectorAndTasks(restartRequest, cb);
    // Propagate the query parameters so a forwarded request behaves identically
    // on the worker that actually owns the connector.
    Map<String, String> queryParameters = new HashMap<>();
    queryParameters.put("includeTasks", includeTasks.toString());
    queryParameters.put("onlyFailed", onlyFailed.toString());
    ConnectorStateInfo stateInfo = requestHandler.completeOrForwardRequest(cb, forwardingPath, "POST", headers, queryParameters, null,
            new TypeReference<ConnectorStateInfo>() {
            }, new IdentityTranslator<>(), forward);
    // 202 Accepted: the restart itself executes asynchronously.
    return Response.accepted().entity(stateInfo).build();
}
@Test
public void testRestartConnectorAndTasksRebalanceNeeded() {
    RestartRequest restartRequest = new RestartRequest(CONNECTOR_NAME, true, false);
    final ArgumentCaptor<Callback<ConnectorStateInfo>> cb = ArgumentCaptor.forClass(Callback.class);
    // Simulate the herder rejecting the restart because a rebalance is pending.
    expectAndCallbackException(cb, new RebalanceNeededException("Request cannot be completed because a rebalance is expected"))
            .when(herder).restartConnectorAndTasks(eq(restartRequest), cb.capture());

    ConnectRestException ex = assertThrows(ConnectRestException.class, () ->
            connectorsResource.restartConnector(CONNECTOR_NAME, NULL_HEADERS, restartRequest.includeTasks(), restartRequest.onlyFailed(), FORWARD)
    );
    // A pending rebalance must surface to REST clients as HTTP 409 Conflict.
    assertEquals(Response.Status.CONFLICT.getStatusCode(), ex.statusCode());
}
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
    try {
        // Parse the raw payload into a CloudTrail record and build the message.
        final CloudTrailRecord record = objectMapper.readValue(rawMessage.getPayload(), CloudTrailRecord.class);
        final String source = configuration.getString(Config.CK_OVERRIDE_SOURCE, "aws-cloudtrail");

        final Message message =
                messageFactory.createMessage(record.getConstructedMessage(), source, DateTime.parse(record.eventTime));
        message.addFields(record.additionalFieldsAsMap());
        message.addField("full_message", record.getFullMessage());
        message.addField(AWS.SOURCE_GROUP_IDENTIFIER, true);
        return message;
    } catch (Exception e) {
        throw new RuntimeException("Could not deserialize CloudTrail record.", e);
    }
}
@Test
public void testNoAdditionalEventDataField() {
    final CloudTrailCodec codec = new CloudTrailCodec(Configuration.EMPTY_CONFIGURATION,
            new ObjectMapperProvider().get(), messageFactory);
    // A console-login record WITHOUT an "additionalEventData" key: decoding it
    // must not synthesize an "additional_event_data" field on the message.
    final RawMessage rawMessage = new RawMessage(("{\n" +
            "\"eventVersion\": \"1.05\",\n" +
            "\"userIdentity\": {\n" +
            "\"type\": \"IAMUser\",\n" +
            "\"principalId\": \"AIDAJHGSCCCCBBBBAAAA\",\n" +
            "\"arn\": \"arn:aws:iam::1111122221111:user/some.user\",\n" +
            "\"accountId\": \"1111122221111\",\n" +
            "\"userName\": \"some.user\"" +
            "},\n" +
            "\"eventTime\": \"2020-08-19T14:12:28Z\",\n" +
            "\"eventSource\": \"signin.amazonaws.com\",\n" +
            "\"eventName\": \"ConsoleLogin\",\n" +
            "\"awsRegion\": \"us-east-1\",\n" +
            "\"sourceIPAddress\": \"127.0.0.1\",\n" +
            "\"userAgent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36\",\n" +
            "\"requestParameters\": null,\n" +
            "\"responseElements\": {\n" +
            "\"ConsoleLogin\": \"Success\"\n" +
            "},\n" +
            "\"eventID\": \"df38ed44-32d4-43f6-898f-5a55d260a2bb\",\n" +
            "\"eventType\": \"AwsConsoleSignIn\",\n" +
            "\"recipientAccountId\": \"1111122221111\"\n" +
            "}").getBytes(StandardCharsets.UTF_8));

    Message message = codec.decode(rawMessage);

    assertNull(message.getField("additional_event_data"));
}
@Benchmark @Threads(16) // Use several threads since we expect contention during logging public void testLogging(ManyExpectedCallsLoggingClientAndService client) { LOG.warn("log me"); }
@Test
public void testLogging() throws Exception {
    // Drive the benchmark body once against a real service, then tear it down.
    ManyExpectedCallsLoggingClientAndService loggingService = new ManyExpectedCallsLoggingClientAndService();
    new BeamFnLoggingClientBenchmark().testLogging(loggingService);
    loggingService.tearDown();
}
@Override public boolean isActive() { // As java.nio.ServerSocketChannel.isBound() will continue to return true even after the channel was closed // we will also need to check if it is open. return isOpen() && bound; }
@Test
public void testIsActiveFalseAfterClose() throws Exception {
    NioServerDomainSocketChannel serverSocketChannel = new NioServerDomainSocketChannel();
    EventLoopGroup group = new NioEventLoopGroup(1);
    File file = newRandomTmpFile();
    try {
        group.register(serverSocketChannel).syncUninterruptibly();
        Channel channel = serverSocketChannel.bind(
                        newUnixDomainSocketAddress(file.getAbsolutePath()))
                .syncUninterruptibly().channel();
        // While bound and open the channel reports active.
        assertTrue(channel.isActive());
        assertTrue(channel.isOpen());
        channel.close().syncUninterruptibly();
        // After close, both open and active must flip to false.
        assertFalse(channel.isOpen());
        assertFalse(channel.isActive());
    } finally {
        // Tear down the event loop and delete the socket file regardless of outcome.
        group.shutdownGracefully();
        file.delete();
    }
}
/**
 * Closes the websocket with the given code and message, sending a final JSON
 * error frame first. Any failure during close is logged and swallowed.
 */
static void closeSilently(
    final ServerWebSocket webSocket,
    final int code,
    final String message) {
  try {
    // A null message degrades to an empty error string in the payload.
    final String safeMessage = message != null ? message : "";
    final ImmutableMap<String, String> payload = ImmutableMap.of("error", safeMessage);
    final String json = ApiJsonMapper.INSTANCE.get().writeValueAsString(payload);
    webSocket
        .writeFinalTextFrame(json, r -> { })
        .close((short) code, truncate(message));
  } catch (final Exception e) {
    LOG.info("Exception caught closing websocket", e);
  }
}
@Test public void shouldHandleNullMessage() throws Exception { // When: SessionUtil.closeSilently(websocket, INVALID_MESSAGE_TYPE.code(), null); // Then: verify(websocket).close(codeCaptor.capture(), reasonCaptor.capture()); assertThat(reasonCaptor.getValue(), is("")); }
/**
 * Populates a workflow instance for a (re)run described by {@code runRequest}:
 * resets all run-scoped fields, translates the runtime DAG, validates step-level
 * parameter overrides against that DAG, seeds the timeline from the initiator,
 * and initiates workflow params/properties. If param initiation throws, the
 * instance is either persisted as FAILED (when the request allows it) or the
 * exception is rethrown.
 */
public void updateWorkflowInstance(WorkflowInstance instance, RunRequest runRequest) {
    if (!runRequest.isFreshRun() && instance.getRuntimeDag() != null) {
        // For restart, set the baseline aggregatedInfo using the previous run aggregated info.
        instance.setAggregatedInfo(AggregatedViewHelper.computeAggregatedView(instance, true));
    }
    // set run id to the latest but unknown, which will be set later
    instance.setWorkflowRunId(Constants.LATEST_ONE);
    instance.setWorkflowUuid(IdHelper.getOrCreateUuid(runRequest.getRequestId()));
    instance.setExecutionId(null); // clear execution id if set
    instance.setRunConfig(runRequest.toRunConfig());
    instance.setRunParams(runRequest.getRunParams());
    instance.setStepRunParams(runRequest.getStepRunParams());
    instance.setInitiator(runRequest.getInitiator());
    instance.setStatus(WorkflowInstance.Status.CREATED);
    instance.setRequestTime(runRequest.getRequestTime());
    // create time will be set within transaction to ensure the same order as instance id.
    // not set startTime and endTime, modified time will be set by DB automatically
    instance.setRuntimeOverview(null); // always null before running
    instance.setArtifacts(runRequest.getArtifacts());
    // set runtime dag (must happen before validating step overrides below)
    instance.setRuntimeDag(dagTranslator.translate(instance));
    // validate run params: every overridden step id must exist in the runtime dag
    if (runRequest.getStepRunParams() != null) {
        Checks.checkTrue(
            runRequest.getStepRunParams().keySet().stream()
                .allMatch(instance.getRuntimeDag()::containsKey),
            "non-existing step id detected in step param overrides: inputs %s vs dag %s",
            runRequest.getStepRunParams().keySet(),
            instance.getRuntimeDag().keySet());
    }
    // set initial timeline
    instance.setTimeline(
        new Timeline(Collections.singletonList(runRequest.getInitiator().getTimelineEvent())));
    try {
        initiateWorkflowParamsAndProperties(instance, runRequest);
    } catch (RuntimeException e) {
        if (runRequest.isPersistFailedRun()) {
            // keep the instance with FAILED status and record the failure cause
            instance.setStatus(WorkflowInstance.Status.FAILED);
            instance
                .getTimeline()
                .add(
                    TimelineDetailsEvent.from(
                        Details.create(e,
                            false, "Failed to initiate workflow params and properties")));
        } else {
            throw e;
        }
    }
}
// createWorkflowInstance must stamp identity fields (workflow id, internal id,
// uuid), set CREATED status, and delegate param generation to paramsManager for
// both manual and time-triggered initiators.
@Test
public void testUpdateWorkflowInstance() {
    Map<String, ParamDefinition> runParams =
        Collections.singletonMap("p1", ParamDefinition.buildParamDefinition("p1", "d1"));
    RunRequest request =
        RunRequest.builder()
            .initiator(new ManualInitiator())
            .currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
            .runParams(runParams)
            .build();
    Workflow workflow = definition.getWorkflow();
    WorkflowInstance instance =
        workflowHelper.createWorkflowInstance(workflow, 12345L, 1, new RunProperties(), request);
    assertEquals(workflow.getId(), instance.getWorkflowId());
    assertEquals(WorkflowInstance.Status.CREATED, instance.getStatus());
    assertEquals(12345L, instance.getInternalId().longValue());
    assertNotNull(instance.getParams());
    assertNotNull(instance.getWorkflowUuid());
    // For manual initiator, verify if params are generated.
    Mockito.verify(paramsManager, Mockito.times(1)).generateMergedWorkflowParams(instance, request);
    request =
        RunRequest.builder()
            .initiator(new TimeInitiator())
            .currentPolicy(RunPolicy.START_FRESH_NEW_RUN)
            .runParams(runParams)
            .build();
    WorkflowInstance createdInstance =
        workflowHelper.createWorkflowInstance(workflow, 123L, 1, new RunProperties(), request);
    // For trigger based, verify if null has been passed.
    Mockito.verify(paramsManager, Mockito.times(1))
        .generateMergedWorkflowParams(createdInstance, request);
}
/**
 * Returns the resolved host address configured for this connection.
 *
 * @return the host {@link InetAddress}
 */
public InetAddress getHostAddress() {
    return this.hostAddress;
}
// Setting the host by IP string must surface through getHostAddress();
// InetAddress.toString() is "/<ip>" when no hostname is attached.
@Test
public void setIp() {
    DummyConnectionConfiguration.Builder builder = newUnitTestBuilder();
    final String ip = "192.168.0.1";
    builder.setHost(ip);
    DummyConnectionConfiguration connectionConfiguration = builder.build();
    assertEquals('/' + ip, connectionConfiguration.getHostAddress().toString());
}
/**
 * Builds the metadata node path "{META_DATA}/{path}", collapsing accidental
 * double separators (e.g. when {@code path} itself starts with the separator).
 *
 * @param path relative metadata path; may begin with the path separator
 * @return the joined path with "//" sequences collapsed
 */
public static String buildMetaDataPath(final String path) {
    String join = String.join(PATH_SEPARATOR, META_DATA, path);
    // replace(CharSequence, CharSequence) does a literal substitution; the
    // previous replaceAll compiled "//" as a regex on every call for no benefit.
    // NOTE(review): a run of 3+ slashes is only partially collapsed
    // ("///" -> "//"); confirm inputs never contain such runs.
    return join.replace("//", PATH_SEPARATOR);
}
// The built path must equal "<metadata-parent>/<name>" for a random node name.
@Test
public void testBuildMetaDataPath() {
    String metadata = RandomStringUtils.randomAlphanumeric(10);
    String metaDataPath = DefaultPathConstants.buildMetaDataPath(metadata);
    assertThat(metaDataPath, notNullValue());
    assertThat(String.join(SEPARATOR, META_DATA_PARENT, metadata), equalTo(metaDataPath));
}
/**
 * Maps each request URI/key pair to a concrete host for scatter-gather.
 *
 * <p>Two passes: (1) assign each request to a partition via the service's
 * {@link PartitionAccessor}; (2) pick a host per partition from the hash ring
 * using the service's request hash function. Keys that cannot be mapped in
 * either pass are accumulated in {@code unmapped}.
 *
 * <p>All requests are assumed to target the same service; the first request is
 * used as the sample to resolve service name, rings, and hash function.
 *
 * @return mapping of host -> keys, plus unmapped keys and host -> partition id
 * @throws ServiceUnavailableException if partition/ring metadata is unavailable
 */
@Override
public <KEY> URIMappingResult<KEY> mapUris(List<URIKeyPair<KEY>> requestUriKeyPairs)
    throws ServiceUnavailableException {
    if (requestUriKeyPairs == null || requestUriKeyPairs.isEmpty()) {
        return new URIMappingResult<>(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap());
    }
    // API assumes that all requests will be made to the same service, just use the
    // first request to get the service name and act as sample uri
    URI sampleURI = requestUriKeyPairs.get(0).getRequestUri();
    String serviceName = LoadBalancerUtil.getServiceNameFromUri(sampleURI);
    // To achieve scatter-gather, we require the following information
    PartitionAccessor accessor = _partitionInfoProvider.getPartitionAccessor(serviceName);
    Map<Integer, Ring<URI>> rings = _hashRingProvider.getRings(sampleURI);
    HashFunction<Request> hashFunction = _hashRingProvider.getRequestHashFunction(serviceName);
    Map<Integer, Set<KEY>> unmapped = new HashMap<>();
    // Pass One: bucket requests by partition id
    Map<Integer, List<URIKeyPair<KEY>>> requestsByPartition =
        distributeToPartitions(requestUriKeyPairs, accessor, unmapped);
    // Pass Two: resolve a host per partition from the hash rings
    Map<URI, Integer> hostToParitionId = new HashMap<>();
    Map<URI, Set<KEY>> hostToKeySet =
        distributeToHosts(requestsByPartition, rings, hashFunction, hostToParitionId, unmapped);
    return new URIMappingResult<>(hostToKeySet, unmapped, hostToParitionId);
}
// Without sticky routing, mapUris must pick exactly one host per partition,
// tag each host with the correct partition id, and map every key exactly once
// (mutually exclusive, collectively exhaustive).
@Test
public void testMapUrisPartitionedOnly() throws ServiceUnavailableException {
    int partitionCount = 10;
    int requestPerPartition = 100;
    int totalHostCount = 100;
    HashRingProvider ringProvider =
        createStaticHashRingProvider(totalHostCount, partitionCount, getHashFunction(false));
    PartitionInfoProvider infoProvider = createRangeBasedPartitionInfoProvider(partitionCount);
    URIMapper mapper = new RingBasedUriMapper(ringProvider, infoProvider);
    List<URIKeyPair<Integer>> requests = testUtil.generateRequests(partitionCount, requestPerPartition);
    URIMappingResult<Integer> results = mapper.mapUris(requests);
    Map<URI, Set<Integer>> mapping = results.getMappedKeys();
    Map<URI, Integer> hostToPartitionId = results.getHostPartitionInfo();
    // No unmapped keys
    Assert.assertTrue(results.getUnmappedKeys().isEmpty());
    // Without sticky routing, one host should be returned for each partition
    Assert.assertEquals(10, mapping.size());
    Assert.assertEquals(10, hostToPartitionId.size());
    for (Map.Entry<URI, Integer> entry : hostToPartitionId.entrySet()) {
        // partition ids are correctly assigned for each URI
        Assert.assertTrue(entry.getKey().toString().contains(String.valueOf(entry.getValue())));
    }
    Set<Integer> mappedKeys = mapping.values().stream().reduce(new HashSet<>(), (e1, e2) -> {
        e1.addAll(e2);
        return e1;
    });
    int mappedKeyCount = mapping.values().stream().map(Set::size).reduce(Integer::sum).get();
    // Collective exhaustiveness and mutual exclusiveness
    Assert.assertEquals(partitionCount * requestPerPartition, mappedKeys.size());
    Assert.assertEquals(partitionCount * requestPerPartition, mappedKeyCount);
}
/**
 * Filters the candidate inter-broker replica tasks down to those whose topic
 * partition is not currently in movement; those are the tasks that are safe to
 * re-execute. Logs the result only when there is something to re-execute.
 *
 * @param partitionsInMovement      partitions with an in-flight reassignment
 * @param candidateTasksToReexecute candidate tasks to check
 * @return tasks whose partitions are not in movement (possibly empty)
 */
public static List<ExecutionTask> getInterBrokerReplicaTasksToReexecute(Set<TopicPartition> partitionsInMovement,
                                                                        Collection<ExecutionTask> candidateTasksToReexecute) {
    List<ExecutionTask> reexecutable = new ArrayList<>();
    for (ExecutionTask candidate : candidateTasksToReexecute) {
        TopicPartition partition = candidate.proposal().topicPartition();
        if (partitionsInMovement.contains(partition)) {
            continue;
        }
        reexecutable.add(candidate);
    }
    if (!reexecutable.isEmpty()) {
        LOG.info("Found tasks to re-execute: {} while detected in-movement partitions: {}.",
                 reexecutable, partitionsInMovement);
    }
    return reexecutable;
}
// Covers three cases against an in-movement set of {P0, P1}: all candidates in
// movement, a mix, and a strict subset; only partitions not in movement are
// returned for re-execution.
@Test
public void testGetInterBrokerReplicaTasksToReexecute() {
    Set<TopicPartition> partitionsInMovement = Set.of(P0, P1);
    ReplicaPlacementInfo replicaPlacementInfoPlaceHolder = new ReplicaPlacementInfo(BROKER_ID_PLACEHOLDER);
    ExecutionProposal proposalForPartition0 = new ExecutionProposal(P0, PRODUCE_SIZE_IN_BYTES,
        replicaPlacementInfoPlaceHolder,
        Collections.singletonList(replicaPlacementInfoPlaceHolder),
        Collections.singletonList(replicaPlacementInfoPlaceHolder));
    ExecutionProposal proposalForPartition1 = new ExecutionProposal(P1, PRODUCE_SIZE_IN_BYTES,
        replicaPlacementInfoPlaceHolder,
        Collections.singletonList(replicaPlacementInfoPlaceHolder),
        Collections.singletonList(replicaPlacementInfoPlaceHolder));
    ExecutionProposal proposalForPartition2 = new ExecutionProposal(P2, PRODUCE_SIZE_IN_BYTES,
        replicaPlacementInfoPlaceHolder,
        Collections.singletonList(replicaPlacementInfoPlaceHolder),
        Collections.singletonList(replicaPlacementInfoPlaceHolder));
    ExecutionTask executionTaskForPartition0 = new ExecutionTask(EXECUTION_ID_PLACEHOLDER,
        proposalForPartition0, ExecutionTask.TaskType.INTER_BROKER_REPLICA_ACTION,
        EXECUTION_ALERTING_THRESHOLD_MS);
    ExecutionTask executionTaskForPartition1 = new ExecutionTask(EXECUTION_ID_PLACEHOLDER,
        proposalForPartition1, ExecutionTask.TaskType.INTER_BROKER_REPLICA_ACTION,
        EXECUTION_ALERTING_THRESHOLD_MS);
    ExecutionTask executionTaskForPartition2 = new ExecutionTask(EXECUTION_ID_PLACEHOLDER,
        proposalForPartition2, ExecutionTask.TaskType.INTER_BROKER_REPLICA_ACTION,
        EXECUTION_ALERTING_THRESHOLD_MS);
    // Case 1: all partitions in candidate tasks are already in movement. Expect nothing to re-execute
    List<ExecutionTask> tasks1 = List.of(executionTaskForPartition0, executionTaskForPartition1);
    List<ExecutionTask> tasksToReexecute1 =
        ExecutionUtils.getInterBrokerReplicaTasksToReexecute(partitionsInMovement, tasks1);
    Assert.assertTrue(tasksToReexecute1.isEmpty());
    // Case 2: some of partitions in candidate tasks are in movement and some are not.
    // Expect some tasks to re-execute
    List<ExecutionTask> tasks2 =
        List.of(executionTaskForPartition0, executionTaskForPartition1, executionTaskForPartition2);
    List<ExecutionTask> tasksToReexecute2 =
        ExecutionUtils.getInterBrokerReplicaTasksToReexecute(partitionsInMovement, tasks2);
    Assert.assertEquals(1, tasksToReexecute2.size());
    Assert.assertEquals(P2, tasksToReexecute2.get(0).proposal().topicPartition());
    // Case 3: Partitions of candidate tasks is subset of partitions in movement. Expect nothing to re-execute
    List<ExecutionTask> tasks3 = List.of(executionTaskForPartition0);
    List<ExecutionTask> tasksToReexecute3 =
        ExecutionUtils.getInterBrokerReplicaTasksToReexecute(partitionsInMovement, tasks3);
    Assert.assertTrue(tasksToReexecute3.isEmpty());
}
/**
 * Finds the index of the first template segment that contains a variable
 * (e.g. {@code ${USER}}).
 *
 * @return the index of the first dynamic segment, or empty if all segments
 *         are static
 */
public OptionalInt getFirstDynamicSegment() {
    for (int index = 0; index < segments.size(); index++) {
        if (segments.get(index).hasVariables()) {
            return OptionalInt.of(index);
        }
    }
    return OptionalInt.empty();
}
// The first segment containing a ${...} variable determines the returned index
// (dot-separated, zero-based).
@Test
public void testFirstDynamicSegment() {
    ResourceGroupIdTemplate template = new ResourceGroupIdTemplate("test.${USER}.${SOURCE}");
    assertEquals(template.getFirstDynamicSegment(), OptionalInt.of(1));
    template = new ResourceGroupIdTemplate("test.pipeline.job_${pipeline}_user:${USER}.${USER}");
    assertEquals(template.getFirstDynamicSegment(), OptionalInt.of(2));
}
// Derives the block type (e.g. contiguous vs striped) from the packed header
// field via HeaderFormat.
@VisibleForTesting
@Override
public BlockType getBlockType() {
    return HeaderFormat.getBlockType(header);
}
// A replicated file reports CONTIGUOUS; an erasure-coded file reports STRIPED.
@Test
public void testGetBlockType() {
    replication = 3;
    preferredBlockSize = 128 * 1024 * 1024;
    INodeFile inf = createINodeFile(replication, preferredBlockSize);
    assertEquals(inf.getBlockType(), CONTIGUOUS);
    INodeFile striped = createStripedINodeFile(preferredBlockSize);
    assertEquals(striped.getBlockType(), STRIPED);
}
// Compiles a csimple script as a predicate; delegates to doCompile with the
// predicate flag set to true.
@Override
public CSimpleExpression compilePredicate(CamelContext camelContext, String script) {
    return doCompile(camelContext, script, true);
}
// A compiled predicate "${bodyAs(int)} > 100" must match for 123 and not for 44.
@Test
public void testCompilerPredicate() {
    JoorCSimpleCompiler compiler = new JoorCSimpleCompiler();
    compiler.start();
    CSimpleExpression method = compiler.compilePredicate(context, "${bodyAs(int)} > 100");
    Exchange exchange = new DefaultExchange(context);
    exchange.getMessage().setBody("123");
    boolean out = method.matches(exchange);
    Assertions.assertTrue(out);
    exchange.getMessage().setBody("44");
    out = method.matches(exchange);
    Assertions.assertFalse(out);
    compiler.stop();
}
// Registers the XA data source with Atomikos for transaction recovery by
// wrapping it in an AtomikosXARecoverableResource.
@Override
public void registerRecoveryResource(final String dataSourceName, final XADataSource xaDataSource) {
    userTransactionService.registerResource(new AtomikosXARecoverableResource(dataSourceName, xaDataSource));
}
// Registering a recovery resource must forward an AtomikosXARecoverableResource
// to the user transaction service.
@Test
void assertRegisterRecoveryResource() {
    transactionManagerProvider.registerRecoveryResource("ds1", xaDataSource);
    verify(userTransactionService).registerResource(any(AtomikosXARecoverableResource.class));
}
/**
 * Resolves the service-name mapping for a consumer URL: a mapping declared
 * directly on the URL wins; otherwise fall back to the mapping cache keyed by
 * the URL's mapping key.
 */
@Override
public Set<String> getMapping(URL consumerURL) {
    final Set<String> declaredOnUrl = ServiceNameMapping.getMappingByUrl(consumerURL);
    return declaredOnUrl != null
        ? declaredOnUrl
        : mappingCacheManager.get(ServiceNameMapping.buildMappingKey(consumerURL));
}
// A "provided-by" parameter on the URL must be returned directly as the mapping.
@Test
void testGetServices() {
    url = url.addParameter(PROVIDED_BY, "app1,app2");
    Set<String> services = mapping.getMapping(url);
    Assertions.assertTrue(services.contains("app1"));
    Assertions.assertTrue(services.contains("app2"));
    // // remove mapping cache, check get() works.
    // mapping.removeCachedMapping(ServiceNameMapping.buildMappingKey(url));
    // services = mapping.initInterfaceAppMapping(url);
    // Assertions.assertTrue(services.contains("remote-app1"));
    // Assertions.assertTrue(services.contains("remote-app2"));
    // Assertions.assertNotNull(mapping.getCachedMapping(url));
    // Assertions.assertIterableEquals(mapping.getCachedMapping(url), services);
}
/**
 * Fast-forwards the Liquibase change log: with "--all", marks every pending
 * change set as run (changeLogSync); otherwise marks only the next one. With
 * "--dry-run", the SQL that would be recorded is written to the configured
 * print stream instead of being executed.
 */
@Override
public void run(Namespace namespace, Liquibase liquibase) throws Exception {
    final String context = getContext(namespace);
    final boolean dryRun = Boolean.TRUE.equals(namespace.getBoolean("dry-run"));
    final boolean syncAll = Boolean.TRUE.equals(namespace.getBoolean("all"));
    if (syncAll && dryRun) {
        liquibase.changeLogSync(context, new OutputStreamWriter(printStream, StandardCharsets.UTF_8));
    } else if (syncAll) {
        liquibase.changeLogSync(context);
    } else if (dryRun) {
        liquibase.markNextChangeSetRan(context, new OutputStreamWriter(printStream, StandardCharsets.UTF_8));
    } else {
        liquibase.markNextChangeSetRan(context);
    }
}
// After fast-forwarding all change sets, a subsequent migrate must be a no-op:
// the manually inserted row is the only one in "persons".
@Test
void testFastForwardAll() throws Exception {
    // Create the "persons" table manually and add some data
    try (Handle handle = dbi.open()) {
        handle.execute("create table persons(id int, name varchar(255))");
        handle.execute("insert into persons (id, name) values (12, 'Greg Young')");
    }
    // Fast-forward all the changes
    fastForwardCommand.run(null, new Namespace(Map.of("all", true, "dry-run", false)), conf);
    // No migrations is performed
    new DbMigrateCommand<>(
        TestMigrationConfiguration::getDataSource,
        TestMigrationConfiguration.class, "migrations.xml")
        .run(null, new Namespace(Map.of()), conf);
    // Nothing is added to the persons table
    try (Handle handle = dbi.open()) {
        assertThat(handle.createQuery("select count(*) from persons")
            .mapTo(Integer.class)
            .first())
            .isEqualTo(1);
    }
}
// Convenience overload: injects topic properties using a fresh default
// TopicProperties.Builder.
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement
) {
    return inject(statement, new TopicProperties.Builder());
}
// CLEANUP_POLICY is inferred from the source type and must be rejected when
// supplied explicitly in a CREATE STREAM WITH clause.
@Test
public void shouldThrowIfCleanupPolicyConfigPresentInCreateStream() {
    // Given:
    givenStatement("CREATE STREAM x (FOO VARCHAR) WITH (kafka_topic='foo', partitions=1, cleanup_policy='whatever');");

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> injector.inject(statement, builder)
    );

    // Then:
    assertThat(
        e.getMessage(),
        containsString("Invalid config variable in the WITH clause: CLEANUP_POLICY.\n"
            + "The CLEANUP_POLICY config is automatically inferred based on the type of source (STREAM or TABLE).\n"
            + "Users can't set the CLEANUP_POLICY config manually."));
}
// Asserts that {@code value} is actually serializable by delegating to
// clone(value) — presumably a serialize/deserialize round trip (see clone) —
// and returns the resulting copy.
public static <T extends Serializable> T ensureSerializable(T value) {
    return clone(value);
}
// A coder that defeats Java serialization must surface as an
// IllegalArgumentException whose message mentions the serialization failure.
@Test
public void testEnsureSerializableWithUnserializableCoderByJava() {
    expectedException.expect(IllegalArgumentException.class);
    expectedException.expectMessage("unable to serialize");
    SerializableUtils.ensureSerializable(new UnserializableCoderByJava());
}
// This rule attribute enhances no tables; always returns an immutable empty set.
@Override
public Collection<String> getEnhancedTableNames() {
    return Collections.emptySet();
}
// The enhanced table names must be empty for this rule attribute.
@Test
void assertGetEnhancedTableMapper() {
    assertThat(new LinkedList<>(ruleAttribute.getEnhancedTableNames()), is(Collections.emptyList()));
}
/**
 * Acquires the lock, blocking until it is available. Delegates to the
 * interruptible overload with sentinel arguments (-1, null, false) —
 * presumably "no lease limit". Since {@link java.util.concurrent.locks.Lock#lock()}
 * cannot throw {@link InterruptedException}, interruption is surfaced as an
 * {@link IllegalStateException}.
 *
 * @throws IllegalStateException if the waiting thread is interrupted
 */
@Override
public void lock() {
    try {
        lock(-1, null, false);
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can still observe the
        // interruption, and keep the original exception as the cause
        // (the previous code discarded both).
        Thread.currentThread().interrupt();
        throw new IllegalStateException(e);
    }
}
// In cluster mode, repeated lock/unlock cycles on the same named lock must
// toggle isLocked() correctly each time.
@Test
public void testInCluster() throws Exception {
    testInCluster(redisson -> {
        for (int i = 0; i < 3; i++) {
            RLock lock = redisson.getLock("myLock");
            lock.lock();
            assertThat(lock.isLocked()).isTrue();
            lock.unlock();
            assertThat(lock.isLocked()).isFalse();
        }
    });
}
// Convenience overload: reads the log buffer starting from sequence number 0.
protected String getLogBuffer( VariableSpace space, String logChannelId, LogStatus status, String limit ) {
    return getLogBuffer( space, logChannelId, status, limit, 0 );
}
// Reading the buffer from a later starting sequence must exclude events logged
// by an earlier execution on the same channel. Uses reflection to install a
// spied LoggingBuffer/registry and to read BufferLine's private sequence.
@Test
public void testSecondJobExecutionDoesNotReturnLogFromFirstExecution() throws Exception {
    try ( MockedStatic<KettleLogStore> kettleLogStoreMockedStatic = mockStatic( KettleLogStore.class );
          MockedStatic<LoggingRegistry> loggingRegistryMockedStatic = mockStatic( LoggingRegistry.class ) ) {
        // Spy a small buffer and route the static accessors to it
        LoggingBuffer lb = spy( new LoggingBuffer( 10 ) );
        kettleLogStoreMockedStatic.when( KettleLogStore::getAppender ).thenReturn( lb );
        doCallRealMethod().when( lb ).getBuffer( anyString(), anyBoolean(), anyInt() );
        LoggingRegistry lr = mock( LoggingRegistry.class );
        loggingRegistryMockedStatic.when( LoggingRegistry::getInstance ).thenReturn( lr );
        doReturn( List.of( "1" ) ).when( lr ).getLogChannelChildren( anyString() );
        // Inject the mocked registry and a fresh backing map into the buffer
        Field privateLoggingRegistryField = LoggingBuffer.class.getDeclaredField( "loggingRegistry" );
        privateLoggingRegistryField.setAccessible( true );
        ReflectionUtils.setField( privateLoggingRegistryField, lb, lr );
        ConcurrentSkipListMap<Integer, BufferLine> bl = new ConcurrentSkipListMap<>();
        Field privateLBufferField = LoggingBuffer.class.getDeclaredField( "buffer" );
        privateLBufferField.setAccessible( true );
        ReflectionUtils.setField( privateLBufferField, lb, bl );
        // First "job execution" event
        KettleLoggingEvent kLE1 = spy( KettleLoggingEvent.class );
        LogMessage lm = new LogMessage( "First Job Execution Logging Event", "1", LogLevel.BASIC );
        kLE1.setMessage( lm );
        BufferLine firstBufferLine = new BufferLine( kLE1 );
        // Capture the global sequence counter so the read offsets are exact
        Field bufferSequenceNum = BufferLine.class.getDeclaredField( "sequence" );
        ReflectionUtils.makeAccessible( bufferSequenceNum );
        int startingBufferSequence =
            ( (AtomicInteger) ReflectionUtils.getField( bufferSequenceNum, firstBufferLine ) ).intValue();
        addToBuffer( bl, firstBufferLine );
        VariableSpace vs = mock( VariableSpace.class );
        BaseLogTable baseLogTable = new BaseLogTableTestImpl( vs, null, "", "", "" );
        // Reading from the first sequence returns the first event
        String s1 = baseLogTable.getLogBuffer( vs, "1", LogStatus.START, "", startingBufferSequence );
        assertTrue( s1.contains( "First Job Execution Logging Event" ) );
        // Second "job execution" event
        KettleLoggingEvent kLE2 = spy( KettleLoggingEvent.class );
        LogMessage lm2 = new LogMessage( "Second Job Execution Logging Event", "1", LogLevel.BASIC );
        kLE2.setMessage( lm2 );
        addToBuffer( bl, new BufferLine( kLE2 ) );
        // Reading from the next sequence must only see the second event
        String s2 = baseLogTable.getLogBuffer( vs, "1", LogStatus.START, "", startingBufferSequence + 1 );
        assertFalse( s2.contains( "First Job Execution Logging Event" ) );
        assertTrue( s2.contains( "Second Job Execution Logging Event" ) );
    }
}
/**
 * Determines the status of an input file: SCM change information takes
 * precedence; when the SCM has no answer, fall back to comparing the file's
 * hash against the project repositories.
 */
InputFile.Status status(String moduleKeyWithBranch, DefaultInputFile inputFile, String hash) {
    final InputFile.Status scmStatus = findStatusFromScm(inputFile);
    return scmStatus != null
        ? scmStatus
        : checkChangedWithProjectRepositories(moduleKeyWithBranch, inputFile, hash);
}
// With no SCM information available, status falls back to hash comparison:
// same hash -> SAME, different hash -> CHANGED, unknown file -> ADDED.
@Test
public void detect_status() {
    ScmChangedFiles changedFiles = new ScmChangedFiles(null);
    StatusDetection statusDetection = new StatusDetection(projectRepositories, changedFiles);
    assertThat(statusDetection.status("foo", createFile("src/Foo.java"), "ABCDE")).isEqualTo(InputFile.Status.SAME);
    assertThat(statusDetection.status("foo", createFile("src/Foo.java"), "XXXXX")).isEqualTo(InputFile.Status.CHANGED);
    assertThat(statusDetection.status("foo", createFile("src/Other.java"), "QWERT")).isEqualTo(InputFile.Status.ADDED);
}
// Validates the policy (non-null, non-empty UID), persists it in the store,
// and logs the creation.
@Override
public void createNetworkPolicy(NetworkPolicy networkPolicy) {
    checkNotNull(networkPolicy, ERR_NULL_NETWORK_POLICY);
    checkArgument(!Strings.isNullOrEmpty(networkPolicy.getMetadata().getUid()),
        ERR_NULL_NETWORK_POLICY_UID);
    k8sNetworkPolicyStore.createNetworkPolicy(networkPolicy);
    log.info(String.format(MSG_NETWORK_POLICY,
        networkPolicy.getMetadata().getName(), MSG_CREATED));
}
// Creating a null network policy must fail the precondition check.
@Test(expected = NullPointerException.class)
public void testCreateNullNetworkPolicy() {
    target.createNetworkPolicy(null);
}
// Stores the option in the backing configuration; returns this for chaining.
@Override
public <T> TableConfig set(ConfigOption<T> option, T value) {
    configuration.set(option, value);
    return this;
}
// Ambiguous zone abbreviations such as "PST" must be rejected when the local
// time zone is read back.
@Test
void testGetInvalidAbbreviationLocalTimeZone() {
    CONFIG_BY_CONFIGURATION.set("table.local-time-zone", "PST");
    assertThatThrownBy(CONFIG_BY_CONFIGURATION::getLocalTimeZone)
        .isInstanceOf(ValidationException.class)
        .hasMessageContaining("Invalid time zone.");
}