focal_method: string (length 13 to 60.9k characters)
test_case: string (length 25 to 109k characters)
/**
 * Builds a "count" and "time" gauge pair for every garbage collector the JVM
 * reports, normalising whitespace in GC names to dashes.
 *
 * @return an unmodifiable map of metric name to gauge
 */
@Override
public Map<String, Metric> getMetrics() {
    final Map<String, Metric> gauges = new HashMap<>();
    for (final GarbageCollectorMXBean gc : garbageCollectors) {
        // GC names may contain spaces (e.g. "PS Scavenge"); replace them with
        // '-' so the metric name is a single token.
        final String name = WHITESPACE.matcher(gc.getName()).replaceAll("-");
        gauges.put(name(name, "count"), (Gauge<Long>) gc::getCollectionCount);
        gauges.put(name(name, "time"), (Gauge<Long>) gc::getCollectionTime);
    }
    return Collections.unmodifiableMap(gauges);
}
/** Sanity check: every JVM exposes at least one GC, so the metric set must be non-empty. */
@Test
public void autoDiscoversGCs() {
    assertThat(new GarbageCollectorMetricSet().getMetrics().keySet())
        .isNotEmpty();
}
/**
 * Returns the live KsqlConfig instance.
 *
 * <p>Intentionally exposes the internal reference (hence the suppressed
 * EI_EXPOSE_REP SpotBugs warning); callers are trusted not to mutate it.
 */
@Override
@SuppressFBWarnings(value = "EI_EXPOSE_REP")
public KsqlConfig getKsqlConfig() {
    return ksqlConfig;
}
@Test
public void shouldNotWriteConfigIfExists() {
    // Given: the config topic already holds a saved config message.
    addPollResult(KafkaConfigStore.CONFIG_MSG_KEY, savedProperties);
    expectRead(consumerBefore);
    // When:
    getKsqlConfig();
    // Then: nothing is (re)written to the topic.
    verifyNoMoreInteractions(producer);
}
/**
 * Converts internal publish info into an API-level {@link Instance},
 * overlaying stored instance metadata when present.
 */
private Instance parseInstance(Service service, InstancePublishInfo instanceInfo) {
    Instance result = InstanceUtil.parseToApiInstance(service, instanceInfo);
    // Metadata is optional; merge it into the result only when it exists.
    Optional<InstanceMetadata> metadata = metadataManager
        .getInstanceMetadata(service, instanceInfo.getMetadataId());
    metadata.ifPresent(instanceMetadata -> InstanceUtil.updateInstanceMetadata(result, instanceMetadata));
    return result;
}
/** Invokes the private parseInstance via reflection and verifies the metadata lookup occurs. */
@Test
void testParseInstance() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
    Class<ServiceStorage> serviceStorageClass = ServiceStorage.class;
    Method parseInstance = serviceStorageClass.getDeclaredMethod("parseInstance", Service.class, InstancePublishInfo.class);
    parseInstance.setAccessible(true);
    Instance instance = (Instance) parseInstance.invoke(serviceStorage, SERVICE, instancePublishInfo);
    Mockito.verify(namingMetadataManager).getInstanceMetadata(SERVICE, instancePublishInfo.getMetadataId());
    assertNotNull(instance);
}
/**
 * Wraps an int[] in a fixed-size List view. Reads delegate to the array, so
 * later changes to the array are visible through the list; the view rejects
 * structural modification (AbstractList's default set/add/remove throw).
 *
 * @param array source array, must not be null
 * @return a List view backed by {@code array}
 */
public static List<Integer> asIntegerList(@Nonnull int[] array) {
    checkNotNull(array, "null array");
    return new AbstractList<>() {
        @Override
        public Integer get(int index) {
            return array[index];
        }

        @Override
        public int size() {
            return array.length;
        }
    };
}
/** The list view must mirror the array's contents in order. */
@Test
public void testToIntegerList_whenNotEmpty() {
    List<Integer> result = asIntegerList(new int[]{1, 2, 3, 4});
    assertEquals(asList(1, 2, 3, 4), result);
}
/**
 * Two-arg UDF overload: extracts every substring matching the pattern by
 * delegating to the three-arg variant with group 0 (the whole match).
 */
@Udf(description = "Returns all substrings of the input that matches the given regex pattern")
public List<String> regexpExtractAll(
    @UdfParameter(description = "The regex pattern") final String pattern,
    @UdfParameter(description = "The input string to apply regex on") final String input
) {
    return regexpExtractAll(pattern, input, 0);
}
/** An unbalanced regex such as "((" must surface as a KsqlFunctionException. */
@Test(expected = KsqlFunctionException.class)
public void shouldHaveBadPattern() {
    udf.regexpExtractAll("(()", "test string");
}
/**
 * Builds default mount options: common defaults from the configuration with
 * both read-only and shared disabled.
 */
public static MountPOptions mountDefaults(AlluxioConfiguration conf) {
    return MountPOptions.newBuilder()
        .setCommonOptions(commonDefaults(conf))
        .setReadOnly(false)
        .setShared(false)
        .build();
}
/** Defaults must be non-null, with shared/read-only off and no extra properties. */
@Test
public void mountOptionsDefaults() {
    MountPOptions options = FileSystemOptionsUtils.mountDefaults(mConf);
    assertNotNull(options);
    assertFalse(options.getShared());
    assertFalse(options.getReadOnly());
    assertEquals(0, options.getPropertiesMap().size());
}
/**
 * Queries the device over NETCONF and assembles its description (type,
 * vendor, software version, serial number, chassis id, host-name annotation).
 * Falls back to a description with "unknown" fields if the request fails.
 */
@Override
public DeviceDescription discoverDeviceDetails() {
    log.debug("Adding description for Waveserver Ai device");
    NetconfSession session = getNetconfSession();
    Device device = getDevice(handler().data().deviceId());
    try {
        XPath xp = XPathFactory.newInstance().newXPath();
        Node node = TEMPLATE_MANAGER.doRequest(session, "discoverDeviceDetails");
        // The chassis MAC is reported colon-separated; strip colons so it can
        // be parsed as a hex long below.
        String chassisId = xp.evaluate("waveserver-chassis/mac-addresses/chassis/base/text()", node);
        chassisId = chassisId.replace(":", "");
        SparseAnnotations annotationDevice = DefaultAnnotations.builder()
            .set("name", xp.evaluate("waveserver-system/host-name/current-host-name/text()", node))
            .build();
        return new DefaultDeviceDescription(device.id().uri(), Device.Type.OTN,
            "Ciena", "WaverserverAi",
            xp.evaluate("waveserver-software/status/active-version/text()", node),
            xp.evaluate("waveserver-chassis/identification/serial-number/text()", node),
            new ChassisId(Long.valueOf(chassisId, 16)),
            (SparseAnnotations) annotationDevice);
    } catch (NetconfException | XPathExpressionException e) {
        // NOTE(review): the throwable is passed as a '{}' placeholder argument,
        // which logs e.toString() rather than the stack trace — consider making
        // it the dedicated last throwable parameter.
        log.error("Unable to retrieve device information for device {}, {}", device.chassisId(), e);
    }
    // Fallback: identity fields unknown; reuse the device's known chassis id.
    return new DefaultDeviceDescription(device.id().uri(), Device.Type.OTN,
        "Ciena", "WaverserverAi", "unknown", "unknown", device.chassisId());
}
/**
 * Replays a canned NETCONF response and checks that the description built
 * from it matches the expected device fields and host-name annotation.
 */
@Test
public void testDiscoverDeviceDetails() {
    XPath xp = XPathFactory.newInstance().newXPath();
    SparseAnnotations expectAnnotation = DefaultAnnotations.builder()
        .set("hostname", "hostnameWaveServer")
        .build();
    DefaultDeviceDescription expectResult = new DefaultDeviceDescription(
        mockDeviceId.uri(), Device.Type.OTN, "Ciena", "WaverserverAi",
        "waveserver-1.1.0.302", "M000", new ChassisId(0L), expectAnnotation);
    try {
        // Parse the canned reply the same way the driver would.
        Node node = doRequest("/response/discoverDeviceDetails.xml", "/rpc-reply/data");
        SparseAnnotations annotationDevice = DefaultAnnotations.builder()
            .set("hostname", xp.evaluate("waveserver-system/host-name/current-host-name/text()", node))
            .build();
        DefaultDeviceDescription result = new DefaultDeviceDescription(
            mockDeviceId.uri(), Device.Type.OTN, "Ciena", "WaverserverAi",
            xp.evaluate("waveserver-software/status/active-version/text()", node),
            xp.evaluate("waveserver-chassis/identification/serial-number/text()", node),
            new ChassisId(0L), annotationDevice);
        assertEquals(expectResult, result);
    } catch (XPathExpressionException e) {
        e.printStackTrace();
    }
}
/** Returns the shared Java-to-SQL type converter singleton. */
public static JavaToSqlTypeConverter javaToSqlConverter() {
    return JAVA_TO_SQL_CONVERTER;
}
/** Both mutable and immutable java.util.Map implementations map to SqlBaseType.MAP. */
@Test
public void shouldGetSqlMapForImplementationsOfJavaMap() {
    ImmutableList.<Class<?>>of(
        HashMap.class,
        ImmutableMap.class
    ).forEach(javaType -> {
        assertThat(javaToSqlConverter().toSqlType(javaType), is(SqlBaseType.MAP));
    });
}
/**
 * Writes an uncompressed layer blob into the cache: stages it in a temporary
 * directory, atomically moves it into its final layer directory, and
 * optionally records a selector file pointing at the layer digest.
 *
 * @param uncompressedLayerBlob the uncompressed layer content
 * @param selector optional selector digest to associate with the layer
 * @return the cached layer handle (digest, diff id, size, blob)
 * @throws IOException if writing or moving the layer files fails
 */
CachedLayer writeUncompressed(Blob uncompressedLayerBlob, @Nullable DescriptorDigest selector) throws IOException {
    // Creates the layers directory if it doesn't exist.
    Files.createDirectories(cacheStorageFiles.getLayersDirectory());
    // Creates the temporary directory. The temporary directory must be in the same FileStore as the
    // final location for Files.move to work.
    Files.createDirectories(cacheStorageFiles.getTemporaryDirectory());
    try (TempDirectoryProvider tempDirectoryProvider = new TempDirectoryProvider()) {
        Path temporaryLayerDirectory =
            tempDirectoryProvider.newDirectory(cacheStorageFiles.getTemporaryDirectory());
        // Writes the layer file to the temporary directory.
        WrittenLayer writtenLayer =
            writeUncompressedLayerBlobToDirectory(uncompressedLayerBlob, temporaryLayerDirectory);
        // Moves the temporary directory to the final location.
        moveIfDoesNotExist(
            temporaryLayerDirectory, cacheStorageFiles.getLayerDirectory(writtenLayer.layerDigest));
        // Updates cachedLayer with the blob information.
        Path layerFile = cacheStorageFiles.getLayerFile(writtenLayer.layerDigest, writtenLayer.layerDiffId);
        CachedLayer.Builder cachedLayerBuilder = CachedLayer.builder()
            .setLayerDigest(writtenLayer.layerDigest)
            .setLayerDiffId(writtenLayer.layerDiffId)
            .setLayerSize(writtenLayer.layerSize)
            .setLayerBlob(Blobs.from(layerFile));
        // Write the selector file.
        if (selector != null) {
            writeSelector(selector, writtenLayer.layerDigest);
        }
        return cachedLayerBuilder.build();
    }
}
/** Writing an uncompressed blob must produce both the layer and the selector file. */
@Test
public void testWriteUncompressed() throws IOException {
    Blob uncompressedLayerBlob = Blobs.from("uncompressedLayerBlob");
    // The layer digest is computed over the *compressed* layer content.
    DescriptorDigest layerDigest = getDigest(compress(uncompressedLayerBlob)).getDigest();
    DescriptorDigest selector = getDigest(Blobs.from("selector")).getDigest();
    CachedLayer cachedLayer = cacheStorageWriter.writeUncompressed(uncompressedLayerBlob, selector);
    verifyCachedLayer(cachedLayer, uncompressedLayerBlob, compress(uncompressedLayerBlob));
    // Verifies that the files are present.
    Path selectorFile = cacheStorageFiles.getSelectorFile(selector);
    Assert.assertTrue(Files.exists(selectorFile));
    // The selector file's content is the layer digest hash it points to.
    Assert.assertEquals(layerDigest.getHash(), Blobs.writeToString(Blobs.from(selectorFile)));
}
/**
 * Applies the configured transformer to the raw table, wrapping any failure
 * in a CucumberDataTableException that names the target type and includes
 * the offending table for diagnostics.
 */
public Object transform(List<List<String>> raw) {
    try {
        return transformer.transform(raw);
    } catch (Throwable throwable) {
        // Throwable is caught deliberately so user-supplied transformers that
        // throw Errors still produce a descriptive message; the cause is kept.
        throw new CucumberDataTableException(
            String.format("'%s' could not transform%n%s", toCanonical(), DataTable.create(raw)), throwable);
    }
}
/** A single-cell table is transformed cell-wise (String "12" -> Integer 12). */
@Test
void shouldTransformATableCell() {
    assertThat(singleCellType.transform(singletonList(singletonList("12"))),
        equalTo(singletonList(singletonList(12))));
}
/**
 * Records the elapsed statement time (nanoseconds) into the timer derived
 * from the statement context.
 */
@Override
public void collect(long elapsedTime, StatementContext ctx) {
    final Timer timer = getTimer(ctx);
    timer.update(elapsedTime, TimeUnit.NANOSECONDS);
}
/** With no raw SQL in the context, the timer registers under the fallback "sql.empty" name. */
@Test
public void updatesTimerForNoRawSql() throws Exception {
    final StatementNameStrategy strategy = new SmartNameStrategy();
    final InstrumentedTimingCollector collector = new InstrumentedTimingCollector(registry, strategy);
    final StatementContext ctx = mock(StatementContext.class);
    collector.collect(TimeUnit.SECONDS.toNanos(2), ctx);
    final String name = strategy.getStatementName(ctx);
    final Timer timer = registry.timer(name);
    assertThat(name)
        .isEqualTo(name("sql", "empty"));
    // 2 seconds expressed in nanoseconds.
    assertThat(timer.getSnapshot().getMax())
        .isEqualTo(2000000000);
}
/**
 * Convenience overload resolving the Java type name for a schema.
 * NOTE(review): the boolean's exact meaning is defined by the two-arg
 * overload, which is not visible here — confirm before documenting further.
 */
public String javaType(Schema schema) {
    return javaType(schema, true);
}
/** Logical date/time types must map to java.time classes, not raw int/long. */
@Test
void javaTypeWithDateTimeTypes() throws Exception {
    SpecificCompiler compiler = createCompiler();
    Schema dateSchema = LogicalTypes.date().addToSchema(Schema.create(Schema.Type.INT));
    Schema timeSchema = LogicalTypes.timeMillis().addToSchema(Schema.create(Schema.Type.INT));
    Schema timeMicrosSchema = LogicalTypes.timeMicros().addToSchema(Schema.create(Schema.Type.LONG));
    Schema timestampSchema = LogicalTypes.timestampMillis().addToSchema(Schema.create(Schema.Type.LONG));
    Schema timestampMicrosSchema = LogicalTypes.timestampMicros().addToSchema(Schema.create(Schema.Type.LONG));
    Schema timestampNanosSchema = LogicalTypes.timestampNanos().addToSchema(Schema.create(Schema.Type.LONG));
    // Date/time types should always use upper level java classes
    assertEquals("java.time.LocalDate", compiler.javaType(dateSchema), "Should use java.time.LocalDate for date type");
    assertEquals("java.time.LocalTime", compiler.javaType(timeSchema), "Should use java.time.LocalTime for time-millis type");
    assertEquals("java.time.Instant", compiler.javaType(timestampSchema), "Should use java.time.Instant for timestamp-millis type");
    assertEquals("java.time.LocalTime", compiler.javaType(timeMicrosSchema), "Should use java.time.LocalTime for time-micros type");
    assertEquals("java.time.Instant", compiler.javaType(timestampMicrosSchema), "Should use java.time.Instant for timestamp-micros type");
    assertEquals("java.time.Instant", compiler.javaType(timestampNanosSchema), "Should use java.time.Instant for timestamp-nanos type");
}
/**
 * Parses a Lucene query string, capturing both the raw lexer tokens and the
 * individual terms (with their source tokens) referenced by the query.
 *
 * @throws ParseException if the query is not valid Lucene syntax
 */
public ParsedQuery parse(final String query) throws ParseException {
    final TokenCollectingQueryParser parser = new TokenCollectingQueryParser(ParsedTerm.DEFAULT_FIELD, ANALYZER);
    parser.setSplitOnWhitespace(true);
    parser.setAllowLeadingWildcard(allowLeadingWildcard);
    final Query parsed = parser.parse(query);
    final ParsedQuery.Builder builder = ParsedQuery.builder().query(query);
    builder.tokensBuilder().addAll(parser.getTokens());
    // Walk the parsed query tree to collect the terms it actually references.
    final TermCollectingQueryVisitor visitor = new TermCollectingQueryVisitor(ANALYZER, parser.getTokenLookup());
    parsed.visit(visitor);
    builder.termsBuilder().addAll(visitor.getParsedTerms());
    return builder.build();
}
/** Token positions must track line breaks: terms on line 2 report beginLine/endLine 2. */
@Test
void testMultilineQuery() throws ParseException {
    final ParsedQuery query = parser.parse("foo:bar AND\nlorem:ipsum");
    assertThat(query.tokens())
        .anySatisfy(token -> {
            assertThat(token.image()).isEqualTo("foo");
            assertThat(token.beginLine()).isEqualTo(1);
            assertThat(token.beginColumn()).isEqualTo(0);
            assertThat(token.endLine()).isEqualTo(1);
            assertThat(token.endColumn()).isEqualTo(3);
        })
        .anySatisfy(token -> {
            assertThat(token.image()).isEqualTo("lorem");
            assertThat(token.beginLine()).isEqualTo(2);
            assertThat(token.beginColumn()).isEqualTo(0);
            assertThat(token.endLine()).isEqualTo(2);
            assertThat(token.endColumn()).isEqualTo(5);
        })
        .anySatisfy(token -> {
            assertThat(token.image()).isEqualTo("ipsum");
            assertThat(token.beginLine()).isEqualTo(2);
            assertThat(token.beginColumn()).isEqualTo(6);
            assertThat(token.endLine()).isEqualTo(2);
            assertThat(token.endColumn()).isEqualTo(11);
        });
}
/** Removes all elements and resets the cached size counter to zero. */
public void clear() {
    mSize = 0;
    mElements.clear();
}
/** clear() must empty a non-empty stack. */
@Test
void testClear() throws Exception {
    Stack<String> stack = new Stack<String>();
    stack.push("one");
    stack.push("two");
    assertThat(stack.isEmpty(), is(false));
    stack.clear();
    assertThat(stack.isEmpty(), is(true));
}
/** Pages dict types straight through to the mapper; filtering happens in SQL. */
@Override
public PageResult<DictTypeDO> getDictTypePage(DictTypePageReqVO pageReqVO) {
    return dictTypeMapper.selectPage(pageReqVO);
}
@Test
public void testGetDictTypePage() {
    // Mock data: the one row the query is expected to match.
    DictTypeDO dbDictType = randomPojo(DictTypeDO.class, o -> { // matched by the query below
        o.setName("yunai");
        o.setType("芋艿");
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
        o.setCreateTime(buildTime(2021, 1, 15));
    });
    dictTypeMapper.insert(dbDictType);
    // Row whose name does not match.
    dictTypeMapper.insert(cloneIgnoreId(dbDictType, o -> o.setName("tudou")));
    // Row whose type does not match.
    dictTypeMapper.insert(cloneIgnoreId(dbDictType, o -> o.setType("土豆")));
    // Row whose status does not match.
    dictTypeMapper.insert(cloneIgnoreId(dbDictType, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
    // Row whose createTime falls outside the queried range.
    dictTypeMapper.insert(cloneIgnoreId(dbDictType, o -> o.setCreateTime(buildTime(2021, 1, 1))));
    // Prepare query parameters: partial match on name/type, exact status, time range.
    DictTypePageReqVO reqVO = new DictTypePageReqVO();
    reqVO.setName("nai");
    reqVO.setType("艿");
    reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus());
    reqVO.setCreateTime(buildBetweenTime(2021, 1, 10, 2021, 1, 20));
    // Invoke.
    PageResult<DictTypeDO> pageResult = dictTypeService.getDictTypePage(reqVO);
    // Assert: only the first row matches all filters.
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(dbDictType, pageResult.getList().get(0));
}
/**
 * Lists column metadata for every table matching the prefix. A fully
 * qualified prefix addresses exactly one table; otherwise all tables in the
 * (optional) schema are listed. Tables that vanish mid-listing are skipped.
 */
@Override
public Map<SchemaTableName, List<ColumnMetadata>> listTableColumns(ConnectorSession session, SchemaTablePrefix prefix) {
    ImmutableMap.Builder<SchemaTableName, List<ColumnMetadata>> columns = ImmutableMap.builder();
    List<SchemaTableName> tables;
    if (prefix.getSchemaName() != null && prefix.getTableName() != null) {
        tables = ImmutableList.of(new SchemaTableName(prefix.getSchemaName(), prefix.getTableName()));
    } else {
        tables = listTables(session, Optional.ofNullable(prefix.getSchemaName()));
    }
    for (SchemaTableName tableName : tables) {
        try {
            JdbcTableHandle tableHandle = jdbcMetadataCache.getTableHandle(session, tableName);
            if (tableHandle == null) {
                continue;
            }
            // NOTE(review): ImmutableMap.Builder.build() throws on duplicate
            // keys — this assumes listTables never yields the same table twice.
            columns.put(tableName, getTableMetadata(session, tableHandle).getColumns());
        } catch (TableNotFoundException e) {
            // table disappeared during listing operation
        }
    }
    return columns.build();
}
/** Fully qualified prefix yields one table; schema-only prefix yields all its tables. */
@Test
public void testListTableColumns() {
    SchemaTableName tpchOrders = new SchemaTableName("tpch", "orders");
    ImmutableList<ColumnMetadata> tpchOrdersColumnMetadata = ImmutableList.of(
        ColumnMetadata.builder().setName("orderkey").setType(BIGINT).setNullable(false).build(),
        ColumnMetadata.builder().setName("custkey").setType(BIGINT).setNullable(true).build());
    SchemaTableName tpchLineItem = new SchemaTableName("tpch", "lineitem");
    ImmutableList<ColumnMetadata> tpchLineItemColumnMetadata = ImmutableList.of(
        ColumnMetadata.builder().setName("orderkey").setType(BIGINT).setNullable(false).build(),
        ColumnMetadata.builder().setName("partkey").setType(BIGINT).setNullable(true).build());
    //List columns for a given schema and table
    Map<SchemaTableName, List<ColumnMetadata>> tpchOrdersColumns =
        metadata.listTableColumns(SESSION, new SchemaTablePrefix("tpch", "orders"));
    assertThat(tpchOrdersColumns)
        .containsOnly(
            entry(tpchOrders, tpchOrdersColumnMetadata));
    //List columns for a given schema
    Map<SchemaTableName, List<ColumnMetadata>> tpchColumns =
        metadata.listTableColumns(SESSION, new SchemaTablePrefix("tpch"));
    assertThat(tpchColumns)
        .containsOnly(
            entry(tpchOrders, tpchOrdersColumnMetadata),
            entry(tpchLineItem, tpchLineItemColumnMetadata));
}
/**
 * Builds a PluginDefinition from a plugin class's annotations.
 *
 * @throws IllegalStateException if the class lacks the mandatory @PluginInfo
 */
public static PluginDefinition forPlugin(Class<? extends TsunamiPlugin> pluginClazz) {
    Optional<PluginInfo> pluginInfo = Optional.ofNullable(pluginClazz.getAnnotation(PluginInfo.class));
    Optional<ForServiceName> targetServiceName = Optional.ofNullable(pluginClazz.getAnnotation(ForServiceName.class));
    Optional<ForSoftware> targetSoftware = Optional.ofNullable(pluginClazz.getAnnotation(ForSoftware.class));
    boolean isForWebService = pluginClazz.isAnnotationPresent(ForWebService.class);
    Optional<ForOperatingSystemClass> targetOperatingSystemClass =
        Optional.ofNullable(pluginClazz.getAnnotation(ForOperatingSystemClass.class));
    // @PluginInfo is the only mandatory annotation; the rest are optional filters.
    checkState(
        pluginInfo.isPresent(),
        "A @PluginInfo annotation is required when creating a PluginDefinition for plugin: %s",
        pluginClazz);
    return new AutoValue_PluginDefinition(
        pluginInfo.get(), targetServiceName, targetSoftware, isForWebService, targetOperatingSystemClass);
}
/** A plugin class without @PluginInfo must be rejected with IllegalStateException. */
@Test
public void forPlugin_whenPluginHasNoAnnotation_throwsException() {
    assertThrows(
        IllegalStateException.class, () -> PluginDefinition.forPlugin(NoAnnotationPlugin.class));
}
/**
 * Creates an evaluator for the given applied transform; the input bundle is
 * ignored here because the evaluator is built from the transform alone.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
@Override
public @Nullable <InputT> TransformEvaluator<InputT> forApplication(
    AppliedPTransform<?, ?, ?> application, CommittedBundle<?> inputBundle) throws IOException {
    return createEvaluator((AppliedPTransform) application);
}
/**
 * An unsplittable source must still emit every element exactly once even
 * when the factory is configured (0-byte threshold) to attempt dynamic
 * splitting immediately.
 */
@Test
public void boundedSourceEvaluatorDynamicSplitsUnsplittable() throws Exception {
    BoundedReadEvaluatorFactory factory = new BoundedReadEvaluatorFactory(context, options, 0L);
    PCollection<Long> read =
        p.apply(Read.from(SourceTestUtils.toUnsplittableSource(CountingSource.upTo(10L))));
    SplittableParDo.convertReadBasedSplittableDoFnsToPrimitiveReads(p);
    AppliedPTransform<?, ?, ?> transform = DirectGraphs.getProducer(read);
    // Fix: the original stubbed createRootBundle() twice with separate when()
    // calls; in Mockito the second stubbing simply replaces the first, so a
    // single stubbing is equivalent and less confusing.
    when(context.createRootBundle()).thenReturn(bundleFactory.createRootBundle());
    Collection<CommittedBundle<?>> initialInputs =
        new BoundedReadEvaluatorFactory.InputProvider(context, options)
            .getInitialInputs(transform, 1);
    UncommittedBundle<Long> outputBundle = bundleFactory.createBundle(read);
    when(context.createBundle(read)).thenReturn(outputBundle);
    List<WindowedValue<?>> outputs = new ArrayList<>();
    for (CommittedBundle<?> shardBundle : initialInputs) {
        TransformEvaluator<?> evaluator = factory.forApplication(transform, null);
        for (WindowedValue<?> shard : shardBundle.getElements()) {
            evaluator.processElement((WindowedValue) shard);
        }
        TransformResult<?> result = evaluator.finishBundle();
        // Bounded sources hold the watermark at the end of time once finished.
        assertThat(result.getWatermarkHold(), equalTo(BoundedWindow.TIMESTAMP_MAX_VALUE));
        assertThat(
            Iterables.size(result.getOutputBundles()),
            equalTo(Iterables.size(shardBundle.getElements())));
        for (UncommittedBundle<?> output : result.getOutputBundles()) {
            CommittedBundle<?> committed = output.commit(BoundedWindow.TIMESTAMP_MAX_VALUE);
            for (WindowedValue<?> val : committed.getElements()) {
                outputs.add(val);
            }
        }
    }
    // All ten elements, order-independent.
    assertThat(
        outputs,
        containsInAnyOrder(
            gw(1L), gw(2L), gw(4L), gw(8L), gw(9L), gw(7L), gw(6L), gw(5L), gw(3L), gw(0L)));
}
/** Identifies this provider as handling PMML regression models. */
@Override
public PMML_MODEL getPMMLModelType() {
    logger.trace("getPMMLModelType");
    return PMML_MODEL.REGRESSION_MODEL;
}
/** The provider must report the REGRESSION_MODEL type. */
@Test
void getPMMLModelType() {
    assertThat(PROVIDER.getPMMLModelType()).isEqualTo(PMML_MODEL.REGRESSION_MODEL);
}
/**
 * Validates that every parameter key declared by the template is present
 * (non-null) in the supplied parameter map, raising
 * NOTIFY_SEND_TEMPLATE_PARAM_MISS naming the first missing key.
 */
@VisibleForTesting
public void validateTemplateParams(NotifyTemplateDO template, Map<String, Object> templateParams) {
    template.getParams().forEach(key -> {
        Object value = templateParams.get(key);
        if (value == null) {
            throw exception(NOTIFY_SEND_TEMPLATE_PARAM_MISS, key);
        }
    });
}
@Test
public void testCheckTemplateParams_paramMiss() {
    // Prepare arguments: a template requiring "code" with an empty param map.
    NotifyTemplateDO template = randomPojo(NotifyTemplateDO.class,
        o -> o.setParams(Lists.newArrayList("code")));
    Map<String, Object> templateParams = new HashMap<>();
    // No extra mocking needed.
    // Invoke and assert the expected service exception for the missing key.
    assertServiceException(() -> notifySendService.validateTemplateParams(template, templateParams),
        NOTIFY_SEND_TEMPLATE_PARAM_MISS, "code");
}
/**
 * Reports whether any registered remoting parser recognises the given bean
 * as a remote reference bean.
 *
 * @param bean the candidate bean instance
 * @param beanName the bean's registered name
 * @return true if at least one parser identifies the bean as a reference
 */
public boolean isReference(Object bean, String beanName) {
    boolean recognised = false;
    for (RemotingParser parser : allRemotingParsers) {
        if (parser.isReference(bean, beanName)) {
            recognised = true;
            break;
        }
    }
    return recognised;
}
/** A plain bean recognised by no parser must not be reported as a reference. */
@Test
public void testIsReferenceFail() {
    SimpleBean remoteBean = new SimpleBean();
    assertFalse(remotingParser.isReference(remoteBean, remoteBean.getClass().getName()));
}
/**
 * Validates that the given property is present and exactly one character
 * long, raising AlgorithmInitializationException otherwise.
 */
public static void checkSingleChar(final Properties props, final String propKey, final MaskAlgorithm<?, ?> algorithm) {
    checkRequired(props, propKey, algorithm);
    ShardingSpherePreconditions.checkState(1 == props.getProperty(propKey).length(),
        () -> new AlgorithmInitializationException(algorithm, "%s's length must be one", propKey));
}
/** A missing property key must fail the required-key precheck. */
@Test
void assertCheckSingleCharFailedWithoutKey() {
    Properties props = new Properties();
    assertThrows(AlgorithmInitializationException.class,
        () -> MaskAlgorithmPropertiesChecker.checkSingleChar(props, "key", mock(MaskAlgorithm.class)));
}
/**
 * Atomically assigns a random phone-number identifier (PNI) to the given
 * E.164 number unless one already exists, and returns the stored value
 * either way. Idempotent via DynamoDB's if_not_exists update expression.
 */
@VisibleForTesting
UUID generatePhoneNumberIdentifierIfNotExists(final String phoneNumber) {
    final UpdateItemResponse response = SET_PNI_TIMER.record(() -> dynamoDbClient.updateItem(UpdateItemRequest.builder()
        .tableName(tableName)
        .key(Map.of(KEY_E164, AttributeValues.fromString(phoneNumber)))
        // Writes the new random PNI only when the attribute is absent.
        .updateExpression("SET #pni = if_not_exists(#pni, :pni)")
        .expressionAttributeNames(Map.of("#pni", ATTR_PHONE_NUMBER_IDENTIFIER))
        .expressionAttributeValues(Map.of(":pni", AttributeValues.fromUUID(UUID.randomUUID())))
        .returnValues(ReturnValue.ALL_NEW)
        .build()));
    return AttributeValues.getUUID(response.attributes(), ATTR_PHONE_NUMBER_IDENTIFIER, null);
}
/** Calling twice for the same number must return the same identifier (idempotency). */
@Test
void generatePhoneNumberIdentifierIfNotExists() {
    final String number = "+18005551234";
    assertEquals(phoneNumberIdentifiers.generatePhoneNumberIdentifierIfNotExists(number),
        phoneNumberIdentifiers.generatePhoneNumberIdentifierIfNotExists(number));
}
/** Returns the precomputed path string representation. */
@Override
public String toString() {
    return pathString;
}
/** Directory resources end with the separator; file resources must not. */
@Test
public void testToString() throws Exception {
    File someFile = tmpFolder.newFile("somefile");
    LocalResourceId fileResource = LocalResourceId.fromPath(someFile.toPath(), /* isDirectory */ false);
    assertThat(fileResource.toString(), not(endsWith(File.separator)));
    assertThat(fileResource.toString(), containsString("somefile"));
    assertThat(fileResource.toString(), startsWith(tmpFolder.getRoot().getAbsolutePath()));
    LocalResourceId dirResource = LocalResourceId.fromPath(someFile.toPath(), /* isDirectory */ true);
    assertThat(dirResource.toString(), endsWith(File.separator));
    assertThat(dirResource.toString(), containsString("somefile"));
    assertThat(dirResource.toString(), startsWith(tmpFolder.getRoot().getAbsolutePath()));
}
/**
 * Pre-processes a segment before load: adds default columns per the schema,
 * rebuilds single-column indexes (forward index first), generates min/max
 * column metadata, and finally creates/updates star-trees. Empty segments
 * are skipped entirely.
 *
 * @throws Exception if any index update or metadata refresh fails
 */
public void process() throws Exception {
    if (_segmentMetadata.getTotalDocs() == 0) {
        LOGGER.info("Skip preprocessing empty segment: {}", _segmentMetadata.getName());
        return;
    }
    // Segment processing has to be done with a local directory.
    File indexDir = new File(_indexDirURI);
    // This fixes the issue of temporary files not getting deleted after creating new inverted indexes.
    removeInvertedIndexTempFiles(indexDir);
    try (SegmentDirectory.Writer segmentWriter = _segmentDirectory.createWriter()) {
        // Update default columns according to the schema.
        if (_schema != null) {
            DefaultColumnHandler defaultColumnHandler = DefaultColumnHandlerFactory
                .getDefaultColumnHandler(indexDir, _segmentMetadata, _indexLoadingConfig, _schema, segmentWriter);
            defaultColumnHandler.updateDefaultColumns();
            _segmentMetadata = new SegmentMetadataImpl(indexDir);
            _segmentDirectory.reloadMetadata();
        } else {
            LOGGER.warn("Skip creating default columns for segment: {} without schema", _segmentMetadata.getName());
        }
        // Update single-column indices, like inverted index, json index etc.
        List<IndexHandler> indexHandlers = new ArrayList<>();
        // We cannot just create all the index handlers in a random order.
        // Specifically, ForwardIndexHandler needs to be executed first. This is because it modifies the segment metadata
        // while rewriting forward index to create a dictionary. Some other handlers (like the range one) assume that
        // metadata has already been modified by ForwardIndexHandler.
        IndexHandler forwardHandler = createHandler(StandardIndexes.forward());
        indexHandlers.add(forwardHandler);
        forwardHandler.updateIndices(segmentWriter);
        // Now that ForwardIndexHandler.updateIndices has been updated, we can run all other indexes in any order
        _segmentMetadata = new SegmentMetadataImpl(indexDir);
        _segmentDirectory.reloadMetadata();
        for (IndexType<?, ?, ?> type : IndexService.getInstance().getAllIndexes()) {
            if (type != StandardIndexes.forward()) {
                IndexHandler handler = createHandler(type);
                indexHandlers.add(handler);
                handler.updateIndices(segmentWriter);
                // Other IndexHandler classes may modify the segment metadata while creating a temporary forward
                // index to generate their respective indexes from if the forward index was disabled. This new metadata is
                // needed to construct other indexes like RangeIndex.
                _segmentMetadata = _segmentDirectory.getSegmentMetadata();
            }
        }
        // Perform post-cleanup operations on the index handlers.
        for (IndexHandler handler : indexHandlers) {
            handler.postUpdateIndicesCleanup(segmentWriter);
        }
        // Add min/max value to column metadata according to the prune mode.
        ColumnMinMaxValueGeneratorMode columnMinMaxValueGeneratorMode =
            _indexLoadingConfig.getColumnMinMaxValueGeneratorMode();
        if (columnMinMaxValueGeneratorMode != ColumnMinMaxValueGeneratorMode.NONE) {
            ColumnMinMaxValueGenerator columnMinMaxValueGenerator =
                new ColumnMinMaxValueGenerator(_segmentMetadata, segmentWriter, columnMinMaxValueGeneratorMode);
            columnMinMaxValueGenerator.addColumnMinMaxValue();
            // NOTE: This step may modify the segment metadata. When adding new steps after this, un-comment the next line.
            // _segmentMetadata = new SegmentMetadataImpl(indexDir);
        }
        segmentWriter.save();
    }
    // Startree creation will load the segment again, so we need to close and re-open the segment writer to make sure
    // that the other required indices (e.g. forward index) are up-to-date.
    try (SegmentDirectory.Writer segmentWriter = _segmentDirectory.createWriter()) {
        // Create/modify/remove star-trees if required.
        processStarTrees(indexDir);
        _segmentDirectory.reloadMetadata();
        segmentWriter.save();
    }
}
/** Enabling an FST index on a raw (non-dictionary) column must be rejected for both v1 and v3 segments. */
@Test
public void testEnableFSTIndexOnExistingColumnRaw() throws Exception {
    Set<String> fstColumns = new HashSet<>();
    fstColumns.add(EXISTING_STRING_COL_RAW);
    _indexLoadingConfig.setFSTIndexColumns(fstColumns);
    constructV3Segment();
    SegmentDirectory segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader()
        .load(_indexDir.toURI(),
            new SegmentDirectoryLoaderContext.Builder().setSegmentDirectoryConfigs(_configuration).build());
    SegmentPreProcessor v3Processor =
        new SegmentPreProcessor(segmentDirectory, _indexLoadingConfig, _newColumnsSchemaWithFST);
    expectThrows(UnsupportedOperationException.class, () -> v3Processor.process());
    constructV1Segment(Collections.emptyList(), Collections.emptyList(), Collections.emptyList(),
        Collections.emptyList());
    segmentDirectory = SegmentDirectoryLoaderRegistry.getDefaultSegmentDirectoryLoader().load(_indexDir.toURI(),
        new SegmentDirectoryLoaderContext.Builder().setSegmentDirectoryConfigs(_configuration).build());
    SegmentPreProcessor v1Processor =
        new SegmentPreProcessor(segmentDirectory, _indexLoadingConfig, _newColumnsSchemaWithFST);
    expectThrows(UnsupportedOperationException.class, () -> v1Processor.process());
}
/** Decodes a single indexed event parameter from its raw encoded form. */
public static <T extends Type> Type decodeIndexedValue(
    String rawInput, TypeReference<T> typeReference) {
    return decoder.decodeEventParameter(rawInput, typeReference);
}
/** Round-trip: an encoded Uint256 must decode back to an equal value. */
@Test
public void testDecodeIndexedUint256Value() {
    Uint256 value = new Uint256(BigInteger.TEN);
    String encoded = TypeEncoder.encodeNumeric(value);
    // Fix: JUnit's assertEquals takes (expected, actual); the original call
    // had the arguments reversed, producing a misleading failure message.
    assertEquals(
        value,
        FunctionReturnDecoder.decodeIndexedValue(encoded, new TypeReference<Uint256>() {}));
}
/**
 * Invokes a no-arg method reflectively and casts the result to long.
 * Returns -1 when the method is null or when invocation/casting fails;
 * failures are deliberately swallowed (best-effort helper).
 */
public static long invokeAndReturnLong(Method method, Object targetObj) {
    try {
        return method != null ? (long) method.invoke(targetObj) : -1;
    } catch (Exception e) {
        return -1;
    }
}
/** A long-returning method yields its value; a double-returning one fails the cast and yields -1. */
@Test
void invokeAndReturnLong() {
    InternalMethod internalMethod = new InternalMethod();
    assertEquals(100L, MethodUtil.invokeAndReturnLong(LONG_METHOD, internalMethod));
    assertNotEquals(100L, MethodUtil.invokeAndReturnLong(DOUBLE_METHOD, internalMethod));
}
/**
 * Walks the topic trie token by token and returns the CNode at the exact
 * topic path, or empty if the path does not fully exist.
 */
Optional<CNode> lookup(Topic topic) {
    INode inode = this.root;
    Token token = topic.headToken();
    while (!topic.isEmpty()) {
        Optional<INode> child = inode.mainNode().childOf(token);
        if (!child.isPresent()) {
            // Path broken before the topic was consumed.
            break;
        }
        topic = topic.exceptHeadToken();
        inode = child.get();
        token = topic.headToken();
    }
    // NOTE(review): inode starts at root and is only reassigned to present
    // children, so the null check looks defensive/unreachable — confirm.
    if (inode == null || !topic.isEmpty()) {
        return Optional.empty();
    }
    return Optional.of(inode.mainNode());
}
/** Looking up a topic absent from the tree must return empty. */
@Test
public void testLookup() {
    final SubscriptionRequest existingSubscription = clientSubOnTopic("TempSensor1", "/temp");
    sut.addToTree(existingSubscription);
    //Exercise
    final Optional<CNode> matchedNode = sut.lookup(asTopic("/humidity"));
    //Verify
    assertFalse(matchedNode.isPresent(), "Node on path /humidity can't be present");
}
/**
 * Concatenates Avro data files into one output file. All inputs must share
 * the same schema (exit 1), the same non-reserved metadata (exit 2) and the
 * same codec (exit 3); blocks are appended without recompression.
 *
 * @return 0 on success, 1/2/3 on schema/metadata/codec mismatch
 */
@Override
public int run(InputStream in, PrintStream out, PrintStream err, List<String> args) throws Exception {
    if (args.isEmpty()) {
        printHelp(out);
        return 0;
    }
    OutputStream output = out;
    if (args.size() > 1) {
        // The last argument is the output file (or "-" for stdout).
        output = Util.fileOrStdout(args.get(args.size() - 1), out);
        args = args.subList(0, args.size() - 1);
    }
    DataFileWriter<GenericRecord> writer = new DataFileWriter<>(new GenericDatumWriter<>());
    Schema schema = null;
    Map<String, byte[]> metadata = new TreeMap<>();
    String inputCodec = null;
    for (String inFile : expandsInputFiles(args)) {
        InputStream input = Util.fileOrStdin(inFile, in);
        DataFileStream<GenericRecord> reader = new DataFileStream<>(input, new GenericDatumReader<>());
        if (schema == null) {
            // this is the first file - set up the writer, and store the
            // Schema & metadata we'll use.
            schema = reader.getSchema();
            for (String key : reader.getMetaKeys()) {
                if (!DataFileWriter.isReservedMeta(key)) {
                    byte[] metadatum = reader.getMeta(key);
                    metadata.put(key, metadatum);
                    writer.setMeta(key, metadatum);
                }
            }
            inputCodec = reader.getMetaString(DataFileConstants.CODEC);
            if (inputCodec == null) {
                inputCodec = DataFileConstants.NULL_CODEC;
            }
            writer.setCodec(CodecFactory.fromString(inputCodec));
            writer.create(schema, output);
        } else {
            // check that we're appending to the same schema & metadata.
            if (!schema.equals(reader.getSchema())) {
                err.println("input files have different schemas");
                reader.close();
                return 1;
            }
            for (String key : reader.getMetaKeys()) {
                if (!DataFileWriter.isReservedMeta(key)) {
                    byte[] metadatum = reader.getMeta(key);
                    byte[] writersMetadatum = metadata.get(key);
                    if (!Arrays.equals(metadatum, writersMetadatum)) {
                        err.println("input files have different non-reserved metadata");
                        reader.close();
                        return 2;
                    }
                }
            }
            String thisCodec = reader.getMetaString(DataFileConstants.CODEC);
            if (thisCodec == null) {
                thisCodec = DataFileConstants.NULL_CODEC;
            }
            if (!inputCodec.equals(thisCodec)) {
                err.println("input files have different codecs");
                reader.close();
                return 3;
            }
        }
        // Append raw blocks without recompressing.
        writer.appendAllFrom(reader, /* recompress */ false);
        reader.close();
    }
    writer.close();
    return 0;
}
/** Concatenating a deflate file with a null-codec file must fail with exit code 3. */
@Test
void differentCodecFail() throws Exception {
    Map<String, String> metadata = new HashMap<>();
    metadata.put("myMetaKey", "myMetaValue");
    File input1 = generateData(name.getMethodName() + "-1.avro", Type.STRING, metadata, DEFLATE);
    File input2 = generateData(name.getMethodName() + "-2.avro", Type.STRING, metadata, CodecFactory.nullCodec());
    File output = new File(OUTPUT_DIR, name.getMethodName() + ".avro");
    List<String> args = asList(input1.getAbsolutePath(), input2.getAbsolutePath(), output.getAbsolutePath());
    int returnCode = new ConcatTool().run(System.in, System.out, System.err, args);
    assertEquals(3, returnCode);
}
/**
 * Delegates getCrossReference to the underlying metadata, mapping only the
 * parent catalog/schema to their actual names (the foreign side is passed
 * through unmapped) and wrapping the result set.
 */
@Override
public ResultSet getCrossReference(final String parentCatalog,
    final String parentSchema, final String parentTable, final String foreignCatalog,
    final String foreignSchema, final String foreignTable) throws SQLException {
    return createDatabaseMetaDataResultSet(
        getDatabaseMetaData().getCrossReference(getActualCatalog(parentCatalog),
            getActualSchema(parentSchema), parentTable, foreignCatalog, foreignSchema, foreignTable));
}
// Verifies that getCrossReference wraps the delegate's result set in a DatabaseMetaDataResultSet.
@Test void assertGetCrossReference() throws SQLException { when(databaseMetaData.getCrossReference("test", null, null, null, null, null)).thenReturn(resultSet); assertThat(shardingSphereDatabaseMetaData.getCrossReference("test", null, null, null, null, null), instanceOf(DatabaseMetaDataResultSet.class)); }
/**
 * Resolves the checkpoint storage to use for a job.
 *
 * <p>Precedence (highest first): a legacy state backend that itself implements
 * {@code CheckpointStorage}; storage passed via the application
 * ({@code fromApplication}, possibly re-configured with the cluster config); the
 * {@code CheckpointingOptions.CHECKPOINT_STORAGE} option from the merged job+cluster
 * config; otherwise a default storage.
 *
 * @param fromApplication storage set programmatically on the execution environment, may be null
 * @param configuredStateBackend the state backend already resolved for the job (never null)
 * @param jobConfig job-level config; overrides clusterConfig entries on merge
 * @param clusterConfig cluster-level config
 * @param classLoader user class loader for factory instantiation
 * @param logger optional; when null, precedence warnings are silently skipped
 * @throws IllegalConfigurationException on invalid configuration
 * @throws DynamicCodeLoadingException if a configured factory class cannot be loaded
 */
public static CheckpointStorage load( @Nullable CheckpointStorage fromApplication, StateBackend configuredStateBackend, Configuration jobConfig, Configuration clusterConfig, ClassLoader classLoader, @Nullable Logger logger) throws IllegalConfigurationException, DynamicCodeLoadingException { Preconditions.checkNotNull(jobConfig, "jobConfig"); Preconditions.checkNotNull(clusterConfig, "clusterConfig"); Preconditions.checkNotNull(classLoader, "classLoader"); Preconditions.checkNotNull(configuredStateBackend, "statebackend"); // Job level config can override the cluster level config. Configuration mergedConfig = new Configuration(clusterConfig); mergedConfig.addAll(jobConfig); // Legacy state backends always take precedence for backwards compatibility. StateBackend rootStateBackend = (configuredStateBackend instanceof DelegatingStateBackend) ? ((DelegatingStateBackend) configuredStateBackend) .getDelegatedStateBackend() : configuredStateBackend; if (rootStateBackend instanceof CheckpointStorage) { if (logger != null) { logger.info( "Using legacy state backend {} as Job checkpoint storage", rootStateBackend); if (fromApplication != null) { logger.warn( "Checkpoint storage passed via StreamExecutionEnvironment is ignored because legacy state backend '{}' is used. {}", rootStateBackend.getClass().getName(), LEGACY_PRECEDENCE_LOG_MESSAGE); } if (mergedConfig.get(CheckpointingOptions.CHECKPOINT_STORAGE) != null) { logger.warn( "Config option '{}' is ignored because legacy state backend '{}' is used. {}", CheckpointingOptions.CHECKPOINT_STORAGE.key(), rootStateBackend.getClass().getName(), LEGACY_PRECEDENCE_LOG_MESSAGE); } } return (CheckpointStorage) rootStateBackend; } // In the FLINK-2.0, the checkpoint storage from application will not be supported // anymore.
if (fromApplication != null) { if (fromApplication instanceof ConfigurableCheckpointStorage) { if (logger != null) { logger.info( "Using job/cluster config to configure application-defined checkpoint storage: {}", fromApplication); if (mergedConfig.get(CheckpointingOptions.CHECKPOINT_STORAGE) != null) { logger.warn( "Config option '{}' is ignored because the checkpoint storage passed via StreamExecutionEnvironment takes precedence.", CheckpointingOptions.CHECKPOINT_STORAGE.key()); } } return ((ConfigurableCheckpointStorage) fromApplication) // Use cluster config for backwards compatibility. .configure(clusterConfig, classLoader); } if (logger != null) { logger.info("Using application defined checkpoint storage: {}", fromApplication); } return fromApplication; } return fromConfig(mergedConfig, classLoader, logger) .orElseGet(() -> createDefaultCheckpointStorage(mergedConfig, classLoader, logger)); }
// Verifies that job-level CHECKPOINT_STORAGE wins over the cluster-level setting, and that the cluster setting applies when the job config is empty.
@Test void testLoadingFromFactory() throws Exception { final Configuration jobConfig = new Configuration(); final Configuration clusterConfig = new Configuration(); jobConfig.set(CheckpointingOptions.CHECKPOINT_STORAGE, WorkingFactory.class.getName()); clusterConfig.set(CheckpointingOptions.CHECKPOINT_STORAGE, "jobmanager"); CheckpointStorage storage1 = CheckpointStorageLoader.load( null, new ModernStateBackend(), jobConfig, clusterConfig, cl, LOG); assertThat(storage1).isInstanceOf(MockStorage.class); CheckpointStorage storage2 = CheckpointStorageLoader.load( null, new ModernStateBackend(), new Configuration(), clusterConfig, cl, LOG); assertThat(storage2).isInstanceOf(JobManagerCheckpointStorage.class); }
/**
 * Resets the consumer group's committed offsets to the latest offsets of the given topic
 * partitions (all partitions of the topic when {@code partitions} is null, per the offsets
 * helper). Validates the group's state first via checkGroupCondition.
 */
public Mono<Void> resetToLatest( KafkaCluster cluster, String group, String topic, Collection<Integer> partitions) { return checkGroupCondition(cluster, group) .flatMap(ac -> offsets(ac, topic, partitions, OffsetSpec.latest()) .flatMap(offsets -> resetOffsets(ac, group, offsets))); }
// Verifies partition-scoped reset (only partitions 0 and 1 move to latest) and whole-topic reset when the partition list is null.
@Test void resetToLatest() { sendMsgsToPartition(Map.of(0, 10, 1, 10, 2, 10, 3, 10, 4, 10)); commit(Map.of(0, 5L, 1, 5L, 2, 5L)); offsetsResetService.resetToLatest(cluster, groupId, topic, List.of(0, 1)).block(); assertOffsets(Map.of(0, 10L, 1, 10L, 2, 5L)); commit(Map.of(0, 5L, 1, 5L, 2, 5L)); offsetsResetService.resetToLatest(cluster, groupId, topic, null).block(); assertOffsets(Map.of(0, 10L, 1, 10L, 2, 10L, 3, 10L, 4, 10L)); }
/**
 * Translates a Pegasus data schema to its Avro schema JSON representation using default
 * translation options. Convenience overload of the two-argument variant.
 */
public static String dataToAvroSchemaJson(DataSchema dataSchema) { return dataToAvroSchemaJson(dataSchema, new DataToAvroSchemaTranslationOptions()); }
// Verifies typeref annotation propagation: excluded typeref properties ("validate", "java") do not leak into the translated Avro schema.
@Test(dataProvider = "toAvroSchemaDataTestTypeRefAnnotationPropagation") public void testToAvroSchemaTestTypeRefAnnotationPropagation(String schemaBeforeTranslation, String expectedAvroSchemaAsString) throws Exception { DataSchema schema = TestUtil.dataSchemaFromString(schemaBeforeTranslation); DataToAvroSchemaTranslationOptions transOptions = new DataToAvroSchemaTranslationOptions(OptionalDefaultMode.TRANSLATE_DEFAULT, JsonBuilder.Pretty.SPACES, EmbedSchemaMode.NONE); transOptions.setTyperefPropertiesExcludeSet(new HashSet<>(Arrays.asList("validate", "java"))); String avroSchemaText = SchemaTranslator.dataToAvroSchemaJson(schema, transOptions); DataMap avroSchemaAsDataMap = TestUtil.dataMapFromString(avroSchemaText); DataMap fieldsPropertiesMap = TestUtil.dataMapFromString(expectedAvroSchemaAsString); assertEquals(fieldsPropertiesMap, avroSchemaAsDataMap); }
/**
 * Fetches the configuration of each controller, keyed by controller address.
 *
 * <p>When {@code controllerServers} is null or empty, falls back to the remoting client's
 * name-server address list; returns null when no addresses are available at all.
 *
 * @throws MQClientException when any controller responds with a non-success code
 * NOTE(review): {@code assert response != null} is a no-op unless the JVM runs with -ea;
 * a null response would surface as an NPE on getCode() — confirm whether invokeSync can
 * return null.
 */
public Map<String, Properties> getControllerConfig(final List<String> controllerServers, final long timeoutMillis) throws InterruptedException, RemotingTimeoutException, RemotingSendRequestException, RemotingConnectException, MQClientException, UnsupportedEncodingException { List<String> invokeControllerServers = (controllerServers == null || controllerServers.isEmpty()) ? this.remotingClient.getNameServerAddressList() : controllerServers; if (invokeControllerServers == null || invokeControllerServers.isEmpty()) { return null; } RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_CONTROLLER_CONFIG, null); Map<String, Properties> configMap = new HashMap<>(4); for (String controller : invokeControllerServers) { RemotingCommand response = this.remotingClient.invokeSync(controller, request, timeoutMillis); assert response != null; if (ResponseCode.SUCCESS == response.getCode()) { configMap.put(controller, MixAll.string2Properties(new String(response.getBody(), MixAll.DEFAULT_CHARSET))); } else { throw new MQClientException(response.getCode(), response.getRemark()); } } return configMap; }
// Verifies that a successful controller response is parsed into a one-entry address-to-properties map.
@Test public void assertGetControllerConfig() throws RemotingException, InterruptedException, UnsupportedEncodingException, MQClientException { mockInvokeSync(); setResponseBody("{\"key\":\"value\"}"); Map<String, Properties> actual = mqClientAPI.getControllerConfig(Collections.singletonList(defaultBrokerAddr), defaultTimeout); assertNotNull(actual); assertEquals(1L, actual.size()); }
/**
 * Builds a FlinkSecurityManager from the configuration, or returns null when neither
 * user-system-exit interception nor halt-on-fatal-error is enabled, so the caller can
 * avoid installing a security manager (and its runtime cost) entirely.
 */
@VisibleForTesting static FlinkSecurityManager fromConfiguration(Configuration configuration) { final ClusterOptions.UserSystemExitMode userSystemExitMode = configuration.get(ClusterOptions.INTERCEPT_USER_SYSTEM_EXIT); boolean haltOnSystemExit = configuration.get(ClusterOptions.HALT_ON_FATAL_ERROR); // If no check is needed, return null so that caller can avoid setting security manager not // to incur any runtime cost. if (userSystemExitMode == ClusterOptions.UserSystemExitMode.DISABLED && !haltOnSystemExit) { return null; } LOG.info( "FlinkSecurityManager is created with {} user system exit mode and {} exit", userSystemExitMode, haltOnSystemExit ? "forceful" : "graceful"); // Add more configuration parameters that need user security manager (currently only for // system exit). return new FlinkSecurityManager(userSystemExitMode, haltOnSystemExit); }
// Verifies that a null INTERCEPT_USER_SYSTEM_EXIT value is rejected with a NullPointerException.
@Test void testInvalidConfiguration() { assertThatThrownBy( () -> { Configuration configuration = new Configuration(); configuration.set(ClusterOptions.INTERCEPT_USER_SYSTEM_EXIT, null); FlinkSecurityManager.fromConfiguration(configuration); }) .isInstanceOf(NullPointerException.class); }
/**
 * Reads all records of the given table, restricted to the given columns. Varargs
 * convenience overload that delegates to the list-based variant.
 */
public synchronized ImmutableList<Struct> readTableRecords(String tableId, String... columnNames) throws IllegalStateException { return readTableRecords(tableId, ImmutableList.copyOf(columnNames)); }
// Verifies that readTableRecords returns exactly the structs produced by a successful Spanner read.
@Test public void testReadRecordsWithListOfColumnNamesShouldWorkWhenSpannerReadSucceeds() throws ExecutionException, InterruptedException { // arrange prepareTable(); when(resultSet.next()).thenReturn(true).thenReturn(false); Struct struct = Struct.newBuilder() .set("SingerId") .to(int64(1)) .set("FirstName") .to(string("Marc")) .set("LastName") .to(string("Richards")) .build(); when(resultSet.getCurrentRowAsStruct()).thenReturn(struct); when(spanner.getDatabaseClient(any()).singleUse().read(any(), any(), any())) .thenReturn(resultSet); ImmutableList<String> columnNames = ImmutableList.of("SingerId", "FirstName", "LastName"); // act ImmutableList<Struct> actual = testManager.readTableRecords("Singers", columnNames); // assert ImmutableList<Struct> expected = ImmutableList.of(struct); assertThat(actual).containsExactlyElementsIn(expected); }
/** Intentionally a no-op: this implementation performs no cleanup on close. */
@Override public void close() { }
// Verifies that a detected offset gap on the remote node triggers exactly one retry carrying the continuation token, and that rows from both attempts are delivered.
@Test public void shouldSucceed_gapDetectedRemote_retry() throws ExecutionException, InterruptedException { // Given: final AtomicReference<Set<KsqlNode>> nodes = new AtomicReference<>( ImmutableSet.of(ksqlNodeLocal, ksqlNodeRemote)); final PushRouting routing = new PushRouting(sqr -> nodes.get(), 50, true); AtomicReference<TestRemotePublisher> remotePublisher = new AtomicReference<>(); AtomicInteger remoteCount = new AtomicInteger(0); when(simpleKsqlClient.makeQueryRequestStreamed(any(), any(), any(), any())) .thenAnswer(a -> { remotePublisher.set(new TestRemotePublisher(context)); remoteCount.incrementAndGet(); final Map<String, ?> requestProperties = a.getArgument(3); String continuationToken = (String) requestProperties.get( KsqlRequestConfig.KSQL_REQUEST_QUERY_PUSH_CONTINUATION_TOKEN); if (remoteCount.get() == 1) { assertThat(continuationToken, nullValue()); } else if (remoteCount.get() == 2) { assertThat(continuationToken, notNullValue()); final PushOffsetRange range = PushOffsetRange.deserialize(continuationToken); assertThat(range.getEndOffsets().getDenseRepresentation(), is(ImmutableList.of(0L, 3L))); remotePublisher.get().accept(REMOTE_ROW2); } return createFuture(RestResponse.successful(200, remotePublisher.get())); }); // When: final PushConnectionsHandle handle = handlePushRouting(routing); final AtomicReference<Throwable> exception = new AtomicReference<>(null); handle.onException(exception::set); context.runOnContext(v -> { remotePublisher.get().accept(REMOTE_CONTINUATION_TOKEN1); remotePublisher.get().accept(REMOTE_ROW1); remotePublisher.get().accept(REMOTE_CONTINUATION_TOKEN_GAP); }); Set<List<?>> rows = waitOnRows(2); handle.close(); // Then: verify(simpleKsqlClient, times(2)).makeQueryRequestStreamed(any(), any(), any(), any()); assertThat(rows.contains(REMOTE_ROW1.getRow().get().getColumns()), is(true)); assertThat(rows.contains(REMOTE_ROW2.getRow().get().getColumns()), is(true)); }
/** This session type never has system-administrator privileges; always returns false. */
@Override public boolean isSystemAdministrator() { return false; }
// Verifies that a GitHub-webhook user session never reports system-administrator privileges.
@Test public void isSystemAdministrator() { assertThat(githubWebhookUserSession.isSystemAdministrator()).isFalse(); }
/**
 * Returns the (possibly cached) signature for the given DoFn class, parsing and caching it
 * on first access via the signature cache.
 */
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) { return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature); }
// Verifies that a @TimerId field whose type is not a TimerSpec is rejected with a descriptive IllegalArgumentException.
@Test public void testTimerIdWithWrongType() throws Exception { thrown.expect(IllegalArgumentException.class); thrown.expectMessage("TimerId"); thrown.expectMessage("TimerSpec"); thrown.expectMessage("bizzle"); thrown.expectMessage(not(mentionsState())); DoFnSignatures.getSignature( new DoFn<String, String>() { @TimerId("foo") private final String bizzle = "bazzle"; @ProcessElement public void foo(ProcessContext context) {} }.getClass()); }
/**
 * Replaces each node of a directed, self-loop-free network with {@code function.apply(node)},
 * reconnecting all incoming and outgoing edges to the replacement and removing the old node.
 *
 * <p>Nodes mapped to a value equal to themselves are left untouched, so the function is
 * applied exactly once per node. Edge objects are preserved; only their endpoints change.
 * The edge sets are copied before mutation to avoid concurrent modification.
 *
 * @throws IllegalArgumentException if the network is undirected or allows self loops
 */
public static <NodeT, EdgeT> void replaceDirectedNetworkNodes( MutableNetwork<NodeT, EdgeT> network, Function<NodeT, NodeT> function) { checkArgument(network.isDirected(), "Only directed networks are supported, given %s", network); checkArgument( !network.allowsSelfLoops(), "Only networks without self loops are supported, given %s", network); // A map from the existing node to the replacement node Map<NodeT, NodeT> oldNodesToNewNodes = new HashMap<>(network.nodes().size()); for (NodeT currentNode : network.nodes()) { NodeT newNode = function.apply(currentNode); // Skip updating the network if the old node is equivalent to the new node if (!currentNode.equals(newNode)) { oldNodesToNewNodes.put(currentNode, newNode); } } // For each replacement, connect up the existing predecessors and successors to the new node // and then remove the old node. for (Map.Entry<NodeT, NodeT> entry : oldNodesToNewNodes.entrySet()) { NodeT oldNode = entry.getKey(); NodeT newNode = entry.getValue(); network.addNode(newNode); for (NodeT predecessor : ImmutableSet.copyOf(network.predecessors(oldNode))) { for (EdgeT edge : ImmutableSet.copyOf(network.edgesConnecting(predecessor, oldNode))) { network.removeEdge(edge); network.addEdge(predecessor, newNode, edge); } } for (NodeT successor : ImmutableSet.copyOf(network.successors(oldNode))) { for (EdgeT edge : ImmutableSet.copyOf(network.edgesConnecting(oldNode, successor))) { network.removeEdge(edge); network.addEdge(newNode, successor, edge); } } network.removeNode(oldNode); } }
// Verifies node replacement preserves the network's structure (successor sets map through the function) and that the function is applied at most once per node.
@Test public void testNodeReplacement() { Function<String, String> function = input -> { if ("E".equals(input) || "J".equals(input) || "M".equals(input) || "O".equals(input)) { return input.toLowerCase(); } checkArgument( !input.toLowerCase().equals(input), "All inputs should be in upper case, got %s. " + "This may indicate calling the function on multiple inputs", input); return input; }; MutableNetwork<String, String> network = createNetwork(); Networks.replaceDirectedNetworkNodes(network, function); MutableNetwork<String, String> originalNetwork = createNetwork(); for (String node : originalNetwork.nodes()) { assertEquals( originalNetwork.successors(node).stream() .map(function) .collect(Collectors.toCollection(HashSet::new)), network.successors(function.apply(node))); } assertEquals( network.nodes(), originalNetwork.nodes().stream() .map(function) .collect(Collectors.toCollection(HashSet::new))); }
/**
 * Orders keys by their key definition only.
 * NOTE(review): other Key state (if any) is ignored here — confirm compareTo stays
 * consistent with equals, as required for use in sorted collections.
 */
@Override public int compareTo(final Key key) { return keyDefinition.compareTo(key.keyDefinition); }
// Verifies that two keys with equal key definitions compare as equal (compareTo == 0).
@Test void testEqual() throws Exception { final Key a = new Key(KeyDefinition.newKeyDefinition().withId("id").build(), 2); final Key b = new Key(KeyDefinition.newKeyDefinition().withId("id").build(), 2); assertThat(a.compareTo(b)).isEqualTo(0); }
/**
 * Decides whether a message should be stored for offline delivery, following XEP-0160
 * (offline storage recommendations), XEP-0334 (the no-store hint) and OF-2083 (do not
 * re-store flagged offline messages).
 *
 * <p>Chat messages are stored only when they carry content beyond chat-state (XEP-0085)
 * or real-time-text (XEP-0301) extensions, or a non-empty body; groupchat and headline
 * messages are never stored; error messages are stored only when they contain AMP
 * (XEP-0079) content; all other types (including "normal") are stored.
 */
static boolean shouldStoreMessage(final Message message) { // XEP-0334: Implement the <no-store/> hint to override offline storage if (message.getChildElement("no-store", "urn:xmpp:hints") != null) { return false; } // OF-2083: Prevent storing offline message that is already stored if (message.getChildElement("offline", "http://jabber.org/protocol/offline") != null) { return false; } switch (message.getType()) { case chat: // XEP-0160: Messages with a 'type' attribute whose value is "chat" SHOULD be stored offline, with the exception of messages that contain only Chat State Notifications (XEP-0085) [7] content // Iterate through the child elements to see if we can find anything that's not a chat state notification or // real time text notification Iterator<?> it = message.getElement().elementIterator(); while (it.hasNext()) { Object item = it.next(); if (item instanceof Element) { Element el = (Element) item; if (Namespace.NO_NAMESPACE.equals(el.getNamespace())) { continue; } if (!el.getNamespaceURI().equals("http://jabber.org/protocol/chatstates") && !(el.getQName().equals(QName.get("rtt", "urn:xmpp:rtt:0"))) ) { return true; } } } return message.getBody() != null && !message.getBody().isEmpty(); case groupchat: case headline: // XEP-0160: "groupchat" message types SHOULD NOT be stored offline // XEP-0160: "headline" message types SHOULD NOT be stored offline return false; case error: // XEP-0160: "error" message types SHOULD NOT be stored offline, // although a server MAY store advanced message processing errors offline if (message.getChildElement("amp", "http://jabber.org/protocol/amp") == null) { return false; } break; default: // XEP-0160: Messages with a 'type' attribute whose value is "normal" (or messages with no 'type' attribute) SHOULD be stored offline. break; } return true; }
// Verifies that a bodyless chat message containing only a chat-state notification is not stored offline.
@Test public void shouldNotStoreEmptyChatMessagesWithOnlyChatStates() { Message message = new Message(); message.setType(Message.Type.chat); PacketExtension chatState = new PacketExtension("composing", "http://jabber.org/protocol/chatstates"); message.addExtension(chatState); assertFalse(OfflineMessageStore.shouldStoreMessage(message)); }
public static void copyBody(Message source, Message target) { // Preserve the DataType if both messages are DataTypeAware if (source.hasTrait(MessageTrait.DATA_AWARE)) { target.setBody(source.getBody()); target.setPayloadForTrait(MessageTrait.DATA_AWARE, source.getPayloadForTrait(MessageTrait.DATA_AWARE)); return; } target.setBody(source.getBody()); }
// Verifies that the body is copied by reference even when the target message is not data-type aware.
@Test void shouldCopyBodyIfTargetNotDataTypeAware() { Object body = new Object(); Message m1 = new MyMessageType(body); Message m2 = new DefaultMessage((Exchange) null); copyBody(m1, m2); assertSame(body, m2.getBody()); }
/** Returns the current size counter maintained by this instance. */
@Override public long size() { return size; }
// Verifies that size() starts at 0 and accumulates the counts added to the sketch.
@Test public void testSize() throws CMSMergeException { CountMinSketch sketch = new CountMinSketch(0.00001, 0.99999, 1); assertEquals(0, sketch.size(), 0); sketch.add(1, 11); sketch.add(2, 22); sketch.add(3, 33); long expectedSize = 11 + 22 + 33; assertEquals(expectedSize, sketch.size()); }
/**
 * Serializes this COM_REGISTER_SLAVE command into the MySQL wire format: command byte,
 * server id (uint32 LE), length-prefixed host/user/password strings, report port
 * (uint16 LE), then two zero uint32 fields (fake rpl_recovery_rank and master id).
 * NOTE(review): getBytes() uses the platform default charset — presumably ASCII-safe
 * values are expected here; confirm whether an explicit charset should be used.
 */
public byte[] toBytes() throws IOException { ByteArrayOutputStream out = new ByteArrayOutputStream(); out.write(getCommand()); ByteHelper.writeUnsignedIntLittleEndian(serverId, out); out.write((byte) reportHost.getBytes().length); ByteHelper.writeFixedLengthBytesFromStart(reportHost.getBytes(), reportHost.getBytes().length, out); out.write((byte) reportUser.getBytes().length); ByteHelper.writeFixedLengthBytesFromStart(reportUser.getBytes(), reportUser.getBytes().length, out); out.write((byte) reportPasswd.getBytes().length); ByteHelper.writeFixedLengthBytesFromStart(reportPasswd.getBytes(), reportPasswd.getBytes().length, out); ByteHelper.writeUnsignedShortLittleEndian(reportPort, out); ByteHelper.writeUnsignedIntLittleEndian(0, out);// Fake // rpl_recovery_rank ByteHelper.writeUnsignedIntLittleEndian(0, out);// master id return out.toByteArray(); }
// Verifies the exact 27-byte wire encoding of a register-slave packet with three 3-character string fields.
@Test public void toBytesOutput27() throws IOException, InvocationTargetException { // Arrange final RegisterSlaveCommandPacket objectUnderTest = new RegisterSlaveCommandPacket(); objectUnderTest.serverId = 0L; objectUnderTest.reportPort = 0; objectUnderTest.reportPasswd = "foo"; objectUnderTest.reportHost = "foo"; objectUnderTest.reportUser = "foo"; objectUnderTest.setCommand((byte) 0); // Act final byte[] actual = objectUnderTest.toBytes(); // Assert result Assert.assertArrayEquals(new byte[] { (byte) 0, (byte) 0, (byte) 0, (byte) 0, (byte) 0, (byte) 3, (byte) 102, (byte) 111, (byte) 111, (byte) 3, (byte) 102, (byte) 111, (byte) 111, (byte) 3, (byte) 102, (byte) 111, (byte) 111, (byte) 0, (byte) 0, (byte) 0, (byte) 0, (byte) 0, (byte) 0, (byte) 0, (byte) 0, (byte) 0, (byte) 0 }, actual); }
@Override public boolean archive(String gcsUrl, byte[] data) { BlobInfo blobInfo = parseBlobInfo(gcsUrl); if (data.length <= options.chunkUploadThresholdInBytes) { // Create the blob in one request. logger.atInfo().log("Archiving data to GCS at '%s' in one request.", gcsUrl); storage.create(blobInfo, data); return true; } // When content is large (1MB or more) it is recommended to write it in chunks via the blob's // channel writer. logger.atInfo().log( "Content is larger than threshold, archiving data to GCS at '%s' in chunks.", gcsUrl); try (WriteChannel writer = storage.writer(blobInfo)) { for (int chunkOffset = 0; chunkOffset < data.length; chunkOffset += options.chunkSizeInBytes) { int chunkSize = Math.min(data.length - chunkOffset, options.chunkSizeInBytes); writer.write(ByteBuffer.wrap(data, chunkOffset, chunkSize)); } return true; } catch (IOException e) { logger.atSevere().withCause(e).log("Unable to archving data to GCS at '%s'.", gcsUrl); return false; } }
// Verifies that content above the upload threshold is streamed through the writer in chunk-sized ByteBuffers, including a short final chunk.
@Test public void archive_withLargeSizeString_createsBlobWithWriter() throws IOException { options.chunkSizeInBytes = 8; options.chunkUploadThresholdInBytes = 16; doReturn(mockWriter) .when(mockStorage) .writer(eq(BlobInfo.newBuilder(BUCKET_ID, OBJECT_ID).build())); GoogleCloudStorageArchiver archiver = archiverFactory.create(mockStorage); String dataToArchive = "THIS IS A LONG DATA"; int numOfChunks = (int) Math.ceil((double) dataToArchive.length() / options.chunkSizeInBytes); boolean succeeded = archiver.archive(buildGcsUrl(BUCKET_ID, OBJECT_ID), dataToArchive); assertThat(succeeded).isTrue(); verify(mockWriter, times(numOfChunks)).write(byteBufferCaptor.capture()); assertThat(byteBufferCaptor.getAllValues()) .containsExactly( ByteBuffer.wrap(dataToArchive.getBytes(UTF_8), 0, 8), ByteBuffer.wrap(dataToArchive.getBytes(UTF_8), 8, 8), ByteBuffer.wrap(dataToArchive.getBytes(UTF_8), 16, 3)); }
/**
 * Issues a signed HMAC256 JWT carrying the given id as a claim, expiring after the
 * configured expiration period from now.
 */
public String generate(long id) { Date expirationDate = new Date(System.currentTimeMillis() + jwtProperties.getExpirationPeriodInMillis()); return JWT.create() .withClaim(CLAIM_ID, id) .withExpiresAt(expirationDate) .sign(Algorithm.HMAC256(jwtProperties.getSecretKey())); }
// Verifies the generate/extract round trip: the id claim embedded in the token is recovered intact.
@DisplayName("참가자의 정보를 이용하여 jwt 토큰을 발행한다.") @Test void generate() { String token = jwtManager.generate(1L); long attendeeId = jwtManager.extract(token); assertThat(attendeeId).isEqualTo(1L); }
public Map<String, Long> getCodeAndVersion(String projectName, String processDefinitionName, String taskName) throws CodeGenerateUtils.CodeGenerateException { Project project = projectMapper.queryByName(projectName); Map<String, Long> result = new HashMap<>(); // project do not exists, mean task not exists too, so we should directly return init value if (project == null) { result.put("code", CodeGenerateUtils.getInstance().genCode()); result.put("version", 0L); return result; } ProcessDefinition processDefinition = processDefinitionMapper.queryByDefineName(project.getCode(), processDefinitionName); // In the case project exists, but current workflow still not created, we should also return the init // version of it if (processDefinition == null) { result.put("code", CodeGenerateUtils.getInstance().genCode()); result.put("version", 0L); return result; } TaskDefinition taskDefinition = taskDefinitionMapper.queryByName(project.getCode(), processDefinition.getCode(), taskName); if (taskDefinition == null) { result.put("code", CodeGenerateUtils.getInstance().genCode()); result.put("version", 0L); } else { result.put("code", taskDefinition.getCode()); result.put("version", (long) taskDefinition.getVersion()); } return result; }
// Verifies that an existing task definition's own code is returned rather than a freshly generated one.
@Test public void testGetCodeAndVersion() throws CodeGenerateUtils.CodeGenerateException { Project project = getTestProject(); Mockito.when(projectMapper.queryByName(project.getName())).thenReturn(project); ProcessDefinition processDefinition = getTestProcessDefinition(); Mockito.when(processDefinitionMapper.queryByDefineName(project.getCode(), processDefinition.getName())) .thenReturn(processDefinition); TaskDefinition taskDefinition = getTestTaskDefinition(); Mockito.when(taskDefinitionMapper.queryByName(project.getCode(), processDefinition.getCode(), taskDefinition.getName())).thenReturn(taskDefinition); Map<String, Long> result = pythonGateway.getCodeAndVersion(project.getName(), processDefinition.getName(), taskDefinition.getName()); Assertions.assertEquals(result.get("code").longValue(), taskDefinition.getCode()); }
/**
 * Masks sensitive literals in a KSQL statement via the parse-tree visitor. If parsing fails
 * for any reason — including StackOverflowError on pathologically deep input, which is why
 * Error is deliberately caught here — falls back to a coarser regex-based masking so the
 * statement is never returned unmasked.
 */
public static String getMaskedStatement(final String query) { try { final ParseTree tree = DefaultKsqlParser.getParseTree(query); return new Visitor().visit(tree); } catch (final Exception | StackOverflowError e) { return fallbackMasking(query); } }
// Verifies masking across multiple statements: connector WITH-clause string values are masked while the stream statement is left untouched.
@Test public void shouldMaskMultipleValidStatements() { // Given: // Typo in "WITH" => "WIT" final String query = "CREATE SOURCE CONNECTOR test_connector WITH (" + " \"connector.class\" = 'PostgresSource', \n" + " 'connection.url' = 'jdbc:postgresql://localhost:5432/my.db',\n" + " `mode`='bulk',\n" + " \"topic.prefix\"='jdbc-',\n" + " \"table.whitelist\"='users',\n" + " \"key\"='username');\n" + "CREATE STREAM `stream` (id varchar) WITH ('format' = 'avro', \"kafka_topic\" = 'test_topic', partitions=3);"; // When final String maskedQuery = QueryMask.getMaskedStatement(query); // Then final String expected = "CREATE SOURCE CONNECTOR test_connector WITH " + "(\"connector.class\"='PostgresSource', " + "'connection.url'='[string]', " + "`mode`='[string]', " + "\"topic.prefix\"='[string]', " + "\"table.whitelist\"='[string]', " + "\"key\"='[string]');\n" + "CREATE STREAM `stream` (id varchar) WITH ('format' = 'avro', \"kafka_topic\" = 'test_topic', partitions=3);"; assertThat(maskedQuery, is(expected)); }
/** Static factory returning a new, empty {@code Inner} transform instance. */
public static <T> Inner<T> create() { return new Inner<>(); }
// Verifies that adding a non-nullable field with a null default value is rejected with an IllegalArgumentException.
@Test @Category(NeedsRunner.class) public void addNonNullableField() { Schema schema = Schema.builder().addStringField("field1").build(); thrown.expect(IllegalArgumentException.class); pipeline .apply(Create.of(Row.withSchema(schema).addValue("value").build()).withRowSchema(schema)) .apply(AddFields.<Row>create().field("field2", Schema.FieldType.INT32, null)); pipeline.run(); }
/**
 * A connection uses buckets when its details declare bucket support and no root path
 * resolves for it (a resolved root path replaces bucket-style addressing).
 */
public boolean usesBuckets( @NonNull VFSConnectionDetails details ) throws KettleException { return details.hasBuckets() && getResolvedRootPath( details ) == null; }
// Verifies that bucket-capable details with no root path report bucket usage.
@Test public void testUsesBucketsReturnsTrueIfHasBucketsAndNoRootPath() throws KettleException { when( vfsConnectionDetails.hasBuckets() ).thenReturn( true ); when( vfsConnectionDetails.getRootPath() ).thenReturn( null ); assertTrue( vfsConnectionManagerHelper.usesBuckets( vfsConnectionDetails ) ); }
/**
 * Scans stored recurring and scheduled jobs for method signatures that no longer exist in
 * the application code and logs a single warning listing them, so operators learn about
 * impending JobNotFoundExceptions (ClassNotFound/MethodNotFound) before the jobs fire.
 * Any unexpected failure is caught and logged rather than propagated — this is a
 * best-effort diagnostic task.
 */
@Override public void run() { try { final Set<String> distinctRecurringJobSignatures = getDistinctRecurringJobSignaturesThatDoNotExistAnymore(); final Set<String> distinctScheduledJobSignatures = getDistinctScheduledJobSignaturesThatDoNotExistAnymore(); Set<String> jobsThatCannotBeFound = asSet(distinctRecurringJobSignatures, distinctScheduledJobSignatures); if (!distinctRecurringJobSignatures.isEmpty() || !distinctScheduledJobSignatures.isEmpty()) { String jobStateThatIsNotFound = jobTypeNotFoundLabel(distinctRecurringJobSignatures, distinctScheduledJobSignatures); LOGGER.warn("JobRunr found {} jobs that do not exist anymore in your code. These jobs will fail with a JobNotFoundException (due to a ClassNotFoundException or a MethodNotFoundException)." + "\n\tBelow you can find the method signatures of the jobs that cannot be found anymore: {}", jobStateThatIsNotFound, jobsThatCannotBeFound.stream().map(sign -> "\n\t" + sign + ",").collect(Collectors.joining()) ); } } catch (Exception e) { LOGGER.error("Unexpected exception running `CheckIfAllJobsExistTask`", shouldNotHappenException(e)); } }
// Verifies that a recurring job whose class no longer exists is reported in a warning (and no errors are logged).
@Test void onRunItLogsAllRecurringJobsThatDoNotExist() { when(storageProvider.getRecurringJobs()).thenReturn(new RecurringJobsResult(asList( aDefaultRecurringJob().build(), aDefaultRecurringJob().withJobDetails(classThatDoesNotExistJobDetails()).build() ))); checkIfAllJobsExistTask.run(); assertThat(logger) .hasWarningMessageContaining("JobRunr found RECURRING jobs that do not exist anymore") .hasWarningMessageContaining("i.dont.exist.Class.notImportant(java.lang.Integer)") .hasNoErrorLogMessages(); }
/**
 * FEEL "during" semantics: returns true when {@code point} lies within {@code range},
 * treating an endpoint as included only when its boundary is CLOSED.
 *
 * <p>Null point or range, or a point that cannot be compared with the range's endpoints
 * (the catch-all branch), yields an InvalidParametersEvent error result instead of
 * throwing.
 */
public FEELFnResult<Boolean> invoke(@ParameterName( "point" ) Comparable point, @ParameterName( "range" ) Range range) { if ( point == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be null")); } if ( range == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "range", "cannot be null")); } try { boolean result = (range.getLowEndPoint().compareTo(point) < 0 && range.getHighEndPoint().compareTo(point) > 0) || (range.getLowEndPoint().compareTo(point) == 0 && range.getLowBoundary() == RangeBoundary.CLOSED) || (range.getHighEndPoint().compareTo(point) == 0 && range.getHighBoundary() == RangeBoundary.CLOSED); return FEELFnResult.ofResult( result ); } catch( Exception e ) { // points are not comparable return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point", "cannot be compared to range")); } }
// Verifies interior points and closed endpoints are inside the range, while open endpoints and values below the low bound are not.
@Test void invokeParamSingleAndRange() { FunctionTestUtil.assertResult( duringFunction.invoke( "c", new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED )), Boolean.TRUE ); FunctionTestUtil.assertResult( duringFunction.invoke( "f", new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED )), Boolean.TRUE ); FunctionTestUtil.assertResult( duringFunction.invoke( "a", new RangeImpl( Range.RangeBoundary.OPEN, "a", "f", Range.RangeBoundary.CLOSED )), Boolean.FALSE ); FunctionTestUtil.assertResult( duringFunction.invoke( "a", new RangeImpl( Range.RangeBoundary.CLOSED, "b", "f", Range.RangeBoundary.CLOSED )), Boolean.FALSE ); }
/**
 * Returns a filter keeping only elements strictly greater than {@code value}
 * (by {@code compareTo}), described as "x > value" for display purposes.
 */
public static <T extends Comparable<T>> Filter<T> greaterThan(final T value) { return by((ProcessFunction<T, Boolean>) input -> input.compareTo(value) > 0) .described(String.format("x > %s", value)); }
// Verifies that Filter.greaterThan(4) keeps exactly the elements strictly above 4.
@Test @Category(NeedsRunner.class) public void testFilterGreaterThan() { PCollection<Integer> output = p.apply(Create.of(1, 2, 3, 4, 5, 6, 7)).apply(Filter.greaterThan(4)); PAssert.that(output).containsInAnyOrder(5, 6, 7); p.run(); }
/**
 * Static factory for a UAnnotation with the given annotation type and arguments; the
 * argument list is defensively copied into an immutable list.
 */
public static UAnnotation create(UTree<?> annotationType, List<UExpression> arguments) { return new AutoValue_UAnnotation(annotationType, ImmutableList.copyOf(arguments)); }
// Verifies UAnnotation equality: annotations differ by type and by argument list.
@Test public void equality() { new EqualsTester() .addEqualityGroup(UAnnotation.create(UClassIdent.create("java.lang.Override"))) .addEqualityGroup(UAnnotation.create(UClassIdent.create("java.lang.Deprecated"))) .addEqualityGroup( UAnnotation.create( UClassIdent.create("java.lang.SuppressWarnings"), ULiteral.stringLit("cast"))) .testEquals(); }
/**
 * Returns the generic type of the parameter at {@code index} of the given method, or
 * {@code null} when the method has no parameter types or the index is out of range.
 *
 * @param method the method to inspect
 * @param index zero-based parameter position
 * @return the parameter's generic {@link Type}, or {@code null} when unavailable
 */
public static Type getParamType(Method method, int index) {
    final Type[] paramTypes = getParamTypes(method);
    // Guard against a missing type array or an out-of-range index.
    if (paramTypes == null || paramTypes.length <= index) {
        return null;
    }
    return paramTypes[index];
}
// Verifies that both the first parameter type and the return type of intTest(Integer) resolve to Integer.
@Test public void getParamTypeTest() { Method method = ReflectUtil.getMethod(TestClass.class, "intTest", Integer.class); Type type = TypeUtil.getParamType(method, 0); assertEquals(Integer.class, type); Type returnType = TypeUtil.getReturnType(method); assertEquals(Integer.class, returnType); }
/**
 * Refreshes the locally cached BDB database name list, retrying up to RETRY_TIME times
 * with a SLEEP_INTERVAL_SEC pause between attempts.
 *
 * <p>If all attempts fail, the last wrapped DatabaseException is rethrown as a
 * JournalException. When the fetched name list equals the cached one, the method returns
 * early (nothing rolled or deleted); otherwise the cache and the next-database index are
 * updated.
 *
 * @throws JournalException when database names cannot be fetched after all retries
 * @throws InterruptedException if interrupted while sleeping between retries
 */
@Override public void refresh() throws JournalException, JournalInconsistentException, InterruptedException { // 1. refresh current db names List<Long> dbNames = null; JournalException exception = null; for (int i = 0; i < RETRY_TIME; ++ i) { if (i != 0) { Thread.sleep(SLEEP_INTERVAL_SEC * 1000L); } try { dbNames = environment.getDatabaseNamesWithPrefix(prefix); break; } catch (DatabaseException e) { String errMsg = String.format("failed to get DB names for %s times!", i + 1); exception = wrapDatabaseException(e, errMsg); } } if (dbNames == null) { if (exception != null) { throw exception; } else { throw new JournalException("failed to get db names!"); } } // 2. no db changed ( roll new db / delete db after checkpoint ) if (dbNames.equals(localDBNames)) { return; } // 3. update db index LOG.info("update dbNames {} -> {}", localDBNames, dbNames); localDBNames = dbNames; calculateNextDbIndex(); }
// Verifies that refresh surfaces a JournalException when the BDB environment keeps failing to list database names.
@Test(expected = JournalException.class) public void refreshFailed(@Mocked BDBEnvironment environment) throws Exception { new Expectations(environment) { { environment.getDatabaseNamesWithPrefix(""); result = new DatabaseNotFoundException("mock mock"); } }; BDBJEJournal journal = new BDBJEJournal(environment); JournalCursor cursor = journal.read(10, 10); cursor.refresh(); }
/**
 * Replaces entity IDs in a field value with their human-readable titles, resolved through
 * the entity title service under the permissions of {@code searchUser}.
 *
 * <p>IDs that cannot be resolved (e.g. not permitted) are passed through via
 * extractTitles. The result preserves the input's shape: a collection input yields a list
 * of titles, a scalar input is unwrapped back to a single value.
 */
@Override public Object decorate(RequestedField field, Object value, SearchUser searchUser) { final List<String> ids = parseIDs(value); final EntityTitleRequest req = ids.stream() .map(id -> new EntityIdentifier(id, FIELD_ENTITY_MAPPER.get(field.name()))) .collect(Collectors.collectingAndThen(Collectors.toList(), EntityTitleRequest::new)); final EntitiesTitleResponse response = entityTitleService.getTitles(req, searchUser); return extractTitles(ids, response.entities()).stream() .collect(Collectors.collectingAndThen(Collectors.toList(), titles -> value instanceof Collection<?> ? titles : unwrapIfSingleResult(titles))); }
// Mixed-permission case: id 123 resolves to its title ("My stream") while the
// not-permitted id 456 is kept verbatim in the decorated result.
@Test void testMixedPermittedAndNotPermitted() { final EntitiesTitleResponse response = new EntitiesTitleResponse(Collections.singleton(new EntityTitleResponse("123", "streams", "My stream")), Collections.singleton("456")); final FieldDecorator decorator = new TitleDecorator((request, permissions) -> response); final Object result = decorator.decorate(RequestedField.parse("streams"), Arrays.asList("123", "456"), TestSearchUser.builder().build()); Assertions.assertThat(result) .isInstanceOf(List.class) .asList() .hasSize(2) .contains("My stream") .contains("456"); }
// Convenience overload: delegates to the three-argument variant with no pagination cursor.
@Override public Iterator<IndexKeyEntries> getSqlRecordIteratorBatch(@Nonnull Comparable value, boolean descending) { return getSqlRecordIteratorBatch(value, descending, null); }
// Full ascending scan must yield records in the store's expected key order.
@Test public void getRecordAllAscending() { var expectedOrder = List.of(0, 3, 6, 1, 4, 7, 2, 5, 8); var actual = store.getSqlRecordIteratorBatch(false); assertResult(expectedOrder, actual); }
/**
 * Returns the position of the first param whose name equals {@code name}.
 * Param lists are short, so a linear scan is fine here.
 *
 * @throws IllegalArgumentException if no param with that name exists
 */
public int getIndex(String name) {
    int position = 0;
    while (position < this.size()) {
        if (get(position).getName().equals(name)) {
            return position;
        }
        position++;
    }
    throw new IllegalArgumentException("param '" + name + "' not found");
}
// Sanity check: getIndex returns insertion positions for params looked up by name.
@Test public void getIndex() { paramsConfig = new ParamsConfig(); ParamConfig one = new ParamConfig("name", "value"); paramsConfig.add(one); ParamConfig two = new ParamConfig("other", "other-value"); paramsConfig.add(two); assertThat(paramsConfig.getIndex("other")).isEqualTo((1)); assertThat(paramsConfig.getIndex("name")).isEqualTo((0)); }
// Delegates to the static scorer using this instance's configured beta and averaging strategy.
@Override public double score(int[] truth, int[] prediction) { return of(truth, prediction, beta, strategy); }
// Weighted-average F1 score over a 6-class confusion case; expected value 0.8907
// (tolerance 1e-4) is presumably from an independent reference computation.
@Test public void testWeighted() { System.out.println("Weighted-FScore"); int[] truth = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5 }; int[] prediction = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 2, 2, 2, 3, 1, 3, 3, 3, 4, 5, 4, 4, 4, 4, 1, 5, 5 }; FScore instance = new FScore(1.0, Averaging.Weighted); double expResult = 0.8907; double result = instance.score(truth, prediction); assertEquals(expResult, result, 1E-4); }
// Plain setter; null is explicitly allowed to clear the container reference.
public void setServerContainer(@Nullable final ServerContainer serverContainer) { this.serverContainer = serverContainer; }
// Smoke test: setting a null server container must not throw.
@Test public void initServletContextContainerNullTest() { exporter.setServerContainer(null); }
// Static factory: starts a join spec from a single By clause.
public static Impl join(By clause) { return new Impl(new JoinArguments(clause)); }
// Verifies CoGroup when each input PCollection joins on differently-named key fields
// ((user, country) vs (user2, country2) vs (user3, country3)). The same expected
// grouped rows must be produced whether zero, one, or two of the inputs are marked
// withSideInput().
@Test @Category(NeedsRunner.class) public void testCoGroupByDifferentFields() { // Inputs. PCollection<Row> pc1 = pipeline .apply( "Create1", Create.of( Row.withSchema(CG_SCHEMA_1).addValues("user1", 1, "us").build(), Row.withSchema(CG_SCHEMA_1).addValues("user1", 2, "us").build(), Row.withSchema(CG_SCHEMA_1).addValues("user1", 3, "il").build(), Row.withSchema(CG_SCHEMA_1).addValues("user1", 4, "il").build(), Row.withSchema(CG_SCHEMA_1).addValues("user2", 5, "fr").build(), Row.withSchema(CG_SCHEMA_1).addValues("user2", 6, "fr").build(), Row.withSchema(CG_SCHEMA_1).addValues("user2", 7, "ar").build(), Row.withSchema(CG_SCHEMA_1).addValues("user2", 8, "ar").build())) .setRowSchema(CG_SCHEMA_1); PCollection<Row> pc2 = pipeline .apply( "Create2", Create.of( Row.withSchema(CG_SCHEMA_2).addValues("user1", 9, "us").build(), Row.withSchema(CG_SCHEMA_2).addValues("user1", 10, "us").build(), Row.withSchema(CG_SCHEMA_2).addValues("user1", 11, "il").build(), Row.withSchema(CG_SCHEMA_2).addValues("user1", 12, "il").build(), Row.withSchema(CG_SCHEMA_2).addValues("user2", 13, "fr").build(), Row.withSchema(CG_SCHEMA_2).addValues("user2", 14, "fr").build(), Row.withSchema(CG_SCHEMA_2).addValues("user2", 15, "ar").build(), Row.withSchema(CG_SCHEMA_2).addValues("user2", 16, "ar").build())) .setRowSchema(CG_SCHEMA_2); PCollection<Row> pc3 = pipeline .apply( "Create3", Create.of( Row.withSchema(CG_SCHEMA_3).addValues("user1", 17, "us").build(), Row.withSchema(CG_SCHEMA_3).addValues("user1", 18, "us").build(), Row.withSchema(CG_SCHEMA_3).addValues("user1", 19, "il").build(), Row.withSchema(CG_SCHEMA_3).addValues("user1", 20, "il").build(), Row.withSchema(CG_SCHEMA_3).addValues("user2", 21, "fr").build(), Row.withSchema(CG_SCHEMA_3).addValues("user2", 22, "fr").build(), Row.withSchema(CG_SCHEMA_3).addValues("user2", 23, "ar").build(), Row.withSchema(CG_SCHEMA_3).addValues("user2", 24, "ar").build())) .setRowSchema(CG_SCHEMA_3); // Expected outputs Schema expectedSchema = Schema.builder() 
.addRowField("key", SIMPLE_CG_KEY_SCHEMA) .addIterableField("pc1", FieldType.row(CG_SCHEMA_1)) .addIterableField("pc2", FieldType.row(CG_SCHEMA_2)) .addIterableField("pc3", FieldType.row(CG_SCHEMA_3)) .build(); Row key1Joined = Row.withSchema(expectedSchema) .addValue(Row.withSchema(SIMPLE_CG_KEY_SCHEMA).addValues("user1", "us").build()) .addIterable( Lists.newArrayList( Row.withSchema(CG_SCHEMA_1).addValues("user1", 1, "us").build(), Row.withSchema(CG_SCHEMA_1).addValues("user1", 2, "us").build())) .addIterable( Lists.newArrayList( Row.withSchema(CG_SCHEMA_2).addValues("user1", 9, "us").build(), Row.withSchema(CG_SCHEMA_2).addValues("user1", 10, "us").build())) .addIterable( Lists.newArrayList( Row.withSchema(CG_SCHEMA_3).addValues("user1", 17, "us").build(), Row.withSchema(CG_SCHEMA_3).addValues("user1", 18, "us").build())) .build(); Row key2Joined = Row.withSchema(expectedSchema) .addValue(Row.withSchema(SIMPLE_CG_KEY_SCHEMA).addValues("user1", "il").build()) .addIterable( Lists.newArrayList( Row.withSchema(CG_SCHEMA_1).addValues("user1", 3, "il").build(), Row.withSchema(CG_SCHEMA_1).addValues("user1", 4, "il").build())) .addIterable( Lists.newArrayList( Row.withSchema(CG_SCHEMA_2).addValues("user1", 11, "il").build(), Row.withSchema(CG_SCHEMA_2).addValues("user1", 12, "il").build())) .addIterable( Lists.newArrayList( Row.withSchema(CG_SCHEMA_3).addValues("user1", 19, "il").build(), Row.withSchema(CG_SCHEMA_3).addValues("user1", 20, "il").build())) .build(); Row key3Joined = Row.withSchema(expectedSchema) .addValue(Row.withSchema(SIMPLE_CG_KEY_SCHEMA).addValues("user2", "fr").build()) .addIterable( Lists.newArrayList( Row.withSchema(CG_SCHEMA_1).addValues("user2", 5, "fr").build(), Row.withSchema(CG_SCHEMA_1).addValues("user2", 6, "fr").build())) .addIterable( Lists.newArrayList( Row.withSchema(CG_SCHEMA_2).addValues("user2", 13, "fr").build(), Row.withSchema(CG_SCHEMA_2).addValues("user2", 14, "fr").build())) .addIterable( Lists.newArrayList( 
Row.withSchema(CG_SCHEMA_3).addValues("user2", 21, "fr").build(), Row.withSchema(CG_SCHEMA_3).addValues("user2", 22, "fr").build())) .build(); Row key4Joined = Row.withSchema(expectedSchema) .addValue(Row.withSchema(SIMPLE_CG_KEY_SCHEMA).addValues("user2", "ar").build()) .addIterable( Lists.newArrayList( Row.withSchema(CG_SCHEMA_1).addValues("user2", 7, "ar").build(), Row.withSchema(CG_SCHEMA_1).addValues("user2", 8, "ar").build())) .addIterable( Lists.newArrayList( Row.withSchema(CG_SCHEMA_2).addValues("user2", 15, "ar").build(), Row.withSchema(CG_SCHEMA_2).addValues("user2", 16, "ar").build())) .addIterable( Lists.newArrayList( Row.withSchema(CG_SCHEMA_3).addValues("user2", 23, "ar").build(), Row.withSchema(CG_SCHEMA_3).addValues("user2", 24, "ar").build())) .build(); PCollection<Row> joined1 = PCollectionTuple.of("pc1", pc1, "pc2", pc2, "pc3", pc3) .apply( "CoGroup1", CoGroup.join("pc1", By.fieldNames("user", "country")) .join("pc2", By.fieldNames("user2", "country2")) .join("pc3", By.fieldNames("user3", "country3"))); PCollection<Row> joined2 = PCollectionTuple.of("pc1", pc1, "pc2", pc2, "pc3", pc3) .apply( "CoGroup2", CoGroup.join("pc1", By.fieldNames("user", "country")) .join("pc2", By.fieldNames("user2", "country2").withSideInput()) .join("pc3", By.fieldNames("user3", "country3"))); PCollection<Row> joined3 = PCollectionTuple.of("pc1", pc1, "pc2", pc2, "pc3", pc3) .apply( "CoGroup3", CoGroup.join("pc1", By.fieldNames("user", "country")) .join("pc2", By.fieldNames("user2", "country2").withSideInput()) .join("pc3", By.fieldNames("user3", "country3").withSideInput())); List<Row> expected = ImmutableList.of(key1Joined, key2Joined, key3Joined, key4Joined); PAssert.that(joined1).satisfies(actual -> containsJoinedFields(expected, actual)); PAssert.that(joined2).satisfies(actual -> containsJoinedFields(expected, actual)); PAssert.that(joined3).satisfies(actual -> containsJoinedFields(expected, actual)); pipeline.run(); }
// Resolves B2 attributes for a path. Root returns EMPTY; a pending large-file upload reports
// only its appended size so far; a bucket is looked up via listBucket; a regular file is
// resolved by version id (retrying once with a refreshed id cache on NotfoundException).
// A file whose latest version carries a hide marker is treated as not found unless the
// caller asked for an explicit version.
@Override public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException { if(file.isRoot()) { return PathAttributes.EMPTY; } if(file.getType().contains(Path.Type.upload)) { // Pending large file upload final Write.Append append = new B2LargeUploadService(session, fileid, new B2WriteFeature(session, fileid)).append(file, new TransferStatus()); if(append.append) { return new PathAttributes().withSize(append.offset); } return PathAttributes.EMPTY; } if(containerService.isContainer(file)) { try { final B2BucketResponse info = session.getClient().listBucket(file.getName()); if(null == info) { throw new NotfoundException(file.getAbsolute()); } return this.toAttributes(info); } catch(B2ApiException e) { throw new B2ExceptionMappingService(fileid).map("Failure to read attributes of {0}", e, file); } catch(IOException e) { throw new DefaultIOExceptionMappingService().map(e); } } else { final String id = fileid.getVersionId(file); if(null == id) { return PathAttributes.EMPTY; } B2FileResponse response; try { response = this.findFileInfo(file, id); } catch(NotfoundException e) { // Try with reset cache after failure finding node id response = this.findFileInfo(file, fileid.getVersionId(file)); } final PathAttributes attr = this.toAttributes(response); if(attr.isDuplicate()) { // Throw failure if latest version has hide marker set and lookup was without explicit version if(StringUtils.isBlank(file.attributes().getVersionId())) { if(log.isDebugEnabled()) { log.debug(String.format("Latest version of %s is duplicate", file)); } throw new NotfoundException(file.getAbsolute()); } } return attr; } }
// Integration test: a started-but-unfinished large file upload must report size 0,
// be readable, and appear in the listing flagged with Path.Type.upload.
@Test public void testFindLargeUpload() throws Exception { final Path bucket = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume)); final Path file = new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file, Path.Type.upload)); final B2VersionIdProvider fileid = new B2VersionIdProvider(session); final B2StartLargeFileResponse startResponse = session.getClient().startLargeFileUpload( fileid.getVersionId(bucket), file.getName(), null, Collections.emptyMap()); final PathAttributes attributes = new B2AttributesFinderFeature(session, fileid).find(file); assertNotSame(PathAttributes.EMPTY, attributes); assertEquals(0L, attributes.getSize()); new B2ReadFeature(session, fileid).read(file, new TransferStatus(), new DisabledConnectionCallback()).close(); final Path found = new B2ObjectListService(session, fileid).list(bucket, new DisabledListProgressListener()).find( new SimplePathPredicate(file)); assertTrue(found.getType().contains(Path.Type.upload)); new B2ReadFeature(session, fileid).read(found, new TransferStatus(), new DisabledConnectionCallback()).close(); session.getClient().cancelLargeFileUpload(startResponse.getFileId()); }
// Registers a Reactive Streams subscriber. Subscribing is mutually exclusive with
// polling; the 'subscribing' flag is set under the monitor together with the super
// call so concurrent callers observe a consistent state.
@Override public void subscribe(final Subscriber<? super Row> subscriber) { if (polling) { throw new IllegalStateException("Cannot set subscriber if polling"); } synchronized (this) { subscribing = true; super.subscribe(subscriber); } }
// After a query-result error, subscribing on the Vert.x context must fail with
// IllegalStateException; a latch bridges the async assertion back to the test thread.
@Test public void shouldNotSubscribeIfFailed() throws Exception { // Given handleQueryResultError(); CountDownLatch latch = new CountDownLatch(1); context.runOnContext(v -> { // When / Then final Exception e = assertThrows(IllegalStateException.class, () -> queryResult.subscribe(subscriber)); assertThat(e.getMessage(), containsString("Cannot subscribe to failed publisher")); latch.countDown(); }); awaitLatch(latch); }
// Binds a subquery table segment: binds the inner SELECT in a child binder context
// (inheriting external table contexts), wraps it in a new bound SubquerySegment, keeps
// the original alias, and registers the subquery's projections in tableBinderContexts
// under the lower-cased alias (empty string when no alias is present).
public static SubqueryTableSegment bind(final SubqueryTableSegment segment, final SQLStatementBinderContext binderContext, final Map<String, TableSegmentBinderContext> tableBinderContexts, final Map<String, TableSegmentBinderContext> outerTableBinderContexts) { fillPivotColumnNamesInBinderContext(segment, binderContext); SQLStatementBinderContext subqueryBinderContext = new SQLStatementBinderContext(segment.getSubquery().getSelect(), binderContext.getMetaData(), binderContext.getCurrentDatabaseName()); subqueryBinderContext.getExternalTableBinderContexts().putAll(binderContext.getExternalTableBinderContexts()); SelectStatement boundSubSelect = new SelectStatementBinder(outerTableBinderContexts).bind(segment.getSubquery().getSelect(), subqueryBinderContext); SubquerySegment boundSubquerySegment = new SubquerySegment(segment.getSubquery().getStartIndex(), segment.getSubquery().getStopIndex(), boundSubSelect, segment.getSubquery().getText()); boundSubquerySegment.setSubqueryType(segment.getSubquery().getSubqueryType()); IdentifierValue subqueryTableName = segment.getAliasSegment().map(AliasSegment::getIdentifier).orElseGet(() -> new IdentifierValue("")); SubqueryTableSegment result = new SubqueryTableSegment(segment.getStartIndex(), segment.getStopIndex(), boundSubquerySegment); segment.getAliasSegment().ifPresent(result::setAlias); tableBinderContexts.put(subqueryTableName.getValue().toLowerCase(), new SimpleTableSegmentBinderContext( SubqueryTableBindUtils.createSubqueryProjections(boundSubSelect.getProjections().getProjections(), subqueryTableName, binderContext.getDatabaseType()))); return result; }
// Binding a subquery with an aliased projection must register the subquery under its
// table alias ("temp"), owner-qualify the projection column with that alias, and expose
// the projection under the column alias ("order_id_alias").
@Test void assertBindWithSubqueryProjectionAlias() { MySQLSelectStatement selectStatement = mock(MySQLSelectStatement.class); when(selectStatement.getDatabaseType()).thenReturn(databaseType); when(selectStatement.getFrom()).thenReturn(Optional.of(new SimpleTableSegment(new TableNameSegment(0, 0, new IdentifierValue("t_order"))))); ProjectionsSegment projectionsSegment = new ProjectionsSegment(0, 0); ColumnProjectionSegment columnProjectionSegment = new ColumnProjectionSegment(new ColumnSegment(0, 0, new IdentifierValue("order_id"))); columnProjectionSegment.setAlias(new AliasSegment(0, 0, new IdentifierValue("order_id_alias"))); projectionsSegment.getProjections().add(columnProjectionSegment); when(selectStatement.getProjections()).thenReturn(projectionsSegment); SubqueryTableSegment subqueryTableSegment = new SubqueryTableSegment(0, 0, new SubquerySegment(0, 0, selectStatement, "")); subqueryTableSegment.setAlias(new AliasSegment(0, 0, new IdentifierValue("temp"))); ShardingSphereMetaData metaData = createMetaData(); Map<String, TableSegmentBinderContext> tableBinderContexts = new LinkedHashMap<>(); SubqueryTableSegment actual = SubqueryTableSegmentBinder.bind(subqueryTableSegment, new SQLStatementBinderContext(metaData, DefaultDatabase.LOGIC_NAME, databaseType, Collections.emptySet()), tableBinderContexts, Collections.emptyMap()); assertTrue(actual.getAlias().isPresent()); assertTrue(tableBinderContexts.containsKey("temp")); List<ProjectionSegment> projectionSegments = new ArrayList<>(tableBinderContexts.get("temp").getProjectionSegments()); assertThat(projectionSegments.size(), is(1)); assertThat(projectionSegments.get(0), instanceOf(ColumnProjectionSegment.class)); assertTrue(((ColumnProjectionSegment) projectionSegments.get(0)).getColumn().getOwner().isPresent()); assertThat(((ColumnProjectionSegment) projectionSegments.get(0)).getColumn().getOwner().get().getIdentifier().getValue(), is("temp")); assertThat(((ColumnProjectionSegment) 
projectionSegments.get(0)).getColumn().getIdentifier().getValue(), is("order_id_alias")); }
// Returns the shared singleton instance (eagerly initialized in s_instance).
public static JsonXStream getInstance() { return s_instance; }
// Security regression test: unmarshalling a 'void' element must be rejected with
// ForbiddenClassException (XStream type whitelist) rather than instantiated.
@Test(expected=ForbiddenClassException.class, timeout=5000) public void testVoidElementUnmarshalling() throws Exception { XStream xstream = JsonXStream.getInstance(); xstream.fromXML("{'void':null}"); }
// Parses game/spam chat messages to keep tracked jewellery charge counts in sync and to
// fire break/crumble notifications. Each charged item has a regex matcher; the long
// else-if chain is order-sensitive (e.g. break messages arrive before "used" messages,
// which is why some branches reset to MAX + 1). Only GAMEMESSAGE and SPAM types are handled.
@Subscribe public void onChatMessage(ChatMessage event) { if (event.getType() == ChatMessageType.GAMEMESSAGE || event.getType() == ChatMessageType.SPAM) { String message = Text.removeTags(event.getMessage()); Matcher dodgyCheckMatcher = DODGY_CHECK_PATTERN.matcher(message); Matcher dodgyProtectMatcher = DODGY_PROTECT_PATTERN.matcher(message); Matcher dodgyBreakMatcher = DODGY_BREAK_PATTERN.matcher(message); Matcher bindingNecklaceCheckMatcher = BINDING_CHECK_PATTERN.matcher(message); Matcher bindingNecklaceUsedMatcher = BINDING_USED_PATTERN.matcher(message); Matcher ringOfForgingCheckMatcher = RING_OF_FORGING_CHECK_PATTERN.matcher(message); Matcher amuletOfChemistryCheckMatcher = AMULET_OF_CHEMISTRY_CHECK_PATTERN.matcher(message); Matcher amuletOfChemistryUsedMatcher = AMULET_OF_CHEMISTRY_USED_PATTERN.matcher(message); Matcher amuletOfChemistryBreakMatcher = AMULET_OF_CHEMISTRY_BREAK_PATTERN.matcher(message); Matcher amuletOfBountyCheckMatcher = AMULET_OF_BOUNTY_CHECK_PATTERN.matcher(message); Matcher amuletOfBountyUsedMatcher = AMULET_OF_BOUNTY_USED_PATTERN.matcher(message); Matcher chronicleAddMatcher = CHRONICLE_ADD_PATTERN.matcher(message); Matcher chronicleUseAndCheckMatcher = CHRONICLE_USE_AND_CHECK_PATTERN.matcher(message); Matcher slaughterActivateMatcher = BRACELET_OF_SLAUGHTER_ACTIVATE_PATTERN.matcher(message); Matcher slaughterCheckMatcher = BRACELET_OF_SLAUGHTER_CHECK_PATTERN.matcher(message); Matcher expeditiousActivateMatcher = EXPEDITIOUS_BRACELET_ACTIVATE_PATTERN.matcher(message); Matcher expeditiousCheckMatcher = EXPEDITIOUS_BRACELET_CHECK_PATTERN.matcher(message); Matcher bloodEssenceCheckMatcher = BLOOD_ESSENCE_CHECK_PATTERN.matcher(message); Matcher bloodEssenceExtractMatcher = BLOOD_ESSENCE_EXTRACT_PATTERN.matcher(message); Matcher braceletOfClayCheckMatcher = BRACELET_OF_CLAY_CHECK_PATTERN.matcher(message); if (message.contains(RING_OF_RECOIL_BREAK_MESSAGE)) { notifier.notify(config.recoilNotification(), "Your Ring of Recoil has shattered"); } 
else if (dodgyBreakMatcher.find()) { notifier.notify(config.dodgyNotification(), "Your dodgy necklace has crumbled to dust."); updateDodgyNecklaceCharges(MAX_DODGY_CHARGES); } else if (dodgyCheckMatcher.find()) { updateDodgyNecklaceCharges(Integer.parseInt(dodgyCheckMatcher.group(1))); } else if (dodgyProtectMatcher.find()) { updateDodgyNecklaceCharges(Integer.parseInt(dodgyProtectMatcher.group(1))); } else if (amuletOfChemistryCheckMatcher.find()) { updateAmuletOfChemistryCharges(Integer.parseInt(amuletOfChemistryCheckMatcher.group(1))); } else if (amuletOfChemistryUsedMatcher.find()) { final String match = amuletOfChemistryUsedMatcher.group(1); int charges = 1; if (!match.equals("one")) { charges = Integer.parseInt(match); } updateAmuletOfChemistryCharges(charges); } else if (amuletOfChemistryBreakMatcher.find()) { notifier.notify(config.amuletOfChemistryNotification(), "Your amulet of chemistry has crumbled to dust."); updateAmuletOfChemistryCharges(MAX_AMULET_OF_CHEMISTRY_CHARGES); } else if (amuletOfBountyCheckMatcher.find()) { updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyCheckMatcher.group(1))); } else if (amuletOfBountyUsedMatcher.find()) { updateAmuletOfBountyCharges(Integer.parseInt(amuletOfBountyUsedMatcher.group(1))); } else if (message.equals(AMULET_OF_BOUNTY_BREAK_TEXT)) { updateAmuletOfBountyCharges(MAX_AMULET_OF_BOUNTY_CHARGES); } else if (message.contains(BINDING_BREAK_TEXT)) { notifier.notify(config.bindingNotification(), BINDING_BREAK_TEXT); // This chat message triggers before the used message so add 1 to the max charges to ensure proper sync updateBindingNecklaceCharges(MAX_BINDING_CHARGES + 1); } else if (bindingNecklaceUsedMatcher.find()) { final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT); if (equipment.contains(ItemID.BINDING_NECKLACE)) { updateBindingNecklaceCharges(getItemCharges(ItemChargeConfig.KEY_BINDING_NECKLACE) - 1); } } else if (bindingNecklaceCheckMatcher.find()) { final String match = 
bindingNecklaceCheckMatcher.group(1); int charges = 1; if (!match.equals("one")) { charges = Integer.parseInt(match); } updateBindingNecklaceCharges(charges); } else if (ringOfForgingCheckMatcher.find()) { final String match = ringOfForgingCheckMatcher.group(1); int charges = 1; if (!match.equals("one")) { charges = Integer.parseInt(match); } updateRingOfForgingCharges(charges); } else if (message.equals(RING_OF_FORGING_USED_TEXT) || message.equals(RING_OF_FORGING_VARROCK_PLATEBODY)) { final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY); final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT); // Determine if the player smelted with a Ring of Forging equipped. if (equipment == null) { return; } if (equipment.contains(ItemID.RING_OF_FORGING) && (message.equals(RING_OF_FORGING_USED_TEXT) || inventory.count(ItemID.IRON_ORE) > 1)) { int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_RING_OF_FORGING) - 1, 0, MAX_RING_OF_FORGING_CHARGES); updateRingOfForgingCharges(charges); } } else if (message.equals(RING_OF_FORGING_BREAK_TEXT)) { notifier.notify(config.ringOfForgingNotification(), "Your ring of forging has melted."); // This chat message triggers before the used message so add 1 to the max charges to ensure proper sync updateRingOfForgingCharges(MAX_RING_OF_FORGING_CHARGES + 1); } else if (chronicleAddMatcher.find()) { final String match = chronicleAddMatcher.group(1); if (match.equals("one")) { setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1); } else { setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(match)); } } else if (chronicleUseAndCheckMatcher.find()) { setItemCharges(ItemChargeConfig.KEY_CHRONICLE, Integer.parseInt(chronicleUseAndCheckMatcher.group(1))); } else if (message.equals(CHRONICLE_ONE_CHARGE_TEXT)) { setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1); } else if (message.equals(CHRONICLE_EMPTY_TEXT) || message.equals(CHRONICLE_NO_CHARGES_TEXT)) { 
setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 0); } else if (message.equals(CHRONICLE_FULL_TEXT)) { setItemCharges(ItemChargeConfig.KEY_CHRONICLE, 1000); } else if (slaughterActivateMatcher.find()) { final String found = slaughterActivateMatcher.group(1); if (found == null) { updateBraceletOfSlaughterCharges(MAX_SLAYER_BRACELET_CHARGES); notifier.notify(config.slaughterNotification(), BRACELET_OF_SLAUGHTER_BREAK_TEXT); } else { updateBraceletOfSlaughterCharges(Integer.parseInt(found)); } } else if (slaughterCheckMatcher.find()) { updateBraceletOfSlaughterCharges(Integer.parseInt(slaughterCheckMatcher.group(1))); } else if (expeditiousActivateMatcher.find()) { final String found = expeditiousActivateMatcher.group(1); if (found == null) { updateExpeditiousBraceletCharges(MAX_SLAYER_BRACELET_CHARGES); notifier.notify(config.expeditiousNotification(), EXPEDITIOUS_BRACELET_BREAK_TEXT); } else { updateExpeditiousBraceletCharges(Integer.parseInt(found)); } } else if (expeditiousCheckMatcher.find()) { updateExpeditiousBraceletCharges(Integer.parseInt(expeditiousCheckMatcher.group(1))); } else if (bloodEssenceCheckMatcher.find()) { updateBloodEssenceCharges(Integer.parseInt(bloodEssenceCheckMatcher.group(1))); } else if (bloodEssenceExtractMatcher.find()) { updateBloodEssenceCharges(getItemCharges(ItemChargeConfig.KEY_BLOOD_ESSENCE) - Integer.parseInt(bloodEssenceExtractMatcher.group(1))); } else if (message.contains(BLOOD_ESSENCE_ACTIVATE_TEXT)) { updateBloodEssenceCharges(MAX_BLOOD_ESSENCE_CHARGES); } else if (braceletOfClayCheckMatcher.find()) { updateBraceletOfClayCharges(Integer.parseInt(braceletOfClayCheckMatcher.group(1))); } else if (message.equals(BRACELET_OF_CLAY_USE_TEXT) || message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN)) { final ItemContainer equipment = client.getItemContainer(InventoryID.EQUIPMENT); // Determine if the player mined with a Bracelet of Clay equipped. 
if (equipment != null && equipment.contains(ItemID.BRACELET_OF_CLAY)) { final ItemContainer inventory = client.getItemContainer(InventoryID.INVENTORY); // Charge is not used if only 1 inventory slot is available when mining in Prifddinas boolean ignore = inventory != null && inventory.count() == 27 && message.equals(BRACELET_OF_CLAY_USE_TEXT_TRAHAEARN); if (!ignore) { int charges = Ints.constrainToRange(getItemCharges(ItemChargeConfig.KEY_BRACELET_OF_CLAY) - 1, 0, MAX_BRACELET_OF_CLAY_CHARGES); updateBraceletOfClayCharges(charges); } } } else if (message.equals(BRACELET_OF_CLAY_BREAK_TEXT)) { notifier.notify(config.braceletOfClayNotification(), "Your bracelet of clay has crumbled to dust"); updateBraceletOfClayCharges(MAX_BRACELET_OF_CLAY_CHARGES); } } }
// A failed chronicle teleport message must reset the stored chronicle charge count to 0.
@Test public void testChronicleTeleportFail() { ChatMessage chatMessage = new ChatMessage(null, ChatMessageType.GAMEMESSAGE, "", CHRONICLE_TELEPORT_FAIL, "", 0); itemChargePlugin.onChatMessage(chatMessage); verify(configManager).setRSProfileConfiguration(ItemChargeConfig.GROUP, ItemChargeConfig.KEY_CHRONICLE, 0); }
public static String normalizeUri(String uri) throws URISyntaxException { // try to parse using the simpler and faster Camel URI parser String[] parts = CamelURIParser.fastParseUri(uri); if (parts != null) { // we optimized specially if an empty array is returned if (parts == URI_ALREADY_NORMALIZED) { return uri; } // use the faster and more simple normalizer return doFastNormalizeUri(parts); } else { // use the legacy normalizer as the uri is complex and may have unsafe URL characters return doComplexNormalizeUri(uri); } }
// Percent-encoded space (%20) in a query parameter is normalized to '+' while other
// percent escapes (here %C3%B8) are preserved.
@Test public void testNormalizeHttpEndpointURLEncodedParameter() throws Exception { String out = URISupport.normalizeUri("http://www.google.com?q=S%C3%B8ren%20Hansen"); assertEquals("http://www.google.com?q=S%C3%B8ren+Hansen", out); }
// Varargs convenience: folds (first, second, rest...) into one collection and delegates
// to containsAnyIn.
public final void containsAnyOf( @Nullable Object first, @Nullable Object second, @Nullable Object @Nullable ... rest) { containsAnyIn(accumulate(first, second, rest)); }
// Failure-message test: when expected values include null and actual values share a
// toString ("null" the string vs null the reference), the message must disambiguate types.
@Test public void iterableContainsAnyOfFailsWithSameToStringAndNullInExpectation() { expectFailureWhenTestingThat(asList("null", "abc")).containsAnyOf("def", null); assertFailureKeys( "expected to contain any of", "but did not", "though it did contain", "full contents"); assertFailureValue("expected to contain any of", "[def (java.lang.String), null (null type)]"); assertFailureValue("though it did contain", "[null] (java.lang.String)"); assertFailureValue("full contents", "[null, abc]"); }
/**
 * Returns the keys of all dispatchers that declare the given property.
 * When {@code propertyValue} is null, any declared value matches; otherwise the
 * declared value must equal it.
 */
public List<String> getDispatcherKeysForProperty(String propertyKey, @Nullable String propertyValue) {
    final ImmutableList.Builder<String> matching = ImmutableList.builder();
    for (final NotificationDispatcherMetadata metadata : dispatchersMetadata) {
        final String declared = metadata.getProperty(propertyKey);
        if (declared == null) {
            continue; // dispatcher does not declare this property at all
        }
        if (propertyValue != null && !declared.equals(propertyValue)) {
            continue; // declared, but with a different value than requested
        }
        matching.add(metadata.getDispatcherKey());
    }
    return matching.build();
}
// Null property value means "property merely present": only dispatchers declaring
// "on-project" are returned.
@Test public void shouldReturnDispatcherKeysForExistenceOfProperty() { assertThat(underTest.getDispatcherKeysForProperty("on-project", null)).containsOnly("Dispatcher1", "Dispatcher3"); }
// Fetches the column value from the merged result set and converts it to String
// (ResultSetUtils handles non-String values such as temporal types).
@Override public String getString(final int columnIndex) throws SQLException { return (String) ResultSetUtils.convertValue(mergeResultSet.getValue(columnIndex, String.class), String.class); }
// getString must stringify plain Strings, LocalDateTime (ISO form) and Timestamp
// (JDBC toString form) consistently.
@Test void assertGetStringWithColumnIndex() throws SQLException { when(mergeResultSet.getValue(1, String.class)).thenReturn("value"); LocalDateTime tempTime = LocalDateTime.of(2022, 12, 14, 0, 0); when(mergeResultSet.getValue(2, String.class)).thenReturn(tempTime); when(mergeResultSet.getValue(3, String.class)).thenReturn(Timestamp.valueOf(tempTime)); assertThat(shardingSphereResultSet.getString(1), is("value")); assertThat(shardingSphereResultSet.getString(2), is("2022-12-14T00:00")); assertThat(shardingSphereResultSet.getString(3), is("2022-12-14 00:00:00.0")); }
/**
 * Applies syntax-highlighting data to the given line builder.
 * Once a RangeOffsetConverterException occurs, the error is remembered and reported
 * for this and every subsequent line; highlighting is skipped from then on.
 *
 * @return the sticky read error, or empty when highlighting was applied successfully
 */
@Override
public Optional<ReadError> read(DbFileSources.Line.Builder lineBuilder) {
    if (readError != null) {
        // A previous line already failed: keep reporting the same error, do no more work.
        return Optional.of(readError);
    }
    try {
        processHighlightings(lineBuilder);
    } catch (RangeOffsetConverterException e) {
        readError = new ReadError(HIGHLIGHTING, lineBuilder.getLine());
        LOG.debug(format("Inconsistency detected in Highlighting data. Highlighting will be ignored for file '%s'", file.getKey()), e);
        return Optional.of(readError);
    }
    return Optional.empty();
}
// A single highlighting range spanning lines 1-3 must emit a per-line highlighting
// label on each of the three lines it covers.
@Test public void read_one_syntax_highlighting_on_many_lines() { // This highlighting begin on line 1 and finish on line 3 TextRange textRange = newTextRange(LINE_1, LINE_3); when(rangeOffsetConverter.offsetToString(textRange, LINE_1, DEFAULT_LINE_LENGTH)).thenReturn(RANGE_LABEL_1); when(rangeOffsetConverter.offsetToString(textRange, LINE_2, 6)).thenReturn(RANGE_LABEL_2); when(rangeOffsetConverter.offsetToString(textRange, LINE_3, DEFAULT_LINE_LENGTH)).thenReturn(RANGE_LABEL_3); HighlightingLineReader highlightingLineReader = newReader(of(textRange, ANNOTATION)); assertThat(highlightingLineReader.read(line1)).isEmpty(); DbFileSources.Line.Builder line2 = sourceData.addLinesBuilder().setSource("line 2").setLine(2); assertThat(highlightingLineReader.read(line2)).isEmpty(); assertThat(highlightingLineReader.read(line3)).isEmpty(); assertThat(line1.getHighlighting()).isEqualTo(RANGE_LABEL_1 + ",a"); assertThat(line2.getHighlighting()).isEqualTo(RANGE_LABEL_2 + ",a"); assertThat(line3.getHighlighting()).isEqualTo(RANGE_LABEL_3 + ",a"); }
// Sandboxing classloader: under the per-class loading lock, returns an already-loaded
// class, refuses to load after close, instruments classes the config claims for the
// sandbox (with timing measurement), and delegates everything else to the parent loader.
@Override public Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException { synchronized (getClassLoadingLock(name)) { Class<?> loadedClass = findLoadedClass(name); if (loadedClass != null) { return loadedClass; } if (isClosed) { throw new ClassNotFoundException("This ClassLoader is closed"); } if (config.shouldAcquire(name)) { loadedClass = PerfStatsCollector.getInstance() .measure("load sandboxed class", () -> maybeInstrumentClass(name)); } else { loadedClass = getParent().loadClass(name); } if (resolve) { resolveClass(loadedClass); } return loadedClass; } }
// Instrumentation must generate direct-access methods as private AND final so that
// mocking frameworks do not pick them up (avoids Mockito method-count explosions).
@Test public void soMockitoDoesntExplodeDueToTooManyMethods_shouldGenerateClassSpecificDirectAccessMethodWhichIsPrivateAndFinal() throws Exception { Class<?> exampleClass = loadClass(AnExampleClass.class); String methodName = shadow.directMethodName(exampleClass.getName(), "normalMethod"); Method directMethod = exampleClass.getDeclaredMethod(methodName, String.class, int.class); assertTrue(Modifier.isPrivate(directMethod.getModifiers())); assertTrue(Modifier.isFinal(directMethod.getModifiers())); }
public static Object remove(Object root, DataIterator it) { DataElement element; // construct the list of Data objects to remove // don't remove in place because iterator behavior with removals while iterating is undefined ArrayList<ToRemove> removeList = new ArrayList<>(); while ((element = it.next()) != null) { ToRemove toRemove = new ToRemove(element); removeList.add(toRemove); } // perform actual removal in reverse order to make sure deleting array elements starts with higher indices for (int i = removeList.size() - 1; i >= 0; i--) { ToRemove toRemove = removeList.get(i); if (toRemove.isRoot()) { root = null; } else { toRemove.remove(); } } return root; }
// Pre-order removal of "id" fields matching a path spec plus a value predicate (< 3):
// the first two ids disappear, the third (value 3) survives.
@Test public void testRemoveByPredicateAtPath() throws Exception { SimpleTestData data = IteratorTestData.createSimpleTestData(); SimpleDataElement el = data.getDataElement(); Builder.create(el.getValue(), el.getSchema(), IterationOrder.PRE_ORDER) .filterBy(Predicates.and(Predicates.pathMatchesPathSpec(IteratorTestData.PATH_TO_ID), IteratorTestData.LESS_THAN_3_CONDITION)) .remove(); assertTrue(data.getValue().getDataList("foo").getDataMap(0).getInteger("id") == null); assertTrue(data.getValue().getDataList("foo").getDataMap(1).getInteger("id") == null); assertEquals(3, data.getValue().getDataList("foo").getDataMap(2).getInteger("id").intValue()); }
// Delete preflight check: roots and containers can never be deleted; files without
// initialized ACLs are allowed through (best effort); trashed files require the
// CANPURGE permission, everything else requires CANDELETE. Violations raise a
// localized AccessDeniedException carrying the offending file.
@Override public void preflight(Path file) throws BackgroundException { if(file.isRoot() || new DeepboxPathContainerService(session).isContainer(file)) { throw new AccessDeniedException(MessageFormat.format(LocaleFactory.localizedString("Cannot delete {0}", "Error"), file.getName())).withFile(file); } final Acl acl = file.attributes().getAcl(); if(Acl.EMPTY == acl) { // Missing initialization log.warn(String.format("Unknown ACLs on %s", file)); return; } if(new DeepboxPathContainerService(session).isInTrash(file)) { if(!acl.get(new Acl.CanonicalUser()).contains(CANPURGE)) { if(log.isWarnEnabled()) { log.warn(String.format("ACL %s for %s does not include %s", acl, file, CANPURGE)); } throw new AccessDeniedException(MessageFormat.format(LocaleFactory.localizedString("Cannot delete {0}", "Error"), file.getName())).withFile(file); } } else if(!acl.get(new Acl.CanonicalUser()).contains(CANDELETE)) { if(log.isWarnEnabled()) { log.warn(String.format("ACL %s for %s does not include %s", acl, file, CANDELETE)); } throw new AccessDeniedException(MessageFormat.format(LocaleFactory.localizedString("Cannot delete {0}", "Error"), file.getName())).withFile(file); } }
@Test
public void testNodeDeleteRoot() throws Exception {
    final DeepboxIdProvider nodeid = new DeepboxIdProvider(session);
    final Path folder = new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final PathAttributes attributes = new DeepboxAttributesFinderFeature(session, nodeid).find(folder);
    // The root carries no ACL; preflight must still reject deleting it outright.
    assertEquals(Acl.EMPTY, attributes.getAcl());
    assertThrows(AccessDeniedException.class, () -> new DeepboxTrashFeature(session, nodeid).preflight(folder.withAttributes(attributes)));
}
/**
 * Resolves variable interpolations in {@code toInterpolate} via the context's secret source resolver.
 *
 * @param context       the configuration context providing the resolver
 * @param toInterpolate the raw string, possibly containing {@code ${...}} expressions
 * @return the resolved string
 * @deprecated call {@code context.getSecretSourceResolver().resolve(toInterpolate)} directly.
 */
@Deprecated
@Restricted(DoNotUse.class)
public static String resolve(ConfigurationContext context, String toInterpolate) {
    return context.getSecretSourceResolver().resolve(toInterpolate);
}
@Test
public void resolve_FileNotFound() {
    // A readFile lookup on a missing file must not throw; it logs the lookup error
    // and leaves the variable unresolved (both log lines are asserted below).
    resolve("${readFile:./hello-world-not-found.txt}");
    assertTrue(logContains(
            "Configuration import: Error looking up file './hello-world-not-found.txt' with UTF-8 encoding."));
    assertTrue(
            logContains("Configuration import: Found unresolved variable 'readFile:./hello-world-not-found.txt'."));
}
@Override public boolean scopesMatch(Set<String> expected, Set<String> actual) { Set<SystemScope> ex = fromStrings(expected); Set<SystemScope> act = fromStrings(actual); for (SystemScope actScope : act) { // first check to see if there's an exact match if (!ex.contains(actScope)) { return false; } else { // if we did find an exact match, we need to check the rest } } // if we got all the way down here, the setup passed return true; }
@Test
public void scopesMatch() {
    // Matching is set-based: order is ignored, subsets pass, extra scopes fail.
    Set<String> expected = Sets.newHashSet("foo", "bar", "baz");
    Set<String> actualGood = Sets.newHashSet("foo", "baz", "bar");
    Set<String> actualGood2 = Sets.newHashSet("foo", "bar");
    Set<String> actualBad = Sets.newHashSet("foo", "bob", "bar");
    // same scopes, different order
    assertThat(service.scopesMatch(expected, actualGood), is(true));
    // subset
    assertThat(service.scopesMatch(expected, actualGood2), is(true));
    // extra scope (fail)
    assertThat(service.scopesMatch(expected, actualBad), is(false));
}
public AggregateAnalysisResult analyze(
    final ImmutableAnalysis analysis,
    final List<SelectExpression> finalProjection
) {
    // Aggregate analysis is only meaningful for queries with a GROUP BY clause.
    if (!analysis.getGroupBy().isPresent()) {
        throw new IllegalArgumentException("Not an aggregate query");
    }
    // Walk the final projection and collect aggregate-function usage.
    final AggAnalyzer analyzer = new AggAnalyzer(analysis, functionRegistry);
    analyzer.process(finalProjection);
    return analyzer.result();
}
@Test
public void shouldCaptureHavingAggregateFunction() {
    // Given: an aggregate function used only inside the HAVING clause.
    givenHavingExpression(AGG_FUNCTION_CALL);

    // When:
    final AggregateAnalysisResult result = analyzer.analyze(analysis, selects);

    // Then: the HAVING aggregate is still reported among the aggregate functions.
    assertThat(result.getAggregateFunctions(), hasItem(AGG_FUNCTION_CALL));
}
public static DatabaseType get(final String url) {
    // Collect every registered DatabaseType whose URL patterns accept this URL.
    Collection<DatabaseType> matched = ShardingSphereServiceLoader.getServiceInstances(DatabaseType.class)
            .stream().filter(each -> matchURLs(url, each)).collect(Collectors.toList());
    ShardingSpherePreconditions.checkNotEmpty(matched, () -> new UnsupportedStorageTypeException(url));
    // Prefer the first type that is not a branch of a trunk database;
    // otherwise fall back to the first match.
    return matched.stream()
            .filter(each -> !each.getTrunkDatabaseType().isPresent())
            .findFirst()
            .orElseGet(() -> matched.iterator().next());
}
@Test
void assertGetDatabaseTypeWithTrunkURL() {
    // A URL matching a trunk type resolves to that trunk type's name.
    assertThat(DatabaseTypeFactory.get("jdbc:trunk://localhost:3306/test").getType(), is("TRUNK"));
}
/**
 * Deserializes JSON bytes from the given topic into the target type.
 *
 * <p>{@code null} payloads map to {@code null}. Any failure is rethrown as a
 * {@link SerializationException} with the parse location scrubbed so payload
 * data is not leaked into logs.
 */
@Override
public T deserialize(final String topic, final byte[] bytes) {
    try {
        if (bytes == null) {
            return null;
        }

        // don't use the JsonSchemaConverter to read this data because
        // we require that the MAPPER enables USE_BIG_DECIMAL_FOR_FLOATS,
        // which is not currently available in the standard converters
        final JsonNode value = isJsonSchema
            ? JsonSerdeUtils.readJsonSR(bytes, MAPPER, JsonNode.class)
            : MAPPER.readTree(bytes);

        // Coerce the raw JSON tree into the connect schema, starting at the root path "$".
        final Object coerced = enforceFieldType(
            "$",
            new JsonValueContext(value, schema)
        );

        if (LOG.isTraceEnabled()) {
            LOG.trace("Deserialized {}. topic:{}, row:{}", target, topic, coerced);
        }

        return SerdeUtils.castToTargetType(coerced, targetType);
    } catch (final Exception e) {
        // Clear location in order to avoid logging data, for security reasons
        if (e instanceof JsonParseException) {
            ((JsonParseException) e).clearLocation();
        }
        throw new SerializationException(
            "Failed to deserialize " + target + " from topic: " + topic + ". " + e.getMessage(), e);
    }
}
@Test
public void shouldDeserializeJsonObjectCorrectly() {
    // Given: a JSON-serialized order payload.
    final byte[] bytes = serializeJson(AN_ORDER);

    // When:
    final Struct result = deserializer.deserialize(SOME_TOPIC, bytes);

    // Then: the resulting Struct matches the expected order.
    assertThat(result, is(expectedOrder));
}
/** Returns the Android device model currently backing this instance. */
public AndroidModel getModel() {
    return mModel;
}
@Test
public void testSelectsDefaultModel() {
    org.robolectric.shadows.ShadowLog.stream = System.err;
    // With no build-model info supplied, the calculator falls back to the default model.
    ModelSpecificDistanceCalculator distanceCalculator = new ModelSpecificDistanceCalculator(null, null);
    assertEquals("Default model should be Nexus 5", "Nexus 5", distanceCalculator.getModel().getModel());
}
@Config
public static PrintStream fallbackLogger() {
    // Select the fallback logging stream by system property; unrecognised
    // values behave like the default "stderr".
    final String configured = getProperty(FALLBACK_LOGGER_PROP_NAME, "stderr");
    if ("stdout".equals(configured)) {
        return System.out;
    }
    if ("no_op".equals(configured)) {
        return NO_OP_LOGGER;
    }
    return System.err;
}
@Test
void fallbackLoggerReturnsSystemOutIfConfigured() {
    // Setting the property to "stdout" must select System.out; the property is
    // cleared afterwards so other tests see the default again.
    System.setProperty(FALLBACK_LOGGER_PROP_NAME, "stdout");
    try {
        assertSame(System.out, CommonContext.fallbackLogger());
    } finally {
        System.clearProperty(FALLBACK_LOGGER_PROP_NAME);
    }
}
/** Renders this heartbeat as a human-readable ping/pong label. */
@Override
public String toString() {
    if (this.ping) {
        return "services ping";
    }
    return "services pong";
}
@Test
void testToString() {
    // Each heartbeat constant renders its own label.
    Assertions.assertEquals("services ping", HeartbeatMessage.PING.toString());
    Assertions.assertEquals("services pong", HeartbeatMessage.PONG.toString());
}
/**
 * Starts the wID-checker ID-check flow: initializes a fresh app session for the
 * "upgrade_rda_widchecker" action, logs the start remotely (code 1307), and
 * returns the session id with the current epoch timestamp.
 */
@Override
public AppResponse process(Flow flow, AppRequest request) {
    appSession = new AppSession();
    appSession.setState(State.INITIALIZED.name());
    appSession.setFlow(WidCheckerIdCheckFlow.NAME);
    appSession.setAction("upgrade_rda_widchecker");
    appSession.setRdaAction("upgrade_rda_widchecker");

    // Remote audit log entry for starting this flow.
    digidClient.remoteLog("1307", Map.of());

    return new AppSessionResponse(appSession.getId(), Instant.now().getEpochSecond());
}
@Test
void processTest(){
    // The response must be a session response carrying the freshly created session's id.
    AppResponse appResponse = startWidCheckerIdCheck.process(mockedFlow, null);

    assertTrue(appResponse instanceof AppSessionResponse);
    assertEquals(startWidCheckerIdCheck.getAppSession().getId(), ((AppSessionResponse)appResponse).getAppSessionId());
}
static <T> T getWildcardMappedObject(final Map<String, T> mapping, final String query) { T value = mapping.get(query); if (value == null) { for (String key : mapping.keySet()) { // Turn the search key into a regex, using all characters but the * as a literal. String regex = Arrays.stream(key.split("\\*")) // split in parts that do not have a wildcard in them .map(Pattern::quote) // each part should be used as a literal (not as a regex or partial regex) .collect(Collectors.joining(".*")); // join all literal parts with a regex representation on the wildcard. if (key.endsWith("*")) { // the 'split' will have removed any trailing wildcard characters. Correct for that. regex += ".*"; } if (query.matches(regex)) { value = mapping.get(key); break; } } } return value; }
@Test
public void testSubdirWildcardExtensionFalse() throws Exception {
    // Setup test fixture: wildcard key restricted to the .jsp extension.
    final Map<String, Object> haystack = Map.of("myplugin/baz/*.jsp", new Object());

    // Execute system under test: a .gif must not match the *.jsp wildcard.
    final Object result = PluginServlet.getWildcardMappedObject(haystack, "myplugin/baz/foo.gif");

    // Verify results.
    assertNull(result);
}
/**
 * Issues a health-check RPC, applying the supplied deadline to the stub for this call.
 *
 * @param request  the health check request
 * @param deadline absolute deadline for the RPC
 * @return a future completing with the health status response
 */
public ListenableFuture<HealthCheckResponse> checkHealthWithDeadline(
    HealthCheckRequest request, Deadline deadline) {
  return healthService.withDeadline(deadline).check(request);
}
@Test
public void checkHealth_returnServingHealthResponse() throws Exception {
    HealthCheckRequest request = HealthCheckRequest.getDefaultInstance();
    // Stub health service that always reports SERVING and completes immediately.
    HealthImplBase healthImpl =
        new HealthImplBase() {
          @Override
          public void check(
              HealthCheckRequest request, StreamObserver<HealthCheckResponse> responseObserver) {
            responseObserver.onNext(
                HealthCheckResponse.newBuilder().setStatus(ServingStatus.SERVING).build());
            responseObserver.onCompleted();
          }
        };
    serviceRegistry.addService(healthImpl);

    ListenableFuture<HealthCheckResponse> health =
        pluginService.checkHealthWithDeadline(request, DEADLINE_DEFAULT);

    // The in-process stub completes synchronously, so the future is already done.
    assertThat(health.isDone()).isTrue();
    assertThat(health.get().getStatus()).isEqualTo(ServingStatus.SERVING);
}
@Override
public void writeToParcel(Parcel dest, int flags) {
    // Write order is the contract: it must mirror the order the reading
    // constructor consumes fields in — header, report text, then the report Uri.
    dest.writeString(crashHeader);
    dest.writeString(crashReportText);
    dest.writeParcelable(fullReport, 0);
}
@Test
public void testHappyPath() {
    String header = "header";
    String crashReport = "a huge crash report";
    Uri someFile = Uri.fromFile(new File("/blah/blah.txt"));
    BugReportDetails details = new BugReportDetails(header, crashReport, someFile);
    // Constructor stores the exact references passed in.
    Assert.assertSame(header, details.crashHeader);
    Assert.assertSame(crashReport, details.crashReportText);
    Assert.assertSame(someFile, details.fullReport);

    // Round-trip through a Parcel and verify all fields survive.
    final Parcel parcel = Parcel.obtain();
    parcel.setDataPosition(0);
    details.writeToParcel(parcel, 0);
    parcel.setDataPosition(0);
    BugReportDetails read = new BugReportDetails(parcel);
    Assert.assertEquals(details.crashHeader, read.crashHeader);
    Assert.assertEquals(details.crashReportText, read.crashReportText);
    Assert.assertEquals(details.fullReport, read.fullReport);
}
String scheduleRecurrently(String id, JobDetails jobDetails, Schedule schedule, ZoneId zoneId) {
    // Bundle the parameters into a RecurringJob and delegate to the common overload.
    return scheduleRecurrently(new RecurringJob(id, jobDetails, schedule, zoneId));
}
@Test
void scheduleRecurrentlyValidatesScheduleDoesThrowExceptionWhenUsingNotAnInMemoryStorageProvider() {
    // With a non-in-memory storage provider, sub-5-second cron schedules are rejected
    // because they are below the smallest supported poll interval.
    AbstractJobScheduler jobScheduler = jobScheduler(new H2StorageProvider(null, NO_VALIDATE));
    RecurringJob recurringJob = aDefaultRecurringJob().withCronExpression("* * * * * *").build();

    assertThatCode(() -> jobScheduler.scheduleRecurrently(recurringJob))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage("The smallest supported duration between recurring job instances is 5 seconds (because of the smallest supported pollInterval).");
}
/**
 * Ensures {@code arg} is non-null.
 *
 * @param arg  the value to check
 * @param text message for the exception when {@code arg} is {@code null}
 * @return {@code arg} unchanged
 * @throws NullPointerException if {@code arg} is {@code null}
 */
public static <T> T checkNotNull(T arg, String text) {
    if (arg != null) {
        return arg;
    }
    throw new NullPointerException(text);
}
@Test
public void testCheckNotNull() {
    Exception actualEx = null;
    // Non-null input passes through without throwing.
    try {
        ObjectUtil.checkNotNull(NON_NULL_OBJECT, NON_NULL_NAME);
    } catch (Exception e) {
        actualEx = e;
    }
    assertNull(actualEx, TEST_RESULT_NULLEX_NOK);

    // Null input must raise a NullPointerException.
    actualEx = null;
    try {
        ObjectUtil.checkNotNull(NULL_OBJECT, NULL_NAME);
    } catch (Exception e) {
        actualEx = e;
    }
    assertNotNull(actualEx, TEST_RESULT_NULLEX_OK);
    assertTrue(actualEx instanceof NullPointerException, TEST_RESULT_EXTYPE_NOK);
}
/**
 * Reads the bytes of a single named entry from a zip archive, using the
 * default charset for entry names. Delegates to the charset-aware overload.
 *
 * @param zipFilePath path of the zip archive
 * @param name        entry name inside the archive
 * @return the entry's raw bytes
 */
public static byte[] unzipFileBytes(String zipFilePath, String name) {
    return unzipFileBytes(zipFilePath, DEFAULT_CHARSET, name);
}
@Test
@Disabled // depends on a local file on drive e:, so it cannot run in CI
public void unzipFileBytesTest() {
    // Extract a single image entry (GBK entry names) and verify bytes were read.
    final byte[] fileBytes = ZipUtil.unzipFileBytes(FileUtil.file("e:/02 电力相关设备及服务2-241-.zip"), CharsetUtil.CHARSET_GBK, "images/CE-EP-HY-MH01-ES-0001.jpg");
    assertNotNull(fileBytes);
}
/** Submits the task to the dedicated long-polling executor. */
public static void executeLongPolling(Runnable runnable) {
    LONG_POLLING_EXECUTOR.execute(runnable);
}
@Test
public void executeLongPollingTest() {
    // Smoke test: the submitted task runs on the long-polling executor's thread.
    ConfigExecutor.executeLongPolling(() -> log.info(Thread.currentThread().getName()));
}
/**
 * Sends the message's events asynchronously, forwarding each response to
 * {@code resultCallback} and signalling Camel via {@code callback} when done.
 *
 * <p>On error the exception is attached to the exchange instead of being thrown,
 * so the route's error handling decides what happens next. Always returns
 * {@code false}, indicating asynchronous processing to the caller.
 */
public boolean sendEvents(
        final Message inMessage, final Consumer<StitchResponse> resultCallback, final AsyncCallback callback) {
    sendAsyncEvents(inMessage)
            .subscribe(resultCallback, error -> {
                // error but we continue
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Error processing async exchange with error: {}", error.getMessage());
                }
                // Surface the failure on the exchange rather than throwing.
                inMessage.getExchange().setException(error);
                callback.done(false);
            }, () -> {
                // we are done from everything, so mark it as sync done
                LOG.trace("All events with exchange have been sent successfully.");
                callback.done(false);
            });

    return false;
}
@Test
void testErrorHandle() {
    // Configure a minimal table/schema so the producer can build a request.
    final StitchConfiguration configuration = new StitchConfiguration();
    configuration.setTableName("table_1");
    configuration.setStitchSchema(StitchSchema.builder().addKeyword("field_1", "string").build());
    configuration.setKeyNames("field_1");

    final StitchMessage message = StitchMessage.builder()
            .withData("field_1", "data")
            .withSequence(0)
            .build();

    final Exchange exchange = new DefaultExchange(context);

    exchange.getMessage().setBody(message);

    // The client is rigged to fail; errors must land on the exchange, not be thrown.
    final StitchProducerOperations operations = new StitchProducerOperations(new TestErrorClient(), configuration);

    operations.sendEvents(exchange.getMessage(), response -> {
    }, doneSync -> {
    });

    // The exchange carries a StitchException describing the failed response.
    assertNotNull(exchange.getException());
    assertTrue(exchange.getException() instanceof StitchException);
    assertNotNull(((StitchException) exchange.getException()).getResponse());
    assertEquals(400, ((StitchException) exchange.getException()).getResponse().getHttpStatusCode());
    assertEquals("Error", ((StitchException) exchange.getException()).getResponse().getStatus());
    assertEquals("Not good!", ((StitchException) exchange.getException()).getResponse().getMessage());
}
/** Returns the identifier quote character used by this dialect. */
@Override
public QuoteCharacter getQuoteCharacter() {
    return QuoteCharacter.QUOTE;
}
@Test
void assertGetQuoteCharacter() {
    // The dialect metadata must report QUOTE as its identifier quote character.
    assertThat(dialectDatabaseMetaData.getQuoteCharacter(), is(QuoteCharacter.QUOTE));
}
/**
 * Computes forward groups: sets of job vertices connected (transitively) by
 * forward edges, which must therefore share the same parallelism.
 *
 * <p>Vertices must be supplied in topological order so that a vertex's forward
 * producers have already been assigned a group when the vertex is visited.
 * Singleton groups are dropped from the result; only vertices that actually
 * share a group with another vertex appear in the returned map.
 *
 * @param topologicallySortedVertices vertices in topological order
 * @param forwardProducersRetriever   yields the forward-edge producers of a vertex
 * @return mapping from vertex id to its (multi-vertex) forward group
 */
public static Map<JobVertexID, ForwardGroup> computeForwardGroups(
        final Iterable<JobVertex> topologicallySortedVertices,
        final Function<JobVertex, Set<JobVertex>> forwardProducersRetriever) {
    // IdentityHashMap: group membership is keyed by vertex object identity.
    final Map<JobVertex, Set<JobVertex>> vertexToGroup = new IdentityHashMap<>();

    // iterate all the vertices which are topologically sorted
    for (JobVertex vertex : topologicallySortedVertices) {
        Set<JobVertex> currentGroup = new HashSet<>();
        currentGroup.add(vertex);
        vertexToGroup.put(vertex, currentGroup);

        for (JobVertex producerVertex : forwardProducersRetriever.apply(vertex)) {
            final Set<JobVertex> producerGroup = vertexToGroup.get(producerVertex);

            if (producerGroup == null) {
                // Topological order guarantees producers were processed first;
                // a missing group indicates a bug in group building.
                throw new IllegalStateException(
                        "Producer task "
                                + producerVertex.getID()
                                + " forward group is null"
                                + " while calculating forward group for the consumer task "
                                + vertex.getID()
                                + ". This should be a forward group building bug.");
            }

            if (currentGroup != producerGroup) {
                // Merge this vertex's group with the producer's group.
                currentGroup =
                        VertexGroupComputeUtil.mergeVertexGroups(
                                currentGroup, producerGroup, vertexToGroup);
            }
        }
    }

    final Map<JobVertexID, ForwardGroup> ret = new HashMap<>();
    for (Set<JobVertex> vertexGroup : VertexGroupComputeUtil.uniqueVertexGroups(vertexToGroup)) {
        // Singleton groups impose no constraint, so they are omitted.
        if (vertexGroup.size() > 1) {
            ForwardGroup forwardGroup = new ForwardGroup(vertexGroup);
            for (JobVertexID jobVertexId : forwardGroup.getJobVertexIds()) {
                ret.put(jobVertexId, forwardGroup);
            }
        }
    }
    return ret;
}
@Test
void testTwoInputsMergesIntoOne() throws Exception {
    // v3 consumes forward edges from both v1 and v2, so all three must end up
    // in a single forward group; v4's edge is not forward and stays out.
    JobVertex v1 = new JobVertex("v1");
    JobVertex v2 = new JobVertex("v2");
    JobVertex v3 = new JobVertex("v3");
    JobVertex v4 = new JobVertex("v4");

    v3.connectNewDataSetAsInput(
            v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);
    v1.getProducedDataSets().get(0).getConsumers().get(0).setForward(true);
    v3.connectNewDataSetAsInput(
            v2, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    v2.getProducedDataSets().get(0).getConsumers().get(0).setForward(true);
    v4.connectNewDataSetAsInput(
            v3, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

    Set<ForwardGroup> groups = computeForwardGroups(v1, v2, v3, v4);

    // One group containing three vertices.
    checkGroupSize(groups, 1, 3);
}
/**
 * Buffers one element, converting it via the element converter.
 *
 * <p>Applies backpressure by flushing until the buffer has room, then buffers
 * the converted entry and triggers a flush that only fires when flushing
 * conditions are met.
 */
@Override
public void write(InputT element, Context context) throws IOException, InterruptedException {
    while (bufferedRequestEntries.size() >= maxBufferedRequests) {
        flush();
    }
    addEntryToBuffer(elementConverter.apply(element, context), false);
    nonBlockingFlush();
}
@Test
public void testThatTimeBasedBatchPicksUpAllRelevantItemsUpUntilExpiryOfTimer() throws Exception {
    // Batches of 10 with a 100ms time-in-buffer limit: 90 records flush by size,
    // the remaining 8 flush when the timer expires at t=100.
    AsyncSinkWriterImpl sink =
            new AsyncSinkWriterImplBuilder()
                    .context(sinkInitContext)
                    .maxBatchSize(10)
                    .maxInFlightRequests(20)
                    .maxBatchSizeInBytes(10_000)
                    .maxTimeInBufferMS(100)
                    .maxRecordSizeInBytes(10_000)
                    .simulateFailures(true)
                    .build();
    TestProcessingTimeService tpts = sinkInitContext.getTestProcessingTimeService();
    for (int i = 0; i < 98; i++) {
        tpts.setCurrentTime(i);
        sink.write(String.valueOf(i));
    }
    tpts.setCurrentTime(99L);
    assertThat(res.size()).isEqualTo(90);
    tpts.setCurrentTime(100L);
    assertThat(res.size()).isEqualTo(98);
}
/**
 * Extracts the YAML payload from an {@code EXECUTE PIPELINE WITHYAML (...)} statement.
 *
 * @param statement the full statement text
 * @return the content between the parentheses, or {@code ""} when the
 *         statement does not match the expected form
 */
@Nullable
public String getPipelineConfigure(String statement) {
    Matcher matcher = Pattern
            .compile("(?is)^EXECUTE\\s+PIPELINE\\s+WITHYAML\\s+\\((.+)\\)")
            .matcher(statement);
    return matcher.find() ? matcher.group(1) : "";
}
@Test
void getPipelineConfigure() {
    FlinkCDCPipelineOperation operation = new FlinkCDCPipelineOperation();
    // The keyword matching is case-insensitive and the payload may span lines.
    String configure = "EXECUTE PIPELINE withYaml (a.b=1\n a.b.c=2)";
    String result = operation.getPipelineConfigure(configure);
    assertEquals("a.b=1\n a.b.c=2", result);
}
/**
 * Builds the execution plan: creates it, registers configured generic type
 * information and cached files on it, and logs type-registration details.
 *
 * @return the fully configured plan
 */
public Plan generate() {
    final Plan plan = createPlan();
    registerGenericTypeInfoIfConfigured(plan);
    registerCachedFiles(plan);

    logTypeRegistrationDetails();
    return plan;
}
@Test
void testGenerate() {
    final String fileA = "fileA";
    final String fileB = "fileB";

    // Expected cached-file registrations (one executable, one not).
    final Map<String, DistributedCache.DistributedCacheEntry> originalArtifacts =
            Stream.of(
                            Tuple2.of(
                                    fileA,
                                    new DistributedCache.DistributedCacheEntry("test1", true)),
                            Tuple2.of(
                                    fileB,
                                    new DistributedCache.DistributedCacheEntry("test2", false)))
                    .collect(Collectors.toMap(x -> x.f0, x -> x.f1));

    final ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(10);
    env.registerCachedFile("test1", fileA, true);
    env.registerCachedFile("test2", fileB, false);

    env.fromElements(1, 3, 5)
            .map((MapFunction<Integer, String>) value -> String.valueOf(value + 1))
            .writeAsText("/tmp/csv");

    final Plan generatedPlanUnderTest = env.createProgramPlan("test");

    final Map<String, DistributedCache.DistributedCacheEntry> retrievedArtifacts =
            generatedPlanUnderTest.getCachedFiles().stream()
                    .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));

    // The plan must reflect the sink, parallelism, config, job name, and cached files.
    assertThat(generatedPlanUnderTest.getDataSinks()).hasSize(1);
    assertThat(generatedPlanUnderTest.getDefaultParallelism()).isEqualTo(10);
    assertThat(generatedPlanUnderTest.getExecutionConfig()).isEqualTo(env.getConfig());
    assertThat(generatedPlanUnderTest.getJobName()).isEqualTo("test");

    assertThat(retrievedArtifacts)
            .hasSameSizeAs(originalArtifacts)
            .containsEntry(fileA, originalArtifacts.get(fileA))
            .containsEntry(fileB, originalArtifacts.get(fileB));
}
/** Creates a new, not-yet-bound HTTP/2 frame stream instance. */
DefaultHttp2FrameStream newStream() {
    return new DefaultHttp2FrameStream();
}
@Test
public void streamIdentifiersExhausted() throws Http2Exception {
    // Occupy the highest usable server stream id so the next allocation must fail.
    int maxServerStreamId = Integer.MAX_VALUE - 1;

    assertNotNull(frameCodec.connection().local().createStream(maxServerStreamId, false));

    Http2FrameStream stream = frameCodec.newStream();
    assertNotNull(stream);

    ChannelPromise writePromise = channel.newPromise();
    channel.writeAndFlush(new DefaultHttp2HeadersFrame(new DefaultHttp2Headers()).stream(stream), writePromise);

    // Exhausting stream ids triggers a NO_ERROR GOAWAY and fails the pending write.
    Http2GoAwayFrame goAwayFrame = inboundHandler.readInbound();
    assertNotNull(goAwayFrame);
    assertEquals(NO_ERROR.code(), goAwayFrame.errorCode());
    assertEquals(Integer.MAX_VALUE, goAwayFrame.lastStreamId());
    goAwayFrame.release();
    assertThat(writePromise.cause(), instanceOf(Http2NoMoreStreamIdsException.class));
}
/** Records a sample into its metric: gauges store the magnitude, counters accumulate it. */
void put(Sample x) {
    final UntypedMetric metric = get(x);
    final Measurement measurement = x.getMeasurement();
    switch (x.getMetricType()) {
        case GAUGE:
            // A gauge records the latest observed magnitude.
            metric.put(measurement.getMagnitude());
            break;
        case COUNTER:
            // A counter adds the magnitude to its running total.
            metric.add(measurement.getMagnitude());
            break;
        default:
            throw new IllegalArgumentException("Unsupported metric type: " + x.getMetricType());
    }
}
@Test
final void testPutSampleWithUnsupportedType() {
    // AssumedType.NONE is neither GAUGE nor COUNTER, so put() must throw.
    boolean caughtIt = false;
    try {
        bucket.put(new Sample(new Measurement(1), new Identifier("nalle", null), AssumedType.NONE));
    } catch (Exception e) {
        caughtIt = true;
    }
    assertTrue(caughtIt);
}