focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Transforms, converts and dispatches every record currently queued in {@code toSend}
 * to the Kafka producer.
 *
 * @return {@code true} if the whole batch was handed to the producer; {@code false}
 *         if a retriable send error occurred, in which case {@code toSend} is trimmed
 *         to the still-unsent tail so the next call retries from there.
 */
boolean sendRecords() {
    int processed = 0;
    recordBatch(toSend.size());
    // counter is only null when toSend is empty, in which case the loop below never runs.
    final SourceRecordWriteCounter counter =
        toSend.isEmpty() ? null : new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup);
    for (final SourceRecord preTransformRecord : toSend) {
        ProcessingContext<SourceRecord> context = new ProcessingContext<>(preTransformRecord);
        final SourceRecord record = transformationChain.apply(context, preTransformRecord);
        final ProducerRecord<byte[], byte[]> producerRecord = convertTransformedRecord(context, record);
        // A null producer record or a failed context means the record was filtered out
        // or failed conversion under error tolerance; count it as skipped and move on.
        if (producerRecord == null || context.failed()) {
            counter.skipRecord();
            recordDropped(preTransformRecord);
            processed++;
            continue;
        }
        log.trace("{} Appending record to the topic {} with key {}, value {}",
            this, record.topic(), record.key(), record.value());
        Optional<SubmittedRecords.SubmittedRecord> submittedRecord =
            prepareToSendRecord(preTransformRecord, producerRecord);
        try {
            final String topic = producerRecord.topic();
            maybeCreateTopic(topic);
            // The callback runs asynchronously on the producer I/O thread.
            producer.send(
                producerRecord,
                (recordMetadata, e) -> {
                    if (e != null) {
                        if (producerClosed) {
                            log.trace("{} failed to send record to {}; this is expected as the producer has already been closed", AbstractWorkerSourceTask.this, topic, e);
                        } else {
                            log.error("{} failed to send record to {}: ", AbstractWorkerSourceTask.this, topic, e);
                        }
                        log.trace("{} Failed record: {}", AbstractWorkerSourceTask.this, preTransformRecord);
                        producerSendFailed(context, false, producerRecord, preTransformRecord, e);
                        // Under ALL tolerance the failure is absorbed: skip the record and
                        // ack it so offset commits are not blocked by it.
                        if (retryWithToleranceOperator.getErrorToleranceType() == ToleranceType.ALL) {
                            counter.skipRecord();
                            submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::ack);
                        }
                    } else {
                        counter.completeRecord();
                        log.trace("{} Wrote record successfully: topic {} partition {} offset {}",
                            AbstractWorkerSourceTask.this,
                            recordMetadata.topic(), recordMetadata.partition(), recordMetadata.offset());
                        recordSent(preTransformRecord, producerRecord, recordMetadata);
                        submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::ack);
                        if (topicTrackingEnabled) {
                            recordActiveTopic(producerRecord.topic());
                        }
                    }
                });
            // Note that this will cause retries to take place within a transaction
        } catch (RetriableException | org.apache.kafka.common.errors.RetriableException e) {
            log.warn("{} Failed to send record to topic '{}' and partition '{}'. Backing off before retrying: ",
                this, producerRecord.topic(), producerRecord.partition(), e);
            // Keep only the unsent tail so the retry resumes where we stopped.
            toSend = toSend.subList(processed, toSend.size());
            submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::drop);
            counter.retryRemaining();
            return false;
        } catch (ConnectException e) {
            log.warn("{} Failed to send record to topic '{}' and partition '{}' due to an unrecoverable exception: ",
                this, producerRecord.topic(), producerRecord.partition(), e);
            log.trace("{} Failed to send {} with unrecoverable exception: ", this, producerRecord, e);
            throw e;
        } catch (KafkaException e) {
            // Synchronous producer failure: delegate to the subclass-specific handler.
            producerSendFailed(context, true, producerRecord, preTransformRecord, e);
        }
        processed++;
        recordDispatched(preTransformRecord);
    }
    toSend = null;
    batchDispatched();
    return true;
}
// Verifies that Connect headers attached to a SourceRecord are converted and
// propagated onto the produced Kafka record along with the serialized key/value.
@Test
public void testHeaders() {
    Headers headers = new RecordHeaders().add("header_key", "header_value".getBytes());
    org.apache.kafka.connect.header.Headers connectHeaders = new ConnectHeaders()
        .add("header_key", new SchemaAndValue(Schema.STRING_SCHEMA, "header_value"));
    createWorkerTask();
    // Stub the producer/transformation/topic-admin interactions.
    expectSendRecord(headers);
    expectApplyTransformationChain();
    expectTopicCreation(TOPIC);
    workerTask.toSend = Collections.singletonList(
        new SourceRecord(PARTITION, OFFSET, TOPIC, null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, null, connectHeaders)
    );
    workerTask.sendRecords();
    // Capture the record actually handed to the producer and check its contents.
    ArgumentCaptor<ProducerRecord<byte[], byte[]>> sent = verifySendRecord();
    assertArrayEquals(SERIALIZED_KEY, sent.getValue().key());
    assertArrayEquals(SERIALIZED_RECORD, sent.getValue().value());
    assertEquals(headers, sent.getValue().headers());
    verifyTaskGetTopic();
    verifyTopicCreation();
}
/**
 * Parses a YANG instance-identifier string (e.g. {@code /module:node/list[key="v"]})
 * into a {@link ResourceId}.
 *
 * @param input instance identifier path, segments separated by {@code /}
 * @return parsed ResourceId, or {@code null} if the input contains no path segments
 *         (e.g. {@code "/"})
 */
public static ResourceId fromInstanceIdentifier(String input) {
    String[] nodes = input.split("/");
    List<NodeKey> nodeKeys = Arrays.stream(nodes)
        .filter(s -> !s.isEmpty())
        .map(ResourceIds::toNodeKey)
        .collect(Collectors.toList());
    if (nodeKeys.isEmpty()) {
        return null;
    }
    Builder builder = ResourceId.builder();
    // fill-in null (=inherit from parent) nameSpace
    String lastNamespace = null;
    for (NodeKey nodeKey : nodeKeys) {
        // A node with an explicit namespace becomes the inherited default for children.
        if (nodeKey.schemaId().namespace() != null) {
            lastNamespace = nodeKey.schemaId().namespace();
        }
        if (nodeKey instanceof LeafListKey) {
            builder.addLeafListBranchPoint(nodeKey.schemaId().name(),
                firstNonNull(nodeKey.schemaId().namespace(), lastNamespace),
                ((LeafListKey) nodeKey).value());
        } else if (nodeKey instanceof ListKey) {
            builder.addBranchPointSchema(nodeKey.schemaId().name(), lastNamespace);
            // Each key leaf inherits the namespace when it doesn't carry its own.
            for (KeyLeaf kl : ((ListKey) nodeKey).keyLeafs()) {
                builder.addKeyLeaf(kl.leafSchema().name(),
                    firstNonNull(kl.leafSchema().namespace(), lastNamespace),
                    kl.leafValue());
            }
        } else {
            builder.addBranchPointSchema(nodeKey.schemaId().name(), lastNamespace);
        }
    }
    return builder.build();
}
// Covers list-key parsing, paths relative to the virtual root, the empty path
// ("/" yields null), and round-tripping a device path through prefixDcsRoot.
@Test
public void testFromInstanceIdentifier() {
    ResourceId eth0 = ResourceId.builder()
        .addBranchPointSchema("interfaces", "ietf-interfaces")
        .addBranchPointSchema("interface", "ietf-interfaces")
        .addKeyLeaf("name", "ietf-interfaces", "eth0")
        .build();
    assertThat(ResourceIds.fromInstanceIdentifier("/ietf-interfaces:interfaces/interface[name=\"eth0\"]"),
        is(eth0));
    assertThat("fromInstanceIdentifier return path relative to virtual root",
        ResourceIds.fromInstanceIdentifier("/org.onosproject.dcs:devices"),
        is(ResourceIds.relativize(ResourceIds.ROOT_ID, DEVICES_ID)));
    assertThat(ResourceIds.prefixDcsRoot(
        ResourceIds.fromInstanceIdentifier("/org.onosproject.dcs:devices")),
        is(DEVICES_ID));
    // Bare "/" contains no segments and must map to null.
    assertThat(ResourceIds.fromInstanceIdentifier("/"), is(nullValue()));
    DeviceId deviceId = DeviceId.deviceId("test:device-identifier");
    assertThat(ResourceIds.prefixDcsRoot(
        fromInstanceIdentifier("/org.onosproject.dcs:devices/device[device-id=\"test:device-identifier\"]")),
        is(toResourceId(deviceId)));
}
/**
 * Sends a desktop notification when a spawned ground item is either explicitly
 * highlighted by the user or valuable enough for the configured tier.
 */
private void notifyHighlightedItem(GroundItem item) {
    final NamedQuantity lookupKey = new NamedQuantity(item);
    final boolean highlighted = config.notifyHighlightedDrops()
        && TRUE.equals(highlightedItems.getUnchecked(lookupKey));
    final boolean valuable = config.notifyTier() != HighlightTier.OFF
        && getValueByMode(item.getGePrice(), item.getHaPrice()) > config.notifyTier().getValueFromTier(config)
        && FALSE.equals(hiddenItems.getUnchecked(lookupKey));

    // Explicit highlights win over tier-based notifications; anything else is silent.
    final String dropType;
    if (highlighted) {
        dropType = "highlighted";
    } else if (valuable) {
        dropType = "valuable";
    } else {
        return;
    }

    String message = "You received a " + dropType + " drop: " + item.getName();
    if (item.getQuantity() > 1) {
        message += " (" + QuantityFormatter.quantityToStackSize(item.getQuantity()) + ')';
    }
    notifier.notify(message);
}
// End-to-end: configure "abyssal whip" as highlighted, spawn one on a tile,
// and verify the exact notification text.
@Test
public void testNotifyHighlightedItem() {
    when(config.getHighlightItems()).thenReturn("abyssal whip");
    when(config.notifyTier()).thenReturn(HighlightTier.OFF);
    when(config.notifyHighlightedDrops()).thenReturn(true);
    when(itemManager.getItemComposition(ItemID.ABYSSAL_WHIP)).thenAnswer(a -> {
        ItemComposition itemComposition = mock(ItemComposition.class);
        when(itemComposition.getName()).thenReturn("Abyssal whip");
        return itemComposition;
    });
    // trigger reload of highlighted items list
    ConfigChanged configChanged = new ConfigChanged();
    configChanged.setGroup("grounditems");
    groundItemsPlugin.onConfigChanged(configChanged);
    // spawn whip
    Tile tile = mock(Tile.class);
    when(tile.getItemLayer()).thenReturn(mock(ItemLayer.class));
    when(tile.getWorldLocation()).thenReturn(new WorldPoint(0, 0, 0));
    TileItem tileItem = mock(TileItem.class);
    when(tileItem.getId()).thenReturn(ItemID.ABYSSAL_WHIP);
    when(tileItem.getQuantity()).thenReturn(1);
    groundItemsPlugin.onItemSpawned(new ItemSpawned(tile, tileItem));
    verify(notifier).notify("You received a highlighted drop: Abyssal whip");
}
/**
 * Generates the JSON-schema property map for {@code cls} viewed through its
 * {@code base} type. Note the delegate takes (concrete, base) — reversed order.
 */
public <T> Map<String, Object> properties(Class<T> base, Class<? extends T> cls) {
    return generate(cls, base);
}
// Verifies that a @Beta-annotated task surfaces "$beta": true both at the
// schema root and on its single beta-flagged property.
@SuppressWarnings("unchecked")
@Test
void betaTask() {
    Map<String, Object> generate = jsonSchemaGenerator.properties(Task.class, BetaTask.class);
    assertThat(generate, is(not(nullValue())));
    assertThat(generate.get("$beta"), is(true));
    assertThat(((Map<String, Map<String, Object>>) generate.get("properties")).size(), is(1));
    assertThat(((Map<String, Map<String, Object>>) generate.get("properties")).get("beta").get("$beta"), is(true));
}
/**
 * Folds an allocate response from one sub-cluster RM into the aggregate
 * response that will be returned to the AM. List-valued fields are appended
 * (or adopted wholesale when the home side is empty); scalar fields are
 * overwritten or summed as appropriate.
 *
 * @param homeResponse  the accumulating response (mutated in place)
 * @param otherResponse the response being merged in
 * @param otherRMAddress sub-cluster the other response came from
 */
@VisibleForTesting
protected void mergeAllocateResponse(AllocateResponse homeResponse,
    AllocateResponse otherResponse, SubClusterId otherRMAddress) {
    if (otherResponse.getAMRMToken() != null) {
        // Propagate only the new amrmToken from home sub-cluster back to
        // AMRMProxyService
        if (otherRMAddress.equals(this.homeSubClusterId)) {
            homeResponse.setAMRMToken(otherResponse.getAMRMToken());
        } else {
            LOG.warn("amrmToken from UAM {} not null, it should be null here", otherRMAddress);
        }
    }
    // Allocated containers: append, or adopt the other list if home has none.
    if (!isNullOrEmpty(otherResponse.getAllocatedContainers())) {
        if (!isNullOrEmpty(homeResponse.getAllocatedContainers())) {
            homeResponse.getAllocatedContainers()
                .addAll(otherResponse.getAllocatedContainers());
        } else {
            homeResponse
                .setAllocatedContainers(otherResponse.getAllocatedContainers());
        }
    }
    // Completed container statuses: same append-or-adopt pattern.
    if (!isNullOrEmpty(otherResponse.getCompletedContainersStatuses())) {
        if (!isNullOrEmpty(homeResponse.getCompletedContainersStatuses())) {
            homeResponse.getCompletedContainersStatuses()
                .addAll(otherResponse.getCompletedContainersStatuses());
        } else {
            homeResponse.setCompletedContainersStatuses(
                otherResponse.getCompletedContainersStatuses());
        }
    }
    // Updated node reports: same append-or-adopt pattern.
    if (!isNullOrEmpty(otherResponse.getUpdatedNodes())) {
        if (!isNullOrEmpty(homeResponse.getUpdatedNodes())) {
            homeResponse.getUpdatedNodes().addAll(otherResponse.getUpdatedNodes());
        } else {
            homeResponse.setUpdatedNodes(otherResponse.getUpdatedNodes());
        }
    }
    // Application priority: last non-null writer wins.
    if (otherResponse.getApplicationPriority() != null) {
        homeResponse.setApplicationPriority(
            otherResponse.getApplicationPriority());
    }
    // Cluster node counts are additive across sub-clusters.
    homeResponse.setNumClusterNodes(
        homeResponse.getNumClusterNodes() + otherResponse.getNumClusterNodes());
    // Preemption messages: adopt if home has none, otherwise merge the
    // (strict) contracts' resource requests and container sets.
    PreemptionMessage homePreempMessage = homeResponse.getPreemptionMessage();
    PreemptionMessage otherPreempMessage = otherResponse.getPreemptionMessage();
    if (homePreempMessage == null && otherPreempMessage != null) {
        homeResponse.setPreemptionMessage(otherPreempMessage);
    }
    if (homePreempMessage != null && otherPreempMessage != null) {
        PreemptionContract par1 = homePreempMessage.getContract();
        PreemptionContract par2 = otherPreempMessage.getContract();
        if (par1 == null && par2 != null) {
            homePreempMessage.setContract(par2);
        }
        if (par1 != null && par2 != null) {
            par1.getResourceRequest().addAll(par2.getResourceRequest());
            par1.getContainers().addAll(par2.getContainers());
        }
        StrictPreemptionContract spar1 = homePreempMessage.getStrictContract();
        StrictPreemptionContract spar2 = otherPreempMessage.getStrictContract();
        if (spar1 == null && spar2 != null) {
            homePreempMessage.setStrictContract(spar2);
        }
        if (spar1 != null && spar2 != null) {
            spar1.getContainers().addAll(spar2.getContainers());
        }
    }
    // NM tokens: append-or-adopt.
    if (!isNullOrEmpty(otherResponse.getNMTokens())) {
        if (!isNullOrEmpty(homeResponse.getNMTokens())) {
            homeResponse.getNMTokens().addAll(otherResponse.getNMTokens());
        } else {
            homeResponse.setNMTokens(otherResponse.getNMTokens());
        }
    }
    // Updated containers: append-or-adopt.
    if (!isNullOrEmpty(otherResponse.getUpdatedContainers())) {
        if (!isNullOrEmpty(homeResponse.getUpdatedContainers())) {
            homeResponse.getUpdatedContainers()
                .addAll(otherResponse.getUpdatedContainers());
        } else {
            homeResponse.setUpdatedContainers(otherResponse.getUpdatedContainers());
        }
    }
    // Update errors: append-or-adopt.
    if (!isNullOrEmpty(otherResponse.getUpdateErrors())) {
        if (!isNullOrEmpty(homeResponse.getUpdateErrors())) {
            homeResponse.getUpdateErrors().addAll(otherResponse.getUpdateErrors());
        } else {
            homeResponse.setUpdateErrors(otherResponse.getUpdateErrors());
        }
    }
}
// Builds two fully-populated AllocateResponses with one entry per list field,
// merges them, and asserts each merged list now contains two entries.
@Test
public void testMergeAllocateResponse() {
    ContainerId cid = ContainerId.newContainerId(attemptId, 0);
    ContainerStatus cStatus = Records.newRecord(ContainerStatus.class);
    cStatus.setContainerId(cid);
    Container container = Container.newInstance(cid, null, null, null, null, null);
    AllocateResponse homeResponse = Records.newRecord(AllocateResponse.class);
    homeResponse.setAllocatedContainers(Collections.singletonList(container));
    homeResponse.setCompletedContainersStatuses(Collections.singletonList(cStatus));
    homeResponse.setUpdatedNodes(Collections.singletonList(Records.newRecord(NodeReport.class)));
    homeResponse.setNMTokens(Collections.singletonList(Records.newRecord(NMToken.class)));
    homeResponse.setUpdatedContainers(Collections.singletonList(
        Records.newRecord(UpdatedContainer.class)));
    homeResponse.setUpdateErrors(Collections.singletonList(
        Records.newRecord(UpdateContainerError.class)));
    homeResponse.setAvailableResources(Records.newRecord(Resource.class));
    homeResponse.setPreemptionMessage(createDummyPreemptionMessage(
        ContainerId.newContainerId(attemptId, 0)));
    AllocateResponse response = Records.newRecord(AllocateResponse.class);
    response.setAllocatedContainers(Collections.singletonList(container));
    response.setCompletedContainersStatuses(Collections.singletonList(cStatus));
    response.setUpdatedNodes(Collections.singletonList(Records.newRecord(NodeReport.class)));
    response.setNMTokens(Collections.singletonList(Records.newRecord(NMToken.class)));
    response.setUpdatedContainers(Collections.singletonList(
        Records.newRecord(UpdatedContainer.class)));
    response.setUpdateErrors(Collections.singletonList(
        Records.newRecord(UpdateContainerError.class)));
    response.setAvailableResources(Records.newRecord(Resource.class));
    response.setPreemptionMessage(createDummyPreemptionMessage(
        ContainerId.newContainerId(attemptId, 1)));
    // Merge the second response into the first as if it came from "SC-1".
    interceptor.mergeAllocateResponse(homeResponse, response, SubClusterId.newInstance("SC-1"));
    Assert.assertEquals(2,
        homeResponse.getPreemptionMessage().getContract().getContainers().size());
    Assert.assertEquals(2, homeResponse.getAllocatedContainers().size());
    Assert.assertEquals(2, homeResponse.getUpdatedNodes().size());
    Assert.assertEquals(2, homeResponse.getCompletedContainersStatuses().size());
}
/**
 * Computes the base metabolic rate for the given attributes.
 *
 * @param bmrAttributes subject attributes; may be {@code null}
 * @return the gender-specific BMR, or a zero-valued rate when the input is null
 */
@Override
public BaseMetabolicRate calculate(BMRAttributes bmrAttributes) {
    // Defensive default: a null input yields a zero BMR rather than an NPE.
    // Use the canonical BigDecimal.ZERO constant instead of valueOf(0).
    if (bmrAttributes == null) {
        return new BaseMetabolicRate(BigDecimal.ZERO);
    }
    // Gender selects which equation variant applies.
    if (bmrAttributes.getGender() == Gender.MALE) {
        return calculateUsingMaleEquation(bmrAttributes);
    }
    return calculateUsingFemaleEquation(bmrAttributes);
}
// A null attributes object must degrade to a zero BMR instead of throwing.
@Test
void calculate_null() {
    BaseMetabolicRate baseMetabolicRate = bmrCalculator.calculate(null);
    assertEquals(new BigDecimal("0"), baseMetabolicRate.getBMR());
}
public static long getFileSize(Page statisticsPage, int position) { // FileStatistics page layout: // // fileSize rowCount // X X if (position < 0 || position >= statisticsPage.getPositionCount()) { throw new PrestoException(MALFORMED_HIVE_FILE_STATISTICS, format("Invalid position: %d specified for FileStatistics page", position)); } return BIGINT.getLong(statisticsPage.getBlock(FILE_SIZE_CHANNEL), position); }
// Position 2 is out of range for a one-row page and must raise PrestoException
// with the exact "Invalid position" message.
@Test(expectedExceptions = PrestoException.class, expectedExceptionsMessageRegExp = "Invalid position: 2 specified for FileStatistics page")
public void testGetFileSizeOfInvalidStatisticsPage() {
    Page statisticsPage = createTestStatisticsPageWithOneRow(ImmutableList.of(BIGINT, BIGINT), ImmutableList.of(FILE_SIZE, ROW_COUNT));
    getFileSize(statisticsPage, 2);
}
/** Returns all extensions resolved by the lazily-initialized supplier. */
Iterable<ArchUnitExtension> getAll() {
    Iterable<ArchUnitExtension> loaded = extensions.get();
    return loaded;
}
// Registers three extension implementations via the service file and checks
// that the loader discovers all of them.
@Test
public void loads_multiple_extensions() {
    testServicesFile.addService(TestExtension.class);
    testServicesFile.addService(DummyTestExtension.class);
    testServicesFile.addService(YetAnotherDummyTestExtension.class);
    Iterable<ArchUnitExtension> extensions = extensionLoader.getAll();
    assertThat(extensions)
        .hasSize(3)
        .hasAtLeastOneElementOfType(TestExtension.class)
        .hasAtLeastOneElementOfType(DummyTestExtension.class)
        .hasAtLeastOneElementOfType(YetAnotherDummyTestExtension.class);
}
/**
 * Streams readable {@code .zip} backup files from the backups root, newest
 * first (ties broken by filename), on this service's scheduler.
 * Flux.using guarantees the directory stream is closed when the flux terminates.
 */
@Override
public Flux<BackupFile> getBackupFiles() {
    return Flux.using(
        () -> Files.list(getBackupsRoot()),
        Flux::fromStream,
        BaseStream::close
    )
    .filter(Files::isRegularFile)
    .filter(Files::isReadable)
    // Only files with a .zip extension count as backups.
    .filter(path -> isExtension(path.getFileName().toString(), "zip"))
    .map(this::toBackupFile)
    .sort(comparing(BackupFile::getLastModifiedTime).reversed()
        .thenComparing(BackupFile::getFilename)
    )
    .subscribeOn(this.scheduler);
}
// Creates two zips with distinct mtimes plus one non-zip file, then verifies
// the flux emits only the zips, newest first, with correct sizes and times.
@Test
void getBackupFilesTest() throws Exception {
    var now = Instant.now();
    var backup1 = tempDir.resolve("backup1.zip");
    Files.writeString(backup1, "fake-content");
    Files.setLastModifiedTime(backup1, FileTime.from(now));
    var backup2 = tempDir.resolve("backup2.zip");
    Files.writeString(backup2, "fake--content");
    Files.setLastModifiedTime(
        backup2,
        FileTime.from(now.plus(Duration.ofSeconds(1)))
    );
    // Newest file of all, but wrong extension — must be filtered out.
    var backup3 = tempDir.resolve("backup3.not-a-zip");
    Files.writeString(backup3, "fake-content");
    Files.setLastModifiedTime(
        backup3,
        FileTime.from(now.plus(Duration.ofSeconds(2)))
    );
    when(backupRoot.get()).thenReturn(tempDir);
    migrationService.afterPropertiesSet();
    migrationService.getBackupFiles()
        .as(StepVerifier::create)
        .assertNext(backupFile -> {
            assertEquals("backup2.zip", backupFile.getFilename());
            assertEquals(13, backupFile.getSize());
            assertEquals(now.plus(Duration.ofSeconds(1)), backupFile.getLastModifiedTime());
        })
        .assertNext(backupFile -> {
            assertEquals("backup1.zip", backupFile.getFilename());
            assertEquals(12, backupFile.getSize());
            assertEquals(now, backupFile.getLastModifiedTime());
        })
        .verifyComplete();
}
/** Sets the oninvoke callback method name and returns this builder for chaining. */
public MethodBuilder oninvokeMethod(String oninvokeMethod) {
    this.oninvokeMethod = oninvokeMethod;
    MethodBuilder self = getThis();
    return self;
}
// The builder must carry the oninvoke method name through to the built object.
@Test
void oninvokeMethod() {
    MethodBuilder builder = MethodBuilder.newBuilder();
    builder.oninvokeMethod("on-invoke-method");
    Assertions.assertEquals("on-invoke-method", builder.build().getOninvokeMethod());
}
/**
 * Completes the LRA identified by {@code lraURL} by delegating to the saga
 * service's LRA client.
 *
 * @return the client's completion future
 */
@Override
public CompletableFuture<Void> complete(Exchange exchange) {
    return sagaService.getClient().complete(this.lraURL, exchange);
}
// The coordinator must forward complete() to the LRA client and return the
// client's future unchanged (same instance, not a copy).
@DisplayName("Tests whether complete is called on LRAClient")
@Test
void testComplete() throws Exception {
    CompletableFuture<Void> expected = CompletableFuture.completedFuture(null);
    Mockito.when(client.complete(url, exchange)).thenReturn(expected);
    CompletableFuture<Void> actual = coordinator.complete(exchange);
    Assertions.assertSame(expected, actual);
    Mockito.verify(sagaService).getClient();
    Mockito.verify(client).complete(url, exchange);
}
/**
 * Persists the latest captured UTM properties to shared preferences, or clears
 * the stored value when nothing was captured. Never throws: failures are logged.
 */
public static void saveDeepLinkInfo() {
    try {
        if (sLatestUtmProperties.size() == 0) {
            // Nothing captured this session — drop any stale persisted value.
            clearLocalUtm();
        } else {
            SAStoreManager.getInstance().setString(SHARED_PREF_UTM, sLatestUtmProperties.toString());
        }
    } catch (Exception e) {
        SALog.printStackTrace(e);
    }
}
// Smoke test: saveDeepLinkInfo must not throw even with no captured UTM data.
// NOTE(review): no assertions — consider verifying the persisted value too.
@Test
public void saveDeepLinkInfo() {
    ChannelUtils.saveDeepLinkInfo();
}
/**
 * Registers all components needed for report analysis into the task container.
 * Registration order matters: core components and providers are added before
 * the ordered computation step classes.
 */
@Override
public void populateContainer(TaskContainer container) {
    ComputationSteps steps = new ReportComputationSteps(container);
    container.add(SettingsLoader.class);
    container.add(task);
    container.add(steps);
    container.add(componentClasses());
    // Extension point: providers may contribute extra components.
    for (ReportAnalysisComponentProvider componentProvider : componentProviders) {
        container.add(componentProvider.getComponents());
    }
    container.add(steps.orderedStepClasses());
}
// Sanity check that populating the container registers at least one known
// core computation step.
@Test
public void at_least_one_core_step_is_added_to_the_container() {
    ListTaskContainer container = new ListTaskContainer();
    underTest.populateContainer(container);
    assertThat(container.getAddedComponents()).contains(PersistComponentsStep.class);
}
/**
 * Adapts any StateRestoreCallback to the record-batching interface: pass-through
 * when it already batches records, key/value batching when it batches pairs,
 * and a per-record loop otherwise.
 *
 * @throws NullPointerException if {@code restoreCallback} is null
 */
public static RecordBatchingStateRestoreCallback adapt(final StateRestoreCallback restoreCallback) {
    Objects.requireNonNull(restoreCallback, "stateRestoreCallback must not be null");
    if (restoreCallback instanceof RecordBatchingStateRestoreCallback) {
        // Already speaks the target interface — no adaptation needed.
        return (RecordBatchingStateRestoreCallback) restoreCallback;
    }
    if (restoreCallback instanceof BatchingStateRestoreCallback) {
        final BatchingStateRestoreCallback batching = (BatchingStateRestoreCallback) restoreCallback;
        return records -> {
            // Project each consumer record down to its key/value pair.
            final List<KeyValue<byte[], byte[]>> pairs = new ArrayList<>();
            for (final ConsumerRecord<byte[], byte[]> consumerRecord : records) {
                pairs.add(new KeyValue<>(consumerRecord.key(), consumerRecord.value()));
            }
            batching.restoreAll(pairs);
        };
    }
    // Plain callback: restore one record at a time.
    return records -> {
        for (final ConsumerRecord<byte[], byte[]> consumerRecord : records) {
            restoreCallback.restore(consumerRecord.key(), consumerRecord.value());
        }
    };
}
// Adapts a BatchingStateRestoreCallback and verifies restoreBatch converts
// consumer records into KeyValue pairs, preserving order.
@Test
public void shouldConvertToKeyValueBatches() {
    final ArrayList<KeyValue<byte[], byte[]>> actual = new ArrayList<>();
    final BatchingStateRestoreCallback callback = new BatchingStateRestoreCallback() {
        @Override
        public void restoreAll(final Collection<KeyValue<byte[], byte[]>> records) {
            actual.addAll(records);
        }
        @Override
        public void restore(final byte[] key, final byte[] value) {
            // unreachable
        }
    };
    final RecordBatchingStateRestoreCallback adapted = adapt(callback);
    final byte[] key1 = {1};
    final byte[] value1 = {2};
    final byte[] key2 = {3};
    final byte[] value2 = {4};
    adapted.restoreBatch(asList(
        new ConsumerRecord<>("topic1", 0, 0L, key1, value1),
        new ConsumerRecord<>("topic2", 1, 1L, key2, value2)
    ));
    assertThat(
        actual,
        is(asList(
            new KeyValue<>(key1, value1),
            new KeyValue<>(key2, value2)
        ))
    );
}
/**
 * Fetches the value at {@code columnIndex} from the merged result set and
 * normalizes it to a BigDecimal.
 */
@Override
public BigDecimal getBigDecimal(final int columnIndex) throws SQLException {
    Object rawValue = mergeResultSet.getValue(columnIndex, BigDecimal.class);
    return (BigDecimal) ResultSetUtils.convertValue(rawValue, BigDecimal.class);
}
// Label-based lookup must resolve to column index 1 and return the stubbed value.
@Test
void assertGetBigDecimalWithColumnLabel() throws SQLException {
    when(mergeResultSet.getValue(1, BigDecimal.class)).thenReturn(new BigDecimal("1"));
    assertThat(shardingSphereResultSet.getBigDecimal("label"), is(new BigDecimal("1")));
}
/**
 * Serializes a map into a JSON object string, recursing into nested maps and
 * JsonCapableObjects, and rendering arrays/collections/int arrays as JSON arrays.
 * All other values are written via {@code String.valueOf}.
 *
 * @param jsonData entries to serialize; iteration order determines key order
 * @return the JSON text, or {@code "{}"} if serialization fails for any reason
 */
@SuppressWarnings("unchecked")
public static String jsonFromMap(Map<String, Object> jsonData) {
    try {
        JsonDocument json = new JsonDocument();
        json.startGroup();
        for (String key : jsonData.keySet()) {
            Object data = jsonData.get(key);
            if (data instanceof Map) {
                /* it's a nested map, so we'll recursively add the JSON of this map to the current JSON */
                json.addValue(key, jsonFromMap((Map<String, Object>) data));
            } else if (data instanceof Object[]) {
                /* it's an object array, so we'll iterate the elements and put them all in here */
                json.addValue(key, "[" + stringArrayFromObjectArray((Object[]) data) + "]");
            } else if (data instanceof Collection) {
                /* it's a collection, so we'll iterate the elements and put them all in here */
                json.addValue(key, "[" + stringArrayFromObjectArray(((Collection<?>) data).toArray()) + "]");
            } else if (data instanceof int[]) {
                /* it's an int array, so we'll get the string representation */
                String intArray = Arrays.toString((int[]) data);
                /* remove whitespace; replace() does a literal substitution, avoiding
                   the needless regex compilation replaceAll() would perform */
                intArray = intArray.replace(" ", "");
                json.addValue(key, intArray);
            } else if (data instanceof JsonCapableObject) {
                json.addValue(key, jsonFromMap(((JsonCapableObject) data).jsonMap()));
            } else {
                /* all other objects we assume we are to just put the string value in */
                json.addValue(key, String.valueOf(data));
            }
        }
        json.endGroup();
        logger.debug("created json from map => {}", json);
        return json.toString();
    } catch (Exception e) {
        logger.error("Could not create JSON from Map. ", e);
        return "{}";
    }
}
// A map containing another map must serialize as a nested JSON object, with
// LinkedHashMap insertion order preserved in the output.
@Test
void testNestedMapOne() {
    Map<String, Object> jsonData = new LinkedHashMap<String, Object>();
    jsonData.put("myKey", "myValue");
    Map<String, Object> jsonData2 = new LinkedHashMap<String, Object>();
    jsonData2.put("myNestedKey", "myNestedValue");
    jsonData.put("myNestedData", jsonData2);
    String json = JsonUtility.jsonFromMap(jsonData);
    String expected = "{\"myKey\":\"myValue\",\"myNestedData\":{\"myNestedKey\":\"myNestedValue\"}}";
    assertEquals(expected, json);
}
/**
 * Quantile of the Bernoulli distribution: 0 while the probability level lies
 * within the mass at zero (1 - success probability), otherwise 1.
 *
 * @throws IllegalArgumentException if {@code p} is outside [0, 1]
 */
@Override
public double quantile(double p) {
    if (p < 0.0 || p > 1.0) {
        throw new IllegalArgumentException("Invalid p: " + p);
    }
    // P(X = 0) = 1 - this.p, so any level at or below that mass maps to 0.
    return p <= 1 - this.p ? 0 : 1;
}
// For p = 0.3 the mass at zero is 0.7, so quantile levels up to 0.7 map to 0
// and anything above maps to 1.
@Test
public void testQuantile() {
    System.out.println("quantile");
    BernoulliDistribution instance = new BernoulliDistribution(0.3);
    instance.rand();
    assertEquals(0, instance.quantile(0), 1E-7);
    assertEquals(0, instance.quantile(0.7), 1E-7);
    assertEquals(1, instance.quantile(1), 1E-7);
}
/**
 * Persists configuration; the topic-scoped parameters are currently unused.
 * The type parameter and arguments exist for forward compatibility only —
 * this simply delegates to the no-arg {@code persist()}.
 */
public synchronized <T> void persist(String topicName, T t) {
    // stub for future
    this.persist();
}
// After persist(), the config file on disk must contain the expected encoded content.
@Test
public void testPersist() throws Exception {
    ConfigManager testConfigManager = buildTestConfigManager();
    testConfigManager.persist();
    File file = new File(testConfigManager.configFilePath());
    assertEquals(CONTENT_ENCODE, MixAll.file2String(file));
}
/**
 * Resolves a secret purely by environment-variable name.
 * {@code pathToSecret} is intentionally ignored by this provider.
 *
 * @return the environment variable's value, or {@code null} if it is not set
 */
@Override
public String provideSecret(String secretName, Object pathToSecret) {
    return System.getenv(secretName);
}
// Without the env var the provider returns null; with it set (via SystemLambda)
// the provider returns the value.
@Test
public void testConfigValidation() throws Exception {
    EnvironmentBasedSecretsProvider provider = new EnvironmentBasedSecretsProvider();
    assertNull(provider.provideSecret("mySecretName", "Ignored"));
    SystemLambda.withEnvironmentVariable("mySecretName", "SecretValue").execute(() -> {
        assertEquals(provider.provideSecret("mySecretName", "Ignored"), "SecretValue");
    });
}
/**
 * Materializes this scan as a SparkBatch. The batch snapshot includes the
 * grouping key type and planned task groups; hashCode() is passed as the
 * batch's identity/cache key.
 */
@Override
public Batch toBatch() {
    return new SparkBatch(
        sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode());
}
// On an unpartitioned table, system-function predicates (years(ts), bucket(id))
// cannot prune anything: both the AND and its negation plan all 10 partitions.
@TestTemplate
public void testUnpartitionedAnd() throws Exception {
    createUnpartitionedTable(spark, tableName);
    SparkScanBuilder builder = scanBuilder();
    YearsFunction.TimestampToYearsFunction tsToYears = new YearsFunction.TimestampToYearsFunction();
    UserDefinedScalarFunc udf1 = toUDF(tsToYears, expressions(fieldRef("ts")));
    // 2017 expressed as years-since-epoch (1970).
    Predicate predicate1 = new Predicate("=", expressions(udf1, intLit(2017 - 1970)));
    BucketFunction.BucketLong bucketLong = new BucketFunction.BucketLong(DataTypes.LongType);
    UserDefinedScalarFunc udf = toUDF(bucketLong, expressions(intLit(5), fieldRef("id")));
    Predicate predicate2 = new Predicate(">=", expressions(udf, intLit(2)));
    Predicate predicate = new And(predicate1, predicate2);
    pushFilters(builder, predicate);
    Batch scan = builder.build().toBatch();
    assertThat(scan.planInputPartitions().length).isEqualTo(10);
    // NOT (years(ts) = 47 AND bucket(id, 5) >= 2)
    builder = scanBuilder();
    predicate = new Not(predicate);
    pushFilters(builder, predicate);
    scan = builder.build().toBatch();
    assertThat(scan.planInputPartitions().length).isEqualTo(10);
}
/**
 * Metadata-injection setter: only a case-insensitive "Y" enables batch updates;
 * any other value (including null) disables them.
 */
@Injection( name = "USE_BATCH_UPDATE" )
public void metaSetUseBatchUpdate( String value ) {
    boolean enabled = "Y".equalsIgnoreCase( value );
    setUseBatchUpdate( enabled );
}
// "Y" (any case) enables; "N" and non-exact strings like "Ynot" must disable.
@Test
public void metaSetUseBatchUpdate() {
    TableOutputMeta tableOutputMeta = new TableOutputMeta();
    tableOutputMeta.metaSetUseBatchUpdate( "Y" );
    assertTrue( tableOutputMeta.useBatchUpdate() );
    tableOutputMeta.metaSetUseBatchUpdate( "N" );
    assertFalse( tableOutputMeta.useBatchUpdate() );
    tableOutputMeta.metaSetUseBatchUpdate( "Ynot" );
    assertFalse( tableOutputMeta.useBatchUpdate() );
}
/**
 * Loads a dictionary type by primary key.
 *
 * @param id primary key
 * @return the row, or {@code null} if no row matches
 */
@Override
public DictTypeDO getDictType(Long id) {
    return dictTypeMapper.selectById(id);
}
// NOTE(review): this exercises the String-keyed getDictType(type) overload,
// not the Long-keyed getDictType(id) — confirm both overloads are covered.
@Test
public void testGetDictType_type() {
    // mock 数据 (seed test data)
    DictTypeDO dbDictType = randomDictTypeDO();
    dictTypeMapper.insert(dbDictType);
    // 准备参数 (prepare the lookup key)
    String type = dbDictType.getType();
    // 调用 (invoke)
    DictTypeDO dictType = dictTypeService.getDictType(type);
    // 断言 (assert the loaded row matches what was inserted)
    assertNotNull(dictType);
    assertPojoEquals(dbDictType, dictType);
}
/**
 * Decodes inbound bytes into database packets, deferring until enough bytes
 * for a valid packet header have accumulated.
 */
@Override
protected void decode(final ChannelHandlerContext context, final ByteBuf in, final List<Object> out) {
    if (!databasePacketCodecEngine.isValidHeader(in.readableBytes())) {
        // Incomplete header — wait for more input before decoding.
        return;
    }
    if (log.isDebugEnabled()) {
        log.debug("Read from client {} :\n{}", context.channel().id().asShortText(), ByteBufUtil.prettyHexDump(in));
    }
    databasePacketCodecEngine.decode(context, in, out);
}
// With an invalid header the codec must bail out before delegating to the engine.
@Test
void assertDecodeWithInvalidHeader() {
    when(byteBuf.readableBytes()).thenReturn(1);
    when(databasePacketCodecEngine.isValidHeader(1)).thenReturn(false);
    packetCodec.decode(context, byteBuf, Collections.emptyList());
    verify(databasePacketCodecEngine, times(0)).decode(context, byteBuf, Collections.emptyList());
}
/**
 * Returns new TableMetadata with the given schema set as current.
 * This metadata object is immutable; the update is applied via a Builder copy.
 */
public TableMetadata updateSchema(Schema newSchema, int newLastColumnId) {
    Builder builder = new Builder(this);
    builder.setCurrentSchema(newSchema, newLastColumnId);
    return builder.build();
}
// Exercises schema evolution bookkeeping: new schemas get incrementing ids,
// identical updates are no-ops, reverting reuses the old id, and new schemas
// take (largest id + 1).
@Test
public void testUpdateSchema() {
    Schema schema = new Schema(0, Types.NestedField.required(1, "y", Types.LongType.get(), "comment"));
    TableMetadata freshTable = TableMetadata.newTableMetadata(
        schema, PartitionSpec.unpartitioned(), null, ImmutableMap.of());
    assertThat(freshTable.currentSchemaId()).isEqualTo(TableMetadata.INITIAL_SCHEMA_ID);
    assertSameSchemaList(ImmutableList.of(schema), freshTable.schemas());
    assertThat(freshTable.schema().asStruct()).isEqualTo(schema.asStruct());
    assertThat(freshTable.lastColumnId()).isEqualTo(1);
    // update schema
    Schema schema2 = new Schema(
        Types.NestedField.required(1, "y", Types.LongType.get(), "comment"),
        Types.NestedField.required(2, "x", Types.StringType.get()));
    TableMetadata twoSchemasTable = freshTable.updateSchema(schema2, 2);
    assertThat(twoSchemasTable.currentSchemaId()).isEqualTo(1);
    assertSameSchemaList(
        ImmutableList.of(schema, new Schema(1, schema2.columns())), twoSchemasTable.schemas());
    assertThat(twoSchemasTable.schema().asStruct()).isEqualTo(schema2.asStruct());
    assertThat(twoSchemasTable.lastColumnId()).isEqualTo(2);
    // update schema with the same schema and last column ID as current shouldn't cause change
    Schema sameSchema2 = new Schema(
        Types.NestedField.required(1, "y", Types.LongType.get(), "comment"),
        Types.NestedField.required(2, "x", Types.StringType.get()));
    TableMetadata sameSchemaTable = twoSchemasTable.updateSchema(sameSchema2, 2);
    assertThat(sameSchemaTable).isSameAs(twoSchemasTable);
    // update schema with the same schema and different last column ID as current should create
    // a new table
    TableMetadata differentColumnIdTable = sameSchemaTable.updateSchema(sameSchema2, 3);
    assertThat(differentColumnIdTable.currentSchemaId()).isEqualTo(1);
    assertSameSchemaList(
        ImmutableList.of(schema, new Schema(1, schema2.columns())),
        differentColumnIdTable.schemas());
    assertThat(differentColumnIdTable.schema().asStruct()).isEqualTo(schema2.asStruct());
    assertThat(differentColumnIdTable.lastColumnId()).isEqualTo(3);
    // update schema with old schema does not change schemas
    TableMetadata revertSchemaTable = differentColumnIdTable.updateSchema(schema, 3);
    assertThat(revertSchemaTable.currentSchemaId()).isEqualTo(0);
    assertSameSchemaList(
        ImmutableList.of(schema, new Schema(1, schema2.columns())), revertSchemaTable.schemas());
    assertThat(revertSchemaTable.schema().asStruct()).isEqualTo(schema.asStruct());
    assertThat(revertSchemaTable.lastColumnId()).isEqualTo(3);
    // create new schema will use the largest schema id + 1
    Schema schema3 = new Schema(
        Types.NestedField.required(2, "y", Types.LongType.get(), "comment"),
        Types.NestedField.required(4, "x", Types.StringType.get()),
        Types.NestedField.required(6, "z", Types.IntegerType.get()));
    TableMetadata threeSchemaTable = revertSchemaTable.updateSchema(schema3, 6);
    assertThat(threeSchemaTable.currentSchemaId()).isEqualTo(2);
    assertSameSchemaList(
        ImmutableList.of(
            schema, new Schema(1, schema2.columns()), new Schema(2, schema3.columns())),
        threeSchemaTable.schemas());
    assertThat(threeSchemaTable.schema().asStruct()).isEqualTo(schema3.asStruct());
    assertThat(threeSchemaTable.lastColumnId()).isEqualTo(6);
}
/** This catalog always exposes a single fixed default database named "default". */
@Override
public String getDefaultDatabase() throws CatalogException {
    final String defaultDatabase = "default";
    return defaultDatabase;
}
// Verifies the catalog reports the expected default database.
// Fix: JUnit's assertEquals takes (expected, actual); the original had the
// arguments reversed, which produces misleading failure messages.
@Test
@Order(1)
void getDefaultDatabase() {
    Assertions.assertEquals(databaseName, icebergCatalog.getDefaultDatabase());
}
/**
 * Walks the config graph from the root validatable, invoking {@code handler}
 * on each node, starting with a fresh validation context (null parent).
 */
public void walk(Handler handler) {
    walkSubtree(this.rootValidatable, new ConfigSaveValidationContext(null), handler);
}
// The walker must descend through MergePipelineConfigs into the nested
// pipeline config and invoke validate on it.
@Test
public void shouldWalkPipelineConfigsInMergePipelineConfigs() {
    PipelineConfig pipe = mockPipelineConfig();
    MergePipelineConfigs mergePipelines = new MergePipelineConfigs(new BasicPipelineConfigs(pipe));
    new GoConfigGraphWalker(mergePipelines).walk(Validatable::validate);
    verify(pipe, atLeastOnce()).validate(any(ValidationContext.class));
}
// Builds the final workflow parameter map for a run by layering parameter
// definitions in precedence order and then evaluating them into Parameters:
//   fresh run: system defaults -> workflow definition params
//   restart:   params carried over from the previous instance (minus reserved ones)
//   both:      run-request params -> user-provided restart params
public Map<String, Parameter> generateMergedWorkflowParams(
    WorkflowInstance instance, RunRequest request) {
  Workflow workflow = instance.getRuntimeWorkflow();
  Map<String, ParamDefinition> allParamDefs = new LinkedHashMap<>();
  Map<String, ParamDefinition> defaultWorkflowParams =
      defaultParamManager.getDefaultWorkflowParams();

  // merge workflow params for start
  if (request.isFreshRun()) {
    // merge default workflow params
    ParamsMergeHelper.mergeParams(
        allParamDefs,
        defaultWorkflowParams,
        ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.SYSTEM_DEFAULT, request));

    // merge defined workflow params
    if (workflow.getParams() != null) {
      ParamsMergeHelper.mergeParams(
          allParamDefs,
          workflow.getParams(),
          ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.DEFINITION, request));
    }
  }

  // merge workflow params from previous instance for restart
  if (!request.isFreshRun() && instance.getParams() != null) {
    Map<String, ParamDefinition> previousParamDefs =
        instance.getParams().entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().toDefinition()));
    // remove reserved params, which should be injected again by the system.
    for (String paramName : Constants.RESERVED_PARAM_NAMES) {
      previousParamDefs.remove(paramName);
    }
    // NOTE(review): unlike the other merges, this context is built with a boolean
    // instead of the request — presumably a different workflowCreate overload; confirm.
    ParamsMergeHelper.mergeParams(
        allParamDefs,
        previousParamDefs,
        ParamsMergeHelper.MergeContext.workflowCreate(ParamSource.SYSTEM, false));
  }

  // merge run params
  if (request.getRunParams() != null) {
    ParamSource source = getParamSource(request.getInitiator(), request.isFreshRun());
    ParamsMergeHelper.mergeParams(
        allParamDefs,
        request.getRunParams(),
        ParamsMergeHelper.MergeContext.workflowCreate(source, request));
  }

  // merge user provided restart run params
  getUserRestartParam(request)
      .ifPresent(
          userRestartParams -> {
            ParamSource source = getParamSource(request.getInitiator(), request.isFreshRun());
            ParamsMergeHelper.mergeParams(
                allParamDefs,
                userRestartParams,
                ParamsMergeHelper.MergeContext.workflowCreate(source, request));
          });

  // cleanup any placeholder params and convert to params
  return ParamsMergeHelper.convertToParameters(ParamsMergeHelper.cleanupParams(allParamDefs));
}
// A restart (RESTART_FROM_SPECIFIC) with no overriding run params must keep the
// instance's previous parameter values and their original SYSTEM_DEFAULT source.
@Test
public void testRestartConfigRunUnchangedParamMerge() {
  RunRequest request =
      RunRequest.builder()
          .initiator(new ManualInitiator())
          .currentPolicy(RunPolicy.RESTART_FROM_SPECIFIC)
          .restartConfig(
              RestartConfig.builder().addRestartNode("sample-wf-map-params", 1, "foo").build())
          .build();
  // Metadata marks the param as originally coming from system defaults.
  Map<String, Object> meta =
      Collections.singletonMap(Constants.METADATA_SOURCE_KEY, "SYSTEM_DEFAULT");
  Map<String, Parameter> instanceParams = new LinkedHashMap<>();
  instanceParams.put(
      "TARGET_RUN_DATE",
      LongParameter.builder()
          .name("TARGET_RUN_DATE")
          .value(1000L)
          .evaluatedResult(1000L)
          .evaluatedTime(123L)
          .mode(ParamMode.MUTABLE_ON_START)
          .meta(meta)
          .build());
  workflowInstance.setParams(instanceParams);

  Map<String, Parameter> workflowParams =
      paramsManager.generateMergedWorkflowParams(workflowInstance, request);

  Assert.assertFalse(workflowParams.isEmpty());
  // The source must survive the merge unchanged.
  Assert.assertEquals(
      ParamSource.SYSTEM_DEFAULT, workflowParams.get("TARGET_RUN_DATE").getSource());
}
/**
 * Builds a bulkhead configuration from a Commons Configuration instance by reading
 * the shared config section and the per-instance section.
 *
 * @param configuration the source configuration
 * @return the populated bulkhead configuration
 * @throws ConfigParseException if any property cannot be read or parsed
 */
public static CommonsConfigurationBulkHeadConfiguration of(final Configuration configuration)
        throws ConfigParseException {
    final CommonsConfigurationBulkHeadConfiguration result =
            new CommonsConfigurationBulkHeadConfiguration();
    try {
        final Configuration configsSubset = configuration.subset(BULK_HEAD_CONFIGS_PREFIX);
        final Configuration instancesSubset = configuration.subset(BULK_HEAD_INSTANCES_PREFIX);
        result.getConfigs().putAll(result.getProperties(configsSubset));
        result.getInstances().putAll(result.getProperties(instancesSubset));
    } catch (Exception ex) {
        // Wrap any parsing/reading failure in the domain exception.
        throw new ConfigParseException("Error creating bulkhead configuration", ex);
    }
    return result;
}
// Loads the resilience config from the .properties test resource and checks that
// both the shared configs and the named instances are parsed as expected.
@Test
public void testFromPropertiesFile() throws ConfigurationException {
  Configuration config =
      CommonsConfigurationUtil.getConfiguration(
          PropertiesConfiguration.class, TestConstants.RESILIENCE_CONFIG_PROPERTIES_FILE_NAME);
  CommonsConfigurationBulkHeadConfiguration bulkHeadConfiguration =
      CommonsConfigurationBulkHeadConfiguration.of(config);
  assertConfigs(bulkHeadConfiguration.getConfigs());
  assertInstances(bulkHeadConfiguration.getInstances());
}
// Convenience factory: a single grouping property over the given columns,
// wrapped in an immutable single-element list.
public static <T> List<LocalProperty<T>> grouped(Collection<T> columns) {
  return ImmutableList.of(new GroupingProperty<>(columns));
}
// Matches a constant("a") + grouped("b") actual layout against several desired
// groupings; assertMatch takes one expected Optional per desired property
// (Optional.empty() meaning that property is fully satisfied).
@Test
public void testNonoverlappingConstantGroup() {
  List<LocalProperty<String>> actual =
      builder()
          .constant("a")
          .grouped("b")
          .build();

  // {a,b,c} is only partially satisfied: "c" remains to be grouped.
  assertMatch(
      actual,
      builder().grouped("a", "b", "c").build(),
      Optional.of(grouped("c")));
  // {a,b}, {a} and {b} are all satisfied by constant(a)+grouped(b).
  assertMatch(
      actual,
      builder().grouped("a", "b").build(),
      Optional.empty());
  assertMatch(
      actual,
      builder().grouped("a").build(),
      Optional.empty());
  assertMatch(
      actual,
      builder().grouped("b").build(),
      Optional.empty());
  // Two desired properties -> two expected results, both satisfied.
  assertMatch(
      actual,
      builder()
          .grouped("b")
          .grouped("a")
          .build(),
      Optional.empty(),
      Optional.empty());
}
/**
 * Validates the given {@link PipelineOptions} for running on Dataflow and constructs a
 * {@link DataflowRunner}: checks required options (appName, region, project, temp/staging
 * locations), validates worker settings, stages classpath files if none were specified,
 * normalizes the job name, and assembles the user-agent string.
 */
public static DataflowRunner fromOptions(PipelineOptions options) {
  DataflowPipelineOptions dataflowOptions =
      PipelineOptionsValidator.validate(DataflowPipelineOptions.class, options);
  ArrayList<String> missing = new ArrayList<>();

  if (dataflowOptions.getAppName() == null) {
    missing.add("appName");
  }
  // Region is only required when talking to a hosted Dataflow service endpoint.
  if (Strings.isNullOrEmpty(dataflowOptions.getRegion())
      && isServiceEndpoint(dataflowOptions.getDataflowEndpoint())) {
    missing.add("region");
  }
  if (missing.size() > 0) {
    throw new IllegalArgumentException(
        "Missing required pipeline options: " + Joiner.on(',').join(missing));
  }

  validateWorkerSettings(
      PipelineOptionsValidator.validate(DataflowPipelineWorkerPoolOptions.class, options));

  PathValidator validator = dataflowOptions.getPathValidator();
  String gcpTempLocation;
  try {
    gcpTempLocation = dataflowOptions.getGcpTempLocation();
  } catch (Exception e) {
    throw new IllegalArgumentException(
        "DataflowRunner requires gcpTempLocation, "
            + "but failed to retrieve a value from PipelineOptions",
        e);
  }
  validator.validateOutputFilePrefixSupported(gcpTempLocation);

  String stagingLocation;
  try {
    stagingLocation = dataflowOptions.getStagingLocation();
  } catch (Exception e) {
    throw new IllegalArgumentException(
        "DataflowRunner requires stagingLocation, "
            + "but failed to retrieve a value from PipelineOptions",
        e);
  }
  validator.validateOutputFilePrefixSupported(stagingLocation);

  if (!isNullOrEmpty(dataflowOptions.getSaveProfilesToGcs())) {
    validator.validateOutputFilePrefixSupported(dataflowOptions.getSaveProfilesToGcs());
  }

  if (dataflowOptions.getFilesToStage() != null) {
    // The user specifically requested these files, so fail now if they do not exist.
    // (automatically detected classpath elements are permitted to not exist, so later
    // staging will not fail on nonexistent files)
    dataflowOptions.getFilesToStage().stream()
        .forEach(
            stagedFileSpec -> {
              File localFile;
              // "name=path" entries carry an explicit staging name before the '='.
              if (stagedFileSpec.contains("=")) {
                String[] components = stagedFileSpec.split("=", 2);
                localFile = new File(components[1]);
              } else {
                localFile = new File(stagedFileSpec);
              }
              if (!localFile.exists()) {
                // should be FileNotFoundException, but for build-time backwards compatibility
                // cannot add checked exception
                throw new RuntimeException(
                    String.format(
                        "Non-existent files specified in filesToStage: %s", localFile));
              }
            });
  } else {
    dataflowOptions.setFilesToStage(
        detectClassPathResourcesToStage(DataflowRunner.class.getClassLoader(), options));
    if (dataflowOptions.getFilesToStage().isEmpty()) {
      throw new IllegalArgumentException("No files to stage has been found.");
    } else {
      LOG.info(
          "PipelineOptions.filesToStage was not specified. "
              + "Defaulting to files from the classpath: will stage {} files. "
              + "Enable logging at DEBUG level to see which files will be staged.",
          dataflowOptions.getFilesToStage().size());
      LOG.debug("Classpath elements: {}", dataflowOptions.getFilesToStage());
    }
  }

  // Verify jobName according to service requirements, truncating converting to lowercase if
  // necessary.
  String jobName = dataflowOptions.getJobName().toLowerCase();
  checkArgument(
      jobName.matches("[a-z]([-a-z0-9]*[a-z0-9])?"),
      "JobName invalid; the name must consist of only the characters "
          + "[-a-z0-9], starting with a letter and ending with a letter "
          + "or number");
  if (!jobName.equals(dataflowOptions.getJobName())) {
    LOG.info(
        "PipelineOptions.jobName did not match the service requirements. "
            + "Using {} instead of {}.",
        jobName,
        dataflowOptions.getJobName());
  }
  dataflowOptions.setJobName(jobName);

  // Verify project
  String project = dataflowOptions.getProject();
  // An all-digits value is a project *number*, not a project ID.
  if (project.matches("[0-9]*")) {
    throw new IllegalArgumentException(
        "Project ID '"
            + project
            + "' invalid. Please make sure you specified the Project ID, not project number.");
  } else if (!project.matches(PROJECT_ID_REGEXP)) {
    throw new IllegalArgumentException(
        "Project ID '"
            + project
            + "' invalid. Please make sure you specified the Project ID, not project"
            + " description.");
  }

  DataflowPipelineDebugOptions debugOptions =
      dataflowOptions.as(DataflowPipelineDebugOptions.class);
  // Verify the number of worker threads is a valid value
  if (debugOptions.getNumberOfWorkerHarnessThreads() < 0) {
    throw new IllegalArgumentException(
        "Number of worker harness threads '"
            + debugOptions.getNumberOfWorkerHarnessThreads()
            + "' invalid. Please make sure the value is non-negative.");
  }

  // Verify that if recordJfrOnGcThrashing is set, the pipeline is at least on java 11
  if (dataflowOptions.getRecordJfrOnGcThrashing()
      && Environments.getJavaVersion() == Environments.JavaVersion.java8) {
    throw new IllegalArgumentException(
        "recordJfrOnGcThrashing is only supported on java 9 and up.");
  }

  if (dataflowOptions.isStreaming() && dataflowOptions.getGcsUploadBufferSizeBytes() == null) {
    dataflowOptions.setGcsUploadBufferSizeBytes(GCS_UPLOAD_BUFFER_SIZE_BYTES_DEFAULT);
  }

  // Adding the Java version to the SDK name for user's and support convenience.
  String agentJavaVer = "(JRE 8 environment)";
  if (Environments.getJavaVersion() != Environments.JavaVersion.java8) {
    agentJavaVer =
        String.format("(JRE %s environment)", Environments.getJavaVersion().specification());
  }

  DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
  String userAgentName = dataflowRunnerInfo.getName();
  Preconditions.checkArgument(
      !userAgentName.equals(""), "Dataflow runner's `name` property cannot be empty.");
  String userAgentVersion = dataflowRunnerInfo.getVersion();
  Preconditions.checkArgument(
      !userAgentVersion.equals(""), "Dataflow runner's `version` property cannot be empty.");
  // Spaces are replaced so the whole agent string is a single HTTP user-agent token.
  String userAgent =
      String.format("%s/%s%s", userAgentName, userAgentVersion, agentJavaVer).replace(" ", "_");
  dataflowOptions.setUserAgent(userAgent);

  return new DataflowRunner(dataflowOptions);
}
// A GCS tempLocation alone (without an explicit gcpTempLocation) must be enough
// for DataflowRunner.fromOptions() to succeed.
@Test
public void testTempLocationAndNoGcpTempLocationSucceeds() throws Exception {
  DataflowPipelineOptions pipelineOptions =
      PipelineOptionsFactory.as(DataflowPipelineOptions.class);
  pipelineOptions.setRunner(DataflowRunner.class);
  pipelineOptions.setProject("foo-project");
  pipelineOptions.setRegion(REGION_ID);
  pipelineOptions.setGcpCredential(new TestCredential());
  pipelineOptions.setGcsUtil(mockGcsUtil);
  pipelineOptions.setTempLocation(VALID_TEMP_BUCKET);

  // Must not throw.
  DataflowRunner.fromOptions(pipelineOptions);
}
// Opens an Admin client against the cluster's internal bootstrap service, delegates the
// actual metadata-version check/update, and guarantees the client is closed before the
// returned Future completes (with the delegate's original result).
public static Future<Void> maybeUpdateMetadataVersion(
    Reconciliation reconciliation,
    Vertx vertx,
    TlsPemIdentity coTlsPemIdentity,
    AdminClientProvider adminClientProvider,
    String desiredMetadataVersion,
    KafkaStatus status
) {
  // Connect through the replication port of the bootstrap service inside the namespace.
  String bootstrapHostname = KafkaResources.bootstrapServiceName(reconciliation.name()) + "." + reconciliation.namespace() + ".svc:" + KafkaCluster.REPLICATION_PORT;
  LOGGER.debugCr(reconciliation, "Creating AdminClient for Kafka cluster in namespace {}", reconciliation.namespace());
  Admin kafkaAdmin = adminClientProvider.createAdminClient(bootstrapHostname, coTlsPemIdentity.pemTrustSet(), coTlsPemIdentity.pemAuthIdentity());

  Promise<Void> updatePromise = Promise.promise();
  maybeUpdateMetadataVersion(reconciliation, vertx, kafkaAdmin, desiredMetadataVersion, status)
      .onComplete(res -> {
        // Close the Admin client and return the original result
        LOGGER.debugCr(reconciliation, "Closing the Kafka Admin API connection");
        kafkaAdmin.close();
        updatePromise.handle(res);
      });

  return updatePromise.future();
}
// A failed updateFeatures() call must not fail the reconciliation: the overall future
// still succeeds, the status keeps the described metadata version, and a Warning
// condition (MetadataUpdateFailed) is added instead.
@Test
public void testUnsuccessfulMetadataVersionChange(VertxTestContext context) {
  // Mock the Admin client
  Admin mockAdminClient = mock(Admin.class);

  // Mock describing the current metadata version
  mockDescribeVersion(mockAdminClient);

  // Mock updating metadata version
  @SuppressWarnings(value = "unchecked")
  KafkaFuture<Void> kf = mock(KafkaFuture.class);
  when(kf.whenComplete(any())).thenAnswer(i -> {
    // Complete the update future exceptionally to simulate a rejected version change.
    KafkaFuture.BiConsumer<Void, Throwable> action = i.getArgument(0);
    action.accept(null, new InvalidUpdateVersionException("Test error ..."));
    return null;
  });
  UpdateFeaturesResult ufr = mock(UpdateFeaturesResult.class);
  when(ufr.values()).thenReturn(Map.of(KRaftMetadataManager.METADATA_VERSION_KEY, kf));
  when(mockAdminClient.updateFeatures(any(), any())).thenReturn(ufr);

  // Mock the Admin client provider
  AdminClientProvider mockAdminClientProvider = mockAdminClientProvider(mockAdminClient);

  // Dummy KafkaStatus to check the values from
  KafkaStatus status = new KafkaStatus();

  Checkpoint checkpoint = context.checkpoint();
  KRaftMetadataManager.maybeUpdateMetadataVersion(Reconciliation.DUMMY_RECONCILIATION, vertx, DUMMY_IDENTITY, mockAdminClientProvider, "3.6", status)
      .onComplete(context.succeeding(s -> {
        // Status reflects the described (unchanged) version plus a warning condition.
        assertThat(status.getKafkaMetadataVersion(), is("3.6-IV1"));
        assertThat(status.getConditions().size(), is(1));
        assertThat(status.getConditions().get(0).getType(), is("Warning"));
        assertThat(status.getConditions().get(0).getReason(), is("MetadataUpdateFailed"));
        assertThat(status.getConditions().get(0).getMessage(), is("Failed to update metadata version to 3.6"));
        // Exactly one update attempt and two describe calls are expected.
        verify(mockAdminClient, times(1)).updateFeatures(any(), any());
        verify(mockAdminClient, times(2)).describeFeatures();
        checkpoint.flag();
      }));
}
public int hash(JimfsPath path) { // Note: JimfsPath.equals() is implemented using the compare() method below; // equalityUsesCanonicalForm is taken into account there via the namesOrdering, which is set // at construction time. int hash = 31; hash = 31 * hash + getFileSystem().hashCode(); final Name root = path.root(); final ImmutableList<Name> names = path.names(); if (equalityUsesCanonicalForm) { // use hash codes of names themselves, which are based on the canonical form hash = 31 * hash + (root == null ? 0 : root.hashCode()); for (Name name : names) { hash = 31 * hash + name.hashCode(); } } else { // use hash codes from toString() form of names hash = 31 * hash + (root == null ? 0 : root.toString().hashCode()); for (Name name : names) { hash = 31 * hash + name.toString().hashCode(); } } return hash; }
// With canonical-form equality, paths whose names share the same canonical form
// ("foo") must hash identically regardless of their display strings.
@Test
public void testHash_usingCanonicalForm() {
  PathService pathService = fakePathService(PathType.unix(), true);

  // Name.create(display, canonical): all three names share canonical "foo".
  JimfsPath path1 = new JimfsPath(pathService, null, ImmutableList.of(Name.create("foo", "foo")));
  JimfsPath path2 = new JimfsPath(pathService, null, ImmutableList.of(Name.create("FOO", "foo")));
  JimfsPath path3 =
      new JimfsPath(
          pathService, null, ImmutableList.of(Name.create("28937497189478912374897", "foo")));

  assertThat(pathService.hash(path1)).isEqualTo(pathService.hash(path2));
  assertThat(pathService.hash(path2)).isEqualTo(pathService.hash(path3));
}
// Rebuilds a CacheSimpleConfig from the decoded client message parameters,
// translating optional/nullable parameters only when present.
@Override
protected IdentifiedDataSerializable getConfig() {
  CacheSimpleConfig config = new CacheSimpleConfig();
  config.setAsyncBackupCount(parameters.asyncBackupCount);
  config.setBackupCount(parameters.backupCount);
  config.setCacheEntryListeners(parameters.cacheEntryListeners);
  config.setCacheLoader(parameters.cacheLoader);
  config.setCacheLoaderFactory(parameters.cacheLoaderFactory);
  config.setCacheWriter(parameters.cacheWriter);
  config.setCacheWriterFactory(parameters.cacheWriterFactory);
  config.setDisablePerEntryInvalidationEvents(parameters.disablePerEntryInvalidationEvents);
  if (parameters.evictionConfig != null) {
    config.setEvictionConfig(parameters.evictionConfig.asEvictionConfig(serializationService));
  }
  // An explicit factory class name takes precedence over a timed expiry-policy config.
  if (parameters.expiryPolicyFactoryClassName != null) {
    config.setExpiryPolicyFactory(parameters.expiryPolicyFactoryClassName);
  } else if (parameters.timedExpiryPolicyFactoryConfig != null) {
    ExpiryPolicyFactoryConfig expiryPolicyFactoryConfig =
        new ExpiryPolicyFactoryConfig(parameters.timedExpiryPolicyFactoryConfig);
    config.setExpiryPolicyFactoryConfig(expiryPolicyFactoryConfig);
  }
  if (parameters.eventJournalConfig != null) {
    config.setEventJournalConfig(parameters.eventJournalConfig);
  }
  if (parameters.hotRestartConfig != null) {
    config.setHotRestartConfig(parameters.hotRestartConfig);
  }
  config.setInMemoryFormat(InMemoryFormat.valueOf(parameters.inMemoryFormat));
  config.setKeyType(parameters.keyType);
  config.setManagementEnabled(parameters.managementEnabled);
  if (parameters.mergePolicy != null) {
    config.setMergePolicyConfig(mergePolicyConfig(parameters.mergePolicy, parameters.mergeBatchSize));
  }
  config.setName(parameters.name);
  if (parameters.partitionLostListenerConfigs != null
      && !parameters.partitionLostListenerConfigs.isEmpty()) {
    List<CachePartitionLostListenerConfig> listenerConfigs =
        (List<CachePartitionLostListenerConfig>)
            adaptListenerConfigs(parameters.partitionLostListenerConfigs, parameters.userCodeNamespace);
    config.setPartitionLostListenerConfigs(listenerConfigs);
  } else {
    // Normalize a missing/empty listener list to an empty mutable list.
    config.setPartitionLostListenerConfigs(new ArrayList<>());
  }
  config.setSplitBrainProtectionName(parameters.splitBrainProtectionName);
  config.setReadThrough(parameters.readThrough);
  config.setStatisticsEnabled(parameters.statisticsEnabled);
  config.setValueType(parameters.valueType);
  config.setWanReplicationRef(parameters.wanReplicationRef);
  config.setWriteThrough(parameters.writeThrough);
  // The "...Exists" flags distinguish "absent from the message" from "present".
  if (parameters.isMerkleTreeConfigExists && parameters.merkleTreeConfig != null) {
    config.setMerkleTreeConfig(parameters.merkleTreeConfig);
  }
  if (parameters.isDataPersistenceConfigExists) {
    config.setDataPersistenceConfig(parameters.dataPersistenceConfig);
  }
  if (parameters.isUserCodeNamespaceExists) {
    config.setUserCodeNamespace(parameters.userCodeNamespace);
  }
  return config;
}
// Encoding an add-cache-config request with null for every nullable field must not
// make the server-side message task throw; the transmitted config must round-trip
// back equal to the default-constructed CacheConfig.
@Test
public void doNotThrowException_whenNullValuesProvidedForNullableFields() throws Exception {
  CacheConfig<Object, Object> cacheConfig = new CacheConfig<>("my-cache");
  // Positional codec arguments: every nullable field is deliberately passed as null.
  ClientMessage addMapConfigClientMessage = DynamicConfigAddCacheConfigCodec.encodeRequest(
      cacheConfig.getName(),
      null,
      null,
      cacheConfig.isStatisticsEnabled(),
      cacheConfig.isManagementEnabled(),
      cacheConfig.isReadThrough(),
      cacheConfig.isWriteThrough(),
      null,
      null,
      null,
      null,
      cacheConfig.getBackupCount(),
      cacheConfig.getAsyncBackupCount(),
      cacheConfig.getInMemoryFormat().name(),
      null,
      null,
      0,
      cacheConfig.isDisablePerEntryInvalidationEvents(),
      null,
      null,
      null,
      null,
      null,
      null,
      null,
      null,
      null,
      cacheConfig.getDataPersistenceConfig(),
      cacheConfig.getUserCodeNamespace()
  );
  AddCacheConfigMessageTask addCacheConfigMessageTask = createMessageTask(addMapConfigClientMessage);
  addCacheConfigMessageTask.run();

  CacheConfig<Object, Object> transmittedCacheConfig =
      new CacheConfig<>((CacheSimpleConfig) addCacheConfigMessageTask.getConfig());
  assertEquals(cacheConfig, transmittedCacheConfig);
}
// Reads the remote app's theme colors by creating a package context for it and applying
// its activity theme; returns EMPTY when accent colors are not supported on this OS
// level or when anything about the remote lookup fails.
@Override
public OverlayData createOverlayData(ComponentName remoteApp) {
  if (!OS_SUPPORT_FOR_ACCENT) {
    return EMPTY;
  }

  try {
    final ActivityInfo activityInfo =
        mLocalContext
            .getPackageManager()
            .getActivityInfo(remoteApp, PackageManager.GET_META_DATA);
    // Context for the remote package; CONTEXT_IGNORE_SECURITY lets us load its resources.
    final Context context =
        mLocalContext.createPackageContext(remoteApp.getPackageName(), CONTEXT_IGNORE_SECURITY);
    context.setTheme(activityInfo.getThemeResource());
    fetchRemoteColors(mCurrentOverlayData, context);

    Logger.d(
        "OverlyDataCreatorForAndroid",
        "For component %s we fetched %s",
        remoteApp,
        mCurrentOverlayData);

    return mCurrentOverlayData;
  } catch (Exception e) {
    // Best effort: any failure (missing package, bad theme, ...) degrades to EMPTY.
    Logger.w("OverlyDataCreatorForAndroid", e, "Failed to fetch colors for %s", remoteApp);
    return EMPTY;
  }
}
// When the theme lacks the dark-variant attribute, the primary color must be reused
// as the primary-dark color while the other colors stay as declared.
@Test
public void testReturnDarkAsPrimaryIfMissing() {
  setupReturnedColors(R.style.MissingDarkAttribute);
  final OverlayData overlayData = mUnderTest.createOverlayData(mComponentName);

  Assert.assertTrue(overlayData.isValid());
  Assert.assertEquals(Color.parseColor("#ffcc9900"), overlayData.getPrimaryColor());
  // Dark falls back to the primary color.
  Assert.assertEquals(Color.parseColor("#ffcc9900"), overlayData.getPrimaryDarkColor());
  Assert.assertEquals(Color.parseColor("#ffff0000"), overlayData.getPrimaryTextColor());
}
// Reads the broker external view from ZooKeeper and maps each table (with the
// OFFLINE/REALTIME suffix stripped) to the host:port list of its ONLINE brokers.
// Any failure is logged and yields an empty map rather than propagating.
public Map<String, List<String>> getTableToBrokersMap() {
  Map<String, Set<String>> brokerUrlsMap = new HashMap<>();
  try {
    byte[] brokerResourceNodeData = _zkClient.readData(BROKER_EXTERNAL_VIEW_PATH, true);
    // Znode payload may be packed; unpack before parsing the JSON.
    brokerResourceNodeData = unpackZnodeIfNecessary(brokerResourceNodeData);
    JsonNode jsonObject = OBJECT_READER.readTree(getInputStream(brokerResourceNodeData));
    JsonNode brokerResourceNode = jsonObject.get("mapFields");

    Iterator<Entry<String, JsonNode>> resourceEntries = brokerResourceNode.fields();
    while (resourceEntries.hasNext()) {
      Entry<String, JsonNode> resourceEntry = resourceEntries.next();
      String resourceName = resourceEntry.getKey();
      // OFFLINE and REALTIME resources of the same table collapse into one entry.
      String tableName = resourceName.replace(OFFLINE_SUFFIX, "").replace(REALTIME_SUFFIX, "");
      Set<String> brokerUrls = brokerUrlsMap.computeIfAbsent(tableName, k -> new HashSet<>());
      JsonNode resource = resourceEntry.getValue();
      Iterator<Entry<String, JsonNode>> brokerEntries = resource.fields();
      while (brokerEntries.hasNext()) {
        Entry<String, JsonNode> brokerEntry = brokerEntries.next();
        String brokerName = brokerEntry.getKey();
        // Only live brokers: instance names start with "Broker_" and state must be ONLINE.
        if (brokerName.startsWith("Broker_") && "ONLINE".equals(brokerEntry.getValue().asText())) {
          brokerUrls.add(getHostPort(brokerName));
        }
      }
    }
  } catch (Exception e) {
    LOGGER.warn("Exception while reading External view from zookeeper", e);
    // ignore
  }

  // Copy the sets into lists for the public return type.
  Map<String, List<String>> tableToBrokersMap = new HashMap<>();
  for (Entry<String, Set<String>> entry : brokerUrlsMap.entrySet()) {
    tableToBrokersMap.put(entry.getKey(), new ArrayList<>(entry.getValue()));
  }
  return tableToBrokersMap;
}
@Test public void testGetTableToBrokersMapExceptionState() { // Setup final Map<String, List<String>> expectedResult = new HashMap<>(); when(_mockZkClient.readData(Mockito.anyString(), Mockito.anyBoolean())).thenThrow(RuntimeException.class); // Run the test final Map<String, List<String>> result = _externalViewReaderUnderTest.getTableToBrokersMap(); // Verify the results assertEquals(expectedResult, result); }
// Sets the ":authority" pseudo-header and returns this for chaining.
@Override
public Http2Headers authority(CharSequence value) {
  set(PseudoHeaderName.AUTHORITY.value(), value);
  return this;
}
// set(Http2Headers) must keep pseudo-headers (like :authority) ahead of regular
// headers regardless of the insertion order in the source headers.
@Test
public void testSetHeadersOrdersPseudoHeadersCorrectly() {
  Http2Headers headers = newHeaders();
  // Regular header deliberately added before the pseudo-header.
  Http2Headers other = new DefaultHttp2Headers().add("name2", "value2").authority("foo");

  headers.set(other);
  verifyPseudoHeadersFirst(headers);
  assertEquals(other.size(), headers.size());
  assertEquals("foo", headers.authority());
  assertEquals("value2", headers.get("name2"));
}
/**
 * Returns the total number of resources across all profiles.
 *
 * <p>An IntStream sum returns 0 for an empty stream, so the previous explicit
 * {@code isEmpty()} check was redundant; {@code mapToInt} also avoids the boxing
 * that {@code reduce(0, Integer::sum)} incurred.
 */
public int getTotalResourceCount() {
  return resources.values().stream().mapToInt(Integer::intValue).sum();
}
// The counter must report the sum of all per-profile counts (fixture totals 5).
@Test
void testGetTotalResourceCount() {
  final Map<ResourceProfile, Integer> profileCounts = createResources();
  final ResourceCounter counter = ResourceCounter.withResources(profileCounts);
  assertThat(counter.getTotalResourceCount()).isEqualTo(5);
}
// Applies this extractor to the message's source field: evaluates the configured
// condition, runs the extraction, writes the result field(s), optionally cuts the
// matched region out of the source field, and finally runs the converters.
// Timers/counters record condition evaluation, execution time, and hit/miss rates.
public void runExtractor(Message msg) {
  try(final Timer.Context ignored = completeTimer.time()) {
    final String field;
    try (final Timer.Context ignored2 = conditionTimer.time()) {
      // We can only work on Strings.
      if (!(msg.getField(sourceField) instanceof String)) {
        conditionMissesCounter.inc();
        return;
      }
      field = (String) msg.getField(sourceField);

      // Decide if to extract at all.
      if (conditionType.equals(ConditionType.STRING)) {
        if (field.contains(conditionValue)) {
          conditionHitsCounter.inc();
        } else {
          conditionMissesCounter.inc();
          return;
        }
      } else if (conditionType.equals(ConditionType.REGEX)) {
        if (regexConditionPattern.matcher(field).find()) {
          conditionHitsCounter.inc();
        } else {
          conditionMissesCounter.inc();
          return;
        }
      }
    }

    try (final Timer.Context ignored2 = executionTimer.time()) {
      Result[] results;
      try {
        results = run(field);
      } catch (ExtractorException e) {
        // Extraction failures are recorded on the message, not thrown.
        final String error = "Could not apply extractor <" + getTitle() + " (" + getId() + ")>";
        msg.addProcessingError(new Message.ProcessingError(
            ProcessingFailureCause.ExtractorException, error, ExceptionUtils.getRootCauseMessage(e)));
        return;
      }

      // Nothing extracted, or at least one null value: leave the message untouched.
      if (results == null || results.length == 0
          || Arrays.stream(results).anyMatch(result -> result.getValue() == null)) {
        return;
      } else if (results.length == 1 && results[0].target == null) {
        // results[0].target is null if this extractor cannot produce multiple fields use targetField in that case
        msg.addField(targetField, results[0].getValue());
      } else {
        for (final Result result : results) {
          msg.addField(result.getTarget(), result.getValue());
        }
      }

      // Remove original from message?
      if (cursorStrategy.equals(CursorStrategy.CUT) && !targetField.equals(sourceField) && !Message.RESERVED_FIELDS.contains(sourceField) && results[0].beginIndex != -1) {
        final StringBuilder sb = new StringBuilder(field);

        // Delete matched ranges from the highest end index to the lowest so that
        // earlier indices stay valid while we cut.
        final List<Result> reverseList = Arrays.stream(results)
            .sorted(Comparator.<Result>comparingInt(result -> result.endIndex).reversed())
            .collect(Collectors.toList());

        // remove all from reverse so that the indices still match
        for (final Result result : reverseList) {
          sb.delete(result.getBeginIndex(), result.getEndIndex());
        }

        final String builtString = sb.toString();
        // If cutting removed everything, keep a sentinel value instead of an empty field.
        final String finalResult = builtString.trim().isEmpty() ? "fullyCutByExtractor" : builtString;

        msg.removeField(sourceField);
        // TODO don't add an empty field back, or rather don't add fullyCutByExtractor
        msg.addField(sourceField, finalResult);
      }

      runConverters(msg);
    }
  }
}
// A single Result carrying its own target ("world") must be written to that target
// instead of the extractor's configured targetField.
@Test
public void testWithOneTargetValueResult() throws Exception {
  final TestExtractor extractor = new TestExtractor.Builder()
      .callback(new Callable<Result[]>() {
        @Override
        public Result[] call() throws Exception {
          // Result(value, target, beginIndex, endIndex); -1/-1 means no cut region.
          return new Result[]{
              new Result("hello", "world", -1, -1),
          };
        }
      })
      .build();
  final Message msg = createMessage("the hello");

  extractor.runExtractor(msg);

  // The configured target field is unused; the per-result target wins.
  assertThat(msg.hasField("target")).isFalse();
  assertThat(msg.getField("world")).isEqualTo("hello");
}
/**
 * Converts a {@link User} into a {@link UserVo}, stripping sensitive data: the
 * password is masked and the login history / last-login timestamp are cleared.
 * Deep copies are used so the original entity is never mutated.
 */
public static UserVo from(User user) {
  // Work on a deep copy of the status (or a fresh one) so the source user stays intact.
  User.UserStatus sanitizedStatus =
      JsonUtils.deepCopy(ObjectUtils.defaultIfNull(user.getStatus(), new User.UserStatus()));
  sanitizedStatus.setLoginHistories(List.of());
  sanitizedStatus.setLastLoginAt(null);

  // Mask the password on a copied spec.
  User.UserSpec sanitizedSpec = JsonUtils.deepCopy(user.getSpec());
  sanitizedSpec.setPassword("[PROTECTED]");

  return UserVo.builder()
      .metadata(user.getMetadata())
      .spec(sanitizedSpec)
      .status(sanitizedStatus)
      .build();
}
// Converting a user whose status is null must not NPE; a default status is substituted.
@Test
void fromWhenStatusIsNull() {
  User userWithoutStatus = new User();
  userWithoutStatus.setMetadata(new Metadata());
  userWithoutStatus.getMetadata().setName("fake-user");
  userWithoutStatus.setSpec(new User.UserSpec());

  UserVo result = UserVo.from(userWithoutStatus);

  assertThat(result).isNotNull();
}
/**
 * Validates that the given column name is non-null and contains only legal
 * database-identifier characters.
 *
 * @param columnName the name to validate
 * @return the validated column name
 */
public static String validateColumnName(@Nullable String columnName) {
  // Fail fast on null before running the character validation.
  String validated = requireNonNull(columnName, "Column name cannot be null");
  checkDbIdentifierCharacters(validated, "Column name");
  return validated;
}
// Reserved SQL keywords must be rejected as column names with a descriptive message.
@Test
public void fail_when_column_name_is_an_SQL_reserved_keyword() {
  assertThatThrownBy(() -> validateColumnName("values"))
      .isInstanceOf(IllegalArgumentException.class)
      .hasMessage("Column name must not be an SQL reserved keyword, got 'values'");
}
// Executes DBSIZE against the given cluster node and returns its key count.
@Override
public Long dbSize(RedisClusterNode node) {
  return execute(node, RedisCommands.DBSIZE);
}
// After FLUSHALL, DBSIZE on a master node must report zero keys.
@Test
public void testDbSize() {
  testInCluster(connection -> {
    connection.flushAll();

    RedisClusterNode master = getFirstMaster(connection);
    Long size = connection.dbSize(master);
    assertThat(size).isZero();
  });
}
/**
 * Compares the eventual result of the experiment stage against the expected value,
 * recording either the comparison outcome or the error, together with elapsed time.
 *
 * @param expected        the control value to compare against
 * @param experimentStage the asynchronous experimental computation
 */
public <T> void compareFutureResult(final T expected, final CompletionStage<T> experimentStage) {
  final Timer.Sample timerSample = Timer.start();

  experimentStage.whenComplete((experimentValue, throwable) -> {
    if (throwable == null) {
      recordResult(expected, experimentValue, timerSample);
    } else {
      recordError(throwable, timerSample);
    }
  });
}
// An already-completed matching future must record exactly one timed match.
@Test
void compareFutureResult() {
  experiment.compareFutureResult(12, CompletableFuture.completedFuture(12));
  verify(matchTimer).record(anyLong(), eq(TimeUnit.NANOSECONDS));
}
// Validates the application config: presence/validity, propagation of shutdown-wait
// settings into system properties (backward compatibility), name/parameter checks,
// and the QoS dependency check. A null config is silently accepted (nothing to validate).
public static void validateApplicationConfig(ApplicationConfig config) {
  if (config == null) {
    return;
  }

  if (!config.isValid()) {
    throw new IllegalStateException("No application config found or it's not a valid config! "
        + "Please add <dubbo:application name=\"...\" /> to your spring config.");
  }

  // backward compatibility
  ScopeModel scopeModel = ScopeModelUtil.getOrDefaultApplicationModel(config.getScopeModel());
  PropertiesConfiguration configuration = scopeModel.modelEnvironment().getPropertiesConfiguration();
  // Prefer SHUTDOWN_WAIT_KEY; fall back to SHUTDOWN_WAIT_SECONDS_KEY.
  String wait = configuration.getProperty(SHUTDOWN_WAIT_KEY);
  if (wait != null && wait.trim().length() > 0) {
    System.setProperty(SHUTDOWN_WAIT_KEY, wait.trim());
  } else {
    wait = configuration.getProperty(SHUTDOWN_WAIT_SECONDS_KEY);
    if (wait != null && wait.trim().length() > 0) {
      System.setProperty(SHUTDOWN_WAIT_SECONDS_KEY, wait.trim());
    }
  }

  checkName(NAME, config.getName());
  checkMultiName(OWNER, config.getOwner());
  checkName(ORGANIZATION, config.getOrganization());
  checkName(ARCHITECTURE, config.getArchitecture());
  checkName(ENVIRONMENT, config.getEnvironment());
  checkParameterName(config.getParameters());
  checkQosDependency(config);
}
// With QoS disabled no warning is logged; with QoS enabled but the dubbo-qos classes
// absent, a COMMON_CLASS_NOT_FOUND warning must be emitted.
// NOTE(review): validateApplicationConfig appears to be static, so the call through
// the mock reference dispatches to the real method; the mock only serves as a call
// target while the logger is swapped in via reflection — confirm.
@Test
void testCheckQosInApplicationConfig() throws Exception {
  ConfigValidationUtils mock = Mockito.mock(ConfigValidationUtils.class);
  ErrorTypeAwareLogger loggerMock = Mockito.mock(ErrorTypeAwareLogger.class);
  // Replace the logger so warnings can be verified.
  injectField(mock.getClass().getDeclaredField("logger"), loggerMock);
  ApplicationConfig config = new ApplicationConfig();
  config.setName("testName");

  config.setQosEnable(false);
  mock.validateApplicationConfig(config);
  verify(loggerMock, never()).warn(any(), any());

  config.setQosEnable(true);
  mock.validateApplicationConfig(config);
  verify(loggerMock)
      .warn(
          eq(COMMON_CLASS_NOT_FOUND),
          eq(""),
          eq(""),
          eq(
              "No QosProtocolWrapper class was found. Please check the dependency of dubbo-qos whether was imported correctly."),
          any());
}
// A plugin must not be able to unload other plugins through the wrapped manager;
// always fails with an IllegalAccessError identifying the offending plugin.
@Override
public void unloadPlugins() {
  throw new IllegalAccessError(PLUGIN_PREFIX + currentPluginId + " tried to execute unloadPlugins!");
}
// Calling unloadPlugins through the wrapper must always be rejected.
@Test
public void unloadPlugins() {
  assertThrows(IllegalAccessError.class, wrappedPluginManager::unloadPlugins);
}
// Looks up a unique bean of the given type in the named child context only
// (ancestors excluded); returns null when the lookup fails.
@Nullable
public <T> T getInstanceWithoutAncestors(String name, Class<T> type) {
  try {
    return BeanFactoryUtils.beanOfType(getContext(name), type);
  } catch (BeansException ex) {
    // No such bean (or lookup failed): treat as absent.
    return null;
  }
}
// A type absent from the named child context must yield null, not an exception.
@Test
void getInstanceWithoutAncestors_verifyNullForMissing() {
  AnnotationConfigApplicationContext parent = new AnnotationConfigApplicationContext();
  parent.refresh();
  FeignClientFactory feignClientFactory = new FeignClientFactory();
  feignClientFactory.setApplicationContext(parent);
  // The "empty" child context has no Logger.Level bean registered.
  feignClientFactory.setConfigurations(Lists.newArrayList(getSpec("empty", null, EmptyConfiguration.class)));

  Logger.Level level = feignClientFactory.getInstanceWithoutAncestors("empty", Logger.Level.class);
  assertThat(level).as("Logger was not null").isNull();
}
// Maximum number of MongoDB connections as configured (see "mongodb_max_connections").
public int getMaxConnections() {
  return maxConnections;
}
// The "mongodb_max_connections" setting must be parsed into getMaxConnections().
@Test
public void testGetMaximumMongoDBConnections() throws RepositoryException, ValidationException {
  MongoDbConfiguration configuration = new MongoDbConfiguration();
  new JadConfig(new InMemoryRepository(singletonMap("mongodb_max_connections", "12345")), configuration).process();

  assertEquals(12345, configuration.getMaxConnections());
}
// Releases feeder resources: closes the destination session, then tears down the
// message bus.
void close() throws Exception {
  destination.close();
  mbus.destroy();
}
// Serial-transfer mode must configure a StaticThrottlePolicy (no dynamic throttling).
@Test
public void requireThatSerialTransferModeConfiguresStaticThrottling() throws Exception {
  TestDriver driver = new TestDriver(new FeederParams().setSerialTransfer(), "", null);
  assertEquals(StaticThrottlePolicy.class, getThrottlePolicy(driver).getClass());
  assertTrue(driver.close());
}
// Federation router implementation of refreshNodes: validates the request, fans the
// call out to the active subClusters (or the one named in the request), records
// success metrics, and surfaces failures as YarnException with failure metrics.
@Override
public RefreshNodesResponse refreshNodes(RefreshNodesRequest request)
    throws StandbyException, YarnException, IOException {
  // parameter verification.
  // We will not check whether the DecommissionType is empty,
  // because this parameter has a default value at the proto level.
  if (request == null) {
    routerMetrics.incrRefreshNodesFailedRetrieved();
    RouterServerUtil.logAndThrowException("Missing RefreshNodes request.", null);
  }

  // call refreshNodes of activeSubClusters.
  try {
    long startTime = clock.getTime();
    RMAdminProtocolMethod remoteMethod = new RMAdminProtocolMethod(
        new Class[] {RefreshNodesRequest.class}, new Object[] {request});
    String subClusterId = request.getSubClusterId();
    Collection<RefreshNodesResponse> refreshNodesResps =
        remoteMethod.invokeConcurrent(this, RefreshNodesResponse.class, subClusterId);
    if (CollectionUtils.isNotEmpty(refreshNodesResps)) {
      long stopTime = clock.getTime();
      routerMetrics.succeededRefreshNodesRetrieved(stopTime - startTime);
      return RefreshNodesResponse.newInstance();
    }
  } catch (YarnException e) {
    routerMetrics.incrRefreshNodesFailedRetrieved();
    RouterServerUtil.logAndThrowException(e,
        "Unable to refreshNodes due to exception. " + e.getMessage());
  }

  // No responses came back: count the failure and report it.
  routerMetrics.incrRefreshNodesFailedRetrieved();
  throw new YarnException("Unable to refreshNodes due to exception.");
}
@Test public void testSC1RefreshNodes() throws Exception { // We will test 2 cases: // case 1, test the existing subCluster (SC-1). // case 2, test the non-exist subCluster. RefreshNodesRequest request = RefreshNodesRequest.newInstance(DecommissionType.NORMAL, 10, "SC-1"); interceptor.refreshNodes(request); String notExistsSubCluster = "SC-NON"; RefreshNodesRequest request1 = RefreshNodesRequest.newInstance( DecommissionType.NORMAL, 10, notExistsSubCluster); LambdaTestUtils.intercept(YarnException.class, "subClusterId = SC-NON is not an active subCluster.", () -> interceptor.refreshNodes(request1)); }
/**
 * Builds the update requirements for replacing a view: the only precondition
 * asserted is that the view UUID is unchanged.
 *
 * @param base            current view metadata (must not be null)
 * @param metadataUpdates the updates being applied (must not be null)
 * @return the list of requirements to validate before committing
 */
public static List<UpdateRequirement> forReplaceView(
    ViewMetadata base, List<MetadataUpdate> metadataUpdates) {
  Preconditions.checkArgument(null != base, "Invalid view metadata: null");
  Preconditions.checkArgument(null != metadataUpdates, "Invalid metadata updates: null");

  Builder requirements = new Builder(null, false);
  requirements.require(new UpdateRequirement.AssertViewUUID(base.uuid()));
  for (MetadataUpdate update : metadataUpdates) {
    requirements.update(update);
  }
  return requirements.build();
}
@Test
public void setAndRemovePropertiesForView() {
  // Setting a property on a view only requires the view UUID to match.
  List<UpdateRequirement> requirements =
      UpdateRequirements.forReplaceView(
          viewMetadata,
          ImmutableList.of(new MetadataUpdate.SetProperties(ImmutableMap.of("test", "test"))));
  for (UpdateRequirement req : requirements) {
    req.validate(viewMetadata);
  }
  assertThat(requirements)
      .hasSize(1)
      .hasOnlyElementsOfTypes(UpdateRequirement.AssertViewUUID.class);
  assertViewUUID(requirements);

  // Removing a property yields the same single UUID requirement.
  requirements =
      UpdateRequirements.forReplaceView(
          viewMetadata,
          ImmutableList.of(new MetadataUpdate.RemoveProperties(Sets.newHashSet("test"))));
  for (UpdateRequirement req : requirements) {
    req.validate(viewMetadata);
  }
  assertThat(requirements)
      .hasSize(1)
      .hasOnlyElementsOfTypes(UpdateRequirement.AssertViewUUID.class);
  assertViewUUID(requirements);
}
/**
 * Logs a full thread dump at INFO level, rate-limited class-wide to at most one
 * dump per {@code minInterval} seconds.
 *
 * @param log         commons-logging logger to write the dump to
 * @param title       heading included in the dump output
 * @param minInterval minimum seconds between dumps (shared across all callers)
 * @deprecated retained for the commons-logging API; an slf4j overload appears to
 *             exist as the replacement — see callers.
 */
@Deprecated
public static void logThreadInfo(org.apache.commons.logging.Log log, String title, long minInterval) {
  boolean dumpStack = false;
  if (log.isInfoEnabled()) {
    // Rate limit is class-wide: previousLogTime is shared state, so guard it.
    synchronized (ReflectionUtils.class) {
      long now = Time.monotonicNow();
      if (now - previousLogTime >= minInterval * 1000) {
        previousLogTime = now;
        dumpStack = true;
      }
    }
    // Dump outside the lock so slow I/O does not block other callers.
    if (dumpStack) {
      try {
        // Buffer the dump in memory so it is emitted as a single log message.
        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        printThreadInfo(new PrintStream(buffer, false, "UTF-8"), title);
        log.info(buffer.toString(StandardCharsets.UTF_8.name()));
      } catch (UnsupportedEncodingException ignored) {
        // UTF-8 support is mandated by the Java spec; this cannot happen.
      }
    }
  }
}
@Test
public void testLogThreadInfo() throws Exception {
    final String title = "title";
    final Logger logger = LoggerFactory.getLogger(TestReflectionUtils.class);
    final LogCapturer capturer = LogCapturer.captureLogs(logger);
    // minInterval of 0 guarantees the dump is not rate-limited away.
    ReflectionUtils.logThreadInfo(logger, title, 0L);
    Assertions.assertThat(capturer.getOutput())
        .contains("Process Thread Dump: " + title);
}
/**
 * Returns a proxy implementing the requested {@link PipelineOptions}
 * sub-interface, creating and caching it on first use.
 * Uses double-checked locking against the proxy cache so repeated calls for the
 * same interface return the same proxy instance.
 * NOTE(review): correctness of the unsynchronized first read assumes
 * computedProperties is safely published (e.g. volatile) — confirm at the field.
 */
<T extends PipelineOptions> T as(Class<T> iface) {
  checkNotNull(iface);
  checkArgument(iface.isInterface(), "Not an interface: %s", iface);
  T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
  if (existingOption == null) {
    synchronized (this) {
      // double check
      existingOption = computedProperties.interfaceToProxyCache.getInstance(iface);
      if (existingOption == null) {
        // Validate the interface against the interfaces already known here,
        // then build a dynamic proxy backed by this handler.
        Registration<T> registration =
            PipelineOptionsFactory.CACHE
                .get()
                .validateWellFormed(iface, computedProperties.knownInterfaces);
        List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors();
        Class<T> proxyClass = registration.getProxyClass();
        existingOption =
            InstanceBuilder.ofType(proxyClass)
                .fromClass(proxyClass)
                .withArg(InvocationHandler.class, this)
                .build();
        // Publish the proxy by swapping in an updated properties snapshot.
        computedProperties = computedProperties.updated(iface, existingOption, propertyDescriptors);
      }
    }
  }
  return existingOption;
}
@Test
public void testDisplayDataExcludesJsonIgnoreOptions() {
    // A property marked as ignored must not surface in the display data.
    final IgnoredProperty options = PipelineOptionsFactory.as(IgnoredProperty.class);
    options.setValue("foobar");

    final DisplayData displayData = DisplayData.from(options);
    assertThat(displayData, not(hasDisplayItem("value")));
}
/**
 * Resolves and loads the transformation/job metadata referenced by this step.
 * Resolution order per specification method: metadata file cache first, then
 * repository (when available), then the file system, with a ".ktr/.kjb"
 * extension retry for name-based filesystem loads. Any freshly loaded meta is
 * cached under the key left in idContainer[0].
 *
 * @param rep       repository to load from; null means filesystem-only loading
 * @param metaStore metastore handed through to the loaders
 * @param space     parent variable space used to substitute names and paths
 * @return the loaded meta, or null when nothing could be resolved
 * @throws KettleException if the referenced transformation/job cannot be loaded
 */
@Override
public T getMetaForStep( Repository rep, IMetaStore metaStore, VariableSpace space ) throws KettleException {
  // Note - was a synchronized static method, but as no static variables are manipulated, this is entirely unnecessary
  // baseStepMeta.getParentStepMeta() or getParantTransMeta() is only null when running unit tests
  metaFileCache = baseStepMeta.getParentStepMeta() == null
    || baseStepMeta.getParentStepMeta().getParentTransMeta() == null ? null
    : baseStepMeta.getParentStepMeta().getParentTransMeta().getMetaFileCache();
  T theMeta = null;
  CurrentDirectoryResolver r = new CurrentDirectoryResolver();
  VariableSpace tmpSpace;
  if ( isTransMeta() ) {
    // send restricted parentVariables with several important options
    // Otherwise we destroy child variables and the option "Inherit all variables from the transformation" is
    // enabled always.
    tmpSpace = r.resolveCurrentDirectory( specificationMethod, getVarSpaceOnlyWithRequiredParentVars( space ),
      rep, baseStepMeta.getParentStepMeta(), filename );
  } else {
    tmpSpace = r.resolveCurrentDirectory( specificationMethod, space, rep, baseStepMeta.getParentStepMeta(),
      filename );
  }
  final String[] idContainer = new String[ 1 ]; //unigue portion of cache key passed though argument
  switch ( specificationMethod ) {
    case FILENAME:
      String realFilename = tmpSpace.environmentSubstitute( filename );
      if ( isTransMeta() && space != null ) {
        // This is a parent transformation and parent variable should work here. A child file name can be resolved
        // via parent space.
        realFilename = space.environmentSubstitute( realFilename );
      }
      theMeta = attemptCacheRead( realFilename ); //try to get from the cache first
      if ( theMeta == null ) {
        try {
          // OK, load the meta-data from file...
          // Don't set internal variables: they belong to the parent thread!
          if ( rep != null ) {
            // Repository takes precedence when one is connected.
            theMeta = getMetaFromRepository2( realFilename, rep, r, idContainer );
          }
          if ( theMeta == null ) {
            // Fall back to loading from the file system.
            theMeta = attemptLoadMeta( realFilename, rep, metaStore, null, tmpSpace, idContainer );
            LogChannel.GENERAL.logDetailed( "Loading " + friendlyMetaType + " from repository",
              friendlyMetaType + " was loaded from XML file [" + realFilename + "]" );
          }
        } catch ( Exception e ) {
          if ( isTransMeta() ) {
            throw new KettleException(
              BaseMessages.getString( persistentClass, "StepWithMappingMeta.Exception.UnableToLoadTrans" ), e );
          } else {
            throw new KettleException(
              BaseMessages.getString( persistentClass, "JobExecutorMeta.Exception.UnableToLoadJob" ), e );
          }
        }
      }
      break;
    case REPOSITORY_BY_NAME:
      String realMetaName = tmpSpace.environmentSubstitute( Const.NVL( metaName, "" ) );
      String realDirectory = tmpSpace.environmentSubstitute( Const.NVL( directory, "" ) );
      if ( isTransMeta() && space != null ) {
        // This is a parent transformation and parent variable should work here. A child file name can be
        // resolved via
        // parent space.
        realMetaName = space.environmentSubstitute( realMetaName );
        realDirectory = space.environmentSubstitute( realDirectory );
      }
      if ( Utils.isEmpty( realDirectory ) && !Utils.isEmpty( realMetaName ) ) {
        // Split a path-style name ("dir/name") into directory and bare name.
        int index = realMetaName.lastIndexOf( '/' );
        String transPath = realMetaName;
        realMetaName = realMetaName.substring( index + 1 );
        realDirectory = transPath.substring( 0, index );
      }
      //We will use this key in cache no matter what the final successful path is so that we don't need to hit the
      // repo the next time it comes in. (ie: rep.findDirectory )
      String cacheKey = realDirectory + "/" + realMetaName;
      theMeta = attemptCacheRead( cacheKey ); //try to get from the cache first
      if ( theMeta == null ) {
        if ( rep != null ) {
          if ( !Utils.isEmpty( realMetaName ) && !Utils.isEmpty( realDirectory ) ) {
            realDirectory = r.normalizeSlashes( realDirectory );
            RepositoryDirectoryInterface repdir = rep.findDirectory( realDirectory );
            if ( repdir != null ) {
              try {
                // reads the last revision in the repository...
                theMeta = isTransMeta() ? (T) rep.loadTransformation( realMetaName, repdir, null, true, null )
                  : (T) rep.loadJob( realMetaName, repdir, null, null );
                if ( theMeta != null ) {
                  // Loaded from the repository: mark it for caching under the shared key.
                  idContainer[ 0 ] = cacheKey;
                }
                LogChannel.GENERAL.logDetailed( "Loading " + friendlyMetaType + " from repository",
                  "Executor " + friendlyMetaType + " [" + realMetaName + "] was loaded from the repository" );
              } catch ( Exception e ) {
                throw new KettleException( "Unable to load " + friendlyMetaType + " [" + realMetaName + "]", e );
              }
            }
          }
        } else {
          // rep is null, let's try loading by filename
          try {
            theMeta = attemptLoadMeta( cacheKey, rep, metaStore, null, tmpSpace, idContainer );
          } catch ( KettleException ke ) {
            try {
              // add .ktr extension and try again
              String extension = isTransMeta() ? Const.STRING_TRANS_DEFAULT_EXT : Const.STRING_JOB_DEFAULT_EXT;
              theMeta = attemptLoadMeta( cacheKey + "." + extension, rep, metaStore, null, tmpSpace, idContainer );
              if ( idContainer[ 0 ] != null ) {
                //It successfully read in the meta but we don't want to cache it with the extension so we override
                // it here
                idContainer[ 0 ] = cacheKey;
              }
            } catch ( KettleException ke2 ) {
              if ( isTransMeta() ) {
                throw new KettleException( BaseMessages.getString(
                  persistentClass, "StepWithMappingMeta.Exception.UnableToLoadTrans", realMetaName )
                  + realDirectory );
              } else {
                throw new KettleException( BaseMessages.getString(
                  persistentClass, "JobExecutorMeta.Exception.UnableToLoadJob", realMetaName )
                  + realDirectory );
              }
            }
          }
        }
      }
      break;
    case REPOSITORY_BY_REFERENCE:
      // Read the last revision by reference...
      theMeta = attemptCacheRead( metaObjectId.toString() );
      if ( theMeta == null ) {
        theMeta = isTransMeta() ? (T) rep.loadTransformation( metaObjectId, null )
          : (T) rep.loadJob( metaObjectId, null );
        if ( theMeta != null ) {
          idContainer[ 0 ] = metaObjectId.toString(); //Only set when not found in cache
        }
      }
      break;
    default:
      break;
  }
  //If theMeta is present and idContainer[0] != null, ( meaning it read it from repo/file ), then cache it
  cacheMeta( idContainer[ 0 ], theMeta );
  return theMeta;
}
@Test //A Transformation getting the TransMeta from the file system public void getMetaForStepAsTransFromFileSystemTest() throws Exception { setupTransExecutorMeta(); specificationMethod = ObjectLocationSpecificationMethod.FILENAME; MetaFileLoaderImpl metaFileLoader = new MetaFileLoaderImpl<TransMeta>( baseStepMeta, specificationMethod ); TransMeta transMeta = (TransMeta) metaFileLoader.getMetaForStep( repository, store, space ); validateFirstTransMetaAccess( transMeta ); transMeta = (TransMeta) metaFileLoader.getMetaForStep( repository, store, space ); validateSecondTransMetaAccess( transMeta ); }
/**
 * Loads a class, instrumenting it when the sandbox config claims the name and
 * delegating to the parent loader otherwise.
 * Uses the per-name class loading lock (parallel-capable loader) rather than
 * locking the whole loader.
 *
 * @throws ClassNotFoundException if this loader has been closed, or the class
 *                                cannot be found
 */
@Override
public Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
  synchronized (getClassLoadingLock(name)) {
    // Already defined by this loader? Return it without re-instrumenting.
    Class<?> loadedClass = findLoadedClass(name);
    if (loadedClass != null) {
      return loadedClass;
    }
    if (isClosed) {
      throw new ClassNotFoundException("This ClassLoader is closed");
    }
    if (config.shouldAcquire(name)) {
      // Sandbox-owned classes are (possibly) instrumented; the load is timed.
      loadedClass = PerfStatsCollector.getInstance()
          .measure("load sandboxed class", () -> maybeInstrumentClass(name));
    } else {
      // Everything else is delegated to the parent loader untouched.
      loadedClass = getParent().loadClass(name);
    }
    if (resolve) {
      resolveClass(loadedClass);
    }
    return loadedClass;
  }
}
@Test
public void callingNormalMethodShouldInvokeClassHandler() throws Exception {
  final Class<?> exampleClass = loadClass(AnExampleClass.class);
  final Method normalMethod = exampleClass.getMethod("normalMethod", String.class, int.class);
  // Instantiating records the constructor invocation in the transcript.
  final Object exampleInstance = exampleClass.getDeclaredConstructor().newInstance();

  final Object result = normalMethod.invoke(exampleInstance, "value1", 123);
  assertEquals(
      "response from methodInvoked: AnExampleClass.normalMethod(java.lang.String value1, int 123)",
      result);
  assertThat(transcript)
      .containsExactly(
          "methodInvoked: AnExampleClass.__constructor__()",
          "methodInvoked: AnExampleClass.normalMethod(java.lang.String value1, int 123)");
}
/**
 * Builds a tooltip for the given path: the absolute path, followed on separate
 * lines by the checksum (when present) and the version id (when set).
 */
@Override
public String getTooltip(final Path file) {
    final StringBuilder tooltip = new StringBuilder(file.getAbsolute());
    final Checksum checksum = file.attributes().getChecksum();
    if(Checksum.NONE != checksum) {
        final String algorithm = StringUtils.upperCase(checksum.algorithm.name());
        tooltip.append("\n").append(String.format("%s %s", algorithm, checksum.hash));
    }
    final String versionId = file.attributes().getVersionId();
    if(StringUtils.isNotBlank(versionId)) {
        tooltip.append("\n").append(versionId);
    }
    return tooltip.toString();
}
@Test
public void testGetTooltip() {
    // A plain file with no checksum or version id yields just its absolute path.
    final Path file = new Path("/p", EnumSet.of(Path.Type.file));
    assertEquals("/p", new PathTooltipService().getTooltip(file));
}
/**
 * Parses a consumer-protocol subscription from the buffer at the given
 * (normalized) schema version.
 *
 * @throws SchemaException if the buffer is truncated
 */
public static Subscription deserializeSubscription(final ByteBuffer buffer, short version) {
    // Normalize the advertised version before parsing.
    version = checkSubscriptionVersion(version);
    try {
        final ConsumerProtocolSubscription subscriptionData =
            new ConsumerProtocolSubscription(new ByteBufferAccessor(buffer), version);

        // Flatten the per-topic partition lists into TopicPartition instances.
        final List<TopicPartition> ownedPartitions = new ArrayList<>();
        for (ConsumerProtocolSubscription.TopicPartition topicEntry : subscriptionData.ownedPartitions()) {
            for (Integer partitionId : topicEntry.partitions()) {
                ownedPartitions.add(new TopicPartition(topicEntry.topic(), partitionId));
            }
        }

        final ByteBuffer userData = subscriptionData.userData();
        final String rackId = subscriptionData.rackId();
        return new Subscription(
            subscriptionData.topics(),
            userData == null ? null : userData.duplicate(),
            ownedPartitions,
            subscriptionData.generationId(),
            rackId == null || rackId.isEmpty() ? Optional.empty() : Optional.of(rackId));
    } catch (BufferUnderflowException e) {
        throw new SchemaException("Buffer underflow while parsing consumer protocol's subscription", e);
    }
}
@Test
public void deserializeFutureSubscriptionVersion() {
    // Data written with a newer (unknown) version must still deserialize —
    // presumably the parser clamps it via checkSubscriptionVersion; verify there.
    ByteBuffer buffer = generateFutureSubscriptionVersionData();

    Subscription subscription = ConsumerProtocol.deserializeSubscription(buffer);
    subscription.setGroupInstanceId(groupInstanceId);
    // All known fields must survive the round trip despite the future version.
    assertEquals(Collections.singleton("topic"), toSet(subscription.topics()));
    assertEquals(Collections.singleton(tp2), toSet(subscription.ownedPartitions()));
    assertEquals(groupInstanceId, subscription.groupInstanceId());
    assertEquals(generationId, subscription.generationId().orElse(DEFAULT_GENERATION));
    assertEquals(rackId, subscription.rackId());
}
static boolean portTaggedWith(PortInfo portInfo, List<String> requiredTags) { // vespa-model-inspect displays upper case tags, while actual tags for (at least) node-admin are lower case. Collection<String> upperCasePortTags = portInfo.getTags().stream().map(String::toUpperCase).collect(Collectors.toSet()); for (var tag : requiredTags) { if (!upperCasePortTags.contains(tag.toUpperCase())) { return false; } } return true; }
@Test
public void caseInsensitiveTagMatching() {
    PortInfo portInfo = mock(PortInfo.class);
    // Mixed-case tags must match required tags regardless of case.
    when(portInfo.getTags()).thenReturn(List.of("http", "STATE", "foo"));
    assertTrue(StateV1HealthModel.portTaggedWith(portInfo, StateV1HealthModel.HTTP_HEALTH_PORT_TAGS));
    assertTrue(StateV1HealthModel.portTaggedWith(portInfo, List.of("HTTP", "state")));

    // Missing the "state" tag entirely must fail, in either casing of the query.
    when(portInfo.getTags()).thenReturn(List.of("http", "foo"));
    assertFalse(StateV1HealthModel.portTaggedWith(portInfo, StateV1HealthModel.HTTP_HEALTH_PORT_TAGS));
    assertFalse(StateV1HealthModel.portTaggedWith(portInfo, List.of("HTTP", "state")));
}
/** Orders segments by their numeric id alone. */
@Override
public int compareTo(final TimestampedSegment segment) {
    final long otherId = segment.id;
    return Long.compare(this.id, otherId);
}
@Test
public void shouldCompareSegmentIdOnly() {
    // Names ("C","B","A") and timestamps deliberately disagree with the id order
    // (50, 100, 0) so any accidental comparison on them would flip the results.
    final TimestampedSegment segment1 = new TimestampedSegment("a", "C", 50L, Position.emptyPosition(), metricsRecorder);
    final TimestampedSegment segment2 = new TimestampedSegment("b", "B", 100L, Position.emptyPosition(), metricsRecorder);
    final TimestampedSegment segment3 = new TimestampedSegment("c", "A", 0L, Position.emptyPosition(), metricsRecorder);
    // Reflexive, then every ordered pair in both directions.
    assertThat(segment1.compareTo(segment1), equalTo(0));
    assertThat(segment1.compareTo(segment2), equalTo(-1));
    assertThat(segment2.compareTo(segment1), equalTo(1));
    assertThat(segment1.compareTo(segment3), equalTo(1));
    assertThat(segment3.compareTo(segment1), equalTo(-1));
    assertThat(segment2.compareTo(segment3), equalTo(1));
    assertThat(segment3.compareTo(segment2), equalTo(-1));
    segment1.close();
    segment2.close();
    segment3.close();
}
/** Fails unless the subject is {@code null}; delegates to the standard equality check against {@code null}. */
public void isNull() {
    standardIsEqualTo(null);
}
@Test
public void stringIsNullFail() {
    // isNull() on a non-null subject must fail; expectFailure captures that failure.
    expectFailure.whenTesting().that("foo").isNull();
}
/** Returns itself: a COS base object is its own COS representation. */
@Override
public COSBase getCOSObject() {
    return this;
}
@Test
void testGetCOSObject() {
    // getCOSObject() is expected to hand back the object itself.
    final COSBase cosObject = testCOSBase.getCOSObject();
    assertEquals(testCOSBase, cosObject);
}
/**
 * Returns an evaluator over the named function(s) of the given model.
 * requireModel presumably rejects unknown model names — confirm its contract.
 */
public FunctionEvaluator evaluatorOf(String modelName, String ... names) {
    return requireModel(modelName).evaluatorOf(names);
}
@Test public void testBindingValidation() { List<ExpressionFunction> functions = new ArrayList<>(); ExpressionFunction function = new ExpressionFunction("test", RankingExpression.from("sum(arg1 * arg2)")); function = function.withArgument("arg1", TensorType.fromSpec("tensor(d0[1])")); function = function.withArgument("arg2", TensorType.fromSpec("tensor(d1{})")); functions.add(function); Model model = new Model("test-model", functions); try { // No bindings FunctionEvaluator evaluator = model.evaluatorOf("test"); evaluator.evaluate(); } catch (IllegalStateException e) { assertEquals("Argument 'arg1' must be bound to a value of type tensor(d0[1])", Exceptions.toMessageString(e)); } try { // Just one binding FunctionEvaluator evaluator = model.evaluatorOf("test"); evaluator.bind("arg2", Tensor.from(TensorType.fromSpec("tensor(d1{})"), "{{d1:foo}:0.1}")); evaluator.evaluate(); } catch (IllegalStateException e) { assertEquals("Argument 'arg1' must be bound to a value of type tensor(d0[1])", Exceptions.toMessageString(e)); } try { // Just the other binding FunctionEvaluator evaluator = model.evaluatorOf("test"); evaluator.bind("arg1", Tensor.from(TensorType.fromSpec("tensor(d0[1])"), "{{d0:0}:0.1}")); evaluator.evaluate(); } catch (IllegalStateException e) { assertEquals("Argument 'arg2' must be bound to a value of type tensor(d1{})", Exceptions.toMessageString(e)); } try { // Wrong binding argument FunctionEvaluator evaluator = model.evaluatorOf("test"); evaluator.bind("argNone", Tensor.from(TensorType.fromSpec("tensor(d1{})"), "{{d1:foo}:0.1}")); evaluator.evaluate(); } catch (IllegalArgumentException e) { assertEquals("'argNone' is not a valid argument in function 'test'. 
Expected arguments: arg1: tensor(d0[1]), arg2: tensor(d1{})", Exceptions.toMessageString(e)); } try { // Wrong binding type FunctionEvaluator evaluator = model.evaluatorOf("test"); evaluator.bind("arg1", Tensor.from(TensorType.fromSpec("tensor(d3{})"), "{{d3:foo}:0.1}")); evaluator.evaluate(); } catch (IllegalArgumentException e) { assertEquals("'arg1' must be of type tensor(d0[1]), not tensor(d3{})", Exceptions.toMessageString(e)); } try { // Attempt to reuse evaluator FunctionEvaluator evaluator = model.evaluatorOf("test"); evaluator.bind("arg1", Tensor.from(TensorType.fromSpec("tensor(d0[1])"), "{{d0:0}:0.1}")); evaluator.bind("arg2", Tensor.from(TensorType.fromSpec("tensor(d1{})"), "{{d1:foo}:0.1}")); evaluator.evaluate(); evaluator.bind("arg1", Tensor.from(TensorType.fromSpec("tensor(d0[1])"), "{{d0:0}:0.1}")); } catch (IllegalStateException e) { assertEquals("Cannot bind a new value in a used evaluator", Exceptions.toMessageString(e)); } }
/**
 * Creates a fresh handler for this factory's type: BODY handlers are always
 * wrapped in the write-limited text pipeline, IGNORE discards all events, and
 * other formats are wrapped only when a write limit is configured.
 */
@Override
public ContentHandler getNewContentHandler() {
    if (type == HANDLER_TYPE.BODY) {
        return new BodyContentHandler(
                new WriteOutContentHandler(new ToTextContentHandler(), writeLimit,
                        throwOnWriteLimitReached, parseContext));
    }
    if (type == HANDLER_TYPE.IGNORE) {
        // Swallow every SAX event.
        return new DefaultHandler();
    }
    ContentHandler formatHandler = getFormatHandler();
    // A negative writeLimit means "unlimited": no wrapping required.
    return writeLimit < 0
            ? formatHandler
            : new WriteOutContentHandler(formatHandler, writeLimit, throwOnWriteLimitReached, parseContext);
}
@Test
public void testHTML() throws Exception {
    // Unlimited handler: full content preserved, HTML structure emitted.
    Parser p = new MockParser(OVER_DEFAULT);
    BasicContentHandlerFactory.HANDLER_TYPE type = BasicContentHandlerFactory.HANDLER_TYPE.HTML;
    ContentHandler handler = new BasicContentHandlerFactory(type, -1).getNewContentHandler();
    assertTrue(handler instanceof ToHTMLContentHandler);
    p.parse(null, handler, null, null);
    String extracted = handler.toString();
    assertContains("<head><title>This is the title", extracted);
    assertContains("aaaaaaaaaa", extracted);
    assertTrue(extracted.length() > 110000);
    //now test write limit
    p = new MockParser(10);
    handler = new BasicContentHandlerFactory(type, 5).getNewContentHandler();
    assertTrue(handler instanceof WriteOutContentHandler);
    assertWriteLimitReached(p, (WriteOutContentHandler) handler);
    extracted = handler.toString();
    // Only the first few characters survive the limit of 5.
    assertContains("This ", extracted);
    assertNotContains("aaaa", extracted);
    //now test outputstream call
    p = new MockParser(OVER_DEFAULT);
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    handler = new BasicContentHandlerFactory(type, -1).getNewContentHandler(os, UTF_8);
    assertTrue(handler instanceof ToHTMLContentHandler);
    p.parse(null, handler, null, null);
    assertContains("This is the title", os.toByteArray());
    assertContains("aaaaaaaaaa", os.toByteArray());
    assertContains("<body", os.toByteArray());
    assertContains("<html", os.toByteArray());
    assertTrue(os.toByteArray().length > 110000);
    // Write-limited stream handler: nothing reaches the output stream.
    p = new MockParser(10);
    os = new ByteArrayOutputStream();
    handler = new BasicContentHandlerFactory(type, 5).getNewContentHandler(os, UTF_8);
    assertTrue(handler instanceof WriteOutContentHandler);
    assertWriteLimitReached(p, (WriteOutContentHandler) handler);
    assertEquals(0, os.toByteArray().length);
}
/**
 * Assembles the full Elasticsearch settings map (file system, network,
 * cluster, security, misc) and logs the resolved listen addresses.
 */
public Map<String, String> build() {
    Map<String, String> settings = new HashMap<>();
    configureFileSystem(settings);
    configureNetwork(settings);
    configureCluster(settings);
    configureSecurity(settings);
    configureOthers(settings);
    LOGGER.info("Elasticsearch listening on [HTTP: {}:{}, TCP: {}:{}]",
        settings.get(ES_HTTP_HOST_KEY), settings.get(ES_HTTP_PORT_KEY),
        settings.get(ES_TRANSPORT_HOST_KEY), settings.get(ES_TRANSPORT_PORT_KEY));
    return settings;
}
@Test
public void path_properties_are_values_from_EsFileSystem_argument() throws IOException {
    File foo = temp.newFolder();
    // Stub an installation whose directories all live under the temp folder.
    EsInstallation mockedEsInstallation = mock(EsInstallation.class);
    File home = new File(foo, "home");
    when(mockedEsInstallation.getHomeDirectory()).thenReturn(home);
    File conf = new File(foo, "conf");
    when(mockedEsInstallation.getConfDirectory()).thenReturn(conf);
    File log = new File(foo, "log");
    when(mockedEsInstallation.getLogDirectory()).thenReturn(log);
    File data = new File(foo, "data");
    when(mockedEsInstallation.getDataDirectory()).thenReturn(data);
    EsSettings underTest = new EsSettings(minProps(true), mockedEsInstallation, system);
    Map<String, String> generated = underTest.build();
    // data and log dirs flow through; path.conf must NOT be emitted.
    assertThat(generated)
      .containsEntry("path.data", data.getPath())
      .containsEntry("path.logs", log.getPath());
    assertThat(generated.get("path.conf")).isNull();
}
/**
 * Resolves key query metadata for the given store/key using the default
 * partitioner built from the serializer.
 *
 * @throws NullPointerException     if keySerializer is null
 * @throws IllegalArgumentException when named topologies are in use; callers
 *                                  must use the topologyName overload instead
 */
public synchronized <K> KeyQueryMetadata getKeyQueryMetadataForKey(final String storeName,
                                                                   final K key,
                                                                   final Serializer<K> keySerializer) {
    Objects.requireNonNull(keySerializer, "keySerializer can't be null");
    if (topologyMetadata.hasNamedTopologies()) {
        // Fixed: the concatenated message was missing separator spaces
        // ("keySerializer)method", "thataccepts").
        throw new IllegalArgumentException("Cannot invoke the getKeyQueryMetadataForKey(storeName, key, keySerializer)"
                                               + " method when using named topologies, please use the overload that"
                                               + " accepts a topologyName parameter to identify the correct store");
    }
    return getKeyQueryMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer));
}
@SuppressWarnings("unchecked")
@Test
public void shouldThrowIfStreamPartitionerIsNull() {
    // A null partitioner must be rejected with a NullPointerException.
    assertThrows(NullPointerException.class, () -> metadataState.getKeyQueryMetadataForKey(null,
        "key", (StreamPartitioner) null));
}
/**
 * Builds the application report for the requested application on behalf of the
 * caller, after auditing and ACL checks.
 *
 * @throws ApplicationNotFoundException (a YarnException) when the id is null
 *         or access verification fails inside verifyUserAccessForRMApp
 */
@Override
public GetApplicationReportResponse getApplicationReport(
    GetApplicationReportRequest request) throws YarnException {
  ApplicationId applicationId = request.getApplicationId();
  if (applicationId == null) {
    // A null id is surfaced as "not found" rather than as a bad argument.
    throw new ApplicationNotFoundException("Invalid application id: null");
  }
  UserGroupInformation callerUGI = getCallerUgi(applicationId,
      AuditConstants.GET_APP_REPORT);
  RMApp application = verifyUserAccessForRMApp(applicationId, callerUGI,
      AuditConstants.GET_APP_REPORT, ApplicationAccessType.VIEW_APP, false);
  // allowAccess is passed into the report builder — presumably it controls how
  // much detail the caller sees; confirm in createAndGetApplicationReport.
  boolean allowAccess = checkAccess(callerUGI, application.getUser(),
      ApplicationAccessType.VIEW_APP, application);
  ApplicationReport report =
      application.createAndGetApplicationReport(callerUGI.getUserName(),
          allowAccess);
  GetApplicationReportResponse response = recordFactory
      .newRecordInstance(GetApplicationReportResponse.class);
  response.setApplicationReport(report);
  return response;
}
@Test public void testGetApplicationReport() throws Exception { ResourceScheduler scheduler = mock(ResourceScheduler.class); RMContext rmContext = mock(RMContext.class); mockRMContext(scheduler, rmContext); ApplicationId appId1 = getApplicationId(1); ApplicationACLsManager mockAclsManager = mock(ApplicationACLsManager.class); when( mockAclsManager.checkAccess(UserGroupInformation.getCurrentUser(), ApplicationAccessType.VIEW_APP, null, appId1)).thenReturn(true); ClientRMService rmService = new ClientRMService(rmContext, scheduler, null, mockAclsManager, null, null); try { GetApplicationReportRequest request = recordFactory .newRecordInstance(GetApplicationReportRequest.class); request.setApplicationId(appId1); GetApplicationReportResponse response = rmService.getApplicationReport(request); ApplicationReport report = response.getApplicationReport(); ApplicationResourceUsageReport usageReport = report.getApplicationResourceUsageReport(); Assert.assertEquals(10, usageReport.getMemorySeconds()); Assert.assertEquals(3, usageReport.getVcoreSeconds()); Assert.assertEquals("<Not set>", report.getAmNodeLabelExpression()); Assert.assertEquals("<Not set>", report.getAppNodeLabelExpression()); // if application has am node label set to blank ApplicationId appId2 = getApplicationId(2); when(mockAclsManager.checkAccess(UserGroupInformation.getCurrentUser(), ApplicationAccessType.VIEW_APP, null, appId2)).thenReturn(true); request.setApplicationId(appId2); response = rmService.getApplicationReport(request); report = response.getApplicationReport(); Assert.assertEquals(NodeLabel.DEFAULT_NODE_LABEL_PARTITION, report.getAmNodeLabelExpression()); Assert.assertEquals(NodeLabel.NODE_LABEL_EXPRESSION_NOT_SET, report.getAppNodeLabelExpression()); // if application has am node label set to blank ApplicationId appId3 = getApplicationId(3); when(mockAclsManager.checkAccess(UserGroupInformation.getCurrentUser(), ApplicationAccessType.VIEW_APP, null, appId3)).thenReturn(true); 
request.setApplicationId(appId3); response = rmService.getApplicationReport(request); report = response.getApplicationReport(); Assert.assertEquals("high-mem", report.getAmNodeLabelExpression()); Assert.assertEquals("high-mem", report.getAppNodeLabelExpression()); // if application id is null GetApplicationReportRequest invalidRequest = recordFactory .newRecordInstance(GetApplicationReportRequest.class); invalidRequest.setApplicationId(null); try { rmService.getApplicationReport(invalidRequest); } catch (YarnException e) { // rmService should return a ApplicationNotFoundException // when a null application id is provided Assert.assertTrue(e instanceof ApplicationNotFoundException); } } finally { rmService.close(); } }
/**
 * Builds the unique, ascending primary-key index spec for the given extension
 * type, keyed on {@code metadata.name}.
 */
public static <E extends Extension> IndexSpec primaryKeyIndexSpec(Class<E> type) {
    var nameAttribute = IndexAttributeFactory.simpleAttribute(type, e -> e.getMetadata().getName());
    return new IndexSpec()
        .setName(PRIMARY_INDEX_NAME)
        .setOrder(IndexSpec.OrderType.ASC)
        .setUnique(true)
        .setIndexFunc(nameAttribute);
}
@Test
void primaryKeyIndexSpec() {
    var spec = PrimaryKeySpecUtils.primaryKeyIndexSpec(FakeExtension.class);
    // Spec shape: unique ascending index over "metadata.name".
    assertThat(spec.getName()).isEqualTo("metadata.name");
    assertThat(spec.getOrder()).isEqualTo(IndexSpec.OrderType.ASC);
    assertThat(spec.isUnique()).isTrue();
    assertThat(spec.getIndexFunc()).isNotNull();
    assertThat(spec.getIndexFunc().getObjectType()).isEqualTo(FakeExtension.class);

    // The index function must extract the extension's metadata name.
    var extension = new FakeExtension();
    extension.setMetadata(new Metadata());
    extension.getMetadata().setName("fake-name-1");
    assertThat(spec.getIndexFunc().getValues(extension))
        .isEqualTo(Set.of("fake-name-1"));
}
/**
 * Merges a requested update on top of an existing sink configuration.
 * Mutable fields present in {@code newConfig} override the existing values;
 * immutable fields (tenant/namespace/name, subscription name, input topics and
 * their regex-ness, processing guarantees, ordering flags, auto-ack) must not
 * change, or an IllegalArgumentException is thrown.
 *
 * @param existingConfig currently deployed config; cloned, not modified
 * @param newConfig      requested update; its input specs map is normalized in place
 * @return a clone of existingConfig with the permitted updates applied
 * @throws IllegalArgumentException if an immutable field differs
 */
public static SinkConfig validateUpdate(SinkConfig existingConfig, SinkConfig newConfig) {
    SinkConfig mergedConfig = clone(existingConfig);
    // Identity can never change for an existing sink.
    if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
        throw new IllegalArgumentException("Tenants differ");
    }
    if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
        throw new IllegalArgumentException("Namespaces differ");
    }
    if (!existingConfig.getName().equals(newConfig.getName())) {
        throw new IllegalArgumentException("Sink Names differ");
    }
    if (!StringUtils.isEmpty(newConfig.getClassName())) {
        mergedConfig.setClassName(newConfig.getClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getSourceSubscriptionName()) && !newConfig.getSourceSubscriptionName()
            .equals(existingConfig.getSourceSubscriptionName())) {
        throw new IllegalArgumentException("Subscription Name cannot be altered");
    }
    if (newConfig.getInputSpecs() == null) {
        newConfig.setInputSpecs(new HashMap<>());
    }
    if (mergedConfig.getInputSpecs() == null) {
        mergedConfig.setInputSpecs(new HashMap<>());
    }
    if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
        mergedConfig.setLogTopic(newConfig.getLogTopic());
    }
    // Normalize all input notations (inputs list, topics pattern, serde map,
    // schema map) into newConfig's input-specs map before comparing topics.
    if (newConfig.getInputs() != null) {
        newConfig.getInputs().forEach((topicName -> {
            newConfig.getInputSpecs().putIfAbsent(topicName,
                    ConsumerConfig.builder().isRegexPattern(false).build());
        }));
    }
    if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
        newConfig.getInputSpecs().put(newConfig.getTopicsPattern(),
                ConsumerConfig.builder()
                        .isRegexPattern(true)
                        .build());
    }
    if (newConfig.getTopicToSerdeClassName() != null) {
        newConfig.getTopicToSerdeClassName().forEach((topicName, serdeClassName) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .serdeClassName(serdeClassName)
                            .isRegexPattern(false)
                            .build());
        });
    }
    if (newConfig.getTopicToSchemaType() != null) {
        newConfig.getTopicToSchemaType().forEach((topicName, schemaClassname) -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder()
                            .schemaType(schemaClassname)
                            .isRegexPattern(false)
                            .build());
        });
    }
    // Topics may not be added, and a topic's regex-ness may not flip; other
    // per-topic consumer settings are taken from the update.
    if (!newConfig.getInputSpecs().isEmpty()) {
        SinkConfig finalMergedConfig = mergedConfig;
        newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
            if (!existingConfig.getInputSpecs().containsKey(topicName)) {
                throw new IllegalArgumentException("Input Topics cannot be altered");
            }
            if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
                throw new IllegalArgumentException(
                        "isRegexPattern for input topic " + topicName + " cannot be altered");
            }
            finalMergedConfig.getInputSpecs().put(topicName, consumerConfig);
        });
    }
    if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
            .equals(existingConfig.getProcessingGuarantees())) {
        throw new IllegalArgumentException("Processing Guarantees cannot be altered");
    }
    if (newConfig.getConfigs() != null) {
        mergedConfig.setConfigs(newConfig.getConfigs());
    }
    if (newConfig.getSecrets() != null) {
        mergedConfig.setSecrets(newConfig.getSecrets());
    }
    if (newConfig.getParallelism() != null) {
        mergedConfig.setParallelism(newConfig.getParallelism());
    }
    if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
            .equals(existingConfig.getRetainOrdering())) {
        throw new IllegalArgumentException("Retain Ordering cannot be altered");
    }
    if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
            .equals(existingConfig.getRetainKeyOrdering())) {
        throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
    }
    if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
        throw new IllegalArgumentException("AutoAck cannot be altered");
    }
    if (newConfig.getResources() != null) {
        // Resources are merged field-by-field rather than replaced wholesale.
        mergedConfig
                .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
    }
    if (newConfig.getTimeoutMs() != null) {
        mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
    }
    if (newConfig.getCleanupSubscription() != null) {
        mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
    }
    if (!StringUtils.isEmpty(newConfig.getArchive())) {
        mergedConfig.setArchive(newConfig.getArchive());
    }
    if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
        mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
    }
    if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
        mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
    }
    if (newConfig.getTransformFunction() != null) {
        mergedConfig.setTransformFunction(newConfig.getTransformFunction());
    }
    if (newConfig.getTransformFunctionClassName() != null) {
        mergedConfig.setTransformFunctionClassName(newConfig.getTransformFunctionClassName());
    }
    if (newConfig.getTransformFunctionConfig() != null) {
        mergedConfig.setTransformFunctionConfig(newConfig.getTransformFunctionConfig());
    }
    return mergedConfig;
}
@Test
public void testMergeRuntimeFlags() {
    SinkConfig sinkConfig = createSinkConfig();
    // Update only the runtimeFlags field.
    SinkConfig newFunctionConfig = createUpdatedSinkConfig("runtimeFlags", "-Dfoo=bar2");
    SinkConfig mergedConfig = SinkConfigUtils.validateUpdate(sinkConfig, newFunctionConfig);
    // The merged config must carry the new flags value.
    assertEquals(
            mergedConfig.getRuntimeFlags(),
            "-Dfoo=bar2"
    );
    // After restoring the original flags, the merged config must be otherwise
    // identical to the original (JSON round-trip comparison).
    mergedConfig.setRuntimeFlags(sinkConfig.getRuntimeFlags());
    assertEquals(
            new Gson().toJson(sinkConfig),
            new Gson().toJson(mergedConfig)
    );
}
/**
 * Scans the given list for the first entry that parses as an ASN.1 sequence
 * whose leading element is the signed-PIP object identifier.
 *
 * <p>Entries that are not valid ASN.1 sequences are logged and skipped.
 *
 * @param response the list of polymorphic pseudonym entries to inspect
 * @return the first sequence tagged with {@code SIGNED_PIP_OID}
 * @throws IllegalArgumentException if no signed PIP is present in the list
 */
public ASN1Sequence signedPipFromPplist(List<PolymorphicPseudonymType> response) {
    for (PolymorphicPseudonymType candidate : response) {
        ASN1Sequence parsed;
        try {
            parsed = (ASN1Sequence) ASN1Sequence.fromByteArray(candidate.getValue());
        } catch (Exception e) {
            // Malformed entry: log and move on to the next candidate.
            logger.error(String.format("PolymorphicPseudonymType not a valid ASN1 Sequence. Exception: '%s'", e.getMessage()));
            continue;
        }
        // Only sequences that start with an OID can be a signed PIP.
        if (!(parsed.getObjectAt(0) instanceof ASN1ObjectIdentifier)) {
            continue;
        }
        ASN1ObjectIdentifier oid = (ASN1ObjectIdentifier) parsed.getObjectAt(0);
        if (oid.getId().equals(SIGNED_PIP_OID)) {
            return parsed;
        }
    }
    throw new IllegalArgumentException("No signed pip found in PolymorphicPseudonymType list");
}
@Test()
public void signedPipFromPplistInvalidPpTest() throws IOException {
    List<PolymorphicPseudonymType> pplist = new ArrayList<>();
    // First entry is not parseable ASN.1 and must be skipped, not fail the call.
    pplist.add(new PolymorphicPseudonymType() {
        {
            value = "Not an ASN1 sequence".getBytes();
        }
    });
    // Second entry is a valid signed PIP and should be returned.
    pplist.add(new PolymorphicPseudonymType() {
        {
            value = signedPip.getEncoded();
        }
    });
    ASN1Sequence result = bsnkUtil.signedPipFromPplist(pplist);
    assertEquals(Base64.getEncoder().encodeToString(result.getEncoded()), signedPipBase64);
}
/**
 * Returns the next item in round-robin order.
 *
 * <p>Uses {@link Math#floorMod(int, int)} instead of {@code Math.abs(x % n)}:
 * with the abs-of-remainder form the selection order reverses once the shared
 * int counter overflows past {@code Integer.MAX_VALUE}, breaking round-robin
 * fairness. {@code floorMod} always yields a non-negative, forward-cycling
 * index across the wrap-around.
 *
 * @return the item at the current round-robin position
 */
@Override
public T next() {
    return items.get(Math.floorMod(index.getAndIncrement(), items.size()));
}
@Test
void testNext() {
    String item1 = "item1";
    String item2 = "item2";
    GenericPoller<String> poller = new GenericPoller<>(Arrays.asList(item1, item2));
    // Round-robin: items come back in order and wrap around to the first.
    assertEquals(item1, poller.next());
    assertEquals(item2, poller.next());
    assertEquals(item1, poller.next());
}
/**
 * Registers a webhook bot assembled from the given path, update handler and
 * webhook lifecycle callbacks.
 *
 * @param botPath       path under which the bot receives updates
 * @param updateHandler callback invoked for each incoming {@link Update}
 * @param setWebhook    action that installs the webhook
 * @param deleteWebhook action that removes the webhook
 * @throws TelegramApiException if registration fails
 */
public void registerBot(
        String botPath,
        Function<Update, BotApiMethod<?>> updateHandler,
        Runnable setWebhook,
        Runnable deleteWebhook
) throws TelegramApiException {
    // Assemble a default bot from the parts, then reuse the bot-based overload.
    DefaultTelegramWebhookBot bot = DefaultTelegramWebhookBot
            .builder()
            .botPath(botPath)
            .updateHandler(updateHandler)
            .setWebhook(setWebhook)
            .deleteWebhook(deleteWebhook)
            .build();
    registerBot(bot);
}
@Test
public void testRegisterBotWithSamePathOverridePreviousOne() throws IOException, TelegramApiException {
    // Two bots registered on the same path: the second must replace the first.
    TestTelegramWebhookBot secondBot = new TestTelegramWebhookBot("/test");
    application.registerBot(telegramWebhookBot);
    application.registerBot(secondBot);
    Request request = new Request.Builder()
            .url("http://127.0.0.1:" + webhookOptions.getPort() + "/test")
            .headers(Headers.of(headers))
            .post(RequestBody.create(objectMapper.writeValueAsString(update), MediaType.parse("application/json")))
            .build();
    httpClient.newCall(request).execute();
    // Only the most recently registered bot receives the update.
    assertNull(telegramWebhookBot.updateReceived);
    assertNotNull(secondBot.updateReceived);
    assertEquals(update.getUpdateId(), secondBot.updateReceived.getUpdateId());
}
/**
 * Executes the callable synchronously on the caller's thread and wraps the
 * outcome in an already-completed future.
 *
 * @param task the callable to run immediately
 * @return a completed future holding either the result or the thrown exception
 */
@Override
@Nonnull
public <T> Future<T> submit(@Nonnull Callable<T> task) {
    // Reject new work once this executor has been shut down.
    throwRejectedExecutionExceptionIfShutdown();
    try {
        return new CompletedFuture<>(task.call(), null);
    } catch (Exception caught) {
        // Surface the failure through the future rather than propagating it here.
        return new CompletedFuture<>(null, caught);
    }
}
@Test
void testRejectedSubmitRunnable() {
    // Submitting after shutdown must raise a RejectedExecutionException.
    testRejectedExecutionException(testInstance -> testInstance.submit(() -> {}));
}
/**
 * Executes all input groups of the given context, either serially on the
 * calling thread or in parallel on the engine's executor.
 *
 * @param executionGroupContext context holding the input groups and report data
 * @param firstCallback         callback applied to the first input group
 * @param callback              callback applied to the remaining input groups
 * @param serial                whether to execute serially instead of in parallel
 * @return the collected execution results, empty when there is no input
 * @throws SQLException if execution fails
 */
public <I, O> List<O> execute(final ExecutionGroupContext<I> executionGroupContext,
                              final ExecutorCallback<I, O> firstCallback, final ExecutorCallback<I, O> callback, final boolean serial) throws SQLException {
    // Nothing to do for an empty context.
    if (executionGroupContext.getInputGroups().isEmpty()) {
        return Collections.emptyList();
    }
    final String processId = executionGroupContext.getReportContext().getProcessId();
    if (serial) {
        return serialExecute(executionGroupContext.getInputGroups().iterator(), processId, firstCallback, callback);
    }
    return parallelExecute(executionGroupContext.getInputGroups().iterator(), processId, firstCallback, callback);
}
@Test
void assertSerialExecute() throws SQLException, InterruptedException {
    // serial=true: all four groups must be processed on the calling thread.
    List<String> actual = executorEngine.execute(executionGroupContext, firstCallback, callback, true);
    latch.await();
    assertThat(actual.size(), is(4));
}
/**
 * Serializes this value as an XML fragment: name, type, text, length,
 * precision and null flag wrapped in a {@code <value>} element.
 *
 * @return the XML representation of this value
 */
@Override
public String getXML() {
    final StringBuilder xml = new StringBuilder( 128 );
    xml.append( "<" + XML_TAG + ">" );
    // Field order is fixed and relied upon by the matching parser.
    xml.append( XMLHandler.addTagValue( "name", getName(), false ) )
       .append( XMLHandler.addTagValue( "type", getTypeDesc(), false ) )
       .append( XMLHandler.addTagValue( "text", toString( false ), false ) )
       .append( XMLHandler.addTagValue( "length", getLength(), false ) )
       .append( XMLHandler.addTagValue( "precision", getPrecision(), false ) )
       .append( XMLHandler.addTagValue( "isnull", isNull(), false ) );
    xml.append( "</" + XML_TAG + ">" );
    return xml.toString();
}
@Test
public void testGetXML() {
    String result = null;

    // String values: length is kept, precision is suppressed to -1.
    Value vs1 = new Value( "Name", Value.VALUE_TYPE_STRING );
    vs1.setValue( "test" );
    vs1.setLength( 4 );
    vs1.setPrecision( 2 );
    result = vs1.getXML();
    assertEquals( "<value><name>Name</name><type>String</type><text>test</text><length>4</length><precision>-1</precision><isnull>N</isnull></value>", result );

    // Boolean values: both length and precision are suppressed to -1.
    Value vs2 = new Value( "Name", Value.VALUE_TYPE_BOOLEAN );
    vs2.setValue( false );
    vs2.setLength( 4 );
    vs2.setPrecision( 2 );
    result = vs2.getXML();
    assertEquals( "<value><name>Name</name><type>Boolean</type><text>false</text><length>-1</length><precision>-1</precision><isnull>N</isnull></value>", result );

    // Integer values: precision is forced to 0.
    Value vs3 = new Value( "Name", Value.VALUE_TYPE_INTEGER );
    vs3.setValue( 10 );
    vs3.setLength( 4 );
    vs3.setPrecision( 2 );
    result = vs3.getXML();
    assertEquals( "<value><name>Name</name><type>Integer</type><text>10</text><length>4</length><precision>0</precision><isnull>N</isnull></value>", result );

    // Number values: length and precision are preserved.
    Value vs4 = new Value( "Name", Value.VALUE_TYPE_NUMBER );
    vs4.setValue( 10.0D );
    vs4.setLength( 4 );
    vs4.setPrecision( 2 );
    result = vs4.getXML();
    assertEquals( "<value><name>Name</name><type>Number</type><text>10.0</text><length>4</length><precision>2</precision><isnull>N</isnull></value>", result );

    // BigNumber values: length and precision are preserved.
    Value vs5 = new Value( "Name", Value.VALUE_TYPE_BIGNUMBER );
    vs5.setValue( new BigDecimal( 10 ) );
    vs5.setLength( 4 );
    vs5.setPrecision( 2 );
    result = vs5.getXML();
    assertEquals( "<value><name>Name</name><type>BigNumber</type><text>10</text><length>4</length><precision>2</precision><isnull>N</isnull></value>", result );

    // Date values: length is suppressed to -1, precision preserved.
    SimpleDateFormat df = new SimpleDateFormat( "yyyy/MM/dd HH:mm:ss.SSS" );
    Date dt = df.parse( "2006/03/01 17:01:02.005", new ParsePosition( 0 ) );
    Value vs6 = new Value( "Name", Value.VALUE_TYPE_DATE );
    vs6.setValue( dt );
    vs6.setLength( 4 );
    vs6.setPrecision( 2 );
    result = vs6.getXML();
    assertEquals( "<value><name>Name</name><type>Date</type><text>2006/03/01 17:01:02.005</text><length>-1</length><precision>2</precision><isnull>N</isnull></value>", result );
}
/**
 * Collects resources matching {@code name} from the sources configured by the
 * class-loading strategy, preserving the strategy's source order.
 *
 * @param name the resource name to look up
 * @return all matching resource URLs, in strategy order
 * @throws IOException if resource lookup fails
 */
@Override
public Enumeration<URL> getResources(String name) throws IOException {
    ClassLoadingStrategy strategy = getClassLoadingStrategy(name);
    List<URL> collected = new ArrayList<>();
    log.trace("Received request to load resources '{}'", name);
    for (ClassLoadingStrategy.Source source : strategy.getSources()) {
        if (source == ClassLoadingStrategy.Source.APPLICATION) {
            // Application resources come from the parent class loader, if any.
            if (getParent() != null) {
                collected.addAll(Collections.list(getParent().getResources(name)));
            }
        } else if (source == ClassLoadingStrategy.Source.PLUGIN) {
            collected.addAll(Collections.list(findResources(name)));
        } else if (source == ClassLoadingStrategy.Source.DEPENDENCIES) {
            collected.addAll(findResourcesFromDependencies(name));
        }
    }
    return Collections.enumeration(collected);
}
@Test
void parentFirstGetResourcesExistsInParentAndDependencyAndPlugin() throws URISyntaxException, IOException {
    // Resource exists in parent, dependency and plugin: all three must be
    // returned, with the parent's copy first under the parent-first strategy.
    Enumeration<URL> resources = parentFirstPluginClassLoader.getResources("META-INF/file-in-both-parent-and-dependency-and-plugin");
    assertNumberOfResourcesAndFirstLineOfFirstElement(3, "parent", resources);
}
/**
 * Checks whether the connected server's version is one this client supports.
 *
 * @return {@code true} if the reported version starts with a supported prefix
 * @throws RuntimeException if the version endpoint returns no data
 */
@Override
public boolean isSupported() {
    String versionJson = version();
    // No response at all is treated as a hard failure, not "unsupported".
    if (StringUtils.isBlank(versionJson)) {
        throw new RuntimeException("Cannot get the version!");
    }
    String reportedVersion = JSON.parseObject(versionJson).getString("version");
    if (StringUtils.isBlank(reportedVersion)) {
        return false;
    }
    // Supported when the reported version matches any known prefix.
    for (String supportedPrefix : getSupportVersionPrefix()) {
        if (reportedVersion.startsWith(supportedPrefix)) {
            return true;
        }
    }
    return false;
}
@Test
public void testIsSupported() {
    // The test TSDB endpoint must report a supported version.
    Assert.assertTrue(new TSDBConnection(TSDB_ADDRESS, null, null).isSupported());
}
/**
 * Checks, per module, that every bundled dependency carries the expected
 * optional flag in the module's dependency tree.
 *
 * @param bundledDependenciesByModule bundled dependencies reported by the shade-plugin, keyed by module
 * @param dependenciesByModule        dependency trees reported by the dependency-plugin, keyed by module
 * @return per-module violations; modules with no violations are omitted
 * @throws IllegalStateException if a module is known to the shade-plugin but
 *                               missing from the dependency-plugin output
 */
private static Map<String, Set<Dependency>> checkOptionalFlags(
        Map<String, Set<Dependency>> bundledDependenciesByModule,
        Map<String, DependencyTree> dependenciesByModule) {
    final Map<String, Set<Dependency>> allViolations = new HashMap<>();
    // Iterate entries instead of keySet()+get(): one map lookup per module.
    for (Map.Entry<String, Set<Dependency>> entry : bundledDependenciesByModule.entrySet()) {
        final String module = entry.getKey();
        LOG.debug("Checking module '{}'.", module);
        if (!dependenciesByModule.containsKey(module)) {
            // Both plugins must agree on the module set; a mismatch means the
            // build setup is broken, not that the module is violation-free.
            throw new IllegalStateException(
                    String.format(
                            "Module %s listed by shade-plugin, but not dependency-plugin.",
                            module));
        }
        final Collection<Dependency> bundledDependencies = entry.getValue();
        final DependencyTree dependencyTree = dependenciesByModule.get(module);
        final Set<Dependency> violations =
                checkOptionalFlags(module, bundledDependencies, dependencyTree);
        if (violations.isEmpty()) {
            LOG.info("OK: {}", module);
        } else {
            allViolations.put(module, violations);
        }
    }
    return allViolations;
}
@Test
void testTransitiveBundledDependencyMustBeOptional() {
    // b is bundled but only reachable transitively through a; it must be
    // flagged because it is not marked optional.
    final Dependency dependencyA = createMandatoryDependency("a");
    final Dependency dependencyB = createMandatoryDependency("b");
    final Set<Dependency> bundled = Collections.singleton(dependencyB);
    final DependencyTree dependencyTree =
            new DependencyTree()
                    .addDirectDependency(dependencyA)
                    .addTransitiveDependencyTo(dependencyB, dependencyA);
    final Set<Dependency> violations =
            ShadeOptionalChecker.checkOptionalFlags(MODULE, bundled, dependencyTree);
    assertThat(violations).containsExactly(dependencyB);
}
@Override public void commit(Collection<CommitRequest<FileSinkCommittable>> requests) throws IOException, InterruptedException { for (CommitRequest<FileSinkCommittable> request : requests) { FileSinkCommittable committable = request.getCommittable(); if (committable.hasPendingFile()) { // We should always use commitAfterRecovery which contains additional checks. bucketWriter.recoverPendingFile(committable.getPendingFile()).commitAfterRecovery(); } if (committable.hasInProgressFileToCleanup()) { bucketWriter.cleanupInProgressFileRecoverable( committable.getInProgressFileToCleanup()); } if (committable.hasCompactedFileToCleanup()) { Path committedFileToCleanup = committable.getCompactedFileToCleanup(); try { committedFileToCleanup.getFileSystem().delete(committedFileToCleanup, false); } catch (Exception e) { // Try best to cleanup compacting files, skip if failed. if (LOG.isDebugEnabled()) { LOG.debug( "Failed to cleanup a compacted file, the file will be remained and should not be visible: {}", committedFileToCleanup, e); } } } } }
@Test
void testCommitPendingFile() throws Exception {
    StubBucketWriter stubBucketWriter = new StubBucketWriter();
    FileCommitter fileCommitter = new FileCommitter(stubBucketWriter);
    MockCommitRequest<FileSinkCommittable> fileSinkCommittable =
            new MockCommitRequest<>(
                    new FileSinkCommittable(
                            "0", new FileSinkTestUtils.TestPendingFileRecoverable()));
    fileCommitter.commit(Collections.singletonList(fileSinkCommittable));
    // Exactly one pending file recovered and committed, no cleanups, no retries.
    assertThat(stubBucketWriter.getRecoveredPendingFiles()).hasSize(1);
    assertThat(stubBucketWriter.getNumCleanUp()).isEqualTo(0);
    assertThat(stubBucketWriter.getRecoveredPendingFiles().get(0).isCommitted()).isTrue();
    assertThat(fileSinkCommittable.getNumberOfRetries()).isEqualTo(0);
}
/**
 * Enqueues the message into the per-originator queue so that messages from
 * the same originator are processed one at a time.
 *
 * @param ctx rule-engine context used during processing
 * @param msg the incoming message
 */
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
    // One semaphore-backed queue per originator; created lazily on first use.
    SemaphoreWithTbMsgQueue queue =
            locks.computeIfAbsent(msg.getOriginator(), SemaphoreWithTbMsgQueue::new);
    queue.addToQueueAndTryProcess(msg, ctx, this::processMsgAsync);
}
@Test
public void test_sqrt_5_meta() {
    // SQRT of message-body field "a", result written to metadata key "result"
    // rounded to 3 decimal places.
    var node = initNode(TbRuleNodeMathFunctionType.SQRT,
            new TbMathResult(TbMathArgumentType.MESSAGE_METADATA, "result", 3, false, false, null),
            new TbMathArgument(TbMathArgumentType.MESSAGE_BODY, "a")
    );
    TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, originator, TbMsgMetaData.EMPTY,
            JacksonUtil.newObjectNode().put("a", 5).toString());
    node.onMsg(ctx, msg);
    ArgumentCaptor<TbMsg> msgCaptor = ArgumentCaptor.forClass(TbMsg.class);
    verify(ctx, timeout(TIMEOUT)).tellSuccess(msgCaptor.capture());
    TbMsg resultMsg = msgCaptor.getValue();
    assertNotNull(resultMsg);
    assertNotNull(resultMsg.getData());
    var result = resultMsg.getMetaData().getValue("result");
    assertNotNull(result);
    // sqrt(5) = 2.2360679... rounded to 3 decimals.
    assertEquals("2.236", result);
}
/**
 * Computes the size of the next (doubled) buffer for this growable queue.
 *
 * @param buffer the current buffer
 * @return the next buffer size (double the usable capacity, plus one slot)
 * @throws IllegalStateException if the current buffer already exceeds half of
 *                               the maximum queue capacity and must not grow
 */
@Override
protected int getNextBufferSize(E[] buffer) {
    long halfCapacity = maxQueueCapacity / 2;
    if (buffer.length > halfCapacity) {
        throw new IllegalStateException();
    }
    // Buffers carry one extra slot beyond the usable capacity, hence
    // double (length - 1) usable slots and re-add the extra slot.
    return 2 * (buffer.length - 1) + 1;
}
@Test(dataProvider = "full")
public void getNextBufferSize_invalid(MpscGrowableArrayQueue<Integer> queue) {
    // A buffer larger than half the max capacity must not be allowed to grow.
    var buffer = new Integer[FULL_SIZE + 1];
    assertThrows(IllegalStateException.class, () -> queue.getNextBufferSize(buffer));
}
/**
 * Returns the report for a single container, routed to the sub-cluster that
 * owns the application.
 *
 * <p>Every failure path increments the failed-retrieved metric and writes an
 * audit-log failure entry before the exception leaves this method.
 *
 * @param req          servlet request (passed through to the sub-cluster interceptor)
 * @param res          servlet response (passed through)
 * @param appId        application id owning the container
 * @param appAttemptId attempt id owning the container
 * @param containerId  container to look up
 * @return the container report
 * @throws IllegalArgumentException if any id fails format validation
 * @throws RuntimeException         if the lookup fails or returns no result
 */
@Override
public ContainerInfo getContainer(HttpServletRequest req, HttpServletResponse res,
    String appId, String appAttemptId, String containerId) {
  // FederationInterceptorREST#getContainer is logically
  // the same as FederationClientInterceptor#getContainerReport,
  // so use the same Metric.

  // Check that the appId/appAttemptId/containerId format is accurate
  try {
    RouterServerUtil.validateApplicationAttemptId(appAttemptId);
    RouterServerUtil.validateContainerId(containerId);
  } catch (IllegalArgumentException e) {
    // Validation failures are metered and audited, then rethrown unchanged.
    routerMetrics.incrGetContainerReportFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_CONTAINER, UNKNOWN,
        TARGET_WEB_SERVICE, e.getLocalizedMessage());
    throw e;
  }

  try {
    long startTime = Time.now();
    DefaultRequestInterceptorREST interceptor = getOrCreateInterceptorByAppId(appId);
    ContainerInfo containerInfo =
        interceptor.getContainer(req, res, appId, appAttemptId, containerId);
    if (containerInfo != null) {
      // Success: record latency and audit before returning.
      long stopTime = Time.now();
      routerMetrics.succeededGetContainerReportRetrieved(stopTime - startTime);
      RouterAuditLogger.logSuccess(getUser().getShortUserName(), GET_CONTAINER,
          TARGET_WEB_SERVICE);
      return containerInfo;
    }
    // A null result falls through to the generic failure handling below.
  } catch (IllegalArgumentException e) {
    String msg = String.format(
        "Unable to get the AppAttempt appId: %s, appAttemptId: %s, containerId: %s.",
        appId, appAttemptId, containerId);
    routerMetrics.incrGetContainerReportFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_CONTAINER, UNKNOWN,
        TARGET_WEB_SERVICE, e.getLocalizedMessage());
    RouterServerUtil.logAndThrowRunTimeException(msg, e);
  } catch (YarnException e) {
    routerMetrics.incrGetContainerReportFailedRetrieved();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_CONTAINER, UNKNOWN,
        TARGET_WEB_SERVICE, e.getLocalizedMessage());
    RouterServerUtil.logAndThrowRunTimeException("getContainer Failed.", e);
  }

  // Reached when the interceptor returned null: meter, audit and fail.
  routerMetrics.incrGetContainerReportFailedRetrieved();
  RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_CONTAINER, UNKNOWN,
      TARGET_WEB_SERVICE, "getContainer Failed.");
  throw new RuntimeException("getContainer Failed.");
}
@Test
public void testGetContainer() throws Exception {
    // Build an applicationId / attemptId / containerId triple.
    ApplicationId appId = ApplicationId.newInstance(Time.now(), 1);
    ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId appContainerId = ContainerId.newContainerId(appAttemptId, 1);
    String applicationId = appId.toString();
    String attemptId = appAttemptId.toString();
    String containerId = appContainerId.toString();

    // Submit application to multiSubCluster
    ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo();
    context.setApplicationId(applicationId);
    Assert.assertNotNull(interceptor.submitApplication(context, null));

    // Test Case1: Wrong ContainerId
    LambdaTestUtils.intercept(IllegalArgumentException.class, "Invalid ContainerId prefix: 0",
        () -> interceptor.getContainer(null, null, applicationId, attemptId, "0"));

    // Test Case2: Correct ContainerId
    ContainerInfo containerInfo =
        interceptor.getContainer(null, null, applicationId, attemptId, containerId);
    Assert.assertNotNull(containerInfo);
}
@Override @Transactional(rollbackFor = Exception.class) public void deleteCombinationActivity(Long id) { // 校验存在 CombinationActivityDO activity = validateCombinationActivityExists(id); // 校验状态 if (CommonStatusEnum.isEnable(activity.getStatus())) { throw exception(COMBINATION_ACTIVITY_DELETE_FAIL_STATUS_NOT_CLOSED_OR_END); } // 删除 combinationActivityMapper.deleteById(id); }
@Test
public void testDeleteCombinationActivity_success() {
    // mock data
    CombinationActivityDO dbCombinationActivity = randomPojo(CombinationActivityDO.class);
    combinationActivityMapper.insert(dbCombinationActivity); // @Sql: insert an existing row first
    // prepare parameters
    Long id = dbCombinationActivity.getId();
    // invoke
    combinationActivityService.deleteCombinationActivity(id);
    // verify the row no longer exists
    assertNull(combinationActivityMapper.selectById(id));
}
/**
 * Static factory building a {@code SerdeFeatures} from the given features.
 *
 * @param features the features to include
 * @return a {@code SerdeFeatures} wrapping an immutable copy of the features
 */
public static SerdeFeatures of(final SerdeFeature... features) {
    // Defensive immutable copy: callers cannot mutate the backing set.
    final ImmutableSet<SerdeFeature> featureSet = ImmutableSet.copyOf(features);
    return new SerdeFeatures(featureSet);
}
@Test
public void shouldHandleIncompatibleInFindAnyParam() {
    // findAny must cope with a search set containing features incompatible
    // with the stored one and still find the match.
    assertThat(SerdeFeatures.of(WRAP_SINGLES)
            .findAny(EnumSet.allOf(SerdeFeature.class)), is(not(Optional.empty())));
}
/**
 * Deserializes an {@link IcebergSourceSplit} written by any supported
 * serializer version.
 *
 * @param version    serialization format version of {@code serialized}
 * @param serialized the serialized split bytes
 * @return the deserialized split
 * @throws IOException if the version is not one of the supported versions
 */
@Override
public IcebergSourceSplit deserialize(int version, byte[] serialized) throws IOException {
    switch (version) {
        case 1:
            return IcebergSourceSplit.deserializeV1(serialized);
        case 2:
            return IcebergSourceSplit.deserializeV2(serialized, caseSensitive);
        case 3:
            return IcebergSourceSplit.deserializeV3(serialized, caseSensitive);
        default:
            // Error message previously claimed only version 1 was supported,
            // which was stale once V2/V3 were added.
            throw new IOException(
                    String.format(
                            "Failed to deserialize IcebergSourceSplit. "
                                + "Encountered unsupported version: %d. Supported versions are [1, 2, 3]",
                            version));
    }
}
@Test
public void testDeserializeV1() throws Exception {
    final List<IcebergSourceSplit> splits =
        SplitHelpers.createSplitsFromTransientHadoopTable(temporaryFolder, 1, 1);
    // Round-trip each split through the V1 format.
    for (IcebergSourceSplit split : splits) {
        byte[] result = split.serializeV1();
        IcebergSourceSplit deserialized = serializer.deserialize(1, result);
        assertSplitEquals(split, deserialized);
    }
}
/**
 * Returns the Pulsar log collect client managed by the plugin data handler.
 *
 * @return the shared {@link PulsarLogCollectClient} instance
 */
@Override
protected PulsarLogCollectClient getLogConsumeClient() {
    // Delegates to the plugin data handler, which holds the client instance.
    return LoggingPulsarPluginDataHandler.getPulsarLogCollectClient();
}
@Test
public void testGetLogConsumeClient() {
    // The collector must hand out a PulsarLogCollectClient instance.
    LogConsumeClient logConsumeClient = new PulsarLogCollector().getLogConsumeClient();
    Assertions.assertEquals(PulsarLogCollectClient.class, logConsumeClient.getClass());
}
/** Creates a Main instance; configuration is applied later via setters/configure(). */
public Main() {
}
@Test
public void testMain() throws Exception {
    // lets make a simple route
    Main main = new Main();
    main.configure().addRoutesBuilder(new MyRouteBuilder());
    main.enableTrace();
    // Bind a value into the registry and verify it is visible via the context.
    main.bind("foo", 31);
    main.start();
    CamelContext camelContext = main.getCamelContext();
    assertNotNull(camelContext);
    assertEquals(31, camelContext.getRegistry().lookupByName("foo"),
        "Could not find the registry bound object");
    // Send one message through the route and assert it reaches the mock endpoint.
    MockEndpoint endpoint = camelContext.getEndpoint("mock:results", MockEndpoint.class);
    endpoint.expectedMinimumMessageCount(1);
    main.getCamelTemplate().sendBody("direct:start", "<message>1</message>");
    endpoint.assertIsSatisfied();
    main.stop();
}
/**
 * Calculates the orientation of the segment from (lat1, lon1) to (lat2, lon2).
 *
 * <p>Delegates to the 5-argument overload with the flag fixed to {@code true}
 * (presumably the exact, non-approximated variant — confirm against that
 * overload's parameter name).
 *
 * @return the orientation in radians
 */
public double calcOrientation(double lat1, double lon1, double lat2, double lon2) {
    return calcOrientation(lat1, lon1, lat2, lon2, true);
}
@Test
public void testOrientationFast() {
    // Cardinal and diagonal directions using the fast (approximate) variant.
    assertEquals(90.0, Math.toDegrees(AC.calcOrientation(0, 0, 1, 0, false)), 0.01);
    assertEquals(45.0, Math.toDegrees(AC.calcOrientation(0, 0, 1, 1, false)), 0.01);
    assertEquals(0.0, Math.toDegrees(AC.calcOrientation(0, 0, 0, 1, false)), 0.01);
    assertEquals(-45.0, Math.toDegrees(AC.calcOrientation(0, 0, -1, 1, false)), 0.01);
    assertEquals(-135.0, Math.toDegrees(AC.calcOrientation(0, 0, -1, -1, false)), 0.01);

    // is symmetric?
    assertEquals(90 - 32.92, Math.toDegrees(AC.calcOrientation(49.942, 11.580, 49.944, 11.582, false)), 0.01);
    assertEquals(-90 - 32.92, Math.toDegrees(AC.calcOrientation(49.944, 11.582, 49.942, 11.580, false)), 0.01);
}
/**
 * Asserts that the subject is not equal to any of the given values.
 *
 * @param first  first disallowed value
 * @param second second disallowed value
 * @param rest   any further disallowed values
 */
public void isNoneOf(
    @Nullable Object first, @Nullable Object second, @Nullable Object @Nullable ... rest) {
  // Collect the varargs into one iterable and reuse the isNotIn contract.
  isNotIn(accumulate(first, second, rest));
}
@Test
public void isNoneOf() {
    // "x" matches none of the disallowed values, so the assertion passes.
    assertThat("x").isNoneOf("a", "b", "c");
}
/** Logs a debug message by delegating to the wrapped logger. */
@Override
public void debug(String msg) {
    logger.debug(msg);
}
@Test
void testDebugWithFormat() {
    // The parameterized debug call must be forwarded verbatim to SLF4J.
    jobRunrDashboardLogger.debug("Debug with {}", "format");
    verify(slfLogger).debug("Debug with {}", "format");
}
@Override @Transactional(rollbackFor = Exception.class) public void updateCodegen(CodegenUpdateReqVO updateReqVO) { // 校验是否已经存在 if (codegenTableMapper.selectById(updateReqVO.getTable().getId()) == null) { throw exception(CODEGEN_TABLE_NOT_EXISTS); } // 校验主表字段存在 if (Objects.equals(updateReqVO.getTable().getTemplateType(), CodegenTemplateTypeEnum.SUB.getType())) { if (codegenTableMapper.selectById(updateReqVO.getTable().getMasterTableId()) == null) { throw exception(CODEGEN_MASTER_TABLE_NOT_EXISTS, updateReqVO.getTable().getMasterTableId()); } if (CollUtil.findOne(updateReqVO.getColumns(), // 关联主表的字段不存在 column -> column.getId().equals(updateReqVO.getTable().getSubJoinColumnId())) == null) { throw exception(CODEGEN_SUB_COLUMN_NOT_EXISTS, updateReqVO.getTable().getSubJoinColumnId()); } } // 更新 table 表定义 CodegenTableDO updateTableObj = BeanUtils.toBean(updateReqVO.getTable(), CodegenTableDO.class); codegenTableMapper.updateById(updateTableObj); // 更新 column 字段定义 List<CodegenColumnDO> updateColumnObjs = BeanUtils.toBean(updateReqVO.getColumns(), CodegenColumnDO.class); updateColumnObjs.forEach(updateColumnObj -> codegenColumnMapper.updateById(updateColumnObj)); }
@Test
public void testUpdateCodegen_notExists() {
    // prepare parameters (random table id -> no matching row in the DB)
    CodegenUpdateReqVO updateReqVO = randomPojo(CodegenUpdateReqVO.class);
    // no mocking needed
    // invoke and assert the "table not exists" service exception
    assertServiceException(() -> codegenService.updateCodegen(updateReqVO), CODEGEN_TABLE_NOT_EXISTS);
}
/**
 * Returns the single main-input {@link PCollection} of the given transform
 * application.
 *
 * @param application the applied transform to extract the main input from
 * @return the main input PCollection
 */
public static <T> PCollection<T> getSingletonMainInput(
    AppliedPTransform<? extends PCollection<? extends T>, ?, ?> application) {
  // Side inputs are registered as additional inputs; passing their tags lets
  // the overload exclude them, leaving exactly the main input.
  return getSingletonMainInput(
      application.getInputs(), application.getTransform().getAdditionalInputs().keySet());
}
@Test
public void getMainInputSingleOutputSideInputs() {
    // Inputs contain the main input plus one side input; the side input must
    // be filtered out when resolving the singleton main input.
    AppliedPTransform<PCollection<Long>, ?, ?> application =
        AppliedPTransform.of(
            "application",
            ImmutableMap.<TupleTag<?>, PCollection<?>>builder()
                .put(new TupleTag<Long>(), mainInput)
                .put(sideInput.getTagInternal(), sideInput.getPCollection())
                .build(),
            Collections.singletonMap(new TupleTag<Long>(), output),
            ParDo.of(new TestDoFn()).withSideInputs(sideInput),
            ResourceHints.create(),
            pipeline);
    PCollection<Long> input = PTransformReplacements.getSingletonMainInput(application);
    assertThat(input, equalTo(mainInput));
}
/**
 * Looks up a system property by name.
 *
 * @param propertyName name of the system property
 * @return the property value, or {@link Optional#empty()} if not set
 */
public static Optional<String> getSystemProperty(String propertyName) {
    // The two-argument overload returns null when the property is unset;
    // wrap that null into an empty Optional.
    String value = getSystemProperty(propertyName, null);
    return Optional.ofNullable(value);
}
@Test
public void getSystemProperty_whenPropertyExists_returnsPropertyValue() {
    // A set property must come back as a present Optional with its value.
    System.setProperty(TEST_PROPERTY, "Test value");
    assertThat(TsunamiConfig.getSystemProperty(TEST_PROPERTY)).hasValue("Test value");
}
/**
 * Attempts to add one record to the aggregated batch.
 *
 * <p>Returns {@code false} (leaving the batch untouched) when adding the
 * record would push the batch beyond {@code maxAggregatedBytes}; callers are
 * expected to flush and retry.
 *
 * <p>NOTE(review): the computeIfAbsent lambdas below mutate {@code aggBuilder}
 * as a side effect — each new partition/hash key is appended to the protobuf
 * key table and its index cached for reuse by later records.
 *
 * @param partitionKey    Kinesis partition key for this record
 * @param explicitHashKey optional explicit hash key, may be null
 * @param record          the record payload bytes
 * @return true if the record was added, false if it did not fit
 */
boolean addRecord(String partitionKey, @Nullable String explicitHashKey, byte[] record) {
    int recordSize = sizeIncrement(partitionKey, explicitHashKey, record);
    if (sizeBytes + recordSize > maxAggregatedBytes) {
        return false;
    }
    ByteString data = record != null ? ByteString.copyFrom(record) : ByteString.EMPTY;
    Record.Builder recordBuilder = Record.newBuilder().setData(data);
    // Deduplicate partition keys: each distinct key is stored once in the
    // aggregate's key table and referenced by index.
    recordBuilder.setPartitionKeyIndex(
        partitionKeys.computeIfAbsent(
            partitionKey,
            pk -> aggBuilder.addPartitionKeyTable(pk).getPartitionKeyTableCount() - 1));
    if (explicitHashKey != null) {
        // Same deduplication scheme for explicit hash keys.
        recordBuilder.setExplicitHashKeyIndex(
            explicitHashKeys.computeIfAbsent(
                explicitHashKey,
                ehk -> aggBuilder.addExplicitHashKeyTable(ehk).getExplicitHashKeyTableCount() - 1));
    }
    aggBuilder.addRecords(recordBuilder.build());
    sizeBytes += recordSize;
    return true;
}
@Test
public void testRejectRecordIfSizeExceeded() {
    aggregator = new RecordsAggregator(BASE_OVERHEAD + PARTITION_KEY_OVERHEAD + 100, new Instant());
    // adding record fails due to encoding overhead
    assertThat(aggregator.addRecord(PARTITION_KEY, null, new byte[95])).isFalse();
    // but can fit if size is reduced
    assertThat(aggregator.addRecord(PARTITION_KEY, null, new byte[94])).isTrue();
}
/**
 * Loads the value for the given key via the configured SQL load query.
 *
 * @param key the map key to load
 * @return the value, or {@code null} if no row matches
 * @throws IllegalStateException if the query returns more than one row for
 *                               the key (keys are expected to be unique)
 */
@Override
public V load(K key) {
    // Block until asynchronous store initialization has completed successfully.
    awaitSuccessfulInit();
    // try-with-resources closes the SqlResult (and its server-side cursor).
    try (SqlResult queryResult = sqlService.execute(queries.load(), key)) {
        Iterator<SqlRow> it = queryResult.iterator();
        V value = null;
        if (it.hasNext()) {
            SqlRow sqlRow = it.next();
            if (it.hasNext()) {
                throw new IllegalStateException("multiple matching rows for a key " + key);
            }
            // If there is a single column as the value, return that column as the value
            if (queryResult.getRowMetadata().getColumnCount() == 2 && genericMapStoreProperties.singleColumnAsValue) {
                value = sqlRow.getObject(1);
            } else {
                // Otherwise materialize the whole row as a GenericRecord.
                //noinspection unchecked
                value = (V) toGenericRecord(sqlRow, genericMapStoreProperties);
            }
        }
        return value;
    }
}
@Test
public void givenTableNameProperty_whenCreateMapLoader_thenUseTableNameWithCustomSchemaWithDotInName() {
    // Only meaningful for JDBC-backed providers.
    assumeTrue(objectProvider instanceof JdbcDatabaseProvider);
    var jdbcDatabaseProvider = (JdbcObjectProvider) objectProvider;
    // See MySQLSchemaJdbcSqlConnectorTest
    assumeFalse(MySQLDatabaseProvider.TEST_MYSQL_VERSION.startsWith("5"));
    String schemaName = "custom_schema2";
    jdbcDatabaseProvider.createSchema(schemaName);
    // Table name deliberately contains a dot to exercise quoting.
    String tableName = randomName() + ".with_dot";
    String fullTableName = schemaName + "." + databaseProvider.quote(tableName);
    ObjectSpec spec = objectProvider.createObject(fullTableName, false);
    objectProvider.insertItems(spec, 1);

    Properties properties = new Properties();
    properties.setProperty(DATA_CONNECTION_REF_PROPERTY, TEST_DATABASE_REF);
    properties.setProperty(EXTERNAL_NAME_PROPERTY, schemaName + ".\"" + tableName + "\"");
    mapLoader = createMapLoader(properties, hz);
    // The loader must resolve the quoted schema.table name and find the row.
    GenericRecord genericRecord = mapLoader.load(0);
    assertThat(genericRecord).isNotNull();
}
/**
 * Opens the RocksDB instance for this store: builds the default options,
 * applies any user-provided {@code RocksDBConfigSetter}, creates the state
 * directories and wires up statistics/metrics.
 *
 * <p>NOTE(review): ordering matters — statistics must be attached to the
 * DBOptions before the database is opened (see comment near the bottom).
 *
 * @param configs  the Streams configuration map
 * @param stateDir base state directory; the DB lives under stateDir/parentDir/name
 * @throws ProcessorStateException if the state directories cannot be created
 */
@SuppressWarnings("unchecked")
void openDB(final Map<String, Object> configs, final File stateDir) {
    // initialize the default rocksdb options
    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    userSpecifiedOptions = new RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter(dbOptions, columnFamilyOptions);

    // Block-based table: shared LRU block cache plus bloom filters.
    final BlockBasedTableConfigWithAccessibleCache tableConfig = new BlockBasedTableConfigWithAccessibleCache();
    cache = new LRUCache(BLOCK_CACHE_SIZE);
    tableConfig.setBlockCache(cache);
    tableConfig.setBlockSize(BLOCK_SIZE);

    filter = new BloomFilter();
    tableConfig.setFilterPolicy(filter);

    userSpecifiedOptions.optimizeFiltersForHits();
    userSpecifiedOptions.setTableFormatConfig(tableConfig);
    userSpecifiedOptions.setWriteBufferSize(WRITE_BUFFER_SIZE);
    userSpecifiedOptions.setCompressionType(COMPRESSION_TYPE);
    userSpecifiedOptions.setCompactionStyle(COMPACTION_STYLE);
    userSpecifiedOptions.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    userSpecifiedOptions.setCreateIfMissing(true);
    userSpecifiedOptions.setErrorIfExists(false);
    userSpecifiedOptions.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
    // this is the recommended way to increase parallelism in RocksDb
    // note that the current implementation of setIncreaseParallelism affects the number
    // of compaction threads but not flush threads (the latter remains one). Also,
    // the parallelism value needs to be at least two because of the code in
    // https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580
    // subtracts one from the value passed to determine the number of compaction threads
    // (this could be a bug in the RocksDB code and their devs have been contacted).
    userSpecifiedOptions.setIncreaseParallelism(Math.max(Runtime.getRuntime().availableProcessors(), 2));

    wOptions = new WriteOptions();
    wOptions.setDisableWAL(true);

    fOptions = new FlushOptions();
    fOptions.setWaitForFlush(true);

    // Let a user-supplied config setter override any of the defaults above.
    final Class<RocksDBConfigSetter> configSetterClass =
        (Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);
    if (configSetterClass != null) {
        configSetter = Utils.newInstance(configSetterClass);
        configSetter.setConfig(name, userSpecifiedOptions, configs);
    }

    dbDir = new File(new File(stateDir, parentDir), name);

    try {
        Files.createDirectories(dbDir.getParentFile().toPath());
        Files.createDirectories(dbDir.getAbsoluteFile().toPath());
    } catch (final IOException fatal) {
        throw new ProcessorStateException(fatal);
    }

    // Setup statistics before the database is opened, otherwise the statistics are not updated
    // with the measurements from Rocks DB
    setupStatistics(configs, dbOptions);
    openRocksDB(dbOptions, columnFamilyOptions);
    dbAccessor = new DirectDBAccessor(db, fOptions, wOptions);
    open = true;

    addValueProvidersToMetricsRecorder();
}
@Test
public void shouldThrowWhenUserProvidesNewBlockBasedTableFormatConfig() {
    rocksDBStore = getRocksDBStoreWithRocksDBMetricsRecorder();
    context = getProcessorContext(
        RecordingLevel.DEBUG,
        RocksDBConfigSetterWithUserProvidedNewBlockBasedTableFormatConfig.class
    );
    // A config setter that swaps in a fresh BlockBasedTableConfig hides the
    // block cache from the metrics recorder and must be rejected.
    assertThrows(
        ProcessorStateException.class,
        () -> rocksDBStore.openDB(context.appConfigs(), context.stateDir()),
        "The used block-based table format configuration does not expose the "
            + "block cache. Use the BlockBasedTableConfig instance provided by Options#tableFormatConfig() to configure "
            + "the block-based table format of RocksDB. Do not provide a new instance of BlockBasedTableConfig to "
            + "the RocksDB options."
    );
}
/**
 * Looks up the schema for the given id by delegating to the wrapped service.
 *
 * @param schemaId id of the schema to fetch
 * @return a future completing with the schema and its metadata
 */
@Override
public CompletableFuture<SchemaAndMetadata> getSchema(String schemaId) {
    return this.service.getSchema(schemaId);
}
@Test
public void testGetLatestSchema() {
    String schemaId = "test-schema-id";
    CompletableFuture<SchemaAndMetadata> getFuture = new CompletableFuture<>();
    when(underlyingService.getSchema(eq(schemaId))).thenReturn(getFuture);
    // The wrapper must return the delegate's future unchanged and call it once.
    assertSame(getFuture, service.getSchema(schemaId));
    verify(underlyingService, times(1)).getSchema(eq(schemaId));
}
/**
 * Inserts a received packet into the term buffer at the given offset, but
 * only when that slot is still empty (first word zero), i.e. when filling a
 * gap left by out-of-order delivery.
 *
 * @param termBuffer destination term buffer
 * @param termOffset offset within the term at which the frame begins
 * @param packet     source buffer holding the complete frame
 * @param length     total frame length including the header
 */
public static void insert(
    final UnsafeBuffer termBuffer,
    final int termOffset,
    final UnsafeBuffer packet,
    final int length) {
    if (0 == termBuffer.getInt(termOffset)) {
        // Payload first, then the header words from back to front; the final
        // putLongOrdered stores the first header word with an ordered (release)
        // write so concurrent readers never observe a partially written frame.
        termBuffer.putBytes(termOffset + HEADER_LENGTH, packet, HEADER_LENGTH, length - HEADER_LENGTH);

        termBuffer.putLong(termOffset + 24, packet.getLong(24));
        termBuffer.putLong(termOffset + 16, packet.getLong(16));
        termBuffer.putLong(termOffset + 8, packet.getLong(8));

        termBuffer.putLongOrdered(termOffset, packet.getLong(0));
    }
}
@Test
void shouldFillSingleGap() {
    final int frameLength = 50;
    final int alignedFrameLength = BitUtil.align(frameLength, FRAME_ALIGNMENT);
    final int srcOffset = 0;
    final int tail = alignedFrameLength;
    final int termOffset = tail;
    final UnsafeBuffer packet = new UnsafeBuffer(ByteBuffer.allocate(alignedFrameLength));

    TermRebuilder.insert(termBuffer, termOffset, packet, alignedFrameLength);

    // The payload (everything after the header) must be copied into the gap.
    verify(termBuffer).putBytes(
        tail + HEADER_LENGTH,
        packet,
        srcOffset + HEADER_LENGTH,
        alignedFrameLength - HEADER_LENGTH);
}
// Returns the request queue of the reader thread that the given channel is
// assigned to (assignment is by the channel's thread number).
RequestQueue<ReadRequest> getReadRequestQueue(FileIOChannel.ID channelID) {
    return this.readers[channelID.getThreadNum()].requestQueue;
}
@Test
void testExceptionPropagationReader() throws Exception {
    // use atomic boolean as a boolean reference
    final AtomicBoolean handlerCalled = new AtomicBoolean();
    final AtomicBoolean exceptionForwarded = new AtomicBoolean();

    ReadRequest req = new ReadRequest() {
        @Override
        public void requestDone(IOException ioex) {
            // Record whether the I/O thread forwarded our test exception.
            if (ioex instanceof TestIOException) {
                exceptionForwarded.set(true);
            }
            synchronized (handlerCalled) {
                handlerCalled.set(true);
                handlerCalled.notifyAll();
            }
        }

        @Override
        public void read() throws IOException {
            throw new TestIOException();
        }
    };

    // test the read queue
    RequestQueue<ReadRequest> rq = ioManager.getReadRequestQueue(ioManager.createChannel());
    rq.add(req);

    // wait until the asynchronous request has been handled
    synchronized (handlerCalled) {
        while (!handlerCalled.get()) {
            handlerCalled.wait();
        }
    }
    assertThat(exceptionForwarded).isTrue();
}
/**
 * Records a single {@link MultiLabel} observation, updating per-label counts.
 *
 * <p>The unknown sentinel only bumps {@code unknownCount}; any other output contributes
 * each of its constituent label names. Label names containing a comma are rejected because
 * the comma is reserved (presumably as the multi-label separator — confirm against the
 * serialization format).
 *
 * @param output the observed multi-label output
 * @throws IllegalStateException if any constituent label name contains a comma
 */
@Override
public void observe(MultiLabel output) {
    if (output == MultiLabelFactory.UNKNOWN_MULTILABEL) {
        unknownCount++;
        return;
    }
    for (String name : output.getNameSet()) {
        if (name.contains(",")) {
            throw new IllegalStateException("MultiLabel cannot use a Label which contains ','. The supplied label was " + name + ".");
        }
        // Ensure the canonical MultiLabel exists, then bump its occurrence count.
        labels.computeIfAbsent(name, MultiLabel::new);
        labelCounts.computeIfAbsent(name, k -> new MutableLong()).increment();
    }
    totalCount++;
}
@Test
public void test() {
    // A single observed label must survive the mutable -> immutable conversion.
    final MutableMultiLabelInfo info = new MutableMultiLabelInfo();
    info.observe(label("a"));
    assertEquals(1, info.size());

    final ImmutableOutputInfo<MultiLabel> immutable = info.generateImmutableOutputInfo();
    assertEquals(1, immutable.size());
}
/**
 * Checks whether the given class is one of the recognized numeric types.
 *
 * @param clazz the class to test, may be {@code null}
 * @return {@code true} if the class is contained in {@code NUMBER_TYPES}, {@code false}
 *         otherwise (including for {@code null})
 */
public static boolean isNumber(Class<?> clazz) {
    // null can never be numeric; otherwise defer to the lookup set.
    return clazz != null && NUMBER_TYPES.contains( clazz );
}
@Test
public void testIsNumber() {
    // Non-numeric inputs, including null, must be rejected.
    assertFalse( NativeTypes.isNumber( null ) );
    assertFalse( NativeTypes.isNumber( Object.class ) );
    assertFalse( NativeTypes.isNumber( String.class ) );

    // Primitives, wrappers, and arbitrary-precision types must all be recognized.
    Class<?>[] numericTypes = {
        double.class, Double.class, long.class, Long.class, BigDecimal.class, BigInteger.class
    };
    for ( Class<?> type : numericTypes ) {
        assertTrue( NativeTypes.isNumber( type ) );
    }
}
/**
 * Returns the human-readable name of this analyzer.
 *
 * @return the analyzer name constant
 */
@Override
public String getName() {
    return ANALYZER_NAME;
}
@Test
public void testGetName() {
    // The analyzer must report its fixed, human-readable name.
    final String reportedName = analyzer.getName();
    assertThat(reportedName, is("Ruby Bundler Analyzer"));
}
/**
 * Parses the command line and either prints version information, requests usage output,
 * or formats the selected input.
 *
 * @param args the raw command-line arguments
 * @return the process exit code (0 on success)
 * @throws UsageException when {@code --help} is requested or argument parsing fails
 */
public int format(String... args) throws UsageException {
    final CommandLineOptions parameters = processArgs(args);

    if (parameters.version()) {
        errWriter.println(versionString());
        return 0;
    }
    if (parameters.help()) {
        // The caller handling this exception is responsible for emitting usage text.
        throw new UsageException();
    }

    final Style style = parameters.aosp() ? Style.AOSP : Style.GOOGLE;
    final JavaFormatterOptions options =
        JavaFormatterOptions.builder()
            .style(style)
            .formatJavadoc(parameters.formatJavadoc())
            .build();

    return parameters.stdin() ? formatStdin(parameters, options) : formatFiles(parameters, options);
}
@Test
public void imports() throws Exception {
    // Unsorted imports with one unused entry (LinkedList); the Javadoc {@link} keeps
    // ArrayList live even though it never appears in executable code.
    String[] original = {
        "import java.util.LinkedList;",
        "import java.util.List;",
        "import java.util.ArrayList;",
        "class Test {",
        "  /**",
        "   * May be an {@link ArrayList}.",
        "   */",
        "  public static List<String> names;",
        "}",
    };
    // Expected: unused import dropped, remainder sorted, blank line after the import block.
    String[] reordered = {
        "import java.util.ArrayList;",
        "import java.util.List;",
        "",
        "class Test {",
        "  /**",
        "   * May be an {@link ArrayList}.",
        "   */",
        "  public static List<String> names;",
        "}",
    };

    InputStream stdin = new ByteArrayInputStream(joiner.join(original).getBytes(UTF_8));
    StringWriter stdout = new StringWriter();
    Main main =
        new Main(
            new PrintWriter(stdout, true),
            new PrintWriter(new BufferedWriter(new OutputStreamWriter(System.err, UTF_8)), true),
            stdin);

    assertThat(main.format("-", "--fix-imports-only")).isEqualTo(0);
    assertThat(stdout.toString()).isEqualTo(joiner.join(reordered));
}