focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Reads a column value from a JDBC ResultSet for this database dialect.
// TYPE_BINARY is deliberately fetched via getString() — presumably this
// driver surfaces binary columns as strings (NOTE(review): confirm against
// the driver docs); every other type delegates to the base implementation.
@Override
public Object getValueFromResultSet( ResultSet rs, ValueMetaInterface val, int index ) throws KettleDatabaseException {
  Object data;
  try {
    if ( val.getType() == ValueMetaInterface.TYPE_BINARY ) {
      // JDBC column indexes are 1-based, hence index + 1.
      data = rs.getString( index + 1 );
    } else {
      return super.getValueFromResultSet( rs, val, index );
    }
    if ( rs.wasNull() ) {
      data = null; // SQL NULL maps to Java null
    }
  } catch ( SQLException e ) {
    // Wrap with column context so the failure is traceable; cause preserved.
    throw new KettleDatabaseException( "Unable to get value '" + val.toStringMeta() + "' from database resultset, index " + index, e );
  }
  return data;
}
// Verifies that an SQLException raised while reading the ResultSet is
// translated into a KettleDatabaseException (asserted via the @Test
// annotation). Index 2 is passed because the implementation reads column
// index + 1 (= 3), which is the stubbed failing column.
@Test(expected = KettleDatabaseException.class)
public void testGetValueFromResultSetWhenExceptionIsComing() throws SQLException, KettleDatabaseException {
  ResultSet rs = mock( ResultSet.class );
  Mockito.when( rs.getString( 3 ) ).thenThrow( SQLException.class );
  ValueMetaString ts = new ValueMetaString( "AzureDB" );
  dbMeta.getValueFromResultSet( rs,ts,2 );
}
/**
 * Parses a single CSV cell into the Java value matching the schema field's type.
 *
 * <p>Supported types: STRING, INT16/32/64, BOOLEAN, BYTE, DECIMAL, DOUBLE,
 * FLOAT, DATETIME. Any other type raises UnsupportedOperationException
 * pointing the user at withCustomRecordParsing.
 *
 * <p>Fix: the rethrown IllegalArgumentException now carries the original
 * exception as its cause instead of discarding it, so the underlying parse
 * failure (e.g. NumberFormatException) stays visible in stack traces.
 *
 * @param cell raw cell text
 * @param field schema field describing the expected type
 * @return the parsed value
 * @throws IllegalArgumentException on a type mismatch while parsing
 * @throws UnsupportedOperationException for unsupported field types
 */
static Object parseCell(String cell, Schema.Field field) {
  Schema.FieldType fieldType = field.getType();
  try {
    switch (fieldType.getTypeName()) {
      case STRING:
        return cell;
      case INT16:
        return Short.parseShort(cell);
      case INT32:
        return Integer.parseInt(cell);
      case INT64:
        return Long.parseLong(cell);
      case BOOLEAN:
        return Boolean.parseBoolean(cell);
      case BYTE:
        return Byte.parseByte(cell);
      case DECIMAL:
        return new BigDecimal(cell);
      case DOUBLE:
        return Double.parseDouble(cell);
      case FLOAT:
        return Float.parseFloat(cell);
      case DATETIME:
        return Instant.parse(cell);
      default:
        throw new UnsupportedOperationException(
            "Unsupported type: " + fieldType + ", consider using withCustomRecordParsing");
    }
  } catch (IllegalArgumentException e) {
    // Preserve the cause chain (NumberFormatException etc. extend IAE).
    throw new IllegalArgumentException(
        e.getMessage() + " field " + field.getName() + " was received -- type mismatch", e);
  }
}
// An array-typed field is not parseable by parseCell: it must throw
// UnsupportedOperationException whose message points at withCustomRecordParsing.
@Test
public void givenCellUnsupportedType_throws() {
  String counting = "[one,two,three]";
  Schema schema =
      Schema.builder()
          .addField("an_array", Schema.FieldType.array(Schema.FieldType.STRING))
          .addStringField("a_string")
          .build();
  UnsupportedOperationException e =
      assertThrows(
          UnsupportedOperationException.class,
          () -> CsvIOParseHelpers.parseCell(counting, schema.getField("an_array")));
  assertEquals(
      "Unsupported type: " + schema.getField("an_array").getType() + ", consider using withCustomRecordParsing",
      e.getMessage());
}
/**
 * Reads a single byte from the underlying channel.
 *
 * <p>Fix: the original never checked the channel's return value, so at end of
 * stream it returned whatever stale byte was in the one-byte buffer and still
 * advanced {@code pos} and the metrics. Per the {@code InputStream#read()}
 * contract, -1 is now returned at end of stream and no state is advanced.
 *
 * @return the byte read (0-255), or -1 at end of stream
 * @throws IOException on channel failure
 */
@Override
public int read() throws IOException {
  Preconditions.checkState(!closed, "Cannot read: already closed");
  singleByteBuffer.position(0);
  int bytesRead = channel.read(singleByteBuffer);
  if (bytesRead <= 0) {
    // End of stream (treating a 0-byte read the same way — the channel is
    // expected to block until data is available; TODO confirm).
    return -1;
  }
  pos += 1;
  readBytes.increment();
  readOperations.increment();
  // Mask to widen the signed byte into an unsigned 0-255 int.
  return singleByteBuffer.array()[0] & 0xFF;
}
// Reads two single bytes back from GCS and checks signed-byte widening:
// 0xFF must come back as 255 (unsigned int), not -1.
@Test
public void testReadSingle() throws Exception {
  BlobId uri = BlobId.fromGsUtilUri("gs://bucket/path/to/read.dat");
  int i0 = 1;
  int i1 = 255;
  byte[] data = {(byte) i0, (byte) i1};
  writeGCSData(uri, data);
  try (SeekableInputStream in =
      new GCSInputStream(storage, uri, null, gcpProperties, MetricsContext.nullMetrics())) {
    assertThat(in.read()).isEqualTo(i0);
    assertThat(in.read()).isEqualTo(i1);
  }
}
/**
 * Extracts member data (owned partitions and generation id) for the sticky
 * assignor, always trusting the serialized user data over the subscription's
 * own fields. Returns empty member data (no partitions, no generation) when
 * no user data is present.
 */
@Override
protected MemberData memberData(Subscription subscription) {
  // Always deserialize ownedPartitions and generation id from user data
  // since StickyAssignor is an eager rebalance protocol that will revoke all existing partitions before joining group
  ByteBuffer userData = subscription.userData();
  if (userData == null || !userData.hasRemaining()) {
    return new MemberData(Collections.emptyList(), Optional.empty(), subscription.rackId());
  }
  return deserializeTopicPartitionAssignment(userData);
}
// The generation encoded in userData (generationId - 1) must win over the
// generation carried on the Subscription itself, because the eager protocol
// treats userData as authoritative.
@Test
public void testMemberDataWillHonorUserData() {
  List<String> topics = topics(topic);
  List<TopicPartition> ownedPartitions = partitions(tp(topic1, 0), tp(topic2, 1));
  int generationIdInUserData = generationId - 1;
  Subscription subscription = new Subscription(topics, generateUserData(topics, ownedPartitions, generationIdInUserData), Collections.emptyList(), generationId, Optional.empty());
  AbstractStickyAssignor.MemberData memberData = memberData(subscription);
  // in StickyAssignor with eager rebalance protocol, we'll always honor data in user data
  assertEquals(ownedPartitions, memberData.partitions, "subscription: " + subscription + " doesn't have expected owned partition");
  assertEquals(generationIdInUserData, memberData.generation.orElse(-1), "subscription: " + subscription + " doesn't have expected generation id");
}
// Runs every column through the validator registered for its category and
// keeps only the results that did NOT match between control and test checksums.
public List<ColumnMatchResult<?>> getMismatchedColumns(List<Column> columns, ChecksumResult controlChecksum, ChecksumResult testChecksum)
{
    return columns.stream()
            .map(column -> columnValidators.get(column.getCategory()).get().validate(column, controlChecksum, testChecksum))
            .flatMap(List::stream)
            .filter(result -> !result.isMatched())
            .collect(toImmutableList());
}
// Row-typed column validation: identical checksums yield no mismatches;
// perturbing two nested field checksums ("row.i" and "row.r.b") must surface
// exactly those two dereferenced field columns as mismatched.
@Test
public void testRow() {
  List<Column> columns = ImmutableList.of(ROW_COLUMN);
  ChecksumResult controlChecksum = new ChecksumResult(ROW_COLUMN_CHECKSUMS.size(), ROW_COLUMN_CHECKSUMS);
  assertTrue(checksumValidator.getMismatchedColumns(columns, controlChecksum, controlChecksum).isEmpty());
  // Mismatched different elements
  ChecksumResult testChecksum = new ChecksumResult(
      ROW_COLUMN_CHECKSUMS.size(),
      merge(ROW_COLUMN_CHECKSUMS, ImmutableMap.<String, Object>builder()
          .put("row.i$checksum", new SqlVarbinary(new byte[] {0x1a}))
          .put("row.r.b$checksum", new SqlVarbinary(new byte[] {0x1d}))
          .build()));
  Column aFieldColumn = Column.create("row.i", new DereferenceExpression(ROW_COLUMN.getExpression(), new Identifier("i")), INTEGER);
  Column rbFieldColumn = Column.create("row.r.b", new DereferenceExpression(new DereferenceExpression(ROW_COLUMN.getExpression(), new Identifier("r")), new Identifier("b")), BIGINT);
  assertMismatchedColumns(columns, controlChecksum, testChecksum, aFieldColumn, rbFieldColumn);
}
// Delegates tenant pagination straight to the mapper; all filtering/paging
// is performed in the generated SQL.
@Override
public PageResult<TenantDO> getTenantPage(TenantPageReqVO pageReqVO) {
  return tenantMapper.selectPage(pageReqVO);
}
// Inserts one matching tenant plus five near-misses (each differing in one
// filter field) and asserts the paged query returns exactly the match.
@Test
public void testGetTenantPage() {
  // mock data
  TenantDO dbTenant = randomPojo(TenantDO.class, o -> { // expected to be found by the query below
    o.setName("芋道源码");
    o.setContactName("芋艿");
    o.setContactMobile("15601691300");
    o.setStatus(CommonStatusEnum.ENABLE.getStatus());
    o.setCreateTime(buildTime(2020, 12, 12));
  });
  tenantMapper.insert(dbTenant);
  // name does not match
  tenantMapper.insert(cloneIgnoreId(dbTenant, o -> o.setName(randomString())));
  // contactName does not match
  tenantMapper.insert(cloneIgnoreId(dbTenant, o -> o.setContactName(randomString())));
  // contactMobile does not match
  tenantMapper.insert(cloneIgnoreId(dbTenant, o -> o.setContactMobile(randomString())));
  // status does not match
  tenantMapper.insert(cloneIgnoreId(dbTenant, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
  // createTime does not match
  tenantMapper.insert(cloneIgnoreId(dbTenant, o -> o.setCreateTime(buildTime(2021, 12, 12))));
  // prepare request
  TenantPageReqVO reqVO = new TenantPageReqVO();
  reqVO.setName("芋道");
  reqVO.setContactName("艿");
  reqVO.setContactMobile("1560");
  reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus());
  reqVO.setCreateTime(buildBetweenTime(2020, 12, 1, 2020, 12, 24));
  // invoke
  PageResult<TenantDO> pageResult = tenantService.getTenantPage(reqVO);
  // assert: only the fully-matching row comes back
  assertEquals(1, pageResult.getTotal());
  assertEquals(1, pageResult.getList().size());
  assertPojoEquals(dbTenant, pageResult.getList().get(0));
}
// String form delegates to the wrapped channel.
@Override
public String toString() {
  return channel.toString();
}
// toString() must mirror the underlying channel's string form.
// NOTE(review): `header.toString()` vs `channel.toString()` — presumably the
// fixtures are set up so both render identically; confirm in the test setup.
@Test
void toStringTest() {
  Assertions.assertEquals(header.toString(), channel.toString());
}
/**
 * Registers the given instance with Nacos as an ephemeral, weight-1.0 node.
 * Any NacosException is rethrown wrapped in a ShenyuException.
 */
@Override
public void persistInstance(final InstanceEntity instance) {
    try {
        final Instance nacosInstance = new Instance();
        nacosInstance.setInstanceId(buildInstanceNodeName(instance));
        nacosInstance.setServiceName(instance.getAppName());
        nacosInstance.setIp(instance.getHost());
        nacosInstance.setPort(instance.getPort());
        nacosInstance.setWeight(1.0d);
        nacosInstance.setEphemeral(true);
        namingService.registerInstance(instance.getAppName(), groupName, nacosInstance);
        LOGGER.info("nacos client register success: {}", nacosInstance);
    } catch (NacosException e) {
        throw new ShenyuException(e);
    }
}
// After persisting, the stubbed storage must hold an Instance under the
// group key whose ip/port/serviceName mirror the InstanceEntity fields.
@Test
public void testPersistInstance() {
  InstanceEntity data = InstanceEntity.builder()
      .appName("shenyu-test")
      .host("shenyu-host")
      .port(9195)
      .build();
  final String key = "shenyu-test-group";
  repository.persistInstance(data);
  assertTrue(storage.containsKey(key));
  final Instance instance = storage.get(key);
  assertEquals(data.getHost(), instance.getIp());
  assertEquals(data.getPort(), instance.getPort());
  assertEquals(data.getAppName(), instance.getServiceName());
  repository.close();
}
/**
 * Entry point for the data-locality demo: builds the game entity with
 * NUM_ENTITIES slots, starts it, and runs one update pass.
 */
public static void main(String[] args) {
  LOGGER.info("Start Game Application using Data-Locality pattern");
  var gameEntity = new GameEntity(NUM_ENTITIES);
  gameEntity.start();
  gameEntity.update();
}
// Smoke test: the demo's main() must run to completion without throwing.
@Test
void shouldExecuteGameApplicationWithoutException() {
  assertDoesNotThrow(() -> Application.main(new String[]{}));
}
/**
 * Replays a single offset-commit record into the in-memory offset state.
 *
 * <p>A non-null value is either stored directly (non-transactional commit) or
 * parked in the pending-transactional store keyed by producer id until the
 * transaction resolves. A null value (tombstone) removes the committed offset
 * and any pending transactional commits for the same group/topic/partition.
 *
 * @param recordOffset log offset of the replayed record
 * @param producerId   producer id, or RecordBatch.NO_PRODUCER_ID if non-transactional
 * @param key          group/topic/partition being committed
 * @param value        committed offset payload, or null for a tombstone
 */
public void replay(
    long recordOffset,
    long producerId,
    OffsetCommitKey key,
    OffsetCommitValue value
) {
    final String groupId = key.group();
    final String topic = key.topic();
    final int partition = key.partition();

    if (value != null) {
        // The classic or consumer group should exist when offsets are committed or
        // replayed. However, it won't if the consumer commits offsets but does not
        // use the membership functionality. In this case, we automatically create
        // a so-called "simple consumer group". This is an empty classic group
        // without a protocol type.
        try {
            groupMetadataManager.group(groupId);
        } catch (GroupIdNotFoundException ex) {
            groupMetadataManager.getOrMaybeCreateClassicGroup(groupId, true);
        }

        if (producerId == RecordBatch.NO_PRODUCER_ID) {
            log.debug("Replaying offset commit with key {}, value {}", key, value);
            // If the offset is not part of a transaction, it is directly stored
            // in the offsets store.
            OffsetAndMetadata previousValue = offsets.put(
                groupId,
                topic,
                partition,
                OffsetAndMetadata.fromRecord(recordOffset, value)
            );
            // Only a brand-new key (no previous value) grows the offset count.
            if (previousValue == null) {
                metrics.incrementNumOffsets();
            }
        } else {
            log.debug("Replaying transactional offset commit with producer id {}, key {}, value {}", producerId, key, value);
            // Otherwise, the transaction offset is stored in the pending transactional
            // offsets store. Pending offsets there are moved to the main store when
            // the transaction is committed; or removed when the transaction is aborted.
            pendingTransactionalOffsets
                .computeIfAbsent(producerId, __ -> new Offsets())
                .put(
                    groupId,
                    topic,
                    partition,
                    OffsetAndMetadata.fromRecord(recordOffset, value)
                );
            // Track which producers have open transactions per group for cleanup.
            openTransactionsByGroup
                .computeIfAbsent(groupId, __ -> new TimelineHashSet<>(snapshotRegistry, 1))
                .add(producerId);
        }
    } else {
        // Tombstone: delete the committed offset if present.
        if (offsets.remove(groupId, topic, partition) != null) {
            metrics.decrementNumOffsets();
        }
        // Remove all the pending offset commits related to the tombstone.
        TimelineHashSet<Long> openTransactions = openTransactionsByGroup.get(groupId);
        if (openTransactions != null) {
            openTransactions.forEach(openProducerId -> {
                Offsets pendingOffsets = pendingTransactionalOffsets.get(openProducerId);
                if (pendingOffsets != null) {
                    pendingOffsets.remove(groupId, topic, partition);
                }
            });
        }
    }
}
// Replays four commits (new keys and overwrites, with and without leader
// epoch / expiry) and lets verifyReplay check the stored state after each.
@Test
public void testReplay() {
  OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();
  verifyReplay(context, "foo", "bar", 0, new OffsetAndMetadata(
      0L, 100L, OptionalInt.empty(), "small", context.time.milliseconds(), OptionalLong.empty()
  ));
  verifyReplay(context, "foo", "bar", 0, new OffsetAndMetadata(
      1L, 200L, OptionalInt.of(10), "small", context.time.milliseconds(), OptionalLong.empty()
  ));
  verifyReplay(context, "foo", "bar", 1, new OffsetAndMetadata(
      2L, 200L, OptionalInt.of(10), "small", context.time.milliseconds(), OptionalLong.empty()
  ));
  verifyReplay(context, "foo", "bar", 1, new OffsetAndMetadata(
      3L, 300L, OptionalInt.of(10), "small", context.time.milliseconds(), OptionalLong.of(12345L)
  ));
}
// Picks one destination via the injected _rand function (given the number of
// destinations) and sends the whole block to that single mailbox.
@Override
protected void route(List<SendingMailbox> destinations, TransferableBlock block) throws Exception {
  int destinationIdx = _rand.apply(destinations.size());
  sendBlock(destinations.get(destinationIdx), block);
}
// With a deterministic "random" function that always returns 1, the block
// must be routed to the second mailbox exactly once, untouched.
@Test
public void shouldRouteRandomly() throws Exception {
  // Given:
  ImmutableList<SendingMailbox> destinations = ImmutableList.of(_mailbox1, _mailbox2);
  // When:
  new RandomExchange(destinations, size -> 1, TransferableBlockUtils::splitBlock).route(destinations, _block);
  ArgumentCaptor<TransferableBlock> captor = ArgumentCaptor.forClass(TransferableBlock.class);
  // Then:
  Mockito.verify(_mailbox2, Mockito.times(1)).send(captor.capture());
  Assert.assertEquals(captor.getValue(), _block);
}
// Renders the AST back into SQL text, stripping any trailing newlines the
// formatter emitted.
public static String formatSql(final AstNode root) {
  final StringBuilder sql = new StringBuilder();
  final Formatter formatter = new Formatter(sql);
  formatter.process(root, 0);
  return StringUtils.stripEnd(sql.toString(), "\n");
}
// Round-trips a DESCRIBE statement: the formatted output drops the trailing
// semicolon and any trailing newline.
@Test
public void shouldFormatDescribeSource() {
  // Given:
  final Statement statement = parseSingle("DESCRIBE ORDERS;");
  // When:
  final String result = SqlFormatter.formatSql(statement);
  // Then:
  assertThat(result, is("DESCRIBE ORDERS"));
}
// JDBC DatabaseMetaData hook: supertype information is not supported by this
// adapter, so null is returned. Callers must tolerate the null ResultSet.
@Override
public ResultSet getSuperTypes(final String catalog, final String schemaPattern, final String typeNamePattern) {
  return null;
}
// The unsupported metadata call must return null regardless of arguments.
@Test
void assertGetSuperTypes() {
  assertNull(metaData.getSuperTypes("", "", ""));
}
/**
 * Converts a camel-case identifier into a human-readable, space-separated,
 * lower-case phrase (e.g. {@code "camelCase"} -> {@code "camel case"}).
 *
 * <p>Fix: lower-casing now uses the root locale so the result no longer
 * varies with the JVM default locale (e.g. the Turkish dotless-i problem);
 * joining uses the JDK's {@code String.join} instead of Commons Lang.
 */
public static String humanize(String s) {
  String[] strings = StringUtils.splitByCharacterTypeCamelCase(s);
  for (int i = 0; i < strings.length; i++) {
    strings[i] = strings[i].toLowerCase(java.util.Locale.ROOT);
  }
  return String.join(" ", strings);
}
// Camel-case words are split and lower-cased; a single word passes through.
@Test
public void shouldHumanize() throws Exception {
  assertThat(humanize("camelCase"), is("camel case"));
  assertThat(humanize("camel"), is("camel"));
  assertThat(humanize("camelCaseForALongString"), is("camel case for a long string"));
}
/**
 * Splittable-DoFn processing loop: drains buffered messages, claiming each
 * message's offset (weighted by its byte size) against the restriction
 * tracker before emitting it with its publish-time timestamp.
 *
 * @return stop() when a claim is rejected (restriction exhausted),
 *         resume() when the buffer runs dry and the runtime should reschedule
 */
@Override
public ProcessContinuation run() {
  // Read any available data.
  for (Optional<SequencedMessage> next = subscriber.peek(); next.isPresent(); next = subscriber.peek()) {
    SequencedMessage message = next.get();
    Offset messageOffset = Offset.of(message.getCursor().getOffset());
    if (tracker.tryClaim(OffsetByteProgress.of(messageOffset, message.getSizeBytes()))) {
      // Claim succeeded: consume the message and remember the high-water mark.
      subscriber.pop();
      lastClaimedOffset = Optional.of(messageOffset);
      receiver.outputWithTimestamp(
          message, new Instant(Timestamps.toMillis(message.getPublishTime())));
    } else {
      // Our claim failed, return stop()
      return ProcessContinuation.stop();
    }
  }
  // There is no more data available, yield to the runtime.
  return ProcessContinuation.resume();
}
// With an empty subscriber buffer the processor must yield resume(), after
// first fetching the offset and rebuffering, in that order.
@Test
public void create() {
  SubscriptionPartitionProcessor processor = newProcessor();
  assertEquals(ProcessContinuation.resume(), processor.run());
  InOrder order = inOrder(subscriber);
  order.verify(subscriber).fetchOffset();
  order.verify(subscriber).rebuffer();
}
// Returns the checksum previously computed for this buffer's data.
public long getChecksum() {
  return this.checksum;
}
// The checksum must respect the buffer's limit: a 10-byte buffer limited to
// 5 bytes (same prefix content) checksums identically to the 5-byte buffer.
@Test
public void testChecksum() {
  byte[] bytes1 = new byte[5];
  byte[] bytes2 = new byte[10];
  initBytes(bytes1);
  initBytes(bytes2);
  ByteBuffer buffer1 = ByteBuffer.wrap(bytes1);
  ByteBuffer buffer2 = ByteBuffer.wrap(bytes2);
  buffer2.limit(bytes1.length);
  long checksum1 = BufferData.getChecksum(buffer1);
  long checksum2 = BufferData.getChecksum(buffer2);
  assertEquals(checksum1, checksum2);
}
/**
 * Builds struct fields pairing each Iceberg schema field with its object
 * inspector. Field names are lower-cased — presumably to match Hive's
 * case-insensitive name handling (confirm against the Hive integration docs).
 */
public IcebergRecordObjectInspector(
    Types.StructType structType, List<ObjectInspector> objectInspectors) {
  Preconditions.checkArgument(structType.fields().size() == objectInspectors.size());
  List<Types.NestedField> fields = structType.fields();
  this.structFields = Lists.newArrayListWithExpectedSize(fields.size());
  for (int position = 0; position < fields.size(); position++) {
    Types.NestedField field = fields.get(position);
    Types.NestedField lowerCaseField =
        Types.NestedField.of(
            field.fieldId(),
            field.isOptional(),
            field.name().toLowerCase(Locale.ROOT),
            field.type(),
            field.doc());
    structFields.add(
        new IcebergRecordStructField(lowerCaseField, objectInspectors.get(position), position));
  }
}
// Exercises the struct inspector over a record with a nested struct field:
// top-level field access, nested struct access, and inner field access must
// all round-trip the generated record's values.
@Test
public void testIcebergRecordObjectInspector() {
  Schema schema =
      new Schema(
          required(1, "integer_field", Types.IntegerType.get()),
          required(
              2,
              "struct_field",
              Types.StructType.of(
                  Types.NestedField.required(3, "string_field", Types.StringType.get()))));
  Record record = RandomGenericData.generate(schema, 1, 0L).get(0);
  Record innerRecord = record.get(1, Record.class);
  StructObjectInspector soi = (StructObjectInspector) IcebergObjectInspector.create(schema);
  assertThat(soi.getStructFieldsDataAsList(record))
      .isEqualTo(ImmutableList.of(record.get(0), record.get(1)));
  StructField integerField = soi.getStructFieldRef("integer_field");
  assertThat(soi.getStructFieldData(record, integerField)).isEqualTo(record.get(0));
  StructField structField = soi.getStructFieldRef("struct_field");
  Object innerData = soi.getStructFieldData(record, structField);
  assertThat(innerData).isEqualTo(innerRecord);
  StructObjectInspector innerSoi = (StructObjectInspector) structField.getFieldObjectInspector();
  StructField stringField = innerSoi.getStructFieldRef("string_field");
  assertThat(innerSoi.getStructFieldsDataAsList(innerRecord))
      .isEqualTo(ImmutableList.of(innerRecord.get(0)));
  assertThat(innerSoi.getStructFieldData(innerData, stringField)).isEqualTo(innerRecord.get(0));
}
/**
 * Resolves Spring Security UserDetails for the given username.
 *
 * <p>Maps a missing user to BadCredentialsException (so callers cannot probe
 * for valid usernames), attaches the user's roles plus the implicit
 * authenticated/anonymous roles as prefixed authorities, and wraps the result
 * in a HaloUser carrying the two-factor-auth flag and TOTP secret.
 */
@Override
public Mono<UserDetails> findByUsername(String username) {
  return userService.getUser(username)
      // Hide "user not found" behind a generic credentials failure.
      .onErrorMap(UserNotFoundException.class, e -> new BadCredentialsException("Invalid Credentials"))
      .flatMap(user -> {
        var name = user.getMetadata().getName();
        var userBuilder = User.withUsername(name)
            .password(user.getSpec().getPassword())
            .disabled(requireNonNullElse(user.getSpec().getDisabled(), false));
        var setAuthorities = roleService.getRolesByUsername(name)
            // every authenticated user should have authenticated and anonymous roles.
            .concatWithValues(AUTHENTICATED_ROLE_NAME, ANONYMOUS_ROLE_NAME)
            .map(roleName -> new SimpleGrantedAuthority(ROLE_PREFIX + roleName))
            .distinct()
            .collectList()
            .doOnNext(userBuilder::authorities);
        return setAuthorities.then(Mono.fromSupplier(() -> {
          var twoFactorAuthSettings = TwoFactorUtils.getTwoFactorAuthSettings(user);
          return new HaloUser.Builder(userBuilder.build())
              // 2FA is on only if not globally disabled AND configured for the user.
              .twoFactorAuthEnabled(
                  (!twoFactorAuthDisabled) && twoFactorAuthSettings.isAvailable()
              )
              .totpEncryptedSecret(user.getSpec().getTotpEncryptedSecret())
              .build();
        }));
      });
}
// A user with 2FA enabled and a TOTP secret configured must surface as a
// HaloUserDetails with the two-factor flag set.
@Test
void shouldFindHaloUserDetailsWith2faEnabledWhen2faEnabledAndTotpConfigured() {
  var fakeUser = createFakeUser();
  fakeUser.getSpec().setTwoFactorAuthEnabled(true);
  fakeUser.getSpec().setTotpEncryptedSecret("fake-totp-encrypted-secret");
  when(userService.getUser("faker")).thenReturn(Mono.just(fakeUser));
  when(roleService.getRolesByUsername("faker")).thenReturn(Flux.empty());
  userDetailService.findByUsername("faker")
      .as(StepVerifier::create)
      .assertNext(userDetails -> {
        assertInstanceOf(HaloUserDetails.class, userDetails);
        assertTrue(((HaloUserDetails) userDetails).isTwoFactorAuthEnabled());
      })
      .verifyComplete();
}
// Wraps a lazily-supplied Netty Future in a Mono; the supplier is invoked
// only when the Mono is subscribed (deferred semantics).
public static <F extends Future<Void>> Mono<Void> deferFuture(Supplier<F> deferredFuture) {
  return new DeferredFutureMono<>(deferredFuture);
}
// Races subscribe against cancel 1000 times: after each race the promise must
// have no leaked listeners and the subscriber's first recorded operation must
// be ON_SUBSCRIBE (i.e. cancellation never precedes the subscription signal).
@Test
void raceTestDeferredFutureMono() {
  for (int i = 0; i < 1000; i++) {
    final TestSubscriber subscriber = new TestSubscriber();
    final ImmediateEventExecutor eventExecutor = ImmediateEventExecutor.INSTANCE;
    final Promise<Void> promise = eventExecutor.newPromise();
    final Supplier<Promise<Void>> promiseSupplier = () -> promise;
    RaceTestUtils.race(() -> FutureMono.deferFuture(promiseSupplier)
        .subscribe(subscriber), subscriber::cancel);
    assertThat(resolveListeners(promise)).isNullOrEmpty();
    assertThat(subscriber.operations).first()
        .isEqualTo(TestSubscriber.Operation.ON_SUBSCRIBE);
  }
}
/**
 * Applies active http/https proxies from Maven settings.xml as system
 * properties, skipping any protocol the user already configured via
 * system properties, and decrypting credentials first.
 *
 * @throws MojoExecutionException if decryption reports an ERROR or FATAL problem
 */
static void activateHttpAndHttpsProxies(Settings settings, SettingsDecrypter decrypter)
    throws MojoExecutionException {
  List<Proxy> proxies = new ArrayList<>(2);
  for (String protocol : ImmutableList.of("http", "https")) {
    // Respect user-supplied -Dhttp(s).proxy* settings: do not overwrite.
    if (areProxyPropertiesSet(protocol)) {
      continue;
    }
    settings.getProxies().stream()
        .filter(Proxy::isActive)
        .filter(proxy -> protocol.equals(proxy.getProtocol()))
        .findFirst()
        .ifPresent(proxies::add);
  }
  if (proxies.isEmpty()) {
    return;
  }
  SettingsDecryptionRequest request = new DefaultSettingsDecryptionRequest().setProxies(proxies);
  SettingsDecryptionResult result = decrypter.decrypt(request);
  for (SettingsProblem problem : result.getProblems()) {
    if (problem.getSeverity() == SettingsProblem.Severity.ERROR
        || problem.getSeverity() == SettingsProblem.Severity.FATAL) {
      throw new MojoExecutionException(
          "Unable to decrypt proxy info from settings.xml: " + problem);
    }
  }
  result.getProxies().forEach(MavenSettingsProxyProvider::setProxyProperties);
}
// With http.proxyHost preset by the user, only the https proxy from settings
// may be activated; the http proxy password must remain unset.
@Test
public void testActivateHttpAndHttpsProxies_dontOverwriteUserHttp() throws MojoExecutionException {
  System.setProperty("http.proxyHost", "host");
  MavenSettingsProxyProvider.activateHttpAndHttpsProxies(
      mixedProxyEncryptedSettings, settingsDecrypter);
  Assert.assertNull(System.getProperty("http.proxyPassword"));
  Assert.assertEquals("password2", System.getProperty("https.proxyPassword"));
}
// Convenience overload: resolves the key with default value options.
@Override
public Object get(PropertyKey key) {
  return get(key, ConfigurationValueOptions.defaults());
}
// In test mode the site-properties file must be ignored: even with a
// site-properties file present in SITE_CONF_DIR, the property resolves to
// its default value.
@Test
public void sitePropertiesNotLoadedInTest() throws Exception {
  Properties props = new Properties();
  props.setProperty(PropertyKey.LOGGER_TYPE.toString(), "TEST_LOGGER");
  File propsFile = mFolder.newFile(Constants.SITE_PROPERTIES);
  props.store(new FileOutputStream(propsFile), "ignored header");
  // Avoid interference from system properties. Reset SITE_CONF_DIR to include the temp
  // site-properties file
  HashMap<String, String> sysProps = new HashMap<>();
  sysProps.put(PropertyKey.LOGGER_TYPE.toString(), null);
  sysProps.put(PropertyKey.SITE_CONF_DIR.toString(), mFolder.getRoot().getCanonicalPath());
  try (Closeable p = new SystemPropertyRule(sysProps).toResource()) {
    mConfiguration = Configuration.copyGlobal();
    assertEquals(PropertyKey.LOGGER_TYPE.getDefaultValue(),
        mConfiguration.get(PropertyKey.LOGGER_TYPE));
  }
}
// Static factory for the Bigtable read transform with default configuration.
public static Read read() {
  return Read.create();
}
// withoutValidation() must make validate() a no-op (no exception) even though
// the table does not exist in the test environment.
@Test
public void testReadWithoutValidate() {
  final String table = "fooTable";
  BigtableIO.Read read =
      BigtableIO.read()
          .withBigtableOptions(BIGTABLE_OPTIONS)
          .withTableId(table)
          .withoutValidation();
  // validate() will throw if withoutValidation() isn't working
  read.validate(TestPipeline.testingPipelineOptions());
}
public static boolean validatePlugin(PluginLookup.PluginType type, Class<?> pluginClass) { switch (type) { case INPUT: return containsAllMethods(inputMethods, pluginClass.getMethods()); case FILTER: return containsAllMethods(filterMethods, pluginClass.getMethods()); case CODEC: return containsAllMethods(codecMethods, pluginClass.getMethods()); case OUTPUT: return containsAllMethods(outputMethods, pluginClass.getMethods()); default: throw new IllegalStateException("Unknown plugin type for validation: " + type); } }
// Loads an old-API input plugin from a bundled jar and asserts it fails
// validation against the current INPUT method contract. The temp jar is
// always cleaned up. (Ignored: flaky on Windows, see linked issue.)
@Ignore("Test failing on windows for many weeks. See https://github.com/elastic/logstash/issues/10926")
@Test
public void testInvalidInputPlugin() throws IOException {
  Path tempJar = null;
  try {
    tempJar = Files.createTempFile("pluginValidationTest", "inputPlugin.jar");
    final InputStream resourceJar = getClass().getResourceAsStream("logstash-input-java_input_example-1.0.3.jar");
    Files.copy(resourceJar, tempJar, REPLACE_EXISTING);
    URL[] jarUrl = {tempJar.toUri().toURL()};
    URLClassLoader cl = URLClassLoader.newInstance(jarUrl);
    Class<?> oldInputClass = cl.loadClass("org.logstash.javaapi.JavaInputExample");
    Assert.assertNotNull(oldInputClass);
    Assert.assertFalse(PluginValidator.validatePlugin(PluginLookup.PluginType.INPUT, oldInputClass));
  } catch (Exception ex) {
    Assert.fail("Failed with exception: " + ex);
  } finally {
    if (tempJar != null) {
      Files.deleteIfExists(tempJar);
    }
  }
}
// Collects the partitions whose fetch positions still need leader-epoch
// validation and kicks off asynchronous validation for them.
public void validatePositionsIfNeeded() {
  Map<TopicPartition, SubscriptionState.FetchPosition> partitionsToValidate =
      offsetFetcherUtils.getPartitionsToValidate();
  validatePositionsAsync(partitionsToValidate);
}
// After downgrading metadata to an old response version (unreliable leader
// epoch), validatePositionsIfNeeded must clear the awaiting-validation state
// without sending any validation request.
@Test
public void testOffsetValidationSkippedForOldResponse() {
  // Old responses may provide unreliable leader epoch,
  // so we should skip offset validation and not send the request.
  buildFetcher();
  assignFromUser(singleton(tp0));
  Map<String, Integer> partitionCounts = new HashMap<>();
  partitionCounts.put(tp0.topic(), 4);
  final int epochOne = 1;
  metadata.updateWithCurrentRequestVersion(RequestTestUtils.metadataUpdateWithIds("dummy", 1,
      Collections.emptyMap(), partitionCounts, tp -> epochOne, topicIds), false, 0L);
  Node node = metadata.fetch().nodes().get(0);
  assertFalse(client.isConnected(node.idString()));
  // Seek with a position and leader+epoch
  Metadata.LeaderAndEpoch leaderAndEpoch = new Metadata.LeaderAndEpoch(
      metadata.currentLeader(tp0).leader, Optional.of(epochOne));
  subscriptions.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(20L, Optional.of(epochOne), leaderAndEpoch));
  assertFalse(client.isConnected(node.idString()));
  assertTrue(subscriptions.awaitingValidation(tp0));
  // Inject an older version of the metadata response
  final short responseVersion = 8;
  metadata.updateWithCurrentRequestVersion(RequestTestUtils.metadataUpdateWith("dummy", 1,
      Collections.emptyMap(), partitionCounts, tp -> null,
      MetadataResponse.PartitionMetadata::new, responseVersion, topicIds), false, 0L);
  offsetFetcher.validatePositionsIfNeeded();
  // Offset validation is skipped
  assertFalse(subscriptions.awaitingValidation(tp0));
}
// Comparison on the UNKNOWN type: values of this type are always SQL NULL,
// so the engine should never evaluate this body — reaching it is a bug.
@ScalarOperator(GREATER_THAN)
@SqlType(StandardTypes.BOOLEAN)
public static boolean greaterThan(@SqlType("unknown") boolean left, @SqlType("unknown") boolean right) {
  throw new AssertionError("value of unknown type should all be NULL");
}
// NULL > NULL must short-circuit to NULL without ever invoking the operator body.
@Test
public void testGreaterThan() {
  assertFunction("NULL > NULL", BOOLEAN, null);
}
/**
 * Maps a Java system-property name to its conventional environment-variable
 * form: upper-cased with the English locale, '.' and '-' replaced by '_'.
 */
public static String fromJavaPropertyToEnvVariable(String property) {
    final String upperCased = property.toUpperCase(Locale.ENGLISH);
    return upperCased.replace('.', '_').replace('-', '_');
}
// Dots and dashes both map to underscores; everything is upper-cased.
@Test
public void fromJavaPropertyToEnvVariable() {
  String output = SettingFormatter.fromJavaPropertyToEnvVariable("some.randomProperty-123.test");
  assertThat(output).isEqualTo("SOME_RANDOMPROPERTY_123_TEST");
}
/**
 * Opens a download stream for a file via the Box content API.
 *
 * <p>When the transfer resumes (status.isAppend()), a Range header is added so
 * the download starts at the requested offset; an open-ended range
 * ("bytes=N-") is sent when no end bound is set. The returned stream releases
 * the HTTP connection when consumed or closed.
 *
 * @throws BackgroundException wrapping any IOException from the HTTP call
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        final BoxApiClient client = new BoxApiClient(session.getClient());
        final HttpGet request = new HttpGet(String.format("%s/files/%s/content", client.getBasePath(), fileid.getFileId(file)));
        if(status.isAppend()) {
            final HttpRange range = HttpRange.withStatus(status);
            final String header;
            if(-1 == range.getEnd()) {
                // No end bound: request everything from the start offset.
                header = String.format("bytes=%d-", range.getStart());
            }
            else {
                header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
            }
            if(log.isDebugEnabled()) {
                log.debug(String.format("Add range header %s for file %s", header, file));
            }
            request.addHeader(new BasicHeader(HttpHeaders.RANGE, header));
        }
        final CloseableHttpResponse response = session.getClient().execute(request);
        return new HttpMethodReleaseInputStream(response, status);
    }
    catch(IOException e) {
        throw new HttpExceptionMappingService().map("Download {0} failed", e, file);
    }
}
// Uploads 1432 random bytes, then reads back with offset 100 in append mode
// and verifies the streamed bytes equal content[100..]; cleans up the file
// and session afterwards.
@Test
public void testReadRange() throws Exception {
  final BoxFileidProvider fileid = new BoxFileidProvider(session);
  final Path test = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
  final byte[] content = RandomUtils.nextBytes(1432);
  final OutputStream out = new BoxWriteFeature(session, fileid).write(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
  assertNotNull(out);
  new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(content), out);
  final TransferStatus status = new TransferStatus();
  status.setLength(content.length);
  status.setAppend(true);
  status.setOffset(100L);
  final InputStream in = new BoxReadFeature(session, fileid).read(test, status.withLength(content.length - 100), new DisabledConnectionCallback());
  assertNotNull(in);
  final ByteArrayOutputStream buffer = new ByteArrayOutputStream(content.length - 100);
  new StreamCopier(status, status).transfer(in, buffer);
  final byte[] reference = new byte[content.length - 100];
  System.arraycopy(content, 100, reference, 0, content.length - 100);
  assertArrayEquals(reference, buffer.toByteArray());
  in.close();
  new BoxDeleteFeature(session, fileid).delete(Collections.<Path>singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
  session.close();
}
// Java-engine implementation: applies func to each element with a parallel
// stream and collects the resulting pairs into a Map.
// NOTE(review): the parallelism hint is ignored here, and Collectors.toMap
// throws on duplicate keys or null values — confirm callers guarantee
// unique, non-null pairs.
@Override
public <I, K, V> Map<K, V> mapToPair(List<I> data, SerializablePairFunction<I, K, V> func, Integer parallelism) {
  return data.stream().parallel().map(throwingMapToPairWrapper(func)).collect(Collectors.toMap(Pair::getLeft, Pair::getRight));
}
// Splitting "spark_hudi"/"flink_hudi" on '_' yields keys spark/flink that
// both map to "hudi", so the two looked-up values must be equal.
@Test
public void testMapToPair() {
  List<String> mapList = Arrays.asList("spark_hudi", "flink_hudi");
  Map<String, String> resultMap = context.mapToPair(mapList, x -> {
    String[] splits = x.split("_");
    return new ImmutablePair<>(splits[0], splits[1]);
  }, 2);
  Assertions.assertEquals(resultMap.get("spark"), resultMap.get("flink"));
}
// Issues the Redis TIME command against the given cluster node and blocks
// for the result (server time as a long).
@Override
public Long time(RedisClusterNode node) {
  RedisClient entry = getEntry(node);
  RFuture<Long> f = executorService.readAsync(entry, LongCodec.INSTANCE, RedisCommands.TIME_LONG);
  return syncFuture(f);
}
// Sanity check: the server time returned by the first master must be a
// plausible (large) value.
@Test
public void testTime() {
  RedisClusterNode master = getFirstMaster();
  Long time = connection.time(master);
  assertThat(time).isGreaterThan(1000);
}
/**
 * Value equality: two CollectionResults are equal when elements, metadata,
 * total, and page increment all match. Uses a strict getClass() comparison,
 * so instances of different subclasses never compare equal.
 */
@Override
public boolean equals(Object object) {
  if (this == object) {
    return true;
  }
  if (object == null || getClass() != object.getClass()) {
    return false;
  }
  CollectionResult<?, ?> other = (CollectionResult<?, ?>) object;
  return _pageIncrement == other._pageIncrement
      && Objects.equals(_elements, other._elements)
      && Objects.equals(_metadata, other._metadata)
      && Objects.equals(_total, other._total);
}
// Data-driven equals() check: each provider row supplies a collection result,
// a comparison object, and the expected outcome.
@Test(dataProvider = "testEqualsDataProvider")
public void testEquals
(
    boolean shouldEquals,
    @Nonnull CollectionResult<TestRecordTemplateClass.Foo, TestRecordTemplateClass.Bar> collectionResult,
    @Nullable Object compareObject
) {
  assertEquals(collectionResult.equals(compareObject), shouldEquals);
}
// CAST(double AS smallint): rounds half-up to the nearest integer, then
// range-checks into short. Out-of-range values, infinities, and NaN surface
// as INVALID_CAST_ARGUMENT with the offending value in the message.
@ScalarOperator(CAST)
@SqlType(StandardTypes.SMALLINT)
public static long castToSmallint(@SqlType(StandardTypes.DOUBLE) double value) {
  try {
    return Shorts.checkedCast(DoubleMath.roundToInt(value, HALF_UP));
  } catch (ArithmeticException | IllegalArgumentException e) {
    throw new PrestoException(INVALID_CAST_ARGUMENT, format("Unable to cast %s to smallint", value), e);
  }
}
// Boundary behavior: a value just under 2^15 rounds down to Short.MAX_VALUE;
// 2^15 itself, huge magnitudes, infinities, and NaN must all fail the cast.
@Test
public void testCastToSmallInt() {
  assertFunction("cast(" + (0x1.0p15 - 0.6) + " as smallint)", SMALLINT, Short.MAX_VALUE);
  assertInvalidFunction("cast(DOUBLE '" + 0x1.0p15 + "' as smallint)", INVALID_CAST_ARGUMENT);
  assertInvalidFunction("cast(9.2E9 as smallint)", INVALID_CAST_ARGUMENT);
  assertInvalidFunction("cast(-9.2E9 as smallint)", INVALID_CAST_ARGUMENT);
  assertInvalidFunction("cast(infinity() as smallint)", INVALID_CAST_ARGUMENT);
  assertInvalidFunction("cast(-infinity() as smallint)", INVALID_CAST_ARGUMENT);
  assertInvalidFunction("cast(nan() as smallint)", INVALID_CAST_ARGUMENT);
}
/**
 * Builds the single-table rule configuration produced by a LOAD SINGLE TABLE
 * statement: the existing rule's tables (if any) plus the newly loaded ones.
 */
@Override
public SingleRuleConfiguration buildToBeCreatedRuleConfiguration(final LoadSingleTableStatement sqlStatement) {
    final SingleRuleConfiguration configuration = new SingleRuleConfiguration();
    if (rule != null) {
        configuration.getTables().addAll(rule.getConfiguration().getTables());
    }
    configuration.getTables().addAll(getRequiredTables(sqlStatement));
    return configuration;
}
// Loading a single table "foo" from data source "ds_0" onto an empty rule
// must yield a configuration containing exactly "ds_0.foo".
@Test
void assertBuild() {
  LoadSingleTableStatement sqlStatement = new LoadSingleTableStatement(Collections.singletonList(new SingleTableSegment("ds_0", null, "foo")));
  SingleRule rule = mock(SingleRule.class);
  when(rule.getConfiguration()).thenReturn(new SingleRuleConfiguration());
  executor.setRule(rule);
  SingleRuleConfiguration actual = executor.buildToBeCreatedRuleConfiguration(sqlStatement);
  assertThat(actual.getTables().iterator().next(), is("ds_0.foo"));
}
/**
 * Numeric greater-than stream rule. A message whose field is missing or
 * non-numeric never matches (even when the rule is inverted); otherwise the
 * comparison result is XOR-flipped by the rule's inverted flag.
 */
@Override
public boolean match(Message msg, StreamRule rule) {
    final Double messageValue = getDouble(msg.getField(rule.getField()));
    if (messageValue == null) {
        return false;
    }
    final Double ruleValue = getDouble(rule.getValue());
    if (ruleValue == null) {
        return false;
    }
    return rule.getInverted() ^ (messageValue > ruleValue);
}
// A message that lacks the rule's field must not match even when the rule is
// inverted — the missing-field short-circuit takes precedence over inversion.
@Test
public void testMissedInvertedMatchMissingField() {
    StreamRule rule = getSampleRule();
    rule.setValue("42");
    rule.setInverted(true);
    Message msg = getSampleMessage();
    msg.addField("someother", "30");
    StreamRuleMatcher matcher = getMatcher(rule);
    assertFalse(matcher.match(msg, rule));
}
/**
 * Builds the name of the ConfigMap that stores a user's notification
 * preferences: the fixed prefix {@code "user-preferences-"} followed by the
 * username.
 *
 * @param username the user's name, appended verbatim
 * @return the ConfigMap name for that user
 */
static String buildUserPreferenceConfigMapName(String username) {
    return String.format("user-preferences-%s", username);
}
// Verifies the ConfigMap name is the "user-preferences-" prefix plus username.
@Test
void buildUserPreferenceConfigMapName() {
    var preferenceConfigMapName = UserNotificationPreferenceServiceImpl
        .buildUserPreferenceConfigMapName("guqing");
    assertEquals("user-preferences-guqing", preferenceConfigMapName);
}
/**
 * Wraps a raw sub-model result according to the mining ensemble's combination
 * method. Selection/voting methods pass the raw object through unchanged;
 * numeric aggregation methods require a Number and wrap it with its weight;
 * WEIGHTED_MAJORITY_VOTE is not supported.
 *
 * @param rawObject           the raw prediction from a segment model
 * @param multipleModelMethod how the ensemble combines segment results
 * @param weight              the segment weight, attached for weighted methods
 * @return the raw object or a KiePMMLValueWeight wrapping it
 * @throws KiePMMLException for non-numeric input to numeric methods, for the
 *         unimplemented WEIGHTED_MAJORITY_VOTE, or for unknown methods
 */
Object getEventuallyWeightedResult(Object rawObject, MULTIPLE_MODEL_METHOD multipleModelMethod, double weight) {
    switch (multipleModelMethod) {
        case MAJORITY_VOTE:
        case MODEL_CHAIN:
        case SELECT_ALL:
        case SELECT_FIRST:
            // Selection/voting: the raw result is used as-is, weight ignored.
            return rawObject;
        case MAX:
        case SUM:
        case MEDIAN:
        case AVERAGE:
        case WEIGHTED_SUM:
        case WEIGHTED_MEDIAN:
        case WEIGHTED_AVERAGE:
            // Numeric aggregation: the result must be a number so it can be
            // combined; carry the weight along for the weighted variants.
            if (!(rawObject instanceof Number)) {
                throw new KiePMMLException("Expected a number, retrieved " + rawObject.getClass().getName());
            }
            return new KiePMMLValueWeight(((Number) rawObject).doubleValue(), weight);
        case WEIGHTED_MAJORITY_VOTE:
            throw new KiePMMLException(multipleModelMethod + " not implemented, yet");
        default:
            throw new KiePMMLException("Unrecognized MULTIPLE_MODEL_METHOD " + multipleModelMethod);
    }
}
// Every not-yet-implemented combination method must raise KiePMMLException
// rather than silently passing the raw object through.
@Test
void getEventuallyWeightedResultNotImplemented() {
    NOT_IMPLEMENTED_METHODS.forEach(multipleModelMethod -> {
        try {
            evaluator.getEventuallyWeightedResult("OBJ", multipleModelMethod, 34.2);
            fail(multipleModelMethod + " is supposed to throw exception because not implemented");
        } catch (KiePMMLException e) {
            // expected
        }
    });
}
/**
 * KSQL UDF: converts a STRING-to-DOUBLE map into an array of {K, V} structs,
 * optionally sorted by key. Delegates to the generic entries() helper with
 * the double struct schema.
 *
 * @param map    the map to turn into entries
 * @param sorted whether the resulting entries are sorted by key
 * @return a list of {K, V} structs
 */
@Udf(schema = "ARRAY<STRUCT<K STRING, V DOUBLE>>")
public List<Struct> entriesDouble(
    @UdfParameter(description = "The map to create entries from") final Map<String, Double> map,
    @UdfParameter(description = "If true then the resulting entries are sorted by key") final boolean sorted
) {
    return entries(map, DOUBLE_STRUCT_SCHEMA, sorted);
}
// Verifies the double-map variant produces the expected unsorted entries.
@Test
public void shouldComputeDoubleEntries() {
    final Map<String, Double> map = createMap(Double::valueOf);
    shouldComputeEntries(map, () -> entriesUdf.entriesDouble(map, false));
}
/**
 * Opens an input stream for downloading a file from Google Drive.
 *
 * Placeholder files (Google Docs and similar, which have no binary content)
 * are materialized as web-link shortcut files instead. Regular files are
 * fetched via the Drive media API, honoring resume offsets (Range header),
 * previous revisions, and Drive's abusive-content protection (the download is
 * retried with acknowledgeAbuse after the user confirms the warning).
 *
 * @param file     remote file to read
 * @param status   transfer state; append mode triggers a ranged request
 * @param callback used to prompt the user on access-denied (abuse) errors
 * @return stream of the remote content
 * @throws BackgroundException on mapped I/O or API failures
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    if(file.isPlaceholder()) {
        // No binary content to download; return a shortcut to the web view.
        final DescriptiveUrl link = new DriveUrlProvider().toUrl(file).find(DescriptiveUrl.Type.http);
        if(DescriptiveUrl.EMPTY.equals(link)) {
            log.warn(String.format("Missing web link for file %s", file));
            return new NullInputStream(file.attributes().getSize());
        }
        // Write web link file
        return IOUtils.toInputStream(UrlFileWriterFactory.get().write(link), Charset.defaultCharset());
    }
    else {
        final HttpHeaders headers = new HttpHeaders();
        headers.setContentType(MEDIA_TYPE);
        if(status.isAppend()) {
            // Resume an interrupted transfer with an HTTP Range header.
            final HttpRange range = HttpRange.withStatus(status);
            final String header;
            if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
                header = String.format("bytes=%d-", range.getStart());
            }
            else {
                header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
            }
            if(log.isDebugEnabled()) {
                log.debug(String.format("Add range header %s for file %s", header, file));
            }
            headers.setRange(header);
            // Disable compression
            headers.setAcceptEncoding("identity");
        }
        if(file.attributes().isDuplicate()) {
            // Read previous version
            try {
                final Drive.Revisions.Get request = session.getClient().revisions().get(fileid.getFileId(file), file.attributes().getVersionId());
                request.setRequestHeaders(headers);
                return request.executeMediaAsInputStream();
            }
            catch(IOException e) {
                throw new DriveExceptionMappingService(fileid).map("Download {0} failed", e, file);
            }
        }
        else {
            try {
                try {
                    final Drive.Files.Get request = session.getClient().files().get(fileid.getFileId(file));
                    request.setRequestHeaders(headers);
                    request.setSupportsTeamDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable"));
                    return request.executeMediaAsInputStream();
                }
                catch(IOException e) {
                    throw new DriveExceptionMappingService(fileid).map("Download {0} failed", e, file);
                }
            }
            catch(RetriableAccessDeniedException e) {
                // Transient denial: let the caller's retry machinery handle it.
                throw e;
            }
            catch(AccessDeniedException e) {
                // Drive flagged the file as possible malware/abusive content.
                // Warn the user once, then retry acknowledging the abuse flag.
                if(!PreferencesFactory.get().getBoolean(String.format("connection.unsecure.download.%s", session.getHost().getHostname()))) {
                    // Not previously dismissed
                    callback.warn(session.getHost(),
                            MessageFormat.format(LocaleFactory.localizedString("Download {0} failed", "Error"), file.getName()),
                            "Acknowledge the risk of downloading known malware or other abusive file.",
                            LocaleFactory.localizedString("Continue", "Credentials"),
                            LocaleFactory.localizedString("Cancel", "Localizable"),
                            String.format("connection.unsecure.download.%s", session.getHost().getHostname()));
                }
                try {
                    final Drive.Files.Get request = session.getClient().files().get(fileid.getFileId(file));
                    request.setAcknowledgeAbuse(true);
                    request.setRequestHeaders(headers);
                    request.setSupportsTeamDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable"));
                    return request.executeMediaAsInputStream();
                }
                catch(IOException f) {
                    throw new DriveExceptionMappingService(fileid).map("Download {0} failed", f, file);
                }
            }
        }
    }
}
// Integration test: a zero-byte file whose name contains whitespace can be
// created, read back as an empty stream, and deleted.
@Test
public void testReadWhitespace() throws Exception {
    final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
    final Path file = new DriveTouchFeature(session, fileid).touch(new Path(DriveHomeFinderService.MYDRIVE_FOLDER,
        String.format("t %s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file)), new TransferStatus());
    assertEquals(0, new DriveAttributesFinderFeature(session, fileid).find(file).getSize());
    final CountingInputStream in = new CountingInputStream(new DriveReadFeature(session, fileid).read(file, new TransferStatus(), new DisabledConnectionCallback()));
    in.close();
    assertEquals(0L, in.getByteCount(), 0L);
    new DriveDeleteFeature(session, fileid).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Serializes the given object to its JSON representation using the shared
 * {@code GSON} instance.
 *
 * @param object the object to serialize
 * @return the JSON string
 */
public String toJson(final Object object) {
    return GSON.toJson(object);
}
// Compares parsed JSON trees rather than raw strings, so field ordering in
// the serialized output does not affect the assertion.
@Test
public void testToJson() {
    TestObject testObject = generateTestObject();
    JsonElement expectedJson = JsonParser.parseString(EXPECTED_JSON);
    JsonElement objectJson = JsonParser.parseString(GsonUtils.getInstance().toJson(testObject));
    assertEquals(expectedJson, objectJson);
}
/**
 * Builds a qualified-data-source state change dispatch event from a registry
 * data change. Returns empty when the event value is blank or the key does
 * not identify a qualified data source.
 *
 * @param event the raw registry data change
 * @return the dispatch event, if one could be derived
 */
@Override
public Optional<DispatchEvent> build(final DataChangedEvent event) {
    if (Strings.isNullOrEmpty(event.getValue())) {
        return Optional.empty();
    }
    Optional<QualifiedDataSource> qualifiedDataSource = QualifiedDataSourceNode.extractQualifiedDataSource(event.getKey());
    if (qualifiedDataSource.isPresent()) {
        // Event value is YAML describing the new data source state.
        QualifiedDataSourceState state = new YamlQualifiedDataSourceStateSwapper().swapToObject(YamlEngine.unmarshal(event.getValue(), YamlQualifiedDataSourceState.class));
        return Optional.of(new QualifiedDataSourceStateEvent(qualifiedDataSource.get(), state));
    }
    return Optional.empty();
}
// Verifies a registry change under /nodes/qualified_data_sources is decoded
// into a state event carrying database, group, data source and DISABLED state.
@Test
void assertCreateDisabledQualifiedDataSourceChangedEvent() {
    Optional<DispatchEvent> actual = new QualifiedDataSourceDispatchEventBuilder().build(
        new DataChangedEvent("/nodes/qualified_data_sources/replica_query_db.readwrite_ds.replica_ds_0", "state: DISABLED\n", Type.DELETED));
    assertTrue(actual.isPresent());
    QualifiedDataSourceStateEvent actualEvent = (QualifiedDataSourceStateEvent) actual.get();
    assertThat(actualEvent.getQualifiedDataSource().getDatabaseName(), is("replica_query_db"));
    assertThat(actualEvent.getQualifiedDataSource().getGroupName(), is("readwrite_ds"));
    assertThat(actualEvent.getQualifiedDataSource().getDataSourceName(), is("replica_ds_0"));
    assertThat(actualEvent.getStatus().getState(), is(DataSourceState.DISABLED));
}
/**
 * Lists the master servers known to this Redis Sentinel by issuing the
 * SENTINEL MASTERS command and converting each returned property map into a
 * RedisServer.
 *
 * @return the monitored master servers
 */
@Override
public Collection<RedisServer> masters() {
    List<Map<String, String>> masters = connection.sync(StringCodec.INSTANCE, RedisCommands.SENTINEL_MASTERS);
    return toRedisServersList(masters);
}
// Sentinel fixture monitors exactly one master.
@Test
public void testMasters() {
    Collection<RedisServer> masters = connection.masters();
    assertThat(masters).hasSize(1);
}
/**
 * Convenience overload: parses and filters the connector configuration using
 * this runnable's own instance config, secrets provider, class loader and
 * component type.
 *
 * @param connectorConfigs raw connector configuration JSON
 * @return the parsed and filtered configuration map
 * @throws IOException if the configuration cannot be parsed
 */
private Map<String, Object> augmentAndFilterConnectorConfig(String connectorConfigs) throws IOException {
    return augmentAndFilterConnectorConfig(connectorConfigs, instanceConfig, secretsProvider, componentClassLoader, componentType);
}
// Environment-variable interpolation applies only to SOURCE and SINK
// components; unset variables and non-connector components keep the literal
// "${...}" placeholder text.
@Test(dataProvider = "component")
public void testInterpolatingEnvironmentVariables(FunctionDetails.ComponentType componentType) throws Exception {
    final Map<String, Object> parsedConfig = JavaInstanceRunnable.augmentAndFilterConnectorConfig(
        """
        {
          "key": {
            "key1": "${TEST_JAVA_INSTANCE_PARSE_ENV_VAR}",
            "key2": "${unset-env-var}"
          },
          "key3": "${TEST_JAVA_INSTANCE_PARSE_ENV_VAR}"
        }
        """,
        new InstanceConfig(),
        new EnvironmentBasedSecretsProvider(),
        null,
        componentType
    );
    if ((componentType == FunctionDetails.ComponentType.SOURCE || componentType == FunctionDetails.ComponentType.SINK)) {
        // Connectors resolve known variables; unknown ones stay as-is.
        Assert.assertEquals(((Map) parsedConfig.get("key")).get("key1"), "some-configuration");
        Assert.assertEquals(((Map) parsedConfig.get("key")).get("key2"), "${unset-env-var}");
        Assert.assertEquals(parsedConfig.get("key3"), "some-configuration");
    } else {
        // Non-connector components perform no interpolation at all.
        Assert.assertEquals(((Map) parsedConfig.get("key")).get("key1"), "${TEST_JAVA_INSTANCE_PARSE_ENV_VAR}");
        Assert.assertEquals(((Map) parsedConfig.get("key")).get("key2"), "${unset-env-var}");
        Assert.assertEquals(parsedConfig.get("key3"), "${TEST_JAVA_INSTANCE_PARSE_ENV_VAR}");
    }
}
/**
 * Decodes a JSON "loopback" object into a MepLbCreate. Exactly one of
 * remoteMepId or remoteMepMac must be present (id wins when both are given);
 * numberMessages, vlanDropEligible, vlanPriority and dataTlvHex are optional.
 *
 * @param json    the JSON to decode
 * @param context codec context for lookups
 * @return the decoded object, or null when the input is null or not an object
 * @throws IllegalArgumentException when neither remote MEP id nor MAC is given
 */
@Override
public MepLbCreate decode(ObjectNode json, CodecContext context) {
    if (json == null || !json.isObject()) {
        return null;
    }

    JsonNode loopbackNode = json.get(LOOPBACK);

    JsonNode remoteMepIdNode = loopbackNode.get(REMOTE_MEP_ID);
    JsonNode remoteMepMacNode = loopbackNode.get(REMOTE_MEP_MAC);

    MepLbCreate.MepLbCreateBuilder lbCreateBuilder;
    // remoteMepId takes precedence over remoteMepMac.
    if (remoteMepIdNode != null) {
        MepId remoteMepId = MepId.valueOf((short) remoteMepIdNode.asInt());
        lbCreateBuilder = DefaultMepLbCreate.builder(remoteMepId);
    } else if (remoteMepMacNode != null) {
        MacAddress remoteMepMac = MacAddress.valueOf(
                remoteMepMacNode.asText());
        lbCreateBuilder = DefaultMepLbCreate.builder(remoteMepMac);
    } else {
        throw new IllegalArgumentException(
                "Either a remoteMepId or a remoteMepMac");
    }

    JsonNode numMessagesNode = loopbackNode.get(NUMBER_MESSAGES);
    if (numMessagesNode != null) {
        int numMessages = numMessagesNode.asInt();
        lbCreateBuilder.numberMessages(numMessages);
    }

    JsonNode vlanDropEligibleNode = loopbackNode.get(VLAN_DROP_ELIGIBLE);
    if (vlanDropEligibleNode != null) {
        boolean vlanDropEligible = vlanDropEligibleNode.asBoolean();
        lbCreateBuilder.vlanDropEligible(vlanDropEligible);
    }

    JsonNode vlanPriorityNode = loopbackNode.get(VLAN_PRIORITY);
    if (vlanPriorityNode != null) {
        short vlanPriority = (short) vlanPriorityNode.asInt();
        // The numeric priority is mapped to the Priority enum by ordinal.
        lbCreateBuilder.vlanPriority(Priority.values()[vlanPriority]);
    }

    JsonNode dataTlvHexNode = loopbackNode.get(DATA_TLV_HEX);
    if (dataTlvHexNode != null) {
        String dataTlvHex = loopbackNode.get(DATA_TLV_HEX).asText();
        if (!dataTlvHex.isEmpty()) {
            lbCreateBuilder.dataTlv(HexString.fromHexString(dataTlvHex));
        }
    }

    return lbCreateBuilder.build();
}
// Decoding with only a remoteMepMac present yields a MAC-addressed loopback
// create with no MEP id and no data TLV.
@Test
public void testDecodeMepLbCreateMepMac() throws JsonProcessingException, IOException {
    String loopbackString = "{\"loopback\": { " + "\"remoteMepMac\": \"AA:BB:CC:DD:EE:FF\" }}";
    InputStream input = new ByteArrayInputStream(
        loopbackString.getBytes(StandardCharsets.UTF_8));
    JsonNode cfg = mapper.readTree(input);
    MepLbCreate mepLbCreate = context
        .codec(MepLbCreate.class).decode((ObjectNode) cfg, context);
    assertNull(mepLbCreate.remoteMepId());
    assertEquals("AA:BB:CC:DD:EE:FF", mepLbCreate.remoteMepAddress().toString());
    assertNull(mepLbCreate.dataTlvHex());
}
/**
 * Looks up an OAuth2 access token, reading through a Redis cache backed by
 * MySQL. A token found only in MySQL is written back to Redis when it has
 * not yet expired.
 *
 * @param accessToken the access token string
 * @return the token record, or null when it does not exist
 */
@Override
public OAuth2AccessTokenDO getAccessToken(String accessToken) {
    // Prefer the Redis cache.
    OAuth2AccessTokenDO accessTokenDO = oauth2AccessTokenRedisDAO.get(accessToken);
    if (accessTokenDO != null) {
        return accessTokenDO;
    }
    // Cache miss: fall back to MySQL.
    accessTokenDO = oauth2AccessTokenMapper.selectByAccessToken(accessToken);
    // If present in MySQL and not expired, populate the Redis cache.
    if (accessTokenDO != null && !DateUtils.isExpired(accessTokenDO.getExpiresTime())) {
        oauth2AccessTokenRedisDAO.set(accessTokenDO);
    }
    return accessTokenDO;
}
// A token stored only in the database (not in Redis) is still resolvable.
@Test
public void testCheckAccessToken_success() {
    // mock data (an unexpired access token persisted in the database)
    OAuth2AccessTokenDO accessTokenDO = randomPojo(OAuth2AccessTokenDO.class)
        .setExpiresTime(LocalDateTime.now().plusDays(1));
    oauth2AccessTokenMapper.insert(accessTokenDO);
    // prepare parameter
    String accessToken = accessTokenDO.getAccessToken();
    // invoke
    OAuth2AccessTokenDO result = oauth2TokenService.getAccessToken(accessToken);
    // assert
    assertPojoEquals(accessTokenDO, result, "createTime", "updateTime", "deleted",
        "creator", "updater");
}
/**
 * Removes duplicate suggestions in place, keeping the first occurrence of
 * each value. Removed CharSequence instances are recycled into
 * {@code stringsPool} via removeSuggestion.
 *
 * @param suggestions list to de-duplicate (modified in place)
 * @param stringsPool pool that receives removed, reusable instances
 */
public static void removeDupes(
    final List<CharSequence> suggestions, List<CharSequence> stringsPool) {
    if (suggestions.size() < 2) return;
    int i = 1;
    // Don't cache suggestions.size(), since we may be removing items
    while (i < suggestions.size()) {
        final CharSequence cur = suggestions.get(i);
        // Compare each suggestion with each previous suggestion
        for (int j = 0; j < i; j++) {
            CharSequence previous = suggestions.get(j);
            if (TextUtils.equals(cur, previous)) {
                removeSuggestion(suggestions, i, stringsPool);
                // Stay on the same index: the next element shifted into slot i.
                i--;
                break;
            }
        }
        i++;
    }
}
// Duplicates that appear after the first element ("duped" and the second
// "something") are removed; first occurrences and their order are preserved.
@Test
public void testRemoveDupesDupeIsNotFirst() throws Exception {
    ArrayList<CharSequence> list = new ArrayList<>(
        Arrays.<CharSequence>asList("typed", "something", "duped", "duped", "something"));
    IMEUtil.removeDupes(list, mStringPool);
    Assert.assertEquals(3, list.size());
    Assert.assertEquals("typed", list.get(0));
    Assert.assertEquals("something", list.get(1));
    Assert.assertEquals("duped", list.get(2));
}
/**
 * Builds Java source code for invoking a plugin method, defaulting the
 * target class loader expression to the caller's own class loader.
 *
 * @param pluginClass       the plugin class declaring the method
 * @param method            the method name to call
 * @param paramValueAndType alternating parameter value expressions and their
 *                          fully-qualified type names
 * @return source code snippet that performs the call
 */
public static String buildCallPluginMethod(Class pluginClass, String method, String... paramValueAndType) {
    return buildCallPluginMethod("getClass().getClassLoader()", pluginClass, method, paramValueAndType);
}
// Generates the call snippet, compiles it into a fresh class with Javassist,
// and executes it to prove the generated source is valid and invokable.
@Test
public void testBuildCallPluginMethod() throws Exception {
    SimplePlugin plugin = new SimplePlugin();
    registerPlugin(plugin);
    // plugin.init(PluginManager.getInstance());
    String s = PluginManagerInvoker.buildCallPluginMethod(plugin.getClass(), "callPluginMethod", "Boolean.TRUE", "java.lang.Boolean");
    ClassPool classPool = ClassPool.getDefault();
    classPool.appendSystemPath();
    CtClass clazz = classPool.makeClass("Test");
    clazz.addMethod(CtNewMethod.make("public void test() {" + s + "}", clazz));
    Class<?> testClass = clazz.toClass();
    Method testMethod = testClass.getDeclaredMethod("test");
    testMethod.invoke(testClass.newInstance());
}
public static void setFieldValue(Object target, Field field, Object fieldValue) throws IllegalArgumentException, SecurityException { if (target == null) { throw new IllegalArgumentException("target must be not null"); } while (true) { if (!field.isAccessible()) { field.setAccessible(true); } try { field.set(target, fieldValue); return; } catch (IllegalAccessException ignore) { // avoid other threads executing `field.setAccessible(false)` } } }
// Overwrites a private field by name and confirms the new value via getter.
@Test
public void testSetFieldValue() throws NoSuchFieldException {
    BranchDO branchDO = new BranchDO("xid123123", 123L, 1, 2.2, new Date());
    ReflectionUtil.setFieldValue(branchDO, "xid", "xid456");
    Assertions.assertEquals("xid456", branchDO.getXid());
}
/**
 * Returns the user for the given username, consulting the user cache first
 * and loading from the provider on a miss. Loading is guarded by a per-user
 * mutex with a double-checked cache read so concurrent callers for the same
 * user trigger at most one provider load.
 *
 * @param username name to look up; trimmed and lower-cased before use
 * @return the user
 * @throws UserNotFoundException when the username is null or unknown
 */
public User getUser(String username) throws UserNotFoundException {
    if (username == null) {
        throw new UserNotFoundException("Username cannot be null");
    }
    // Make sure that the username is valid.
    username = username.trim().toLowerCase();
    User user = userCache.get(username);
    if (user == null) {
        // Lock on an interned per-user JID so different users don't contend.
        synchronized (userBaseMutex.intern(XMPPServer.getInstance().createJID(username, null))) {
            // Re-check inside the lock: another thread may have loaded it.
            user = userCache.get(username);
            if (user == null) {
                user = provider.loadUser(username);
                userCache.put(username, user);
            }
        }
    }
    return user;
}
// An existing user is retrievable by username with all profile fields intact.
@Test
public void canGetUserByUserNameForExistingUsers() throws Exception{
    final User result = userManager.getUser(USER_ID);
    assertThat(result.getUsername(), is(USER_ID));
    assertThat(result.getEmail(), is("test-email@example.com"));
    assertThat(result.getName(), is("Test User Name"));
}
/**
 * Renders this database impact entry as a ten-column string row (type,
 * transformation, step, database, table, field, value, value origin, SQL,
 * remarks) with localized column names, suitable for display in the impact
 * analysis grid.
 *
 * @return the row metadata and data for this impact entry
 */
public RowMetaAndData getRow() {
    RowMetaAndData r = new RowMetaAndData();
    r.addValue( new ValueMetaString( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.Type" ) ), getTypeDesc() );
    r.addValue( new ValueMetaString( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.Transformation" ) ), getTransformationName() );
    r.addValue( new ValueMetaString( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.Step" ) ), getStepName() );
    r.addValue( new ValueMetaString( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.Database" ) ), getDatabaseName() );
    r.addValue( new ValueMetaString( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.Table" ) ), getTable() );
    r.addValue( new ValueMetaString( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.Field" ) ), getField() );
    r.addValue( new ValueMetaString( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.Value" ) ), getValue() );
    r.addValue( new ValueMetaString( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.ValueOrigin" ) ), getValueOrigin() );
    r.addValue( new ValueMetaString( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.SQL" ) ), getSQL() );
    r.addValue( new ValueMetaString( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.Remarks" ) ), getRemark() );
    return r;
}
// Exhaustively checks all ten columns of the impact row: each is a string
// value meta with the localized label and the constructor-supplied value.
@Test
public void testGetRow() throws KettleValueException {
    DatabaseImpact testObject =
        new DatabaseImpact( DatabaseImpact.TYPE_IMPACT_READ, "myTrans", "aStep", "ProdDB", "DimCustomer",
            "Customer_Key", "MyValue", "Calculator 2", "SELECT * FROM dimCustomer", "Some remarks" );
    RowMetaAndData rmd = testObject.getRow();
    assertNotNull( rmd );
    assertEquals( 10, rmd.size() );
    assertEquals( ValueMetaInterface.TYPE_STRING, rmd.getValueMeta( 0 ).getType() );
    assertEquals( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.Type" ), rmd.getValueMeta( 0 ).getName() );
    assertEquals( "Read", rmd.getString( 0, "default" ) );
    assertEquals( ValueMetaInterface.TYPE_STRING, rmd.getValueMeta( 1 ).getType() );
    assertEquals( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.Transformation" ), rmd.getValueMeta( 1 )
        .getName() );
    assertEquals( "myTrans", rmd.getString( 1, "default" ) );
    assertEquals( ValueMetaInterface.TYPE_STRING, rmd.getValueMeta( 2 ).getType() );
    assertEquals( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.Step" ), rmd.getValueMeta( 2 ).getName() );
    assertEquals( "aStep", rmd.getString( 2, "default" ) );
    assertEquals( ValueMetaInterface.TYPE_STRING, rmd.getValueMeta( 3 ).getType() );
    assertEquals( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.Database" ), rmd.getValueMeta( 3 )
        .getName() );
    assertEquals( "ProdDB", rmd.getString( 3, "default" ) );
    assertEquals( ValueMetaInterface.TYPE_STRING, rmd.getValueMeta( 4 ).getType() );
    assertEquals( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.Table" ), rmd.getValueMeta( 4 )
        .getName() );
    assertEquals( "DimCustomer", rmd.getString( 4, "default" ) );
    assertEquals( ValueMetaInterface.TYPE_STRING, rmd.getValueMeta( 5 ).getType() );
    assertEquals( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.Field" ), rmd.getValueMeta( 5 )
        .getName() );
    assertEquals( "Customer_Key", rmd.getString( 5, "default" ) );
    assertEquals( ValueMetaInterface.TYPE_STRING, rmd.getValueMeta( 6 ).getType() );
    assertEquals( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.Value" ), rmd.getValueMeta( 6 )
        .getName() );
    assertEquals( "MyValue", rmd.getString( 6, "default" ) );
    assertEquals( ValueMetaInterface.TYPE_STRING, rmd.getValueMeta( 7 ).getType() );
    assertEquals( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.ValueOrigin" ), rmd.getValueMeta( 7 )
        .getName() );
    assertEquals( "Calculator 2", rmd.getString( 7, "default" ) );
    assertEquals( ValueMetaInterface.TYPE_STRING, rmd.getValueMeta( 8 ).getType() );
    assertEquals( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.SQL" ), rmd.getValueMeta( 8 ).getName() );
    assertEquals( "SELECT * FROM dimCustomer", rmd.getString( 8, "default" ) );
    assertEquals( ValueMetaInterface.TYPE_STRING, rmd.getValueMeta( 9 ).getType() );
    assertEquals( BaseMessages.getString( PKG, "DatabaseImpact.RowDesc.Label.Remarks" ), rmd.getValueMeta( 9 )
        .getName() );
    assertEquals( "Some remarks", rmd.getString( 9, "default" ) );
}
/**
 * Opens an input stream for a file on an iRODS grid. In append (resume) mode
 * the stream is advanced past the already-transferred offset. Jargon runtime
 * exceptions are unwrapped to their JargonException cause where possible and
 * mapped to background exceptions.
 *
 * @param file     remote file to read
 * @param status   transfer status; append mode skips status.getOffset() bytes
 * @param callback unused connection callback
 * @return stream of the remote content
 * @throws NotfoundException   when the remote file does not exist
 * @throws BackgroundException on mapped Jargon failures
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        try {
            final IRODSFileSystemAO fs = session.getClient();
            final IRODSFileFactory factory = fs.getIRODSFileFactory();
            final IRODSFile f = factory.instanceIRODSFile(file.getAbsolute());
            if(f.exists()) {
                final InputStream in = new PackingIrodsInputStream(factory.instanceIRODSFileInputStream(f));
                if(status.isAppend()) {
                    // Resume: discard the bytes already transferred.
                    return StreamCopier.skip(in, status.getOffset());
                }
                return in;
            }
            else {
                throw new NotfoundException(file.getAbsolute());
            }
        }
        catch(JargonRuntimeException e) {
            // Unwrap the checked cause so it gets domain-specific mapping.
            if(e.getCause() instanceof JargonException) {
                throw (JargonException) e.getCause();
            }
            throw new DefaultExceptionMappingService().map(e);
        }
    }
    catch(JargonException e) {
        throw new IRODSExceptionMappingService().map("Download {0} failed", e, file);
    }
}
// Integration round-trip against an iRODS grid: write random content, verify
// existence, open a read stream, then delete and verify removal.
@Test
public void testRead() throws Exception {
    final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new IRODSProtocol())));
    final Profile profile = new ProfilePlistReader(factory).read(
        this.getClass().getResourceAsStream("/iRODS (iPlant Collaborative).cyberduckprofile"));
    final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials(
        PROPERTIES.get("irods.key"), PROPERTIES.get("irods.secret")
    ));
    final IRODSSession session = new IRODSSession(host);
    session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
    session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
    final Path test = new Path(new IRODSHomeFinderService(session).find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    assertFalse(session.getFeature(Find.class).find(test));
    final byte[] content = RandomUtils.nextBytes(2048);
    final TransferStatus status = new TransferStatus();
    status.setLength(content.length);
    status.setAppend(false);
    final OutputStream out = new IRODSWriteFeature(session).write(test, status, new DisabledConnectionCallback());
    assertNotNull(out);
    new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
    out.close();
    assertTrue(session.getFeature(Find.class).find(test));
    final InputStream in = new IRODSReadFeature(session).read(test, status, new DisabledConnectionCallback());
    assertNotNull(in);
    in.close();
    session.getFeature(Delete.class).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse(session.getFeature(Find.class).find(test));
    session.close();
}
/**
 * Not supported by this adapter; always throws
 * {@link MethodNotAvailableException}.
 */
@Override
@MethodNotAvailable
public Map<K, Object> executeOnEntries(com.hazelcast.map.EntryProcessor entryProcessor) {
    throw new MethodNotAvailableException();
}
// The adapter must reject executeOnEntries as unavailable.
@Test(expected = MethodNotAvailableException.class)
public void testExecuteOnEntries() {
    adapter.executeOnEntries(new IMapReplaceEntryProcessor("value", "newValue"));
}
/**
 * Resolves the application id to an instance reference via the service
 * monitor and returns its orchestration status.
 *
 * @param appId the application to query
 * @return the current application instance status
 * @throws ApplicationIdNotFoundException when the id cannot be resolved
 */
@Override
public ApplicationInstanceStatus getApplicationInstanceStatus(ApplicationId appId) throws ApplicationIdNotFoundException {
    ApplicationInstanceReference reference = OrchestratorUtil.toApplicationInstanceReference(appId, serviceMonitor);
    return statusService.getApplicationInstanceStatus(reference);
}
// A freshly registered application starts without orchestration remarks.
@Test
public void application_has_initially_no_remarks() throws Exception {
    assertEquals(NO_REMARKS, orchestrator.getApplicationInstanceStatus(app1));
}
/**
 * Always reports the header as absent.
 * NOTE(review): this looks like the stub of an always-empty headers
 * implementation — confirm no caller expects a real lookup here.
 */
@Override
public boolean contains(K name) {
    return false;
}
// The empty-headers instance never contains any name/value pair.
@Test
public void testContainsWithValue() {
    assertFalse(HEADERS.contains("name1", "value1"));
}
/**
 * Constant-folded bitwise OR for two TINYINT operands; the result is
 * narrowed back to a byte.
 *
 * @param first  left operand
 * @param second right operand
 * @return a TINYINT constant holding {@code first | second}
 */
@ConstantFunction(name = "bitor", argTypes = {TINYINT, TINYINT}, returnType = TINYINT)
public static ConstantOperator bitorTinyInt(ConstantOperator first, ConstantOperator second) {
    return ConstantOperator.createTinyInt((byte) (first.getTinyInt() | second.getTinyInt()));
}
// OR of a value with itself is the identity: 10 | 10 == 10.
@Test
public void bitorTinyInt() {
    assertEquals(10, ScalarOperatorFunctions.bitorTinyInt(O_TI_10, O_TI_10).getTinyInt());
}
/**
 * Entry point for the SNS write transform: returns a Write builder with no
 * options set; configure it with the fluent with* methods.
 *
 * @return a default Write transform
 */
public static Write write() {
    return new AutoValue_SnsIO_Write.Builder().build();
}
// Two publish requests through the mocked SNS client yield exactly two
// results on the configured output tag.
@Test
public void testDataWritesToSNS() {
    final PublishRequest request1 = createSampleMessage("my_first_message");
    final PublishRequest request2 = createSampleMessage("my_second_message");
    final TupleTag<PublishResult> results = new TupleTag<>();
    final AmazonSNS amazonSnsSuccess = getAmazonSnsMockSuccess();
    final PCollectionTuple snsWrites =
        p.apply(Create.of(request1, request2))
            .apply(
                SnsIO.write()
                    .withTopicName(topicName)
                    .withRetryConfiguration(
                        SnsIO.RetryConfiguration.create(
                            5, org.joda.time.Duration.standardMinutes(1)))
                    .withAWSClientsProvider(new Provider(amazonSnsSuccess))
                    .withResultOutputTag(results));
    final PCollection<Long> publishedResultsSize = snsWrites.get(results).apply(Count.globally());
    PAssert.that(publishedResultsSize).containsInAnyOrder(ImmutableList.of(2L));
    p.run().waitUntilFinish();
}
/**
 * Parses the JSON string into a SQLViewRepresentation using the shared
 * JsonUtil plumbing.
 *
 * @param json JSON with at least "type", "sql" and "dialect" fields
 * @return the parsed representation
 */
static SQLViewRepresentation fromJson(String json) {
    return JsonUtil.parse(json, SQLViewRepresentationParser::fromJson);
}
// Parses a SQL view representation from JSON and checks it against the
// builder-constructed equivalent.
// NOTE(review): both JSON fixtures and both expected objects are currently
// identical — the "required and optional fields" case adds nothing; consider
// extending it with a genuinely optional field.
@Test
public void testParseSqlViewRepresentation() {
    String requiredFields =
        "{\"type\":\"sql\", \"sql\": \"select * from foo\", \"dialect\": \"spark-sql\"}";
    SQLViewRepresentation viewRepresentation =
        ImmutableSQLViewRepresentation.builder()
            .sql("select * from foo")
            .dialect("spark-sql")
            .build();
    assertThat(SQLViewRepresentationParser.fromJson(requiredFields))
        .as("Should be able to parse valid SQL view representation")
        .isEqualTo(viewRepresentation);
    String requiredAndOptionalFields =
        "{\"type\":\"sql\", \"sql\": \"select * from foo\", \"dialect\": \"spark-sql\"}";
    SQLViewRepresentation viewWithOptionalFields =
        ImmutableSQLViewRepresentation.builder()
            .sql("select * from foo")
            .dialect("spark-sql")
            .build();
    assertThat(SQLViewRepresentationParser.fromJson(requiredAndOptionalFields))
        .as("Should be able to parse valid SQL view representation")
        .isEqualTo(viewWithOptionalFields);
}
/**
 * Builds the scheduler configuration for this aggregation event processor:
 * either a cron schedule (with the first processing window anchored to the
 * next cron fire time) or a fixed-interval schedule anchored to now. The
 * initial time range always spans searchWithinMs ending at the anchor.
 *
 * @param eventDefinition the owning event definition
 * @param clock           clock used to anchor the first execution window
 * @return the scheduler configuration (always present)
 */
@Override
public Optional<EventProcessorSchedulerConfig> toJobSchedulerConfig(EventDefinition eventDefinition, JobSchedulerClock clock) {
    final DateTime now = clock.nowUTC();

    // We need an initial timerange for the first execution of the event processor
    final AbsoluteRange timerange;
    final JobSchedule schedule;
    if (useCronScheduling()) {
        CronJobSchedule cronJobSchedule = CronJobSchedule.builder()
                .timezone(cronTimezone())
                .cronExpression(cronExpression())
                .build();
        // Anchor the first window to the next cron fire time (falling back to
        // now when no next time can be computed).
        DateTime nextTime = cronJobSchedule.calculateNextTime(now, now, clock).orElse(now);
        schedule = cronJobSchedule;
        timerange = AbsoluteRange.create(nextTime.minus(searchWithinMs()), nextTime);
    } else {
        schedule = IntervalJobSchedule.builder().interval(executeEveryMs()).unit(TimeUnit.MILLISECONDS).build();
        timerange = AbsoluteRange.create(now.minus(searchWithinMs()), now);
    }

    final EventProcessorExecutionJob.Config jobDefinitionConfig = EventProcessorExecutionJob.Config.builder()
            .eventDefinitionId(eventDefinition.id())
            .processingWindowSize(searchWithinMs())
            .processingHopSize(executeEveryMs())
            .parameters(AggregationEventProcessorParameters.builder()
                    .timerange(timerange)
                    .build())
            .isCron(useCronScheduling())
            .build();
    return Optional.of(EventProcessorSchedulerConfig.create(jobDefinitionConfig, schedule));
}
// Loads a fixture event definition and verifies both halves of the scheduler
// config: the execution-job definition (window/hop sizes, timerange anchored
// to the test clock) and the interval schedule (5 minutes in milliseconds).
@Test
@MongoDBFixtures("aggregation-processors.json")
public void toJobSchedulerConfig() {
    final EventDefinitionDto dto = dbService.get("54e3deadbeefdeadbeefaffe").orElse(null);
    assertThat(dto).isNotNull();
    assertThat(dto.config().toJobSchedulerConfig(dto, clock)).isPresent().get().satisfies(schedulerConfig -> {
        assertThat(schedulerConfig.jobDefinitionConfig()).satisfies(jobDefinitionConfig -> {
            assertThat(jobDefinitionConfig).isInstanceOf(EventProcessorExecutionJob.Config.class);
            final EventProcessorExecutionJob.Config config = (EventProcessorExecutionJob.Config) jobDefinitionConfig;
            assertThat(config.eventDefinitionId()).isEqualTo(dto.id());
            assertThat(config.processingWindowSize()).isEqualTo(300000);
            assertThat(config.processingHopSize()).isEqualTo(300000);
            assertThat(config.parameters()).isEqualTo(AggregationEventProcessorParameters.builder()
                .timerange(AbsoluteRange.create(clock.nowUTC().minus(300000), clock.nowUTC()))
                .build());
        });
        assertThat(schedulerConfig.schedule()).satisfies(schedule -> {
            assertThat(schedule).isInstanceOf(IntervalJobSchedule.class);
            final IntervalJobSchedule config = (IntervalJobSchedule) schedule;
            assertThat(config.interval()).isEqualTo(300000);
            assertThat(config.unit()).isEqualTo(TimeUnit.MILLISECONDS);
        });
    });
}
/**
 * Handles the confirmation callback of an RDA (document scan) session.
 *
 * Secret/account mismatches abort the session and return NOK. Client-side
 * errors are recorded but acknowledged with OK. With both feature switches
 * enabled and a verified scan in a SCANNING state, the authenticator is
 * upgraded to "substantieel", the scanned document is stored, the event is
 * remote-logged, and (for the login-level upgrade flow) a notification is
 * sent. The session always ends with the authenticator's current
 * authentication level recorded.
 *
 * @param params the confirmation payload from the RDA client
 * @return map with "arrivalStatus" of "OK" or "NOK"
 */
public Map<String, String> confirm(RdaConfirmRequest params) {
    AppSession appSession = appSessionService.getSession(params.getAppSessionId());
    AppAuthenticator appAuthenticator = appAuthenticatorService.findByUserAppId(appSession.getUserAppId());

    // Secret or account mismatch: abort the RDA session and report NOK.
    if(!checkSecret(params, appSession) || !checkAccount(params, appSession)){
        appSession.setRdaSessionStatus("ABORTED");
        appSessionService.save(appSession);
        return Map.of("arrivalStatus", "NOK");
    }

    // Client-reported errors are processed and still acknowledged with OK.
    if(checkAndProcessError(params, appSession)){
        appSessionService.save(appSession);
        return Map.of("arrivalStatus", "OK");
    }

    if (!switchService.digidAppSwitchEnabled()) {
        // App switch disabled: log and refute.
        digidClient.remoteLog("853", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(HIDDEN), true));
        appSession.setRdaSessionStatus("REFUTED");
    } else if (!switchService.digidRdaSwitchEnabled()){
        // RDA switch disabled: log and refute.
        digidClient.remoteLog("579", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(HIDDEN), true));
        appSession.setRdaSessionStatus("REFUTED");
    } else if (params.isVerified() && (SCANNING.equals(appSession.getRdaSessionStatus()) || SCANNING_FOREIGN.equals(appSession.getRdaSessionStatus()))) {
        // Successful scan: mark verified and upgrade the authenticator.
        appSession.setRdaSessionStatus("VERIFIED");
        appAuthenticator.setSubstantieelActivatedAt(ZonedDateTime.now());
        appAuthenticator.setSubstantieelDocumentType(params.getDocumentType().toLowerCase());

        // Only set the issuer when no wid activation happened earlier.
        if (appAuthenticator.getWidActivatedAt() == null) {
            appAuthenticator.setIssuerType("rda");
        }

        storeIdCheckDocument(params.getDocumentNumber(), params.getDocumentType(), appSession.getAccountId(), appAuthenticator.getUserAppId());

        if (ID_CHECK_ACTION.equals(appSession.getRdaAction())) {
            digidClient.remoteLog("1321", Map.of("document_type", params.getDocumentType().toLowerCase(), lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId()));
        } else {
            digidClient.remoteLog("848", Map.of("document_type", params.getDocumentType().toLowerCase(),
                lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(),
                lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(),
                lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName()));
        }

        appAuthenticatorService.save(appAuthenticator);

        if(appSession.getFlow().equals(UpgradeLoginLevel.NAME)) {
            digidClient.sendNotificationMessage(appSession.getAccountId(), "ED024", "SMS20");
            logger.debug("Sending notify email ED024 / SMS20 for device {}", appAuthenticator.getDeviceName());
        }
    }

    appSession.setAppAuthenticationLevel(appAuthenticator.getAuthenticationLevel());
    appSessionService.save(appSession);

    return Map.of("arrivalStatus", "OK");
}
// A client-reported error ("CANCELLED") is stored as the session status while
// the arrival is still acknowledged with OK.
@Test
void checkErrorError(){
    rdaConfirmRequest.setError("CANCELLED");
    when(appSessionService.getSession(any())).thenReturn(appSession);
    when(appAuthenticatorService.findByUserAppId(any())).thenReturn(appAuthenticator);

    Map<String, String> result = rdaService.confirm(rdaConfirmRequest);

    assertEquals("CANCELLED", appSession.getRdaSessionStatus());
    assertEquals("OK", result.get("arrivalStatus"));
}
/**
 * Varargs convenience overload: builds a union type from the given
 * alternatives by delegating to the list-based factory.
 *
 * @param typeAlternatives the member types of the union
 * @return the union type
 */
@VisibleForTesting
static UUnionType create(UExpression... typeAlternatives) {
    return create(ImmutableList.copyOf(typeAlternatives));
}
// A two-alternative union type must survive a serialize/deserialize
// round-trip and remain equal to the original.
@Test
public void serialization() {
    SerializableTester.reserializeAndAssert(
        UUnionType.create(
            UClassIdent.create("java.lang.IllegalArgumentException"),
            UClassIdent.create("java.lang.IllegalStateException")));
}
/**
 * Executes CLUSTER INFO against Redis and adapts the resulting key/value pairs into a
 * Spring Data {@code ClusterInfo}.
 */
@Override
public ClusterInfo clusterGetClusterInfo() {
    RFuture<Map<String, String>> future =
            executorService.readAsync((String) null, StringCodec.INSTANCE, RedisCommands.CLUSTER_INFO);
    Properties clusterProperties = new Properties();
    // Copy every CLUSTER INFO entry into the Properties object expected by ClusterInfo.
    Map<String, String> entries = syncFuture(future);
    entries.forEach(clusterProperties::setProperty);
    return new ClusterInfo(clusterProperties);
}
// Cluster info sanity: no failing slots, all slots (MAX_SLOT) are ok and assigned.
@Test
public void testClusterGetClusterInfo() {
    ClusterInfo info = connection.clusterGetClusterInfo();
    assertThat(info.getSlotsFail()).isEqualTo(0);
    assertThat(info.getSlotsOk()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT);
    assertThat(info.getSlotsAssigned()).isEqualTo(MasterSlaveConnectionManager.MAX_SLOT);
}
/**
 * Resolves the {@code InstanceAssignmentConfig} for the given partitions type: prefers an
 * explicit config from the table config, otherwise derives a legacy default from the
 * replica-group strategy config.
 *
 * @throws IllegalStateException if instance assignment is not allowed for the table config,
 *         the replica-group strategy config is missing, or the partition column has no
 *         configured partition count
 */
public static InstanceAssignmentConfig getInstanceAssignmentConfig(TableConfig tableConfig, InstancePartitionsType instancePartitionsType) {
    Preconditions.checkState(allowInstanceAssignment(tableConfig, instancePartitionsType),
        "Instance assignment is not allowed for the given table config");

    // Use the instance assignment config from the table config if it exists
    Map<String, InstanceAssignmentConfig> instanceAssignmentConfigMap = tableConfig.getInstanceAssignmentConfigMap();
    if (instanceAssignmentConfigMap != null) {
        InstanceAssignmentConfig instanceAssignmentConfig = instanceAssignmentConfigMap.get(instancePartitionsType.toString());
        if (instanceAssignmentConfig != null) {
            return instanceAssignmentConfig;
        }
    }

    // Generate default instance assignment config if it does not exist.
    // Only allow default config for offline table with replica-group segment assignment for backward-compatibility.
    // Tag pool is built from the table's offline server tag.
    InstanceTagPoolConfig tagPoolConfig =
        new InstanceTagPoolConfig(TagNameUtils.extractOfflineServerTag(tableConfig.getTenantConfig()), false, 0, null);

    InstanceReplicaGroupPartitionConfig replicaGroupPartitionConfig;
    SegmentsValidationAndRetentionConfig segmentConfig = tableConfig.getValidationConfig();
    int numReplicaGroups = tableConfig.getReplication();
    ReplicaGroupStrategyConfig replicaGroupStrategyConfig = segmentConfig.getReplicaGroupStrategyConfig();
    Preconditions.checkState(replicaGroupStrategyConfig != null, "Failed to find the replica-group strategy config");
    String partitionColumn = replicaGroupStrategyConfig.getPartitionColumn();
    boolean minimizeDataMovement = segmentConfig.isMinimizeDataMovement();
    if (partitionColumn != null) {
        // Partition-column based replica groups: the column must have a configured partition count.
        int numPartitions = tableConfig.getIndexingConfig().getSegmentPartitionConfig().getNumPartitions(partitionColumn);
        Preconditions.checkState(numPartitions > 0, "Number of partitions for column: %s is not properly configured",
            partitionColumn);
        replicaGroupPartitionConfig = new InstanceReplicaGroupPartitionConfig(true, 0, numReplicaGroups, 0,
            numPartitions, replicaGroupStrategyConfig.getNumInstancesPerPartition(), minimizeDataMovement,
            partitionColumn);
    } else {
        // If partition column is not configured, use replicaGroupStrategyConfig.getNumInstancesPerPartition() as
        // number of instances per replica-group for backward-compatibility
        replicaGroupPartitionConfig = new InstanceReplicaGroupPartitionConfig(true, 0, numReplicaGroups,
            replicaGroupStrategyConfig.getNumInstancesPerPartition(), 0, 0, minimizeDataMovement, null);
    }

    return new InstanceAssignmentConfig(tagPoolConfig, null, replicaGroupPartitionConfig, null, minimizeDataMovement);
}
// When no explicit InstanceAssignmentConfig exists and the replica-group strategy has no
// partition column, the generated default must be replica-group based, carry no partition
// column, and use numInstancesPerPartition (2) as the instances per replica-group.
@Test
public void testGetInstanceAssignmentConfigWhenInstanceAssignmentConfigIsNotPresentAndPartitionColumnNotPresent() {
    TagOverrideConfig tagOverrideConfig = new TagOverrideConfig("broker", "Server");
    Map<InstancePartitionsType, String> instancePartitionsTypeStringMap = new HashMap<>();
    instancePartitionsTypeStringMap.put(InstancePartitionsType.OFFLINE, "offlineString");
    TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName("testTable")
        .setTagOverrideConfig(tagOverrideConfig).setInstancePartitionsMap(instancePartitionsTypeStringMap)
        .build();
    // Replica-group strategy with a null partition column and 2 instances per partition.
    SegmentsValidationAndRetentionConfig segmentsValidationAndRetentionConfig =
        new SegmentsValidationAndRetentionConfig();
    ReplicaGroupStrategyConfig replicaGroupStrategyConfig = new ReplicaGroupStrategyConfig(null, 2);
    segmentsValidationAndRetentionConfig.setReplicaGroupStrategyConfig(replicaGroupStrategyConfig);
    segmentsValidationAndRetentionConfig.setReplication("1");
    tableConfig.setValidationConfig(segmentsValidationAndRetentionConfig);
    // Segment partitioning exists but is unrelated since no partition column is configured.
    IndexingConfig indexingConfig = new IndexingConfig();
    Map<String, ColumnPartitionConfig> columnPartitionConfigMap = new HashMap<>();
    ColumnPartitionConfig columnPartitionConfig = new ColumnPartitionConfig("column1", 1);
    columnPartitionConfigMap.put("column1", columnPartitionConfig);
    SegmentPartitionConfig segmentPartitionConfig = new SegmentPartitionConfig(columnPartitionConfigMap);
    indexingConfig.setSegmentPartitionConfig(segmentPartitionConfig);
    tableConfig.setIndexingConfig(indexingConfig);

    Assert.assertEquals(InstanceAssignmentConfigUtils
        .getInstanceAssignmentConfig(tableConfig, InstancePartitionsType.OFFLINE)
        .getReplicaGroupPartitionConfig().isReplicaGroupBased(), Boolean.TRUE);
    Assert.assertEquals(InstanceAssignmentConfigUtils
        .getInstanceAssignmentConfig(tableConfig, InstancePartitionsType.OFFLINE)
        .getReplicaGroupPartitionConfig().getPartitionColumn(), null);
    Assert.assertEquals(InstanceAssignmentConfigUtils
        .getInstanceAssignmentConfig(tableConfig,
            InstancePartitionsType.OFFLINE)
        .getReplicaGroupPartitionConfig().getNumInstancesPerReplicaGroup(), 2);
}
/**
 * Evaluates the raw TextIndex value of {@code term} against {@code text}: tokenizes both,
 * counts hits within the allowed Levenshtein distance, then applies the configured local
 * term weighting.
 *
 * @throws IllegalArgumentException on an unrecognized COUNT_HITS or LOCAL_TERM_WEIGHTS value
 */
static double evaluateRaw(boolean isCaseSensitive, boolean tokenize, String term, String text,
                          String wordSeparatorCharacterRE, LOCAL_TERM_WEIGHTS localTermWeights, COUNT_HITS countHits,
                          LevenshteinDistance levenshteinDistance) {
    // Case-insensitive matching lowercases both sides up front.
    if (!isCaseSensitive) {
        term = term.toLowerCase();
        text = text.toLowerCase();
    }
    // When not tokenizing, fall back to the default tokenizer pattern.
    Pattern pattern = tokenize ? Pattern.compile(wordSeparatorCharacterRE) : Pattern.compile(DEFAULT_TOKENIZER);
    List<String> terms = splitText(term, pattern);
    List<String> texts = splitText(text, pattern);
    // Hit count according to the requested counting strategy.
    int calculatedLevenshteinDistance;
    switch (countHits) {
        case ALL_HITS:
            calculatedLevenshteinDistance = evaluateLevenshteinDistanceAllHits(levenshteinDistance, terms, texts);
            break;
        case BEST_HITS:
            calculatedLevenshteinDistance = evaluateLevenshteinDistanceBestHits(levenshteinDistance, terms, texts);
            break;
        default:
            throw new IllegalArgumentException("Unknown COUNT_HITS " + countHits);
    }
    // Apply the configured local term weighting to the hit count.
    switch (localTermWeights) {
        case TERM_FREQUENCY:
            return calculatedLevenshteinDistance;
        case BINARY:
            return evaluateBinary(calculatedLevenshteinDistance);
        case LOGARITHMIC:
            return evaluateLogarithmic(calculatedLevenshteinDistance);
        case AUGMENTED_NORMALIZED_TERM_FREQUENCY:
            return evaluateAugmentedNormalizedTermFrequency(calculatedLevenshteinDistance, texts);
        default:
            throw new IllegalArgumentException("Unknown LOCAL_TERM_WEIGHTS " + localTermWeights);
    }
}
// Exercises evaluateRaw with tokenization enabled across all LOCAL_TERM_WEIGHTS modes for
// three scenarios: case-sensitive, case-insensitive, and a wider separator regex.
@Test
void evaluateRawTokenize() {
    LevenshteinDistance levenshteinDistance = new LevenshteinDistance(2);
    double frequency = 3.0;
    double logarithmic = Math.log10(1 + frequency);
    int maxFrequency = 2;
    // double cast avoids integer division (java:S2184)
    double augmentedNormalizedTermFrequency = 0.5 * (1 + (frequency / (double) maxFrequency));
    Map<LOCAL_TERM_WEIGHTS, Double> expectedResults = new HashMap<>();
    expectedResults.put(TERM_FREQUENCY, frequency);
    expectedResults.put(BINARY, 1.0);
    expectedResults.put(LOGARITHMIC, logarithmic);
    expectedResults.put(AUGMENTED_NORMALIZED_TERM_FREQUENCY, augmentedNormalizedTermFrequency);
    // Case-sensitive evaluation.
    expectedResults.forEach((localTermWeights, expected) -> {
        double evaluateRaw = KiePMMLTextIndex.evaluateRaw(true, true, TERM_0, TEXT_0, "\\s+",
            localTermWeights, COUNT_HITS.ALL_HITS, levenshteinDistance);
        assertThat(evaluateRaw).isCloseTo(expected, Offset.offset(0.0000001));
    });
    //---
    maxFrequency = 3;
    // double cast avoids integer division (java:S2184)
    augmentedNormalizedTermFrequency = 0.5 * (1 + (frequency / (double) maxFrequency));
    expectedResults = new HashMap<>();
    expectedResults.put(TERM_FREQUENCY, frequency);
    expectedResults.put(BINARY, 1.0);
    expectedResults.put(LOGARITHMIC, logarithmic);
    expectedResults.put(AUGMENTED_NORMALIZED_TERM_FREQUENCY, augmentedNormalizedTermFrequency);
    // Case-insensitive evaluation.
    expectedResults.forEach((localTermWeights, expected) -> {
        double evaluateRaw = KiePMMLTextIndex.evaluateRaw(false, true, TERM_0, TEXT_0, "\\s+",
            localTermWeights, COUNT_HITS.ALL_HITS, levenshteinDistance);
        assertThat(evaluateRaw).isCloseTo(expected, Offset.offset(0.0000001));
    });
    //---
    frequency = 4.0;
    logarithmic = Math.log10(1 + frequency);
    // double cast avoids integer division (java:S2184)
    augmentedNormalizedTermFrequency = 0.5 * (1 + (frequency / (double) maxFrequency));
    expectedResults = new HashMap<>();
    expectedResults.put(TERM_FREQUENCY, frequency);
    expectedResults.put(BINARY, 1.0);
    expectedResults.put(LOGARITHMIC, logarithmic);
    expectedResults.put(AUGMENTED_NORMALIZED_TERM_FREQUENCY, augmentedNormalizedTermFrequency);
    // Separator regex matching whitespace and hyphens yields one additional hit.
    expectedResults.forEach((localTermWeights, expected) -> {
        double evaluateRaw = KiePMMLTextIndex.evaluateRaw(false, true, TERM_0, TEXT_0, "[\\s\\-]",
            localTermWeights, COUNT_HITS.ALL_HITS, levenshteinDistance);
        assertThat(evaluateRaw).isCloseTo(expected, Offset.offset(0.0000001));
    });
}
/** Base URL of the embedded HTTP server, e.g. {@code http://127.0.0.1:43210}. */
String getUrl() {
    return String.format("http://%s:%d",
        this.httpServer.getInetAddress().getHostAddress(),
        this.httpServer.getLocalPort());
}
// A request to the exact registered path must be routed to the matching action.
@Test
public void action_is_matched_on_exact_URL() throws IOException {
    Response response = call(underTest.getUrl() + "/pompom");

    assertIsPomPomResponse(response);
}
/**
 * Maps a global (concatenated-config) line number back to its originating source segment,
 * rebasing the line number to be local to that segment.
 *
 * @throws IllegalArgumentException when no segment covers the given line
 */
public SourceWithMetadata lookupSource(int globalLineNumber, int sourceColumn)
        throws IncompleteSourceWithMetadataException {
    for (LineToSource candidate : this.sourceReferences()) {
        if (!candidate.includeLine(globalLineNumber)) {
            continue;
        }
        // Rebase: global line -> 1-based line within the matched segment.
        final int localLine = globalLineNumber + 1 - candidate.startLine;
        return new SourceWithMetadata(candidate.source.getProtocol(), candidate.source.getId(),
                localLine, sourceColumn, candidate.source.getText());
    }
    throw new IllegalArgumentException("can't find the config segment related to line " + globalLineNumber);
}
// Looking up a line beyond every configured segment must raise IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void testSourceAndLineRemapping_pipelineDefinedMInMultipleFiles_dontmatch()
        throws IncompleteSourceWithMetadataException {
    final SourceWithMetadata[] parts = {
        new SourceWithMetadata("file", "/tmp/input", 0, 0, PIPELINE_CONFIG_PART_1),
        new SourceWithMetadata("file", "/tmp/output", 0, 0, PIPELINE_CONFIG_PART_2)
    };
    sut = new PipelineConfig(source, pipelineIdSym, toRubyArray(parts), SETTINGS);

    // Line 100 is outside both config parts.
    sut.lookupSource(100, 0);
}
/**
 * Validates that every required scope element is present in the token's scope.
 * An absent or empty required-scope list always succeeds; the first missing element
 * produces a failure result describing the mismatch.
 */
public static OAuthBearerValidationResult validateScope(OAuthBearerToken token,
        List<String> requiredScope) {
    final Set<String> grantedScope = token.scope();
    if (requiredScope == null || requiredScope.isEmpty()) {
        return OAuthBearerValidationResult.newSuccess();
    }
    for (String required : requiredScope) {
        if (grantedScope.contains(required)) {
            continue;
        }
        // Report the first required element that is not granted.
        String message = String.format(
            "The provided scope (%s) was missing a required scope (%s). All required scope elements: %s",
            grantedScope, required, requiredScope);
        return OAuthBearerValidationResult.newFailure(message, requiredScope.toString(), null);
    }
    return OAuthBearerValidationResult.newSuccess();
}
@SuppressWarnings({"unchecked", "rawtypes"}) @Test public void validateScope() { long nowMs = TIME.milliseconds(); double nowClaimValue = ((double) nowMs) / 1000; final List<String> noScope = Collections.emptyList(); final List<String> scope1 = Collections.singletonList("scope1"); final List<String> scope1And2 = Arrays.asList("scope1", "scope2"); for (boolean actualScopeExists : new boolean[] {true, false}) { List<? extends List> scopes = !actualScopeExists ? Collections.singletonList((List) null) : Arrays.asList(noScope, scope1, scope1And2); for (List<String> actualScope : scopes) { for (boolean requiredScopeExists : new boolean[] {true, false}) { List<? extends List> requiredScopes = !requiredScopeExists ? Collections.singletonList((List) null) : Arrays.asList(noScope, scope1, scope1And2); for (List<String> requiredScope : requiredScopes) { StringBuilder sb = new StringBuilder("{"); appendJsonText(sb, "exp", nowClaimValue); appendCommaJsonText(sb, "sub", "principalName"); if (actualScope != null) sb.append(',').append(scopeJson(actualScope)); sb.append("}"); String compactSerialization = HEADER_COMPACT_SERIALIZATION + Base64.getUrlEncoder() .withoutPadding().encodeToString(sb.toString().getBytes(StandardCharsets.UTF_8)) + "."; OAuthBearerUnsecuredJws testJwt = new OAuthBearerUnsecuredJws(compactSerialization, "sub", "scope"); OAuthBearerValidationResult result = OAuthBearerValidationUtils.validateScope(testJwt, requiredScope); if (!requiredScopeExists || requiredScope.isEmpty()) assertTrue(isSuccess(result)); else if (!actualScopeExists || actualScope.size() < requiredScope.size()) assertTrue(isFailureWithMessageAndFailureScope(result)); else assertTrue(isSuccess(result)); } } } } }
@SuppressWarnings("unchecked") @Override public boolean canHandleReturnType(Class returnType) { return rxSupportedTypes.stream() .anyMatch(classType -> classType.isAssignableFrom(returnType)); }
// The RxJava 3 aspect must recognize Flowable and Single return types.
@Test
public void testCheckTypes() {
    assertThat(rxJava3CircuitBreakerAspectExt.canHandleReturnType(Flowable.class)).isTrue();
    assertThat(rxJava3CircuitBreakerAspectExt.canHandleReturnType(Single.class)).isTrue();
}
/**
 * Executes a synchronous GET against {@code path} and deserializes the response body
 * into {@code type}.
 */
private <T> RestResponse<T> get(final String path, final Class<T> type) {
    return executeRequestSync(HttpMethod.GET, path, null,
        r -> deserialize(r.getBody(), type), Optional.empty());
}
// A chunk-handler exception during a POST /query must surface as an error mentioning the path.
@Test
public void shouldPostQueryRequest_chunkHandler_exception() {
    ksqlTarget = new KsqlTarget(httpClient, socketAddress, localProperties, authHeader, HOST,
        Collections.emptyMap(), RequestOptions.DEFAULT_TIMEOUT);
    executor.submit(this::expectPostQueryRequestChunkHandler);
    assertThatEventually(requestStarted::get, is(true));

    // Simulate a failure delivered to the captured exception handler.
    exceptionCaptor.getValue().handle(new RuntimeException("Error!"));

    assertThatEventually(error::get, notNullValue());
    assertThat(error.get().getMessage(), containsString("Error issuing POST to KSQL server. path:/query"));
}
/**
 * Serializes a double-distribution (count, sum, min, max) into its wire format:
 * a varint count followed by three big-endian doubles.
 */
public static ByteString encodeDoubleDistribution(
    long count, double sum, double min, double max) {
    final ByteStringOutputStream stream = new ByteStringOutputStream();
    try {
        VARINT_CODER.encode(count, stream);
        DOUBLE_CODER.encode(sum, stream);
        DOUBLE_CODER.encode(min, stream);
        DOUBLE_CODER.encode(max, stream);
    } catch (IOException e) {
        // Writing to an in-memory stream should never fail; rethrow unchecked if it does.
        throw new RuntimeException(e);
    }
    return stream.toByteString();
}
// Pins the exact byte layout: varint count (1) followed by big-endian doubles 2.0, 3.0, 4.0.
@Test
public void testDoubleDistributionEncoding() {
    ByteString payload = encodeDoubleDistribution(1L, 2.0, 3.0, 4.0);
    assertEquals(
        ByteString.copyFrom(
            new byte[] {
                1, 64, 0, 0, 0, 0, 0, 0, 0, 64, 8, 0, 0, 0, 0, 0, 0, 64, 16, 0, 0, 0, 0, 0, 0
            }),
        payload);
}
/**
 * Decodes a raw GELF payload into a {@code Message}: parses and validates the JSON,
 * resolves the timestamp (client-provided or receive time), maps the well-known GELF
 * fields, and copies remaining additional fields with type-appropriate values.
 *
 * @throws IllegalStateException if the payload is not parseable JSON
 * @throws IllegalArgumentException if GELF validation fails
 */
@Nullable
@Override
public Message decode(@Nonnull final RawMessage rawMessage) {
    final GELFMessage gelfMessage = new GELFMessage(rawMessage.getPayload(), rawMessage.getRemoteAddress());
    final String json = gelfMessage.getJSON(decompressSizeLimit, charset);

    final JsonNode node;

    try {
        node = objectMapper.readTree(json);
        if (node == null) {
            throw new IOException("null result");
        }
    } catch (final Exception e) {
        log.error("Could not parse JSON, first 400 characters: " + StringUtils.abbreviate(json, 403), e);
        throw new IllegalStateException("JSON is null/could not be parsed (invalid JSON)", e);
    }

    try {
        validateGELFMessage(node, rawMessage.getId(), rawMessage.getRemoteAddress());
    } catch (IllegalArgumentException e) {
        log.trace("Invalid GELF message <{}>", node);
        throw e;
    }

    // Timestamp.
    final double messageTimestamp = timestampValue(node);
    final DateTime timestamp;
    if (messageTimestamp <= 0) {
        // No usable client timestamp: fall back to the receive time.
        timestamp = rawMessage.getTimestamp();
    } else {
        // we treat this as a unix timestamp
        timestamp = Tools.dateTimeFromDouble(messageTimestamp);
    }

    final Message message = messageFactory.createMessage(
        stringValue(node, "short_message"),
        stringValue(node, "host"),
        timestamp
    );

    message.addField(Message.FIELD_FULL_MESSAGE, stringValue(node, "full_message"));

    final String file = stringValue(node, "file");

    if (file != null && !file.isEmpty()) {
        message.addField("file", file);
    }

    final long line = longValue(node, "line");

    if (line > -1) {
        message.addField("line", line);
    }

    // Level is set by server if not specified by client.
    final int level = intValue(node, "level");

    if (level > -1) {
        message.addField("level", level);
    }

    // Facility is set by server if not specified by client.
    final String facility = stringValue(node, "facility");

    if (facility != null && !facility.isEmpty()) {
        message.addField("facility", facility);
    }

    // Add additional data if there is some.
    final Iterator<Map.Entry<String, JsonNode>> fields = node.fields();

    while (fields.hasNext()) {
        final Map.Entry<String, JsonNode> entry = fields.next();

        String key = entry.getKey();

        // Do not index useless GELF "version" field.
        if ("version".equals(key)) {
            continue;
        }

        // Don't include GELF syntax underscore in message field key.
        if (key.startsWith("_") && key.length() > 1) {
            key = key.substring(1);
        }

        // We already set short_message and host as message and source. Do not add as fields again.
        if ("short_message".equals(key) || "host".equals(key)) {
            continue;
        }

        // Skip standard or already set fields.
        if (message.getField(key) != null || Message.RESERVED_FIELDS.contains(key) && !Message.RESERVED_SETTABLE_FIELDS.contains(key)) {
            continue;
        }

        // Convert JSON containers to Strings, and pick a suitable number representation.
        final JsonNode value = entry.getValue();

        final Object fieldValue;
        if (value.isContainerNode()) {
            fieldValue = value.toString();
        } else if (value.isFloatingPointNumber()) {
            fieldValue = value.asDouble();
        } else if (value.isIntegralNumber()) {
            fieldValue = value.asLong();
        } else if (value.isNull()) {
            log.debug("Field [{}] is NULL. Skipping.", key);
            continue;
        } else if (value.isTextual()) {
            fieldValue = value.asText();
        } else {
            log.debug("Field [{}] has unknown value type. Skipping.", key);
            continue;
        }

        message.addField(key, fieldValue);
    }

    return message;
}
// Regression for issue 4027: a GELF timestamp sent as a JSON string must still be parsed.
@Test
public void decodeSucceedsWithValidTimestampAsStringIssue4027() throws Exception {
    // https://github.com/Graylog2/graylog2-server/issues/4027
    final String json = "{"
        + "\"version\": \"1.1\","
        + "\"short_message\": \"A short message that helps you identify what is going on\","
        + "\"host\": \"example.org\","
        + "\"timestamp\": \"1500646980.661\""
        + "}";

    final RawMessage rawMessage = new RawMessage(json.getBytes(StandardCharsets.UTF_8));
    final Message message = codec.decode(rawMessage);

    assertThat(message).isNotNull();
    assertThat(message.getTimestamp()).isEqualTo(DateTime.parse("2017-07-21T14:23:00.661Z"));
}
/**
 * Builds a per-config UDF factory: instantiates the UDF, applies function-scoped
 * configuration (inside the UDF security sandbox), and optionally wraps the invoker
 * with a metrics-producing decorator.
 */
private Function<KsqlConfig, Kudf> getUdfFactory(
    final Method method,
    final UdfDescription udfDescriptionAnnotation,
    final String functionName,
    final FunctionInvoker invoker,
    final String sensorName
) {
    return ksqlConfig -> {
        // Fresh UDF instance per KsqlConfig, created from the declaring class.
        final Object actualUdf = FunctionLoaderUtils.instantiateFunctionInstance(
            method.getDeclaringClass(), udfDescriptionAnnotation.name());
        if (actualUdf instanceof Configurable) {
            // Run user-supplied configure() under the extension security manager;
            // popOutUdf() in finally guarantees the sandbox is always exited.
            ExtensionSecurityManager.INSTANCE.pushInUdf();
            try {
                ((Configurable) actualUdf)
                    .configure(ksqlConfig.getKsqlFunctionsConfigProps(functionName));
            } finally {
                ExtensionSecurityManager.INSTANCE.popOutUdf();
            }
        }
        final PluggableUdf theUdf = new PluggableUdf(invoker, actualUdf);
        // When metrics are enabled, decorate the UDF so invocations are recorded on the sensor.
        return metrics.<Kudf>map(m -> new UdfMetricProducer(
            m.getSensor(sensorName),
            theUdf,
            Time.SYSTEM
        )).orElse(theUdf);
    };
}
// A UDF declared with a List return type must resolve to a SQL ARRAY<STRING> return type.
@Test
public void shouldLoadFunctionWithListReturnType() {
    // Given:
    final UdfFactory toList = FUNC_REG.getUdfFactory(FunctionName.of("tolist"));

    // When:
    final List<SqlArgument> args = Collections.singletonList(SqlArgument.of(SqlTypes.STRING));
    final KsqlScalarFunction function = toList.getFunction(args);

    assertThat(function.getReturnType(args),
        is(SqlTypes.array(SqlTypes.STRING))
    );
}
/**
 * Opens the output format and resolves the savepoint target location.
 *
 * @throws IllegalStateException if executed with a parallelism other than 1
 */
@Override
public void open(int taskNumber, int numTasks) throws IOException {
    if (numTasks != 1) {
        throw new IllegalStateException(
                "SavepointOutputFormat should only be executed with parallelism 1");
    }
    targetLocation = createSavepointLocation(savepointPath);
}
// Opening the format with numTasks > 1 must fail with IllegalStateException.
@Test(expected = IllegalStateException.class)
public void testSavepointOutputFormatOnlyWorksWithParallelismOne() throws Exception {
    Path path = new Path(temporaryFolder.newFolder().getAbsolutePath());
    SavepointOutputFormat format = createSavepointOutputFormat(path);

    format.open(0, 2);
}
/** Returns the configured default value for this field. */
@Override
public Object getDefaultValue() {
    return this.defaultValue;
}
// A TextField constructed with a default value must expose it via getDefaultValue().
@Test
public void testGetDefaultValue() throws Exception {
    final TextField field = new TextField("test", "Name", "default", "description");

    assertEquals("default", field.getDefaultValue());
}
/**
 * A round-robin distribution satisfies either a required ANY distribution or another
 * round-robin requirement.
 */
public boolean isSatisfy(DistributionSpec spec) {
    final boolean anyRequested = spec.type.equals(DistributionType.ANY);
    return anyRequested || spec instanceof RoundRobinDistributionSpec;
}
// Round-robin satisfies ANY and round-robin requirements, but not replicated.
@Test
void isSatisfy() {
    DistributionSpec rr = new RoundRobinDistributionSpec();
    assertTrue(rr.isSatisfy(AnyDistributionSpec.INSTANCE));
    assertTrue(rr.isSatisfy(new RoundRobinDistributionSpec()));
    assertFalse(rr.isSatisfy(new ReplicatedDistributionSpec()));
}
/**
 * Resolves a SpEL expression against the invoked method's context. Dispatches by the
 * expression's shape: property placeholders, method-argument expressions, bean-referencing
 * expressions, or plain text returned verbatim.
 *
 * @param method         the intercepted method supplying parameter names/values
 * @param arguments      actual invocation arguments
 * @param spelExpression expression to resolve; may be null/empty
 * @return the resolved string, or the input unchanged when no pattern matches
 */
@Override
public String resolve(Method method, Object[] arguments, String spelExpression) {
    // Nothing to resolve for null/empty input.
    if (StringUtils.isEmpty(spelExpression)) {
        return spelExpression;
    }
    // ${...} placeholders are delegated to the embedded value resolver when available.
    if (spelExpression.matches(PLACEHOLDER_SPEL_REGEX) && stringValueResolver != null) {
        return stringValueResolver.resolveStringValue(spelExpression);
    }
    // Method-style expressions evaluate against the method and its arguments.
    if (spelExpression.matches(METHOD_SPEL_REGEX)) {
        SpelRootObject rootObject = new SpelRootObject(method, arguments);
        MethodBasedEvaluationContext evaluationContext = new MethodBasedEvaluationContext(rootObject, method, arguments, parameterNameDiscoverer);
        Object evaluated = expressionParser.parseExpression(spelExpression).getValue(evaluationContext);

        return (String) evaluated;
    }
    // Bean-style expressions additionally get a BeanFactoryResolver so @beanName references work.
    if (spelExpression.matches(BEAN_SPEL_REGEX)) {
        SpelRootObject rootObject = new SpelRootObject(method, arguments);
        MethodBasedEvaluationContext evaluationContext = new MethodBasedEvaluationContext(rootObject, method, arguments, parameterNameDiscoverer);
        evaluationContext.setBeanResolver(new BeanFactoryResolver(this.beanFactory));
        Object evaluated = expressionParser.parseExpression(spelExpression).getValue(evaluationContext);

        return (String) evaluated;
    }
    // No recognized pattern: return the expression text as-is.
    return spelExpression;
}
// An @bean.method(#arg) expression must invoke the referenced bean with the method argument.
@Test
public void beanMethodSpelTest() throws Exception {
    String testExpression = "@dummySpelBean.getBulkheadName(#parameter)";
    String testMethodArg = "argg";
    String bulkheadName = "sgt. bulko";
    DefaultSpelResolverTest target = new DefaultSpelResolverTest();
    Method testMethod = target.getClass().getMethod("testMethod", String.class);
    given(dummySpelBean.getBulkheadName(testMethodArg)).willReturn(bulkheadName);

    String result = sut.resolve(testMethod, new Object[]{testMethodArg}, testExpression);

    then(dummySpelBean).should(times(1)).getBulkheadName(testMethodArg);
    assertThat(result).isEqualTo(bulkheadName);
}
/**
 * Triggers stop-with-savepoint: checks preconditions, suspends periodic checkpointing,
 * takes a synchronous savepoint, and transitions into the StopWithSavepoint state.
 *
 * @param targetDirectory optional savepoint target directory
 * @param terminate       whether the job should terminate after the savepoint
 * @param formatType      savepoint binary format
 * @return future completing with the savepoint's external pointer
 */
CompletableFuture<String> stopWithSavepoint(
        @Nullable final String targetDirectory,
        boolean terminate,
        SavepointFormatType formatType) {
    final ExecutionGraph executionGraph = getExecutionGraph();

    // Fails fast when a savepoint cannot be taken (e.g. missing checkpoint coordinator).
    StopWithSavepointTerminationManager.checkSavepointActionPreconditions(
        executionGraph.getCheckpointCoordinator(),
        targetDirectory,
        executionGraph.getJobID(),
        getLogger());

    getLogger().info("Triggering stop-with-savepoint for job {}.", executionGraph.getJobID());

    // Periodic checkpoints must not run concurrently with the synchronous savepoint.
    CheckpointScheduling schedulingProvider = new CheckpointSchedulingProvider(executionGraph);
    schedulingProvider.stopCheckpointScheduler();

    final CompletableFuture<String> savepointFuture =
        Objects.requireNonNull(executionGraph.getCheckpointCoordinator())
            .triggerSynchronousSavepoint(terminate, targetDirectory, formatType)
            .thenApply(CompletedCheckpoint::getExternalPointer);
    return context.goToStopWithSavepoint(
        executionGraph,
        getExecutionGraphHandler(),
        getOperatorCoordinatorHandler(),
        schedulingProvider,
        savepointFuture,
        getFailures());
}
// stopWithSavepoint from the Executing state must transition to StopWithSavepoint.
@Test
void testTransitionToStopWithSavepointState() throws Exception {
    try (MockExecutingContext ctx = new MockExecutingContext()) {
        CheckpointCoordinator coordinator =
            new CheckpointCoordinatorTestingUtils.CheckpointCoordinatorBuilder()
                .build(EXECUTOR_EXTENSION.getExecutor());
        // Execution graph stub exposing a real checkpoint coordinator.
        StateTrackingMockExecutionGraph mockedExecutionGraphWithCheckpointCoordinator =
            new StateTrackingMockExecutionGraph() {
                @Nullable
                @Override
                public CheckpointCoordinator getCheckpointCoordinator() {
                    return coordinator;
                }
            };
        Executing exec =
            new ExecutingStateBuilder()
                .setExecutionGraph(mockedExecutionGraphWithCheckpointCoordinator)
                .build(ctx);

        ctx.setExpectStopWithSavepoint(assertNonNull());

        exec.stopWithSavepoint("file:///tmp/target", true, SavepointFormatType.CANONICAL);
    }
}
/**
 * Concatenates two string arrays into a new array containing all elements of
 * {@code array1} followed by all elements of {@code array2}.
 * <p>
 * When either input is empty, the other input is returned as-is (no copy is made).
 *
 * @param array1 leading elements (must not be null)
 * @param array2 trailing elements (must not be null)
 * @return combined array, or one of the inputs when the other is empty
 */
public static String[] concat(String[] array1, String[] array2) {
    // Fast paths: reuse the non-empty side when the other contributes nothing.
    if (array1.length == 0) {
        return array2;
    }
    if (array2.length == 0) {
        return array1;
    }

    final String[] merged = new String[array1.length + array2.length];
    System.arraycopy(array1, 0, merged, 0, array1.length);
    System.arraycopy(array2, 0, merged, array1.length, array2.length);
    return merged;
}
// Concatenation preserves order and is argument-order sensitive.
@Test
void concatArrays() {
    String[] array1 = new String[] {"A", "B", "C", "D", "E", "F", "G"};
    String[] array2 = new String[] {"1", "2", "3"};
    assertThat(ArrayUtils.concat(array1, array2))
        .isEqualTo(new String[] {"A", "B", "C", "D", "E", "F", "G", "1", "2", "3"});
    assertThat(ArrayUtils.concat(array2, array1))
        .isEqualTo(new String[] {"1", "2", "3", "A", "B", "C", "D", "E", "F", "G"});
}
/**
 * FUSE chown entry point: delegates to {@code chownInternal} wrapped with the standard
 * Alluxio FUSE call instrumentation (logging and metrics keyed by FUSE_CHOWN).
 */
@Override
public int chown(String path, long uid, long gid) {
    return AlluxioFuseUtils.call(LOG, () -> chownInternal(path, uid, gid),
        FuseConstants.FUSE_CHOWN, "path=%s,uid=%d,gid=%d", path, uid, gid);
}
// chown with an unset/invalid gid must only propagate the owner (no group) to setAttribute,
// for both the signed and unsigned "not set" sentinel values.
@Test
@DoraTestTodoItem(action = DoraTestTodoItem.Action.FIX, owner = "LuQQiu",
    comment = "waiting on security metadata to be implemented in Dora")
@Ignore
public void chownWithoutValidGid() throws Exception {
    Optional<Long> uid = AlluxioFuseUtils.getUid(System.getProperty("user.name"));
    assertTrue(uid.isPresent());
    long gid = AlluxioFuseUtils.ID_NOT_SET_VALUE;
    URIStatus status = mock(URIStatus.class);
    when(status.getOwner()).thenReturn("user");
    when(status.getGroup()).thenReturn("group");
    when(mFileSystem.getStatus(any(AlluxioURI.class))).thenReturn(status);
    mFuseFs.chown("/foo/bar", uid.get(), gid);
    String userName = System.getProperty("user.name");
    Optional<String> groupName = AlluxioFuseUtils.getGroupName(userName);
    assertTrue(groupName.isPresent());
    AlluxioURI expectedPath = BASE_EXPECTED_URI.join("/foo/bar");
    // invalid gid will not be contained in options
    SetAttributePOptions options =
        SetAttributePOptions.newBuilder().setOwner(userName).build();
    verify(mFileSystem).setAttribute(expectedPath, options);

    // The unsigned sentinel must behave the same way.
    gid = AlluxioFuseUtils.ID_NOT_SET_VALUE_UNSIGNED;
    mFuseFs.chown("/foo/bar", uid.get(), gid);
    verify(mFileSystem, times(2)).setAttribute(expectedPath, options);
}
/** Test-only accessor for this buffer's memory manager. */
@VisibleForTesting
OutputBufferMemoryManager getMemoryManager() {
    return this.memoryManager;
}
// With setNoBlockOnFull(): the buffer still blocks while memory is unavailable, but once
// memory frees up a full buffer no longer blocks producers.
@Test
public void testSharedBufferBlockingNoBlockOnFull() {
    SettableFuture<?> blockedFuture = SettableFuture.create();
    MockMemoryReservationHandler reservationHandler = new MockMemoryReservationHandler(blockedFuture);
    AggregatedMemoryContext memoryContext = newRootAggregatedMemoryContext(reservationHandler, 0L);

    Page page = createPage(1);
    long pageSize = PAGES_SERDE.serialize(page).getRetainedSizeInBytes();

    // create a buffer that can only hold two pages
    BroadcastOutputBuffer buffer = createBroadcastBuffer(createInitialEmptyOutputBuffers(BROADCAST), new DataSize(pageSize * 2, BYTE), memoryContext, directExecutor());
    OutputBufferMemoryManager memoryManager = buffer.getMemoryManager();
    memoryManager.setNoBlockOnFull();

    // even if setNoBlockOnFull() is called the buffer should block on memory when we add the first page
    // as no memory is available (MockMemoryReservationHandler will return a future that is not done)
    enqueuePage(buffer, page);

    // more memory is available
    blockedFuture.set(null);
    memoryManager.onMemoryAvailable();
    assertTrue(memoryManager.getBufferBlockedFuture().isDone(), "buffer shouldn't be blocked");

    // we should be able to add one more page after more memory is available
    addPage(buffer, page);

    // the buffer is full now, but setNoBlockOnFull() is called so the buffer shouldn't block
    addPage(buffer, page);
}
/**
 * Executes a DDL command, delegating to the five-argument overload with the final flag
 * set to {@code false}.
 * NOTE(review): the fifth argument's semantics are defined by the overload (not visible
 * here) — confirm against its declaration before relying on this description.
 */
public DdlCommandResult execute(
    final String sql,
    final DdlCommand ddlCommand,
    final boolean withQuery,
    final Set<SourceName> withQuerySources
) {
    return execute(sql, ddlCommand, withQuery, withQuerySources, false);
}
// ALTER on a source created via CREATE ... AS must be rejected with a clear error.
@Test
public void shouldThrowOnAlterCAS() {
    // Given:
    givenCreateStream();
    cmdExec.execute(SQL_TEXT, createStream, true, NO_QUERY_SOURCES);
    alterSource = new AlterSourceCommand(STREAM_NAME, DataSourceType.KSTREAM.getKsqlType(), NEW_COLUMNS);

    // When:
    final KsqlException e = assertThrows(KsqlException.class,
        () -> cmdExec.execute(SQL_TEXT, alterSource, false, NO_QUERY_SOURCES));

    // Then:
    assertThat(e.getMessage(), is("ALTER command is not supported for CREATE ... AS statements."));
}
/**
 * Returns whether the data template for {@code definedClass} exposes the ProjectionMask
 * inner class. Results are memoized per class in {@code _hasProjectionMaskCache}.
 * <p>
 * First tries loading an already-compiled class from the classloader; if it is absent,
 * falls back to checking whether the class will be generated from a source PDL.
 */
boolean hasProjectionMaskApi(JClass definedClass, ClassTemplateSpec templateSpec) {
    return _hasProjectionMaskCache.computeIfAbsent(definedClass, (jClass) -> {
        try {
            final Class<?> clazz = _classLoader.loadClass(jClass.fullName());
            // Compiled class found: look for a nested ProjectionMask class.
            return Arrays.stream(clazz.getClasses()).anyMatch(
                c -> c.getSimpleName().equals(JavaDataTemplateGenerator.PROJECTION_MASK_CLASSNAME));
        } catch (ClassNotFoundException e) {
            // Ignore, and check if the class will be generated from a source PDL
        }
        return isGeneratedFromSource(templateSpec);
    });
}
// When the compiled class (with ProjectionMask) is on the classpath, the check must pass
// without ever consulting the source file, and the result must be cached.
@Test
public void testHasProjectionMaskApiClassFoundWithProjectionMask() throws Exception {
    ProjectionMaskApiChecker projectionMaskApiChecker = new ProjectionMaskApiChecker(
        _templateSpecGenerator, _sourceFiles, _classLoader);
    Mockito.when(_nestedTypeSource.getAbsolutePath()).thenReturn(pegasusDir + FS + "Bar.pdl");
    Mockito.when(_nestedType.fullName()).thenReturn(FakeRecordWithProjectionMask.class.getName());
    Assert.assertTrue(projectionMaskApiChecker.hasProjectionMaskApi(_nestedType, _templateSpec));
    Mockito.verify(_nestedType, Mockito.times(1)).fullName();
    Mockito.verify(_nestedTypeSource, Mockito.never()).getAbsolutePath();

    // Check caching
    Assert.assertTrue(projectionMaskApiChecker.hasProjectionMaskApi(_nestedType, _templateSpec));
    Mockito.verifyNoMoreInteractions(_nestedType);
}
/**
 * Reads the stream fully into memory and delegates to the byte-array overload.
 * The caller retains ownership of the stream (it is not closed here).
 *
 * @throws IllegalStateException when the stream cannot be read
 */
@Override
public void write(String key, InputStream data) {
    checkNotNull(data);
    final byte[] bytes;
    try {
        bytes = data.readAllBytes();
    } catch (IOException e) {
        throw new IllegalStateException("Failed to read sensor write cache data", e);
    }
    write(key, bytes);
}
// On pull-request analyses nothing must be persisted, for both write overloads.
@Test
public void dont_write_if_its_pull_request() {
    byte[] b1 = new byte[] {1, 2, 3};
    when(branchConfiguration.isPullRequest()).thenReturn(true);

    writeCache.write("key1", b1);
    writeCache.write("key2", new ByteArrayInputStream(b1));

    assertThatCacheContains(Map.of());
}
/**
 * Signs a private transaction: RLP-encodes it, signs the encoding with the given
 * credentials, and returns the RLP encoding that embeds the signature.
 */
public static byte[] signMessage(
        final RawPrivateTransaction privateTransaction, final Credentials credentials) {
    final Sign.SignatureData signature =
            Sign.signMessage(encode(privateTransaction), credentials.getEcKeyPair());
    return encode(privateTransaction, signature);
}
// Pins the exact signed RLP hex of an EIP-1559 private transaction for a fixed key/chain id.
@Test
public void testSign1559Transaction() {
    final String expected =
        "0x02f8d48207e2800101832dc6c094627306090abab3a6e1400e9345bc60c78a8bef57808001a0c4b5ae238eaa5cb154788d675ff61946e6886bfcc007591042d6a7daf14cbd6fa047f417ac1923e7e6adc77b3384dc1dd3bdf9208e4f1e5436775d56e5f595e249a0035695b4cc4b0941e60551d7a19cf30603db5bfc23e5ac43a56f57f25f75486af842a0035695b4cc4b0941e60551d7a19cf30603db5bfc23e5ac43a56f57f25f75486aa02a8d9b56a0fe9cd94d60be4413bcb721d3a7be27ed8e28b3a6346df874ee141b8a72657374726963746564";
    final long chainId = 2018;

    final RawPrivateTransaction privateTransactionCreation =
        RawPrivateTransaction.createTransaction(
            chainId,
            BigInteger.ZERO,
            BigInteger.ONE,
            BigInteger.ONE,
            BigInteger.valueOf(3000000),
            "0x627306090abab3a6e1400e9345bc60c78a8bef57",
            "0x",
            MOCK_ENCLAVE_KEY,
            MOCK_PRIVATE_FOR,
            RESTRICTED);

    final String privateKey =
        "8f2a55949038a9610f50fb23b5883af3b4ecb3c3bb792cbcefbd1542c692be63";
    final Credentials credentials = Credentials.create(privateKey);
    final String privateRawTransaction =
        Numeric.toHexString(
            PrivateTransactionEncoder.signMessage(
                privateTransactionCreation, credentials));

    assertEquals(expected, privateRawTransaction);
}
/**
 * Returns a transform producing a {@code List}-shaped view of a PCollection for use as a
 * side input, constructed with the default arguments ({@code null}, {@code false}).
 * NOTE(review): the meaning of those constructor arguments is defined by {@code AsList},
 * which is not visible here.
 */
public static <T> AsList<T> asList() {
    return new AsList<>(null, false);
}
@Test
@Category(ValidatesRunner.class)
public void testEmptyListSideInput() throws Exception {
    // An empty PCollection viewed as a List must yield an empty (not null) list.
    final PCollectionView<List<Integer>> view =
        pipeline.apply("CreateEmptyView", Create.empty(VarIntCoder.of())).apply(View.asList());
    PCollection<Integer> results =
        pipeline
            .apply("Create1", Create.of(1))
            .apply(
                "OutputSideInputs",
                ParDo.of(
                        new DoFn<Integer, Integer>() {
                          @ProcessElement
                          public void processElement(ProcessContext c) {
                            // Side input should be empty both via isEmpty() and its iterator.
                            assertTrue(c.sideInput(view).isEmpty());
                            assertFalse(c.sideInput(view).iterator().hasNext());
                            c.output(1);
                          }
                        })
                    .withSideInputs(view));
    // Pass at least one value through to guarantee that DoFn executes.
    PAssert.that(results).containsInAnyOrder(1);
    pipeline.run();
}
/**
 * Reads the account's storage quota (used and available bytes) from the
 * Storegate account-settings API, mapping API failures to a backend exception.
 */
@Override
public Space get() throws BackgroundException {
    try {
        final AccountSettingsApi api = new AccountSettingsApi(session.getClient());
        final AccountStorage storage = api.accountSettingsGetAccountStorage();
        return new Space(storage.getUsed(), storage.getAvailable());
    }
    catch(ApiException e) {
        // Resolve the home folder only to give the mapped exception a file context.
        throw new StoregateExceptionMappingService(fileid).map("Failure to read attributes of {0}", e,
                new DefaultHomeFinderService(session).find());
    }
}
@Test
public void get() throws Exception {
    // Live-API smoke test: quota numbers must be present and non-zero.
    final Quota.Space quota = new StoregateQuotaFeature(session, new StoregateIdProvider(session)).get();
    assertNotNull(quota.available);
    assertNotNull(quota.used);
    assertNotEquals(0L, quota.available, 0L);
    assertNotEquals(0L, quota.used, 0L);
    // NOTE(review): this assertion is trivially true whenever used > 0; presumably it was
    // meant to compare against a total (e.g. used < available + used) — confirm intent.
    assertTrue(quota.available < quota.available + quota.used);
}
/**
 * Streams one Bigtable change-stream partition.
 *
 * <p>High-level flow: (1) acquire/verify the partition lock, (2) handle a pending
 * CloseStream (end-of-time, error, or split/merge) if one is checkpointed in the
 * restriction, otherwise (3) persist the watermark and read the change stream,
 * delegating each record to {@code changeStreamAction}.
 *
 * @param partitionRecord the partition to stream, including uuid and parent partitions
 * @param tracker restriction tracker holding the current StreamProgress checkpoint
 * @param receiver output receiver for (partition key, change stream record) pairs
 * @param watermarkEstimator manually advanced watermark for this partition
 * @return stop() on lock failure or CloseStream termination; resume() when the
 *     stream loop exits normally; otherwise whatever changeStreamAction returns
 * @throws IOException if reading the change stream fails
 */
public ProcessContinuation run(
    PartitionRecord partitionRecord,
    RestrictionTracker<StreamProgress, StreamProgress> tracker,
    OutputReceiver<KV<ByteString, ChangeStreamRecord>> receiver,
    ManualWatermarkEstimator<Instant> watermarkEstimator)
    throws IOException {
  BytesThroughputEstimator<KV<ByteString, ChangeStreamRecord>> throughputEstimator =
      new BytesThroughputEstimator<>(sizeEstimator, Instant.now());
  // Lock the partition
  if (tracker.currentRestriction().isEmpty()) {
    boolean lockedPartition = metadataTableDao.lockAndRecordPartition(partitionRecord);
    // Clean up NewPartition on the first run regardless of locking result. If locking fails it
    // means this partition is being streamed, then cleaning up NewPartitions avoids lingering
    // NewPartitions.
    for (NewPartition newPartition : partitionRecord.getParentPartitions()) {
      metadataTableDao.deleteNewPartition(newPartition);
    }
    if (!lockedPartition) {
      LOG.info(
          "RCSP {} : Could not acquire lock with uid: {}, because this is a "
              + "duplicate and another worker is working on this partition already.",
          formatByteStringRange(partitionRecord.getPartition()),
          partitionRecord.getUuid());
      // Claim a fail-to-lock StreamProgress so the runner records why we stopped.
      StreamProgress streamProgress = new StreamProgress();
      streamProgress.setFailToLock(true);
      metrics.decPartitionStreamCount();
      tracker.tryClaim(streamProgress);
      return ProcessContinuation.stop();
    }
  } else if (tracker.currentRestriction().getCloseStream() == null
      && !metadataTableDao.doHoldLock(
          partitionRecord.getPartition(), partitionRecord.getUuid())) {
    // We only verify the lock if we are not holding CloseStream because if this is a retry of
    // CloseStream we might have already cleaned up the lock in a previous attempt.
    // Failed correctness check on this worker holds the lock on this partition. This shouldn't
    // fail because there's a restriction tracker which means this worker has already acquired the
    // lock and once it has acquired the lock it shouldn't fail the lock check.
    LOG.warn(
        "RCSP {} : Subsequent run that doesn't hold the lock {}. This is not unexpected and "
            + "should probably be reviewed.",
        formatByteStringRange(partitionRecord.getPartition()),
        partitionRecord.getUuid());
    StreamProgress streamProgress = new StreamProgress();
    streamProgress.setFailToLock(true);
    metrics.decPartitionStreamCount();
    tracker.tryClaim(streamProgress);
    return ProcessContinuation.stop();
  }
  // Process CloseStream if it exists
  CloseStream closeStream = tracker.currentRestriction().getCloseStream();
  if (closeStream != null) {
    LOG.debug("RCSP: Processing CloseStream");
    metrics.decPartitionStreamCount();
    if (closeStream.getStatus().getCode() == Status.Code.OK) {
      // We need to update watermark here. We're terminating this stream because we have reached
      // endTime. Instant.now is greater or equal to endTime. The goal here is
      // DNP will need to know this stream has passed the endTime so DNP can eventually terminate.
      Instant terminatingWatermark = Instant.ofEpochMilli(Long.MAX_VALUE);
      Instant endTime = partitionRecord.getEndTime();
      if (endTime != null) {
        terminatingWatermark = endTime;
      }
      watermarkEstimator.setWatermark(terminatingWatermark);
      metadataTableDao.updateWatermark(
          partitionRecord.getPartition(), watermarkEstimator.currentWatermark(), null);
      LOG.info(
          "RCSP {}: Reached end time, terminating...",
          formatByteStringRange(partitionRecord.getPartition()));
      return ProcessContinuation.stop();
    }
    if (closeStream.getStatus().getCode() != Status.Code.OUT_OF_RANGE) {
      // Only OUT_OF_RANGE (split/merge) proceeds below; anything else is terminal.
      LOG.error(
          "RCSP {}: Reached unexpected terminal state: {}",
          formatByteStringRange(partitionRecord.getPartition()),
          closeStream.getStatus());
      return ProcessContinuation.stop();
    }
    // Release the lock only if the uuid matches. In normal operation this doesn't change
    // anything. However, it's possible for this RCSP to crash while processing CloseStream but
    // after the side effects of writing the new partitions to the metadata table. New partitions
    // can be created while this RCSP restarts from the previous checkpoint and processes the
    // CloseStream again. In certain race scenarios the child partitions may merge back to this
    // partition, but as a new RCSP. The new partition (same as this partition) would write the
    // exact same content to the metadata table but with a different uuid. We don't want to
    // accidentally delete the StreamPartition because it now belongs to the new RCSP.
    // If the uuid is the same (meaning this race scenario did not take place) we release the lock
    // and mark the StreamPartition to be deleted, so we can delete it after we have written the
    // NewPartitions.
    metadataTableDao.releaseStreamPartitionLockForDeletion(
        partitionRecord.getPartition(), partitionRecord.getUuid());
    // The partitions in the continuation tokens must cover the same key space as this partition.
    // If there's only 1 token, then the token's partition is equals to this partition.
    // If there are more than 1 tokens, then the tokens form a continuous row range equals to this
    // partition.
    List<ByteStringRange> childPartitions = new ArrayList<>();
    List<ByteStringRange> tokenPartitions = new ArrayList<>();
    // Check if NewPartitions field exists, if not we default to using just the
    // ChangeStreamContinuationTokens.
    boolean useNewPartitionsField =
        closeStream.getNewPartitions().size()
            == closeStream.getChangeStreamContinuationTokens().size();
    for (int i = 0; i < closeStream.getChangeStreamContinuationTokens().size(); i++) {
      ByteStringRange childPartition;
      if (useNewPartitionsField) {
        childPartition = closeStream.getNewPartitions().get(i);
      } else {
        childPartition = closeStream.getChangeStreamContinuationTokens().get(i).getPartition();
      }
      childPartitions.add(childPartition);
      // Re-anchor the token on this partition's range before persisting it.
      ChangeStreamContinuationToken token =
          getTokenWithCorrectPartition(
              partitionRecord.getPartition(),
              closeStream.getChangeStreamContinuationTokens().get(i));
      tokenPartitions.add(token.getPartition());
      metadataTableDao.writeNewPartition(
          new NewPartition(
              childPartition, Collections.singletonList(token), watermarkEstimator.getState()));
    }
    LOG.info(
        "RCSP {}: Split/Merge into {}",
        formatByteStringRange(partitionRecord.getPartition()),
        partitionsToString(childPartitions));
    if (!coverSameKeySpace(tokenPartitions, partitionRecord.getPartition())) {
      LOG.warn(
          "RCSP {}: CloseStream has tokens {} that don't cover the entire keyspace",
          formatByteStringRange(partitionRecord.getPartition()),
          partitionsToString(tokenPartitions));
    }
    // Perform the real cleanup. This step is no op if the race mentioned above occurs (splits and
    // merges results back to this partition again) because when we register the "new" partition,
    // we unset the deletion bit.
    metadataTableDao.deleteStreamPartitionRow(partitionRecord.getPartition());
    return ProcessContinuation.stop();
  }
  // Update the metadata table with the watermark
  metadataTableDao.updateWatermark(
      partitionRecord.getPartition(),
      watermarkEstimator.getState(),
      tracker.currentRestriction().getCurrentToken());
  // Start to stream the partition.
  ServerStream<ChangeStreamRecord> stream = null;
  try {
    stream =
        changeStreamDao.readChangeStreamPartition(
            partitionRecord,
            tracker.currentRestriction(),
            partitionRecord.getEndTime(),
            heartbeatDuration);
    for (ChangeStreamRecord record : stream) {
      Optional<ProcessContinuation> result =
          changeStreamAction.run(
              partitionRecord,
              record,
              tracker,
              receiver,
              watermarkEstimator,
              throughputEstimator);
      // changeStreamAction will usually return Optional.empty() except for when a checkpoint
      // (either runner or pipeline initiated) is required.
      if (result.isPresent()) {
        return result.get();
      }
    }
  } catch (Exception e) {
    throw e;
  } finally {
    // Always cancel the server stream so the RPC does not linger.
    if (stream != null) {
      stream.cancel();
    }
  }
  return ProcessContinuation.resume();
}
@Test
public void testCloseStreamWritesContinuationTokens() throws IOException {
  // Force lock fail because CloseStream should not depend on locking
  when(metadataTableDao.doHoldLock(partition, uuid)).thenReturn(false);
  ChangeStreamContinuationToken tokenAB =
      ChangeStreamContinuationToken.create(ByteStringRange.create("A", "B"), "AB");
  ChangeStreamContinuationToken tokenBC =
      ChangeStreamContinuationToken.create(ByteStringRange.create("B", "C"), "BC");
  CloseStream mockCloseStream = Mockito.mock(CloseStream.class);
  // Status code 11 — presumably gRPC OUT_OF_RANGE, which triggers split/merge handling; confirm.
  Status statusProto = Status.newBuilder().setCode(11).build();
  Mockito.when(mockCloseStream.getStatus())
      .thenReturn(com.google.cloud.bigtable.common.Status.fromProto(statusProto));
  Mockito.when(mockCloseStream.getChangeStreamContinuationTokens())
      .thenReturn(Arrays.asList(tokenAB, tokenBC));
  Mockito.when(mockCloseStream.getNewPartitions())
      .thenReturn(Arrays.asList(tokenAB.getPartition(), tokenBC.getPartition()));
  when(restriction.getCloseStream()).thenReturn(mockCloseStream);
  when(restriction.isEmpty()).thenReturn(false);
  final DoFn.ProcessContinuation result =
      action.run(partitionRecord, tracker, receiver, watermarkEstimator);
  assertEquals(DoFn.ProcessContinuation.stop(), result);
  // Should terminate before reaching processing stream partition responses.
  verify(changeStreamAction, never()).run(any(), any(), any(), any(), any(), any());
  // Should not try claim any restriction when processing CloseStream
  verify(tracker, (never())).tryClaim(any());
  // Should decrement the metric on termination.
  verify(metrics).decPartitionStreamCount();
  // Write the new partitions.
  NewPartition newPartitionAB =
      new NewPartition(
          tokenAB.getPartition(), Collections.singletonList(tokenAB), watermarkEstimator.getState());
  verify(metadataTableDao).writeNewPartition(newPartitionAB);
  NewPartition newPartitionBC =
      new NewPartition(
          tokenBC.getPartition(), Collections.singletonList(tokenBC), watermarkEstimator.getState());
  verify(metadataTableDao).writeNewPartition(newPartitionBC);
  // The old partition row is released and then deleted exactly once.
  verify(metadataTableDao, times(1))
      .releaseStreamPartitionLockForDeletion(
          partitionRecord.getPartition(), partitionRecord.getUuid());
  verify(metadataTableDao, times(1)).deleteStreamPartitionRow(partitionRecord.getPartition());
}
/**
 * Fetches the value for the given key at the given timestamp from the wrapped
 * store, unwrapping the timestamped value to the plain value, or null if absent.
 */
@Override
public V fetch(final K key, final long time) {
    return getValueOrNull(inner.fetch(key, time));
}
@Test
public void shouldReturnPlainKeyValuePairsOnRangeFetchLongParameters() {
    // The inner store yields timestamped values; the facade must strip the timestamps.
    when(mockedKeyValueWindowTimestampIterator.next())
        .thenReturn(KeyValue.pair(
            new Windowed<>("key1", new TimeWindow(21L, 22L)),
            ValueAndTimestamp.make("value1", 22L)))
        .thenReturn(KeyValue.pair(
            new Windowed<>("key2", new TimeWindow(42L, 43L)),
            ValueAndTimestamp.make("value2", 100L)));
    when(mockedWindowTimestampStore.fetch("key1", "key2", Instant.ofEpochMilli(21L), Instant.ofEpochMilli(42L)))
        .thenReturn(mockedKeyValueWindowTimestampIterator);
    final KeyValueIterator<Windowed<String>, String> iterator =
        readOnlyWindowStoreFacade.fetch("key1", "key2", Instant.ofEpochMilli(21L), Instant.ofEpochMilli(42L));
    // Plain values come back in the same order, without their timestamps.
    assertThat(iterator.next(), is(KeyValue.pair(new Windowed<>("key1", new TimeWindow(21L, 22L)), "value1")));
    assertThat(iterator.next(), is(KeyValue.pair(new Windowed<>("key2", new TimeWindow(42L, 43L)), "value2")));
}
/**
 * Creates a new PRE replica for the given egress port and instance ID.
 *
 * @param egressPort the replica's egress port (must not be null)
 * @param instanceId the replica's instance ID
 */
public PiPreReplica(PortNumber egressPort, int instanceId) {
    // Assignments are independent; the port is null-checked on the way in.
    this.instanceId = instanceId;
    this.egressPort = checkNotNull(egressPort);
}
@Test
public void testPiPreReplica() {
    // Accessors must echo back the constructor arguments for both fixtures.
    assertThat("Invalid port", replica1of1.egressPort(), is(port1));
    assertThat("Invalid instance ID", replica1of1.instanceId(), is(instanceId1));
    assertThat("Invalid port", replica1of2.egressPort(), is(port2));
    assertThat("Invalid instance ID", replica1of2.instanceId(), is(instanceId1));
}
/**
 * Appends the dialect-quoted form of a dotted external name to {@code sb}.
 * Each dot-separated part is unescaped, unquoted if it was quoted, then
 * re-quoted by the dialect; parts are re-joined with ".".
 */
static void quoteExternalName(StringBuilder sb, String externalName) {
    List<String> parts = splitByNonQuotedDots(externalName);
    String separator = "";
    for (String part : parts) {
        sb.append(separator);
        DIALECT.quoteIdentifier(sb, unquoteIfQuoted(unescapeQuotes(part)));
        separator = ".";
    }
}
@Test
public void quoteExternalName_with_space() {
    // Names containing spaces must come out double-quoted, part by part.
    String externalName = "schema with space.table with space";
    StringBuilder sb = new StringBuilder();
    MappingHelper.quoteExternalName(sb, externalName);
    assertThat(sb.toString()).isEqualTo("\"schema with space\".\"table with space\"");
}
public static long getImmunityTime(String checkImmunityTimeStr, long transactionTimeout) { long checkImmunityTime = 0; try { checkImmunityTime = Long.parseLong(checkImmunityTimeStr) * 1000; } catch (Throwable ignored) { } //If a custom first check time is set, the minimum check time; //The default check protection period is transactionTimeout if (checkImmunityTime < transactionTimeout) { checkImmunityTime = transactionTimeout; } return checkImmunityTime; }
@Test
public void testGetImmunityTime() {
    long transactionTimeout = 6 * 1000;
    // Values below the timeout clamp up to the timeout.
    String checkImmunityTimeStr = "1";
    long immunityTime = TransactionalMessageUtil.getImmunityTime(checkImmunityTimeStr, transactionTimeout);
    Assert.assertEquals(6 * 1000, immunityTime);
    checkImmunityTimeStr = "5";
    immunityTime = TransactionalMessageUtil.getImmunityTime(checkImmunityTimeStr, transactionTimeout);
    Assert.assertEquals(6 * 1000, immunityTime);
    // Values above the timeout pass through (seconds -> milliseconds).
    checkImmunityTimeStr = "7";
    immunityTime = TransactionalMessageUtil.getImmunityTime(checkImmunityTimeStr, transactionTimeout);
    Assert.assertEquals(7 * 1000, immunityTime);
    // Null, negative, and non-integer inputs all fall back to the timeout.
    checkImmunityTimeStr = null;
    immunityTime = TransactionalMessageUtil.getImmunityTime(checkImmunityTimeStr, transactionTimeout);
    Assert.assertEquals(6 * 1000, immunityTime);
    checkImmunityTimeStr = "-1";
    immunityTime = TransactionalMessageUtil.getImmunityTime(checkImmunityTimeStr, transactionTimeout);
    Assert.assertEquals(6 * 1000, immunityTime);
    checkImmunityTimeStr = "60";
    immunityTime = TransactionalMessageUtil.getImmunityTime(checkImmunityTimeStr, transactionTimeout);
    Assert.assertEquals(60 * 1000, immunityTime);
    checkImmunityTimeStr = "100";
    immunityTime = TransactionalMessageUtil.getImmunityTime(checkImmunityTimeStr, transactionTimeout);
    Assert.assertEquals(100 * 1000, immunityTime);
    checkImmunityTimeStr = "100.5";
    immunityTime = TransactionalMessageUtil.getImmunityTime(checkImmunityTimeStr, transactionTimeout);
    Assert.assertEquals(6 * 1000, immunityTime);
}
/**
 * Returns an iterable view of the float array whose elements are compared
 * using the exact-equality correspondence (no tolerance).
 */
public FloatArrayAsIterable usingExactEquality() {
    final FloatArrayAsIterable exactView =
            new FloatArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject());
    return exactView;
}
@Test
public void usingExactEquality_containsAtLeast_primitiveFloatArray_failure() {
    // 99.99f is not in the subject array, so containsAtLeast must fail.
    expectFailureWhenTestingThat(array(1.1f, 2.2f, 3.3f))
        .usingExactEquality()
        .containsAtLeast(array(2.2f, 99.99f));
    // The failure message must identify exactly one missing element.
    assertFailureKeys(
        "value of",
        "missing (1)",
        "---",
        "expected to contain at least",
        "testing whether",
        "but was");
    assertFailureValue("missing (1)", Float.toString(99.99f));
}
/**
 * Convenience overload: returns the first page (50 entries) of index-set field
 * type summaries using the default sort.
 */
public PageListResponse<IndexSetFieldTypeSummary> getIndexSetFieldTypeSummary(final Set<String> streamIds,
                                                                              final String fieldName,
                                                                              final Predicate<String> indexSetPermissionPredicate) {
    final int firstPage = 1;
    final int defaultPageSize = 50;
    return getIndexSetFieldTypeSummary(streamIds, fieldName, indexSetPermissionPredicate,
            firstPage, defaultPageSize, DEFAULT_SORT.id(), DEFAULT_SORT.direction());
}
@Test
void testDoesNotReturnResultsForIndexSetsIfUserMissesPriviledges() {
    // A predicate that denies every index set should filter out all results
    // before any field-type lookup happens.
    Predicate<String> indexSetPermissionPredicateAlwaysReturningFalse = x -> false;
    doReturn(Set.of("index_set_id")).when(streamService).indexSetIdsByIds(Set.of("stream_id"));
    final PageListResponse<IndexSetFieldTypeSummary> summary =
        toTest.getIndexSetFieldTypeSummary(Set.of("stream_id"), "field_name", indexSetPermissionPredicateAlwaysReturningFalse);
    assertThat(summary.elements()).isEmpty();
    // No downstream services may be touched once permission is denied.
    verifyNoInteractions(indexFieldTypesService);
    verifyNoMoreInteractions(streamService);
    verifyNoInteractions(indexSetService);
}
/**
 * Validates cluster-related properties. A no-op when clustering is disabled.
 */
@Override
public void accept(Props props) {
    if (!isClusterEnabled(props)) {
        // Nothing to validate for a standalone node.
        return;
    }
    checkClusterProperties(props);
}
@Test
@UseDataProvider("validIPv4andIPv6Addresses")
public void accept_throws_MessageException_if_internal_property_for_startup_leader_is_configured(String host) {
    // The startup-leader flag is internal and must be rejected when user-set,
    // regardless of whether the host is IPv4 or IPv6.
    TestAppSettings settings = newSettingsForAppNode(host, of("sonar.cluster.web.startupLeader", "true"));
    ClusterSettings clusterSettings = new ClusterSettings(network);
    Props props = settings.getProps();
    assertThatThrownBy(() -> clusterSettings.accept(props))
        .isInstanceOf(MessageException.class)
        .hasMessage("Property [sonar.cluster.web.startupLeader] is forbidden");
}
/**
 * Returns the configured main-branch name, falling back to the built-in
 * default when the property is not set.
 */
public String getEffectiveMainBranchName() {
    return configuration.get(SONAR_PROJECTCREATION_MAINBRANCHNAME)
            .orElseGet(() -> DEFAULT_MAIN_BRANCH_NAME);
}
@Test
public void getEffectiveMainBranchName_givenDevelopInConfiguration_returnDevelop() {
    // When the property is configured, it wins over the default.
    Configuration config = mock(Configuration.class);
    when(config.get(SONAR_PROJECTCREATION_MAINBRANCHNAME)).thenReturn(Optional.of("develop"));
    DefaultBranchNameResolver defaultBranchNameResolver = new DefaultBranchNameResolver(config);
    String effectiveMainBranchName = defaultBranchNameResolver.getEffectiveMainBranchName();
    assertThat(effectiveMainBranchName).isEqualTo("develop");
}
/**
 * Loads the raw (implementation-specific) session between our device and a
 * contact's device from the store.
 *
 * @param userDevice our own OMEMO device
 * @param contactsDevice the contact's OMEMO device
 * @return the stored raw session, or null if no session exists
 * @throws IOException if the underlying storage cannot be read
 */
public abstract T_Sess loadRawSession(OmemoDevice userDevice, OmemoDevice contactsDevice) throws IOException;
@Test
public void loadNonExistentRawSessionReturnsNullTest() throws IOException {
    // A missing session must be reported as null, not as an exception.
    T_Sess session = store.loadRawSession(alice, bob);
    assertNull(session);
}
@Override @DSTransactional // 多数据源,使用 @DSTransactional 保证本地事务,以及数据源的切换 public Long createTenant(TenantSaveReqVO createReqVO) { // 校验租户名称是否重复 validTenantNameDuplicate(createReqVO.getName(), null); // 校验租户域名是否重复 validTenantWebsiteDuplicate(createReqVO.getWebsite(), null); // 校验套餐被禁用 TenantPackageDO tenantPackage = tenantPackageService.validTenantPackage(createReqVO.getPackageId()); // 创建租户 TenantDO tenant = BeanUtils.toBean(createReqVO, TenantDO.class); tenantMapper.insert(tenant); // 创建租户的管理员 TenantUtils.execute(tenant.getId(), () -> { // 创建角色 Long roleId = createRole(tenantPackage); // 创建用户,并分配角色 Long userId = createUser(roleId, createReqVO); // 修改租户的管理员 tenantMapper.updateById(new TenantDO().setId(tenant.getId()).setContactUserId(userId)); }); return tenant.getId(); }
@Test
public void testCreateTenant() {
    // mock package 100L
    TenantPackageDO tenantPackage = randomPojo(TenantPackageDO.class, o -> o.setId(100L));
    when(tenantPackageService.validTenantPackage(eq(100L))).thenReturn(tenantPackage);
    // mock role 200L — verify the auto-generated admin role's attributes
    when(roleService.createRole(argThat(role -> {
        assertEquals(RoleCodeEnum.TENANT_ADMIN.getName(), role.getName());
        assertEquals(RoleCodeEnum.TENANT_ADMIN.getCode(), role.getCode());
        assertEquals(0, role.getSort());
        assertEquals("系统自动生成", role.getRemark());
        return true;
    }), eq(RoleTypeEnum.SYSTEM.getType()))).thenReturn(200L);
    // mock user 300L — verify the admin user is created from the request fields
    when(userService.createUser(argThat(user -> {
        assertEquals("yunai", user.getUsername());
        assertEquals("yuanma", user.getPassword());
        assertEquals("芋道", user.getNickname());
        assertEquals("15601691300", user.getMobile());
        return true;
    }))).thenReturn(300L);
    // prepare the request
    TenantSaveReqVO reqVO = randomPojo(TenantSaveReqVO.class, o -> {
        o.setContactName("芋道");
        o.setContactMobile("15601691300");
        o.setPackageId(100L);
        o.setStatus(randomCommonStatus());
        o.setWebsite("https://www.iocoder.cn");
        o.setUsername("yunai");
        o.setPassword("yuanma");
    }).setId(null); // set to null for the assertions below
    // invoke
    Long tenantId = tenantService.createTenant(reqVO);
    // assert: a tenant ID was returned
    assertNotNull(tenantId);
    // verify the persisted record's attributes
    TenantDO tenant = tenantMapper.selectById(tenantId);
    assertPojoEquals(reqVO, tenant, "id");
    assertEquals(300L, tenant.getContactUserId());
    // verify menu permissions were assigned to the role
    verify(permissionService).assignRoleMenu(eq(200L), same(tenantPackage.getMenuIds()));
    // verify the role was assigned to the user
    verify(permissionService).assignUserRole(eq(300L), eq(singleton(200L)));
}
/**
 * Sets the layout's y-offset.
 *
 * @param y the new y-offset
 * @return this layout, for method chaining
 */
public UiTopoLayout offsetY(double y) {
    this.offsetY = y;
    return this;
}
@Test
public void setYOff() {
    mkOtherLayout();
    // Fluent setter must round-trip through the getter within tolerance DELTA.
    layout.offsetY(2.71828);
    assertEquals("wrong y-offset", 2.71828, layout.offsetY(), DELTA);
}
/**
 * Filters the host's network interfaces to those on the given subnet,
 * using the default (real) {@code NetworkInterfaceShim}.
 *
 * @param inetAddress the subnet's base address
 * @param prefixLength the subnet prefix length in bits
 * @return the matching interfaces
 * @throws SocketException if interface enumeration fails
 */
public static NetworkInterface[] filterBySubnet(final InetAddress inetAddress, final int prefixLength)
    throws SocketException {
    return filterBySubnet(NetworkInterfaceShim.DEFAULT, inetAddress, prefixLength);
}
@Test
void shouldFilterBySubnetAndFindMultipleIpV6ResultsOrderedByMatchLength() throws Exception {
    final NetworkInterfaceStub stub = new NetworkInterfaceStub();
    // Different leading bits (ee80 vs fe80) — must be excluded by the /16 filter.
    stub.add("ee80:0:0:0001:0:0:0:1/64");
    final NetworkInterface ifc1 = stub.add("fe80:0:0:0:0:0:0:1/16");
    final NetworkInterface ifc2 = stub.add("fe80:0001:0:0:0:0:0:1/32");
    final NetworkInterface ifc3 = stub.add("fe80:0001:abcd:0:0:0:0:1/48");
    final NetworkInterface[] filteredBySubnet = filterBySubnet(stub, getByName("fe80:0:0:0:0:0:0:0"), 16);
    assertEquals(3, filteredBySubnet.length);
    // Results are ordered by longest prefix match first.
    assertThat(filteredBySubnet[0], sameInstance(ifc3));
    assertThat(filteredBySubnet[1], sameInstance(ifc2));
    assertThat(filteredBySubnet[2], sameInstance(ifc1));
}
/**
 * Generates the migration SQL statements for a single column of the given
 * table by delegating to the multi-column overload with a singleton set.
 *
 * @param tableName the table to alter
 * @param columnName the single column involved
 * @param isAutoGenerated whether the column value is auto-generated
 * @return the SQL statements to execute
 * @throws SQLException if statement generation fails
 */
public List<String> generate(String tableName, String columnName, boolean isAutoGenerated) throws SQLException {
    return generate(tableName, singleton(columnName), isAutoGenerated);
}
@Test
public void generate_for_postgres_sql_no_seq() throws SQLException {
    // PostgreSQL with no backing sequence: only the DROP DEFAULT and DROP
    // CONSTRAINT statements are expected (no sequence cleanup).
    when(dbConstraintFinder.findConstraintName(TABLE_NAME)).thenReturn(Optional.of(CONSTRAINT));
    when(dbConstraintFinder.getPostgresSqlSequence(TABLE_NAME, "id")).thenReturn(null);
    when(db.getDialect()).thenReturn(POSTGRESQL);
    List<String> sqls = underTest.generate(TABLE_NAME, PK_COLUMN, true);
    assertThat(sqls).containsExactly("ALTER TABLE issues ALTER COLUMN id DROP DEFAULT",
        "ALTER TABLE issues DROP CONSTRAINT pk_id");
}
public void fillMaxSpeed(Graph graph, EncodingManager em) { // In DefaultMaxSpeedParser and in OSMMaxSpeedParser we don't have the rural/urban info, // but now we have and can fill the country-dependent max_speed value where missing. EnumEncodedValue<UrbanDensity> udEnc = em.getEnumEncodedValue(UrbanDensity.KEY, UrbanDensity.class); fillMaxSpeed(graph, em, edge -> edge.get(udEnc) != UrbanDensity.RURAL); }
@Test
public void testDifferentStates() {
    // Two edges in the same country (USA) but different states must get
    // different state-specific speed limits.
    ReaderWay way = new ReaderWay(0L);
    way.setTag("country", Country.USA);
    way.setTag("highway", "primary");
    way.setTag("country_state", State.US_CA);
    EdgeIteratorState edge1 = createEdge(way);
    way.setTag("country_state", State.US_FL);
    EdgeIteratorState edge2 = createEdge(way);
    calc.fillMaxSpeed(graph, em);
    assertEquals(106, edge1.get(maxSpeedEnc));
    assertEquals(90, edge2.get(maxSpeedEnc));
}
/**
 * Returns whether every value held by this bitmap fits in 32 bits,
 * dispatching on the internal representation.
 */
public boolean is32BitsEnough() {
    switch (bitmapType) {
        case EMPTY:
            // No values at all — trivially fits.
            return true;
        case SINGLE_VALUE:
            return isLongValue32bitEnough(singleValue);
        case BITMAP_VALUE:
            return bitmap.is32BitsEnough();
        case SET_VALUE:
            // All set members must individually fit.
            return set.stream().allMatch(v -> isLongValue32bitEnough(v));
        default:
            return false;
    }
}
@Test
public void testIs32BitsEnough() {
    BitmapValue bitmapValue = new BitmapValue();
    bitmapValue.add(0);
    bitmapValue.add(2);
    bitmapValue.add(Integer.MAX_VALUE);
    // unsigned 32-bit: values up to 2^32-1 still fit
    long unsigned32bit = Integer.MAX_VALUE;
    bitmapValue.add(unsigned32bit + 1);
    assertTrue-comment: // still representable in unsigned 32 bits
    Assert.assertTrue(bitmapValue.is32BitsEnough());
    // Long.MAX_VALUE exceeds the unsigned 32-bit range.
    bitmapValue.add(Long.MAX_VALUE);
    Assert.assertFalse(bitmapValue.is32BitsEnough());
}
/**
 * Starts or stops a named timer based on the message header, falling back to
 * the endpoint's configured action; logs a warning when no action is resolved.
 */
@Override
protected void doProcess(Exchange exchange, MetricsEndpoint endpoint, MetricRegistry registry, String metricsName) throws Exception {
    // Resolve the action: the header overrides the endpoint default.
    Message message = exchange.getIn();
    MetricsTimerAction defaultAction = endpoint.getAction();
    MetricsTimerAction resolvedAction = message.getHeader(HEADER_TIMER_ACTION, defaultAction, MetricsTimerAction.class);
    if (resolvedAction == MetricsTimerAction.start) {
        handleStart(exchange, registry, metricsName);
        return;
    }
    if (resolvedAction == MetricsTimerAction.stop) {
        handleStop(exchange, metricsName);
        return;
    }
    // Covers both null and any unrecognized action.
    LOG.warn("No action provided for timer \"{}\"", metricsName);
}
@Test
public void testProcessStart() throws Exception {
    // No Timer.Context stored yet, so a fresh timer must be created and started.
    when(endpoint.getAction()).thenReturn(MetricsTimerAction.start);
    when(in.getHeader(HEADER_TIMER_ACTION, MetricsTimerAction.start, MetricsTimerAction.class))
        .thenReturn(MetricsTimerAction.start);
    when(exchange.getProperty(PROPERTY_NAME, Timer.Context.class)).thenReturn(null);
    producer.doProcess(exchange, endpoint, registry, METRICS_NAME);
    // Verify the exact interaction order: read message, resolve action,
    // look up existing context, create+start the timer, store the context.
    inOrder.verify(exchange, times(1)).getIn();
    inOrder.verify(endpoint, times(1)).getAction();
    inOrder.verify(in, times(1)).getHeader(HEADER_TIMER_ACTION, MetricsTimerAction.start, MetricsTimerAction.class);
    inOrder.verify(exchange, times(1)).getProperty(PROPERTY_NAME, Timer.Context.class);
    inOrder.verify(registry, times(1)).timer(METRICS_NAME);
    inOrder.verify(timer, times(1)).time();
    inOrder.verify(exchange, times(1)).setProperty(PROPERTY_NAME, context);
    inOrder.verifyNoMoreInteractions();
}