focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Writes a single byte by delegating to write(int); only the low-order 8 bits of v are relevant.
@Override public final void writeByte(final int v) throws IOException { write(v); }
// Verifies the positional overload writeByte(position, value) stores the byte at the given buffer index.
@Test public void testWriteByteForPositionV() throws Exception { out.writeByte(0, 10); assertEquals(10, out.buffer[0]); }
// Skips this plugin for any exchange that is not HTTP-like, delegating the decision to skipExceptHttpLike.
@Override public boolean skip(final ServerWebExchange exchange) { return skipExceptHttpLike(exchange); }
// skip() returns true for a default (non-HTTP) exchange and false once the context RPC type is HTTP or SPRING_CLOUD.
@Test public void testSkip() { ServerWebExchange exchangeNormal = generateServerWebExchange(); assertTrue(webClientPlugin.skip(exchangeNormal)); ServerWebExchange exchangeHttp = generateServerWebExchange(); when(((ShenyuContext) exchangeHttp.getAttributes().get(Constants.CONTEXT)).getRpcType()) .thenReturn(RpcTypeEnum.HTTP.getName()); assertFalse(webClientPlugin.skip(exchangeHttp)); ServerWebExchange exchangeSpringCloud = generateServerWebExchange(); when(((ShenyuContext) exchangeSpringCloud.getAttributes().get(Constants.CONTEXT)).getRpcType()) .thenReturn(RpcTypeEnum.SPRING_CLOUD.getName()); assertFalse(webClientPlugin.skip(exchangeSpringCloud)); }
/**
 * Translates an ODPS {@code TypeInfo} descriptor into the engine's internal {@code Type}.
 * Parameterized types (DECIMAL/CHAR/VARCHAR) carry their precision/length across; complex
 * types (MAP/ARRAY/STRUCT) are converted recursively. Any unrecognized ODPS type degrades
 * to VARCHAR instead of failing.
 */
public static Type convertType(TypeInfo typeInfo) {
    switch (typeInfo.getOdpsType()) {
        case BOOLEAN:
            return Type.BOOLEAN;
        case TINYINT:
            return Type.TINYINT;
        case SMALLINT:
            return Type.SMALLINT;
        case INT:
            return Type.INT;
        case BIGINT:
            return Type.BIGINT;
        case FLOAT:
            return Type.FLOAT;
        case DOUBLE:
            return Type.DOUBLE;
        case DECIMAL:
            DecimalTypeInfo decimal = (DecimalTypeInfo) typeInfo;
            return ScalarType.createUnifiedDecimalType(decimal.getPrecision(), decimal.getScale());
        case CHAR:
            return ScalarType.createCharType(((CharTypeInfo) typeInfo).getLength());
        case VARCHAR:
            return ScalarType.createVarcharType(((VarcharTypeInfo) typeInfo).getLength());
        case STRING:
        case JSON:
            // Both map onto the catalog's default string type.
            return ScalarType.createDefaultCatalogString();
        case BINARY:
            return Type.VARBINARY;
        case DATE:
            return Type.DATE;
        case TIMESTAMP:
        case DATETIME:
            return Type.DATETIME;
        case MAP:
            MapTypeInfo map = (MapTypeInfo) typeInfo;
            return new MapType(convertType(map.getKeyTypeInfo()), convertType(map.getValueTypeInfo()));
        case ARRAY:
            return new ArrayType(convertType(((ArrayTypeInfo) typeInfo).getElementTypeInfo()));
        case STRUCT:
            StructTypeInfo struct = (StructTypeInfo) typeInfo;
            List<Type> fieldTypes = struct.getFieldTypeInfos().stream()
                    .map(EntityConvertUtils::convertType)
                    .collect(Collectors.toList());
            return new StructType(fieldTypes);
        default:
            // Unknown ODPS types fall back to VARCHAR.
            return Type.VARCHAR;
    }
}
// ARRAY<INT> converts to the engine's ArrayType wrapping Type.INT.
@Test public void testConvertTypeCaseArray() { TypeInfo elementTypeInfo = TypeInfoFactory.INT; ArrayTypeInfo arrayTypeInfo = TypeInfoFactory.getArrayTypeInfo(elementTypeInfo); Type result = EntityConvertUtils.convertType(arrayTypeInfo); Type expectedType = new ArrayType(Type.INT); assertEquals(expectedType, result); }
// This wrapper is unmodifiable: advancing the iteration counter is always rejected.
@Override public void incIteration() { throw new UnsupportedOperationException(); }
// The unmodifiable view must reject incIteration with UnsupportedOperationException.
@Test public void testIncIteration() { assertThrowsUnsupportedOperation(unmodifiables::incIteration); }
// Returns a description of this topology; synchronized to serialize access to the internal builder.
public synchronized TopologyDescription describe() { return internalTopologyBuilder.describe(); }
// groupByKey().count() with no arguments must still build the expected source -> aggregate
// sub-topology and back the count with a persistent local state store.
@Test public void kGroupedStreamZeroArgCountShouldPreserveTopologyStructure() { final StreamsBuilder builder = new StreamsBuilder(); builder.stream("input-topic") .groupByKey() .count(); final Topology topology = builder.build(); final TopologyDescription describe = topology.describe(); assertEquals( "Topologies:\n" + "   Sub-topology: 0\n" + "    Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" + "      --> KSTREAM-AGGREGATE-0000000002\n" + "    Processor: KSTREAM-AGGREGATE-0000000002 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" + "      --> none\n" + "      <-- KSTREAM-SOURCE-0000000000\n\n", describe.toString() ); topology.internalTopologyBuilder.setStreamsConfig(streamsConfig); assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(true)); }
// Creates a coder for Avro generic records with the given schema.
// NOTE(review): this delegates to AvroGenericCoder.of(schema); if this method itself is
// declared on AvroGenericCoder with the same signature it would recurse infinitely --
// confirm it is a facade overload on a different class (e.g. AvroCoder).
public static AvroGenericCoder of(Schema schema) { return AvroGenericCoder.of(schema); }
// A coder for a class embedding a GenericRecord must report itself deterministic.
@Test public void testDeterminismHasGenericRecord() { assertDeterministic(AvroCoder.of(HasGenericRecord.class)); }
/** Encodes the given span into this encoder's byte representation; the wire format is defined by the implementation. */
public abstract byte[] encode(MutableSpan input);
// A span with only traceId and id encodes to minimal JSON v2 with exactly those two fields.
@Test void span_minimum_JSON_V2() { MutableSpan span = new MutableSpan(); span.traceId("7180c278b62e8f6a216a2aea45d08fc9"); span.id("5b4185666d50f68b"); assertThat(new String(encoder.encode(span), UTF_8)) .isEqualTo( "{\"traceId\":\"7180c278b62e8f6a216a2aea45d08fc9\",\"id\":\"5b4185666d50f68b\"}"); }
ClassicGroup getOrMaybeCreateClassicGroup( String groupId, boolean createIfNotExists ) throws GroupIdNotFoundException { Group group = groups.get(groupId); if (group == null && !createIfNotExists) { throw new GroupIdNotFoundException(String.format("Classic group %s not found.", groupId)); } if (group == null) { ClassicGroup classicGroup = new ClassicGroup(logContext, groupId, ClassicGroupState.EMPTY, time, metrics); groups.put(groupId, classicGroup); metrics.onClassicGroupStateTransition(null, classicGroup.currentState()); return classicGroup; } else { if (group.type() == CLASSIC) { return (ClassicGroup) group; } else { // We don't support upgrading/downgrading between protocols at the moment so // we throw an exception if a group exists with the wrong type. throw new GroupIdNotFoundException(String.format("Group %s is not a classic group.", groupId)); } } }
/**
 * Static membership: while the old follower is syncing, a duplicate follower with the same
 * group.instance.id but UNKNOWN member id joins. The broker replaces the member identity,
 * so the old follower's sync fails with FENCED_INSTANCE_ID; after the rebalance timeout the
 * leader's heartbeat expires and only the duplicate follower remains, becoming leader.
 */
@Test public void testStaticMemberFenceDuplicateSyncingFollowerAfterMemberIdChanged() throws Exception {
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder() .build();
    GroupMetadataManagerTestContext.RebalanceResult rebalanceResult = context.staticMembersJoinAndRebalance( "group-id", "leader-instance-id", "follower-instance-id" );
    ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false);
    // Known leader rejoins will trigger rebalance.
    JoinGroupRequestData request = new GroupMetadataManagerTestContext.JoinGroupRequestBuilder() .withGroupId("group-id") .withGroupInstanceId("leader-instance-id") .withMemberId(rebalanceResult.leaderId) .withProtocolSuperset() .withRebalanceTimeoutMs(10000) .build();
    GroupMetadataManagerTestContext.JoinResult leaderJoinResult = context.sendClassicGroupJoin(request);
    assertTrue(leaderJoinResult.records.isEmpty());
    assertFalse(leaderJoinResult.joinFuture.isDone());
    assertTrue(group.isInState(PREPARING_REBALANCE));
    // Old follower rejoins group will match current member.id.
    GroupMetadataManagerTestContext.JoinResult oldFollowerJoinResult = context.sendClassicGroupJoin( request .setMemberId(rebalanceResult.followerId) .setGroupInstanceId("follower-instance-id") );
    assertTrue(oldFollowerJoinResult.records.isEmpty());
    assertTrue(oldFollowerJoinResult.joinFuture.isDone());
    assertTrue(leaderJoinResult.joinFuture.isDone());
    JoinGroupResponseData expectedLeaderResponse = new JoinGroupResponseData() .setErrorCode(Errors.NONE.code()) .setGenerationId(rebalanceResult.generationId + 1) .setMemberId(rebalanceResult.leaderId) .setLeader(rebalanceResult.leaderId) .setProtocolName("range") .setProtocolType("consumer") .setMembers(toJoinResponseMembers(group));
    checkJoinGroupResponse( expectedLeaderResponse, leaderJoinResult.joinFuture.get(), group, COMPLETING_REBALANCE, mkSet("leader-instance-id", "follower-instance-id") );
    assertEquals(rebalanceResult.leaderId, leaderJoinResult.joinFuture.get().memberId());
    assertEquals(rebalanceResult.leaderId, leaderJoinResult.joinFuture.get().leader());
    // Old follower should get a successful join group response.
    assertTrue(oldFollowerJoinResult.joinFuture.isDone());
    JoinGroupResponseData expectedFollowerResponse = new JoinGroupResponseData() .setErrorCode(Errors.NONE.code()) .setGenerationId(rebalanceResult.generationId + 1) .setMemberId(oldFollowerJoinResult.joinFuture.get().memberId()) .setLeader(rebalanceResult.leaderId) .setProtocolName("range") .setProtocolType("consumer");
    checkJoinGroupResponse( expectedFollowerResponse, oldFollowerJoinResult.joinFuture.get(), group, COMPLETING_REBALANCE, Collections.emptySet() );
    assertTrue(group.isInState(COMPLETING_REBALANCE));
    assertEquals(rebalanceResult.followerId, oldFollowerJoinResult.joinFuture.get().memberId());
    assertEquals(rebalanceResult.leaderId, oldFollowerJoinResult.joinFuture.get().leader());
    // Duplicate follower joins group with unknown member id will trigger member.id replacement,
    // and will also trigger a rebalance under CompletingRebalance state; the old follower sync callback
    // will return fenced exception while broker replaces the member identity with the duplicate follower joins.
    GroupMetadataManagerTestContext.SyncResult oldFollowerSyncResult = context.sendClassicGroupSync( new GroupMetadataManagerTestContext.SyncGroupRequestBuilder() .withGroupId("group-id") .withGroupInstanceId("follower-instance-id") .withGenerationId(oldFollowerJoinResult.joinFuture.get().generationId()) .withMemberId(oldFollowerJoinResult.joinFuture.get().memberId()) .build() );
    assertTrue(oldFollowerSyncResult.records.isEmpty());
    assertFalse(oldFollowerSyncResult.syncFuture.isDone());
    GroupMetadataManagerTestContext.JoinResult duplicateFollowerJoinResult = context.sendClassicGroupJoin( request .setMemberId(UNKNOWN_MEMBER_ID) .setGroupInstanceId("follower-instance-id") );
    assertTrue(duplicateFollowerJoinResult.records.isEmpty());
    assertTrue(group.isInState(PREPARING_REBALANCE));
    assertFalse(duplicateFollowerJoinResult.joinFuture.isDone());
    assertTrue(oldFollowerSyncResult.syncFuture.isDone());
    assertEquals(Errors.FENCED_INSTANCE_ID.code(), oldFollowerSyncResult.syncFuture.get().errorCode());
    // Advance clock by rebalance timeout so that the join phase completes with duplicate follower.
    // Both heartbeats will expire but only the leader is kicked out.
    List<ExpiredTimeout<Void, CoordinatorRecord>> timeouts = context.sleep(10000);
    assertEquals(2, timeouts.size());
    timeouts.forEach(timeout -> assertEquals(timeout.result, EMPTY_RESULT));
    assertTrue(duplicateFollowerJoinResult.joinFuture.isDone());
    assertTrue(group.isInState(COMPLETING_REBALANCE));
    assertEquals(3, group.generationId());
    assertEquals(1, group.numMembers());
    assertTrue(group.hasMember(duplicateFollowerJoinResult.joinFuture.get().memberId()));
    assertEquals(duplicateFollowerJoinResult.joinFuture.get().memberId(), duplicateFollowerJoinResult.joinFuture.get().leader());
}
/**
 * Adds a directed edge {@code from -> to}, registering both endpoints as vertices.
 *
 * @throws IllegalArgumentException if either endpoint is null
 */
public void edge(T from, T to) {
    if (from == null || to == null) {
        throw new IllegalArgumentException("Null vertices are not allowed, edge: " + from + "->" + to);
    }
    adjMap.computeIfAbsent(from, vertex -> new LinkedHashSet<>()).add(to);
    // Register the target too, so it shows up even with no outgoing edges.
    adjMap.computeIfAbsent(to, vertex -> new LinkedHashSet<>());
}
// edge() must reject a null endpoint with an IllegalArgumentException naming the offending edge.
@Test void null_vertices_are_not_allowed() { var graph = new Graph<Vertices>(); try { graph.edge(A, null); fail(); } catch (IllegalArgumentException e) { assertEquals("Null vertices are not allowed, edge: A->null", e.getMessage()); } }
/**
 * Satisfies outstanding OPPORTUNISTIC resource requests for the given application
 * attempt against the currently known nodes.
 * <p>
 * Keeps looping over scheduler keys (descending order) until a full pass produces no
 * new allocation, or the per-AM-heartbeat allocation cap (when configured &gt; 0) is hit.
 *
 * @return the containers allocated during this call
 */
@Override
public List<Container> allocateContainers(ResourceBlacklistRequest blackList,
    List<ResourceRequest> oppResourceReqs, ApplicationAttemptId applicationAttemptId,
    OpportunisticContainerContext opportContext, long rmIdentifier, String appSubmitter)
    throws YarnException {
  // Update black list.
  updateBlacklist(blackList, opportContext);
  // Add OPPORTUNISTIC requests to the outstanding ones.
  opportContext.addToOutstandingReqs(oppResourceReqs);
  Set<String> nodeBlackList = new HashSet<>(opportContext.getBlacklist());
  Set<String> allocatedNodes = new HashSet<>();
  List<Container> allocatedContainers = new ArrayList<>();
  // Satisfy the outstanding OPPORTUNISTIC requests.
  boolean continueLoop = true;
  while (continueLoop) {
    continueLoop = false;
    List<Map<Resource, List<Allocation>>> allocations = new ArrayList<>();
    for (SchedulerRequestKey schedulerKey :
        opportContext.getOutstandingOpReqs().descendingKeySet()) {
      // Allocated containers :
      //   Key = Requested Capability,
      //   Value = List of Containers of given cap (the actual container size
      //           might be different than what is requested, which is why
      //           we need the requested capability (key) to match against
      //           the outstanding reqs)
      int remAllocs = -1;
      int maxAllocationsPerAMHeartbeat = getMaxAllocationsPerAMHeartbeat();
      if (maxAllocationsPerAMHeartbeat > 0) {
        remAllocs = maxAllocationsPerAMHeartbeat - allocatedContainers.size()
            - getTotalAllocations(allocations);
        if (remAllocs <= 0) {
          LOG.info("Not allocating more containers as we have reached max "
              + "allocations per AM heartbeat {}", maxAllocationsPerAMHeartbeat);
          break;
        }
      }
      Map<Resource, List<Allocation>> allocation = allocate(
          rmIdentifier, opportContext, schedulerKey, applicationAttemptId,
          appSubmitter, nodeBlackList, allocatedNodes, remAllocs);
      if (allocation.size() > 0) {
        allocations.add(allocation);
        continueLoop = true;
      }
    }
    matchAllocation(allocations, allocatedContainers, opportContext);
  }
  return allocatedContainers;
}
/**
 * Requests with node ("h1") / rack ("/r1") locality plus an ANY fallback should all be
 * satisfied, with the node-local requests (allocation ids 2 and 3) landing on h1.
 */
@Test
public void testNodeLocalAllocation() throws Exception {
  ResourceBlacklistRequest blacklistRequest = ResourceBlacklistRequest.newInstance( new ArrayList<>(), new ArrayList<>());
  List<ResourceRequest> reqs = Arrays.asList( ResourceRequest.newBuilder().allocationRequestId(1) .priority(PRIORITY_NORMAL) .resourceName(ResourceRequest.ANY) .capability(CAPABILITY_1GB) .relaxLocality(true) .executionType(ExecutionType.OPPORTUNISTIC).build(), ResourceRequest.newBuilder().allocationRequestId(2) .priority(PRIORITY_NORMAL) .resourceName("/r1") .capability(CAPABILITY_1GB) .relaxLocality(true) .executionType(ExecutionType.OPPORTUNISTIC).build(), ResourceRequest.newBuilder().allocationRequestId(2) .priority(PRIORITY_NORMAL) .resourceName("h1") .capability(CAPABILITY_1GB) .relaxLocality(true) .executionType(ExecutionType.OPPORTUNISTIC).build(), ResourceRequest.newBuilder().allocationRequestId(2) .priority(PRIORITY_NORMAL) .resourceName(ResourceRequest.ANY) .capability(CAPABILITY_1GB) .relaxLocality(true) .executionType(ExecutionType.OPPORTUNISTIC).build(), ResourceRequest.newBuilder().allocationRequestId(3) .priority(PRIORITY_NORMAL) .resourceName("/r1") .capability(CAPABILITY_1GB) .relaxLocality(true) .executionType(ExecutionType.OPPORTUNISTIC).build(), ResourceRequest.newBuilder().allocationRequestId(3) .priority(PRIORITY_NORMAL) .resourceName("h1") .capability(CAPABILITY_1GB) .relaxLocality(true) .executionType(ExecutionType.OPPORTUNISTIC).build(), ResourceRequest.newBuilder().allocationRequestId(3) .priority(PRIORITY_NORMAL) .resourceName(ResourceRequest.ANY) .capability(CAPABILITY_1GB) .relaxLocality(true) .executionType(ExecutionType.OPPORTUNISTIC).build());
  ApplicationAttemptId appAttId = ApplicationAttemptId.newInstance( ApplicationId.newInstance(0L, 1), 1);
  oppCntxt.updateNodeList( Arrays.asList( RemoteNode.newInstance( NodeId.newInstance("h1", 1234), "h1:1234", "/r1"), RemoteNode.newInstance( NodeId.newInstance("h2", 1234), "h2:1234", "/r1"), RemoteNode.newInstance( NodeId.newInstance("h3", 1234), "h3:1234", "/r1")));
  List<Container> containers = allocator.allocateContainers( blacklistRequest, reqs, appAttId, oppCntxt, 1L, "luser");
  LOG.info("Containers: {}", containers);
  // all 3 containers should be allocated.
  Assert.assertEquals(3, containers.size());
  // container with allocation id 2 and 3 should be allocated on node h1
  for (Container c : containers) {
    if (c.getAllocationRequestId() == 2 || c.getAllocationRequestId() == 3) {
      Assert.assertEquals("h1:1234", c.getNodeHttpAddress());
    }
  }
}
static List<ClassLoader> selectClassLoaders(ClassLoader classLoader) { // list prevents reordering! List<ClassLoader> classLoaders = new ArrayList<>(); if (classLoader != null) { classLoaders.add(classLoader); } // check if TCCL is same as given classLoader ClassLoader tccl = Thread.currentThread().getContextClassLoader(); if (tccl != null && tccl != classLoader) { classLoaders.add(tccl); } // Hazelcast core classLoader ClassLoader coreClassLoader = ServiceLoader.class.getClassLoader(); if (coreClassLoader != classLoader && coreClassLoader != tccl) { classLoaders.add(coreClassLoader); } // Hazelcast client classLoader try { Class<?> hzClientClass = Class.forName("com.hazelcast.client.HazelcastClient"); ClassLoader clientClassLoader = hzClientClass.getClassLoader(); if (clientClassLoader != classLoader && clientClassLoader != tccl && clientClassLoader != coreClassLoader) { classLoaders.add(clientClassLoader); } } catch (ClassNotFoundException ignore) { // ignore since we may not have the HazelcastClient in the classpath ignore(ignore); } return classLoaders; }
// With a fresh TCCL and a null argument, exactly two loaders are selected (TCCL + core loader).
@Test public void selectingSimpleDifferentThreadContextClassLoader() { Thread currentThread = Thread.currentThread(); ClassLoader tccl = currentThread.getContextClassLoader(); currentThread.setContextClassLoader(new URLClassLoader(new URL[0])); List<ClassLoader> classLoaders = ServiceLoader.selectClassLoaders(null); currentThread.setContextClassLoader(tccl); assertEquals(2, classLoaders.size()); }
/**
 * Runs all computation steps under a step-level profiler, then notifies the
 * listener (when one is registered) whether every step completed.
 */
public void execute() {
    Profiler profiler = Profiler.create(LOGGER).logTimeLast(true);
    boolean completedNormally = false;
    try {
        executeSteps(profiler);
        completedNormally = true;
    } finally {
        // The listener fires even when a step threw, reporting success/failure.
        if (listener != null) {
            executeListener(completedNormally);
        }
    }
}
// A step that registers the same statistic key twice must make execute() fail with IAE naming the key.
@Test public void execute_throws_IAE_if_step_adds_statistic_multiple_times() { ComputationStep step = new StepWithStatistics("A Step", "foo", "100", "foo", "20"); try (ChangeLogLevel executor = new ChangeLogLevel(ComputationStepExecutor.class, LoggerLevel.INFO)) { assertThatThrownBy(() -> new ComputationStepExecutor(mockComputationSteps(step), taskInterrupter).execute()) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Statistic with key [foo] is already present"); } }
public static boolean shouldEnablePushdownForTable(ConnectorSession session, Table table, String path, Optional<Partition> optionalPartition) { if (!isS3SelectPushdownEnabled(session)) { return false; } if (path == null) { return false; } // Hive table partitions could be on different storages, // as a result, we have to check each individual optionalPartition Properties schema = optionalPartition .map(partition -> getHiveSchema(partition, table)) .orElseGet(() -> getHiveSchema(table)); return shouldEnablePushdownForTable(table, path, schema); }
// Pushdown is refused for a BINARY column both with and without a partition supplied.
@Test public void testShouldNotEnableSelectPushdownWhenColumnTypesAreNotSupported() { Column newColumn = new Column("column", HIVE_BINARY, Optional.empty(), Optional.empty()); Table newTable = new Table( "db", "table", "owner", EXTERNAL_TABLE, storage, singletonList(newColumn), emptyList(), emptyMap(), Optional.empty(), Optional.empty()); assertFalse(shouldEnablePushdownForTable(session, newTable, "s3://fakeBucket/fakeObject", Optional.empty())); Partition newPartition = new Partition("db", "table", emptyList(), storage, singletonList(column), emptyMap(), Optional.empty(), false, false, 1234, 4567L, Optional.empty()); assertFalse(shouldEnablePushdownForTable(session, newTable, "s3://fakeBucket/fakeObject", Optional.of(newPartition))); }
// Switches subsequent iterable assertions to exact double equality via EXACT_EQUALITY_CORRESPONDENCE.
public DoubleArrayAsIterable usingExactEquality() { return new DoubleArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject()); }
// containsAtLeast under exact equality passes when the expected values are present bit-for-bit.
@Test public void usingExactEquality_containsAtLeast_primitiveDoubleArray_success() { assertThat(array(1.1, 2.2, 3.3)).usingExactEquality().containsAtLeast(array(2.2, 1.1)); }
@Override @CacheEvict(cacheNames = RedisKeyConstants.NOTIFY_TEMPLATE, allEntries = true) // allEntries 清空所有缓存,因为 id 不是直接的缓存 code,不好清理 public void deleteNotifyTemplate(Long id) { // 校验存在 validateNotifyTemplateExists(id); // 删除 notifyTemplateMapper.deleteById(id); }
// Deleting an existing template removes its row from the database.
@Test
public void testDeleteNotifyTemplate_success() {
    // Mock data.
    NotifyTemplateDO dbNotifyTemplate = randomPojo(NotifyTemplateDO.class);
    notifyTemplateMapper.insert(dbNotifyTemplate);// @Sql: insert an existing row first
    // Prepare the argument.
    Long id = dbNotifyTemplate.getId();
    // Invoke.
    notifyTemplateService.deleteNotifyTemplate(id);
    // Verify the row no longer exists.
    assertNull(notifyTemplateMapper.selectById(id));
}
/**
 * Parses a raw CSV cell into the Java value matching the field's schema type.
 *
 * NOTE(review): the catch only wraps IllegalArgumentException. If {@code Instant} here is
 * java.time.Instant, a malformed DATETIME cell throws DateTimeParseException (not an IAE)
 * and would escape unwrapped; if it is org.joda.time.Instant the wrap works as intended --
 * confirm which Instant is imported.
 *
 * @throws IllegalArgumentException if the cell cannot be parsed as the field's type
 * @throws UnsupportedOperationException for schema types with no built-in parser
 */
static Object parseCell(String cell, Schema.Field field) { Schema.FieldType fieldType = field.getType(); try { switch (fieldType.getTypeName()) { case STRING: return cell; case INT16: return Short.parseShort(cell); case INT32: return Integer.parseInt(cell); case INT64: return Long.parseLong(cell); case BOOLEAN: return Boolean.parseBoolean(cell); case BYTE: return Byte.parseByte(cell); case DECIMAL: return new BigDecimal(cell); case DOUBLE: return Double.parseDouble(cell); case FLOAT: return Float.parseFloat(cell); case DATETIME: return Instant.parse(cell); default: throw new UnsupportedOperationException( "Unsupported type: " + fieldType + ", consider using withCustomRecordParsing"); } } catch (IllegalArgumentException e) { throw new IllegalArgumentException( e.getMessage() + " field " + field.getName() + " was received -- type mismatch"); } }
// Parsing "true" as a FLOAT field must raise an IAE with the wrapped parser message plus field context.
@Test public void givenCellSchemaFieldMismatch_throws() { String boolTrue = "true"; Schema schema = Schema.builder().addBooleanField("a_boolean").addFloatField("a_float").build(); IllegalArgumentException e = assertThrows( IllegalArgumentException.class, () -> CsvIOParseHelpers.parseCell(boolTrue, schema.getField("a_float"))); assertEquals( "For input string: \"" + boolTrue + "\" field a_float was received -- type mismatch", e.getMessage()); }
/**
 * Inserts the config when no state exists for (dataId, group, tenant), otherwise performs
 * a CAS-style update. Database failures are logged at FATAL level and rethrown unchanged.
 */
@Override
public ConfigOperateResult insertOrUpdateCas(String srcIp, String srcUser, ConfigInfo configInfo,
        Map<String, Object> configAdvanceInfo) {
    try {
        ConfigInfoStateWrapper state = findConfigInfoState(configInfo.getDataId(), configInfo.getGroup(),
                configInfo.getTenant());
        // Missing state means this is a brand-new config row.
        return state == null
                ? addConfigInfo(srcIp, srcUser, configInfo, configAdvanceInfo)
                : updateConfigInfoCas(configInfo, srcIp, srcUser, configAdvanceInfo);
    } catch (Exception e) {
        LogUtil.FATAL_LOG.error("[db-error] try to update or add config failed, {}", e.getMessage(), e);
        throw e;
    }
}
/**
 * insertOrUpdateCas insert path: with no existing config state, a config row is inserted,
 * both tags ("tag1"/"tag2") are linked via CONFIG_TAGS_RELATION, and one history record
 * with op type "I" is written.
 *
 * NOTE(review): the stub uses eq(0) while the verify uses eq(0L) for the first argument of
 * insertConfigHistoryAtomic -- confirm the intended parameter type; only the verify matters
 * for a doNothing() stub, but the mismatch looks accidental.
 */
@Test
void testInsertOrUpdateCasOfInsertConfigSuccess() {
    Map<String, Object> configAdvanceInfo = new HashMap<>();
    configAdvanceInfo.put("config_tags", "tag1,tag2");
    String dataId = "dataId";
    String group = "group";
    String tenant = "tenant";
    String appName = "appName";
    String content = "content132456";
    ConfigInfo configInfo = new ConfigInfo(dataId, group, tenant, appName, content);
    long insertConfigIndoId = 12345678765L;
    GeneratedKeyHolder generatedKeyHolder = TestCaseUtils.createGeneratedKeyHolder(insertConfigIndoId);
    externalStorageUtilsMockedStatic.when(ExternalStorageUtils::createKeyHolder).thenReturn(generatedKeyHolder);
    //mock get config state
    Mockito.when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}), eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenReturn(null, new ConfigInfoStateWrapper());
    //mock insert config info
    Mockito.when(jdbcTemplate.update(any(PreparedStatementCreator.class), eq(generatedKeyHolder))).thenReturn(1);
    Mockito.when(jdbcTemplate.update(eq(externalConfigInfoPersistService.mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_TAGS_RELATION) .insert(Arrays.asList("id", "tag_name", "tag_type", "data_id", "group_id", "tenant_id"))), eq(insertConfigIndoId), eq("tag1"), eq(StringUtils.EMPTY), eq(dataId), eq(group), eq(tenant))).thenReturn(1);
    Mockito.when(jdbcTemplate.update(eq(externalConfigInfoPersistService.mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_TAGS_RELATION) .insert(Arrays.asList("id", "tag_name", "tag_type", "data_id", "group_id", "tenant_id"))), eq(insertConfigIndoId), eq("tag2"), eq(StringUtils.EMPTY), eq(dataId), eq(group), eq(tenant))).thenReturn(1);
    String srcIp = "srcIp";
    String srcUser = "srcUser";
    //mock insert config info
    Mockito.doNothing().when(historyConfigInfoPersistService) .insertConfigHistoryAtomic(eq(0), eq(configInfo), eq(srcIp), eq(srcUser), any(Timestamp.class), eq("I"));
    externalConfigInfoPersistService.insertOrUpdateCas(srcIp, srcUser, configInfo, configAdvanceInfo);
    //expect insert config info
    Mockito.verify(jdbcTemplate, times(1)).update(any(PreparedStatementCreator.class), eq(generatedKeyHolder));
    //expect insert config tags
    Mockito.verify(jdbcTemplate, times(1)).update(eq( externalConfigInfoPersistService.mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_TAGS_RELATION) .insert(Arrays.asList("id", "tag_name", "tag_type", "data_id", "group_id", "tenant_id"))), eq(insertConfigIndoId), eq("tag1"), eq(StringUtils.EMPTY), eq(dataId), eq(group), eq(tenant));
    Mockito.verify(jdbcTemplate, times(1)).update(eq( externalConfigInfoPersistService.mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_TAGS_RELATION) .insert(Arrays.asList("id", "tag_name", "tag_type", "data_id", "group_id", "tenant_id"))), eq(insertConfigIndoId), eq("tag2"), eq(StringUtils.EMPTY), eq(dataId), eq(group), eq(tenant));
    //expect insert history info
    Mockito.verify(historyConfigInfoPersistService, times(1)) .insertConfigHistoryAtomic(eq(0L), eq(configInfo), eq(srcIp), eq(srcUser), any(Timestamp.class), eq("I"));
}
// Static factory returning a fresh, empty Builder.
public static Builder builder() { return new Builder(); }
// A rewrite listing the same file multiple times (and equal copies) must deduplicate to a
// single added and a single deleted file in the snapshot summary.
@TestTemplate public void rewriteWithDuplicateFiles() { assertThat(listManifestFiles()).isEmpty(); table.newAppend().appendFile(FILE_A2).appendFile(FILE_A2).appendFile(FILE_A2).commit(); table .newRewrite() .deleteFile(FILE_A2) .deleteFile(DataFiles.builder(SPEC).copy(FILE_A2).build()) .deleteFile(FILE_A2) .addFile(FILE_A) .addFile(DataFiles.builder(SPEC).copy(FILE_A).build()) .addFile(FILE_A) .commit(); assertThat(table.currentSnapshot().summary()) .hasSize(14) .containsEntry(SnapshotSummary.ADDED_FILES_PROP, "1") .containsEntry(SnapshotSummary.ADDED_FILE_SIZE_PROP, "10") .containsEntry(SnapshotSummary.ADDED_RECORDS_PROP, "1") .containsEntry(SnapshotSummary.CHANGED_PARTITION_COUNT_PROP, "1") .containsEntry(SnapshotSummary.DELETED_FILES_PROP, "1") .containsEntry(SnapshotSummary.DELETED_RECORDS_PROP, "1") .containsEntry(SnapshotSummary.REMOVED_FILE_SIZE_PROP, "10") .containsEntry(SnapshotSummary.TOTAL_DATA_FILES_PROP, "1") .containsEntry(SnapshotSummary.TOTAL_DELETE_FILES_PROP, "0") .containsEntry(SnapshotSummary.TOTAL_EQ_DELETES_PROP, "0") .containsEntry(SnapshotSummary.TOTAL_POS_DELETES_PROP, "0") .containsEntry(SnapshotSummary.TOTAL_FILE_SIZE_PROP, "10") .containsEntry(SnapshotSummary.TOTAL_RECORDS_PROP, "1"); }
/**
 * Periodic TTL clean-up: walks every tenant (paged, 10k per page) and removes its
 * expired alarms. A failure for one tenant is logged and must not stop the sweep
 * for the remaining tenants.
 */
@Scheduled(initialDelayString = "#{T(org.apache.commons.lang3.RandomUtils).nextLong(0, ${sql.ttl.alarms.checking_interval})}",
        fixedDelayString = "${sql.ttl.alarms.checking_interval}")
public void cleanUp() {
    PageDataIterable<TenantId> allTenants = new PageDataIterable<>(tenantService::findTenantsIds, 10_000);
    for (TenantId currentTenant : allTenants) {
        try {
            cleanUp(currentTenant);
        } catch (Exception e) {
            log.warn("Failed to clean up alarms by ttl for tenant {}", currentTenant, e);
        }
    }
}
// With a 1-day alarm TTL, cleanUp() must delete every alarm older than the TTL, leave the
// fresh ones untouched, and log the number of removed alarms per tenant.
@Test public void testAlarmsCleanUp() throws Exception { int ttlDays = 1; updateDefaultTenantProfileConfig(profileConfiguration -> { profileConfiguration.setAlarmsTtlDays(ttlDays); }); loginTenantAdmin(); Device device = createDevice("device_0", "device_0"); int count = 100; long ts = System.currentTimeMillis() - TimeUnit.DAYS.toMillis(ttlDays) - TimeUnit.MINUTES.toMillis(10); List<AlarmId> outdatedAlarms = new ArrayList<>(); List<AlarmId> freshAlarms = new ArrayList<>(); for (int i = 0; i < count; i++) { Alarm alarm = Alarm.builder() .tenantId(tenantId) .originator(device.getId()) .cleared(false) .acknowledged(false) .severity(AlarmSeverity.CRITICAL) .type("outdated_alarm_" + i) .startTs(ts) .endTs(ts) .build(); alarm.setId(new AlarmId(UUID.randomUUID())); alarm.setCreatedTime(ts); outdatedAlarms.add(alarmDao.save(tenantId, alarm).getId()); alarm.setType("fresh_alarm_" + i); alarm.setStartTs(System.currentTimeMillis()); alarm.setEndTs(alarm.getStartTs()); alarm.setId(new AlarmId(UUID.randomUUID())); alarm.setCreatedTime(alarm.getStartTs()); freshAlarms.add(alarmDao.save(tenantId, alarm).getId()); } alarmsCleanUpService.cleanUp(); for (AlarmId outdatedAlarm : outdatedAlarms) { verify(alarmService).delAlarm(eq(tenantId), eq(outdatedAlarm), eq(false)); } for (AlarmId freshAlarm : freshAlarms) { verify(alarmService, never()).delAlarm(eq(tenantId), eq(freshAlarm), eq(false)); } verify(cleanUpServiceLogger).info(startsWith("Removed {} outdated alarm"), eq((long) count), eq(tenantId), any()); }
// Triggers bean validation of the input under the OnUpdate group before proceeding.
@Validated(OnUpdate.class) void validateForUpdate(@Valid InputWithCustomValidator input){ // do something
}
// A null id violates the OnUpdate validation group and must raise ConstraintViolationException.
@Test void whenInputIsInvalidForUpdate_thenThrowsException() { InputWithCustomValidator input = validInput(); input.setId(null); assertThrows(ConstraintViolationException.class, () -> { service.validateForUpdate(input); }); }
// Stores (or overwrites) the value for the given field name; null values are permitted.
public void putValue(String fieldName, @Nullable Object value) { _fieldToValueMap.put(fieldName, value); }
// A row with a value must not compare equal to an empty row.
@Test public void testEmptyRowNotEqualToNonEmptyRow() { GenericRow first = new GenericRow(); GenericRow second = new GenericRow(); second.putValue("one", 1); Assert.assertNotEquals(first, second); }
/**
 * Computes the last-insert id from the storage-node update results and the values
 * produced by the auto-increment generator. Positive node-reported ids and numeric
 * generated values are pooled; the minimum is returned, or 0 when neither source
 * produced a candidate.
 */
private long getLastInsertId(final Collection<UpdateResult> updateResults, final Collection<Comparable<?>> autoIncrementGeneratedValues) {
    List<Long> candidates = new ArrayList<>(updateResults.size() + autoIncrementGeneratedValues.size());
    for (UpdateResult updateResult : updateResults) {
        long nodeLastInsertId = updateResult.getLastInsertId();
        // Zero or negative means the node reported no generated key.
        if (nodeLastInsertId > 0L) {
            candidates.add(nodeLastInsertId);
        }
    }
    for (Comparable<?> generatedValue : autoIncrementGeneratedValues) {
        if (generatedValue instanceof Number) {
            candidates.add(((Number) generatedValue).longValue());
        }
    }
    return candidates.isEmpty() ? 0L : getMinLastInsertId(candidates);
}
// With no update results but auto-generated values present, lastInsertId comes from the generated values.
@Test void assertGetLastInsertIdWhenAutoGeneratedIdIsNotEmpty() { UpdateResponseHeader actual = new UpdateResponseHeader(mock(SQLStatement.class), Collections.emptyList(), createAutoIncrementGeneratedValues()); assertThat(actual.getLastInsertId(), is(1L)); }
/**
 * Fetches (and caches) statistics for the given partitions of a Hive table, returning
 * them keyed by the plain partition name that callers passed in.
 */
@Override
public Map<String, HivePartitionStats> getPartitionStatistics(Table table, List<String> partitionNames) {
    String dbName = ((HiveMetaStoreTable) table).getDbName();
    String tableName = ((HiveMetaStoreTable) table).getTableName();
    List<HivePartitionName> cacheKeys = partitionNames.stream()
            .map(name -> HivePartitionName.of(dbName, tableName, name))
            .collect(Collectors.toList());
    Map<HivePartitionName, HivePartitionStats> cached = getAll(partitionStatsCache, cacheKeys);
    // Re-key the cache result back to the plain partition-name strings.
    return cached.entrySet()
            .stream()
            .collect(toImmutableMap(entry -> entry.getKey().getPartitionNames().get(), Map.Entry::getValue));
}
// Partition statistics fetched through the cache must expose the backing store's row counts,
// file sizes and per-column stats, and both partitions must then be present in the cache.
@Test public void testGetPartitionStatistics() { CachingHiveMetastore cachingHiveMetastore = new CachingHiveMetastore( metastore, executor, expireAfterWriteSec, refreshAfterWriteSec, 1000, false); com.starrocks.catalog.Table hiveTable = cachingHiveMetastore.getTable("db1", "table1"); Map<String, HivePartitionStats> statistics = cachingHiveMetastore.getPartitionStatistics( hiveTable, Lists.newArrayList("col1=1", "col1=2")); HivePartitionStats stats1 = statistics.get("col1=1"); HiveCommonStats commonStats1 = stats1.getCommonStats(); Assert.assertEquals(50, commonStats1.getRowNums()); Assert.assertEquals(100, commonStats1.getTotalFileBytes()); HiveColumnStats columnStatistics1 = stats1.getColumnStats().get("col2"); Assert.assertEquals(0, columnStatistics1.getTotalSizeBytes()); Assert.assertEquals(1, columnStatistics1.getNumNulls()); Assert.assertEquals(2, columnStatistics1.getNdv()); HivePartitionStats stats2 = statistics.get("col1=2"); HiveCommonStats commonStats2 = stats2.getCommonStats(); Assert.assertEquals(50, commonStats2.getRowNums()); Assert.assertEquals(100, commonStats2.getTotalFileBytes()); HiveColumnStats columnStatistics2 = stats2.getColumnStats().get("col2"); Assert.assertEquals(0, columnStatistics2.getTotalSizeBytes()); Assert.assertEquals(2, columnStatistics2.getNumNulls()); Assert.assertEquals(5, columnStatistics2.getNdv()); List<HivePartitionName> partitionNames = Lists.newArrayList( HivePartitionName.of("db1", "table1", "col1=1"), HivePartitionName.of("db1", "table1", "col1=2")); Assert.assertEquals(2, cachingHiveMetastore.getPresentPartitionsStatistics(partitionNames).size()); }
/** Releases the underlying context, if one was ever created; a no-op otherwise. */
protected void close() {
    if (ctx == null) {
        return;
    }
    ctx.close();
}
// Test: while a first subscribe on the subscription is still pending (topic creation future never
// completes), a second subscribe with a different consumer/request id must be rejected with
// ServerError.ConsumerBusy rather than creating a duplicate consumer.
@Test(timeOut = 30000) public void testDuplicateConcurrentSubscribeCommand() throws Exception { resetChannel(); setChannelConnected(); CompletableFuture<Topic> delayFuture = new CompletableFuture<>(); doReturn(delayFuture).when(brokerService).getOrCreateTopic(any(String.class)); // Create subscriber first time ByteBuf clientCommand = Commands.newSubscribe(successTopicName, // successSubName, 1 /* consumer id */, 1 /* request id */, SubType.Exclusive, 0, "test" /* consumer name */, 0 /* avoid reseting cursor */); channel.writeInbound(clientCommand); BackGroundExecutor backGroundExecutor = startBackgroundExecutorForEmbeddedChannel(channel); // Create producer second time clientCommand = Commands.newSubscribe(successTopicName, // successSubName, 2 /* consumer id */, 2 /* request id */, SubType.Exclusive, 0, "test" /* consumer name */, 0 /* avoid reseting cursor */); channel.writeInbound(clientCommand); Awaitility.await().untilAsserted(() -> { Object response = getResponse(); assertTrue(response instanceof CommandError, "Response is not CommandError but " + response); CommandError error = (CommandError) response; assertEquals(error.getError(), ServerError.ConsumerBusy); }); // cleanup. backGroundExecutor.close(); channel.finish(); }
/**
 * Inserts the shared HTTP-content extractor in front of {@code handler} when that handler is a
 * byte-level decoder/codec, so it receives raw bytes instead of HttpContent objects. Idempotent:
 * if the extractor is already in the pipeline nothing is added. For persistent connections the
 * extractor is removed again when the connection terminates.
 */
static void autoAddHttpExtractor(Connection c, String name, ChannelHandler handler) {
    boolean decodesBytes = handler instanceof ByteToMessageDecoder
            || handler instanceof ByteToMessageCodec
            || handler instanceof CombinedChannelDuplexHandler;
    if (!decodesBytes) {
        return;
    }
    String extractorName = name + "$extractor";
    var pipeline = c.channel().pipeline();
    if (pipeline.context(extractorName) != null) {
        return;
    }
    pipeline.addBefore(name, extractorName, HTTP_EXTRACTOR);
    if (c.isPersistent()) {
        c.onTerminate().subscribe(null, null, () -> c.removeHandler(extractorName));
    }
}
// Test: with the extractor installed before a JsonObjectDecoder, an HTTP response whose body is a
// JSON array split across two chunks is re-framed into the HttpResponse head, two complete JSON
// object ByteBufs, and the empty last-content marker.
@Test void httpAndJsonDecoders() { EmbeddedChannel channel = new EmbeddedChannel(); Connection testContext = () -> channel; ChannelHandler handler = new JsonObjectDecoder(true); testContext.addHandlerLast("foo", handler); HttpOperations.autoAddHttpExtractor(testContext, "foo", handler); String json1 = "[{\"some\": 1} , {\"valu"; String json2 = "e\": true, \"test\": 1}]"; Object[] content = new Object[3]; content[0] = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); content[1] = new DefaultHttpContent(Unpooled.copiedBuffer(json1, CharsetUtil.UTF_8)); content[2] = new DefaultLastHttpContent(Unpooled.copiedBuffer(json2, CharsetUtil.UTF_8)); channel.writeInbound(content); Object t = channel.readInbound(); assertThat(t).isInstanceOf(HttpResponse.class); assertThat(t).isNotInstanceOf(HttpContent.class); t = channel.readInbound(); assertThat(t).isInstanceOf(ByteBuf.class); ByteBuf b = (ByteBuf) t; assertThat(b.readCharSequence(b.readableBytes(), CharsetUtil.UTF_8)).isEqualTo("{\"some\": 1}"); b.release(); t = channel.readInbound(); assertThat(t).isInstanceOf(ByteBuf.class); b = (ByteBuf) t; assertThat(b.readCharSequence(b.readableBytes(), CharsetUtil.UTF_8)).isEqualTo("{\"value\": true, \"test\": 1}"); b.release(); t = channel.readInbound(); assertThat(t).isEqualTo(LastHttpContent.EMPTY_LAST_CONTENT); ((LastHttpContent) t).release(); t = channel.readInbound(); assertThat(t).isNull(); }
/** Returns the processing mode configured for this command. */
public ProcessingMode getMode() {
    return this.mode;
}
// Test: the CLI flag --mode=packaged is parsed into ProcessingMode.packaged on the Jar command.
@Test public void testParse_mode() { Jar jarCommand = CommandLine.populateCommand( new Jar(), "--target=test-image-ref", "--mode=packaged", "my-app.jar"); assertThat(jarCommand.getMode()).isEqualTo(ProcessingMode.packaged); }
// Builds a Hadoop Deserializer for AvroKey or AvroValue wrappers. The key and value branches are
// symmetric: each reads its writer schema (and optional reader schema) from the configuration and
// creates a DatumReader from the configured data model — resolving writer->reader when a reader
// schema is present, otherwise reading with the writer schema alone. Any other wrapper class is a
// programming error and is rejected.
@Override public Deserializer<AvroWrapper<T>> getDeserializer(Class<AvroWrapper<T>> c) { Configuration conf = getConf(); GenericData dataModel = createDataModel(conf); if (AvroKey.class.isAssignableFrom(c)) { Schema writerSchema = getKeyWriterSchema(conf); Schema readerSchema = getKeyReaderSchema(conf); DatumReader<T> datumReader = (readerSchema != null) ? dataModel.createDatumReader(writerSchema, readerSchema) : dataModel.createDatumReader(writerSchema); return new AvroKeyDeserializer<>(writerSchema, readerSchema, datumReader); } else if (AvroValue.class.isAssignableFrom(c)) { Schema writerSchema = getValueWriterSchema(conf); Schema readerSchema = getValueReaderSchema(conf); DatumReader<T> datumReader = (readerSchema != null) ? dataModel.createDatumReader(writerSchema, readerSchema) : dataModel.createDatumReader(writerSchema); return new AvroValueDeserializer<>(writerSchema, readerSchema, datumReader); } else { throw new IllegalStateException("Only AvroKey and AvroValue are supported."); } }
// Test: the ReflectData model used by the created key deserializer carries the same ClassLoader as
// the Configuration it was built from.
@Test void classPath() throws Exception { Configuration conf = new Configuration(); ClassLoader loader = conf.getClass().getClassLoader(); AvroSerialization serialization = new AvroSerialization(); serialization.setConf(conf); AvroDeserializer des = (AvroDeserializer) serialization.getDeserializer(AvroKey.class); ReflectData data = (ReflectData) ((ReflectDatumReader) des.mAvroDatumReader).getData(); assertEquals(loader, data.getClassLoader()); }
// Thin factory delegate: builds an AvroGenericCoder for the given Avro schema.
public static AvroGenericCoder of(Schema schema) { return AvroGenericCoder.of(schema); }
// Test: a schema that references itself (a self-recursive record field) makes the coder
// non-deterministic, with the recursion reported as the reason.
@Test public void testAvroCoderCyclicRecords() { // Recursive record assertNonDeterministic( AvroCoder.of( SchemaBuilder.record("cyclicRecord") .fields() .name("cycle") .type("cyclicRecord") .noDefault() .endRecord()), reason("cyclicRecord.cycle", "cyclicRecord appears recursively")); }
/**
 * Orders pointers first by their page id and, within the same page, by their byte offset.
 * Uses {@link Integer#compare} / {@link Long#compare} so the result is overflow-safe.
 */
@Override
public int compareTo(SegmentPointer other) {
    final int pageOrder = Integer.compare(idPage, other.idPage);
    return pageOrder != 0 ? pageOrder : Long.compare(offset, other.offset);
}
// Test: within the same page, ordering follows the offset — smaller offset compares as -1,
// larger as 1, and equal pointers compare as 0.
@Test public void testCompareInSameSegment() { final SegmentPointer minor = new SegmentPointer(1, 10); final SegmentPointer otherMinor = new SegmentPointer(1, 10); final SegmentPointer major = new SegmentPointer(1, 12); assertEquals(-1, minor.compareTo(major), "minor is less than major"); assertEquals(1, major.compareTo(minor), "major is greater than minor"); assertEquals(0, minor.compareTo(otherMinor), "minor equals itself"); }
// Closes every cached backend connection, optionally rolling back first when a transaction is in
// progress. All work on the cache happens under its monitor; per-connection SQLExceptions are
// collected rather than thrown so every connection gets a close attempt. Post-processors are kept
// on force-rollback — presumably so a later normal close can still run them (TODO confirm).
public Collection<SQLException> closeConnections(final boolean forceRollback) { Collection<SQLException> result = new LinkedList<>(); synchronized (cachedConnections) { resetSessionVariablesIfNecessary(cachedConnections.values(), result); for (Connection each : cachedConnections.values()) { try { if (forceRollback && connectionSession.getTransactionStatus().isInTransaction()) { each.rollback(); } each.close(); } catch (final SQLException ex) { result.add(ex); } } cachedConnections.clear(); } if (!forceRollback) { connectionPostProcessors.clear(); } return result; }
// Test: with an active transaction, closeConnections(true) rolls the cached connection back.
@Test void assertCloseConnectionsCorrectlyWhenForceRollbackAndInTransaction() throws SQLException { connectionSession.getTransactionStatus().setInTransaction(true); Connection connection = prepareCachedConnections(); databaseConnectionManager.closeConnections(true); verify(connection).rollback(); }
// Substitutes placeholders in a pattern: metadata placeholders are resolved first by the
// metadata-based overload, then $[dot.path] data placeholders are resolved against the message's
// JSON payload by walking the dot-separated path. Only scalar (value) JSON nodes are substituted;
// unresolvable placeholders are left in place (see testNoReplacement). Note the matcher iterates
// the pre-replacement string while `result` is rewritten — order-sensitive, do not restructure.
public static String processPattern(String pattern, TbMsg tbMsg) { try { String result = processPattern(pattern, tbMsg.getMetaData()); JsonNode json = JacksonUtil.toJsonNode(tbMsg.getData()); if (json.isObject()) { Matcher matcher = DATA_PATTERN.matcher(result); while (matcher.find()) { String group = matcher.group(2); String[] keys = group.split("\\."); JsonNode jsonNode = json; for (String key : keys) { if (!StringUtils.isEmpty(key) && jsonNode != null) { jsonNode = jsonNode.get(key); } else { jsonNode = null; break; } } if (jsonNode != null && jsonNode.isValueNode()) { result = result.replace(formatDataVarTemplate(group), jsonNode.asText()); } } } return result; } catch (Exception e) { throw new RuntimeException("Failed to process pattern!", e); } }
// Test: placeholders whose keys are absent from both metadata and payload are left untouched.
@Test public void testNoReplacement() { String pattern = "ABC ${metadata_key} $[data_key]"; TbMsgMetaData md = new TbMsgMetaData(); md.putValue("key", "metadata_value"); ObjectNode node = JacksonUtil.newObjectNode(); node.put("key", "data_value"); TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, TenantId.SYS_TENANT_ID, md, JacksonUtil.toString(node)); String result = TbNodeUtils.processPattern(pattern, msg); Assertions.assertEquals(pattern, result); }
// Polls the WID document status. NO_DOCUMENTS / PENDING / NOK short-circuit with their own
// responses (each case returns, so no fall-through); any other status drops out of the switch into
// the happy path: log receipt, start an RDA scanning session, persist its confirm secret/url/id on
// the app session, and return the RDA handoff. An empty RDA session map is treated as failure.
// NOTE(review): the happy-path branch assumes any unlisted status means "documents received" —
// confirm the status vocabulary with the digid client contract.
@Override public AppResponse process(Flow flow, AppRequest params) { var result = digidClient.getWidstatus(appSession.getWidRequestId()); switch(result.get("status").toString()){ case "NO_DOCUMENTS": appSession.setRdaSessionStatus("NO_DOCUMENTS"); appSession.setBrpIdentifier(result.get("brp_identifier").toString()); appSessionService.save(appSession); return new StatusResponse("NO_DOCUMENTS"); case "PENDING": setValid(false); // Do not progress to next state return new StatusResponse("PENDING"); case "NOK": return new NokResponse(); } digidClient.remoteLog("867", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), HIDDEN, true)); appSession.setRdaSessionStatus("DOCUMENTS_RECEIVED"); Map<String, String> rdaSession = rdaClient.startSession(returnUrl + "/iapi/rda/confirm", appSession.getId(), params.getIpAddress(), result.get("travel_documents"), result.get("driving_licences")); if (rdaSession.isEmpty()) { digidClient.remoteLog("873", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), HIDDEN, true)); return new NokResponse(); } appSession.setConfirmSecret(rdaSession.get("confirmSecret")); appSession.setUrl(rdaSession.get("url")); appSession.setRdaSessionId(rdaSession.get("sessionId")); appSession.setRdaSessionTimeoutInSeconds(rdaSession.get("expiration")); appSession.setRdaSessionStatus("SCANNING"); digidClient.remoteLog("868", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), HIDDEN, true)); return new RdaResponse(appSession.getUrl(), appSession.getRdaSessionId()); }
// Test: when the RDA client returns an empty session map, failure log entry "873" is written once.
@Test void processWidstatusValidRdaSessionEmpty(){ when(digidClientMock.getWidstatus(mockedAppSession.getWidRequestId())).thenReturn(validDigidClientResponse); when(rdaClientMock.startSession(anyString(), anyString(), anyString(), any(), any())).thenReturn(rdaResponseNoSession); AppResponse appResponse = rdaPolling.process(mockedFlow, mockedAbstractAppRequest); verify(digidClientMock, times(1)).remoteLog("873", Map.of(lowerUnderscore(ACCOUNT_ID), mockedAppSession.getAccountId(), HIDDEN, true)); }
/**
 * Removes the mapping for {@code key} and returns the value it held, or {@code nullValue}
 * when the key was absent. The value must be read from off-heap memory before the slot is
 * released from the hash-slot array.
 */
@Override
public long remove(long key) {
    final long slotAddress = hsa.get(key);
    if (slotAddress == NULL_ADDRESS) {
        return nullValue;
    }
    final long removedValue = mem.getLong(slotAddress);
    hsa.remove(key);
    return removedValue;
}
// Test: removing a missing key yields MISSING_VALUE; removing a present key yields its value.
@Test public void testRemove() { long key = newKey(); assertEqualsKV(MISSING_VALUE, map.remove(key), key, 0); long value = newValue(); map.put(key, value); long oldValue = map.remove(key); assertEqualsKV(value, oldValue, key, value); }
// Convenience overload: delegates to the full variant with the trailing flag fixed to false
// (behavior of that flag is defined by the delegate).
public static void getSemanticPropsSingleFromString( SingleInputSemanticProperties result, String[] forwarded, String[] nonForwarded, String[] readSet, TypeInformation<?> inType, TypeInformation<?> outType) { getSemanticPropsSingleFromString( result, forwarded, nonForwarded, readSet, inType, outType, false); }
// Test: forwarding a single field to the wildcard target ("f1->*") is rejected with
// InvalidSemanticAnnotationException.
@Test void testForwardedForwardWildCard() { String[] forwardedFields = {"f1->*"}; SingleInputSemanticProperties sp = new SingleInputSemanticProperties(); assertThatThrownBy( () -> SemanticPropUtil.getSemanticPropsSingleFromString( sp, forwardedFields, null, null, threeIntTupleType, threeIntTupleType)) .isInstanceOf(InvalidSemanticAnnotationException.class); }
// Private constructor: this is a static utility holder and must not be instantiated.
private QueryParamsDataMap() { }
// Test: indexed query params with gaps and out-of-order indices (ids[4], ids[1], ids[8]) are
// compacted into a dense DataList ordered by index.
@Test public void testNumericKeyIndicesWithHoles() throws Exception { DataMap queryParamDataMap = queryParamsDataMap("ids[4]=1&ids[1]=0&ids[8]=2"); Object idsObj = queryParamDataMap.get("ids"); Assert.assertTrue(idsObj instanceof DataList); DataList ids = (DataList) idsObj; Assert.assertEquals(ids.get(0), "0"); Assert.assertEquals(ids.get(1), "1"); Assert.assertEquals(ids.get(2), "2"); }
// Convenience overload: creates an execution environment with an empty (default) configuration.
public static StreamExecutionEnvironment getExecutionEnvironment() { return getExecutionEnvironment(new Configuration()); }
// Test: a job name supplied via PipelineOptions.NAME in the configuration is honored.
@Test void testUserDefinedJobName() { String jobName = "MyTestJob"; Configuration config = new Configuration(); config.set(PipelineOptions.NAME, jobName); StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(config); testJobName(jobName, env); }
/**
 * Creates the "produce-throttle-time" sensor and registers average and maximum
 * throttle-time metrics on it.
 */
public static Sensor throttleTimeSensor(SenderMetricsRegistry metrics) {
    final Sensor sensor = metrics.sensor("produce-throttle-time");
    sensor.add(metrics.produceThrottleTimeAvg, new Avg());
    sensor.add(metrics.produceThrottleTimeMax, new Max());
    return sensor;
}
// Test: feeds three produce responses with throttle times 100/200/300ms (plus a 400ms throttled
// ApiVersions response during connection setup) through a mock selector and checks the registered
// avg (250) and max (400) throttle-time metrics.
@Test public void testQuotaMetrics() { MockSelector selector = new MockSelector(time); Sensor throttleTimeSensor = Sender.throttleTimeSensor(this.senderMetricsRegistry); Cluster cluster = TestUtils.singletonCluster("test", 1); Node node = cluster.nodes().get(0); NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 1000, 1000, 64 * 1024, 64 * 1024, 1000, 10 * 1000, 127 * 1000, time, true, new ApiVersions(), throttleTimeSensor, logContext, MetadataRecoveryStrategy.NONE); ApiVersionsResponse apiVersionsResponse = TestUtils.defaultApiVersionsResponse( 400, ApiMessageType.ListenerType.ZK_BROKER); ByteBuffer buffer = RequestTestUtils.serializeResponseWithHeader(apiVersionsResponse, ApiKeys.API_VERSIONS.latestVersion(), 0); selector.delayedReceive(new DelayedReceive(node.idString(), new NetworkReceive(node.idString(), buffer))); while (!client.ready(node, time.milliseconds())) { client.poll(1, time.milliseconds()); // If a throttled response is received, advance the time to ensure progress. time.sleep(client.throttleDelayMs(node, time.milliseconds())); } selector.clear(); for (int i = 1; i <= 3; i++) { int throttleTimeMs = 100 * i; ProduceRequest.Builder builder = ProduceRequest.forCurrentMagic(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection()) .setAcks((short) 1) .setTimeoutMs(1000)); ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true); client.send(request, time.milliseconds()); client.poll(1, time.milliseconds()); ProduceResponse response = produceResponse(tp0, i, Errors.NONE, throttleTimeMs); buffer = RequestTestUtils.serializeResponseWithHeader(response, ApiKeys.PRODUCE.latestVersion(), request.correlationId()); selector.completeReceive(new NetworkReceive(node.idString(), buffer)); client.poll(1, time.milliseconds()); // If a throttled response is received, advance the time to ensure progress. 
time.sleep(client.throttleDelayMs(node, time.milliseconds())); selector.clear(); } Map<MetricName, KafkaMetric> allMetrics = metrics.metrics(); KafkaMetric avgMetric = allMetrics.get(this.senderMetricsRegistry.produceThrottleTimeAvg); KafkaMetric maxMetric = allMetrics.get(this.senderMetricsRegistry.produceThrottleTimeMax); // Throttle times are ApiVersions=400, Produce=(100, 200, 300) assertEquals(250, (Double) avgMetric.metricValue(), EPS); assertEquals(400, (Double) maxMetric.metricValue(), EPS); client.close(); }
/**
 * Registers the given runner under its job ID.
 *
 * @throws IllegalArgumentException if a runner with the same job ID is already registered
 */
@Override
public void register(JobManagerRunner jobManagerRunner) {
    final JobID jobId = jobManagerRunner.getJobID();
    Preconditions.checkArgument(
            !isRegistered(jobId), "A job with the ID %s is already registered.", jobId);
    this.jobManagerRunners.put(jobId, jobManagerRunner);
}
// Test: a registered runner is reported as registered under its job ID.
@Test void testRegister() { final JobID jobId = new JobID(); testInstance.register(TestingJobManagerRunner.newBuilder().setJobId(jobId).build()); assertThat(testInstance.isRegistered(jobId)).isTrue(); }
/**
 * Shuts down all task executors in two phases: first request shutdown on every executor and
 * wake them all up, then wait (up to {@code duration} each) for them to terminate.
 */
public void shutdown(final Duration duration) {
    taskExecutors.forEach(TaskExecutor::requestShutdown);
    signalTaskExecutors();
    for (final TaskExecutor executor : taskExecutors) {
        executor.awaitShutdown(duration);
    }
}
// Test: the awaiting runnable stays blocked (its latch does not complete within 100ms) until
// shutdown is requested.
@Test public void shouldBlockOnAwait() throws InterruptedException { final AwaitingRunnable awaitingRunnable = new AwaitingRunnable(); final Thread awaitingThread = new Thread(awaitingRunnable); awaitingThread.start(); assertFalse(awaitingRunnable.awaitDone.await(100, TimeUnit.MILLISECONDS)); awaitingRunnable.shutdown(); }
/**
 * Equality is defined by the distribution {@code method} only — other fields are deliberately
 * ignored (see the accompanying unit test, which treats instances differing only in other
 * constructor arguments as equal).
 */
@Override
public boolean equals(final Object o) {
    if (this == o) {
        return true;
    }
    if (!(o instanceof Distribution)) {
        return false;
    }
    return Objects.equals(method, ((Distribution) o).method);
}
// Test: equality depends only on the distribution method — instances differing in the boolean
// flag are equal, instances with different methods are not.
@Test public void testEquals() { assertEquals(new Distribution(Distribution.DOWNLOAD, URI.create("o"), false), new Distribution(Distribution.DOWNLOAD, URI.create("o"), false)); assertEquals(new Distribution(Distribution.DOWNLOAD, URI.create("o"), true), new Distribution(Distribution.DOWNLOAD, URI.create("o"), false)); assertNotEquals(new Distribution(Distribution.DOWNLOAD, URI.create("o"), false), new Distribution(Distribution.STREAMING, URI.create("o"), false)); assertNotEquals(new Distribution(Distribution.DOWNLOAD, URI.create("o"), false), new Distribution(Distribution.CUSTOM, URI.create("o"), false)); assertNotEquals(new Distribution(Distribution.DOWNLOAD, URI.create("o"), false), new Distribution(Distribution.WEBSITE, URI.create("o"), false)); assertNotEquals(new Distribution(Distribution.DOWNLOAD, URI.create("o"), false), new Distribution(Distribution.WEBSITE_CDN, URI.create("o"), false)); }
// Convenience overload: delegates to the full variant with the trailing flag fixed to true
// (the flag's meaning is defined by the delegate).
protected static PrivateKey toPrivateKey(File keyFile, String keyPassword) throws NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeySpecException, InvalidAlgorithmParameterException, KeyException, IOException { return toPrivateKey(keyFile, keyPassword, true); }
// Test: an AES-encrypted PKCS#1 DSA key file is decrypted with the password and parsed.
@Test public void testPkcs1AesEncryptedDsa() throws Exception { PrivateKey key = SslContext.toPrivateKey(new File(getClass().getResource("dsa_pkcs1_aes_encrypted.key") .getFile()), "example"); assertNotNull(key); }
// Convenience overload: wraps the single command in a list and delegates. Uses a mutable Guava
// list (not Collections.singletonList) in case the delegate mutates it — TODO confirm.
public static String generate(String command, CommandType type) { return generate(Lists.newArrayList(command), type); }
// Test: a generated multi-command CLI request matches the expected JSON fixture tree-for-tree.
@Test public void manyRequestsTest() throws IOException { InputStream streamOrig = getClass().getResourceAsStream(REQ_FILE2); ObjectMapper om = new ObjectMapper(); JsonNode node1 = om.readTree(streamOrig); ArrayList<String> cmds = new ArrayList<>(); cmds.add("show interface"); cmds.add("show ver"); JsonNode node2 = om.readTree(NxApiRequest.generate(cmds, NxApiRequest.CommandType.CLI)); assertEquals(node1.toString(), node2.toString()); }
// Exposes the wrapped HTTP response's content stream as the response body.
public InputStream getBody() throws IOException { return httpResponse.getContent(); }
// Test: getBody() streams back exactly the bytes of the mocked response content.
@Test public void testGetContent() throws IOException { byte[] expectedResponse = "crepecake\nis\ngood!".getBytes(StandardCharsets.UTF_8); ByteArrayInputStream responseInputStream = new ByteArrayInputStream(expectedResponse); Mockito.when(httpResponseMock.getContent()).thenReturn(responseInputStream); try (Response response = new Response(httpResponseMock)) { Assert.assertArrayEquals(expectedResponse, ByteStreams.toByteArray(response.getBody())); } }
// Private constructor: fixed-length Iceberg values are surfaced as Hive BINARY; instances are
// obtained via the class's accessor (see the accompanying test's use of get()).
private IcebergFixedObjectInspector() { super(TypeInfoFactory.binaryTypeInfo); }
// Test: the inspector reports BINARY primitive metadata, is null-safe, round-trips byte arrays
// through java/writable/convert accessors, and copyObject returns an equal but distinct array.
@Test public void testIcebergFixedObjectInspector() { IcebergFixedObjectInspector oi = IcebergFixedObjectInspector.get(); assertThat(oi.getCategory()).isEqualTo(ObjectInspector.Category.PRIMITIVE); assertThat(oi.getPrimitiveCategory()) .isEqualTo(PrimitiveObjectInspector.PrimitiveCategory.BINARY); assertThat(oi.getTypeInfo()).isEqualTo(TypeInfoFactory.binaryTypeInfo); assertThat(oi.getTypeName()).isEqualTo(TypeInfoFactory.binaryTypeInfo.getTypeName()); assertThat(oi.getJavaPrimitiveClass()).isEqualTo(byte[].class); assertThat(oi.getPrimitiveWritableClass()).isEqualTo(BytesWritable.class); assertThat(oi.copyObject(null)).isNull(); assertThat(oi.getPrimitiveJavaObject(null)).isNull(); assertThat(oi.getPrimitiveWritableObject(null)).isNull(); assertThat(oi.convert(null)).isNull(); byte[] bytes = new byte[] {0, 1}; BytesWritable bytesWritable = new BytesWritable(bytes); assertThat(oi.getPrimitiveJavaObject(bytes)).isEqualTo(bytes); assertThat(oi.getPrimitiveWritableObject(bytes)).isEqualTo(bytesWritable); assertThat(oi.convert(bytes)).isEqualTo(bytes); byte[] copy = (byte[]) oi.copyObject(bytes); assertThat(copy).isEqualTo(bytes); assertThat(copy).isNotSameAs(bytes); assertThat(oi.preferWritable()).isFalse(); }
@Override public PartialConfig load(File configRepoCheckoutDirectory, PartialConfigLoadContext context) { File[] allFiles = getFiles(configRepoCheckoutDirectory, context); // if context had changed files list then we could parse only new content PartialConfig[] allFragments = parseFiles(allFiles); PartialConfig partialConfig = new PartialConfig(); collectFragments(allFragments, partialConfig); return partialConfig; }
// Test: a checkout directory containing one pipeline XML file loads into a PartialConfig whose
// first group's first pipeline equals the original.
@Test public void shouldLoadDirectoryWithOnePipeline() throws Exception { GoConfigMother mother = new GoConfigMother(); PipelineConfig pipe1 = mother.cruiseConfigWithOnePipelineGroup().getAllPipelineConfigs().get(0); helper.addFileWithPipeline("pipe1.gocd.xml", pipe1); PartialConfig part = xmlPartialProvider.load(tmpFolder,mock(PartialConfigLoadContext.class)); PipelineConfig pipeRead = part.getGroups().get(0).get(0); assertThat(pipeRead,is(pipe1)); }
// Adapts a Jetty servlet request into a JDisc HttpRequest: copies URI/method/protocol/remote
// address and connection timestamps, stashes the client cert chain, local port and SSL session in
// the request context, and caches the JDisc request back on the servlet request attribute so later
// lookups reuse it. Invalid UTF-8 in the request is surfaced as a bad-request exception.
public static HttpRequest newJDiscRequest(CurrentContainer container, HttpServletRequest servletRequest) { try { var jettyRequest = (Request) servletRequest; var jdiscHttpReq = HttpRequest.newServerRequest( container, getUri(servletRequest), getMethod(servletRequest), HttpRequest.Version.fromString(servletRequest.getProtocol()), new InetSocketAddress(servletRequest.getRemoteAddr(), servletRequest.getRemotePort()), getConnection(jettyRequest).getCreatedTimeStamp(), jettyRequest.getTimeStamp()); jdiscHttpReq.context().put(RequestUtils.JDISC_REQUEST_X509CERT, getCertChain(servletRequest)); jdiscHttpReq.context().put(RequestUtils.JDICS_REQUEST_PORT, servletRequest.getLocalPort()); SSLSession sslSession = (SSLSession) servletRequest.getAttribute(RequestUtils.JETTY_REQUEST_SSLSESSION); jdiscHttpReq.context().put(RequestUtils.JDISC_REQUEST_SSLSESSION, sslSession); servletRequest.setAttribute(HttpRequest.class.getName(), jdiscHttpReq); return jdiscHttpReq; } catch (Utf8Appendable.NotUtf8Exception e) { throw createBadQueryException(e); } }
// Test: a request with an illegal host ("." ) is rejected with a BAD_REQUEST RequestException.
@Test final void illegal_host_throws_requestexception2() { try { HttpRequestFactory.newJDiscRequest( new MockContainer(), createMockRequest("http", ".", "/foo", "")); fail("Above statement should throw"); } catch (RequestException e) { assertThat(e.getResponseStatus(), is(Response.Status.BAD_REQUEST)); } }
// Test-only accessor: exposes the future that completes when the bootstrap sequence finishes.
@VisibleForTesting CompletableFuture<Acknowledge> getBootstrapCompletionFuture() { return bootstrapCompletionFuture; }
// Test: when the application job is cancelled, the bootstrap completes and triggers a cluster
// shutdown with status CANCELED (observed through the dispatcher's shutdown hook).
@Test void testClusterShutdownWhenApplicationGetsCancelled() throws Exception { // we're "listening" on this to be completed to verify that the cluster // is being shut down from the ApplicationDispatcherBootstrap final CompletableFuture<ApplicationStatus> externalShutdownFuture = new CompletableFuture<>(); final TestingDispatcherGateway.Builder dispatcherBuilder = canceledJobGatewayBuilder() .setClusterShutdownFunction( status -> { externalShutdownFuture.complete(status); return CompletableFuture.completedFuture(Acknowledge.get()); }); ApplicationDispatcherBootstrap bootstrap = createApplicationDispatcherBootstrap( 3, dispatcherBuilder.build(), scheduledExecutor); final CompletableFuture<Acknowledge> completionFuture = bootstrap.getBootstrapCompletionFuture(); // wait until the bootstrap "thinks" it's done completionFuture.get(TIMEOUT_SECONDS, TimeUnit.SECONDS); // verify that the dispatcher is actually being shut down assertThat(externalShutdownFuture.get(TIMEOUT_SECONDS, TimeUnit.SECONDS)) .isEqualTo(ApplicationStatus.CANCELED); }
// Pure delegation to the wrapped scheduler; no behavior is added here.
@Override public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) { return delegate.scheduleAtFixedRate(command, initialDelay, period, unit); }
// Test: the call is forwarded verbatim to the underlying executor service.
@Test public void scheduleAtFixedRate() { underTest.scheduleAtFixedRate(runnable, initialDelay, period, SECONDS); verify(executorService).scheduleAtFixedRate(runnable, initialDelay, period, SECONDS); }
// True iff this presence has type "available".
public boolean isAvailable() { return type == Type.available; }
// Test: isAvailable() reflects the presence type set on the builder.
@Test public void isPresenceAvailableTest() { PresenceBuilder presence = getNewPresence(); presence.ofType(Presence.Type.available); assertTrue(presence.build().isAvailable()); presence.ofType(Presence.Type.unavailable); assertFalse(presence.build().isAvailable()); }
// Decides whether a message should be kept in offline storage, following XEP-0334 (<no-store/>
// hint), OF-2083 (already-stored offline marker), and XEP-0160's per-type rules: chat messages are
// stored only if they carry content beyond chat-state (XEP-0085) / real-time-text notifications
// and have a non-empty body; groupchat and headline are never stored; error is stored only when it
// carries an AMP (advanced message processing) payload; normal/untyped messages are stored.
static boolean shouldStoreMessage(final Message message) { // XEP-0334: Implement the <no-store/> hint to override offline storage if (message.getChildElement("no-store", "urn:xmpp:hints") != null) { return false; } // OF-2083: Prevent storing offline message that is already stored if (message.getChildElement("offline", "http://jabber.org/protocol/offline") != null) { return false; } switch (message.getType()) { case chat: // XEP-0160: Messages with a 'type' attribute whose value is "chat" SHOULD be stored offline, with the exception of messages that contain only Chat State Notifications (XEP-0085) [7] content // Iterate through the child elements to see if we can find anything that's not a chat state notification or // real time text notification Iterator<?> it = message.getElement().elementIterator(); while (it.hasNext()) { Object item = it.next(); if (item instanceof Element) { Element el = (Element) item; if (Namespace.NO_NAMESPACE.equals(el.getNamespace())) { continue; } if (!el.getNamespaceURI().equals("http://jabber.org/protocol/chatstates") && !(el.getQName().equals(QName.get("rtt", "urn:xmpp:rtt:0"))) ) { return true; } } } return message.getBody() != null && !message.getBody().isEmpty(); case groupchat: case headline: // XEP-0160: "groupchat" message types SHOULD NOT be stored offline // XEP-0160: "headline" message types SHOULD NOT be stored offline return false; case error: // XEP-0160: "error" message types SHOULD NOT be stored offline, // although a server MAY store advanced message processing errors offline if (message.getChildElement("amp", "http://jabber.org/protocol/amp") == null) { return false; } break; default: // XEP-0160: Messages with a 'type' attribute whose value is "normal" (or messages with no 'type' attribute) SHOULD be stored offline. break; } return true; }
// Test: headline messages are never stored offline (XEP-0160).
@Test public void shouldNotStoreHeadlineMessages() { // XEP-0160: "headline" message types SHOULD NOT be stored offline Message message = new Message(); message.setType(Message.Type.headline); assertFalse(OfflineMessageStore.shouldStoreMessage(message)); }
// Returns the owning user of an open file descriptor. On Windows the owner name is fetched
// natively and its domain prefix stripped; on POSIX the UID is resolved to a user name via a
// time-bounded cache (cacheTimeout) so repeated lookups avoid the native uid->name call.
public static String getOwner(FileDescriptor fd) throws IOException { ensureInitialized(); if (Shell.WINDOWS) { String owner = Windows.getOwner(fd); owner = stripDomain(owner); return owner; } else { long uid = POSIX.getUIDforFDOwnerforOwner(fd); CachedUid cUid = uidCache.get(uid); long now = System.currentTimeMillis(); if (cUid != null && (cUid.timestamp + cacheTimeout) > now) { return cUid.username; } String user = POSIX.getUserName(uid); LOG.info("Got UserName " + user + " for UID " + uid + " from the native implementation"); cUid = new CachedUid(user, now); uidCache.put(uid, cUid); return user; } }
// Test: fstat on a freshly created file reports the current user (or Administrators on Windows
// when applicable) as owner, a non-empty group, and a regular-file mode.
@Test (timeout = 30000) public void testFstat() throws Exception { FileOutputStream fos = new FileOutputStream( new File(TEST_DIR, "testfstat")); NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD()); fos.close(); LOG.info("Stat: " + String.valueOf(stat)); String owner = stat.getOwner(); String expectedOwner = System.getProperty("user.name"); if (Path.WINDOWS) { UserGroupInformation ugi = UserGroupInformation.createRemoteUser(expectedOwner); final String adminsGroupString = "Administrators"; if (Arrays.asList(ugi.getGroupNames()).contains(adminsGroupString)) { expectedOwner = adminsGroupString; } } assertEquals(expectedOwner, owner); assertNotNull(stat.getGroup()); assertTrue(!stat.getGroup().isEmpty()); assertEquals("Stat mode field should indicate a regular file", S_IFREG, stat.getMode() & S_IFMT); }
/**
 * Returns the distinct elements common to both arrays, preserving the order in which they
 * first appear in {@code left}. A null on either side yields null.
 */
@Udf
public <T> List<T> intersect(
    @UdfParameter(description = "First array of values") final List<T> left,
    @UdfParameter(description = "Second array of values") final List<T> right) {
  if (left == null || right == null) {
    return null;
  }
  // LinkedHashSet keeps left's first-seen order while deduplicating.
  final Set<T> ordered = Sets.newLinkedHashSet(left);
  ordered.retainAll(Sets.newHashSet(right));
  return Lists.newArrayList(ordered);
}
// Test: arrays containing only nulls intersect to a list whose single element is null
// (null is treated as a regular, equal value).
@Test public void shouldReturnNullForArraysOfOnlyNulls() { final List<String> input1 = Arrays.asList(null, null); final List<String> input2 = Arrays.asList(null, null, null); final List<String> result = udf.intersect(input1, input2); assertThat(result.get(0), is(nullValue())); }
// Validates that a tenant exists, is enabled, and is not past its expiration time, throwing the
// matching business exception otherwise. Note: tenant.getStatus().equals(...) intentionally NPEs
// on a null status rather than silently passing — do not flip the equals direction.
@Override public void validTenant(Long id) { TenantDO tenant = getTenant(id); if (tenant == null) { throw exception(TENANT_NOT_EXISTS); } if (tenant.getStatus().equals(CommonStatusEnum.DISABLE.getStatus())) { throw exception(TENANT_DISABLE, tenant.getName()); } if (DateUtils.isExpired(tenant.getExpireTime())) { throw exception(TENANT_EXPIRE, tenant.getName()); } }
// Test: an enabled tenant with an expire time in the past fails validation with TENANT_EXPIRE.
@Test public void testValidTenant_expired() { // mock data: enabled tenant whose expire time (2020-02-02) is already in the past TenantDO tenant = randomPojo(TenantDO.class, o -> o.setId(1L).setStatus(CommonStatusEnum.ENABLE.getStatus()) .setExpireTime(buildTime(2020, 2, 2))); tenantMapper.insert(tenant); // invoke and assert the business exception Assertions-style assertServiceException(() -> tenantService.validTenant(1L), TENANT_EXPIRE, tenant.getName()); }
// Publishes one Pulsar record to Kinesis. When ordering must be retained and a previous publish
// failed, the record is rejected (thrown) instead of acked so ordering is preserved. The partition
// key falls back from record key to topic name to a default, truncated to Kinesis's 256-char
// limit. Metrics count incoming records/bytes when a sink context is available.
@Override public void write(Record<GenericObject> record) throws Exception { // kpl-thread captures publish-failure. fail the publish on main pulsar-io-thread to maintain the ordering if (kinesisSinkConfig.isRetainOrdering() && previousPublishFailed == TRUE) { LOG.warn("Skip acking message to retain ordering with previous failed message {}-{}", this.streamName, record.getRecordSequence()); throw new IllegalStateException("kinesis queue has publish failure"); } String partitionedKey = record.getKey().orElse(record.getTopicName().orElse(defaultPartitionedKey)); partitionedKey = partitionedKey.length() > maxPartitionedKeyLength ? partitionedKey.substring(0, maxPartitionedKeyLength - 1) : partitionedKey; // partitionedKey Length must be at least one, and at most 256 ByteBuffer data = createKinesisMessage(kinesisSinkConfig.getMessageFormat(), record); int size = data.remaining(); sendUserRecord(ProducerSendCallback.create(this, record, System.nanoTime(), partitionedKey, data)); if (sinkContext != null) { sinkContext.recordMetric(METRICS_TOTAL_INCOMING, 1); sinkContext.recordMetric(METRICS_TOTAL_INCOMING_BYTES, data.array().length); } if (LOG.isDebugEnabled()) { LOG.debug("Published message to kinesis stream {} with size {}", streamName, size); } }
// Test: writing ten records through the sink acks them and lands ten Kinesis records whose data
// payload matches the original message bytes.
@Test public void testWrite() throws Exception { AtomicBoolean ackCalled = new AtomicBoolean(); AtomicLong sequenceCounter = new AtomicLong(0); Message<GenericObject> message = mock(Message.class); when(message.getData()).thenReturn("hello".getBytes(StandardCharsets.UTF_8)); final Record<GenericObject> pulsarRecord = new Record<GenericObject>() { @Override public Optional<String> getKey() { return Optional.of( "key-" + sequenceCounter.incrementAndGet()); } @Override public GenericObject getValue() { // Value comes from the message raw data, not the GenericObject return null; } @Override public void ack() { ackCalled.set(true); } @Override public Optional<Message<GenericObject>> getMessage() { return Optional.of(message); } }; try (final KinesisSink sink = new KinesisSink()) { Map<String, Object> map = createConfig(); SinkContext mockSinkContext = mock(SinkContext.class); sink.open(map, mockSinkContext); for (int i = 0; i < 10; i++) { sink.write(pulsarRecord); } Awaitility.await().untilAsserted(() -> { assertTrue(ackCalled.get()); }); final GetRecordsResponse getRecords = getStreamRecords(); assertEquals(getRecords.records().size(), 10); for (software.amazon.awssdk.services.kinesis.model.Record record : getRecords.records()) { assertEquals(record.data().asString(StandardCharsets.UTF_8), "hello"); } } }
/**
 * Dispatches the notification when its type matches this dispatcher's type, or unconditionally
 * when this dispatcher is generic (registered with the empty-string type).
 */
public final void performDispatch(Notification notification, Context context) {
    final boolean matchesType = StringUtils.equals(notification.getType(), notificationType);
    final boolean isGenericDispatcher = StringUtils.equals("", notificationType);
    if (matchesType || isGenericDispatcher) {
        dispatch(notification, context);
    }
}
// Test: a generic (empty-type) dispatcher dispatches regardless of the notification's type.
@Test public void shouldAlwaysRunDispatchForGenericDispatcher() { NotificationDispatcher dispatcher = new FakeGenericNotificationDispatcher(); dispatcher.performDispatch(notification, context); verify(context, times(1)).addUser("user1", channel); }
@SuppressWarnings("unchecked") @Override public void configure(final Map<String, ?> configs, final boolean isKey) { final String windowedInnerClassSerdeConfig = (String) configs.get(StreamsConfig.WINDOWED_INNER_CLASS_SERDE); Serde<T> windowInnerClassSerde = null; if (windowedInnerClassSerdeConfig != null) { try { windowInnerClassSerde = Utils.newInstance(windowedInnerClassSerdeConfig, Serde.class); } catch (final ClassNotFoundException e) { throw new ConfigException(StreamsConfig.WINDOWED_INNER_CLASS_SERDE, windowedInnerClassSerdeConfig, "Serde class " + windowedInnerClassSerdeConfig + " could not be found."); } } if (inner != null && windowedInnerClassSerdeConfig != null) { if (!inner.getClass().getName().equals(windowInnerClassSerde.deserializer().getClass().getName())) { throw new IllegalArgumentException("Inner class deserializer set using constructor " + "(" + inner.getClass().getName() + ")" + " is different from the one set in windowed.inner.class.serde config " + "(" + windowInnerClassSerde.deserializer().getClass().getName() + ")."); } } else if (inner == null && windowedInnerClassSerdeConfig == null) { throw new IllegalArgumentException("Inner class deserializer should be set either via constructor " + "or via the windowed.inner.class.serde config"); } else if (inner == null) inner = windowInnerClassSerde.deserializer(); }
// An unresolvable class name in windowed.inner.class.serde must raise a ConfigException.
@Test
public void shouldThrowConfigExceptionWhenInvalidWindowInnerClassDeserialiserSupplied() {
    props.put(StreamsConfig.WINDOWED_INNER_CLASS_SERDE, "some.non.existent.class");
    assertThrows(ConfigException.class, () -> sessionWindowedDeserializer.configure(props, false));
}
/**
 * Feeds one page into the top-N builder. Must not be called after finishing began.
 */
@Override
public void addInput(Page page) {
    checkState(!finishing, "Operator is already finishing");
    boolean done = topNBuilder.processPage(requireNonNull(page, "page is null")).process();
    // there is no grouping so work will always be done
    verify(done);
    // Report current memory usage to the driver context.
    topNBuilder.updateMemoryReservations();
}
// Feeding a page into an operator whose driver context has a 1-byte memory limit
// must surface ExceededMemoryLimitException from addInput.
@Test
public void testExceedMemoryLimit() throws Exception {
    List<Page> input = rowPagesBuilder(BIGINT)
            .row(1L)
            .build();
    DriverContext smallDiverContext = createTaskContext(executor, scheduledExecutor, TEST_SESSION, new DataSize(1, BYTE))
            .addPipelineContext(0, true, true, false)
            .addDriverContext();
    TopNOperatorFactory operatorFactory = new TopNOperatorFactory(
            0,
            new PlanNodeId("test"),
            ImmutableList.of(BIGINT),
            100,
            ImmutableList.of(0),
            ImmutableList.of(ASC_NULLS_LAST));
    try (Operator operator = operatorFactory.createOperator(smallDiverContext)) {
        operator.addInput(input.get(0));
        fail("must fail because of exceeding local memory limit");
    } catch (ExceededMemoryLimitException ignore) {
        // expected: the 1-byte limit cannot hold the page
    }
}
/**
 * Moves the group to {@code groupState}, recording the previous state, the
 * transition timestamp, and a state-transition metric.
 *
 * <p>Validation happens first, so an illegal transition leaves all fields untouched.
 */
public void transitionTo(ClassicGroupState groupState) {
    assertValidTransition(groupState);
    final ClassicGroupState stateBeforeTransition = state;
    previousState = stateBeforeTransition;
    state = groupState;
    currentStateTimestamp = Optional.of(time.milliseconds());
    metrics.onClassicGroupStateTransition(stateBeforeTransition, groupState);
}
// NOTE(review): despite the test name, DEAD -> DEAD does not throw here — the
// transition is accepted by assertValidTransition and the group simply stays DEAD.
@Test
public void testDeadToDeadIllegalTransition() {
    group.transitionTo(PREPARING_REBALANCE);
    group.transitionTo(DEAD);
    group.transitionTo(DEAD);
    assertState(group, DEAD);
}
/**
 * Verifies that no container cluster configures access-control exclusions.
 *
 * <p>Applies only to hosted, non-public systems, and only when the application is
 * not explicitly allowed to disable mTLS.
 */
@Override
public void validate(Context context) {
    boolean hostedNonPublic = context.deployState().isHosted()
            && !context.deployState().zone().system().isPublic();
    if (!hostedNonPublic) return;
    if (context.deployState().getProperties().allowDisableMtls()) return;
    context.model().getContainerClusters().forEach((id, cluster) -> {
        Http http = cluster.getHttp();
        if (http == null) {
            return;
        }
        http.getAccessControl().ifPresent(accessControl -> verifyNoExclusions(id, accessControl, context));
    });
}
// When the deployment explicitly allows disabling mTLS, a model containing
// access-control filter exclusions must pass validation without error.
@Test
public void validator_accepts_when_allowed_to_exclude() throws IOException, SAXException {
    DeployState deployState = createDeployState(zone(CloudName.AWS, SystemName.main), new StringBuffer(), true);
    VespaModel model = new VespaModel(
            MapConfigModelRegistry.createFromList(new ModelBuilderAddingAccessControlFilter()),
            deployState);
    ValidationTester.validate(new AccessControlFilterExcludeValidator(), model, deployState);
}
/**
 * Parses command-line arguments (after expanding {@code @params} files) into a
 * {@link CommandLineOptions} instance. The first non-flag argument and everything
 * after it are treated as file names.
 *
 * @throws IllegalArgumentException on an unrecognized flag
 */
static CommandLineOptions parse(Iterable<String> options) {
    CommandLineOptions.Builder optionsBuilder = CommandLineOptions.builder();
    List<String> expandedOptions = new ArrayList<>();
    expandParamsFiles(options, expandedOptions);
    Iterator<String> it = expandedOptions.iterator();
    while (it.hasNext()) {
        String option = it.next();
        if (!option.startsWith("-")) {
            // First non-flag argument: consume it and the rest of the iterator as files.
            optionsBuilder.filesBuilder().add(option).addAll(it);
            break;
        }
        // Split "--flag=value" into flag and value; value stays null when no '='.
        String flag;
        String value;
        int idx = option.indexOf('=');
        if (idx >= 0) {
            flag = option.substring(0, idx);
            value = option.substring(idx + 1);
        } else {
            flag = option;
            value = null;
        }
        // NOTE: update usage information in UsageException when new flags are added
        switch (flag) {
            case "-i":
            case "-r":
            case "-replace":
            case "--replace":
                optionsBuilder.inPlace(true);
                break;
            case "--lines":
            case "-lines":
            case "--line":
            case "-line":
                parseRangeSet(optionsBuilder.linesBuilder(), getValue(flag, it, value));
                break;
            case "--offset":
            case "-offset":
                optionsBuilder.addOffset(parseInteger(it, flag, value));
                break;
            case "--length":
            case "-length":
                optionsBuilder.addLength(parseInteger(it, flag, value));
                break;
            case "--aosp":
            case "-aosp":
            case "-a":
                optionsBuilder.aosp(true);
                break;
            case "--version":
            case "-version":
            case "-v":
                optionsBuilder.version(true);
                break;
            case "--help":
            case "-help":
            case "-h":
                optionsBuilder.help(true);
                break;
            case "--fix-imports-only":
                optionsBuilder.fixImportsOnly(true);
                break;
            case "--skip-sorting-imports":
                optionsBuilder.sortImports(false);
                break;
            case "--skip-removing-unused-imports":
                optionsBuilder.removeUnusedImports(false);
                break;
            case "--skip-reflowing-long-strings":
                optionsBuilder.reflowLongStrings(false);
                break;
            case "--skip-javadoc-formatting":
                optionsBuilder.formatJavadoc(false);
                break;
            case "-":
                // Bare "-" means read source from stdin.
                optionsBuilder.stdin(true);
                break;
            case "-n":
            case "--dry-run":
                optionsBuilder.dryRun(true);
                break;
            case "--set-exit-if-changed":
                optionsBuilder.setExitIfChanged(true);
                break;
            case "-assume-filename":
            case "--assume-filename":
                optionsBuilder.assumeFilename(getValue(flag, it, value));
                break;
            default:
                throw new IllegalArgumentException("unexpected flag: " + flag);
        }
    }
    return optionsBuilder.build();
}
// Passing --set-exit-if-changed must enable the corresponding option.
@Test
public void setExitIfChanged() {
    boolean exitIfChanged =
        CommandLineOptionsParser.parse(Arrays.asList("--set-exit-if-changed")).setExitIfChanged();
    assertThat(exitIfChanged).isTrue();
}
/**
 * Applies a flow-objective intent operation: updates resource tracking for the
 * intents being installed/uninstalled, then submits REMOVE and ADD objective
 * contexts through a shared installation context.
 */
@Override
public void apply(IntentOperationContext<FlowObjectiveIntent> intentOperationContext) {
    Objects.requireNonNull(intentOperationContext);
    Optional<IntentData> toUninstall = intentOperationContext.toUninstall();
    Optional<IntentData> toInstall = intentOperationContext.toInstall();
    List<FlowObjectiveIntent> uninstallIntents = intentOperationContext.intentsToUninstall();
    List<FlowObjectiveIntent> installIntents = intentOperationContext.intentsToInstall();
    // Nothing to do either way: report immediate success.
    if (!toInstall.isPresent() && !toUninstall.isPresent()) {
        intentInstallCoordinator.intentInstallSuccess(intentOperationContext);
        return;
    }
    if (toUninstall.isPresent()) {
        // Stop tracking resources of the intent being removed and its installables.
        IntentData intentData = toUninstall.get();
        trackerService.removeTrackedResources(intentData.key(), intentData.intent().resources());
        uninstallIntents.forEach(installable ->
            trackerService.removeTrackedResources(intentData.intent().key(), installable.resources()));
    }
    if (toInstall.isPresent()) {
        // Start tracking resources of the intent being installed and its installables.
        IntentData intentData = toInstall.get();
        trackerService.addTrackedResources(intentData.key(), intentData.intent().resources());
        installIntents.forEach(installable ->
            trackerService.addTrackedResources(intentData.key(), installable.resources()));
    }
    FlowObjectiveIntentInstallationContext intentInstallationContext =
        new FlowObjectiveIntentInstallationContext(intentOperationContext);
    // REMOVE objectives are queued as regular pending contexts.
    uninstallIntents.stream()
        .map(intent -> buildObjectiveContexts(intent, REMOVE))
        .flatMap(Collection::stream)
        .forEach(context -> {
            context.intentInstallationContext(intentInstallationContext);
            intentInstallationContext.addContext(context);
            intentInstallationContext.addPendingContext(context);
        });
    // ADD objectives go into the "next pending" set — presumably processed after
    // the removals complete (see FlowObjectiveIntentInstallationContext); verify.
    installIntents.stream()
        .map(intent -> buildObjectiveContexts(intent, ADD))
        .flatMap(Collection::stream)
        .forEach(context -> {
            context.intentInstallationContext(intentInstallationContext);
            intentInstallationContext.addContext(context);
            intentInstallationContext.addNextPendingContext(context);
        });
    intentInstallationContext.apply();
}
// When group installation keeps failing past the retry threshold, the installer
// must report the operation context as failed to the install coordinator.
@Test
public void testGroupInstallationFailedError() {
    // Group install failed, and retry threshold exceed
    intentInstallCoordinator = new TestIntentInstallCoordinator();
    installer.intentInstallCoordinator = intentInstallCoordinator;
    errors = ImmutableList.of(GROUPINSTALLATIONFAILED, GROUPINSTALLATIONFAILED,
            GROUPINSTALLATIONFAILED, GROUPINSTALLATIONFAILED,
            GROUPINSTALLATIONFAILED, GROUPINSTALLATIONFAILED, GROUPINSTALLATIONFAILED);
    installer.flowObjectiveService = new TestFailedFlowObjectiveService(errors);
    context = createInstallContext();
    installer.apply(context);
    failedContext = intentInstallCoordinator.failedContext;
    assertEquals(failedContext, context);
}
/**
 * Returns the configured snippet naming style, defaulting to
 * {@link SnippetType#UNDERSCORE} when the property is absent.
 */
@Override
public SnippetType getSnippetType() {
    return configurationParameters
        .get(SNIPPET_TYPE_PROPERTY_NAME, value -> SnippetTypeParser.parseSnippetType(value))
        .orElseGet(() -> SnippetType.UNDERSCORE);
}
// Both supported snippet-type values should parse to their respective enum constants.
@Test
void getSnippetType() {
    ConfigurationParameters underscoreConfig = new MapConfigurationParameters(
        Constants.SNIPPET_TYPE_PROPERTY_NAME, "underscore");
    ConfigurationParameters camelcaseConfig = new MapConfigurationParameters(
        Constants.SNIPPET_TYPE_PROPERTY_NAME, "camelcase");
    assertThat(new CucumberEngineOptions(underscoreConfig).getSnippetType(), is(SnippetType.UNDERSCORE));
    assertThat(new CucumberEngineOptions(camelcaseConfig).getSnippetType(), is(SnippetType.CAMELCASE));
}
/** Removes every entry by truncating the index down to zero entries. */
@Override
public void truncate() {
    truncateToEntries(0);
}
// truncate() empties the index. NOTE(review): truncateTo(10 + baseOffset) also
// yields zero entries here — this depends on the offsets appendEntries assigns;
// verify against the fixture.
@Test
public void testTruncate() {
    appendEntries(maxEntries - 1);
    idx.truncate();
    assertEquals(0, idx.entries());
    appendEntries(maxEntries - 1);
    idx.truncateTo(10 + baseOffset);
    assertEquals(0, idx.entries());
}
/** Returns the resource ID of this task executor's (unresolved) location. */
public ResourceID getResourceID() {
    return unresolvedTaskManagerLocation.getResourceID();
}
// After connecting to the resource manager, the task executor must send an initial
// slot report identifying itself by its own resource ID.
@Test
void testInitialSlotReport() throws Exception {
    final TaskExecutor taskExecutor = createTaskExecutor(1);
    taskExecutor.start();
    try {
        final TestingResourceManagerGateway testingResourceManagerGateway = new TestingResourceManagerGateway();
        final CompletableFuture<ResourceID> initialSlotReportFuture = new CompletableFuture<>();
        // Capture the resource ID from the first slot report sent to the RM.
        testingResourceManagerGateway.setSendSlotReportFunction(
                resourceIDInstanceIDSlotReportTuple3 -> {
                    initialSlotReportFuture.complete(resourceIDInstanceIDSlotReportTuple3.f0);
                    return CompletableFuture.completedFuture(Acknowledge.get());
                });
        rpc.registerGateway(
                testingResourceManagerGateway.getAddress(), testingResourceManagerGateway);
        resourceManagerLeaderRetriever.notifyListener(
                testingResourceManagerGateway.getAddress(),
                testingResourceManagerGateway.getFencingToken().toUUID());
        assertThatFuture(initialSlotReportFuture)
                .eventuallySucceeds()
                .isEqualTo(taskExecutor.getResourceID());
    } finally {
        RpcUtils.terminateRpcEndpoint(taskExecutor);
    }
}
/**
 * Computes a character-level diff between two strings.
 *
 * <p>Each string is exploded into single-character elements, diffed, and the
 * resulting deltas are compressed back so each delta carries one string chunk.
 *
 * @param original the source text
 * @param revised the revised text
 * @return a patch whose deltas contain the changed character runs
 */
public static Patch<String> diffInline(String original, String revised) {
    List<String> origList = toCharacterStrings(original);
    List<String> revList = toCharacterStrings(revised);
    Patch<String> patch = DiffUtils.diff(origList, revList);
    // Collapse each delta's per-character lines into a single string chunk.
    for (AbstractDelta<String> delta : patch.getDeltas()) {
        delta.getSource().setLines(compressLines(delta.getSource().getLines(), ""));
        delta.getTarget().setLines(compressLines(delta.getTarget().getLines(), ""));
    }
    return patch;
}

/**
 * Splits {@code text} into a presized list of single-character strings. Uses a
 * primitive char loop instead of iterating boxed {@code Character} values.
 */
private static List<String> toCharacterStrings(String text) {
    List<String> result = new ArrayList<>(text.length());
    for (int i = 0; i < text.length(); i++) {
        result.add(String.valueOf(text.charAt(i)));
    }
    return result;
}
// Diffing "es" -> "fest" must yield two insert deltas ("f" at position 0 and
// "t" at position 2), each with an empty source chunk.
@Test
public void testDiffInline2() {
    final Patch<String> patch = DiffUtils.diffInline("es", "fest");
    assertEquals(2, patch.getDeltas().size());
    assertTrue(patch.getDeltas().get(0) instanceof InsertDelta);
    assertEquals(0, patch.getDeltas().get(0).getSource().getPosition());
    assertEquals(2, patch.getDeltas().get(1).getSource().getPosition());
    assertEquals(0, patch.getDeltas().get(0).getSource().getLines().size());
    assertEquals(0, patch.getDeltas().get(1).getSource().getLines().size());
    assertEquals("f", patch.getDeltas().get(0).getTarget().getLines().get(0));
    assertEquals("t", patch.getDeltas().get(1).getTarget().getLines().get(0));
}
/**
 * Renders the Freemarker template named after the step's function, using the step's
 * parameters as the template model.
 *
 * <p>String parameters are preprocessed: blanks are dropped, values starting with
 * {@code $} are treated as variable references (sigil stripped), and all other
 * strings are quoted to form rule-language literals.
 *
 * @throws IllegalArgumentException if the template is missing or rendering fails
 */
static final String generateForFragment(RuleBuilderStep step, Configuration configuration) {
    final String fragmentName = step.function();
    try {
        Template template = configuration.getTemplate(fragmentName);
        StringWriter writer = new StringWriter();
        Map<String, Object> filteredParams = new HashMap<>();
        if (step.parameters() != null) {
            for (Map.Entry<String, Object> val : step.parameters().entrySet()) {
                if (val.getValue() instanceof String s) {
                    if (StringUtils.isBlank(s)) {
                        // Blank string parameters are intentionally omitted from the model.
                    } else if (s.startsWith("$")) {
                        // "$name" denotes a variable reference: strip the sigil, no quoting.
                        filteredParams.put(val.getKey(), s.substring(1));
                    } else {
                        // Plain strings become quoted literals.
                        filteredParams.put(val.getKey(), "\"" + s + "\"");
                    }
                } else {
                    filteredParams.put(val.getKey(), val.getValue());
                }
            }
        }
        template.process(filteredParams, writer);
        writer.close();
        return writer.toString();
    } catch (TemplateNotFoundException e) {
        throw new IllegalArgumentException(f("No template found for fragment %s", fragmentName));
    } catch (Exception e) {
        throw new IllegalArgumentException("Error converting fragment template to fragment.", e);
    }
}
// Rendering a fragment whose template requires parameters that were never set
// must fail, wrapped as an IllegalArgumentException.
@Test
public void generateForFragmentThrowsException_WhenParameterNotSet() {
    RuleBuilderStep step = mock(RuleBuilderStep.class);
    when(step.function()).thenReturn("test_fragment1");
    assertThatThrownBy(() -> ParserUtil.generateForFragment(step, configuration))
            .isInstanceOf(IllegalArgumentException.class);
}
public static Map<String, String> convertSubscribe(Map<String, String> subscribe) { Map<String, String> newSubscribe = new HashMap<>(); for (Map.Entry<String, String> entry : subscribe.entrySet()) { String serviceName = entry.getKey(); String serviceQuery = entry.getValue(); if (StringUtils.isNotContains(serviceName, ':') && StringUtils.isNotContains(serviceName, '/')) { Map<String, String> params = StringUtils.parseQueryString(serviceQuery); String group = params.get(GROUP_KEY); String version = params.get(VERSION_KEY); // params.remove("group"); // params.remove("version"); String name = serviceName; if (StringUtils.isNotEmpty(group)) { name = group + "/" + name; } if (StringUtils.isNotEmpty(version)) { name = name + ":" + version; } newSubscribe.put(name, StringUtils.toQueryString(params)); } else { newSubscribe.put(serviceName, serviceQuery); } } return newSubscribe; }
// A bare interface key with group/version query params must be rewritten to
// "group/interface:version"; the params themselves stay in the query string.
@Test
void testSubscribe2() {
    String key = "dubbo.test.api.HelloService";
    Map<String, String> subscribe = new HashMap<String, String>();
    subscribe.put(key, "version=1.0.0&group=test&dubbo.version=2.0.0");
    Map<String, String> newSubscribe = UrlUtils.convertSubscribe(subscribe);
    assertEquals(
            "dubbo.version=2.0.0&group=test&version=1.0.0",
            newSubscribe.get("test/dubbo.test.api.HelloService:1.0.0"));
}
/**
 * Returns the remaining time-to-live of the given token in seconds.
 *
 * <p>When authentication is disabled the token is not parsed and the configured
 * validity period is reported instead. Note the result may be negative for an
 * already-expired token — verify callers tolerate that.
 */
@Override
public long getTokenTtlInSeconds(String token) throws AccessException {
    if (!authConfigs.isAuthEnabled()) {
        return tokenValidityInSeconds;
    }
    // Remaining TTL = token expiry (seconds since epoch) - current time (seconds).
    return jwtParser.getExpireTimeInSeconds(token) - TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis());
}
// A freshly created token must report a positive remaining TTL.
@Test
void testGetTokenTtlInSeconds() throws AccessException {
    String freshToken = jwtTokenManager.createToken("nacos");
    long remainingTtl = jwtTokenManager.getTokenTtlInSeconds(freshToken);
    assertTrue(remainingTtl > 0);
}
/**
 * Waits until the single given condition holds and then finishes the pipeline.
 * Convenience delegation to the varargs overload {@code waitForConditionsAndFinish}.
 */
public Result waitForConditionAndFinish(Config config, Supplier<Boolean> conditionCheck) throws IOException {
    return waitForConditionsAndFinish(config, conditionCheck);
}
// The operator should keep polling the job status (tolerating a transient IOException)
// until the condition supplier returns true, then drain the job and report CONDITION_MET.
@Test
public void testFinishAfterCondition() throws IOException {
    // Arrange
    AtomicInteger callCount = new AtomicInteger();
    int totalCalls = 3;
    Supplier<Boolean> checker = () -> callCount.incrementAndGet() >= totalCalls;
    when(client.getJobStatus(any(), any(), any()))
            .thenReturn(JobState.RUNNING)
            .thenThrow(new IOException())
            .thenReturn(JobState.RUNNING)
            .thenReturn(JobState.CANCELLING)
            .thenReturn(JobState.CANCELLED);
    // Act
    Result result = new PipelineOperator(client).waitForConditionAndFinish(DEFAULT_CONFIG, checker);
    // Assert
    verify(client, atLeast(totalCalls))
            .getJobStatus(projectCaptor.capture(), regionCaptor.capture(), jobIdCaptor.capture());
    verify(client).drainJob(projectCaptor.capture(), regionCaptor.capture(), jobIdCaptor.capture());
    Set<String> allProjects = new HashSet<>(projectCaptor.getAllValues());
    Set<String> allRegions = new HashSet<>(regionCaptor.getAllValues());
    Set<String> allJobIds = new HashSet<>(jobIdCaptor.getAllValues());
    assertThat(allProjects).containsExactly(PROJECT);
    assertThat(allRegions).containsExactly(REGION);
    assertThat(allJobIds).containsExactly(JOB_ID);
    assertThat(result).isEqualTo(Result.CONDITION_MET);
}
/** Installs each named interpreter in order by delegating to {@code install(String)}. */
public void install(String[] names) {
    for (int i = 0; i < names.length; i++) {
        install(names[i]);
    }
}
// Installing "intp1" into an initially empty base dir must create its directory.
@Test
void install() {
    assertEquals(0, interpreterBaseDir.listFiles().length);
    installer.install("intp1");
    assertTrue(new File(interpreterBaseDir, "intp1").isDirectory());
}
/**
 * Resolves a schema by name: primitive names first, then the namespace-qualified
 * named schema, then the bare name (which may already be fully qualified). Unknown
 * names produce an unresolved placeholder schema rather than null.
 */
public Schema find(String name, String namespace) {
    Schema.Type type = PRIMITIVES.get(name);
    if (type != null) {
        return Schema.create(type);
    }
    String fullName = fullName(name, namespace);
    Schema schema = getNamedSchema(fullName);
    if (schema == null) {
        schema = getNamedSchema(name);
    }
    return schema != null ? schema : SchemaResolver.unresolvedSchema(fullName);
}
// A registered named schema must be retrievable by its fully-qualified name
// even when no namespace is supplied.
@Test
public void validateSchemaRetrievalByFullName() {
    assertSame(fooRecord, fooBarBaz.find(fooRecord.getFullName(), null));
}
/**
 * Registers the RocksDB value providers (db handle, cache, statistics) for one
 * segment store so its metrics can be recorded.
 *
 * @throws IllegalStateException if value providers for the segment were already added
 */
public void addValueProviders(final String segmentName,
                              final RocksDB db,
                              final Cache cache,
                              final Statistics statistics) {
    if (storeToValueProviders.isEmpty()) {
        // First segment for this task: start participating in metrics recording.
        logger.debug("Adding metrics recorder of task {} to metrics recording trigger", taskId);
        streamsMetrics.rocksDBMetricsRecordingTrigger().addMetricsRecorder(this);
    } else if (storeToValueProviders.containsKey(segmentName)) {
        throw new IllegalStateException("Value providers for store " + segmentName + " of task " + taskId +
            " has been already added. This is a bug in Kafka Streams. " +
            "Please open a bug report under https://issues.apache.org/jira/projects/KAFKA/issues");
    }
    // Cross-segment consistency checks (e.g. null cache vs. non-null caches elsewhere).
    verifyDbAndCacheAndStatistics(segmentName, db, cache, statistics);
    logger.debug("Adding value providers for store {} of task {}", segmentName, taskId);
    storeToValueProviders.put(segmentName, new DbAndCacheAndStatistics(db, cache, statistics));
}
// Mixing a null cache (first segment) with a non-null cache (second segment) in the
// same recorder must be rejected as inconsistent.
@Test
public void shouldThrowIfCacheToAddIsNullButExistingCacheIsNotNull() {
    recorder.addValueProviders(SEGMENT_STORE_NAME_1, dbToAdd1, null, statisticsToAdd1);
    final Throwable exception = assertThrows(
        IllegalStateException.class,
        () -> recorder.addValueProviders(SEGMENT_STORE_NAME_2, dbToAdd2, cacheToAdd1, statisticsToAdd1)
    );
    assertThat(
        exception.getMessage(),
        is("Cache for segment " + SEGMENT_STORE_NAME_2 + " of task " + TASK_ID1 +
            " is not null although the cache of another segment in this metrics recorder is null. " +
            "This is a bug in Kafka Streams. " +
            "Please open a bug report under https://issues.apache.org/jira/projects/KAFKA/issues")
    );
}
/**
 * Receive/format/acknowledge loop for the console share consumer.
 *
 * <p>Terminates on consumer wakeup or receive errors, after {@code maxMessages}
 * messages ({@code -1} means unbounded), or when the output stream reports an
 * error. Formatting failures either reject just the offending message (when
 * {@code rejectMessageOnError}) or abort the loop by rethrowing.
 */
static void process(int maxMessages, MessageFormatter formatter, ConsumerWrapper consumer, PrintStream output,
                    boolean rejectMessageOnError, AcknowledgeType acknowledgeType) {
    while (messageCount < maxMessages || maxMessages == -1) {
        ConsumerRecord<byte[], byte[]> msg;
        try {
            msg = consumer.receive();
        } catch (WakeupException we) {
            LOG.trace("Caught WakeupException because consumer is shutdown, ignore and terminate.");
            // Consumer will be closed
            return;
        } catch (Throwable t) {
            LOG.error("Error processing message, terminating consumer process: ", t);
            // Consumer will be closed
            return;
        }
        messageCount += 1;
        try {
            // Re-wrap the record with zeroed serialized sizes before formatting.
            formatter.writeTo(new ConsumerRecord<>(msg.topic(), msg.partition(), msg.offset(),
                msg.timestamp(), msg.timestampType(), 0, 0, msg.key(), msg.value(),
                msg.headers(), Optional.empty()), output);
            consumer.acknowledge(msg, acknowledgeType);
        } catch (Throwable t) {
            if (rejectMessageOnError) {
                // Reject only this message and keep consuming.
                LOG.error("Error processing message, rejecting this message: ", t);
                consumer.acknowledge(msg, AcknowledgeType.REJECT);
            } else {
                // Consumer will be closed
                throw t;
            }
        }
        if (checkErr(output)) {
            // Consumer will be closed
            return;
        }
    }
}
// If System.out reports an error after the first record is printed, the processing
// loop must stop even though maxMessages is unbounded (-1).
@Test
public void shouldStopWhenOutputCheckErrorFails() {
    ConsoleShareConsumer.ConsumerWrapper consumer = mock(ConsoleShareConsumer.ConsumerWrapper.class);
    MessageFormatter formatter = mock(MessageFormatter.class);
    PrintStream printStream = mock(PrintStream.class);
    ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>("foo", 1, 1, new byte[0], new byte[0]);
    when(consumer.receive()).thenReturn(record);
    //Simulate an error on System.out after the first record has been printed
    when(printStream.checkError()).thenReturn(true);
    ConsoleShareConsumer.process(-1, formatter, consumer, printStream, true, AcknowledgeType.ACCEPT);
    verify(formatter).writeTo(any(), eq(printStream));
    verify(consumer).receive();
    verify(printStream).checkError();
    consumer.cleanup();
}
/**
 * Strips leading and trailing whitespace using the Unicode-aware
 * {@link String#strip()}. A {@code null} input yields {@code null}.
 *
 * @param input the string to trim, may be {@code null}
 * @return the stripped string, or {@code null} when the input was {@code null}
 */
public static String trim(String input) {
    return input == null ? null : input.strip();
}
// trim must be null-safe and strip ASCII whitespace, tabs/newlines, and Unicode
// space characters (which String.trim() would leave in place).
@Test
void testTrim() {
    // Test with null input
    assertNull(StringUtil.trim(null));
    // Test with an empty string
    assertEquals("", StringUtil.trim(""));
    // Test with spaces
    assertEquals("Hello, World!", StringUtil.trim(" Hello, World! "));
    // Test with Unicode spaces
    // Unicode character for em quad (\u2001) used for demonstration
    assertEquals("Hello, World!", StringUtil.trim("\u2001Hello, World!\u2001"));
    // Test with tabs and newlines
    assertEquals("Hello, World!", StringUtil.trim("\t\nHello, World!\t\n"));
    // Test with only leading spaces
    assertEquals("Hello, World!", StringUtil.trim(" Hello, World!"));
    // Test with only trailing spaces
    assertEquals("Hello, World!", StringUtil.trim("Hello, World! "));
    // Test with a mix of spaces and Unicode characters
    assertEquals("Hello, World!", StringUtil.trim(" \tHello, World!\u2001 "));
    // Test with a string containing only spaces and Unicode characters
    assertEquals("", StringUtil.trim(" \t\u2001 "));
}
/**
 * Creates a StreamDecoder that delegates per-element decoding to the given
 * iterator decoder (no delegate decoder for non-stream types).
 */
public static StreamDecoder create(Decoder iteratorDecoder) {
    return new StreamDecoder(iteratorDecoder, null);
}
// A response body of "foo\nbar" should decode to a lazy Stream yielding ["foo", "bar"].
@Test
void simpleStreamTest() {
    MockWebServer server = new MockWebServer();
    server.enqueue(new MockResponse().setBody("foo\nbar"));
    StreamInterface api = Feign.builder()
        .decoder(StreamDecoder.create(
            (response, type) -> new BufferedReader(response.body().asReader(UTF_8)).lines()
                .iterator()))
        .doNotCloseAfterDecode()
        .target(StreamInterface.class, server.url("/").toString());
    try (Stream<String> stream = api.get()) {
        assertThat(stream.collect(Collectors.toList())).isEqualTo(Arrays.asList("foo", "bar"));
    }
}
/**
 * Loads the given partitions, trying the preferred view first and falling back to
 * the secondary view via {@code execute} if the preferred one fails.
 */
@Override
public void loadPartitions(List<String> partitionPaths) {
    execute(
        () -> {
            preferredView.loadPartitions(partitionPaths);
            return null;
        },
        () -> {
            getSecondaryView().loadPartitions(partitionPaths);
            return null;
        });
}
// Three phases: (1) primary succeeds, secondary never touched; (2) primary fails,
// secondary is used as fallback; (3) both fail, the exception propagates.
@Test
public void testLoadPartitions() {
    String partitionPath = "/table2";
    fsView.loadPartitions(Collections.singletonList(partitionPath));
    verify(primary, times(1)).loadPartitions(Collections.singletonList(partitionPath));
    verify(secondary, never()).loadPartitions(any());
    verify(secondaryViewSupplier, never()).get();
    resetMocks();
    when(secondaryViewSupplier.get()).thenReturn(secondary);
    doThrow(new RuntimeException()).when(primary).loadPartitions(Collections.singletonList(partitionPath));
    fsView.loadPartitions(Collections.singletonList(partitionPath));
    verify(primary, times(1)).loadPartitions(Collections.singletonList(partitionPath));
    verify(secondary, times(1)).loadPartitions(Collections.singletonList(partitionPath));
    resetMocks();
    doThrow(new RuntimeException()).when(secondary).loadPartitions(Collections.singletonList(partitionPath));
    assertThrows(RuntimeException.class, () -> {
        fsView.loadPartitions(Collections.singletonList(partitionPath));
    });
}
/**
 * Maps a SQL Server column type definition to a SeaTunnel {@link Column},
 * preserving name, nullability, default value, comment, and — where applicable —
 * precision/scale/length in both the SeaTunnel data type and the recorded source type.
 *
 * @throws SeaTunnelRuntimeException (via CommonError) for unsupported SQL Server types
 */
@Override
public Column convert(BasicTypeDefine typeDefine) {
    PhysicalColumn.PhysicalColumnBuilder builder =
        PhysicalColumn.builder()
            .name(typeDefine.getName())
            .nullable(typeDefine.isNullable())
            .defaultValue(typeDefine.getDefaultValue())
            .comment(typeDefine.getComment());
    String sqlServerType = typeDefine.getDataType().toUpperCase();
    switch (sqlServerType) {
        // ---- boolean / integer types (IDENTITY variants normalize to the base type) ----
        case SQLSERVER_BIT:
            builder.sourceType(SQLSERVER_BIT);
            builder.dataType(BasicType.BOOLEAN_TYPE);
            break;
        case SQLSERVER_TINYINT:
        case SQLSERVER_TINYINT_IDENTITY:
            builder.sourceType(SQLSERVER_TINYINT);
            builder.dataType(BasicType.SHORT_TYPE);
            break;
        case SQLSERVER_SMALLINT:
        case SQLSERVER_SMALLINT_IDENTITY:
            builder.sourceType(SQLSERVER_SMALLINT);
            builder.dataType(BasicType.SHORT_TYPE);
            break;
        case SQLSERVER_INTEGER:
        case SQLSERVER_INTEGER_IDENTITY:
        case SQLSERVER_INT:
        case SQLSERVER_INT_IDENTITY:
            builder.sourceType(SQLSERVER_INT);
            builder.dataType(BasicType.INT_TYPE);
            break;
        case SQLSERVER_BIGINT:
        case SQLSERVER_BIGINT_IDENTITY:
            builder.sourceType(SQLSERVER_BIGINT);
            builder.dataType(BasicType.LONG_TYPE);
            break;
        // ---- floating point ----
        case SQLSERVER_REAL:
            builder.sourceType(SQLSERVER_REAL);
            builder.dataType(BasicType.FLOAT_TYPE);
            break;
        case SQLSERVER_FLOAT:
            // SQL Server FLOAT(1..24) is single precision (i.e. REAL), otherwise double.
            if (typeDefine.getPrecision() != null && typeDefine.getPrecision() <= 24) {
                builder.sourceType(SQLSERVER_REAL);
                builder.dataType(BasicType.FLOAT_TYPE);
            } else {
                builder.sourceType(SQLSERVER_FLOAT);
                builder.dataType(BasicType.DOUBLE_TYPE);
            }
            break;
        // ---- exact numerics ----
        case SQLSERVER_DECIMAL:
        case SQLSERVER_NUMERIC:
            builder.sourceType(
                String.format(
                    "%s(%s,%s)",
                    SQLSERVER_DECIMAL, typeDefine.getPrecision(), typeDefine.getScale()));
            builder.dataType(
                new DecimalType(
                    typeDefine.getPrecision().intValue(), typeDefine.getScale()));
            builder.columnLength(typeDefine.getPrecision());
            builder.scale(typeDefine.getScale());
            break;
        case SQLSERVER_MONEY:
            builder.sourceType(SQLSERVER_MONEY);
            builder.dataType(
                new DecimalType(
                    typeDefine.getPrecision().intValue(), typeDefine.getScale()));
            builder.columnLength(typeDefine.getPrecision());
            builder.scale(typeDefine.getScale());
            break;
        case SQLSERVER_SMALLMONEY:
            builder.sourceType(SQLSERVER_SMALLMONEY);
            builder.dataType(
                new DecimalType(
                    typeDefine.getPrecision().intValue(), typeDefine.getScale()));
            builder.columnLength(typeDefine.getPrecision());
            builder.scale(typeDefine.getScale());
            break;
        // ---- character types (lengths are scaled for multi-byte encodings) ----
        case SQLSERVER_CHAR:
            builder.sourceType(String.format("%s(%s)", SQLSERVER_CHAR, typeDefine.getLength()));
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(
                TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength()));
            break;
        case SQLSERVER_NCHAR:
            builder.sourceType(
                String.format("%s(%s)", SQLSERVER_NCHAR, typeDefine.getLength()));
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(
                TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength()));
            break;
        case SQLSERVER_VARCHAR:
            // Length -1 denotes VARCHAR(MAX).
            if (typeDefine.getLength() == -1) {
                builder.sourceType(MAX_VARCHAR);
                builder.columnLength(TypeDefineUtils.doubleByteTo4ByteLength(POWER_2_31 - 1));
            } else {
                builder.sourceType(
                    String.format("%s(%s)", SQLSERVER_VARCHAR, typeDefine.getLength()));
                builder.columnLength(
                    TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength()));
            }
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case SQLSERVER_NVARCHAR:
            // Length -1 denotes NVARCHAR(MAX).
            if (typeDefine.getLength() == -1) {
                builder.sourceType(MAX_NVARCHAR);
                builder.columnLength(TypeDefineUtils.doubleByteTo4ByteLength(POWER_2_31 - 1));
            } else {
                builder.sourceType(
                    String.format("%s(%s)", SQLSERVER_NVARCHAR, typeDefine.getLength()));
                builder.columnLength(
                    TypeDefineUtils.doubleByteTo4ByteLength(typeDefine.getLength()));
            }
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case SQLSERVER_TEXT:
            builder.sourceType(SQLSERVER_TEXT);
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(POWER_2_31 - 1);
            break;
        case SQLSERVER_NTEXT:
            builder.sourceType(SQLSERVER_NTEXT);
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(POWER_2_30 - 1);
            break;
        case SQLSERVER_XML:
            builder.sourceType(SQLSERVER_XML);
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(POWER_2_31 - 1);
            break;
        case SQLSERVER_UNIQUEIDENTIFIER:
            builder.sourceType(SQLSERVER_UNIQUEIDENTIFIER);
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(TypeDefineUtils.charTo4ByteLength(typeDefine.getLength()));
            break;
        case SQLSERVER_SQLVARIANT:
            builder.sourceType(SQLSERVER_SQLVARIANT);
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(typeDefine.getLength());
            break;
        // ---- binary types ----
        case SQLSERVER_BINARY:
            builder.sourceType(
                String.format("%s(%s)", SQLSERVER_BINARY, typeDefine.getLength()));
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(typeDefine.getLength());
            break;
        case SQLSERVER_VARBINARY:
            // Length -1 denotes VARBINARY(MAX).
            if (typeDefine.getLength() == -1) {
                builder.sourceType(MAX_VARBINARY);
                builder.columnLength(POWER_2_31 - 1);
            } else {
                builder.sourceType(
                    String.format("%s(%s)", SQLSERVER_VARBINARY, typeDefine.getLength()));
                builder.columnLength(typeDefine.getLength());
            }
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            break;
        case SQLSERVER_IMAGE:
            builder.sourceType(SQLSERVER_IMAGE);
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(POWER_2_31 - 1);
            break;
        case SQLSERVER_TIMESTAMP:
            // SQL Server TIMESTAMP/ROWVERSION is an 8-byte binary, not a datetime.
            builder.sourceType(SQLSERVER_TIMESTAMP);
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(8L);
            break;
        // ---- date/time types ----
        case SQLSERVER_DATE:
            builder.sourceType(SQLSERVER_DATE);
            builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
            break;
        case SQLSERVER_TIME:
            builder.sourceType(String.format("%s(%s)", SQLSERVER_TIME, typeDefine.getScale()));
            builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
            builder.scale(typeDefine.getScale());
            break;
        case SQLSERVER_DATETIME:
            // DATETIME has a fixed millisecond precision.
            builder.sourceType(SQLSERVER_DATETIME);
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            builder.scale(3);
            break;
        case SQLSERVER_DATETIME2:
            builder.sourceType(
                String.format("%s(%s)", SQLSERVER_DATETIME2, typeDefine.getScale()));
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            builder.scale(typeDefine.getScale());
            break;
        case SQLSERVER_DATETIMEOFFSET:
            // NOTE(review): mapped to a local datetime type — the offset component
            // is not represented; verify downstream handling.
            builder.sourceType(
                String.format("%s(%s)", SQLSERVER_DATETIMEOFFSET, typeDefine.getScale()));
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            builder.scale(typeDefine.getScale());
            break;
        case SQLSERVER_SMALLDATETIME:
            builder.sourceType(SQLSERVER_SMALLDATETIME);
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            break;
        default:
            throw CommonError.convertToSeaTunnelTypeError(
                DatabaseIdentifier.SQLSERVER, sqlServerType, typeDefine.getName());
    }
    return builder.build();
}
// time(scale) must map to LOCAL_TIME_TYPE, preserving the scale and recording
// "time(scale)" as the source type.
@Test
public void testConvertTime() {
    BasicTypeDefine<Object> typeDefine =
        BasicTypeDefine.builder()
            .name("test")
            .columnType("time")
            .dataType("time")
            .scale(3)
            .build();
    Column column = SqlServerTypeConverter.INSTANCE.convert(typeDefine);
    Assertions.assertEquals(typeDefine.getName(), column.getName());
    Assertions.assertEquals(LocalTimeType.LOCAL_TIME_TYPE, column.getDataType());
    Assertions.assertEquals(typeDefine.getScale(), column.getScale());
    Assertions.assertEquals(
        String.format("%s(%s)", typeDefine.getDataType(), typeDefine.getScale()),
        column.getSourceType().toLowerCase());
}
/** Returns the (mutable) eviction configuration held by this config. */
public EvictionConfig getEvictionConfig() {
    return evictionConfig;
}
// A positive eviction size must be stored and read back unchanged.
@Test
public void testMaxSize_whenValueIsPositive_thenSetValue() {
    final int expectedSize = 4531;
    config.getEvictionConfig().setSize(expectedSize);
    assertEquals(expectedSize, config.getEvictionConfig().getSize());
}
/**
 * Forwards a device-state queue message to the state service, first logging it to
 * the stats collector when stats are enabled.
 */
void forwardToStateService(DeviceStateServiceMsgProto deviceStateServiceMsg, TbCallback callback) {
    if (statsEnabled) {
        stats.log(deviceStateServiceMsg);
    }
    stateService.onQueueMsg(deviceStateServiceMsg, callback);
}
// Forwarding a disconnect message must invoke onDeviceDisconnect with the decoded
// tenant/device IDs and timestamp, and complete the callback successfully.
@Test
public void givenProcessingSuccess_whenForwardingDisconnectMsgToStateService_thenOnSuccessCallbackIsCalled() {
    // GIVEN
    var disconnectMsg = TransportProtos.DeviceDisconnectProto.newBuilder()
            .setTenantIdMSB(tenantId.getId().getMostSignificantBits())
            .setTenantIdLSB(tenantId.getId().getLeastSignificantBits())
            .setDeviceIdMSB(deviceId.getId().getMostSignificantBits())
            .setDeviceIdLSB(deviceId.getId().getLeastSignificantBits())
            .setLastDisconnectTime(time)
            .build();
    doCallRealMethod().when(defaultTbCoreConsumerServiceMock).forwardToStateService(disconnectMsg, tbCallbackMock);
    // WHEN
    defaultTbCoreConsumerServiceMock.forwardToStateService(disconnectMsg, tbCallbackMock);
    // THEN
    then(stateServiceMock).should().onDeviceDisconnect(tenantId, deviceId, time);
    then(tbCallbackMock).should().onSuccess();
    then(tbCallbackMock).should(never()).onFailure(any());
}
/**
 * Loads the failover strategy factory configured via
 * {@code JobManagerOptions.EXECUTION_FAILOVER_STRATEGY}.
 *
 * @param config the configuration to read the strategy name from, not null
 * @return the matching failover strategy factory
 * @throws IllegalConfigurationException if the configured strategy name is unknown
 */
public static FailoverStrategy.Factory loadFailoverStrategyFactory(final Configuration config) {
    checkNotNull(config);
    final String strategyParam = config.get(JobManagerOptions.EXECUTION_FAILOVER_STRATEGY);
    // Locale-independent lower-casing: the default toLowerCase() is locale-sensitive
    // (e.g. Turkish dotless-i would break matching of "PIPELINED...").
    switch (strategyParam.toLowerCase(java.util.Locale.ROOT)) {
        case FULL_RESTART_STRATEGY_NAME:
            return new RestartAllFailoverStrategy.Factory();
        case PIPELINED_REGION_RESTART_STRATEGY_NAME:
            return new RestartPipelinedRegionFailoverStrategy.Factory();
        default:
            throw new IllegalConfigurationException(
                "Unknown failover strategy: " + strategyParam);
    }
}
// Configuring the "full restart" strategy name must yield a RestartAllFailoverStrategy factory.
@Test
void testLoadRestartAllStrategyFactory() {
    final Configuration config = new Configuration();
    config.set(
            JobManagerOptions.EXECUTION_FAILOVER_STRATEGY,
            FailoverStrategyFactoryLoader.FULL_RESTART_STRATEGY_NAME);
    assertThat(FailoverStrategyFactoryLoader.loadFailoverStrategyFactory(config))
            .isInstanceOf(RestartAllFailoverStrategy.Factory.class);
}
/**
 * Creates a new {@code ObjectMapper} with this factory's standard modules registered.
 *
 * @return a freshly constructed and configured mapper instance
 */
public static ObjectMapper createObjectMapper() {
    final ObjectMapper objectMapper = new ObjectMapper();
    registerModules(objectMapper);
    return objectMapper;
}
/**
 * Verifies the factory-created ObjectMapper has java.time support enabled: an
 * {@link Instant} round-trips through JSON unchanged in both directions.
 *
 * <p>Renamed from {@code testObjectMappeDateTimeSupportedEnabled} to fix the
 * "Mappe" typo; JUnit discovers tests by annotation, so the rename is safe.
 */
@Test
void testObjectMapperDateTimeSupportEnabled() throws Exception {
    final ObjectMapper mapper = JacksonMapperFactory.createObjectMapper();

    final String instantString = "2022-08-07T12:00:33.107787800Z";
    final Instant instant = Instant.parse(instantString);
    final String instantJson = String.format("{\"data\":\"%s\"}", instantString);

    assertThat(mapper.writeValueAsString(new TypeWithInstant(instant))).isEqualTo(instantJson);
    assertThat(mapper.readValue(instantJson, TypeWithInstant.class).data).isEqualTo(instant);
}
/**
 * Determines whether traversal of the runtime DAG is complete for the given
 * per-step status flags and restart configuration. Builds parent/child adjacency
 * maps via {@code prepareDagForTraversal} and delegates to the overload that
 * walks the prepared deque.
 *
 * @param runtimeDag   step id to transition mapping describing the DAG
 * @param idStatusMap  step id to completion flag for steps evaluated so far
 * @param restartConfig restart configuration constraining the traversal
 * @return true if the DAG run is done under the given statuses
 */
public static boolean isDone(
    Map<String, StepTransition> runtimeDag,
    Map<String, Boolean> idStatusMap,
    RestartConfig restartConfig) {
  Map<String, Set<String>> parentMap = new HashMap<>();
  Map<String, Set<String>> childMap = new HashMap<>();
  Deque<String> deque =
      prepareDagForTraversal(runtimeDag, idStatusMap.keySet(), restartConfig, parentMap, childMap);
  return isDone(idStatusMap, deque, parentMap, childMap);
}
// A restarted run should still count as done when a step has failed (job_3 FALSE)
// but the restart node's subtree (job_9, job_8) has completed.
@Test
public void testRestartDoneWithFailedStep() {
    Map<String, Boolean> idStatusMap = new LinkedHashMap<>();
    idStatusMap.put("job_3", Boolean.FALSE);
    idStatusMap.put("job_9", Boolean.TRUE);
    idStatusMap.put("job_8", Boolean.TRUE);
    Assert.assertTrue(
        DagHelper.isDone(
            runtimeDag1,
            idStatusMap,
            RestartConfig.builder().addRestartNode("sample-dag-test-1", 1, "job_9").build()));
}
/**
 * Returns the final element of the given list.
 *
 * <p>The empty-list precondition is enforced with a plain stdlib
 * {@link IllegalArgumentException} instead of Guava's {@code checkArgument},
 * removing an external dependency while keeping the identical exception type
 * and message.
 *
 * @param strings the list to read from; must not be empty
 * @return the last element of {@code strings}
 * @throws IllegalArgumentException if the list is empty
 */
public static String lastElement(List<String> strings) {
    if (strings.isEmpty()) {
        throw new IllegalArgumentException("empty list");
    }
    return strings.get(strings.size() - 1);
}
// lastElement should return the second item of a two-element list.
// `l(...)` is a list-building helper defined elsewhere in this test class.
@Test
public void testLastElementDouble() {
    assertEquals("2", lastElement(l("first", "2")));
}
/**
 * Associates the value with the key only if no mapping currently exists,
 * delegating directly to the wrapped {@code cache}.
 *
 * @return the wrapped cache's result (presumably {@code true} when the value was
 *         installed — verify against the wrapped cache's contract)
 */
@Override
public boolean putIfAbsent(K key, V value) {
    return cache.putIfAbsent(key, value);
}
// putIfAbsent should install a value for a new key (returns true) and leave an
// existing mapping untouched (returns false), verified through the backing cache.
@Test
public void testPutIfAbsent() {
    cache.put(42, "oldValue");

    assertTrue(adapter.putIfAbsent(23, "newValue"));
    assertFalse(adapter.putIfAbsent(42, "newValue"));

    assertEquals("newValue", cache.get(23));
    assertEquals("oldValue", cache.get(42));
}
/**
 * Computes the number of bytes corresponding to the given fraction of the total
 * memory budget, rounding down.
 *
 * @param fraction fraction of the total memory size; validated by {@code validateFraction}
 *                 (which presumably rejects out-of-range values — see its definition)
 * @return {@code floor(totalMemorySize * fraction)} as a long
 */
public long computeMemorySize(double fraction) {
    validateFraction(fraction);
    return (long) Math.floor(memoryBudget.getTotalMemorySize() * fraction);
}
// A fraction above 1.0 must be rejected with an IllegalArgumentException.
@Test
void testComputeMemorySizeFailForTooLargeFraction() {
    assertThatExceptionOfType(IllegalArgumentException.class)
            .isThrownBy(() -> memoryManager.computeMemorySize(1.1));
}
/**
 * Extracts the value of the configured property or field name from the given
 * input object via {@code PropertyOrFieldSupport.EXTRACTION}.
 */
@Override
public Object apply(Object input) {
    return PropertyOrFieldSupport.EXTRACTION.getValueOf(propertyOrFieldName, input);
}
// With bare-name property extraction disabled, extracting "value" should fall back
// to the field rather than the bare `value()` accessor, yielding the raw int 42.
// The finally block restores the global Introspection flag for other tests.
@Test
void should_ignore_property_with_bare_name_method_when_disabled() {
    try {
        // GIVEN
        Introspection.setExtractBareNamePropertyMethods(false);
        BareOptionalIntHolder holder = new BareOptionalIntHolder(42);
        ByNameSingleExtractor underTest = new ByNameSingleExtractor("value");
        // WHEN
        Object result = underTest.apply(holder);
        // THEN
        then(result).isEqualTo(42);
    } finally {
        Introspection.setExtractBareNamePropertyMethods(true);
    }
}
/**
 * Lists the directory using the default chunk size configured in the host
 * preferences under the key {@code "s3.listing.chunksize"}; delegates to the
 * three-argument overload.
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    return this.list(directory, listener, new HostPreferences(session.getHost()).getInteger("s3.listing.chunksize"));
}
// Listing a non-existent bucket must raise NotfoundException.
@Test(expected = NotfoundException.class)
public void testListNotfound() throws Exception {
    final Path container = new Path("notfound.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume));
    new SpectraObjectListService(session).list(container, new DisabledListProgressListener());
}
/**
 * Iteratively traverses the schema graph rooted at {@code start}, driving the
 * visitor's callbacks without recursion. The explicit deque interleaves Schema
 * entries (nodes still to visit) with Supplier entries (deferred post-visit hooks
 * for non-terminal nodes). Each non-terminal schema (array/record/union/map) is
 * expanded at most once — tracked by identity to handle recursive schemas — and
 * any revisit is delivered as a terminal visit instead.
 *
 * @return the visitor's accumulated result ({@code visitor.get()})
 */
public static <T> T visit(final Schema start, final SchemaVisitor<T> visitor) {
    // Set of Visited Schemas
    IdentityHashMap<Schema, Schema> visited = new IdentityHashMap<>();
    // Stack that contains the Schemas to process and afterVisitNonTerminal
    // functions.
    // Deque<Either<Schema, Supplier<SchemaVisitorAction>>>
    // Using Either<...> has a cost we want to avoid...
    Deque<Object> dq = new ArrayDeque<>();
    dq.push(start);
    Object current;
    while ((current = dq.poll()) != null) {
        if (current instanceof Supplier) {
            // We are executing a non-terminal post visit.
            SchemaVisitor.SchemaVisitorAction action = ((Supplier<SchemaVisitor.SchemaVisitorAction>) current).get();
            switch (action) {
                case CONTINUE:
                    break;
                case SKIP_SIBLINGS:
                    // Drop all pending sibling Schemas; stop at the next deferred hook.
                    while (dq.peek() instanceof Schema) {
                        dq.remove();
                    }
                    break;
                case TERMINATE:
                    return visitor.get();
                case SKIP_SUBTREE:
                default:
                    // SKIP_SUBTREE is meaningless after the subtree was already visited.
                    throw new UnsupportedOperationException("Invalid action " + action);
            }
        } else {
            Schema schema = (Schema) current;
            boolean terminate;
            if (visited.containsKey(schema)) {
                // Already expanded once — treat the revisit as a terminal node.
                terminate = visitTerminal(visitor, schema, dq);
            } else {
                Schema.Type type = schema.getType();
                switch (type) {
                    case ARRAY:
                        terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getElementType()));
                        visited.put(schema, schema);
                        break;
                    case RECORD:
                        // Field schemas are pushed in reverse (descendingIterator) so they
                        // pop in declaration order.
                        terminate = visitNonTerminal(visitor, schema, dq,
                            () -> schema.getFields().stream().map(Field::schema)
                                .collect(Collectors.toCollection(ArrayDeque::new)).descendingIterator());
                        visited.put(schema, schema);
                        break;
                    case UNION:
                        terminate = visitNonTerminal(visitor, schema, dq, schema.getTypes());
                        visited.put(schema, schema);
                        break;
                    case MAP:
                        terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getValueType()));
                        visited.put(schema, schema);
                        break;
                    default:
                        // Primitive/named terminal types.
                        terminate = visitTerminal(visitor, schema, dq);
                        break;
                }
            }
            if (terminate) {
                return visitor.get();
            }
        }
    }
    return visitor.get();
}
// SKIP_SIBLINGS returned from a terminal visit of the root should still record the
// single "int" terminal and end the traversal with the accumulated result.
@Test
public void testVisit13() {
    String s12 = "{\"type\": \"int\"}";
    Assert.assertEquals("\"int\".", Schemas.visit(new Schema.Parser().parse(s12), new TestVisitor() {
        @Override
        public SchemaVisitorAction visitTerminal(Schema terminal) {
            sb.append(terminal).append('.');
            return SchemaVisitorAction.SKIP_SIBLINGS;
        }
    }));
}
/**
 * Handles a successful ConsumerGroupHeartbeat response: rejects responses that
 * carry an error code, ignores the response while leaving/unsubscribed/not in the
 * group, otherwise updates the member id and epoch and, when an assignment is
 * included and the current state can accept it, hands the assignment off for
 * reconciliation.
 *
 * @param response the heartbeat response body; must have error code NONE
 * @throws IllegalArgumentException if the response carries a non-NONE error code
 */
@Override
public void onHeartbeatSuccess(ConsumerGroupHeartbeatResponseData response) {
    if (response.errorCode() != Errors.NONE.code()) {
        String errorMessage = String.format(
            "Unexpected error in Heartbeat response. Expected no error, but received: %s",
            Errors.forCode(response.errorCode())
        );
        throw new IllegalArgumentException(errorMessage);
    }
    MemberState state = state();
    if (state == MemberState.LEAVING) {
        log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} is " +
            "already leaving the group.", memberId, memberEpoch);
        return;
    }
    if (state == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) {
        log.debug("Member {} with epoch {} received a successful response to the heartbeat " +
            "to leave the group and completed the leave operation. ", memberId, memberEpoch);
        return;
    }
    if (isNotInGroup()) {
        log.debug("Ignoring heartbeat response received from broker. Member {} is in {} state" +
            " so it's not a member of the group. ", memberId, state);
        return;
    }

    // Update the group member id label in the client telemetry reporter if the member id has
    // changed. Initially the member id is empty, and it is updated when the member joins the
    // group. This is done here to avoid updating the label on every heartbeat response. Also
    // check if the member id is null, as the schema defines it as nullable.
    if (response.memberId() != null && !response.memberId().equals(memberId)) {
        clientTelemetryReporter.ifPresent(reporter -> reporter.updateMetricsLabels(
            Collections.singletonMap(ClientTelemetryProvider.GROUP_MEMBER_ID, response.memberId())));
    }

    this.memberId = response.memberId();
    updateMemberEpoch(response.memberEpoch());

    ConsumerGroupHeartbeatResponseData.Assignment assignment = response.assignment();
    if (assignment != null) {
        if (!state.canHandleNewAssignment()) {
            // New assignment received but member is in a state where it cannot take new
            // assignments (ex. preparing to leave the group)
            log.debug("Ignoring new assignment {} received from server because member is in {} state.",
                assignment, state);
            return;
        }
        // Re-key the assignment as topicId -> sorted partition set before reconciling.
        Map<Uuid, SortedSet<Integer>> newAssignment = new HashMap<>();
        assignment.topicPartitions().forEach(topicPartition ->
            newAssignment.put(topicPartition.topicId(), new TreeSet<>(topicPartition.partitions())));
        processAssignmentReceived(newAssignment);
    }
}
// A JOINING member should move to RECONCILING on any heartbeat carrying an
// assignment — whether the assignment is empty or populated.
@Test
public void testTransitionToReconcilingIfEmptyAssignmentReceived() {
    ConsumerMembershipManager membershipManager = createMembershipManagerJoiningGroup();
    assertEquals(MemberState.JOINING, membershipManager.state());

    ConsumerGroupHeartbeatResponse responseWithoutAssignment =
        createConsumerGroupHeartbeatResponse(new Assignment());
    membershipManager.onHeartbeatSuccess(responseWithoutAssignment.data());
    assertEquals(MemberState.RECONCILING, membershipManager.state());

    ConsumerGroupHeartbeatResponse responseWithAssignment =
        createConsumerGroupHeartbeatResponse(createAssignment(true));
    membershipManager.onHeartbeatSuccess(responseWithAssignment.data());
    assertEquals(MemberState.RECONCILING, membershipManager.state());
}
/**
 * Builds a {@link MapConfig} from the decoded client-message parameters, copying
 * mandatory settings unconditionally and each optional sub-configuration only when
 * it was present in the request (null checks / explicit "exists" flags).
 *
 * @return the reconstructed MapConfig as an IdentifiedDataSerializable
 */
@Override
@SuppressWarnings({"checkstyle:npathcomplexity", "checkstyle:cyclomaticcomplexity", "checkstyle:MethodLength"})
protected IdentifiedDataSerializable getConfig() {
    MapConfig config = new MapConfig(parameters.name);
    config.setAsyncBackupCount(parameters.asyncBackupCount);
    config.setBackupCount(parameters.backupCount);
    config.setCacheDeserializedValues(CacheDeserializedValues.valueOf(parameters.cacheDeserializedValues));
    if (parameters.listenerConfigs != null && !parameters.listenerConfigs.isEmpty()) {
        config.setEntryListenerConfigs(
            (List<EntryListenerConfig>) adaptListenerConfigs(parameters.listenerConfigs, parameters.userCodeNamespace));
    }
    if (parameters.merkleTreeConfig != null) {
        config.setMerkleTreeConfig(parameters.merkleTreeConfig);
    }
    if (parameters.eventJournalConfig != null) {
        config.setEventJournalConfig(parameters.eventJournalConfig);
    }
    if (parameters.hotRestartConfig != null) {
        config.setHotRestartConfig(parameters.hotRestartConfig);
    }
    config.setInMemoryFormat(InMemoryFormat.valueOf(parameters.inMemoryFormat));
    config.setAttributeConfigs(parameters.attributeConfigs);
    config.setReadBackupData(parameters.readBackupData);
    config.setStatisticsEnabled(parameters.statisticsEnabled);
    config.setPerEntryStatsEnabled(parameters.perEntryStatsEnabled);
    config.setIndexConfigs(parameters.indexConfigs);
    if (parameters.mapStoreConfig != null) {
        config.setMapStoreConfig(parameters.mapStoreConfig.asMapStoreConfig(serializationService,
            parameters.userCodeNamespace));
    }
    config.setTimeToLiveSeconds(parameters.timeToLiveSeconds);
    config.setMaxIdleSeconds(parameters.maxIdleSeconds);
    if (parameters.evictionConfig != null) {
        config.setEvictionConfig(parameters.evictionConfig.asEvictionConfig(serializationService));
    }
    if (parameters.mergePolicy != null) {
        config.setMergePolicyConfig(mergePolicyConfig(parameters.mergePolicy, parameters.mergeBatchSize));
    }
    if (parameters.nearCacheConfig != null) {
        config.setNearCacheConfig(parameters.nearCacheConfig.asNearCacheConfig(serializationService));
    }
    config.setPartitioningStrategyConfig(getPartitioningStrategyConfig());
    if (parameters.partitionLostListenerConfigs != null && !parameters.partitionLostListenerConfigs.isEmpty()) {
        config.setPartitionLostListenerConfigs(
            (List<MapPartitionLostListenerConfig>) adaptListenerConfigs(parameters.partitionLostListenerConfigs,
                parameters.userCodeNamespace));
    }
    config.setSplitBrainProtectionName(parameters.splitBrainProtectionName);
    if (parameters.queryCacheConfigs != null && !parameters.queryCacheConfigs.isEmpty()) {
        // Each holder is deserialized into a concrete QueryCacheConfig before being attached.
        List<QueryCacheConfig> queryCacheConfigs = new ArrayList<>(parameters.queryCacheConfigs.size());
        for (QueryCacheConfigHolder holder : parameters.queryCacheConfigs) {
            queryCacheConfigs.add(holder.asQueryCacheConfig(serializationService, parameters.userCodeNamespace));
        }
        config.setQueryCacheConfigs(queryCacheConfigs);
    }
    config.setWanReplicationRef(parameters.wanReplicationRef);
    config.setMetadataPolicy(MetadataPolicy.getById(parameters.metadataPolicy));
    // The following fields use explicit "exists" flags (protocol-level optionality)
    // rather than null checks.
    if (parameters.isDataPersistenceConfigExists) {
        config.setDataPersistenceConfig(parameters.dataPersistenceConfig);
    }
    if (parameters.isTieredStoreConfigExists) {
        config.setTieredStoreConfig(parameters.tieredStoreConfig);
    }
    if (parameters.isPartitioningAttributeConfigsExists) {
        config.setPartitioningAttributeConfigs(parameters.partitioningAttributeConfigs);
    }
    if (parameters.isUserCodeNamespaceExists) {
        config.setUserCodeNamespace(parameters.userCodeNamespace);
    }
    return config;
}
// Round-trips a MapConfig with partitioning attribute configs through the
// DynamicConfigAddMapConfig codec and the message task, asserting the decoded
// config equals the original.
@Test
public void testPartitioningAttributeConfigsTransmittedCorrectly() {
    MapConfig mapConfig = new MapConfig("my-map");
    mapConfig.setPartitioningAttributeConfigs(Arrays.asList(
            new PartitioningAttributeConfig("attr1"),
            new PartitioningAttributeConfig("attr2")
    ));

    ClientMessage addMapConfigClientMessage = DynamicConfigAddMapConfigCodec.encodeRequest(
            mapConfig.getName(),
            mapConfig.getBackupCount(),
            mapConfig.getAsyncBackupCount(),
            mapConfig.getTimeToLiveSeconds(),
            mapConfig.getMaxIdleSeconds(),
            null,
            mapConfig.isReadBackupData(),
            mapConfig.getCacheDeserializedValues().name(),
            mapConfig.getMergePolicyConfig().getPolicy(),
            mapConfig.getMergePolicyConfig().getBatchSize(),
            mapConfig.getInMemoryFormat().name(),
            null,
            null,
            mapConfig.isStatisticsEnabled(),
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            null,
            mapConfig.getMetadataPolicy().getId(),
            mapConfig.isPerEntryStatsEnabled(),
            mapConfig.getDataPersistenceConfig(),
            mapConfig.getTieredStoreConfig(),
            mapConfig.getPartitioningAttributeConfigs(),
            mapConfig.getUserCodeNamespace()
    );

    AddMapConfigMessageTask addMapConfigMessageTask = createMessageTask(addMapConfigClientMessage);
    addMapConfigMessageTask.run();
    MapConfig transmittedMapConfig = (MapConfig) addMapConfigMessageTask.getConfig();
    assertEquals(mapConfig, transmittedMapConfig);
}
/**
 * Returns an initializer that loads the dynamic property source (if configured)
 * and registers it with highest precedence in the application context's
 * environment.
 */
public static ApplicationContextInitializer<ConfigurableApplicationContext> dynamicConfigPropertiesInitializer() {
    return appCtx ->
        new DynamicConfigOperations(appCtx)
            .loadDynamicPropertySource()
            .ifPresent(source -> appCtx.getEnvironment().getPropertySources().addFirst(source));
}
// With the enabling env vars set and a readable props file, the initializer should
// add "dynamicProperties" as the first (highest-precedence) property source.
@Test
void initializerAddsDynamicPropertySourceIfAllEnvVarsAreSet() throws Exception {
    Path propsFilePath = tmpDir.resolve("props.yaml");
    Files.writeString(propsFilePath, SAMPLE_YAML_CONFIG, StandardOpenOption.CREATE);

    MutablePropertySources propertySources = new MutablePropertySources();
    propertySources.addFirst(new MapPropertySource("test", Map.of("testK", "testV")));
    when(envMock.getPropertySources()).thenReturn(propertySources);
    mockEnvWithVars(Map.of(
        DYNAMIC_CONFIG_ENABLED_ENV_PROPERTY, "true",
        DYNAMIC_CONFIG_PATH_ENV_PROPERTY, propsFilePath.toString()
    ));

    DynamicConfigOperations.dynamicConfigPropertiesInitializer().initialize(ctxMock);

    assertThat(propertySources.size()).isEqualTo(2);
    assertThat(propertySources.stream())
        .element(0)
        .extracting(PropertySource::getName)
        .isEqualTo("dynamicProperties");
}
@Override /** * Parses the given text to transform it to the desired target type. * @param text The LLM output in string format. * @return The parsed output in the desired target type. */ public T convert(@NonNull String text) { try { // Remove leading and trailing whitespace text = text.trim(); // Check for and remove triple backticks and "json" identifier if (text.startsWith("```") && text.endsWith("```")) { // Remove the first line if it contains "```json" String[] lines = text.split("\n", 2); if (lines[0].trim().equalsIgnoreCase("```json")) { text = lines.length > 1 ? lines[1] : ""; } else { text = text.substring(3); // Remove leading ``` } // Remove trailing ``` text = text.substring(0, text.length() - 3); // Trim again to remove any potential whitespace text = text.trim(); } return (T) this.objectMapper.readValue(text, this.typeRef); } catch (JsonProcessingException e) { logger.error("Could not parse the given text to the desired target type:" + text + " into " + this.typeRef); throw new RuntimeException(e); } }
// JSON property names declared via Jackson annotations (snake_case here) should map
// onto the target bean's differently-named Java property during conversion.
@Test
public void convertClassTypeWithJsonAnnotations() {
    var converter = new BeanOutputConverter<>(TestClassWithJsonAnnotations.class);
    var testClass = converter.convert("{ \"string_property\": \"some value\" }");
    assertThat(testClass.getSomeString()).isEqualTo("some value");
}
/**
 * Validates, normalizes, and persists a brand-new client registration.
 *
 * <p>Steps, in order: reject entities that already carry an id; reject blacklisted
 * redirect URIs; generate a client id when absent (secrets are never auto-generated
 * — public clients have none); enforce refresh-token/offline_access consistency;
 * enforce JWKS vs. JWKS-URI exclusivity; apply HEART-mode checks; stamp the
 * creation time; validate the sector identifier URI; strip reserved scopes; then
 * save and reset the stats cache.
 *
 * @param client the new client to save; its id must be null
 * @return the persisted client entity
 * @throws IllegalArgumentException if the client already has an id or a redirect
 *         URI is blacklisted (further checks may also throw — see the helpers)
 */
@Override
public ClientDetailsEntity saveNewClient(ClientDetailsEntity client) {
    if (client.getId() != null) { // if it's not null, it's already been saved, this is an error
        throw new IllegalArgumentException("Tried to save a new client with an existing ID: " + client.getId());
    }
    if (client.getRegisteredRedirectUri() != null) {
        for (String uri : client.getRegisteredRedirectUri()) {
            if (blacklistedSiteService.isBlacklisted(uri)) {
                throw new IllegalArgumentException("Client URI is blacklisted: " + uri);
            }
        }
    }
    // assign a random clientid if it's empty
    // NOTE: don't assign a random client secret without asking, since public clients have no secret
    if (Strings.isNullOrEmpty(client.getClientId())) {
        client = generateClientId(client);
    }
    // make sure that clients with the "refresh_token" grant type have the "offline_access" scope, and vice versa
    ensureRefreshTokenConsistency(client);
    // make sure we don't have both a JWKS and a JWKS URI
    ensureKeyConsistency(client);
    // check consistency when using HEART mode
    checkHeartMode(client);
    // timestamp this to right now
    client.setCreatedAt(new Date());
    // check the sector URI
    checkSectorIdentifierUri(client);
    ensureNoReservedScopes(client);
    ClientDetailsEntity c = clientRepository.saveClient(client);
    statsService.resetCache();
    return c;
}
// Saving a new client whose redirect URI is blacklisted must be rejected with
// an IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void saveNewClient_blacklisted() {
    ClientDetailsEntity client = Mockito.mock(ClientDetailsEntity.class);
    Mockito.when(client.getId()).thenReturn(null);

    String badUri = "badplace.xxx";
    Mockito.when(blacklistedSiteService.isBlacklisted(badUri)).thenReturn(true);
    Mockito.when(client.getRegisteredRedirectUri()).thenReturn(Sets.newHashSet(badUri));

    service.saveNewClient(client);
}
/**
 * Resolves the SQLite database file to use.
 *
 * <p>A non-blank {@code filename} is taken as-is; if it is blank, null, or points
 * at a directory, the platform default database path for "logback.db" is used
 * instead.
 *
 * @param filename the configured database path, possibly null or blank
 * @return the file to open as the database (never null)
 */
public File getDatabaseFile(String filename) {
    File candidate = null;
    if (filename != null && !filename.trim().isEmpty()) {
        candidate = new File(filename);
    }
    if (candidate == null || candidate.isDirectory()) {
        // Fall back to the default Android database location.
        candidate = new File(new AndroidContextUtil().getDatabasePath("logback.db"));
    }
    return candidate;
}
// Passing a directory path as the filename should fall back to the default
// "logback.db" database file.
@Test
public void dirAsFilenameResultsInDefault() throws IOException {
    final File file = appender.getDatabaseFile(tmp.newFolder().getAbsolutePath());
    assertThat(file, is(notNullValue()));
    assertThat(file.getName(), is("logback.db"));
}
/**
 * Formats the expression with default options: the always-false predicate passed
 * to {@code FormatOptions.of} disables whatever the predicate controls
 * (presumably identifier escaping — confirm in FormatOptions).
 */
public static String formatExpression(final Expression expression) {
    return formatExpression(expression, FormatOptions.of(s -> false));
}
// A MAP<INTEGER, BIGINT> type expression should render with both key and value types.
@Test
public void shouldFormatMap() {
    final SqlMap map = SqlTypes.map(SqlTypes.INTEGER, SqlTypes.BIGINT);
    assertThat(ExpressionFormatter.formatExpression(new Type(map)), equalTo("MAP<INTEGER, BIGINT>"));
}
/** Static factory creating a token bound to the given domain with the given secret string. */
public static Token of(TokenDomain domain, String secretTokenString) {
    return new Token(domain, secretTokenString);
}
// Token equality must be defined by (domain, content): equal only when both match.
@Test
void tokens_are_equality_comparable() {
    var td1 = TokenDomain.of("hash 1");
    var td2 = TokenDomain.of("hash 2");
    var td1_t1 = Token.of(td1, "foo");
    var td1_t2 = Token.of(td1, "foo");
    var td1_t3 = Token.of(td1, "bar");
    var td2_t1 = Token.of(td2, "foo");

    // Tokens in same domain with same content are equal
    assertEquals(td1_t1, td1_t2);
    // Tokens in same domain with different content are not equal
    assertNotEquals(td1_t1, td1_t3);
    // Tokens in different domains are not considered equal
    assertNotEquals(td1_t1, td2_t1);
}
/**
 * Returns a {@code Validator} that accepts only string values parseable as a URL.
 *
 * <p>Non-string values indicate a programming error (the validator is meant for
 * STRING config defs) and raise IllegalArgumentException; unparseable strings
 * raise ConfigException with the underlying parse failure's message.
 */
public static Validator validUrl() {
    return (name, val) -> {
        if (!(val instanceof String)) {
            throw new IllegalArgumentException("validator should only be used with STRING defs");
        }
        final String candidate = (String) val;
        try {
            new URL(candidate);
        } catch (final Exception e) {
            throw new ConfigException(name, val, "Not valid URL: " + e.getMessage());
        }
    };
}
// A well-formed http URL with host, port, and path must pass validation silently.
@Test
public void shouldNotThrowOnValidURL() {
    // Given:
    final Validator validator = ConfigValidators.validUrl();

    // When:
    validator.ensureValid("propName", "http://valid:25896/somePath");

    // Then: did not throw.
}
/**
 * Classifies the URI at the reader's position by its leading characters:
 * "//" prefix → Authority, a single "/" → Absolute, empty input → Undefined,
 * anything else → Rootless. Peeks ahead through a copy of the reader and
 * advances the real reader only over matched slashes.
 *
 * <p>NOTE(review): after peeking a non-'/' second character the code calls
 * {@code reader.skip(-1)}, rewinding the previously consumed '/' — confirm this
 * positioning is what downstream parsing expects.
 */
static URITypes findURIType(final StringReader reader) {
    final StringReader copy = reader.copy();
    if (!copy.endOfString()) {
        char c = (char) copy.read();
        if (c == '/') {
            // Consume the first '/'.
            reader.skip(1);
            if (!copy.endOfString()) {
                c = (char) copy.read();
                if (c == '/') {
                    // "//" — authority component follows.
                    reader.skip(1);
                    return URITypes.Authority;
                }
                reader.skip(-1);
            }
            return URITypes.Absolute;
        }
        return URITypes.Rootless;
    }
    return URITypes.Undefined;
}
// Table-driven check of URI classification: absolute, rootless, authority, and
// the undefined (empty-input) case.
@Test
public void testFindUriType() {
    final Map<String, HostParser.URITypes> tests = ImmutableMap.<String, HostParser.URITypes>builder()
        .put("/path", HostParser.URITypes.Absolute)
        .put("user@domain/path", HostParser.URITypes.Rootless)
        .put("//user@domain.tld:port/path", HostParser.URITypes.Authority)
        .put("", HostParser.URITypes.Undefined).build();

    for (Map.Entry<String, HostParser.URITypes> entry : tests.entrySet()) {
        final HostParser.StringReader reader = new HostParser.StringReader(entry.getKey());
        assertEquals(HostParser.findURIType(reader), entry.getValue());
    }
}
/**
 * Creates a pagination window starting at the given zero-based offset.
 *
 * @param offset zero-based index of the first item; must be {@code >= 0}
 * @param pageSize number of items per page; must be {@code >= 1}
 * @return the pagination descriptor
 * @throws IllegalArgumentException if either precondition is violated
 */
public static OffsetBasedPagination forOffset(int offset, int pageSize) {
    if (offset < 0) {
        throw new IllegalArgumentException("offset must be >= 0");
    }
    if (pageSize < 1) {
        throw new IllegalArgumentException("page size must be >= 1");
    }
    return new OffsetBasedPagination(offset, pageSize);
}
// A page size below 1 must be rejected with the exact precondition message.
@Test
void forOffset_whenNegativePageSize_shouldfailsWithIAE() {
    assertThatThrownBy(() -> OffsetBasedPagination.forOffset(1, -1))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("page size must be >= 1");
}
/**
 * Removes leading and trailing whitespace from the input.
 *
 * @param input the string to trim; may be null
 * @return the trimmed string, or null when the input is null
 */
@Udf
public String trim(
    @UdfParameter(
        description = "The string to trim") final String input) {
  return input == null ? null : input.trim();
}
// An input of only spaces and tabs should trim down to the empty string.
@Test
public void shouldReturnEmptyForWhitespaceInput() {
    final String result = udf.trim(" \t ");
    assertThat(result, is(""));
}