focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Formats a duration given in minutes as a single coarse-grained unit string:
 * days, hours, or minutes — whichever is the largest unit that fits.
 * The chosen unit's count is rounded via {@link Math#round(double)}.
 *
 * @param durationInMinutes total duration, in minutes
 * @return the formatted duration, or {@code ZERO} when the duration is 0
 */
static String formatDuration(long durationInMinutes) {
  if (durationInMinutes == 0) {
    return ZERO;
  }
  double dayCount = (double) durationInMinutes / DURATION_HOURS_IN_DAY / DURATION_OF_ONE_HOUR_IN_MINUTES;
  if (dayCount > DURATION_ALMOST_ONE) {
    // At least (almost) a full day: render in days only.
    return format(DURATION_DAYS_FORMAT, Math.round(dayCount));
  }
  double minutesLeft = durationInMinutes - (Math.floor(dayCount) * DURATION_HOURS_IN_DAY * DURATION_OF_ONE_HOUR_IN_MINUTES);
  double hourCount = minutesLeft / DURATION_OF_ONE_HOUR_IN_MINUTES;
  if (hourCount > DURATION_ALMOST_ONE) {
    // At least (almost) a full hour: render in hours only.
    return format(DURATION_HOURS_FORMAT, Math.round(hourCount));
  }
  double minuteCount = minutesLeft - (Math.floor(hourCount) * DURATION_OF_ONE_HOUR_IN_MINUTES);
  return format(DURATION_MINUTES_FORMAT, Math.round(minuteCount));
}
/**
 * formatDuration renders a duration as its single largest unit (days, hours,
 * or minutes), rounded; smaller remainders are not shown.
 * Fix: removed an exact duplicate of the {@code formatDuration(ONE_MINUTE)} assertion.
 */
@Test
public void format_duration() {
  // Zero renders as the plain "0" token.
  assertThat(formatDuration(0)).isEqualTo("0");
  // Exactly one unit of each granularity.
  assertThat(formatDuration(ONE_DAY)).isEqualTo("1d");
  assertThat(formatDuration(ONE_HOUR)).isEqualTo("1h");
  assertThat(formatDuration(ONE_MINUTE)).isEqualTo("1min");
  // Multiples of a single unit.
  assertThat(formatDuration(5 * ONE_DAY)).isEqualTo("5d");
  assertThat(formatDuration(2 * ONE_HOUR)).isEqualTo("2h");
  // Mixed durations: only the largest unit is shown (rounded).
  assertThat(formatDuration(5 * ONE_DAY + 3 * ONE_HOUR)).isEqualTo("5d");
  assertThat(formatDuration(3 * ONE_HOUR + 25 * ONE_MINUTE)).isEqualTo("3h");
  assertThat(formatDuration(5 * ONE_DAY + 3 * ONE_HOUR + 40 * ONE_MINUTE)).isEqualTo("5d");
}
/**
 * Returns an immutable snapshot of all ports currently held by the network store.
 *
 * @return immutable copy of the store's ports
 */
@Override
public Set<Port> ports() {
    final Set<Port> storedPorts = osNetworkStore.ports();
    return ImmutableSet.copyOf(storedPorts);
}
@Test
public void testGetPortsByNetworkId() {
    // Seed the store with the fixture networks/ports shared by these tests.
    createBasicNetworks();
    // NETWORK_ID has exactly one fixture port; an unknown id must yield none.
    assertEquals("Number of port did not match", 1, target.ports(NETWORK_ID).size());
    assertEquals("Number of port did not match", 0, target.ports(UNKNOWN_ID).size());
}
/**
 * Handles a share-group heartbeat for one member: creates/updates the member,
 * refreshes subscription metadata and the group epoch when needed, recomputes
 * the target assignment, reconciles the member against it, and builds the
 * heartbeat response. All state changes are emitted as {@code CoordinatorRecord}s
 * in the returned result rather than applied directly.
 *
 * @param groupId              group identifier (must resolve to a share group)
 * @param memberId             member id; empty on the very first heartbeat, in
 *                             which case a random Uuid is generated
 * @param memberEpoch          member epoch; 0 means join/rejoin
 * @param rackId               optional rack id (null to leave unchanged)
 * @param clientId             client id for bookkeeping
 * @param clientHost           client host for bookkeeping
 * @param subscribedTopicNames optional new subscription (null to leave unchanged)
 * @return records to persist plus the heartbeat response
 * @throws ApiException on validation failures (e.g. full group, unknown member)
 */
private CoordinatorResult<ShareGroupHeartbeatResponseData, CoordinatorRecord> shareGroupHeartbeat(
    String groupId,
    String memberId,
    int memberEpoch,
    String rackId,
    String clientId,
    String clientHost,
    List<String> subscribedTopicNames
) throws ApiException {
    final long currentTimeMs = time.milliseconds();
    final List<CoordinatorRecord> records = new ArrayList<>();

    // Get or create the share group.
    // Epoch 0 marks a (re)join, which is the only case allowed to create state.
    boolean createIfNotExists = memberEpoch == 0;
    final ShareGroup group = getOrMaybeCreatePersistedShareGroup(groupId, createIfNotExists);
    throwIfShareGroupIsFull(group, memberId);

    // Get or create the member.
    if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString();
    ShareGroupMember member = getOrMaybeSubscribeShareGroupMember(
        group,
        memberId,
        memberEpoch,
        createIfNotExists
    );

    // 1. Create or update the member. If the member is new or has changed, a ShareGroupMemberMetadataValue
    // record is written to the __consumer_offsets partition to persist the change. If the subscriptions have
    // changed, the subscription metadata is updated and persisted by writing a ShareGroupPartitionMetadataValue
    // record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have
    // changed, and persisted by writing a ShareGroupMetadataValue record to the partition.
    ShareGroupMember updatedMember = new ShareGroupMember.Builder(member)
        .maybeUpdateRackId(Optional.ofNullable(rackId))
        .maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames))
        .setClientId(clientId)
        .setClientHost(clientHost)
        .build();

    // Also appends the member-metadata record to `records` when a change is detected.
    boolean bumpGroupEpoch = hasMemberSubscriptionChanged(
        groupId,
        member,
        updatedMember,
        records
    );

    int groupEpoch = group.groupEpoch();
    Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata();
    SubscriptionType subscriptionType = group.subscriptionType();

    if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
        // The subscription metadata is updated in two cases:
        // 1) The member has updated its subscriptions;
        // 2) The refresh deadline has been reached.
        Map<String, Integer> subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember);
        subscriptionMetadata = group.computeSubscriptionMetadata(
            subscribedTopicNamesMap,
            metadataImage.topics(),
            metadataImage.cluster()
        );

        // Count the updated member even if it has not been materialized in the group yet.
        int numMembers = group.numMembers();
        if (!group.hasMember(updatedMember.memberId())) {
            numMembers++;
        }

        subscriptionType = ModernGroup.subscriptionType(
            subscribedTopicNamesMap,
            numMembers
        );

        if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
            log.info("[GroupId {}] Computed new subscription metadata: {}.", groupId, subscriptionMetadata);
            bumpGroupEpoch = true;
            records.add(newShareGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
        }

        if (bumpGroupEpoch) {
            groupEpoch += 1;
            records.add(newShareGroupEpochRecord(groupId, groupEpoch));
            log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch);
        }

        group.setMetadataRefreshDeadline(currentTimeMs + shareGroupMetadataRefreshIntervalMs, groupEpoch);
    }

    // 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between
    // the existing and the new target assignment is persisted to the partition.
    final int targetAssignmentEpoch;
    final Assignment targetAssignment;
    if (groupEpoch > group.assignmentEpoch()) {
        targetAssignment = updateTargetAssignment(
            group,
            groupEpoch,
            updatedMember,
            subscriptionMetadata,
            subscriptionType,
            records
        );
        targetAssignmentEpoch = groupEpoch;
    } else {
        targetAssignmentEpoch = group.assignmentEpoch();
        targetAssignment = group.targetAssignment(updatedMember.memberId());
    }

    // 3. Reconcile the member's assignment with the target assignment if the member is not
    // fully reconciled yet.
    updatedMember = maybeReconcile(
        groupId,
        updatedMember,
        targetAssignmentEpoch,
        targetAssignment,
        records
    );

    scheduleShareGroupSessionTimeout(groupId, memberId);

    // Prepare the response.
    ShareGroupHeartbeatResponseData response = new ShareGroupHeartbeatResponseData()
        .setMemberId(updatedMember.memberId())
        .setMemberEpoch(updatedMember.memberEpoch())
        .setHeartbeatIntervalMs(shareGroupHeartbeatIntervalMs);

    // The assignment is only provided in the following cases:
    // 1. The member just joined or rejoined to group (epoch equals to zero);
    // 2. The member's assignment has been updated.
    if (memberEpoch == 0 || hasAssignedPartitionsChanged(member, updatedMember)) {
        response.setAssignment(createShareGroupResponseAssignment(updatedMember));
    }

    return new CoordinatorResult<>(records, response);
}
/**
 * Verifies the request-validation error paths of shareGroupHeartbeat:
 * each malformed request must be rejected with InvalidRequestException
 * and the exact expected message.
 */
@Test
public void testShareGroupHeartbeatRequestValidation() {
    MockPartitionAssignor assignor = new MockPartitionAssignor("share");
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .withShareGroupAssignor(assignor)
        .build();
    Exception ex;

    // GroupId must be present in all requests.
    ex = assertThrows(InvalidRequestException.class, () -> context.shareGroupHeartbeat(
        new ShareGroupHeartbeatRequestData()));
    assertEquals("GroupId can't be empty.", ex.getMessage());

    // GroupId can't be all whitespaces.
    ex = assertThrows(InvalidRequestException.class, () -> context.shareGroupHeartbeat(
        new ShareGroupHeartbeatRequestData()
            .setGroupId(" ")));
    assertEquals("GroupId can't be empty.", ex.getMessage());

    // SubscribedTopicNames must be present and empty in the first request (epoch == 0).
    ex = assertThrows(InvalidRequestException.class, () -> context.shareGroupHeartbeat(
        new ShareGroupHeartbeatRequestData()
            .setGroupId("foo")
            .setMemberEpoch(0)));
    assertEquals("SubscribedTopicNames must be set in first request.", ex.getMessage());

    // MemberId must be non-empty in all requests except for the first one where it
    // could be empty (epoch != 0).
    ex = assertThrows(InvalidRequestException.class, () -> context.shareGroupHeartbeat(
        new ShareGroupHeartbeatRequestData()
            .setGroupId("foo")
            .setMemberEpoch(1)));
    assertEquals("MemberId can't be empty.", ex.getMessage());

    // RackId must be non-empty if provided in all requests.
    ex = assertThrows(InvalidRequestException.class, () -> context.shareGroupHeartbeat(
        new ShareGroupHeartbeatRequestData()
            .setGroupId("foo")
            .setMemberId(Uuid.randomUuid().toString())
            .setMemberEpoch(1)
            .setRackId("")));
    assertEquals("RackId can't be empty.", ex.getMessage());
}
/**
 * Fetches the windowed rows for {@code key} on one partition whose windows fall
 * inside both the start and end bounds, via a Kafka Streams interactive
 * WindowKeyQuery. The store query is bounded by a start-time range derived from
 * both bound ranges; rows are then re-filtered exactly against each bound.
 *
 * @param key               the key to look up
 * @param partition         partition whose state store is queried
 * @param windowStartBounds acceptable range for window start times
 * @param windowEndBounds   acceptable range for window end times
 * @param position          optional consistency position bound for the query
 * @return materialized rows plus the store's position
 * @throws MaterializationException on any query failure
 */
@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key,
    final int partition,
    final Range<Instant> windowStartBounds,
    final Range<Instant> windowEndBounds,
    final Optional<Position> position
) {
  try {
    // Collapse the two bound ranges into a single window-start scan range for the store.
    final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
    final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
    final WindowKeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query =
        WindowKeyQuery.withKeyAndWindowStartRange(key, lower, upper);
    StateQueryRequest<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> request =
        inStore(stateStore.getStateStoreName()).withQuery(query);
    if (position.isPresent()) {
      request = request.withPositionBound(PositionBound.at(position.get()));
    }
    final KafkaStreams streams = stateStore.getKafkaStreams();
    final StateQueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> result =
        streams.query(request);
    final QueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> queryResult =
        result.getPartitionResults().get(partition);
    if (queryResult.isFailure()) {
      throw failedQueryException(queryResult);
    }
    // A null result means no data for this key/range: return an empty iterator,
    // still carrying the store's position.
    if (queryResult.getResult() == null) {
      return KsMaterializedQueryResult.rowIteratorWithPosition(
          Collections.emptyIterator(), queryResult.getPosition());
    }
    // try-with-resources: the store iterator must be closed after draining.
    try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it = queryResult.getResult()) {
      final Builder<WindowedRow> builder = ImmutableList.builder();
      while (it.hasNext()) {
        final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();
        final Instant windowStart = Instant.ofEpochMilli(next.key);
        // The scan range is a superset of the requested bounds, so re-check exactly.
        if (!windowStartBounds.contains(windowStart)) {
          continue;
        }
        // Window end is derived from the fixed window size, not stored per row.
        final Instant windowEnd = windowStart.plus(windowSize);
        if (!windowEndBounds.contains(windowEnd)) {
          continue;
        }
        final TimeWindow window =
            new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());
        final WindowedRow row = WindowedRow.of(
            stateStore.schema(),
            new Windowed<>(key, window),
            next.value.value(),
            next.value.timestamp()
        );
        builder.add(row);
      }
      return KsMaterializedQueryResult.rowIteratorWithPosition(
          builder.build().iterator(), queryResult.getPosition());
    }
  } catch (final NotUpToBoundException | MaterializationException e) {
    // Already meaningful to callers; rethrow unchanged.
    throw e;
  } catch (final Exception e) {
    throw new MaterializationException("Failed to get value from materialized table", e);
  }
}
/**
 * A fetch-all with a closed window-END bound must include windows whose ends
 * land exactly on both endpoints of the bound. Window starts are derived by
 * subtracting WINDOW_SIZE from the end bound ("startEqiv").
 */
@Test
@SuppressWarnings("unchecked")
public void shouldReturnValuesForClosedEndBounds_fetchAll() {
    // Given:
    final Range<Instant> end = Range.closed(
        NOW,
        NOW.plusSeconds(10)
    );
    // Equivalent start-time range: end bound shifted back by one window size.
    // NOTE(review): both endpoints use lowerEndpoint(), yielding a single-point
    // range — presumably intentional for this fixture; confirm.
    final Range<Instant> startEqiv = Range.closed(
        end.lowerEndpoint().minus(WINDOW_SIZE),
        end.lowerEndpoint().minus(WINDOW_SIZE)
    );
    final StateQueryResult<KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>>> partitionResult =
        new StateQueryResult<>();
    final QueryResult<KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>>> queryResult =
        QueryResult.forResult(keyValueIterator);
    queryResult.setPosition(POSITION);
    partitionResult.addResult(PARTITION, queryResult);
    when(kafkaStreams.query(any(StateQueryRequest.class))).thenReturn(partitionResult);
    // Mock iterator yields two windowed entries, then must not be consulted again.
    when(keyValueIterator.hasNext())
        .thenReturn(true, true, false);
    when(keyValueIterator.next())
        .thenReturn(new KeyValue<>(new Windowed<>(A_KEY,
            new TimeWindow(startEqiv.lowerEndpoint().toEpochMilli(),
                startEqiv.lowerEndpoint().toEpochMilli() + WINDOW_SIZE.toMillis())), VALUE_1))
        .thenReturn(new KeyValue<>(new Windowed<>(A_KEY2,
            new TimeWindow(startEqiv.upperEndpoint().toEpochMilli(),
                startEqiv.upperEndpoint().toEpochMilli() + WINDOW_SIZE.toMillis())), VALUE_2))
        .thenThrow(new AssertionError());

    // When:
    final KsMaterializedQueryResult<WindowedRow> result = table.get(PARTITION, Range.all(), end);

    // Then: both rows are materialized in order, and the store position is surfaced.
    final Iterator<WindowedRow> rowIterator = result.getRowIterator();
    assertThat(rowIterator.hasNext(), is(true));
    assertThat(rowIterator.next(), is(WindowedRow.of(
        SCHEMA,
        windowedKey(startEqiv.lowerEndpoint()),
        VALUE_1.value(),
        VALUE_1.timestamp())));
    assertThat(rowIterator.hasNext(), is(true));
    assertThat(rowIterator.next(), is(WindowedRow.of(
        SCHEMA,
        windowedKey(A_KEY2, startEqiv.upperEndpoint()),
        VALUE_2.value(),
        VALUE_2.timestamp())));
    assertThat(rowIterator.hasNext(), is(false));
    assertThat(result.getPosition(), not(Optional.empty()));
    assertThat(result.getPosition().get(), is(POSITION));
}
/**
 * Returns the live instances of {@code serviceName}: registries whose last
 * modification is newer than now minus the health-check interval, restricted
 * to this client's cluster, converted to {@link ServiceInstance}s.
 *
 * @param serviceName name of the service to look up
 * @return matching service instances (possibly empty)
 */
@Override
public List<ServiceInstance> getInstances(String serviceName) {
    // Registries not refreshed within the health-check interval are considered stale.
    LocalDateTime oldestHealthyTime = LocalDateTime.now()
        .minusSeconds(this.discoveryProperties.getHealthCheckIntervalInSecond());
    List<ServiceRegistry> healthyRegistries =
        this.serviceRegistryService.findByServiceNameDataChangeLastModifiedTimeGreaterThan(
            serviceName,
            oldestHealthyTime
        );
    List<ServiceRegistry> clusterRegistries = filterByCluster(healthyRegistries, this.cluster);
    return clusterRegistries.stream()
        .map(DatabaseDiscoveryClientImpl::convert)
        .collect(Collectors.toList());
}
/**
 * getInstances must keep only registries matching the client's own cluster
 * ("1" here), even when the repository returns entries for other clusters.
 */
@Test
void getInstances_filterByCluster() {
    final String serviceName = "a-service";
    ServiceRegistryService serviceRegistryService = Mockito.mock(ServiceRegistryService.class);
    {
        // Repository stub returns three registries across clusters "1", "2" and "3".
        List<ServiceRegistry> serviceRegistryList = Arrays.asList(
            newServiceRegistry(serviceName, "http://localhost:8081/", "1"),
            newServiceRegistry("b-service", "http://localhost:8082/", "2"),
            newServiceRegistry("c-service", "http://localhost:8082/", "3")
        );
        Mockito.when(
            serviceRegistryService.findByServiceNameDataChangeLastModifiedTimeGreaterThan(
                eq(serviceName), any(LocalDateTime.class)))
            .thenReturn(serviceRegistryList);
    }
    // Client pinned to cluster "1": only the first registry should survive.
    DatabaseDiscoveryClient discoveryClient = new DatabaseDiscoveryClientImpl(
        serviceRegistryService,
        new ApolloServiceDiscoveryProperties(),
        "1"
    );
    List<ServiceInstance> serviceInstances = discoveryClient.getInstances(serviceName);
    assertEquals(1, serviceInstances.size());
    assertEquals(serviceName, serviceInstances.get(0).getServiceName());
    assertEquals("1", serviceInstances.get(0).getCluster());
}
/**
 * Returns the exception handler configured on this instance.
 *
 * @return the configured {@link ExceptionHandler}; may be {@code null} if
 *         none has been set — TODO confirm against the field's initialization
 */
public ExceptionHandler exceptionHandler() {
    return this.exceptionHandler;
}
/**
 * The exception handler passed to the setter must be the exact instance
 * returned by the getter.
 */
@Test
public void testExceptionHandler() {
    ExceptionHandler handler = mock(ExceptionHandler.class);
    Blade blade = Blade.create();
    blade.exceptionHandler(handler);
    assertEquals(handler, blade.exceptionHandler());
}
/**
 * Sets the extra-info map on this object.
 *
 * <p>NOTE(review): the map is stored by reference — no defensive copy — so
 * later mutations by the caller remain visible through this object; confirm
 * that is intended.
 *
 * @param extraInfo key/value details to associate with this object
 */
public void setExtraInfo(Map<String, String> extraInfo) {
    this.extraInfo = extraInfo;
}
@Test
void setExtraInfo() {
    // NOTE(review): empty body — this test provides no coverage of setExtraInfo.
    // TODO: set a map and assert it can be read back (e.g. via the matching getter).
}
/**
 * Converts a PostgreSQL WAL event into a pipeline {@link Record}.
 *
 * <p>Filtered events and non-row events become placeholder records; write,
 * update and delete row events are turned into data records using the table's
 * pipeline metadata.
 *
 * @param event WAL event to convert
 * @return data record for row events, placeholder record otherwise
 * @throws UnsupportedSQLOperationException for row-event subtypes with no handler
 */
public Record convert(final AbstractWALEvent event) {
    if (filter(event)) {
        return createPlaceholderRecord(event);
    }
    if (!(event instanceof AbstractRowEvent)) {
        return createPlaceholderRecord(event);
    }
    PipelineTableMetaData tableMetaData = getPipelineTableMetaData(((AbstractRowEvent) event).getTableName());
    if (event instanceof WriteRowEvent) {
        return handleWriteRowEvent((WriteRowEvent) event, tableMetaData);
    }
    if (event instanceof UpdateRowEvent) {
        return handleUpdateRowEvent((UpdateRowEvent) event, tableMetaData);
    }
    if (event instanceof DeleteRowEvent) {
        return handleDeleteRowEvent((DeleteRowEvent) event, tableMetaData);
    }
    // Fix: the original threw with an empty message, hiding which event type failed.
    throw new UnsupportedSQLOperationException("Unsupported WAL event type: " + event.getClass().getName());
}
@Test
void assertConvertDeleteRowEvent() {
    // A DeleteRowEvent is a row event, so convert() must yield a DataRecord
    // (not a placeholder) carrying the DELETE operation type.
    Record record = walEventConverter.convert(mockDeleteRowEvent());
    assertThat(record, instanceOf(DataRecord.class));
    assertThat(((DataRecord) record).getType(), is(PipelineSQLOperationType.DELETE));
}
/**
 * Decodes a raw syslog message into a {@link Message}, timing the parse.
 *
 * @param rawMessage raw message whose payload is decoded with this codec's charset
 * @return the parsed message, or whatever {@code parse} yields (may be null)
 */
@Nullable
@Override
public Message decode(@Nonnull RawMessage rawMessage) {
    final String msg = new String(rawMessage.getPayload(), charset);
    try (Timer.Context ignored = this.decodeTime.time()) {
        // The remote address may be absent entirely, or present but unresolved.
        final ResolvableInetSocketAddress address = rawMessage.getRemoteAddress();
        final InetSocketAddress remoteAddress =
            address == null ? null : address.getInetSocketAddress();
        return parse(
            msg,
            remoteAddress == null ? null : remoteAddress.getAddress(),
            rawMessage.getTimestamp());
    }
}
/**
 * Regression test: a syslog line with an ISO-8601 timestamp and a bracketed
 * tag must parse message, timestamp, source, level and facility correctly.
 */
@Test
public void testIssue2954() throws Exception {
    // https://github.com/Graylog2/graylog2-server/issues/2954
    final RawMessage rawMessage = buildRawMessage("<6>2016-10-12T14:10:18Z hostname testmsg[20]: Test");
    final Message message = codec.decode(rawMessage);
    assertNotNull(message);
    assertEquals("hostname testmsg[20]: Test", message.getMessage());
    assertEquals(new DateTime(2016, 10, 12, 14, 10, 18, DateTimeZone.UTC), message.getTimestamp());
    assertEquals("hostname", message.getSource());
    // PRI <6> = facility 0 (kernel), severity 6 (informational).
    assertEquals(6, message.getField("level"));
    assertEquals("kernel", message.getField("facility"));
    assertEquals(0, message.getField("facility_num"));
}
/**
 * Collects every permission a user holds: the user's own permission strings,
 * permissions granted to the user's principal, and permissions derived from
 * the user's roles — deduplicated into one immutable list.
 *
 * @param user user whose effective permissions are computed
 * @return deduplicated list of all permissions for the user
 */
@Override
public List<Permission> getPermissionsForUser(User user) {
    final GRN principal = grnRegistry.ofUser(user);
    final ImmutableSet.Builder<Permission> allPermissions = ImmutableSet.builder();
    // Permissions set directly on the user.
    allPermissions.addAll(user.getPermissions().stream()
        .map(CaseSensitiveWildcardPermission::new)
        .collect(Collectors.toSet()));
    // Grants attached to the user's principal.
    allPermissions.addAll(permissionAndRoleResolver.resolvePermissionsForPrincipal(principal));
    // Permissions inherited via roles.
    allPermissions.addAll(getUserPermissionsFromRoles(user).stream()
        .map(CaseSensitiveWildcardPermission::new)
        .collect(Collectors.toSet()));
    return allPermissions.build().asList();
}
/**
 * getPermissionsForUser must merge direct user permissions, role-derived
 * permissions, and principal grants (including ownership GRN permissions)
 * into one set.
 */
@Test
public void testGetPermissionsForUser() throws Exception {
    final InMemoryRolePermissionResolver permissionResolver = mock(InMemoryRolePermissionResolver.class);
    final GRNRegistry grnRegistry = GRNRegistry.createWithBuiltinTypes();
    final UserService userService = new UserServiceImpl(mongoConnection, configuration, roleService,
            accessTokenService, userFactory, permissionResolver, serverEventBus, grnRegistry,
            permissionAndRoleResolver);
    final UserImplFactory factory = new UserImplFactory(new Configuration(), permissions);
    final UserImpl user = factory.create(new HashMap<>());
    user.setName("user");
    // Direct role membership ("Foo" -> "foo:bar") plus one direct permission.
    final Role role = createRole("Foo");
    user.setRoleIds(Collections.singleton(role.getId()));
    user.setPermissions(Collections.singletonList("hello:world"));
    when(permissionResolver.resolveStringPermission(role.getId())).thenReturn(Collections.singleton("foo:bar"));
    // Principal grants: a plain permission and a non-wildcard ownership permission.
    final GRNPermission ownerShipPermission = GRNPermission.create(RestPermissions.ENTITY_OWN,
            grnRegistry.newGRN(GRNTypes.DASHBOARD, "1234"));
    final GRN userGRN = grnRegistry.ofUser(user);
    when(permissionAndRoleResolver.resolvePermissionsForPrincipal(userGRN))
            .thenReturn(ImmutableSet.of(
                    new CaseSensitiveWildcardPermission("perm:from:grant"),
                    ownerShipPermission));
    // A role resolved via the principal (grant-based role assignment).
    final String roleId = "12345";
    when(permissionAndRoleResolver.resolveRolesForPrincipal(userGRN)).thenReturn(ImmutableSet.of(roleId));
    when(permissionResolver.resolveStringPermission(roleId)).thenReturn(ImmutableSet.of("perm:from:role"));
    // Wildcard permissions are compared by string; GRN permissions by instance.
    assertThat(userService.getPermissionsForUser(user).stream()
            .map(p -> p instanceof CaseSensitiveWildcardPermission ? p.toString() : p)
            .collect(Collectors.toSet()))
            .containsExactlyInAnyOrder("users:passwordchange:user", "users:edit:user", "foo:bar",
                    "hello:world", "users:tokenlist:user", "users:tokencreate:user",
                    "users:tokenremove:user", "perm:from:grant", ownerShipPermission, "perm:from:role");
}
/**
 * Resolves a primitive SQL type from its (case-insensitive) name.
 *
 * <p>{@code INT} and {@code VARCHAR} are aliases for INTEGER and STRING;
 * any other name is matched directly against {@code SqlBaseType}.
 *
 * @param typeName case-insensitive primitive type name
 * @return the corresponding primitive type
 * @throws SchemaException if the name matches no known primitive type
 */
public static SqlPrimitiveType of(final String typeName) {
    final String upperName = typeName.toUpperCase();
    if (INT.equals(upperName)) {
        return SqlPrimitiveType.of(SqlBaseType.INTEGER);
    }
    if (VARCHAR.equals(upperName)) {
        return SqlPrimitiveType.of(SqlBaseType.STRING);
    }
    try {
        return SqlPrimitiveType.of(SqlBaseType.valueOf(upperName));
    } catch (final IllegalArgumentException e) {
        throw new SchemaException("Unknown primitive type: " + typeName, e);
    }
}
/**
 * STRUCT is not a primitive base type, so the SqlBaseType overload of
 * SqlPrimitiveType.of must reject it with a SchemaException.
 */
@Test
public void shouldThrowOnStructType() {
    // When:
    final Exception e = assertThrows(
        SchemaException.class,
        () -> SqlPrimitiveType.of(SqlBaseType.STRUCT)
    );

    // Then:
    assertThat(e.getMessage(), containsString("Invalid primitive type: STRUCT"));
}
/**
 * Translates a PMML {@code ComparisonMeasure} into its KiePMML counterpart,
 * converting the kind, the measure's aggregate function, and the compare
 * function via their respective mapping helpers.
 *
 * @param comparisonMeasure source PMML comparison measure
 * @return equivalent KiePMML comparison measure
 */
static KiePMMLComparisonMeasure getKiePMMLComparisonMeasure(ComparisonMeasure comparisonMeasure) {
    return new KiePMMLComparisonMeasure(comparisonMeasureKindFrom(comparisonMeasure.getKind()),
                                        aggregateFunctionFrom(comparisonMeasure.getMeasure()),
                                        compareFunctionFrom(comparisonMeasure.getCompareFunction()));
}
/**
 * A ComparisonMeasure with a Euclidean measure must map to the EUCLIDEAN
 * aggregate function; remaining fields are checked by the common helper.
 * Fix: removed a stray no-op call to getRandomEnum whose result was discarded.
 */
@Test
void getKiePMMLComparisonMeasure() {
    ComparisonMeasure comparisonMeasure = new ComparisonMeasure();
    comparisonMeasure.setKind(getRandomEnum(ComparisonMeasure.Kind.values()));
    comparisonMeasure.setCompareFunction(getRandomEnum(CompareFunction.values()));
    Random random = new Random();
    comparisonMeasure.setMinimum(random.nextInt(10));
    // Keep maximum >= minimum so the measure is internally consistent.
    comparisonMeasure.setMaximum(comparisonMeasure.getMinimum().intValue() + random.nextInt(10));
    comparisonMeasure.setMeasure(new Euclidean());
    KiePMMLComparisonMeasure retrieved =
        KiePMMLClusteringModelFactory.getKiePMMLComparisonMeasure(comparisonMeasure);
    assertThat(retrieved.getAggregateFunction()).isEqualTo(KiePMMLAggregateFunction.EUCLIDEAN);
    commonEvaluateKiePMMLComparisonMeasure(retrieved, comparisonMeasure);
}
/**
 * Computes the stack-map-frame local slots for the current visitor state by
 * delegating to the static overload with this visitor's argument size,
 * tracked locals, pending new locals, and variable mapper.
 *
 * @return local variable slot array for the frame
 */
private Object[] computeFrameLocals() {
    return computeFrameLocals(argsSize, locals, newLocals, variableMapper);
}
/**
 * Replays a stack-map frame captured from SparkContext initialization and
 * checks that computeFrameLocals remaps the raw frame locals ("fromFrame")
 * into the expected slot layout using the given VariableMapper.
 */
@Test
public void testComplexSparkContextInit() {
    // Expected local slot layout after remapping (type names and primitive markers).
    Object[] expected = {
        "org/apache/spark/SparkContext", "org/apache/spark/SparkConf",
        "java/util/concurrent/ConcurrentMap", "scala/Option", "java/lang/String", 0, 0, 1,
        "scala/Option", "scala/Tuple2", "org/apache/spark/scheduler/SchedulerBackend",
        "org/apache/spark/scheduler/TaskScheduler", "scala/Tuple2", "scala/Tuple2",
        "org/apache/spark/scheduler/SchedulerBackend", "org/apache/spark/scheduler/TaskScheduler",
        "scala/Option", 0, 1, "org/apache/spark/scheduler/SchedulerBackend", "scala/Option"
    };
    // Mapper fixture: 2 args, 21 tracked slots; the int[] encodes the slot mapping
    // (negative entries are flagged/remapped slots).
    VariableMapper mapper = new VariableMapper(
        2,
        21,
        new int[] {
            0, 0, -2147483628, -2147483636, -2147483646, -2147483645, -2147483644,
            -2147483643, -2147483642, -2147483641, -2147483640, -2147483635, -2147483639,
            -2147483638, -2147483637, -2147483634, -2147483633, -2147483632, -2147483631,
            -2147483630, -2147483629, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
        });
    // Raw locals as they appear in the incoming frame, before remapping.
    Object[] fromFrame = {
        "org/apache/spark/SparkContext", "org/apache/spark/SparkConf", 0, 0, "scala/Option",
        "scala/Tuple2", "java/util/concurrent/ConcurrentMap", "scala/Option", "java/lang/String",
        0, 0, 1, "scala/Option", "scala/Tuple2", "scala/Tuple2",
        "org/apache/spark/scheduler/SchedulerBackend", "org/apache/spark/scheduler/TaskScheduler",
        "org/apache/spark/scheduler/SchedulerBackend", "org/apache/spark/scheduler/TaskScheduler",
        "scala/Option", 0, 1, "org/apache/spark/scheduler/SchedulerBackend"
    };
    Object[] locals = InstrumentingMethodVisitor.computeFrameLocals(2, Arrays.asList(fromFrame), null, mapper);
    assertArrayEquals(expected, locals);
}
/**
 * Adds a backend handler to this manager's {@code backendHandlers} collection.
 *
 * @param handler backend handler to track
 */
public void add(final ProxyBackendHandler handler) {
    backendHandlers.add(handler);
}
/**
 * Adding a DatabaseConnector must make it the sole tracked backend handler.
 */
@Test
void assertAddDatabaseConnector() {
    ProxyBackendHandler connector = mock(DatabaseConnector.class);
    databaseConnectionManager.add(connector);
    Collection<ProxyBackendHandler> trackedHandlers = getBackendHandlers();
    assertThat(trackedHandlers.size(), is(1));
    assertThat(trackedHandlers.iterator().next(), is(connector));
}
@Override public DataflowPipelineJob run(Pipeline pipeline) { // Multi-language pipelines and pipelines that include upgrades should automatically be upgraded // to Runner v2. if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) { List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList()); if (!experiments.contains("use_runner_v2")) { LOG.info( "Automatically enabling Dataflow Runner v2 since the pipeline used cross-language" + " transforms or pipeline needed a transform upgrade."); options.setExperiments( ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build()); } } if (useUnifiedWorker(options)) { if (hasExperiment(options, "disable_runner_v2") || hasExperiment(options, "disable_runner_v2_until_2023") || hasExperiment(options, "disable_prime_runner_v2")) { throw new IllegalArgumentException( "Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set."); } List<String> experiments = new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true if (!experiments.contains("use_runner_v2")) { experiments.add("use_runner_v2"); } if (!experiments.contains("use_unified_worker")) { experiments.add("use_unified_worker"); } if (!experiments.contains("beam_fn_api")) { experiments.add("beam_fn_api"); } if (!experiments.contains("use_portable_job_submission")) { experiments.add("use_portable_job_submission"); } options.setExperiments(ImmutableList.copyOf(experiments)); } logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline); logWarningIfBigqueryDLQUnused(pipeline); if (shouldActAsStreaming(pipeline)) { options.setStreaming(true); if (useUnifiedWorker(options)) { options.setEnableStreamingEngine(true); List<String> experiments = new 
ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true if (!experiments.contains("enable_streaming_engine")) { experiments.add("enable_streaming_engine"); } if (!experiments.contains("enable_windmill_service")) { experiments.add("enable_windmill_service"); } } } if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) { ProjectionPushdownOptimizer.optimize(pipeline); } LOG.info( "Executing pipeline on the Dataflow Service, which will have billing implications " + "related to Google Compute Engine usage and other Google Cloud Services."); DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class); String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions); // This incorrectly puns the worker harness container image (which implements v1beta3 API) // with the SDK harness image (which implements Fn API). // // The same Environment is used in different and contradictory ways, depending on whether // it is a v1 or v2 job submission. RunnerApi.Environment defaultEnvironmentForDataflow = Environments.createDockerEnvironment(workerHarnessContainerImageURL); // The SdkComponents for portable an non-portable job submission must be kept distinct. Both // need the default environment. SdkComponents portableComponents = SdkComponents.create(); portableComponents.registerEnvironment( defaultEnvironmentForDataflow .toBuilder() .addAllDependencies(getDefaultArtifacts()) .addAllCapabilities(Environments.getJavaCapabilities()) .build()); RunnerApi.Pipeline portablePipelineProto = PipelineTranslation.toProto(pipeline, portableComponents, false); // Note that `stageArtifacts` has to be called before `resolveArtifact` because // `resolveArtifact` updates local paths to staged paths in pipeline proto. 
portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto); List<DataflowPackage> packages = stageArtifacts(portablePipelineProto); portablePipelineProto = resolveArtifacts(portablePipelineProto); portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options); if (LOG.isDebugEnabled()) { LOG.debug( "Portable pipeline proto:\n{}", TextFormat.printer().printToString(portablePipelineProto)); } // Stage the portable pipeline proto, retrieving the staged pipeline path, then update // the options on the new job // TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation()); byte[] serializedProtoPipeline = portablePipelineProto.toByteArray(); DataflowPackage stagedPipeline = options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME); dataflowOptions.setPipelineUrl(stagedPipeline.getLocation()); if (useUnifiedWorker(options)) { LOG.info("Skipping v1 transform replacements since job will run on v2."); } else { // Now rewrite things to be as needed for v1 (mutates the pipeline) // This way the job submitted is valid for v1 and v2, simultaneously replaceV1Transforms(pipeline); } // Capture the SdkComponents for look up during step translations SdkComponents dataflowV1Components = SdkComponents.create(); dataflowV1Components.registerEnvironment( defaultEnvironmentForDataflow .toBuilder() .addAllDependencies(getDefaultArtifacts()) .addAllCapabilities(Environments.getJavaCapabilities()) .build()); // No need to perform transform upgrading for the Runner v1 proto. RunnerApi.Pipeline dataflowV1PipelineProto = PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false); if (LOG.isDebugEnabled()) { LOG.debug( "Dataflow v1 pipeline proto:\n{}", TextFormat.printer().printToString(dataflowV1PipelineProto)); } // Set a unique client_request_id in the CreateJob request. 
// This is used to ensure idempotence of job creation across retried // attempts to create a job. Specifically, if the service returns a job with // a different client_request_id, it means the returned one is a different // job previously created with the same job name, and that the job creation // has been effectively rejected. The SDK should return // Error::Already_Exists to user in that case. int randomNum = new Random().nextInt(9000) + 1000; String requestId = DateTimeFormat.forPattern("YYYYMMddHHmmssmmm") .withZone(DateTimeZone.UTC) .print(DateTimeUtils.currentTimeMillis()) + "_" + randomNum; JobSpecification jobSpecification = translator.translate( pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages); if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) { List<String> experiments = firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()); if (!experiments.contains("use_staged_dataflow_worker_jar")) { dataflowOptions.setExperiments( ImmutableList.<String>builder() .addAll(experiments) .add("use_staged_dataflow_worker_jar") .build()); } } Job newJob = jobSpecification.getJob(); try { newJob .getEnvironment() .setSdkPipelineOptions( MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class)); } catch (IOException e) { throw new IllegalArgumentException( "PipelineOptions specified failed to serialize to JSON.", e); } newJob.setClientRequestId(requestId); DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo(); String version = dataflowRunnerInfo.getVersion(); checkState( !"${pom.version}".equals(version), "Unable to submit a job to the Dataflow service with unset version ${pom.version}"); LOG.info("Dataflow SDK version: {}", version); newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties()); // The Dataflow Service may write to the temporary directory directly, so // must be verified. 
if (!isNullOrEmpty(options.getGcpTempLocation())) { newJob .getEnvironment() .setTempStoragePrefix( dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation())); } newJob.getEnvironment().setDataset(options.getTempDatasetId()); if (options.getWorkerRegion() != null) { newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion()); } if (options.getWorkerZone() != null) { newJob.getEnvironment().setWorkerZone(options.getWorkerZone()); } if (options.getFlexRSGoal() == DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) { newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED"); } else if (options.getFlexRSGoal() == DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) { newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED"); } // Represent the minCpuPlatform pipeline option as an experiment, if not already present. if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) { List<String> experiments = firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()); List<String> minCpuFlags = experiments.stream() .filter(p -> p.startsWith("min_cpu_platform")) .collect(Collectors.toList()); if (minCpuFlags.isEmpty()) { dataflowOptions.setExperiments( ImmutableList.<String>builder() .addAll(experiments) .add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform()) .build()); } else { LOG.warn( "Flag min_cpu_platform is defined in both top level PipelineOption, " + "as well as under experiments. Proceed using {}.", minCpuFlags.get(0)); } } newJob .getEnvironment() .setExperiments( ImmutableList.copyOf( firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()))); // Set the Docker container image that executes Dataflow worker harness, residing in Google // Container Registry. Translator is guaranteed to create a worker pool prior to this point. // For runner_v1, only worker_harness_container is set. 
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same // value. String containerImage = getContainerImageForJob(options); for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) { workerPool.setWorkerHarnessContainerImage(containerImage); } configureSdkHarnessContainerImages(options, portablePipelineProto, newJob); newJob.getEnvironment().setVersion(getEnvironmentVersion(options)); if (hooks != null) { hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment()); } // enable upload_graph when the graph is too large byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8); int jobGraphByteSize = jobGraphBytes.length; if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES && !hasExperiment(options, "upload_graph") && !useUnifiedWorker(options)) { List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList()); options.setExperiments( ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build()); LOG.info( "The job graph size ({} in bytes) is larger than {}. Automatically add " + "the upload_graph option to experiments.", jobGraphByteSize, CREATE_JOB_REQUEST_LIMIT_BYTES); } if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) { ArrayList<String> experiments = new ArrayList<>(options.getExperiments()); while (experiments.remove("upload_graph")) {} options.setExperiments(experiments); LOG.warn( "The upload_graph experiment was specified, but it does not apply " + "to runner v2 jobs. Option has been automatically removed."); } // Upload the job to GCS and remove the graph object from the API call. The graph // will be downloaded from GCS by the service. 
if (hasExperiment(options, "upload_graph")) { DataflowPackage stagedGraph = options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME); newJob.getSteps().clear(); newJob.setStepsLocation(stagedGraph.getLocation()); } if (!isNullOrEmpty(options.getDataflowJobFile()) || !isNullOrEmpty(options.getTemplateLocation())) { boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation()); if (isTemplate) { checkArgument( isNullOrEmpty(options.getDataflowJobFile()), "--dataflowJobFile and --templateLocation are mutually exclusive."); } String fileLocation = firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile()); checkArgument( fileLocation.startsWith("/") || fileLocation.startsWith("gs://"), "Location must be local or on Cloud Storage, got %s.", fileLocation); ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */); String workSpecJson = DataflowPipelineTranslator.jobToString(newJob); try (PrintWriter printWriter = new PrintWriter( new BufferedWriter( new OutputStreamWriter( Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)), UTF_8)))) { printWriter.print(workSpecJson); LOG.info("Printed job specification to {}", fileLocation); } catch (IOException ex) { String error = String.format("Cannot create output file at %s", fileLocation); if (isTemplate) { throw new RuntimeException(error, ex); } else { LOG.warn(error, ex); } } if (isTemplate) { LOG.info("Template successfully created."); return new DataflowTemplateJob(); } } String jobIdToUpdate = null; if (options.isUpdate()) { jobIdToUpdate = getJobIdFromName(options.getJobName()); newJob.setTransformNameMapping(options.getTransformNameMapping()); newJob.setReplaceJobId(jobIdToUpdate); } if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) { newJob.setTransformNameMapping(options.getTransformNameMapping()); newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot()); } Job jobResult; try { 
jobResult = dataflowClient.createJob(newJob); } catch (GoogleJsonResponseException e) { String errorMessages = "Unexpected errors"; if (e.getDetails() != null) { if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) { errorMessages = "The size of the serialized JSON representation of the pipeline " + "exceeds the allowable limit. " + "For more information, please see the documentation on job submission:\n" + "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs"; } else { errorMessages = e.getDetails().getMessage(); } } throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e); } catch (IOException e) { throw new RuntimeException("Failed to create a workflow job", e); } // Use a raw client for post-launch monitoring, as status calls may fail // regularly and need not be retried automatically. DataflowPipelineJob dataflowPipelineJob = new DataflowPipelineJob( DataflowClient.create(options), jobResult.getId(), options, jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(), portablePipelineProto); // If the service returned client request id, the SDK needs to compare it // with the original id generated in the request, if they are not the same // (i.e., the returned job is not created by this request), throw // DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException // depending on whether this is a reload or not. if (jobResult.getClientRequestId() != null && !jobResult.getClientRequestId().isEmpty() && !jobResult.getClientRequestId().equals(requestId)) { // If updating a job. 
if (options.isUpdate()) { throw new DataflowJobAlreadyUpdatedException( dataflowPipelineJob, String.format( "The job named %s with id: %s has already been updated into job id: %s " + "and cannot be updated again.", newJob.getName(), jobIdToUpdate, jobResult.getId())); } else { throw new DataflowJobAlreadyExistsException( dataflowPipelineJob, String.format( "There is already an active job named %s with id: %s. If you want to submit a" + " second job, try again by setting a different name using --jobName.", newJob.getName(), jobResult.getId())); } } LOG.info( "To access the Dataflow monitoring console, please navigate to {}", MonitoringUtil.getJobMonitoringPageURL( options.getProject(), options.getRegion(), jobResult.getId())); LOG.info("Submitted job: {}", jobResult.getId()); LOG.info( "To cancel the job using the 'gcloud' tool, run:\n> {}", MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId())); return dataflowPipelineJob; }
/**
 * Verifies that the configured onCreateMatcher is exercised when a batch pipeline is run:
 * the matcher is registered via TestPipelineOptions before run() and the mocked job
 * reports State.DONE so the runner's post-run checks succeed.
 */
@Test
public void testBatchOnCreateMatcher() throws Exception {
    Pipeline p = TestPipeline.create(options);
    PCollection<Integer> pc = p.apply(Create.of(1, 2, 3));
    PAssert.that(pc).containsInAnyOrder(1, 2, 3);

    // Mock a completed Dataflow job.
    final DataflowPipelineJob mockJob = Mockito.mock(DataflowPipelineJob.class);
    when(mockJob.getState()).thenReturn(State.DONE);
    when(mockJob.getProjectId()).thenReturn("test-project");
    when(mockJob.getJobId()).thenReturn("test-job");

    DataflowRunner mockRunner = Mockito.mock(DataflowRunner.class);
    when(mockRunner.run(any(Pipeline.class))).thenReturn(mockJob);

    TestDataflowRunner runner = TestDataflowRunner.fromOptionsAndClient(options, mockClient);
    // Matcher is invoked on job creation; second arg 0 presumably an expected count -- TODO confirm
    // against TestSuccessMatcher's constructor.
    options.as(TestPipelineOptions.class).setOnCreateMatcher(new TestSuccessMatcher(mockJob, 0));

    when(mockClient.getJobMetrics(anyString()))
        .thenReturn(generateMockMetricResponse(true /* success */, true /* tentative */));
    runner.run(p, mockRunner);
}
/**
 * Parse a duration property value into nanoseconds.
 * <p>
 * A value ending in a digit is parsed directly via {@link Long#parseLong} (unit-less; presumably
 * nanoseconds given the other branches -- confirm with callers). A value ending in {@code s}/{@code S}
 * is scaled by its unit prefix character: {@code ns} (nanoseconds), {@code us} (microseconds),
 * {@code ms} (milliseconds), or a digit directly before the {@code s} (whole seconds).
 *
 * @param propertyName  name of the property, used only in exception messages.
 * @param propertyValue value to be parsed.
 * @return duration in nanoseconds.
 * @throws NumberFormatException if the value is null/empty, has an unknown suffix, or has no
 *     numeric part (previously a null/empty/lone-"s" input escaped as
 *     NullPointerException/StringIndexOutOfBoundsException).
 */
public static long parseDuration(final String propertyName, final String propertyValue)
{
    // Guard first: the charAt calls below would throw StringIndexOutOfBoundsException on
    // empty input rather than the NumberFormatException callers expect.
    if (null == propertyValue || propertyValue.isEmpty())
    {
        throw new NumberFormatException(
            propertyName + ": " + propertyValue + " should end with: s, ms, us, or ns.");
    }

    final char lastCharacter = propertyValue.charAt(propertyValue.length() - 1);
    if (Character.isDigit(lastCharacter))
    {
        // No unit suffix at all: parse the raw number.
        return Long.parseLong(propertyValue);
    }

    if (lastCharacter != 's' && lastCharacter != 'S')
    {
        throw new NumberFormatException(
            propertyName + ": " + propertyValue + " should end with: s, ms, us, or ns.");
    }

    // A lone "s" has no numeric part; reject it instead of indexing out of bounds.
    if (propertyValue.length() < 2)
    {
        throw new NumberFormatException(
            propertyName + ": " + propertyValue + " should end with: s, ms, us, or ns.");
    }

    final char secondLastCharacter = propertyValue.charAt(propertyValue.length() - 2);
    if (Character.isDigit(secondLastCharacter))
    {
        // "<digits>s" => whole seconds.
        final long value = AsciiEncoding.parseLongAscii(propertyValue, 0, propertyValue.length() - 1);
        return TimeUnit.SECONDS.toNanos(value);
    }

    final long value = AsciiEncoding.parseLongAscii(propertyValue, 0, propertyValue.length() - 2);

    switch (secondLastCharacter)
    {
        case 'n':
        case 'N':
            return value;

        case 'u':
        case 'U':
            return TimeUnit.MICROSECONDS.toNanos(value);

        case 'm':
        case 'M':
            return TimeUnit.MILLISECONDS.toNanos(value);

        default:
            throw new NumberFormatException(
                propertyName + ": " + propertyValue + " should end with: s, ms, us, or ns.");
    }
}
/** 'g' is neither a digit nor an s/S unit suffix, so parsing must reject it. */
@Test
void shouldThrowWhenParseTimeHasBadSuffix()
{
    assertThrows(NumberFormatException.class, () -> parseDuration("", "1g"));
}
/**
 * Applies the pending-service-message patch to the latest valid snapshot in the given cluster
 * directory. The snapshot is replayed locally and, if the service-session bookkeeping
 * (nextServiceSessionId / logServiceSessionId / pending message count) is inconsistent, a
 * corrected snapshot recording is created, the original recording is truncated and overwritten
 * via replication, and the temporary recording is purged.
 *
 * @param clusterDir cluster directory containing the recording log and control properties.
 * @return true if a patched snapshot was written, false if the snapshot was already consistent.
 * @throws IllegalArgumentException if clusterDir does not exist or is not a directory.
 * @throws ClusterException if no valid snapshot exists or the rewrite/replication is incomplete.
 */
public boolean execute(final File clusterDir)
{
    if (!clusterDir.exists() || !clusterDir.isDirectory())
    {
        throw new IllegalArgumentException("invalid cluster directory: " + clusterDir.getAbsolutePath());
    }

    final RecordingLog.Entry entry = ClusterTool.findLatestValidSnapshot(clusterDir);
    if (null == entry)
    {
        throw new ClusterException("no valid snapshot found");
    }

    final long recordingId = entry.recordingId;
    final ClusterNodeControlProperties properties = ClusterTool.loadControlProperties(clusterDir);
    final RecordingSignalCapture recordingSignalCapture = new RecordingSignalCapture();
    try (Aeron aeron = Aeron.connect(new Aeron.Context().aeronDirectoryName(properties.aeronDirectoryName));
        AeronArchive archive = AeronArchive.connect(new AeronArchive.Context()
            .controlRequestChannel(archiveLocalRequestChannel)
            .controlRequestStreamId(archiveLocalRequestStreamId)
            .controlResponseChannel(IPC_CHANNEL)
            .recordingSignalConsumer(recordingSignalCapture)
            .aeron(aeron)))
    {
        final SnapshotReader snapshotReader = new SnapshotReader();
        replayLocalSnapshotRecording(aeron, archive, recordingId, snapshotReader);

        // Target invariants: nextServiceSessionId must exceed every cluster session id seen and
        // leave room for all pending service messages above logServiceSessionId.
        final long targetNextServiceSessionId = max(
            max(snapshotReader.nextServiceSessionId, snapshotReader.maxClusterSessionId + 1),
            snapshotReader.logServiceSessionId + 1 + snapshotReader.pendingServiceMessageCount);
        final long targetLogServiceSessionId =
            targetNextServiceSessionId - 1 - snapshotReader.pendingServiceMessageCount;

        if (targetNextServiceSessionId != snapshotReader.nextServiceSessionId ||
            targetLogServiceSessionId != snapshotReader.logServiceSessionId ||
            0 != snapshotReader.pendingServiceMessageCount &&
            (targetLogServiceSessionId + 1 != snapshotReader.minClusterSessionId ||
            targetNextServiceSessionId - 1 != snapshotReader.maxClusterSessionId))
        {
            // Write a corrected copy of the snapshot into a new (temporary) recording.
            final long tempRecordingId = createNewSnapshotRecording(
                aeron, archive, recordingId, targetLogServiceSessionId, targetNextServiceSessionId);

            final long stopPosition = awaitRecordingStopPosition(archive, recordingId);
            final long newStopPosition = awaitRecordingStopPosition(archive, tempRecordingId);
            if (stopPosition != newStopPosition)
            {
                throw new ClusterException("new snapshot recording incomplete: expectedStopPosition=" +
                    stopPosition + ", actualStopPosition=" + newStopPosition);
            }

            // Replace the original recording's contents: truncate to zero, then replicate the
            // corrected recording over it. Signals are awaited so each step completes in order.
            recordingSignalCapture.reset();
            archive.truncateRecording(recordingId, 0);
            recordingSignalCapture.awaitSignalForRecordingId(archive, recordingId, RecordingSignal.DELETE);

            final long replicationId = archive.replicate(
                tempRecordingId, recordingId, archive.context().controlRequestStreamId(), IPC_CHANNEL, null);
            recordingSignalCapture.reset();
            recordingSignalCapture.awaitSignalForCorrelationId(archive, replicationId, RecordingSignal.SYNC);

            final long replicatedStopPosition = recordingSignalCapture.position();
            if (stopPosition != replicatedStopPosition)
            {
                throw new ClusterException("incomplete replication of the new recording: expectedStopPosition=" +
                    stopPosition + ", replicatedStopPosition=" + replicatedStopPosition);
            }

            // Clean up the temporary recording.
            recordingSignalCapture.reset();
            archive.purgeRecording(tempRecordingId);
            recordingSignalCapture.awaitSignalForRecordingId(archive, tempRecordingId, RecordingSignal.DELETE);

            return true;
        }
    }

    return false;
}
/** A null cluster dir is dereferenced before any validation, so an NPE must surface. */
@Test
void executeThrowsNullPointerExceptionIfClusterDirIsNull()
{
    //noinspection DataFlowIssue
    assertThrowsExactly(
        NullPointerException.class,
        () -> new ConsensusModuleSnapshotPendingServiceMessagesPatch().execute(null));
}
/**
 * Loads an {@link AlluxioSinkConfig} from a YAML file on the local filesystem.
 *
 * @param yamlFile path to the YAML configuration file
 * @return the parsed configuration
 * @throws IOException if the file cannot be read or its content cannot be bound
 */
public static AlluxioSinkConfig load(String yamlFile) throws IOException {
    final ObjectMapper yamlMapper = new ObjectMapper(new YAMLFactory());
    final File configFile = new File(yamlFile);
    return yamlMapper.readValue(configFile, AlluxioSinkConfig.class);
}
/** Loads sinkConfig.yaml from test resources and checks every field was bound. */
@Test
public final void loadFromYamlFileTest() throws IOException {
    File yamlFile = getFile("sinkConfig.yaml");
    String path = yamlFile.getAbsolutePath();
    AlluxioSinkConfig config = AlluxioSinkConfig.load(path);
    assertNotNull(config);
    assertEquals("localhost", config.getAlluxioMasterHost());
    assertEquals(Integer.parseInt("19998"), config.getAlluxioMasterPort());
    assertEquals("pulsar", config.getAlluxioDir());
    assertEquals("TopicA", config.getFilePrefix());
    assertEquals(".txt", config.getFileExtension());
    assertEquals("\n".charAt(0), config.getLineSeparator());
    assertEquals(Long.parseLong("100"), config.getRotationRecords());
    assertEquals(Long.parseLong("-1"), config.getRotationInterval());
}
/**
 * Abbreviates a fully qualified class name toward {@code targetLength}: package segments left of
 * the final dot are shortened to their first character until enough characters have been trimmed;
 * the class name itself (the segment after the last dot) is never shortened.
 *
 * @param fqClassName fully qualified class name; must not be null
 * @return the (possibly) abbreviated name; returned unchanged when already shorter than the
 *     target length or when it contains no dot
 * @throws IllegalArgumentException if fqClassName is null
 */
public String abbreviate(String fqClassName) {
    if (fqClassName == null) {
        throw new IllegalArgumentException("Class name may not be null");
    }

    int inLen = fqClassName.length();
    if (inLen < targetLength) {
        return fqClassName;
    }

    StringBuilder buf = new StringBuilder(inLen);

    int rightMostDotIndex = fqClassName.lastIndexOf(DOT);

    if (rightMostDotIndex == -1)
        return fqClassName;

    // length of last segment including the dot
    int lastSegmentLength = inLen - rightMostDotIndex;

    // How many characters the left-hand (package) part may keep.
    int leftSegments_TargetLen = targetLength - lastSegmentLength;
    if (leftSegments_TargetLen < 0)
        leftSegments_TargetLen = 0;

    int leftSegmentsLen = inLen - lastSegmentLength;

    // maxPossibleTrim denotes the maximum number of characters we aim to trim
    // the actual number of character trimmed may be higher since segments, when
    // reduced, are reduced to just one character
    int maxPossibleTrim = leftSegmentsLen - leftSegments_TargetLen;

    int trimmed = 0;
    // inDotState == true means we are at the start of a segment (just after a dot or at
    // index 0), so the next character is a segment's first char and is always kept.
    boolean inDotState = true;

    int i = 0;
    for (; i < rightMostDotIndex; i++) {
        char c = fqClassName.charAt(i);
        if (c == DOT) {
            // if trimmed too many characters, let us stop
            if (trimmed >= maxPossibleTrim)
                break;
            buf.append(c);
            inDotState = true;
        } else {
            if (inDotState) {
                buf.append(c);
                inDotState = false;
            } else {
                trimmed++;
            }
        }
    }
    // append from the position of i which may include the last seen DOT
    buf.append(fqClassName.substring(i));
    return buf.toString();
}
/** A name with no dot has no package segments to shorten, so it must be returned unchanged. */
@Test
public void testNoDot() {
    TargetLengthBasedClassNameAbbreviator abbreviator = new TargetLengthBasedClassNameAbbreviator(1);
    String name = "hello";
    assertEquals(name, abbreviator.abbreviate(name));
}
/**
 * Delegates the stream request to the wrapped dispatcher, first wrapping the caller's
 * callback so the request is finalized when the response callback completes.
 */
@Override
public void handleStreamRequest(StreamRequest req, Map<String, String> wireAttrs,
    RequestContext requestContext, TransportCallback<StreamResponse> callback)
{
  final TransportCallback<StreamResponse> finalizingCallback =
      new RequestFinalizerTransportCallback<>(callback, requestContext, req);
  _transportDispatcher.handleStreamRequest(req, wireAttrs, requestContext, finalizingCallback);
}
/**
 * Verifies the execution ordering of stream-request handling: outer dispatcher (1), inner
 * dispatcher (2), inner callback (3), outer callback (4), caller's callback (5); and when the
 * caller's callback throws, the request finalizer still runs last (6).
 */
@Test(dataProvider = "throwTransportCallbackException")
public void testHandleStreamRequestOrdering(boolean throwTransportCallbackException) {
    when(_streamTransportResponse.getResponse())
        .thenReturn(_streamResponse);
    when(_streamResponse.getEntityStream())
        .thenReturn(_entityStream);

    final TestTransportCallback<StreamResponse> transportCallback =
        new TestTransportCallback<>(throwTransportCallbackException);

    _outerDispatcher.handleStreamRequest(null, null, new RequestContext(), transportCallback);

    Assert.assertEquals(_outerDispatcher._executionOrder, 1);
    Assert.assertEquals(_innerDispatcher._executionOrder, 2);
    Assert.assertEquals(_innerDispatcher._transportCallback._executionOrder, 3);
    Assert.assertEquals(_outerDispatcher._transportCallback._executionOrder, 4);
    Assert.assertEquals(transportCallback._executionOrder, 5);
    verify(_entityStream).addObserver(any());

    if (throwTransportCallbackException) {
        Assert.assertEquals(_requestFinalizer._executionOrder, 6,
            "Expected request to be finalized after the callback threw an exception.");
    }
}
/**
 * Returns the session bound to the current context by the underlying Hibernate
 * {@code SessionFactory}.
 */
protected Session currentSession() {
    return sessionFactory.getCurrentSession();
}
/** currentSession() must return exactly the session the mocked SessionFactory is bound to. */
@Test
void getsASessionFromTheSessionFactory() throws Exception {
    assertThat(dao.currentSession())
        .isSameAs(session);
}
/**
 * Builds one DNS-seed-based discovery per seed host.
 * <p>
 * Fix: the original used unbraced multi-line {@code if}/{@code for} statements, a well-known
 * maintenance hazard; behavior is unchanged (a null seed array yields an empty list).
 *
 * @param network network the discoveries will resolve peers for
 * @param seeds   DNS seed host names; may be null
 * @return a mutable list with one {@code DnsSeedDiscovery} per seed (empty if seeds is null)
 */
private static List<PeerDiscovery> buildDiscoveries(Network network, String[] seeds) {
    List<PeerDiscovery> discoveries = new ArrayList<>();
    if (seeds == null) {
        return discoveries;
    }
    for (String seed : seeds) {
        discoveries.add(new DnsSeedDiscovery(network, seed));
    }
    return discoveries;
}
/**
 * Resolves two real DNS seeds and expects at least one peer from each.
 * NOTE(review): depends on live DNS/network availability; consider stubbing for hermetic runs.
 */
@Test
public void testBuildDiscoveries() throws PeerDiscoveryException {
    String[] seeds = new String[] { "seed.bitcoin.sipa.be", "dnsseed.bluematt.me" };
    DnsDiscovery dnsDiscovery = new DnsDiscovery(seeds, BitcoinNetwork.MAINNET);
    assertTrue(dnsDiscovery.seeds.size() == 2);
    for (PeerDiscovery peerDiscovery : dnsDiscovery.seeds) {
        assertTrue(peerDiscovery.getPeers(0, Duration.ofMillis(100)).size() > 0);
    }
}
/**
 * Starts the worker's subsystems and marks the worker as running.
 * NOTE(review): the start order looks deliberate (config fetch and memory monitoring before
 * the harness; reporters/refreshers last) -- confirm before reordering.
 */
@SuppressWarnings("FutureReturnValueIgnored")
public void start() {
    running.set(true);
    configFetcher.start();
    memoryMonitor.start();
    streamingWorkerHarness.start();
    sampler.start();
    workerStatusReporter.start();
    activeWorkRefresher.start();
}
/**
 * Verifies that ReadOperation, StreamingModeExecutionContext, and windmill sinks coordinate to
 * limit the size of an output bundle: each commit must be within 10% of MAX_SINK_BYTES even
 * though the source could emit ~1GB (100K messages inflated x10k bytes each).
 */
@Test
public void testLimitOnOutputBundleSize() throws Exception {
    // This verifies that ReadOperation, StreamingModeExecutionContext, and windmill sinks
    // coordinate to limit size of an output bundle.
    List<Integer> finalizeTracker = Lists.newArrayList();
    TestCountingSource.setFinalizeTracker(finalizeTracker);

    final int numMessagesInCustomSourceShard = 100000; // 100K input messages.
    final int inflatedSizePerMessage = 10000; // x10k => 1GB total output size.

    StreamingDataflowWorker worker =
        makeWorker(
            defaultWorkerParams()
                .setInstructions(
                    makeUnboundedSourcePipeline(
                        numMessagesInCustomSourceShard,
                        new InflateDoFn(inflatedSizePerMessage)))
                .build());
    worker.start();

    // Test new key.
    server
        .whenGetWorkCalled()
        .thenReturn(
            buildInput(
                "work {"
                    + "  computation_id: \"computation\""
                    + "  input_data_watermark: 0"
                    + "  work {"
                    + "    key: \"0000000000000001\""
                    + "    sharding_key: 1"
                    + "    work_token: 1"
                    + "    cache_token: 1"
                    + "  }"
                    + "}",
                null));

    // Matcher to ensure that commit size is within 10% of max bundle size.
    Matcher<Integer> isWithinBundleSizeLimits =
        both(greaterThan(StreamingDataflowWorker.MAX_SINK_BYTES * 9 / 10))
            .and(lessThan(StreamingDataflowWorker.MAX_SINK_BYTES * 11 / 10));

    Map<Long, Windmill.WorkItemCommitRequest> result = server.waitForAndGetCommits(1);
    Windmill.WorkItemCommitRequest commit = result.get(1L);

    assertThat(commit.getSerializedSize(), isWithinBundleSizeLimits);

    // Try another bundle
    server
        .whenGetWorkCalled()
        .thenReturn(
            buildInput(
                "work {"
                    + "  computation_id: \"computation\""
                    + "  input_data_watermark: 0"
                    + "  work {"
                    + "    key: \"0000000000000001\""
                    + "    sharding_key: 1"
                    + "    work_token: 2"
                    + "    cache_token: 1"
                    + "  }"
                    + "}",
                null));

    result = server.waitForAndGetCommits(1);
    commit = result.get(2L);

    assertThat(commit.getSerializedSize(), isWithinBundleSizeLimits);
}
public int getWeight() { ProviderStatus status = getStatus(); if (status == ProviderStatus.WARMING_UP) { try { // 还处于预热时间中 Integer warmUpWeight = (Integer) getDynamicAttr(ProviderInfoAttrs.ATTR_WARMUP_WEIGHT); if (warmUpWeight != null) { return warmUpWeight; } } catch (Exception e) { return weight; } } return weight; }
/**
 * Parses a provider URL with a 100ms warm-up window, checks all parsed static attributes, then
 * verifies the weight is the warm-up weight (5) during WARMING_UP and reverts to the static
 * weight (2000) with status AVAILABLE after the window elapses.
 */
@Test
public void testGetWeight() {
    //1s
    ProviderInfo provider = ProviderHelper
        .toProviderInfo("bolt://10.15.232.229:12222?timeout=3333&serialization=hessian2&connections=1&warmupTime=100&warmupWeight=5&appName=test-server&weight=2000");
    long warmupTime = Long.parseLong(provider.getStaticAttr(ProviderInfoAttrs.ATTR_WARMUP_TIME));
    provider.setDynamicAttr(ProviderInfoAttrs.ATTR_WARMUP_WEIGHT,
        Integer.parseInt(provider.getStaticAttr(ProviderInfoAttrs.ATTR_WARMUP_WEIGHT)));
    provider.setDynamicAttr(ProviderInfoAttrs.ATTR_WARMUP_TIME, warmupTime);
    provider.setDynamicAttr(ProviderInfoAttrs.ATTR_WARM_UP_END_TIME, System.currentTimeMillis() + warmupTime);
    provider.setStatus(ProviderStatus.WARMING_UP);

    Assert.assertTrue(RpcConstants.PROTOCOL_TYPE_BOLT.equals(provider.getProtocolType()));
    Assert.assertTrue(RpcConstants.SERIALIZE_HESSIAN2.equals(provider.getSerializationType()));
    Assert.assertTrue("10.15.232.229".equals(provider.getHost()));
    Assert.assertTrue(provider.getPort() == 12222);
    Assert.assertTrue("test-server".equals(provider.getAttr(ProviderInfoAttrs.ATTR_APP_NAME)));
    Assert.assertTrue("1".equals(provider.getAttr(ProviderInfoAttrs.ATTR_CONNECTIONS)));
    Assert.assertEquals("3333", provider.getStaticAttr(ProviderInfoAttrs.ATTR_TIMEOUT));
    Assert.assertEquals(5, provider.getDynamicAttr(ProviderInfoAttrs.ATTR_WARMUP_WEIGHT));
    Assert.assertTrue(provider.getDynamicAttr(ProviderInfoAttrs.ATTR_WARM_UP_END_TIME) != null);
    Assert.assertEquals("5", provider.getStaticAttr(ProviderInfoAttrs.ATTR_WARMUP_WEIGHT));
    Assert.assertEquals("100", provider.getStaticAttr(ProviderInfoAttrs.ATTR_WARMUP_TIME));
    Assert.assertEquals(ProviderStatus.WARMING_UP, provider.getStatus());
    Assert.assertEquals(5, provider.getWeight());

    // Let the 100ms warm-up window pass.
    try {
        TimeUnit.MILLISECONDS.sleep(200);
    } catch (Exception ignored) {
    }

    Assert.assertTrue(provider.getWeight() == 2000);
    Assert.assertTrue(provider.getStatus() == ProviderStatus.AVAILABLE);
    Assert.assertTrue(provider.getDynamicAttr(ProviderInfoAttrs.ATTR_WARM_UP_END_TIME) == null);
}
/**
 * Scans an edit log stream to determine its last valid transaction id and end position.
 * A scan error triggers a resync; scanning aborts if resync fails to advance the position
 * (avoiding an infinite loop) or once maxTxIdToScan has been reached.
 *
 * @param in stream to scan; consumed up to the last scannable op.
 * @param maxTxIdToScan stop scanning once a txid at or beyond this value has been seen.
 * @return validation with the end position and highest txid seen; the hasCorruptHeader flag is
 *     always false here -- NOTE(review): presumably header corruption is detected by the
 *     caller, not this scan; confirm.
 */
static EditLogValidation scanEditLog(EditLogInputStream in, long maxTxIdToScan) {
    long lastPos;
    long lastTxId = HdfsServerConstants.INVALID_TXID;
    long numValid = 0;
    while (true) {
        long txid;
        lastPos = in.getPosition();
        try {
            if ((txid = in.scanNextOp()) == HdfsServerConstants.INVALID_TXID) {
                break;
            }
        } catch (Throwable t) {
            FSImage.LOG.warn("Caught exception after scanning through " + numValid
                + " ops from " + in
                + " while determining its valid length. Position was " + lastPos, t);
            in.resync();
            FSImage.LOG.warn("After resync, position is " + in.getPosition());
            if (in.getPosition() <= lastPos) {
                // Resync made no progress: bail out rather than spin on the same offset.
                FSImage.LOG.warn("After resync, the position, {} is not greater "
                    + "than the previous position {}. Skipping remainder of this log.",
                    in.getPosition(), lastPos);
                break;
            }
            continue;
        }
        // Track the highest txid encountered (ops may not be strictly increasing).
        if (lastTxId == HdfsServerConstants.INVALID_TXID || txid > lastTxId) {
            lastTxId = txid;
        }
        if (lastTxId >= maxTxIdToScan) {
            break;
        }
        numValid++;
    }
    return new EditLogValidation(lastPos, lastTxId, false);
}
/**
 * Scans an unfinalized edit log whose body has been corrupted (byte flips at each txn offset)
 * or truncated (right before each txn), verifying the scan still reports the expected end txid
 * and never flags a corrupt header.
 */
@Test
public void testValidateEditLogWithCorruptBody() throws IOException {
    File testDir = new File(TEST_DIR, "testValidateEditLogWithCorruptBody");
    SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
    final int NUM_TXNS = 20;
    File logFile = prepareUnfinalizedTestEditLog(testDir, NUM_TXNS, offsetToTxId);
    // Back up the uncorrupted log
    File logFileBak = new File(testDir, logFile.getName() + ".bak");
    Files.copy(logFile, logFileBak);
    EditLogValidation validation =
        EditLogFileInputStream.scanEditLog(logFile, Long.MAX_VALUE, true);
    assertTrue(!validation.hasCorruptHeader());
    // We expect that there will be an OP_START_LOG_SEGMENT, followed by
    // NUM_TXNS opcodes, followed by an OP_END_LOG_SEGMENT.
    assertEquals(NUM_TXNS + 1, validation.getEndTxId());
    // Corrupt each edit and verify that validation continues to work
    for (Map.Entry<Long, Long> entry : offsetToTxId.entrySet()) {
        long txOffset = entry.getKey();
        long txId = entry.getValue();

        // Restore backup, corrupt the txn opcode
        Files.copy(logFileBak, logFile);
        corruptByteInFile(logFile, txOffset);
        validation = EditLogFileInputStream.scanEditLog(logFile, Long.MAX_VALUE, true);
        // Corrupting the final txn drops it; corrupting an earlier one still scans past it.
        long expectedEndTxId = (txId == (NUM_TXNS + 1)) ? NUM_TXNS : (NUM_TXNS + 1);
        assertEquals("Failed when corrupting txn opcode at " + txOffset,
            expectedEndTxId, validation.getEndTxId());
        assertTrue(!validation.hasCorruptHeader());
    }

    // Truncate right before each edit and verify that validation continues
    // to work
    for (Map.Entry<Long, Long> entry : offsetToTxId.entrySet()) {
        long txOffset = entry.getKey();
        long txId = entry.getValue();

        // Restore backup, corrupt the txn opcode
        Files.copy(logFileBak, logFile);
        truncateFile(logFile, txOffset);
        validation = EditLogFileInputStream.scanEditLog(logFile, Long.MAX_VALUE, true);
        // Truncating before the first txn leaves no valid txids at all.
        long expectedEndTxId = (txId == 0) ?
            HdfsServerConstants.INVALID_TXID : (txId - 1);
        assertEquals("Failed when corrupting txid " + txId + " txn opcode " +
            "at " + txOffset, expectedEndTxId, validation.getEndTxId());
        assertTrue(!validation.hasCorruptHeader());
    }
}
public static String prepareUrl(@NonNull String url) { url = url.trim(); String lowerCaseUrl = url.toLowerCase(Locale.ROOT); // protocol names are case insensitive if (lowerCaseUrl.startsWith("feed://")) { Log.d(TAG, "Replacing feed:// with http://"); return prepareUrl(url.substring("feed://".length())); } else if (lowerCaseUrl.startsWith("pcast://")) { Log.d(TAG, "Removing pcast://"); return prepareUrl(url.substring("pcast://".length())); } else if (lowerCaseUrl.startsWith("pcast:")) { Log.d(TAG, "Removing pcast:"); return prepareUrl(url.substring("pcast:".length())); } else if (lowerCaseUrl.startsWith("itpc")) { Log.d(TAG, "Replacing itpc:// with http://"); return prepareUrl(url.substring("itpc://".length())); } else if (lowerCaseUrl.startsWith(AP_SUBSCRIBE)) { Log.d(TAG, "Removing antennapod-subscribe://"); return prepareUrl(url.substring(AP_SUBSCRIBE.length())); } else if (lowerCaseUrl.contains(AP_SUBSCRIBE_DEEPLINK)) { Log.d(TAG, "Removing " + AP_SUBSCRIBE_DEEPLINK); String query = Uri.parse(url).getQueryParameter("url"); try { return prepareUrl(URLDecoder.decode(query, "UTF-8")); } catch (UnsupportedEncodingException e) { return prepareUrl(query); } } else if (!(lowerCaseUrl.startsWith("http://") || lowerCaseUrl.startsWith("https://"))) { Log.d(TAG, "Adding http:// at the beginning of the URL"); return "http://" + url; } else { return url; } }
/** The antennapod-subscribe:// prefix is stripped and http:// is added when no scheme remains. */
@Test
public void testAntennaPodSubscribeProtocolNoScheme() {
    final String in = "antennapod-subscribe://example.com";
    final String out = UrlChecker.prepareUrl(in);
    assertEquals("http://example.com", out);
}
/**
 * Whether this instance carries buffer data, delegating the decision to the
 * underlying {@code dataType}.
 */
@Override
public boolean isBuffer() {
    return dataType.isBuffer();
}
/** A buffer created with the data flag set must report itself as a data buffer. */
@Test
void testDataBufferIsBuffer() {
    assertThat(newBuffer(1024, 1024, true).isBuffer()).isTrue();
}
/**
 * Exports the pipeline rule identified by the descriptor as a content-pack entity.
 * Returns an empty Optional (after logging at debug) when no rule with that id exists.
 */
@Override
public Optional<Entity> exportEntity(EntityDescriptor entityDescriptor, EntityDescriptorIds entityDescriptorIds) {
    try {
        final RuleDao ruleDao = ruleService.load(entityDescriptor.id().id());
        return Optional.of(exportNativeEntity(ruleDao, entityDescriptorIds));
    } catch (NotFoundException e) {
        LOG.debug("Couldn't find pipeline rule {}", entityDescriptor, e);
        return Optional.empty();
    }
}
/**
 * Exports a pipeline rule as a content-pack entity and verifies id, type, and that the
 * title/description/source survive the round trip through the entity payload.
 */
@Test
public void exportEntity() {
    final RuleDao pipelineRule = RuleDao.builder()
        .id("id")
        .title("title")
        .description("description")
        .source("rule \"debug\"\nwhen\n true\nthen\n debug($message.message);\nend")
        .build();
    final EntityDescriptor descriptor = EntityDescriptor.create("id", ModelTypes.PIPELINE_RULE_V1);
    final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor);
    final Entity entity = facade.exportNativeEntity(pipelineRule, entityDescriptorIds);

    assertThat(entity).isInstanceOf(EntityV1.class);
    assertThat(entity.id()).isEqualTo(ModelId.of(entityDescriptorIds.get(descriptor).orElse(null)));
    assertThat(entity.type()).isEqualTo(ModelTypes.PIPELINE_RULE_V1);

    final EntityV1 entityV1 = (EntityV1) entity;
    final PipelineRuleEntity ruleEntity = objectMapper.convertValue(entityV1.data(), PipelineRuleEntity.class);
    assertThat(ruleEntity.title()).isEqualTo(ValueReference.of("title"));
    assertThat(ruleEntity.description()).isEqualTo(ValueReference.of("description"));
    assertThat(ruleEntity.source().asString(Collections.emptyMap())).startsWith("rule \"debug\"\n");
}
/**
 * Discovers devices by scanning the given directory (depth 1) for entries whose file name
 * starts with "veslot", converting each matching entry to a Device.
 *
 * @param path directory to scan
 * @return the set of discovered devices
 * @throws IOException if the directory cannot be walked
 */
public Set<Device> getDevicesFromPath(String path) throws IOException {
    final MutableInt counter = new MutableInt(0);
    // try-with-resources: Files.walk streams must be closed to release the directory handle.
    try (Stream<Path> entries = Files.walk(Paths.get(path), 1)) {
        return entries
            .filter(entry -> entry.toFile().getName().startsWith("veslot"))
            .map(entry -> toDevice(entry, counter))
            .collect(Collectors.toSet());
    }
}
/**
 * Only files named with the exact "veslot" prefix participate in discovery; look-alikes
 * ("vexlot", "xyzveslot", etc.) must be skipped, leaving a single discovered device.
 */
@Test
public void testNonVESlotFilesAreSkipped() throws IOException {
    createVeSlotFile(0);
    createOsStateFile(0);
    createFile("abcde");
    createFile("vexlot");
    createFile("xyzveslot");

    when(mockCommandExecutor.getOutput()).thenReturn(
        "8:1:character special file",
        "9:1:character special file",
        "10:1:character special file",
        "11:1:character special file",
        "12:1:character special file");
    when(udevUtil.getSysPath(anyInt(), anyChar())).thenReturn(testFolder);

    Set<Device> devices = discoverer.getDevicesFromPath(testFolder);

    assertEquals("Number of devices", 1, devices.size());
    Device device = devices.iterator().next();
    assertEquals("Device ID", 0, device.getId());
    assertEquals("Major number", 8, device.getMajorNumber());
    assertEquals("Minor number", 1, device.getMinorNumber());
    assertEquals("Status", "ONLINE", device.getStatus());
    assertTrue("Device is not healthy", device.isHealthy());
}
/**
 * Truth subject factory for asserting on strings against RE2/J patterns;
 * use with {@code assertAbout(re2jString())}.
 */
public static Subject.Factory<Re2jStringSubject, String> re2jString() {
    return Re2jStringSubject.FACTORY;
}
/** A string matching the RE2/J pattern must satisfy the matches() assertion. */
@Test
public void matches_pattern_succeeds() {
    assertAbout(re2jString()).that("hello world").matches(PATTERN);
}
/**
 * Selects instances from the context by applying this selector's filter once; the returned
 * NamingResult exposes the filtered list.
 */
@Override
public NamingResult select(NamingContext context) {
    final List<Instance> filtered = doFilter(context.getInstances());
    return () -> filtered;
}
/**
 * Builds a randomly sized instance list with a random healthy count and checks the
 * health-filtering selector returns exactly the healthy instances.
 */
@Test
public void testSelect() {
    DefaultNamingSelector namingSelector = new DefaultNamingSelector(Instance::isHealthy);
    Random random = new Random();
    int total = random.nextInt(32) + 1;
    int health = random.nextInt(total);

    NamingContext namingContext = getMockNamingContext(total, health);
    NamingResult result = namingSelector.select(namingContext);

    assertEquals(health, result.getResult().size());
    result.getResult().forEach(ins -> assertTrue(ins.isHealthy()));
}
/**
 * Binds a Netty-based {@link RemotingServer} for the given URL.
 *
 * @param url     server URL carrying host/port and transport parameters
 * @param handler channel handler receiving connection events and messages
 * @throws RemotingException if the server fails to bind
 */
@Override
public RemotingServer bind(URL url, ChannelHandler handler) throws RemotingException {
    return new NettyServer(url, handler);
}
/**
 * Binds a NettyTransporter server on a free port, with a minimally configured application and
 * module model attached to the URL, and verifies the server reports bound.
 */
@Test
void shouldAbleToBindNetty4() throws Exception {
    int port = NetUtils.getAvailablePort();
    URL url = new ServiceConfigURL(
        "telnet", "localhost", port, new String[] {Constants.BIND_PORT_KEY, String.valueOf(port)});

    // Minimal application/config-manager wiring required for the transporter to start.
    ApplicationModel applicationModel = ApplicationModel.defaultModel();
    ApplicationConfig applicationConfig = new ApplicationConfig("provider-app");
    applicationConfig.setExecutorManagementMode(EXECUTOR_MANAGEMENT_MODE_DEFAULT);
    applicationModel.getApplicationConfigManager().setApplication(applicationConfig);

    ConfigManager configManager = new ConfigManager(applicationModel);
    configManager.setApplication(applicationConfig);
    configManager.getApplication();
    applicationModel.setConfigManager(configManager);
    url = url.setScopeModel(applicationModel);

    ModuleModel moduleModel = applicationModel.getDefaultModule();
    url = url.putAttribute(CommonConstants.SCOPE_MODEL, moduleModel);

    RemotingServer server = new NettyTransporter().bind(url, new ChannelHandlerAdapter());
    assertThat(server.isBound(), is(true));
}
/**
 * Wraps a slice of the given ByteBuffer as a Binary without copying the bytes.
 * NOTE(review): the final {@code false} flag presumably marks the backing buffer as not
 * reused (hence "constant") -- confirm against ByteBufferBackedBinary's constructor.
 *
 * @param value  backing buffer (not copied; caller must not mutate the slice)
 * @param offset start of the slice within the buffer
 * @param length number of bytes in the slice
 */
public static Binary fromConstantByteBuffer(final ByteBuffer value, int offset, int length) {
    return new ByteBufferBackedBinary(value, offset, length, false);
}
/** Verifies writeAllTo for constant Binaries backed by both heap and direct ByteBuffers. */
@Test
public void testWriteAllTo() throws Exception {
    byte[] orig = {10, 9, 8, 7, 6, 5, 4, 3, 2, 1};
    testWriteAllToHelper(Binary.fromConstantByteBuffer(ByteBuffer.wrap(orig)), orig);
    ByteBuffer buf = ByteBuffer.allocateDirect(orig.length);
    buf.put(orig);
    buf.flip();
    testWriteAllToHelper(Binary.fromConstantByteBuffer(buf), orig);
}
public static void disableConsumption(KafkaConsumerWrapper kafkaConsumerWrapper, Set<String> prohibitionTopics) { Set<String> originalTopics = kafkaConsumerWrapper.getOriginalTopics(); // Not subscribed to any Topic, so no action is required if (originalTopics.size() == 0) { return; } Collection<TopicPartition> originalPartitions = kafkaConsumerWrapper.getOriginalPartitions(); KafkaConsumer<?, ?> kafkaConsumer = kafkaConsumerWrapper.getKafkaConsumer(); Collection<String> subtractTopics = CollectionUtils.subtract(originalTopics, prohibitionTopics); if (kafkaConsumerWrapper.isAssign()) { kafkaConsumer.assign(originalPartitions.stream().filter(obj -> subtractTopics.contains(obj.topic())) .collect(Collectors.toSet())); return; } kafkaConsumer.subscribe(subtractTopics); }
/**
 * With subscribe mode (isAssign=false), disabling {testTopic-2, testTopic-3} from an original
 * set of {testTopic-1, testTopic-2} must re-subscribe to exactly [testTopic-1].
 */
@Test
public void testDisableConsumptionWithSubtractTopics() {
    KafkaConsumer<?, ?> mockConsumer = Mockito.mock(KafkaConsumer.class);
    KafkaConsumerWrapper kafkaConsumerWrapper = new KafkaConsumerWrapper(mockConsumer);
    HashSet<String> originalTopics = new HashSet<>();
    originalTopics.add("testTopic-1");
    originalTopics.add("testTopic-2");
    kafkaConsumerWrapper.setOriginalTopics(originalTopics);
    kafkaConsumerWrapper.setAssign(false);
    Set<String> prohibitionTopics = new HashSet<>();
    prohibitionTopics.add("testTopic-2");
    prohibitionTopics.add("testTopic-3");
    KafkaConsumerController.disableConsumption(kafkaConsumerWrapper, prohibitionTopics);
    Mockito.verify(mockConsumer, Mockito.times(1)).subscribe(Collections.singletonList("testTopic-1"));
}
/** Computes min-feature and tree-size statistics for the given predicate tree. */
public static PredicateTreeAnalyzerResult analyzePredicateTree(Predicate predicate) {
    AnalyzerContext context = new AnalyzerContext();
    int treeSize = aggregatePredicateStatistics(predicate, false, context);
    double rawMinFeature = findMinFeature(predicate, false, context);
    // Round up, and reserve one extra feature when the tree contains a negation.
    int negationAdjustment = context.hasNegationPredicate ? 1 : 0;
    int minFeature = (int) Math.ceil(rawMinFeature) + negationAdjustment;
    return new PredicateTreeAnalyzerResult(minFeature, treeSize, context.subTreeSizes);
}
// A conjunction of two set features should count as a single feature and a single tree node.
@Test void require_that_featureconjunctions_contribute_as_one_feature() { Predicate p = conj( feature("foo").inSet("bar"), feature("baz").inSet("qux")); PredicateTreeAnalyzerResult r = PredicateTreeAnalyzer.analyzePredicateTree(p); assertEquals(1, r.minFeature); assertEquals(1, r.treeSize); }
// Returns the stable dictionary id for the word, allocating the next sequential id on first sight.
int getDictionaryId(String word) throws LongWordException {
    requireNonNull(word);
    if (word.length() > MAX_WORD_LENGTH) {
        throw new LongWordException(
                "Too long value in the metric descriptor found, maximum is " + MAX_WORD_LENGTH + ": " + word);
    }
    // New words get an id equal to the current size, so ids are dense and insertion-ordered.
    final int nextWordId = orderedDictionary.size();
    final Word entry = orderedDictionary.computeIfAbsent(word, key -> new Word(word, nextWordId));
    return entry.id;
}
// Ids must be dense and insertion-ordered: the i-th distinct word receives id i.
@Test
public void testGrowing() {
    int expectedId = 0;
    while (expectedId < 256) {
        assertEquals(expectedId, dictionary.getDictionaryId(Integer.toString(expectedId)));
        expectedId++;
    }
}
// Asserts key1 is not the unassigned sentinel (which marks empty slots), then delegates to the base implementation.
@Override public SlotAssignmentResult ensure(long key1, long key2) { assert key1 != unassignedSentinel : "ensure() called with key1 == nullKey1 (" + unassignedSentinel + ')'; return super.ensure0(key1, key2); }
// Advancing the cursor again after it has already reported exhaustion must trip its internal assertion.
@Test public void testCursor_advance_afterAdvanceReturnsFalse() { hsa.ensure(randomKey(), randomKey()); HashSlotCursor16byteKey cursor = hsa.cursor(); cursor.advance(); cursor.advance(); assertThrows(AssertionError.class, cursor::advance); }
// Convenience overload: delegates to the named variant with an empty name.
@Override public <KR> KStream<KR, V> selectKey(final KeyValueMapper<? super K, ? super V, ? extends KR> mapper) { return selectKey(mapper, NamedInternal.empty()); }
// Passing a null Named argument must be rejected up front with a descriptive NPE.
@Test
public void shouldNotAllowNullNamedOnSelectKey() {
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.selectKey((key, value) -> key, null));
    assertThat(exception.getMessage(), equalTo("named can't be null"));
}
/**
 * Loads the migration config and delegates to the main command logic.
 * Returns 1 on any config/validation failure, otherwise the delegate's exit code.
 */
@Override
protected int command() {
    if (!validateConfigFilePresent()) {
        return 1;
    }
    final String configFilePath = getConfigFile();
    final MigrationConfig config;
    try {
        config = MigrationConfig.load(configFilePath);
    } catch (KsqlException | MigrationException e) {
        LOGGER.error(e.getMessage());
        return 1;
    }
    return command(config, MigrationsUtil::getKsqlClient, getMigrationsDir(configFilePath, config));
}
// With three applied migrations whose checksums match the on-disk files, validation should succeed (exit 0).
@Test public void shouldValidateMultipleMigrations() throws Exception { // Given: final List<String> versions = ImmutableList.of("1", "2", "3"); final List<String> checksums = givenExistingMigrationFiles(versions); givenAppliedMigrations(versions, checksums); // When: final int result = command.command(config, cfg -> ksqlClient, migrationsDir); // Then: assertThat(result, is(0)); verifyClientCallsForVersions(versions); }
// Returns the required columns collected so far; safe to expose since the set is immutable.
@SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "requiredColumns is ImmutableSet") public Collection<? extends ColumnReferenceExp> get() { return requiredColumns; }
// addAll should accept mixed references; EXP0 apparently expands to COL2/COL3 in the built set — confirm against EXP0's definition.
@Test public void shouldAddAll() { // When: builder.addAll(ImmutableSet.of(COL0_REF, COL1_REF, EXP0)); // Then: assertThat(builder.build().get(), is(ImmutableSet.of(COL0_REF, COL1_REF, COL2_REF, COL3_REF))); }
/**
 * Claims space for a message of {@code length} bytes and returns the offset at which the
 * caller may write the payload, or INSUFFICIENT_CAPACITY when the buffer is full.
 */
public int tryClaim(final int msgTypeId, final int length) {
    checkTypeId(msgTypeId);
    checkMsgLength(length);
    final AtomicBuffer buffer = this.buffer;
    final int recordLength = length + HEADER_LENGTH;
    final int recordIndex = claimCapacity(buffer, recordLength);
    if (INSUFFICIENT_CAPACITY == recordIndex)
    {
        return recordIndex;
    }
    // Negative length appears to mark the record as in-progress for concurrent readers — confirm against commit().
    buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
    // Fence orders the length store before the type store as observed by concurrent readers.
    MemoryAccess.releaseFence();
    buffer.putInt(typeOffset(recordIndex), msgTypeId);
    return encodedMsgOffset(recordIndex);
}
// A claim larger than the ring buffer's maximum message length must be rejected.
@Test
void tryClaimThrowsIllegalArgumentExceptionIfLengthIsBiggerThanMaxMessageLength() {
    final int oversizedLength = 2000;
    assertThrows(IllegalArgumentException.class, () -> ringBuffer.tryClaim(MSG_TYPE_ID, oversizedLength));
}
/**
 * Warns about appenders nested inside other appenders, which is almost always a
 * configuration mistake. Sifting appenders are exempt since they nest by design.
 */
@Override
public void check(Model model) {
    if (model == null) {
        return;
    }
    List<Model> appenderModels = new ArrayList<>();
    deepFindAllModelsOfType(AppenderModel.class, appenderModels, model);
    List<Pair<Model, Model>> nestedPairs = deepFindNestedSubModelsOfType(AppenderModel.class, appenderModels);
    List<Pair<Model, Model>> offendingPairs = nestedPairs.stream()
            .filter(pair -> !isSiftingAppender(pair.first))
            .collect(Collectors.toList());
    if (offendingPairs.isEmpty()) {
        return;
    }
    addWarn(NESTED_APPENDERS_WARNING);
    for (Pair<Model, Model> pair : offendingPairs) {
        addWarn("Appender at line " + pair.first.getLineNumber()
                + " contains a nested appender at line " + pair.second.getLineNumber());
    }
}
// A single, non-nested appender must not produce any warning or error.
@Test public void singleAppender() { TopModel topModel = new TopModel(); AppenderModel appenderModel0 = new AppenderModel(); appenderModel0.setLineNumber(1); topModel.addSubModel(appenderModel0); awasc.check(topModel); statusChecker.assertIsWarningOrErrorFree(); }
/**
 * Returns a copy of the given date truncated to midnight (00:00:00.000)
 * in the JVM's default time zone.
 */
public static Date trimDate( Date date ) {
  Calendar calendar = Calendar.getInstance();
  calendar.setTime( date );
  // Zero out the time-of-day fields, leaving the date fields untouched.
  for ( int field : new int[] { Calendar.MILLISECOND, Calendar.SECOND, Calendar.MINUTE, Calendar.HOUR_OF_DAY } ) {
    calendar.set( field, 0 );
  }
  return calendar.getTime();
}
// Trimming "now" must zero every time-of-day field.
@Test public void testTrimDate() { Date now = new Date(); Date nowTrimed = Const.trimDate( now ); Calendar calendar = GregorianCalendar.getInstance(); calendar.setTime( nowTrimed ); assertEquals( 0, calendar.get( Calendar.HOUR_OF_DAY ) ); assertEquals( 0, calendar.get( Calendar.MINUTE ) ); assertEquals( 0, calendar.get( Calendar.SECOND ) ); assertEquals( 0, calendar.get( Calendar.MILLISECOND ) ); }
/**
 * Handles an END_TRANSACTION request from a producer (or from a broker-initiated
 * transaction check-back), committing or rolling back the prepared half message
 * and updating the transaction metrics accordingly.
 * Returns {@code null} when no response should be sent back (pending/unknown state).
 */
@Override
public RemotingCommand processRequest(ChannelHandlerContext ctx, RemotingCommand request) throws RemotingCommandException {
    final RemotingCommand response = RemotingCommand.createResponseCommand(null);
    final EndTransactionRequestHeader requestHeader = (EndTransactionRequestHeader) request.decodeCommandCustomHeader(EndTransactionRequestHeader.class);
    LOGGER.debug("Transaction request:{}", requestHeader);
    // Only the master may finalize transactions; slaves reject the request outright.
    if (BrokerRole.SLAVE == brokerController.getMessageStoreConfig().getBrokerRole()) {
        response.setCode(ResponseCode.SLAVE_NOT_AVAILABLE);
        LOGGER.warn("Message store is slave mode, so end transaction is forbidden. ");
        return response;
    }
    if (requestHeader.getFromTransactionCheck()) {
        // Request produced by the broker's transaction-state check-back.
        switch (requestHeader.getCommitOrRollback()) {
            case MessageSysFlag.TRANSACTION_NOT_TYPE: {
                // Still pending: nothing to do, and no response is sent.
                LOGGER.warn("Check producer[{}] transaction state, but it's pending status."
                    + "RequestHeader: {} Remark: {}",
                    RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
                    requestHeader.toString(), request.getRemark());
                return null;
            }
            case MessageSysFlag.TRANSACTION_COMMIT_TYPE: {
                LOGGER.warn("Check producer[{}] transaction state, the producer commit the message."
                    + "RequestHeader: {} Remark: {}",
                    RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
                    requestHeader.toString(), request.getRemark());
                break;
            }
            case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE: {
                LOGGER.warn("Check producer[{}] transaction state, the producer rollback the message."
                    + "RequestHeader: {} Remark: {}",
                    RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
                    requestHeader.toString(), request.getRemark());
                break;
            }
            default:
                return null;
        }
    } else {
        // Request sent by the producer itself at the end of sending.
        switch (requestHeader.getCommitOrRollback()) {
            case MessageSysFlag.TRANSACTION_NOT_TYPE: {
                LOGGER.warn("The producer[{}] end transaction in sending message, and it's pending status."
                    + "RequestHeader: {} Remark: {}",
                    RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
                    requestHeader.toString(), request.getRemark());
                return null;
            }
            case MessageSysFlag.TRANSACTION_COMMIT_TYPE: {
                break;
            }
            case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE: {
                LOGGER.warn("The producer[{}] end transaction in sending message, rollback the message."
                    + "RequestHeader: {} Remark: {}",
                    RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
                    requestHeader.toString(), request.getRemark());
                break;
            }
            default:
                return null;
        }
    }
    OperationResult result = new OperationResult();
    if (MessageSysFlag.TRANSACTION_COMMIT_TYPE == requestHeader.getCommitOrRollback()) {
        result = this.brokerController.getTransactionalMessageService().commitMessage(requestHeader);
        if (result.getResponseCode() == ResponseCode.SUCCESS) {
            // Still inside the immunity window: defer to the check-back mechanism instead.
            if (rejectCommitOrRollback(requestHeader, result.getPrepareMessage())) {
                response.setCode(ResponseCode.ILLEGAL_OPERATION);
                LOGGER.warn("Message commit fail [producer end]. currentTimeMillis - bornTime > checkImmunityTime, msgId={},commitLogOffset={}, wait check", requestHeader.getMsgId(), requestHeader.getCommitLogOffset());
                return response;
            }
            RemotingCommand res = checkPrepareMessage(result.getPrepareMessage(), requestHeader);
            if (res.getCode() == ResponseCode.SUCCESS) {
                // endMessageTransaction presumably rebuilds the message for its real topic — confirm there.
                MessageExtBrokerInner msgInner = endMessageTransaction(result.getPrepareMessage());
                msgInner.setSysFlag(MessageSysFlag.resetTransactionValue(msgInner.getSysFlag(), requestHeader.getCommitOrRollback()));
                msgInner.setQueueOffset(requestHeader.getTranStateTableOffset());
                msgInner.setPreparedTransactionOffset(requestHeader.getCommitLogOffset());
                msgInner.setStoreTimestamp(result.getPrepareMessage().getStoreTimestamp());
                MessageAccessor.clearProperty(msgInner, MessageConst.PROPERTY_TRANSACTION_PREPARED);
                RemotingCommand sendResult = sendFinalMessage(msgInner);
                if (sendResult.getCode() == ResponseCode.SUCCESS) {
                    this.brokerController.getTransactionalMessageService().deletePrepareMessage(result.getPrepareMessage());
                    // successful committed, then total num of half-messages minus 1
                    this.brokerController.getTransactionalMessageService().getTransactionMetrics().addAndGet(msgInner.getTopic(), -1);
                    BrokerMetricsManager.commitMessagesTotal.add(1, BrokerMetricsManager.newAttributesBuilder()
                        .put(LABEL_TOPIC, msgInner.getTopic())
                        .build());
                    // record the commit latency.
                    Long commitLatency = (System.currentTimeMillis() - result.getPrepareMessage().getBornTimestamp()) / 1000;
                    BrokerMetricsManager.transactionFinishLatency.record(commitLatency, BrokerMetricsManager.newAttributesBuilder()
                        .put(LABEL_TOPIC, msgInner.getTopic())
                        .build());
                }
                return sendResult;
            }
            return res;
        }
    } else if (MessageSysFlag.TRANSACTION_ROLLBACK_TYPE == requestHeader.getCommitOrRollback()) {
        result = this.brokerController.getTransactionalMessageService().rollbackMessage(requestHeader);
        if (result.getResponseCode() == ResponseCode.SUCCESS) {
            if (rejectCommitOrRollback(requestHeader, result.getPrepareMessage())) {
                response.setCode(ResponseCode.ILLEGAL_OPERATION);
                LOGGER.warn("Message rollback fail [producer end]. currentTimeMillis - bornTime > checkImmunityTime, msgId={},commitLogOffset={}, wait check", requestHeader.getMsgId(), requestHeader.getCommitLogOffset());
                return response;
            }
            RemotingCommand res = checkPrepareMessage(result.getPrepareMessage(), requestHeader);
            if (res.getCode() == ResponseCode.SUCCESS) {
                // Rollback only removes the half message; nothing is re-published.
                this.brokerController.getTransactionalMessageService().deletePrepareMessage(result.getPrepareMessage());
                // roll back, then total num of half-messages minus 1
                this.brokerController.getTransactionalMessageService().getTransactionMetrics().addAndGet(result.getPrepareMessage().getProperty(MessageConst.PROPERTY_REAL_TOPIC), -1);
                BrokerMetricsManager.rollBackMessagesTotal.add(1, BrokerMetricsManager.newAttributesBuilder()
                    .put(LABEL_TOPIC, result.getPrepareMessage().getProperty(MessageConst.PROPERTY_REAL_TOPIC))
                    .build());
            }
            return res;
        }
    }
    response.setCode(result.getResponseCode());
    response.setRemark(result.getResponseRemark());
    return response;
}
// A commit originating from a transaction check-back should succeed and bump the broker/topic put statistics.
@Test public void testProcessRequest_CheckMessage() throws RemotingCommandException { when(transactionMsgService.commitMessage(any(EndTransactionRequestHeader.class))).thenReturn(createResponse(ResponseCode.SUCCESS)); when(messageStore.putMessage(any(MessageExtBrokerInner.class))) .thenReturn(new PutMessageResult(PutMessageStatus.PUT_OK, createAppendMessageResult(AppendMessageStatus.PUT_OK))); RemotingCommand request = createEndTransactionMsgCommand(MessageSysFlag.TRANSACTION_COMMIT_TYPE, true); RemotingCommand response = endTransactionProcessor.processRequest(handlerContext, request); assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS); assertThat(brokerController.getBrokerStatsManager().getStatsItem(Stats.BROKER_PUT_NUMS, brokerController.getBrokerConfig().getBrokerClusterName()).getValue().sum()).isEqualTo(1); assertThat(brokerController.getBrokerStatsManager().getStatsItem(Stats.TOPIC_PUT_NUMS, TOPIC).getValue().sum()).isEqualTo(1L); assertThat(brokerController.getBrokerStatsManager().getStatsItem(Stats.TOPIC_PUT_SIZE, TOPIC).getValue().sum()).isEqualTo(1L); }
/**
 * Persists the in-memory offsets of the given message queues into the local offset file,
 * merged on top of whatever offsets are already stored there.
 */
@Override
public void persistAll(Set<MessageQueue> mqs) {
    if (null == mqs || mqs.isEmpty()) {
        return;
    }
    OffsetSerializeWrapper loaded;
    try {
        loaded = readLocalOffset();
    } catch (MQClientException e) {
        log.error("readLocalOffset exception", e);
        return;
    }
    final OffsetSerializeWrapper wrapper = (loaded != null) ? loaded : new OffsetSerializeWrapper();
    // Snapshot only the requested queues' offsets into the wrapper to be serialized.
    this.offsetTable.forEach((mq, controllableOffset) -> {
        if (mqs.contains(mq)) {
            wrapper.getOffsetTable().put(mq, new AtomicLong(controllableOffset.getOffset()));
        }
    });
    String jsonString = wrapper.toJson(true);
    if (jsonString != null) {
        try {
            MixAll.string2File(jsonString, this.storePath);
        } catch (IOException e) {
            log.error("persistAll consumer offset Exception, " + this.storePath, e);
        }
    }
}
// Persisting a later set of queues must merge with, not overwrite, previously persisted offsets.
@Test public void testPersistAll() throws Exception { OffsetStore offsetStore = new LocalFileOffsetStore(mQClientFactory, group); MessageQueue messageQueue0 = new MessageQueue(topic, brokerName, 0); offsetStore.updateOffset(messageQueue0, 1024, false); offsetStore.persistAll(new HashSet<MessageQueue>(Collections.singletonList(messageQueue0))); assertThat(offsetStore.readOffset(messageQueue0, ReadOffsetType.READ_FROM_STORE)).isEqualTo(1024); MessageQueue messageQueue1 = new MessageQueue(topic, brokerName, 1); MessageQueue messageQueue2 = new MessageQueue(topic, brokerName, 2); offsetStore.updateOffset(messageQueue1, 1025, false); offsetStore.updateOffset(messageQueue2, 1026, false); offsetStore.persistAll(new HashSet<MessageQueue>(Arrays.asList(messageQueue1, messageQueue2))); assertThat(offsetStore.readOffset(messageQueue0, ReadOffsetType.READ_FROM_STORE)).isEqualTo(1024); assertThat(offsetStore.readOffset(messageQueue1, ReadOffsetType.READ_FROM_STORE)).isEqualTo(1025); assertThat(offsetStore.readOffset(messageQueue2, ReadOffsetType.READ_FROM_STORE)).isEqualTo(1026); }
// Decodes a DATETIME2 binlog value; the raw value 0 encodes MySQL's special "zero datetime".
@Override public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) { long datetime = readDatetimeV2FromPayload(payload); return 0L == datetime ? MySQLTimeValueUtils.DATETIME_OF_ZERO : readDatetime(columnDef, datetime, payload); }
// Decodes a max-range DATETIME2(3) value (9999-12-31 23:59:59.999) from stubbed payload bytes.
@Test void assertReadWithoutFraction3() { columnDef.setColumnMeta(3); when(payload.readInt1()).thenReturn(0xfe, 0xf3, 0xff, 0x7e, 0xfb); when(payload.getByteBuf()).thenReturn(byteBuf); when(byteBuf.readUnsignedShort()).thenReturn(9990); LocalDateTime expected = LocalDateTime.of(9999, 12, 31, 23, 59, 59, 999 * 1000 * 1000); assertThat(new MySQLDatetime2BinlogProtocolValue().read(columnDef, payload), is(Timestamp.valueOf(expected))); }
/**
 * Invokes {@code callable} and wraps its result (or any thrown exception) in an HTTP Response.
 * When security is enabled and no client user is set, the server user is substituted.
 */
public static <T> Response call(RestUtils.RestCallable<T> callable, AlluxioConfiguration alluxioConf,
    @Nullable Map<String, Object> headers) {
    try {
        // TODO(cc): reconsider how to enable authentication
        if (SecurityUtils.isSecurityEnabled(alluxioConf) && AuthenticatedClientUser.get(alluxioConf) == null) {
            AuthenticatedClientUser.set(ServerUserState.global().getUser().getName());
        }
    } catch (IOException e) {
        LOG.warn("Failed to set AuthenticatedClientUser in REST service handler: {}", e.toString());
        return createErrorResponse(e, alluxioConf);
    }
    try {
        return createResponse(callable.call(), alluxioConf, headers);
    } catch (Exception e) {
        // Any endpoint failure is converted into an error response rather than propagated.
        LOG.warn("Unexpected error invoking rest endpoint: {}", e.toString());
        return createErrorResponse(e, alluxioConf);
    }
}
// A callable throwing AlluxioStatusException should yield a 500 response whose body carries the original status code and message.
@Test public void errorResponse() throws Exception { final Status status = Status.ALREADY_EXISTS; final String message = "error message"; Response response = RestUtils.call(new RestUtils.RestCallable<Void>() { @Override public Void call() throws Exception { throw new AlluxioStatusException(status.withDescription(message)); } }, Configuration.global()); Assert.assertEquals(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), response.getStatus()); RestUtils.ErrorResponse errorResponse = (RestUtils.ErrorResponse) response.getEntity(); Assert.assertEquals(status.getCode(), errorResponse.getStatusCode()); Assert.assertEquals(message, errorResponse.getMessage()); }
/** Loads the entity of {@code entityClass} with the given (non-null) id from the current session; null if absent. */
@SuppressWarnings("unchecked") protected E get(Object id) { return (E) currentSession().get(entityClass, requireNonNull(id)); }
// get(id) should delegate straight to Session.get with the DAO's entity class and the given id.
@Test void getsEntitiesById() throws Exception { when(session.get(String.class, 200)).thenReturn("woo!"); assertThat(dao.get(200)) .isEqualTo("woo!"); verify(session).get(String.class, 200); }
/**
 * Handles a scanned channel-debug URI: validates the SDK/server configuration and,
 * depending on the "is_relink" flag, resumes or starts a channel-debug session.
 */
@Override
public void handlerScanUri(Activity activity, Uri uri) {
    // Channel info configured via meta-data takes precedence; debugging is not allowed then.
    if (ChannelUtils.hasUtmByMetaData(activity)) {
        showDialog(activity, SADisplayUtil.getStringResource(activity, R.string.sensors_analytics_ad_listener));
        return;
    }
    String monitorId = uri.getQueryParameter("monitor_id");
    if (TextUtils.isEmpty(monitorId)) {
        SensorsDataDialogUtils.startLaunchActivity(activity);
        return;
    }
    String url = SensorsDataAPI.sharedInstance().getServerUrl();
    if (TextUtils.isEmpty(url)) {
        showDialog(activity, SADisplayUtil.getStringResource(activity, R.string.sensors_analytics_ad_error_url));
        return;
    }
    ServerUrl serverUrl = new ServerUrl(url);
    String projectName = uri.getQueryParameter("project_name");
    if (serverUrl.getProject().equals(projectName)) {
        String projectId = uri.getQueryParameter("project_id");
        String accountId = uri.getQueryParameter("account_id");
        String isReLink = uri.getQueryParameter("is_relink");
        if ("1".equals(isReLink)) { // relink flag: "1" means resume a previous debug session
            String deviceCode = uri.getQueryParameter("device_code");
            if (ChannelUtils.checkDeviceInfo(activity, deviceCode)) { // check whether the device info matches
                showChannelDebugActiveDialog(activity);
            } else {
                showDialog(activity, SADisplayUtil.getStringResource(activity, R.string.sensors_analytics_ad_error_retry));
            }
        } else {
            showChannelDebugDialog(activity, serverUrl.getBaseUrl(), monitorId, projectId, accountId);
        }
    } else {
        showDialog(activity, SADisplayUtil.getStringResource(activity, R.string.sensors_analytics_ad_error_project));
    }
}
// A channel-debug deep link should be recognized by the scan dispatcher.
@Test public void handlerScanUri() { Activity activity = new Activity(); Uri uri = Uri.parse("567483abc://channeldebug/abc/bcd"); Assert.assertTrue(SAAdvertScanHelper.scanHandler(activity, uri)); }
/**
 * Packs a semantic version into a single int laid out as 0x00MMmmpp
 * (major in bits 16-23, minor in bits 8-15, patch in bits 0-7).
 *
 * @throws IllegalArgumentException if any part is outside [0, 255] or all parts are zero
 */
public static int compose(final int major, final int minor, final int patch) {
    requireByteRange(major, "major");
    requireByteRange(minor, "minor");
    requireByteRange(patch, "patch");
    if (major + minor + patch == 0) {
        throw new IllegalArgumentException("all parts cannot be zero");
    }
    return (major << 16) | (minor << 8) | patch;
}

/** Rejects values outside [0, 255], with the same per-part message as before. */
private static void requireByteRange(final int value, final String name) {
    if (value < 0 || value > 255) {
        throw new IllegalArgumentException(name + " must be 0-255: " + value);
    }
}
// A negative major component must be rejected.
@Test void shouldDetectNegativeMajor() { assertThrows(IllegalArgumentException.class, () -> SemanticVersion.compose(-1, 1, 1)); }
/** Returns the process-wide singleton coordinator instance. */
public static FileRewriteCoordinator get() { return INSTANCE; }
// End-to-end check of the sort-based rewrite flow: stage 4 small files, rewrite them
// into 2 sorted files via the coordinator, commit, and verify snapshot summary + row count.
@TestTemplate
public void testSortRewrite() throws NoSuchTableException, IOException {
    sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName);
    Dataset<Row> df = newDF(1000);
    df.coalesce(1).writeTo(tableName).append();
    df.coalesce(1).writeTo(tableName).append();
    df.coalesce(1).writeTo(tableName).append();
    df.coalesce(1).writeTo(tableName).append();
    Table table = validationCatalog.loadTable(tableIdent);
    assertThat(table.snapshots()).as("Should produce 4 snapshots").hasSize(4);
    try (CloseableIterable<FileScanTask> fileScanTasks = table.newScan().planFiles()) {
        String fileSetID = UUID.randomUUID().toString();
        ScanTaskSetManager taskSetManager = ScanTaskSetManager.get();
        taskSetManager.stageTasks(table, fileSetID, Lists.newArrayList(fileScanTasks));
        // read original 4 files as 4 splits
        Dataset<Row> scanDF = spark
            .read()
            .format("iceberg")
            .option(SparkReadOptions.SCAN_TASK_SET_ID, fileSetID)
            .option(SparkReadOptions.SPLIT_SIZE, "134217728")
            .option(SparkReadOptions.FILE_OPEN_COST, "134217728")
            .load(tableName);
        // make sure we disable AQE and set the number of shuffle partitions as the target num files
        ImmutableMap<String, String> sqlConf =
            ImmutableMap.of(
                "spark.sql.shuffle.partitions", "2",
                "spark.sql.adaptive.enabled", "false");
        withSQLConf(
            sqlConf,
            () -> {
                try {
                    // write new files with sorted records
                    scanDF
                        .sort("id")
                        .writeTo(tableName)
                        .option(SparkWriteOptions.REWRITTEN_FILE_SCAN_TASK_SET_ID, fileSetID)
                        .append();
                } catch (NoSuchTableException e) {
                    throw new RuntimeException("Could not replace files", e);
                }
            });
        // commit the rewrite
        FileRewriteCoordinator rewriteCoordinator = FileRewriteCoordinator.get();
        Set<DataFile> rewrittenFiles =
            taskSetManager.fetchTasks(table, fileSetID).stream()
                .map(t -> t.asFileScanTask().file())
                .collect(Collectors.toSet());
        Set<DataFile> addedFiles = rewriteCoordinator.fetchNewFiles(table, fileSetID);
        table.newRewrite().rewriteFiles(rewrittenFiles, addedFiles).commit();
    }
    table.refresh();
    Map<String, String> summary = table.currentSnapshot().summary();
    assertThat(summary.get("deleted-data-files"))
        .as("Deleted files count must match")
        .isEqualTo("4");
    assertThat(summary.get("added-data-files")).as("Added files count must match").isEqualTo("2");
    Object rowCount = scalarSql("SELECT count(*) FROM %s", tableName);
    assertThat(rowCount).as("Row count must match").isEqualTo(4000L);
}
// Delegates vtap removal to the backing store; behavior for an unknown id is defined by the store.
@Override public OpenstackVtap removeVtap(OpenstackVtapId vtapId) { return store.removeVtap(vtapId); }
// Removing a null vtap id must be rejected with a NullPointerException.
@Test(expected = NullPointerException.class) public void testRemoveNullVtap() { target.removeVtap(null); }
public JsonNode toJson(Object object) { // NOTE: jayway json path 2.4.0 seems to have issues with '@.name' so we'll do this manually // as determined by a cursory and purely subjective investigation by alex // "$..[?(@.name =~ /password$/i || @.name =~ /secret$/i || @.name =~ /secret[\\s_-]*key$/i || @.name =~ /keytab$/i // || @.name =~ /token$/i)]" try { JsonNode node; if (object instanceof JsonNode) { node = (JsonNode) object; } else if (object instanceof String) { node = JsonUtils.stringToJsonNode(String.valueOf(object)); } else { node = JsonUtils.objectToJsonNode(object); } return toJsonRecursive(node); } catch (IOException e) { throw new RuntimeException(e); } }
// With no sensitive field patterns configured, obfuscation must leave the input unchanged.
@Test public void testNoop() { Object output = new Obfuscator("nope", Collections.emptyList()).toJson(_map); Assert.assertEquals(output, JsonUtils.objectToJsonNode(_map)); }
/**
 * Adds condition-specific metadata to the message: event count for REPEATING specs,
 * accumulated duration for DURATION specs; other spec types add nothing.
 */
protected void setAlarmConditionMetadata(AlarmRuleState ruleState, TbMsgMetaData metaData) {
    AlarmConditionSpecType specType = ruleState.getSpec().getType();
    if (specType == AlarmConditionSpecType.REPEATING) {
        metaData.putValue(DataConstants.ALARM_CONDITION_REPEATS, String.valueOf(ruleState.getState().getEventCount()));
    }
    if (specType == AlarmConditionSpecType.DURATION) {
        metaData.putValue(DataConstants.ALARM_CONDITION_DURATION, String.valueOf(ruleState.getState().getDuration()));
    }
}
// A REPEATING condition must populate only the repeats metadata key, with the current event count.
@Test public void testSetAlarmConditionMetadata_repeatingCondition() { AlarmRuleState ruleState = createMockAlarmRuleState(new RepeatingAlarmConditionSpec()); int eventCount = 3; ruleState.getState().setEventCount(eventCount); AlarmState alarmState = createMockAlarmState(); TbMsgMetaData metaData = new TbMsgMetaData(); alarmState.setAlarmConditionMetadata(ruleState, metaData); assertEquals(AlarmConditionSpecType.REPEATING, ruleState.getSpec().getType()); assertNotNull(metaData.getValue(DataConstants.ALARM_CONDITION_REPEATS)); assertNull(metaData.getValue(DataConstants.ALARM_CONDITION_DURATION)); assertEquals(String.valueOf(eventCount), metaData.getValue(DataConstants.ALARM_CONDITION_REPEATS)); }
/**
 * Deactivates (and optionally anonymizes) the user with the given id.
 * Requires a logged-in system administrator; self-deactivation is rejected.
 */
@Override
public void deactivate(String id, Boolean anonymize) {
    userSession.checkLoggedIn().checkIsSystemAdministrator();
    checkRequest(!id.equals(userSession.getUuid()), "Self-deactivation is not possible");
    userService.deactivate(id, anonymize);
}
// A NotFoundException from the service layer must surface as HTTP 404 with the error message in the body.
@Test public void deactivate_whenUserServiceThrowsNotFoundException_shouldReturnNotFound() throws Exception { userSession.logIn().setSystemAdministrator(); doThrow(new NotFoundException("User not found.")).when(userService).deactivate("userToDelete", false); mockMvc.perform(delete(USER_ENDPOINT + "/userToDelete")) .andExpectAll( status().isNotFound(), content().json("{\"message\":\"User not found.\"}")); }
/**
 * Marks the given index set as the cluster default. Only regular index sets are
 * eligible; others yield 409 Conflict, unknown ids yield 404.
 */
@PUT
@Path("{id}/default")
@Timed
@ApiOperation(value = "Set default index set")
@AuditEvent(type = AuditEventTypes.INDEX_SET_UPDATE)
@ApiResponses(value = {
    @ApiResponse(code = 403, message = "Unauthorized"),
})
public IndexSetSummary setDefault(@ApiParam(name = "id", required = true) @PathParam("id") String id) {
    checkPermission(RestPermissions.INDEXSETS_EDIT, id);
    final IndexSetConfig indexSet = indexSetService.get(id)
        .orElseThrow(() -> new NotFoundException("Index set <" + id + "> does not exist"));
    if (!indexSet.isRegularIndex()) {
        throw new ClientErrorException("Index set not eligible as default", Response.Status.CONFLICT);
    }
    clusterConfigService.write(DefaultIndexSetConfig.create(indexSet.id()));
    // Re-read the default so the returned summary reflects whether this set is now the default.
    final IndexSetConfig defaultIndexSet = indexSetService.getDefault();
    return IndexSetSummary.fromIndexSetConfig(indexSet, indexSet.equals(defaultIndexSet));
}
// Without edit permission the call must fail with 403 and must not touch the index-set or cluster-config services.
@Test public void setDefaultDoesNotDoAnyThingIfNotPermitted() { notPermitted(); expectedException.expect(ForbiddenException.class); expectedException.expectMessage("Not authorized to access resource id <someIndexSetId>"); try { indexSetsResource.setDefault("someIndexSetId"); } finally { verifyNoMoreInteractions(indexSetService); verifyNoMoreInteractions(clusterConfigService); } }
// Deletes the current user's review of the extension: 200 on success, 400 with the error payload otherwise.
@PostMapping(
    path = "/api/{namespace}/{extension}/review/delete",
    produces = MediaType.APPLICATION_JSON_VALUE
)
@Operation(hidden = true)
public ResponseEntity<ResultJson> deleteReview(@PathVariable String namespace, @PathVariable String extension) {
    var json = local.deleteReview(namespace, extension);
    return json.error == null
            ? ResponseEntity.ok(json)
            : new ResponseEntity<>(json, HttpStatus.BAD_REQUEST);
}
// Deleting an existing active review by its author should return 200 with a success message.
@Test public void testDeleteReview() throws Exception { var user = mockUserData(); var extVersion = mockExtension(); var extension = extVersion.getExtension(); Mockito.when(repositories.findExtension("bar", "foo")) .thenReturn(extension); var review = new ExtensionReview(); review.setExtension(extension); review.setUser(user); review.setActive(true); Mockito.when(repositories.findActiveReviews(extension, user)) .thenReturn(Streamable.of(review)); Mockito.when(repositories.findActiveReviews(extension)) .thenReturn(Streamable.empty()); mockMvc.perform(post("/api/{namespace}/{extension}/review/delete", "foo", "bar") .with(user("test_user")) .with(csrf().asHeader())) .andExpect(status().isOk()) .andExpect(content().json(successJson("Deleted review for foo.bar"))); }
/**
 * Registers a filter by fully-qualified class name. The class must be loadable
 * by the context's class loader and implement {@link Filter}.
 *
 * @throws IllegalArgumentException if the class does not implement Filter
 * @throws IllegalStateException    if the class cannot be found
 */
@Override
public FilterRegistration.Dynamic addFilter(String name, String filterClass) {
    try {
        Class<?> newFilterClass = getClassLoader().loadClass(filterClass);
        if (!Filter.class.isAssignableFrom(newFilterClass)) {
            throw new IllegalArgumentException(filterClass + " does not implement Filter");
        }
        @SuppressWarnings("unchecked")
        Class<? extends Filter> filterCastClass = (Class<? extends Filter>)newFilterClass;
        return addFilter(name, filterCastClass);
    } catch (ClassNotFoundException e) {
        log.error("Could not find filter class", e);
        // Preserve the original exception as the cause instead of dropping it.
        throw new IllegalStateException("Filter class " + filterClass + " not found", e);
    }
}
// Registering a non-existent filter class must raise IllegalStateException whose message names the class.
@Test void addFilter_nonExistentFilterClass_expectException() { AwsServletContext ctx = new AwsServletContext(null); String filterClass = "com.amazonaws.serverless.TestingFilterClassNonExistent"; try { ctx.addFilter("filter", filterClass); } catch (IllegalStateException e) { assertTrue(e.getMessage().startsWith("Filter class " + filterClass)); return; } fail("Expected IllegalStateException"); }
/**
 * Joins the arguments after the first using the first argument as separator,
 * skipping null values. Returns null when the separator itself is null.
 */
@Udf
public String concatWS(
    @UdfParameter(description = "Separator string and values to join") final String... inputs) {
    if (inputs == null || inputs.length < 2) {
        throw new KsqlFunctionException("Function Concat_WS expects at least two input arguments.");
    }
    final String separator = inputs[0];
    if (separator == null) {
        return null;
    }
    return Arrays.asList(inputs)
        .subList(1, inputs.length)
        .stream()
        .filter(Objects::nonNull)
        .collect(Collectors.joining(separator));
}
// Empty (but non-null) values are kept, producing doubled separators — for both String and bytes overloads.
@Test public void shouldHandleEmptyInputs() { assertThat(udf.concatWS("SEP", "foo", "", "bar"), is("fooSEPSEPbar")); assertThat(udf.concatWS(ByteBuffer.wrap(new byte[] {1}), ByteBuffer.wrap(new byte[] {2}), EMPTY_BYTES, ByteBuffer.wrap(new byte[] {3})), is(ByteBuffer.wrap(new byte[] {2, 1, 1, 3}))); }
/**
 * Builds a new execution of {@code flow}, triggered by the state change of {@code current}.
 * Returns empty when the configured inputs cannot be rendered (the failure is logged,
 * not propagated).
 */
public Optional<Execution> evaluate(RunContext runContext, io.kestra.core.models.flows.Flow flow, Execution current) {
    Logger logger = runContext.logger();
    Execution.ExecutionBuilder builder = Execution.builder()
        .id(IdUtils.create())
        .tenantId(flow.getTenantId())
        .namespace(flow.getNamespace())
        .flowId(flow.getId())
        .flowRevision(flow.getRevision())
        .labels(flow.getLabels())
        .state(new State())
        // Record which execution fired this trigger, for traceability.
        .trigger(ExecutionTrigger.of(
            this,
            Output.builder()
                .executionId(current.getId())
                .namespace(current.getNamespace())
                .flowId(current.getFlowId())
                .flowRevision(current.getFlowRevision())
                .state(current.getState().getCurrent())
                .build()
        ));
    try {
        if (this.inputs != null) {
            Map<String, Object> outputs = current.getOutputs();
            if (outputs != null && !outputs.isEmpty()) {
                // Expose the triggering execution's outputs as trigger.outputs while rendering.
                builder.inputs(runContext.render(this.inputs, Map.of(TRIGGER_VAR, Map.of(OUTPUTS_VAR, outputs))));
            } else {
                builder.inputs(runContext.render(this.inputs));
            }
        } else {
            builder.inputs(new HashMap<>());
        }
        return Optional.of(builder.build());
    } catch (Exception e) {
        // Invalid/unrenderable inputs abort the trigger rather than failing the flow.
        logger.warn(
            "Failed to trigger flow {}.{} for trigger {}, invalid inputs",
            flow.getNamespace(),
            flow.getId(),
            this.getId(),
            e
        );
        return Optional.empty();
    }
}
// The triggered execution must target the triggering flow and inherit that flow's labels.
@Test void success() { var flow = io.kestra.core.models.flows.Flow.builder() .id("flow-with-flow-trigger") .namespace("io.kestra.unittest") .revision(1) .labels( List.of( new Label("flow-label-1", "flow-label-1"), new Label("flow-label-2", "flow-label-2") ) ) .tasks(Collections.singletonList(Return.builder() .id("test") .type(Return.class.getName()) .format("test") .build())) .build(); var execution = Execution.builder() .id(IdUtils.create()) .namespace("io.kestra.unittest") .flowId("flow-with-flow-trigger") .flowRevision(1) .state(State.of(State.Type.RUNNING, Collections.emptyList())) .build(); var flowTrigger = Flow.builder() .id("flow") .type(Flow.class.getName()) .build(); Optional<Execution> evaluate = flowTrigger.evaluate( runContextFactory.of(), flow, execution ); assertThat(evaluate.isPresent(), is(true)); assertThat(evaluate.get().getFlowId(), is("flow-with-flow-trigger")); assertThat(evaluate.get().getLabels(), hasItem(new Label("flow-label-1", "flow-label-1"))); assertThat(evaluate.get().getLabels(), hasItem(new Label("flow-label-2", "flow-label-2"))); }
// Convenience overload: delegates to the named variant with an empty name.
@Override public KTable<K, V> reduce(final Reducer<V> adder, final Reducer<V> subtractor, final Materialized<K, V, KeyValueStore<Bytes, byte[]>> materialized) { return reduce(adder, subtractor, NamedInternal.empty(), materialized); }
// Reducing a grouped table with adder/subtractor should produce the expected aggregates and expose the queryable store name.
@Test public void shouldReduce() { final KeyValueMapper<String, Number, KeyValue<String, Integer>> intProjection = (key, value) -> KeyValue.pair(key, value.intValue()); final KTable<String, Integer> reduced = builder .table( topic, Consumed.with(Serdes.String(), Serdes.Double()), Materialized.<String, Double, KeyValueStore<Bytes, byte[]>>as("store") .withKeySerde(Serdes.String()) .withValueSerde(Serdes.Double())) .groupBy(intProjection) .reduce( MockReducer.INTEGER_ADDER, MockReducer.INTEGER_SUBTRACTOR, Materialized.as("reduced")); final MockApiProcessorSupplier<String, Integer, Void, Void> supplier = getReducedResults(reduced); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { assertReduced(supplier.theCapturedProcessor().lastValueAndTimestampPerKey(), topic, driver); assertEquals(reduced.queryableStoreName(), "reduced"); } }
public <T> Flux<T> queryItems( final String query, final CosmosQueryRequestOptions queryRequestOptions, final Class<T> itemType) { CosmosDbUtils.validateIfParameterIsNotEmpty(query, PARAM_QUERY); CosmosDbUtils.validateIfParameterIsNotEmpty(itemType, PARAM_ITEM_TYPE); return container .flatMapMany(container -> CosmosDbUtils.convertCosmosPagedFluxToFluxResults( container.queryItems(query, queryRequestOptions, itemType))); }
@Test void queryItems() { final CosmosDbContainerOperations operations = new CosmosDbContainerOperations(Mono.just(mock(CosmosAsyncContainer.class))); CosmosDbTestUtils.assertIllegalArgumentException(() -> operations.queryItems(null, null, null)); CosmosDbTestUtils.assertIllegalArgumentException(() -> operations.queryItems(null, null, Object.class)); CosmosDbTestUtils.assertIllegalArgumentException(() -> operations.queryItems("", null, Object.class)); }
public static void pressText(File imageFile, File destFile, String pressText, Color color, Font font, int x, int y, float alpha) { BufferedImage image = null; try { image = read(imageFile); pressText(image, destFile, pressText, color, font, x, y, alpha); } finally { flush(image); } }
@Test @Disabled public void pressTextTest() { ImgUtil.pressText(// FileUtil.file("d:/test/2.jpg"), // FileUtil.file("d:/test/2_result.png"), // "版权所有", Color.RED, // new Font("黑体", Font.BOLD, 100), // 0, // 0, // 1f); }
public static UFreeIdent create(CharSequence identifier) { return new AutoValue_UFreeIdent(StringName.of(identifier)); }
@Test public void binds() { JCExpression expr = parseExpression("\"abcdefg\".charAt(x + 1)"); UFreeIdent ident = UFreeIdent.create("foo"); assertThat(ident.unify(expr, unifier)).isNotNull(); assertThat(unifier.getBindings()).containsExactly(new UFreeIdent.Key("foo"), expr); }
@Override public WxMaPhoneNumberInfo getWxMaPhoneNumberInfo(Integer userType, String phoneCode) { WxMaService service = getWxMaService(userType); try { return service.getUserService().getPhoneNoInfo(phoneCode); } catch (WxErrorException e) { log.error("[getPhoneNoInfo][userType({}) phoneCode({}) 获得手机号失败]", userType, phoneCode, e); throw exception(SOCIAL_CLIENT_WEIXIN_MINI_APP_PHONE_CODE_ERROR); } }
@Test public void testGetWxMaPhoneNumberInfo_exception() throws WxErrorException { // 准备参数 Integer userType = randomPojo(UserTypeEnum.class).getValue(); String phoneCode = randomString(); // mock 方法 WxMaUserService userService = mock(WxMaUserService.class); when(wxMaService.getUserService()).thenReturn(userService); WxErrorException wxErrorException = randomPojo(WxErrorException.class); when(userService.getPhoneNoInfo(eq(phoneCode))).thenThrow(wxErrorException); // 调用并断言异常 assertServiceException(() -> socialClientService.getWxMaPhoneNumberInfo(userType, phoneCode), SOCIAL_CLIENT_WEIXIN_MINI_APP_PHONE_CODE_ERROR); }
@Override public void onEvent(Event event) { if (EnvUtil.getStandaloneMode()) { return; } if (event instanceof ClientEvent.ClientVerifyFailedEvent) { syncToVerifyFailedServer((ClientEvent.ClientVerifyFailedEvent) event); } else { syncToAllServer((ClientEvent) event); } }
@Test void testOnClientChangedEventSuccess() { distroClientDataProcessor.onEvent(new ClientEvent.ClientChangedEvent(client)); verify(distroProtocol, never()).syncToTarget(any(), any(), anyString(), anyLong()); verify(distroProtocol).sync(any(), eq(DataOperation.CHANGE)); }
@Override public String format(final Schema schema) { final String converted = SchemaWalker.visit(schema, new Converter()) + typePostFix(schema); return options.contains(Option.AS_COLUMN_LIST) ? stripTopLevelStruct(converted) : converted; }
@Test public void shouldFormatStruct() { // Given: final Schema structSchema = SchemaBuilder.struct() .field("COL1", Schema.STRING_SCHEMA) .field("COL4", SchemaBuilder .array(Schema.FLOAT64_SCHEMA) .build()) .field("COL5", SchemaBuilder .map(Schema.STRING_SCHEMA, Schema.FLOAT64_SCHEMA) .build()) .build(); // Then: assertThat(DEFAULT.format(structSchema), is( "STRUCT<" + "COL1 VARCHAR, " + "COL4 ARRAY<DOUBLE>, " + "COL5 MAP<VARCHAR, DOUBLE>" + ">")); assertThat(STRICT.format(structSchema), is( "STRUCT<" + "COL1 VARCHAR NOT NULL, " + "COL4 ARRAY<DOUBLE NOT NULL> NOT NULL, " + "COL5 MAP<VARCHAR NOT NULL, DOUBLE NOT NULL> NOT NULL" + "> NOT NULL")); }
public static Class loadClass(String className, ClassLoader classLoader) throws ClassNotFoundException { if (className.length() == 1) { char type = className.charAt(0); if (type == 'B') { return Byte.TYPE; } else if (type == 'C') { return Character.TYPE; } else if (type == 'D') { return Double.TYPE; } else if (type == 'F') { return Float.TYPE; } else if (type == 'I') { return Integer.TYPE; } else if (type == 'J') { return Long.TYPE; } else if (type == 'S') { return Short.TYPE; } else if (type == 'Z') { return Boolean.TYPE; } else if (type == 'V') { return Void.TYPE; } else { throw new ClassNotFoundException(className); } } else if (isPrimitive(className)) { return (Class) PRIMITIVE_NAME_TYPE_MAP.get(className); } else if (className.charAt(0) == 'L' && className.charAt(className.length() - 1) == ';') { return classLoader.loadClass(className.substring(1, className.length() - 1)); } else { try { return classLoader.loadClass(className); } catch (ClassNotFoundException | NoClassDefFoundError var4) { if (className.charAt(0) != '[') { throw var4; } else { // CHECKSTYLE.OFF: EmptyStatement int arrayDimension; for (arrayDimension = 0; className.charAt(arrayDimension) == '['; ++arrayDimension) { } // CHECKSTYLE.ON: EmptyStatement Class componentType = loadClass(className.substring(arrayDimension), classLoader); return Array.newInstance(componentType, new int[arrayDimension]).getClass(); } } } }
@Test public void testLoadClass() throws Exception { ClassLoader clsLoader = ClassLoader.getSystemClassLoader(); Class[] classes = new Class[] { Integer.class, int.class, Byte.class, byte.class, Double.class, double.class, Float.class, float.class, Character.class, char.class, Long.class, long.class, Short.class, short.class, Boolean.class, boolean.class, Void.class, Reflections.class, Integer[].class, int[].class }; for (Class cls : classes) { assertEquals(cls, Reflections.loadClass(cls.getName(), clsLoader)); } }
@Override public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { try { final String resourceId = fileid.getFileId(file); final UiFsModel uiFsModel = new ListResourceApi(new EueApiClient(session)).resourceResourceIdGet(resourceId, null, null, null, null, null, null, Collections.singletonList(EueAttributesFinderFeature.OPTION_DOWNLOAD), null); final HttpUriRequest request = new HttpGet(uiFsModel.getUilink().getDownloadURI()); if(status.isAppend()) { final HttpRange range = HttpRange.withStatus(status); final String header; if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) { header = String.format("bytes=%d-", range.getStart()); } else { header = String.format("bytes=%d-%d", range.getStart(), range.getEnd()); } if(log.isDebugEnabled()) { log.debug(String.format("Add range header %s for file %s", header, file)); } request.addHeader(new BasicHeader(HttpHeaders.RANGE, header)); // Disable compression request.addHeader(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "identity")); } final HttpResponse response = session.getClient().execute(request); switch(response.getStatusLine().getStatusCode()) { case HttpStatus.SC_OK: case HttpStatus.SC_PARTIAL_CONTENT: return new HttpMethodReleaseInputStream(response); default: throw new DefaultHttpResponseExceptionMappingService().map("Download {0} failed", new HttpResponseException( response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file); } } catch(ApiException e) { throw new EueExceptionMappingService().map("Download {0} failed", e, file); } catch(IOException e) { throw new DefaultIOExceptionMappingService().map("Download {0} failed", e, file); } }
@Test(expected = NotfoundException.class) public void testNotFound() throws Exception { final EueResourceIdProvider fileid = new EueResourceIdProvider(session); new EueReadFeature(session, fileid).read( new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus(), new DisabledConnectionCallback()); }
public static <T> Collection<T> nullToEmpty(Collection<T> collection) { return collection == null ? Collections.emptyList() : collection; }
@Test public void testNullToEmpty_whenNull() { assertEquals(emptyList(), nullToEmpty(null)); }
public static Set<String> extractColumnsWithGranularity(TableConfig tableConfig) { if (tableConfig.getFieldConfigList() == null) { return Collections.emptySet(); } Set<String> columnsWithGranularity = new HashSet<>(); for (FieldConfig fieldConfig : tableConfig.getFieldConfigList()) { TimestampConfig timestampConfig = fieldConfig.getTimestampConfig(); if (timestampConfig == null || CollectionUtils.isEmpty(timestampConfig.getGranularities())) { continue; } String timestampColumn = fieldConfig.getName(); for (TimestampIndexGranularity granularity : timestampConfig.getGranularities()) { columnsWithGranularity.add(getColumnWithGranularity(timestampColumn, granularity)); } } return columnsWithGranularity.isEmpty() ? Collections.emptySet() : columnsWithGranularity; }
@Test public void testExtractColumnsWithGranularity() { TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName("testTable").setFieldConfigList( Arrays.asList( new FieldConfig("ts1", FieldConfig.EncodingType.DICTIONARY, FieldConfig.IndexType.TIMESTAMP, null, null, new TimestampConfig(Arrays.asList(TimestampIndexGranularity.SECOND, TimestampIndexGranularity.MINUTE, TimestampIndexGranularity.HOUR)), null), new FieldConfig("ts2", FieldConfig.EncodingType.RAW, FieldConfig.IndexType.TIMESTAMP, null, null, new TimestampConfig(Arrays.asList(TimestampIndexGranularity.HOUR, TimestampIndexGranularity.DAY, TimestampIndexGranularity.WEEK)), null), new FieldConfig("ts3", FieldConfig.EncodingType.RAW, FieldConfig.IndexType.TIMESTAMP, null, FieldConfig.CompressionCodec.PASS_THROUGH, new TimestampConfig( Arrays.asList(TimestampIndexGranularity.WEEK, TimestampIndexGranularity.MONTH, TimestampIndexGranularity.YEAR)), null))).build(); Set<String> columnsWithGranularity = TimestampIndexUtils.extractColumnsWithGranularity(tableConfig); assertEquals(columnsWithGranularity, new HashSet<>( Arrays.asList("$ts1$SECOND", "$ts1$MINUTE", "$ts1$HOUR", "$ts2$HOUR", "$ts2$DAY", "$ts2$WEEK", "$ts3$WEEK", "$ts3$MONTH", "$ts3$YEAR"))); }
@GET @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) public AppInfo get() { return getAppInfo(); }
@Test public void testAMDefault() throws JSONException, Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("mapreduce/") .get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, response.getType().toString()); JSONObject json = response.getEntity(JSONObject.class); assertEquals("incorrect number of elements", 1, json.length()); verifyAMInfo(json.getJSONObject("info"), appContext); }
public CompletableFuture<Account> confirmReservedUsernameHash(final Account account, final byte[] reservedUsernameHash, @Nullable final byte[] encryptedUsername) { if (account.getUsernameHash().map(currentUsernameHash -> Arrays.equals(currentUsernameHash, reservedUsernameHash)).orElse(false)) { // the client likely already succeeded and is retrying return CompletableFuture.completedFuture(account); } if (!account.getReservedUsernameHash().map(oldHash -> Arrays.equals(oldHash, reservedUsernameHash)).orElse(false)) { // no such reservation existed, either there was no previous call to reserveUsername // or the reservation changed return CompletableFuture.failedFuture(new UsernameReservationNotFoundException()); } return redisDeleteAsync(account) .thenCompose(ignored -> updateWithRetriesAsync( account, a -> true, a -> accounts.confirmUsernameHash(a, reservedUsernameHash, encryptedUsername), () -> accounts.getByAccountIdentifierAsync(account.getUuid()).thenApply(Optional::orElseThrow), AccountChangeValidator.USERNAME_CHANGE_VALIDATOR, MAX_UPDATE_ATTEMPTS )) .whenComplete((updatedAccount, throwable) -> { if (throwable == null) { // Make a best effort to clear any stale data that may have been cached while this operation was in progress redisDeleteAsync(updatedAccount); } }); }
@Test void testConfirmReservedLapsed() { final Account account = AccountsHelper.generateTestAccount("+18005551234", UUID.randomUUID(), UUID.randomUUID(), new ArrayList<>(), new byte[UnidentifiedAccessUtil.UNIDENTIFIED_ACCESS_KEY_LENGTH]); // hash was reserved, but the reservation lapsed and another account took it setReservationHash(account, USERNAME_HASH_1); when(accounts.confirmUsernameHash(account, USERNAME_HASH_1, ENCRYPTED_USERNAME_1)) .thenReturn(CompletableFuture.failedFuture(new UsernameHashNotAvailableException())); CompletableFutureTestUtil.assertFailsWithCause(UsernameHashNotAvailableException.class, accountsManager.confirmReservedUsernameHash(account, USERNAME_HASH_1, ENCRYPTED_USERNAME_1)); assertTrue(account.getUsernameHash().isEmpty()); }
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException { final Map<Path, List<ObjectKeyAndVersion>> map = new HashMap<>(); final List<Path> containers = new ArrayList<>(); for(Path file : files.keySet()) { if(containerService.isContainer(file)) { containers.add(file); continue; } callback.delete(file); final Path bucket = containerService.getContainer(file); if(file.getType().contains(Path.Type.upload)) { // In-progress multipart upload try { multipartService.delete(new MultipartUpload(file.attributes().getVersionId(), bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file))); } catch(NotfoundException ignored) { log.warn(String.format("Ignore failure deleting multipart upload %s", file)); } } else { final List<ObjectKeyAndVersion> keys = new ArrayList<>(); // Always returning 204 even if the key does not exist. Does not return 404 for non-existing keys keys.add(new ObjectKeyAndVersion(containerService.getKey(file), file.attributes().getVersionId())); if(map.containsKey(bucket)) { map.get(bucket).addAll(keys); } else { map.put(bucket, keys); } } } // Iterate over all containers and delete list of keys for(Map.Entry<Path, List<ObjectKeyAndVersion>> entry : map.entrySet()) { final Path container = entry.getKey(); final List<ObjectKeyAndVersion> keys = entry.getValue(); this.delete(container, keys, prompt); } for(Path file : containers) { callback.delete(file); // Finally delete bucket itself try { final String bucket = containerService.getContainer(file).getName(); session.getClient().deleteBucket(bucket); session.getClient().getRegionEndpointCache().removeRegionForBucketName(bucket); } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Cannot delete {0}", e, file); } } }
@Test(expected = NotfoundException.class) public void testDeleteNotFoundBucket() throws Exception { final Path container = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)); new S3MultipleDeleteFeature(session, new S3AccessControlListFeature(session)).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
public static RestSettingBuilder delete(final RestIdMatcher idMatcher) { return single(HttpMethod.DELETE, checkNotNull(idMatcher, "ID Matcher should not be null")); }
@Test public void should_not_delete_with_unknown_id() throws Exception { server.resource("targets", delete("1").response(status(200)) ); running(server, () -> { HttpResponse httpResponse = helper.deleteForResponse(remoteUrl("/targets/2")); assertThat(httpResponse.getCode(), is(404)); }); }
@Nullable @Override public CodecAggregator getAggregator() { return null; }
@Test public void getAggregator() throws Exception { assertNull(codec.getAggregator()); }
@Override public boolean map(URL url) { if (CollectionUtils.isEmpty( applicationModel.getApplicationConfigManager().getMetadataConfigs())) { logger.warn( COMMON_PROPERTY_TYPE_MISMATCH, "", "", "No valid metadata config center found for mapping report."); return false; } String serviceInterface = url.getServiceInterface(); if (IGNORED_SERVICE_INTERFACES.contains(serviceInterface)) { return true; } boolean result = true; for (Map.Entry<String, MetadataReport> entry : metadataReportInstance.getMetadataReports(true).entrySet()) { MetadataReport metadataReport = entry.getValue(); String appName = applicationModel.getApplicationName(); try { if (metadataReport.registerServiceAppMapping(serviceInterface, appName, url)) { // MetadataReport support directly register service-app mapping continue; } boolean succeeded = false; int currentRetryTimes = 1; String newConfigContent = appName; do { ConfigItem configItem = metadataReport.getConfigItem(serviceInterface, DEFAULT_MAPPING_GROUP); String oldConfigContent = configItem.getContent(); if (StringUtils.isNotEmpty(oldConfigContent)) { String[] oldAppNames = oldConfigContent.split(","); if (oldAppNames.length > 0) { for (String oldAppName : oldAppNames) { if (StringUtils.trim(oldAppName).equals(appName)) { succeeded = true; break; } } } if (succeeded) { break; } newConfigContent = oldConfigContent + COMMA_SEPARATOR + appName; } succeeded = metadataReport.registerServiceAppMapping( serviceInterface, DEFAULT_MAPPING_GROUP, newConfigContent, configItem.getTicket()); if (!succeeded) { int waitTime = ThreadLocalRandom.current().nextInt(casRetryWaitTime); logger.info("Failed to publish service name mapping to metadata center by cas operation. " + "Times: " + currentRetryTimes + ". " + "Next retry delay: " + waitTime + ". " + "Service Interface: " + serviceInterface + ". " + "Origin Content: " + oldConfigContent + ". " + "Ticket: " + configItem.getTicket() + ". 
" + "Excepted context: " + newConfigContent); Thread.sleep(waitTime); } } while (!succeeded && currentRetryTimes++ <= casRetryTimes); if (!succeeded) { result = false; } } catch (Exception e) { result = false; logger.warn( INTERNAL_ERROR, "unknown error in registry module", "", "Failed registering mapping to remote." + metadataReport, e); } } return result; }
@Test void testMap() { ApplicationModel mockedApplicationModel = spy(applicationModel); when(configManager.getMetadataConfigs()).thenReturn(Collections.emptyList()); Mockito.when(mockedApplicationModel.getApplicationConfigManager()).thenReturn(configManager); Mockito.when(mockedApplicationModel.getCurrentConfig()).thenReturn(new ApplicationConfig("test")); // metadata report config not found mapping.setApplicationModel(mockedApplicationModel); boolean result = mapping.map(url); assertFalse(result); when(configManager.getMetadataConfigs()).thenReturn(Arrays.asList(new MetadataReportConfig())); MetadataReportInstance reportInstance = mock(MetadataReportInstance.class); Mockito.when(reportInstance.getMetadataReports(true)).thenReturn(metadataReportList); mapping.metadataReportInstance = reportInstance; when(metadataReport.registerServiceAppMapping(any(), any(), any())).thenReturn(true); // metadata report directly result = mapping.map(url); assertTrue(result); // metadata report using cas and retry, succeeded after retried 10 times when(metadataReport.registerServiceAppMapping(any(), any(), any())).thenReturn(false); when(metadataReport.getConfigItem(any(), any())).thenReturn(new ConfigItem()); when(metadataReport.registerServiceAppMapping(any(), any(), any(), any())) .thenAnswer(new Answer<Boolean>() { private int counter = 0; @Override public Boolean answer(InvocationOnMock invocationOnMock) { if (++counter == 10) { return true; } return false; } }); assertTrue(mapping.map(url)); // metadata report using cas and retry, failed after 11 times retry when(metadataReport.registerServiceAppMapping(any(), any(), any(), any())) .thenReturn(false); Exception exceptionExpected = null; assertFalse(mapping.map(url)); }
public static List<URL> parseConfigurators(String rawConfig) { // compatible url JsonArray, such as [ "override://xxx", "override://xxx" ] List<URL> compatibleUrls = parseJsonArray(rawConfig); if (CollectionUtils.isNotEmpty(compatibleUrls)) { return compatibleUrls; } List<URL> urls = new ArrayList<>(); ConfiguratorConfig configuratorConfig = parseObject(rawConfig); String scope = configuratorConfig.getScope(); List<ConfigItem> items = configuratorConfig.getConfigs(); if (ConfiguratorConfig.SCOPE_APPLICATION.equals(scope)) { items.forEach(item -> urls.addAll(appItemToUrls(item, configuratorConfig))); } else { // service scope by default. items.forEach(item -> urls.addAll(serviceItemToUrls(item, configuratorConfig))); } return urls; }
@Test void parseProviderConfigurationV3Conflict() throws IOException { try (InputStream yamlStream = this.getClass().getResourceAsStream("/ConfiguratorV3Duplicate.yml")) { List<URL> urls = ConfigParser.parseConfigurators(streamToString(yamlStream)); Assertions.assertNotNull(urls); Assertions.assertEquals(1, urls.size()); URL url = urls.get(0); Assertions.assertEquals("10.0.0.1:20880", url.getAddress()); Assertions.assertEquals("DemoService", url.getServiceInterface()); Assertions.assertEquals(200, url.getParameter(WEIGHT_KEY, 0)); Assertions.assertEquals("demo-provider", url.getApplication()); URL matchURL = URL.valueOf("dubbo://10.0.0.1:20880/DemoService?match_key1=value1"); URL notMatchURL = URL.valueOf("dubbo://10.0.0.1:20880/DemoService?match_key1=value_not_match"); // key not match ConditionMatch matcher = (ConditionMatch) url.getAttribute(MATCH_CONDITION); Assertions.assertTrue(matcher.isMatch(matchURL.getAddress(), matchURL)); Assertions.assertFalse(matcher.isMatch(notMatchURL.getAddress(), notMatchURL)); } }
public static byte[] compress(String urlString) throws MalformedURLException { byte[] compressedBytes = null; if (urlString != null) { // Figure the compressed bytes can't be longer than the original string. byte[] byteBuffer = new byte[urlString.length()]; int byteBufferIndex = 0; Arrays.fill(byteBuffer, (byte) 0x00); Pattern urlPattern = Pattern.compile(EDDYSTONE_URL_REGEX); Matcher urlMatcher = urlPattern.matcher(urlString); if (urlMatcher.matches()) { // www. String wwwdot = urlMatcher.group(EDDYSTONE_URL_WWW_GROUP); boolean haswww = (wwwdot != null); // Protocol. String rawProtocol = urlMatcher.group(EDDYSTONE_URL_PROTOCOL_GROUP); String protocol = rawProtocol.toLowerCase(); if (protocol.equalsIgnoreCase(URL_PROTOCOL_HTTP)) { byteBuffer[byteBufferIndex] = (haswww ? EDDYSTONE_URL_PROTOCOL_HTTP_WWW : EDDYSTONE_URL_PROTOCOL_HTTP); } else { byteBuffer[byteBufferIndex] = (haswww ? EDDYSTONE_URL_PROTOCOL_HTTPS_WWW : EDDYSTONE_URL_PROTOCOL_HTTPS); } byteBufferIndex++; // Fully-qualified domain name (FQDN). This includes the hostname and any other components after the dots // but BEFORE the first single slash in the URL. byte[] hostnameBytes = urlMatcher.group(EDDYSTONE_URL_FQDN_GROUP).getBytes(); String rawHostname = new String(hostnameBytes); String hostname = rawHostname.toLowerCase(); String[] domains = hostname.split(Pattern.quote(".")); boolean consumedSlash = false; if (domains != null) { // Write the hostname/subdomains prior to the last one. If there's only one (e. g. http://localhost) // then that's the only thing to write out. byte[] periodBytes = {'.'}; int writableDomainsCount = (domains.length == 1 ? 1 : domains.length - 1); for (int domainIndex = 0; domainIndex < writableDomainsCount; domainIndex++) { // Write out leading period, if necessary. 
if (domainIndex > 0) { System.arraycopy(periodBytes, 0, byteBuffer, byteBufferIndex, periodBytes.length); byteBufferIndex += periodBytes.length; } byte[] domainBytes = domains[domainIndex].getBytes(); int domainLength = domainBytes.length; System.arraycopy(domainBytes, 0, byteBuffer, byteBufferIndex, domainLength); byteBufferIndex += domainLength; } // Is the TLD one that we can encode? if (domains.length > 1) { String tld = "." + domains[domains.length - 1]; String slash = urlMatcher.group(EDDYSTONE_URL_SLASH_GROUP); String encodableTLDCandidate = (slash == null ? tld : tld + slash); byte encodedTLDByte = encodedByteForTopLevelDomain(encodableTLDCandidate); if (encodedTLDByte != TLD_NOT_ENCODABLE) { byteBuffer[byteBufferIndex++] = encodedTLDByte; consumedSlash = (slash != null); } else { byte[] tldBytes = tld.getBytes(); int tldLength = tldBytes.length; System.arraycopy(tldBytes, 0, byteBuffer, byteBufferIndex, tldLength); byteBufferIndex += tldLength; } } } // Optional slash. if (! consumedSlash) { String slash = urlMatcher.group(EDDYSTONE_URL_SLASH_GROUP); if (slash != null) { int slashLength = slash.length(); System.arraycopy(slash.getBytes(), 0, byteBuffer, byteBufferIndex, slashLength); byteBufferIndex += slashLength; } } // Path. String path = urlMatcher.group(EDDYSTONE_URL_PATH_GROUP); if (path != null) { int pathLength = path.length(); System.arraycopy(path.getBytes(), 0, byteBuffer, byteBufferIndex, pathLength); byteBufferIndex += pathLength; } // Copy the result. compressedBytes = new byte[byteBufferIndex]; System.arraycopy(byteBuffer, 0, compressedBytes, 0, compressedBytes.length); } else { throw new MalformedURLException(); } } else { throw new MalformedURLException(); } return compressedBytes; }
@Test public void testCompressWithShortenedURLContainingCaps() throws MalformedURLException { String testURL = "http://goo.gl/C2HC48"; byte[] expectedBytes = {0x02, 'g', 'o', 'o', '.', 'g', 'l', '/', 'C', '2', 'H', 'C', '4', '8'}; String hexBytes = bytesToHex(UrlBeaconUrlCompressor.compress(testURL)); assertTrue(Arrays.equals(expectedBytes, UrlBeaconUrlCompressor.compress(testURL))); }
synchronized void trim(long startKey, long endKey) { /* * [3, 4, 5, 9] -> [10, 13, 14, 15] -> [21, 24, 29, 30] -> [31] :: start layout * |5______________________________23| :: trim(5, 23) * [5, 9] -> [10, 13, 14, 15] -> [21] :: result layout */ final Iterator<Chunk> descendingIterator = chunks.descendingIterator(); while (descendingIterator.hasNext()) { final Chunk currentTail = descendingIterator.next(); if (isFirstElementIsEmptyOrGreaterEqualThanKey(currentTail, endKey)) { freeChunk(currentTail); descendingIterator.remove(); } else { currentTail.cursor = findFirstIndexOfGreaterEqualElements(currentTail.keys, currentTail.startIndex, currentTail.cursor, endKey); break; } } final Iterator<Chunk> iterator = chunks.iterator(); while (iterator.hasNext()) { final Chunk currentHead = iterator.next(); if (isLastElementIsLessThanKey(currentHead, startKey)) { freeChunk(currentHead); iterator.remove(); } else { final int newStartIndex = findFirstIndexOfGreaterEqualElements(currentHead.keys, currentHead.startIndex, currentHead.cursor, startKey); if (currentHead.startIndex != newStartIndex) { currentHead.startIndex = newStartIndex; currentHead.chunkSize = currentHead.cursor - currentHead.startIndex; } break; } } }
@Test public void testTrim() { ChunkedAssociativeLongArray array = new ChunkedAssociativeLongArray(3); array.put(-3, 3); array.put(-2, 1); array.put(0, 5); array.put(3, 0); array.put(9, 8); array.put(15, 0); array.put(19, 5); array.put(21, 5); array.put(34, -9); array.put(109, 5); then(array.out()) .isEqualTo("[(-3: 3) (-2: 1) (0: 5) ]->[(3: 0) (9: 8) (15: 0) ]->[(19: 5) (21: 5) (34: -9) ]->[(109: 5) ]"); then(array.values()) .isEqualTo(new long[]{3, 1, 5, 0, 8, 0, 5, 5, -9, 5}); then(array.size()) .isEqualTo(10); array.trim(-2, 20); then(array.out()) .isEqualTo("[(-2: 1) (0: 5) ]->[(3: 0) (9: 8) (15: 0) ]->[(19: 5) ]"); then(array.values()) .isEqualTo(new long[]{1, 5, 0, 8, 0, 5}); then(array.size()) .isEqualTo(6); }
@Override public void check(final EncryptRule encryptRule, final ShardingSphereSchema schema, final SelectStatementContext sqlStatementContext) { for (OrderByItem each : getOrderByItems(sqlStatementContext)) { if (each.getSegment() instanceof ColumnOrderByItemSegment) { checkColumnOrderByItem(encryptRule, schema, sqlStatementContext, ((ColumnOrderByItemSegment) each.getSegment()).getColumn()); } } }
@Test void assertCheck() { assertThrows(UnsupportedEncryptSQLException.class, () -> new EncryptOrderByItemSupportedChecker().check(mockEncryptRule(), mock(ShardingSphereSchema.class), buildSelectStatementContext())); }
@Operation(summary = "Receive SAML logoutRequest") @PostMapping({"/idp/request_logout", "/entrance/request_logout"}) public void requestLogout(HttpServletRequest request, HttpServletResponse response) throws SamlParseException, SamlSessionException, SamlValidationException, DienstencatalogusException { logger.debug("Receive SAML logoutRequest"); LogoutRequestModel logoutRequestModel = logoutService.parseLogoutRequest(request); logoutService.generateResponse(logoutRequestModel, response); }
@Test public void requestLogoutTest() throws SamlValidationException, SamlSessionException, SamlParseException, DienstencatalogusException { when(logoutServiceMock.parseLogoutRequest(any(HttpServletRequest.class))).thenReturn(new LogoutRequestModel()); doNothing().when(logoutServiceMock).generateResponse(any(LogoutRequestModel.class), any(HttpServletResponse.class)); logoutController.requestLogout(request, response); verify(logoutServiceMock, times(1)).parseLogoutRequest(any(HttpServletRequest.class)); verify(logoutServiceMock, times(1)).generateResponse(any(LogoutRequestModel.class), any(HttpServletResponse.class)); }
@Override public void start() throws Exception { LOG.debug("Start leadership runner for job {}.", getJobID()); leaderElection.startLeaderElection(this); }
@Test void testCloseReleasesClassLoaderLease() throws Exception { final OneShotLatch closeClassLoaderLeaseLatch = new OneShotLatch(); final TestingClassLoaderLease classLoaderLease = TestingClassLoaderLease.newBuilder() .setCloseRunnable(closeClassLoaderLeaseLatch::trigger) .build(); try (JobManagerRunner jobManagerRunner = newJobMasterServiceLeadershipRunnerBuilder() .setClassLoaderLease(classLoaderLease) .build()) { jobManagerRunner.start(); jobManagerRunner.close(); closeClassLoaderLeaseLatch.await(); } }
@Override public void onOpened() { digestNotification(); }
@Test public void onOpened_appVisible_notifyJS() throws Exception { setUpForegroundApp(); final PushNotification uut = createUUT(); uut.onOpened(); verify(mContext, never()).startActivity(any(Intent.class)); verify(mJsIOHelper).sendEventToJS(eq(NOTIFICATION_OPENED_EVENT_NAME), argThat(new isValidResponse(mResponseBundle)), eq(mReactContext)); }
@Override @Deprecated public <VR> KStream<K, VR> transformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier<? super V, ? extends VR> valueTransformerSupplier, final String... stateStoreNames) { Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); return doTransformValues( toValueTransformerWithKeySupplier(valueTransformerSupplier), NamedInternal.empty(), stateStoreNames); }
@Test @SuppressWarnings("deprecation") public void shouldNotAllowBadValueTransformerWithKeySupplierOnTransformValuesWithNamed() { final ValueTransformerWithKey<String, String, String> transformer = valueTransformerWithKeySupplier.get(); final IllegalArgumentException exception = assertThrows( IllegalArgumentException.class, () -> testStream.transformValues(() -> transformer, Named.as("transformer")) ); assertThat(exception.getMessage(), containsString("#get() must return a new object each time it is called.")); }
@Override public Long computeValue(final Collection<T> elementsInBin, final int totalElements) { return (long) elementsInBin.stream() .mapToLong(valueRetrievalFunction) .average() .orElse(0L); }
@Test void testReturnsZeroOnNoData() { final Long result = toTest.computeValue(List.of(), 42); assertEquals(0, result); }
@Deprecated public String validateAntFileMask(final String fileMasks) throws IOException, InterruptedException { return validateAntFileMask(fileMasks, Integer.MAX_VALUE); }
// Exercises Ant file-mask validation against a small remote (channel-backed) directory tree:
// matching masks return null, non-matching ones return a localized suggestion message.
@Test
public void validateAntFileMask() throws Exception {
    File tmp = temp.getRoot();
    // FilePath over a remoting channel, so validation runs on the "remote" side.
    FilePath d = new FilePath(channels.french, tmp.getPath());
    d.child("d1/d2/d3").mkdirs();
    d.child("d1/d2/d3/f.txt").touch(0);
    d.child("d1/d2/d3/f.html").touch(0);
    d.child("d1/d2/f.txt").touch(0);
    // Valid masks: expect null (no validation message).
    assertValidateAntFileMask(null, d, "**/*.txt");
    assertValidateAntFileMask(null, d, "d1/d2/d3/f.txt");
    assertValidateAntFileMask(null, d, "**/*.html");
    // Partial match: the "**" portion matches but the full mask does not.
    assertValidateAntFileMask(Messages.FilePath_validateAntFileMask_portionMatchButPreviousNotMatchAndSuggest("**/*.js", "**", "**/*.js"), d, "**/*.js");
    // No match at all.
    assertValidateAntFileMask(Messages.FilePath_validateAntFileMask_doesntMatchAnything("index.htm"), d, "index.htm");
    // No match, but a close file exists — suggestion includes the full path.
    assertValidateAntFileMask(Messages.FilePath_validateAntFileMask_doesntMatchAndSuggest("f.html", "d1/d2/d3/f.html"), d, "f.html");
    // TODO lots more to test, e.g. multiple patterns separated by commas; ought to have full code coverage for this method
}
/**
 * Parses a JSON POST body into GraphQL request parameters (operationName, query, variables).
 *
 * @param postData        raw request body bytes expected to be a JSON object
 * @param contentEncoding charset of {@code postData}; falls back to the encoder default when blank
 * @return the extracted parameters; operationName/variables may be null when absent
 * @throws IllegalArgumentException if the body is not valid JSON, lacks a query field,
 *                                  the query is not a query/mutation, or variables is not an object
 */
public static GraphQLRequestParams toGraphQLRequestParams(byte[] postData, final String contentEncoding)
        throws JsonProcessingException, UnsupportedEncodingException {
    final String encoding = StringUtils.isNotEmpty(contentEncoding) ? contentEncoding : EncoderCache.URL_ARGUMENT_ENCODING;
    ObjectNode data;
    try (InputStreamReader reader = new InputStreamReader(new ByteArrayInputStream(postData), encoding)) {
        data = OBJECT_MAPPER.readValue(reader, ObjectNode.class);
    } catch (IOException e) {
        // Malformed JSON is a caller error, not an I/O failure.
        throw new IllegalArgumentException("Invalid json data: " + e.getLocalizedMessage(), e);
    }
    String operationName = null;
    String query;
    String variables = null;
    // operationName is optional; when present it must be textual (hence the 'true' nullable flag).
    final JsonNode operationNameNode = data.has(OPERATION_NAME_FIELD) ? data.get(OPERATION_NAME_FIELD) : null;
    if (operationNameNode != null) {
        operationName = getJsonNodeTextContent(operationNameNode, true);
    }
    if (!data.has(QUERY_FIELD)) {
        throw new IllegalArgumentException("Not a valid GraphQL query.");
    }
    final JsonNode queryNode = data.get(QUERY_FIELD);
    query = getJsonNodeTextContent(queryNode, false);
    final String trimmedQuery = StringUtils.trim(query);
    // NOTE(review): only documents starting with "query"/"mutation" are accepted — anonymous
    // ("{...}") and "subscription" operations are rejected here; confirm that is intentional.
    if (!StringUtils.startsWith(trimmedQuery, QUERY_FIELD) && !StringUtils.startsWith(trimmedQuery, "mutation")) {
        throw new IllegalArgumentException("Not a valid GraphQL query.");
    }
    // variables is optional; explicit JSON null is treated the same as absent.
    final JsonNode variablesNode = data.has(VARIABLES_FIELD) ? data.get(VARIABLES_FIELD) : null;
    if (variablesNode != null) {
        final JsonNodeType nodeType = variablesNode.getNodeType();
        if (nodeType != JsonNodeType.NULL) {
            if (nodeType == JsonNodeType.OBJECT) {
                // Re-serialize so downstream consumers always receive a JSON object string.
                variables = OBJECT_MAPPER.writeValueAsString(variablesNode);
            } else {
                throw new IllegalArgumentException("Not a valid object node for GraphQL variables.");
            }
        }
    }
    return new GraphQLRequestParams(operationName, query, variables);
}
// Empty Arguments carry neither a query nor variables, so conversion must be rejected.
@Test
void testMissingParams() {
    Arguments args = new Arguments();
    assertThrows(IllegalArgumentException.class, () -> GraphQLRequestParamUtils.toGraphQLRequestParams(args, null));
}
/**
 * Returns true when the two checklist questions wrap different question objects.
 * NOTE(review): this uses reference inequality ({@code !=}), not id/equals comparison —
 * correct only if equal questions are always the same shared instance (e.g. an enum);
 * confirm against how {@code question} is modeled.
 */
public boolean isDifferentQuestionId(ChecklistQuestion checklistQuestion) {
    return this.question != checklistQuestion.question;
}
// Comparing a checklist question against the same fixture instance must report "not different".
// (@DisplayName is a user-facing string and is intentionally left in its original language.)
@DisplayName("체크리스트 내에서 질문끼리 다른 id를 갖고 있는지 확인 성공 : 같은 id일 경우")
@Test
void isDifferentQuestionId_false() {
    //given — both references point at the same fixture object.
    ChecklistQuestion checklistQuestion = ChecklistFixture.CHECKLIST_QUESTION_1;
    ChecklistQuestion compareChecklistQuestion = ChecklistFixture.CHECKLIST_QUESTION_1;
    //when & then
    assertThat(checklistQuestion.isDifferentQuestionId(compareChecklistQuestion)).isFalse();
}
/**
 * Looks up a customer by tenant and exact title.
 *
 * @return the matching customer, or empty when none exists
 */
@Override
public Optional<Customer> findCustomerByTenantIdAndTitle(UUID tenantId, String title) {
    // DaoUtil.getData maps the persisted entity (possibly null) to the domain object.
    Customer customer = DaoUtil.getData(customerRepository.findByTenantIdAndTitle(tenantId, title));
    return Optional.ofNullable(customer);
}
// Seeds ten customers for one tenant and checks that lookup by exact title finds the right one.
@Test
public void testFindCustomersByTenantIdAndTitle() {
    UUID tenantId = Uuids.timeBased();
    for (int i = 0; i < 10; i++) {
        // createCustomer presumably titles them "CUSTOMER_<i>" — the assertion below relies on that.
        createCustomer(tenantId, i);
    }
    Optional<Customer> customerOpt = customerDao.findCustomerByTenantIdAndTitle(tenantId, "CUSTOMER_5");
    assertTrue(customerOpt.isPresent());
    assertEquals("CUSTOMER_5", customerOpt.get().getTitle());
}
/**
 * Renders Prometheus system metrics (plus any raw providers) for the given cluster
 * into {@code out}, using a pooled heap buffer that is always released.
 *
 * @param metricsProviders optional extra metric sources; may be null
 */
public static void generate(String cluster, OutputStream out,
                            List<PrometheusRawMetricsProvider> metricsProviders) throws IOException {
    // Heap buffer so we can hand its backing array straight to the OutputStream below.
    ByteBuf buf = PulsarByteBufAllocator.DEFAULT.heapBuffer();
    try {
        SimpleTextOutputStream stream = new SimpleTextOutputStream(buf);
        generateSystemMetrics(stream, cluster);
        if (metricsProviders != null) {
            for (PrometheusRawMetricsProvider metricsProvider : metricsProviders) {
                metricsProvider.generate(stream);
            }
        }
        // Write only the readable region; arrayOffset accounts for pooled-buffer slicing.
        out.write(buf.array(), buf.arrayOffset(), buf.readableBytes());
    } finally {
        // Release even on write failure — the buffer comes from a pooled allocator.
        buf.release();
    }
}
// A counter registered without custom labels should still be emitted with the cluster label
// injected by the generator.
@Test
public void testGenerateSystemMetricsWithoutCustomizedLabel() throws Exception {
    String defaultClusterValue = "cluster_test";
    // default cluster.
    // Random suffix avoids collisions with metrics left behind by other tests in the shared registry.
    String metricsName = "label_without_customized_label" + randomString();
    Counter counter = new Counter.Builder()
            .name(metricsName)
            .help("x")
            .register(CollectorRegistry.defaultRegistry);
    counter.inc();
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    PrometheusMetricsGeneratorUtils.generate(defaultClusterValue, out, Collections.emptyList());
    assertTrue(out.toString().contains(
            String.format("%s_total{cluster=\"%s\"} 1.0", metricsName, defaultClusterValue)
    ));
    // cleanup — unregister from the global default registry to keep other tests isolated.
    out.close();
    CollectorRegistry.defaultRegistry.unregister(counter);
}
/**
 * Publishes the given data to the ZooKeeper node addressed by key/group.
 *
 * @return true when the publish succeeded
 */
public boolean updateNode(String key, String group, String data) {
    final boolean published = zkClient.publishConfig(key, group, data);
    return published;
}
// NOTE(review): despite the "CreateParentNode" name, this exercises updateNode — publishing to a
// child path whose parent may not exist should still succeed; consider renaming for clarity.
@Test
public void testCreateParentNode2() {
    boolean result = zooKeeperBufferedClient.updateNode(CHILE_TWO_PATh_KEY, CHILE_TWO_PATh_PARENT, NODE_CONTENT);
    Assert.assertTrue(result);
}
/**
 * Resolves plugin load order from declared dependencies and reports problems.
 * Builds forward/reverse dependency graphs, topologically sorts them (null order
 * signals a cycle), flags dependencies on unknown plugin ids, and checks that each
 * dependent's required version range accepts the dependency's actual version.
 */
public Result resolve(List<PluginDescriptor> plugins) {
    // create graphs — fresh graphs per call; previous state is discarded.
    dependenciesGraph = new DirectedGraph<>();
    dependentsGraph = new DirectedGraph<>();
    // populate graphs
    Map<String, PluginDescriptor> pluginByIds = new HashMap<>();
    for (PluginDescriptor plugin : plugins) {
        addPlugin(plugin);
        pluginByIds.put(plugin.getPluginId(), plugin);
    }
    log.debug("Graph: {}", dependenciesGraph);
    // get a sorted list of dependencies — dependencies come before their dependents.
    List<String> sortedPlugins = dependenciesGraph.reverseTopologicalSort();
    log.debug("Plugins order: {}", sortedPlugins);
    // create the result object
    Result result = new Result(sortedPlugins);
    resolved = true;
    if (sortedPlugins != null) { // no cyclic dependency
        // detect not found dependencies — ids referenced but never registered.
        for (String pluginId : sortedPlugins) {
            if (!pluginByIds.containsKey(pluginId)) {
                result.addNotFoundDependency(pluginId);
            }
        }
    }
    // check dependencies versions — every dependent's declared range must accept the actual version.
    for (PluginDescriptor plugin : plugins) {
        String pluginId = plugin.getPluginId();
        String existingVersion = plugin.getVersion();
        // getDependents returns a fresh list here; removing from it drains our local copy only.
        List<String> dependents = getDependents(pluginId);
        while (!dependents.isEmpty()) {
            String dependentId = dependents.remove(0);
            PluginDescriptor dependent = pluginByIds.get(dependentId);
            String requiredVersion = getDependencyVersionSupport(dependent, pluginId);
            boolean ok = checkDependencyVersion(requiredVersion, existingVersion);
            if (!ok) {
                result.addWrongDependencyVersion(new WrongDependencyVersion(pluginId, dependentId, existingVersion, requiredVersion));
            }
        }
    }
    return result;
}
// p1 depends on p2, so the resolved order must list p2 first; no dependency should be missing.
@Test
void sortedPlugins() {
    // create incomplete plugin descriptor (ignore some attributes)
    PluginDescriptor pd1 = new DefaultPluginDescriptor()
            .setPluginId("p1")
            .setDependencies("p2");
    PluginDescriptor pd2 = new DefaultPluginDescriptor()
            .setPluginId("p2")
            .setPluginVersion("0.0.0"); // needed in "checkDependencyVersion" method
    List<PluginDescriptor> plugins = new ArrayList<>();
    plugins.add(pd1);
    plugins.add(pd2);
    DependencyResolver.Result result = resolver.resolve(plugins);
    assertTrue(result.getNotFoundDependencies().isEmpty());
    // Dependency-first ordering: p2 (the dependency) precedes p1 (the dependent).
    assertEquals(result.getSortedPlugins(), Arrays.asList("p2", "p1"));
}
/**
 * Fixed-fee cost of a position: one fee while open, doubled once closed (entry + exit).
 * The {@code currentIndex} argument is not used by this model. The entry price is used
 * only as a Num factory, not as a value.
 */
@Override
public Num calculate(Position position, int currentIndex) {
    Num pricePerAsset = position.getEntry().getPricePerAsset();
    // Two trades (entry and exit) once closed, otherwise just the entry trade.
    Num tradeCount = position.isClosed() ? pricePerAsset.numOf(2) : pricePerAsset.one();
    return pricePerAsset.numOf(feePerTrade).multipliedBy(tradeCount);
}
// For a fixed-fee model, the cost of a single trade equals the configured fee,
// independent of price and amount.
@Test
public void calculatePerPrice() {
    // Random fee keeps the assertion honest across runs.
    double feePerTrade = RANDOM.nextDouble();
    FixedTransactionCostModel model = new FixedTransactionCostModel(feePerTrade);
    Num cost = model.calculate(PRICE, AMOUNT);
    assertNumEquals(cost, DoubleNum.valueOf(feePerTrade));
}
/**
 * Parses an Intermediate Cuff Pressure characteristic: a flags byte, a mandatory
 * SFLOAT cuff pressure (plus two ignored SFLOATs), then optional timestamp, pulse
 * rate, user id and measurement status, in that order. Malformed packets are routed
 * to onInvalidDataReceived instead of throwing.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);
    // Minimum packet: 1 flags byte + 3 SFLOAT values (2 bytes each) = 7 bytes.
    if (data.size() < 7) {
        onInvalidDataReceived(device, data);
        return;
    }
    // First byte: flags
    int offset = 0;
    final int flags = data.getIntValue(Data.FORMAT_UINT8, offset++);
    // See UNIT_* for unit options
    final int unit = (flags & 0x01) == UNIT_mmHg ? UNIT_mmHg : UNIT_kPa;
    final boolean timestampPresent = (flags & 0x02) != 0;
    final boolean pulseRatePresent = (flags & 0x04) != 0;
    final boolean userIdPresent = (flags & 0x08) != 0;
    final boolean measurementStatusPresent = (flags & 0x10) != 0;
    // Re-validate length now that the flags tell us which optional fields must be present.
    if (data.size() < 7
            + (timestampPresent ? 7 : 0) + (pulseRatePresent ? 2 : 0)
            + (userIdPresent ? 1 : 0) + (measurementStatusPresent ? 2 : 0)) {
        onInvalidDataReceived(device, data);
        return;
    }
    // Following bytes - systolic, diastolic and mean arterial pressure
    // (only the first SFLOAT — the cuff pressure — is meaningful for this characteristic).
    final float cuffPressure = data.getFloatValue(Data.FORMAT_SFLOAT, offset);
    // final float ignored_1 = data.getFloatValue(Data.FORMAT_SFLOAT, offset + 2);
    // final float ignored_2 = data.getFloatValue(Data.FORMAT_SFLOAT, offset + 4);
    offset += 6;
    // Parse timestamp if present
    Calendar calendar = null;
    if (timestampPresent) {
        calendar = DateTimeDataCallback.readDateTime(data, offset);
        offset += 7;
    }
    // Parse pulse rate if present
    Float pulseRate = null;
    if (pulseRatePresent) {
        pulseRate = data.getFloatValue(Data.FORMAT_SFLOAT, offset);
        offset += 2;
    }
    // Read user id if present
    Integer userId = null;
    if (userIdPresent) {
        userId = data.getIntValue(Data.FORMAT_UINT8, offset);
        offset += 1;
    }
    // Read measurement status if present
    BPMStatus status = null;
    if (measurementStatusPresent) {
        final int measurementStatus = data.getIntValue(Data.FORMAT_UINT16_LE, offset);
        // offset += 2;
        status = new BPMStatus(measurementStatus);
    }
    onIntermediateCuffPressureReceived(device, cuffPressure, unit, pulseRate, userId, status, calendar);
}
// Builds a fully-populated 19-byte Intermediate Cuff Pressure packet (all optional fields
// present) and verifies every parsed field in the callback. The byte-array assertion pins
// the exact wire layout before parsing.
@Test
public void onIntermediateCuffPressureReceived_full() {
    final DataReceivedCallback callback = new IntermediateCuffPressureDataCallback() {
        @Override
        public void onIntermediateCuffPressureReceived(@NonNull final BluetoothDevice device,
                                                       final float cuffPressure, final int unit,
                                                       @Nullable final Float pulseRate,
                                                       @Nullable final Integer userID,
                                                       @Nullable final BPMStatus status,
                                                       @Nullable final Calendar calendar) {
            assertEquals("Cuff pressure", 4.0, cuffPressure, 0);
            assertEquals("Unit: mmHg", 0, unit);
            assertNotNull("Pulse rate set", pulseRate);
            assertEquals("Pulse rate", 60.0, pulseRate, 0);
            assertNotNull("User ID set", userID);
            assertEquals("User ID", 1, userID.intValue());
            assertNotNull("Status set", status);
            assertTrue(status.bodyMovementDetected);
            assertTrue(status.cuffTooLose);
            assertTrue(status.irregularPulseDetected);
            assertTrue(status.pulseRateInRange);
            assertFalse(status.pulseRateExceedsUpperLimit);
            assertFalse(status.pulseRateIsLessThenLowerLimit);
            assertTrue(status.improperMeasurementPosition);
            assertNotNull("Calendar set", calendar);
            // Year was encoded as 0 (unknown), so it must be unset while the rest is populated.
            assertFalse(calendar.isSet(Calendar.YEAR));
            assertTrue(calendar.isSet(Calendar.MONTH));
            assertTrue(calendar.isSet(Calendar.DATE));
            assertEquals("Month", Calendar.APRIL, calendar.get(Calendar.MONTH));
            assertEquals("Day", 17, calendar.get(Calendar.DATE));
            assertEquals("Hour", 20, calendar.get(Calendar.HOUR_OF_DAY));
            assertEquals("Minute", 41, calendar.get(Calendar.MINUTE));
            assertEquals("Second", 59, calendar.get(Calendar.SECOND));
            assertEquals("Milliseconds", 0, calendar.get(Calendar.MILLISECOND));
        }

        @Override
        public void onInvalidDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
            // Deliberately failing assertion: a valid packet must never land here.
            assertEquals("Correct ICP reported as invalid", 1, 2);
        }
    };
    final MutableData data = new MutableData(new byte[19]);
    // Flags — timestamp + pulse rate + user id + status present; unit bit 0 = mmHg.
    data.setByte(0b11110, 0);
    // Cuff pressure in mmHg
    data.setValue(4, 0, Data.FORMAT_SFLOAT, 1);
    // Date and Time — year 0 means "not known".
    data.setValue(0, Data.FORMAT_UINT16_LE, 7);
    data.setValue(4, Data.FORMAT_UINT8, 9);
    data.setValue(17, Data.FORMAT_UINT8, 10);
    data.setValue(20, Data.FORMAT_UINT8, 11);
    data.setValue(41, Data.FORMAT_UINT8, 12);
    data.setValue(59, Data.FORMAT_UINT8, 13);
    // Pulse rate
    data.setValue(60, 0, Data.FORMAT_SFLOAT, 14);
    // User ID
    data.setValue(1, Data.FORMAT_UINT8, 16);
    // Measurement status
    data.setValue(0b100111, Data.FORMAT_UINT16_LE, 17);
    // Sanity-check the exact encoded bytes before handing them to the parser.
    assertArrayEquals(
            new byte[] { 0x1E, (byte) 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x11, 0x14, 0x29, 0x3B, 0x3C, 0x00, 0x01, 0x27, 0x00 },
            data.getValue()
    );
    callback.onDataReceived(null, data);
}
/**
 * Drives one scenario: before-hooks, step loop, after-hooks. The finally block must
 * always run afterRun() (unless skipped) so latches used by the parallel runner are
 * counted down even when a step or hook crashes.
 */
@Override
public void run() {
    try {
        // make sure we call afterRun() even on crashes
        // and operate countdown latches, else we may hang the parallel runner
        if (steps == null) {
            beforeRun();
        }
        if (skipped) {
            return;
        }
        int count = steps.size();
        int index = 0;
        // nextStepIndex() controls advancement (supports debug step-back), not a plain i++.
        while ((index = nextStepIndex()) < count) {
            currentStep = steps.get(index);
            execute(currentStep);
            if (currentStepResult != null) { // can be null if debug step-back or hook skip
                result.addStepResult(currentStepResult);
            }
        }
    } catch (Exception e) {
        // Record whatever partial result exists, then synthesize a failure step so the
        // report shows the crash instead of silently truncating the scenario.
        if (currentStepResult != null) {
            result.addStepResult(currentStepResult);
        }
        logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
        currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
    } finally {
        if (!skipped) {
            afterRun();
            // Optionally fail fast: abort the whole suite on the first scenario failure.
            if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
                featureRuntime.suite.abort();
            }
        }
        if (caller.isNone()) {
            logAppender.close(); // reclaim memory
        }
    }
}
// 'contains only deep' must be order-insensitive: the expected list has the same
// elements as the response but in a different order.
@Test
void testMatchContainsOnlyDeep() {
    run(
        "def response = { foo: [ 'a', 'b' ] } ",
        "match response contains only deep { foo: [ 'b', 'a' ] }"
    );
}
/**
 * Allocates queues for a POP-mode consumer. Depending on popShareQueueNum relative to
 * the consumer count, a client either pops every queue, shares its own allocation plus
 * that of the next N clients in the cid list, or falls back to plain allocation.
 *
 * @param popShareQueueNum how many following consumers' queues this client also shares;
 *                         non-positive or >= cidAll.size()-1 means "share everything"
 */
public List<MessageQueue> allocate4Pop(AllocateMessageQueueStrategy allocateMessageQueueStrategy,
    final String consumerGroup, final String clientId, List<MessageQueue> mqAll, List<String> cidAll,
    int popShareQueueNum) {
    List<MessageQueue> allocateResult;
    if (popShareQueueNum <= 0 || popShareQueueNum >= cidAll.size() - 1) {
        //each client pop all messagequeue
        allocateResult = new ArrayList<>(mqAll.size());
        for (MessageQueue mq : mqAll) {
            //must create new MessageQueue in case of change cache in AssignmentManager
            // (queueId -1 marks a POP-mode pseudo queue).
            MessageQueue newMq = new MessageQueue(mq.getTopic(), mq.getBrokerName(), -1);
            allocateResult.add(newMq);
        }
    } else {
        if (cidAll.size() <= mqAll.size()) {
            //consumer working in pop mode could share the MessageQueues assigned to the N (N = popWorkGroupSize) consumer following it in the cid list
            allocateResult = allocateMessageQueueStrategy.allocate(consumerGroup, clientId, mqAll, cidAll);
            int index = cidAll.indexOf(clientId);
            if (index >= 0) {
                for (int i = 1; i <= popShareQueueNum; i++) {
                    // Wrap around the cid list so sharing continues from the start when
                    // this client is near the end.
                    index++;
                    index = index % cidAll.size();
                    List<MessageQueue> tmp = allocateMessageQueueStrategy.allocate(consumerGroup, cidAll.get(index), mqAll, cidAll);
                    allocateResult.addAll(tmp);
                }
            }
        } else {
            //make sure each cid is assigned
            allocateResult = allocate(consumerGroup, clientId, mqAll, cidAll);
        }
    }
    return allocateResult;
}
// Runs the shared allocate4Pop scenario against each built-in allocation strategy.
@Test
public void testAllocate4Pop() {
    AllocateMessageQueueStrategy[] strategies = {
        new AllocateMessageQueueAveragely(),
        new AllocateMessageQueueAveragelyByCircle(),
        new AllocateMessageQueueConsistentHash(),
    };
    for (AllocateMessageQueueStrategy strategy : strategies) {
        testAllocate4Pop(strategy);
    }
}
/**
 * Decodes a PostgreSQL int4 array parameter sent in text mode into an int[].
 *
 * @throws UnsupportedSQLOperationException when the parameter arrived in binary mode
 * @throws NumberFormatException when an element is not a valid integer
 */
public int[] decodeInt4Array(final byte[] parameterBytes, final boolean isBinary) {
    ShardingSpherePreconditions.checkState(!isBinary, () -> new UnsupportedSQLOperationException("binary mode"));
    final String parameterValue = new String(parameterBytes, StandardCharsets.UTF_8);
    // decodeText splits the textual array literal into its element strings (order preserved).
    return decodeText(parameterValue).stream().mapToInt(Integer::parseInt).toArray();
}
// Text-mode decoding of the shared int-array literal must yield both elements in order.
@Test
void assertParseInt4ArrayNormalTextMode() {
    int[] actual = DECODER.decodeInt4Array(INT_ARRAY_STR.getBytes(), false);
    assertThat(actual.length, is(2));
    assertThat(actual[0], is(11));
    assertThat(actual[1], is(12));
}
public BackgroundException map(final IOException failure, final Path directory) { return super.map("Connection failed", failure, directory); }
// An IOException that merely wraps a BackgroundException must be unwrapped: mapping
// returns the original cause instance, not a new exception.
@Test
public void testMapWrappedCause() {
    final DefaultIOExceptionMappingService s = new DefaultIOExceptionMappingService();
    final BackgroundException cause = new BackgroundException();
    assertSame(cause, s.map(new IOException(cause)));
}
/**
 * Initializes the node by converting the raw configuration; this node takes no
 * settings of its own (EmptyNodeConfiguration), so conversion is only a validation step.
 */
@Override
public void init(TbContext ctx, TbNodeConfiguration configuration) throws TbNodeException {
    final EmptyNodeConfiguration parsedConfig = TbNodeUtils.convert(configuration, EmptyNodeConfiguration.class);
    this.config = parsedConfig;
}
// Initializing the node with its default (empty) configuration must not throw.
@Test
public void givenDefaultConfig_whenInit_thenOk() {
    assertThatNoException().isThrownBy(() -> node.init(ctxMock, nodeConfiguration));
}