focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override
public Mono<GetAccountIdentityResponse> getAccountIdentity(final GetAccountIdentityRequest request) {
  // Identify the caller from the gRPC authentication context.
  final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedDevice();

  return Mono.fromFuture(() -> accountsManager.getByAccountIdentifierAsync(authenticatedDevice.accountIdentifier()))
      // An authenticated device whose account no longer exists is treated as unauthenticated.
      .map(maybeAccount -> maybeAccount.orElseThrow(Status.UNAUTHENTICATED::asRuntimeException))
      .map(account -> {
        final AccountIdentifiers.Builder identifiersBuilder = AccountIdentifiers.newBuilder()
            .addServiceIdentifiers(ServiceIdentifierUtil.toGrpcServiceIdentifier(new AciServiceIdentifier(account.getUuid())))
            .addServiceIdentifiers(ServiceIdentifierUtil.toGrpcServiceIdentifier(new PniServiceIdentifier(account.getPhoneNumberIdentifier())))
            .setE164(account.getNumber());

        // The username hash is optional; only set it when the account has one.
        account.getUsernameHash()
            .map(ByteString::copyFrom)
            .ifPresent(identifiersBuilder::setUsernameHash);

        return GetAccountIdentityResponse.newBuilder()
            .setAccountIdentifiers(identifiersBuilder.build())
            .build();
      });
}
// Verifies that getAccountIdentity returns the authenticated account's ACI and PNI service
// identifiers, its E.164 number, and its username hash.
@Test void getAccountIdentity() { final UUID phoneNumberIdentifier = UUID.randomUUID(); final String e164 = PhoneNumberUtil.getInstance().format( PhoneNumberUtil.getInstance().getExampleNumber("US"), PhoneNumberUtil.PhoneNumberFormat.E164); final byte[] usernameHash = TestRandomUtil.nextBytes(32); final Account account = mock(Account.class); when(account.getUuid()).thenReturn(AUTHENTICATED_ACI); when(account.getPhoneNumberIdentifier()).thenReturn(phoneNumberIdentifier); when(account.getNumber()).thenReturn(e164); when(account.getUsernameHash()).thenReturn(Optional.of(usernameHash)); when(accountsManager.getByAccountIdentifierAsync(AUTHENTICATED_ACI)) .thenReturn(CompletableFuture.completedFuture(Optional.of(account))); final GetAccountIdentityResponse expectedResponse = GetAccountIdentityResponse.newBuilder() .setAccountIdentifiers(AccountIdentifiers.newBuilder() .addServiceIdentifiers(ServiceIdentifierUtil.toGrpcServiceIdentifier(new AciServiceIdentifier(AUTHENTICATED_ACI))) .addServiceIdentifiers(ServiceIdentifierUtil.toGrpcServiceIdentifier(new PniServiceIdentifier(phoneNumberIdentifier))) .setE164(e164) .setUsernameHash(ByteString.copyFrom(usernameHash)) .build()) .build(); assertEquals(expectedResponse, authenticatedServiceStub().getAccountIdentity(GetAccountIdentityRequest.newBuilder().build())); }
/**
 * Returns true when any sequence in this set covers the given value.
 */
public boolean contains(long value) {
    if (isEmpty()) {
        return false;
    }
    // Walk the linked list of sequences until one contains the value.
    for (Sequence current = getHead(); current != null; current = current.getNext()) {
        if (current.contains(value)) {
            return true;
        }
    }
    return false;
}
// Verifies membership across several disjoint ranges, including range boundaries,
// the gaps between ranges, and an out-of-range negative value.
@Test public void testContains() { SequenceSet set = new SequenceSet(); set.add(new Sequence(0, 10)); set.add(new Sequence(21, 42)); set.add(new Sequence(47, 90)); set.add(new Sequence(142, 512)); assertTrue(set.contains(0)); assertTrue(set.contains(42)); assertTrue(set.contains(49)); assertTrue(set.contains(153)); assertFalse(set.contains(43)); assertFalse(set.contains(99)); assertFalse(set.contains(-1)); assertFalse(set.contains(11)); }
/**
 * Drops the given fields, identified by name, delegating to the descriptor-based overload.
 */
public static <T> Inner<T> fields(String... fields) {
    final FieldAccessDescriptor descriptor = FieldAccessDescriptor.withFieldNames(fields);
    return fields(descriptor);
}
// Verifies that dropping a field nested inside an array element rewrites both the
// output schema and the row values.
@Test @Category(NeedsRunner.class) public void testDropNestedArrayField() { Schema expectedSchema = Schema.builder().addArrayField("field2", FieldType.STRING).build(); PCollection<Row> result = pipeline .apply( Create.of( nestedArray(simpleRow(1, "one1"), simpleRow(1, "one2")), nestedArray(simpleRow(2, "two1"), simpleRow(2, "two2")), nestedArray(simpleRow(3, "three1"), simpleRow(3, "three2"))) .withRowSchema(NESTED_ARRAY_SCHEMA)) .apply(DropFields.fields("array[].field1")); assertEquals(expectedSchema, result.getSchema()); List<Row> expectedRows = Lists.newArrayList( Row.withSchema(expectedSchema).addArray("one1", "one2").build(), Row.withSchema(expectedSchema).addArray("two1", "two2").build(), Row.withSchema(expectedSchema).addArray("three1", "three2").build()); PAssert.that(result).containsInAnyOrder(expectedRows); pipeline.run(); }
/**
 * Renders the given object as its JSON representation.
 */
public String toString(Object object) {
    final String json = toJson(object);
    return json;
}
// Smoke test: serializes a map mixing legacy java.util.Date and java.time values to JSON.
// NOTE(review): no assertions — this only checks serialization does not throw; consider
// asserting on the produced JSON instead of printing it.
@Test public void testLocal() { Map<String, Object> result = new HashMap<>(8); result.put("date1", new Date()); result.put("date2", LocalDate.now()); result.put("date3", LocalDateTime.now()); System.out.println(JsonKit.toString(result)); }
// Forwards the trace-level message unchanged to the wrapped logger.
@Override public void trace(String msg) { logger.trace(msg); }
// Verifies that a parameterized trace call is forwarded unchanged (format string and
// argument) to the underlying SLF4J logger.
@Test void testTraceWithFormat() { jobRunrDashboardLogger.trace("trace with {}", "format"); verify(slfLogger).trace("trace with {}", "format"); }
// Current version of this serializer snapshot's serialization format.
@Override public int getCurrentVersion() { return 3; }
// Utility test (kept @Disabled): writes the snapshot file for the current snapshot
// version to disk; presumably meant to be run manually after a version bump — confirm.
@Disabled @Test void writeCurrentVersionSnapshot() throws IOException { AvroSerializer<GenericRecord> serializer = new AvroSerializer<>(GenericRecord.class, Address.getClassSchema()); DataOutputSerializer out = new DataOutputSerializer(1024); TypeSerializerSnapshotSerializationUtil.writeSerializerSnapshot( out, serializer.snapshotConfiguration()); Path snapshotPath = getSerializerSnapshotFilePath(new AvroSerializerSnapshot<>().getCurrentVersion()); Files.write(snapshotPath, out.getCopyOfBuffer()); }
/**
 * Marshals the exchange body to an XML document and writes the encrypted result to the stream.
 * Asymmetric encryption is used when an RSA key cipher algorithm or a recipient key alias is
 * configured; otherwise symmetric encryption is applied.
 */
@Override public void marshal(Exchange exchange, Object graph, OutputStream stream) throws Exception { // Retrieve the message body as input stream
InputStream is = exchange.getContext().getTypeConverter().mandatoryConvertTo(InputStream.class, graph); // and convert that to XML
Document document = exchange.getContext().getTypeConverter().convertTo(Document.class, exchange, is); if (null != keyCipherAlgorithm && (keyCipherAlgorithm.equals(XMLCipher.RSA_v1dot5) || keyCipherAlgorithm.equals(XMLCipher.RSA_OAEP) || keyCipherAlgorithm.equals(XMLCipher.RSA_OAEP_11))) { encryptAsymmetric(exchange, document, stream); } else if (null != recipientKeyAlias) { encryptAsymmetric(exchange, document, stream); } else { encryptSymmetric(exchange, document, stream); } }
// Verifies partial-payload encryption of multiple XML nodes selected by an XPath expression.
@Test public void testPartialPayloadMultiNodeXMLContentEncryption() throws Exception { context.addRoutes(new RouteBuilder() { public void configure() { from("direct:start") .marshal().xmlSecurity("//cheesesites/*/cheese", true, defaultKey.getEncoded()) .to("mock:encrypted"); } }); xmlsecTestHelper.testEncryption(context); }
@Override public void shutdown() throws PulsarClientException { try { // We will throw the last thrown exception only, though logging all of them. Throwable throwable = null; if (lookup != null) { try { lookup.close(); } catch (Throwable t) { log.warn("Failed to shutdown lookup", t); throwable = t; } } if (tcClient != null) { try { tcClient.close(); } catch (Throwable t) { log.warn("Failed to close tcClient"); throwable = t; } } // close the service url provider allocated resource. if (conf != null && conf.getServiceUrlProvider() != null) { conf.getServiceUrlProvider().close(); } try { // Shutting down eventLoopGroup separately because in some cases, cnxPool might be using different // eventLoopGroup. shutdownEventLoopGroup(eventLoopGroup); } catch (PulsarClientException e) { log.warn("Failed to shutdown eventLoopGroup", e); throwable = e; } try { closeCnxPool(cnxPool); } catch (PulsarClientException e) { log.warn("Failed to shutdown cnxPool", e); throwable = e; } if (timer != null && needStopTimer) { try { timer.stop(); } catch (Throwable t) { log.warn("Failed to shutdown timer", t); throwable = t; } } try { shutdownExecutors(); } catch (PulsarClientException e) { throwable = e; } if (conf != null && conf.getAuthentication() != null) { try { conf.getAuthentication().close(); } catch (Throwable t) { log.warn("Failed to close authentication", t); throwable = t; } } if (throwable != null) { throw throwable; } } catch (Throwable t) { log.warn("Failed to shutdown Pulsar client", t); throw PulsarClientException.unwrap(t); } }
// Verifies that a client configured with explicit DNS server addresses can be
// constructed and shut down cleanly.
@Test public void testInitializeWithDNSServerAddresses() throws Exception { ClientConfigurationData conf = new ClientConfigurationData(); conf.setDnsServerAddresses(DefaultDnsServerAddressStreamProvider.defaultAddressList()); conf.setServiceUrl("pulsar://localhost:6650"); initializeEventLoopGroup(conf); PulsarClientImpl client = new PulsarClientImpl(conf, eventLoopGroup); client.shutdown(); }
/**
 * Returns the cell at the given 1-based column index.
 * Rejects any index outside [1, data.length] with an IllegalArgumentException.
 */
public Object getCell(final int columnIndex) {
    Preconditions.checkArgument(1 <= columnIndex && columnIndex <= data.length);
    return data[columnIndex - 1];
}
// A negative (out-of-range) column index must be rejected with IllegalArgumentException.
@Test void assertGetCellWithNegativeColumnIndex() { assertThrows(IllegalArgumentException.class, () -> memoryResultSetRow.getCell(-1)); }
/**
 * Handles a client PartitionedTopicMetadata lookup. Validates the topic name and broker
 * readiness, throttles via the lookup-request semaphore, checks LOOKUP authorization, and
 * then either answers from the existing topic metadata (when auto-creation is disabled) or
 * fetches/creates the partitioned-topic metadata (when auto-creation is enabled). Every
 * branch releases the semaphore and sends exactly one response to the client.
 */
@Override protected void handlePartitionMetadataRequest(CommandPartitionedTopicMetadata partitionMetadata) { checkArgument(state == State.Connected); final long requestId = partitionMetadata.getRequestId(); if (log.isDebugEnabled()) { log.debug("[{}] Received PartitionMetadataLookup from {} for {}", partitionMetadata.getTopic(), remoteAddress, requestId); } TopicName topicName = validateTopicName(partitionMetadata.getTopic(), requestId, partitionMetadata); if (topicName == null) { return; } if (!this.service.getPulsar().isRunning()) { if (log.isDebugEnabled()) { log.debug("[{}] Failed PartitionMetadataLookup from {} for {} " + "due to pulsar service is not ready: {} state", partitionMetadata.getTopic(), remoteAddress, requestId, this.service.getPulsar().getState().toString()); } writeAndFlush(Commands.newPartitionMetadataResponse(ServerError.ServiceNotReady, "Failed due to pulsar service is not ready", requestId)); return; } final Semaphore lookupSemaphore = service.getLookupRequestSemaphore(); if (lookupSemaphore.tryAcquire()) { isTopicOperationAllowed(topicName, TopicOperation.LOOKUP, authenticationData, originalAuthData).thenApply( isAuthorized -> { if (isAuthorized) { // Get if exists, respond not found error if not exists.
getBrokerService().isAllowAutoTopicCreationAsync(topicName).thenAccept(brokerAllowAutoCreate -> { boolean autoCreateIfNotExist = partitionMetadata.isMetadataAutoCreationEnabled() && brokerAllowAutoCreate; if (!autoCreateIfNotExist) { NamespaceService namespaceService = getBrokerService().getPulsar().getNamespaceService(); namespaceService.checkTopicExists(topicName).thenAccept(topicExistsInfo -> { lookupSemaphore.release(); if (!topicExistsInfo.isExists()) { writeAndFlush(Commands.newPartitionMetadataResponse( ServerError.TopicNotFound, "", requestId)); } else if (topicExistsInfo.getTopicType().equals(TopicType.PARTITIONED)) { commandSender.sendPartitionMetadataResponse(topicExistsInfo.getPartitions(), requestId); } else { commandSender.sendPartitionMetadataResponse(0, requestId); } // release resources.
topicExistsInfo.recycle(); }).exceptionally(ex -> { lookupSemaphore.release(); log.error("{} {} Failed to get partition metadata", topicName, ServerCnx.this.toString(), ex); writeAndFlush( Commands.newPartitionMetadataResponse(ServerError.MetadataError, "Failed to get partition metadata", requestId)); return null; }); } else { // Get if exists, create a new one if not exists.
unsafeGetPartitionedTopicMetadataAsync(getBrokerService().pulsar(), topicName) .whenComplete((metadata, ex) -> { lookupSemaphore.release(); if (ex == null) { int partitions = metadata.partitions; commandSender.sendPartitionMetadataResponse(partitions, requestId); } else { if (ex instanceof PulsarClientException) { log.warn("Failed to authorize {} at [{}] on topic {} : {}", getRole(), remoteAddress, topicName, ex.getMessage()); commandSender.sendPartitionMetadataResponse(ServerError.AuthorizationError, ex.getMessage(), requestId); } else { log.warn("Failed to get Partitioned Metadata [{}] {}: {}", remoteAddress, topicName, ex.getMessage(), ex); ServerError error = ServerError.ServiceNotReady; if (ex instanceof MetadataStoreException) { error = ServerError.MetadataError; } else if (ex instanceof RestException restException){ int responseCode = restException.getResponse().getStatus(); if (responseCode == NOT_FOUND.getStatusCode()){ error = ServerError.TopicNotFound; } else if (responseCode < INTERNAL_SERVER_ERROR.getStatusCode()){ error = ServerError.MetadataError; } } commandSender.sendPartitionMetadataResponse(error, ex.getMessage(), requestId); } } }); } }); } else { final String msg = "Client is not authorized to Get Partition Metadata"; log.warn("[{}] {} with role {} on topic {}", remoteAddress, msg, getPrincipal(), topicName); writeAndFlush( Commands.newPartitionMetadataResponse(ServerError.AuthorizationError, msg, requestId)); lookupSemaphore.release(); } return null; }).exceptionally(ex -> { logAuthException(remoteAddress, "partition-metadata", getPrincipal(), Optional.of(topicName), ex); final String msg = "Exception occurred while trying to authorize get Partition Metadata"; writeAndFlush(Commands.newPartitionMetadataResponse(ServerError.AuthorizationError, msg, requestId)); lookupSemaphore.release(); return null; }); } else { if (log.isDebugEnabled()) { log.debug("[{}] Failed Partition-Metadata lookup due to too many lookup-requests {}", remoteAddress,
topicName); } commandSender.sendPartitionMetadataResponse(ServerError.TooManyRequests, "Failed due to too many pending lookup requests", requestId); } }
// Verifies that handlePartitionMetadataRequest rejects requests while the connection is
// not in the Connected state (checkArgument throws IllegalArgumentException).
@Test(expectedExceptions = IllegalArgumentException.class) public void shouldFailHandlePartitionMetadataRequest() throws Exception { ServerCnx serverCnx = mock(ServerCnx.class, CALLS_REAL_METHODS); Field stateUpdater = ServerCnx.class.getDeclaredField("state"); stateUpdater.setAccessible(true); stateUpdater.set(serverCnx, ServerCnx.State.Failed); serverCnx.handlePartitionMetadataRequest(any()); }
/**
 * Maps a CVSS v2 base score to its qualitative severity:
 * [0, 4) -> LOW, [4, 7) -> MEDIUM, [7, 10] -> HIGH; null or out-of-range -> UNKNOWN.
 */
public static String cvssV2ScoreToSeverity(Double score) {
    if (score == null) {
        return UNKNOWN;
    }
    if (ZERO.compareTo(score) <= 0) {
        if (FOUR.compareTo(score) > 0) {
            return LOW;
        }
        if (SEVEN.compareTo(score) > 0) {
            return MEDIUM;
        }
        if (TEN.compareTo(score) >= 0) {
            return HIGH;
        }
    }
    // Negative scores or scores above 10 fall through here.
    return UNKNOWN;
}
// Walks the CVSS v2 severity boundaries: below 0 and above 10 map to UNKNOWN,
// [0,4) to LOW, [4,7) to MEDIUM, and [7,10] to HIGH.
@Test public void testCvssV2ScoreToSeverity() { Double score = -1.0; String expResult = "UNKNOWN"; String result = CvssUtil.cvssV2ScoreToSeverity(score); assertEquals(expResult, result); score = 0.0; expResult = "LOW"; result = CvssUtil.cvssV2ScoreToSeverity(score); assertEquals(expResult, result); score = 1.0; expResult = "LOW"; result = CvssUtil.cvssV2ScoreToSeverity(score); assertEquals(expResult, result); score = 3.9; expResult = "LOW"; result = CvssUtil.cvssV2ScoreToSeverity(score); assertEquals(expResult, result); score = 4.0; expResult = "MEDIUM"; result = CvssUtil.cvssV2ScoreToSeverity(score); assertEquals(expResult, result); score = 6.9; expResult = "MEDIUM"; result = CvssUtil.cvssV2ScoreToSeverity(score); assertEquals(expResult, result); score = 7.0; expResult = "HIGH"; result = CvssUtil.cvssV2ScoreToSeverity(score); assertEquals(expResult, result); score = 10.0; expResult = "HIGH"; result = CvssUtil.cvssV2ScoreToSeverity(score); assertEquals(expResult, result); score = 11.0; expResult = "UNKNOWN"; result = CvssUtil.cvssV2ScoreToSeverity(score); assertEquals(expResult, result); }
/**
 * Encodes the input as Punycode, delegating to the two-argument overload with the boolean
 * flag set to {@code false} (presumably: without the "xn--" prefix — confirm against the
 * overload's documentation).
 *
 * @param input the character sequence to encode
 * @return the Punycode-encoded string
 * @throws UtilException if encoding fails
 */
public static String encode(CharSequence input) throws UtilException { return encode(input, false); }
// Verifies that a plain-ASCII string passes through Punycode encoding unchanged.
@Test public void encodeDecodeTest2(){ // No encoding or decoding needed
final String text = "Hutool"; final String strPunyCode = PunyCode.encode(text); assertEquals("Hutool", strPunyCode); }
/**
 * Exposes the state manager's changelog offsets as a read-only view.
 */
@Override
public Map<TopicPartition, Long> changelogOffsets() {
    final Map<TopicPartition, Long> offsets = stateMgr.changelogOffsets();
    return Collections.unmodifiableMap(offsets);
}
// Verifies that the task exposes exactly the state manager's changelog offsets.
@Test public void shouldReturnStateManagerChangelogOffsets() { when(stateManager.changelogOffsets()).thenReturn(Collections.singletonMap(partition, 50L)); task = createStandbyTask(); assertEquals(Collections.singletonMap(partition, 50L), task.changelogOffsets()); }
/**
 * Computes the patch (list of deltas) that transforms the original list into the revised
 * one, using the default diff algorithm and reporting progress to the given listener.
 */
public static <T> Patch<T> diff(List<T> original, List<T> revised, DiffAlgorithmListener progress) { return DiffUtils.diff(original, revised, DEFAULT_DIFF.create(), progress); }
// Verifies that diffing produces a single DeleteDelta covering the two removed leading elements.
@Test public void testDiff_Delete() { final Patch<String> patch = DiffUtils.diff(Arrays.asList("ddd", "fff", "ggg"), Arrays. asList("ggg")); assertNotNull(patch); assertEquals(1, patch.getDeltas().size()); final AbstractDelta<String> delta = patch.getDeltas().get(0); assertTrue(delta instanceof DeleteDelta); assertEquals(new Chunk<>(0, Arrays.asList("ddd", "fff")), delta.getSource()); assertEquals(new Chunk<>(0, Collections.<String>emptyList()), delta.getTarget()); }
/**
 * Creates the timestamp extraction policy for a source:
 * - no TIMESTAMP column configured -> record-metadata timestamp with the configured default extractor;
 * - STRING column -> requires a timestamp format and parses values with it;
 * - BIGINT or TIMESTAMP column -> direct column value (a format must NOT be set);
 * - any other column type -> KsqlException.
 */
public static TimestampExtractionPolicy create( final KsqlConfig ksqlConfig, final LogicalSchema schema, final Optional<TimestampColumn> timestampColumn ) { if (!timestampColumn.isPresent()) { return new MetadataTimestampExtractionPolicy(getDefaultTimestampExtractor(ksqlConfig)); } final ColumnName col = timestampColumn.get().getColumn(); final Optional<String> timestampFormat = timestampColumn.get().getFormat(); final Column column = schema.findColumn(col) .orElseThrow(() -> new KsqlException( "The TIMESTAMP column set in the WITH clause does not exist in the schema: '" + col.toString(FormatOptions.noEscape()) + "'")); final SqlBaseType tsColumnType = column.type().baseType(); if (tsColumnType == SqlBaseType.STRING) { final String format = timestampFormat.orElseThrow(() -> new KsqlException( "A String timestamp field has been specified without" + " also specifying the " + CommonCreateConfigs.TIMESTAMP_FORMAT_PROPERTY.toLowerCase())); return new StringTimestampExtractionPolicy(col, format); } if (timestampFormat.isPresent()) { throw new KsqlException("'" + CommonCreateConfigs.TIMESTAMP_FORMAT_PROPERTY + "' set in the WITH clause can only be used " + "when the timestamp column is of type STRING."); } if (tsColumnType == SqlBaseType.BIGINT) { return new LongColumnTimestampExtractionPolicy(col); } if (tsColumnType == SqlBaseType.TIMESTAMP) { return new TimestampColumnTimestampExtractionPolicy(col); } throw new KsqlException( "Timestamp column, " + col + ", should be LONG(INT64), TIMESTAMP," + " or a String with a " + CommonCreateConfigs.TIMESTAMP_FORMAT_PROPERTY.toLowerCase() + " specified."); }
// Supplying a timestamp format together with a TIMESTAMP-typed column must be rejected.
@Test public void shouldThrowIfTimestampTypeAndFormatIsSupplied() { // Given:
final String timestamp = "timestamp"; final LogicalSchema schema = schemaBuilder2 .valueColumn(ColumnName.of(timestamp.toUpperCase()), SqlTypes.TIMESTAMP) .build(); // When:
assertThrows( KsqlException.class, () -> TimestampExtractionPolicyFactory .create(ksqlConfig, schema, Optional.of( new TimestampColumn( ColumnName.of(timestamp.toUpperCase()), Optional.of("b") ) ) ) ); }
/**
 * Returns the shared empty-buffer memory address, or throws when no backing
 * memory address is available.
 */
@Override
public long memoryAddress() {
    // Guard clause: without a backing memory address this operation is unsupported.
    if (!hasMemoryAddress()) {
        throw new UnsupportedOperationException();
    }
    return EMPTY_BYTE_BUFFER_ADDRESS;
}
// memoryAddress() on an empty buffer must either return a non-zero address or throw
// UnsupportedOperationException, depending on hasMemoryAddress().
@Test public void testMemoryAddress() { EmptyByteBuf empty = new EmptyByteBuf(UnpooledByteBufAllocator.DEFAULT); if (empty.hasMemoryAddress()) { assertThat(empty.memoryAddress(), is(not(0L))); } else { try { empty.memoryAddress(); fail(); } catch (UnsupportedOperationException ignored) { // Ignore.
} } }
/**
 * Creates a compact operator.
 *
 * @param fsFactory supplier of the file system used for compaction I/O
 * @param readerFactory factory creating readers for the files to compact
 * @param writerFactory factory creating writers for the compacted output
 */
public CompactOperator( SupplierWithException<FileSystem, IOException> fsFactory, CompactReader.Factory<T> readerFactory, CompactWriter.Factory<T> writerFactory) { this.fsFactory = fsFactory; this.readerFactory = readerFactory; this.writerFactory = writerFactory; }
// End-to-end test of the compact operator: feeds several compaction units plus an
// EndCompaction marker, snapshots the operator state, checks the emitted commit info and
// compacted files, then restores from the snapshot and verifies the temp files are
// deleted once the checkpoint completes.
@Test void testCompactOperator() throws Exception { AtomicReference<OperatorSubtaskState> state = new AtomicReference<>(); Path f0 = newFile(".uncompacted-f0", 3); Path f1 = newFile(".uncompacted-f1", 2); Path f2 = newFile(".uncompacted-f2", 2); Path f3 = newFile(".uncompacted-f3", 5); Path f4 = newFile(".uncompacted-f4", 1); Path f5 = newFile(".uncompacted-f5", 5); Path f6 = newFile(".uncompacted-f6", 4); FileSystem fs = f0.getFileSystem(); runCompact( harness -> { harness.setup(); harness.open(); harness.processElement( new CompactionUnit(0, "p0", Arrays.asList(f0, f1, f4)), 0); harness.processElement( new CompactionUnit(1, "p0", Collections.singletonList(f3)), 0); harness.processElement(new CompactionUnit(2, "p1", Arrays.asList(f2, f5)), 0); harness.processElement( new CompactionUnit(3, "p0", Collections.singletonList(f6)), 0); harness.processElement(new EndCompaction(1), 0); state.set(harness.snapshot(2, 0)); // check output commit info
List<PartitionCommitInfo> outputs = harness.extractOutputValues(); assertThat(outputs).hasSize(1); assertThat(outputs.get(0).getCheckpointId()).isEqualTo(1); assertThat(outputs.get(0).getPartitions()).isEqualTo(new String[] {"p0", "p1"}); // check all compacted file generated
assertThat(fs.exists(new Path(folder, "compacted-f0"))).isTrue(); assertThat(fs.exists(new Path(folder, "compacted-f2"))).isTrue(); assertThat(fs.exists(new Path(folder, "compacted-f3"))).isTrue(); assertThat(fs.exists(new Path(folder, "compacted-f6"))).isTrue(); // check one compacted file
byte[] bytes = FileUtils.readAllBytes( new File(folder.getPath(), "compacted-f0").toPath()); Arrays.sort(bytes); assertThat(bytes).isEqualTo(new byte[] {0, 0, 0, 1, 1, 2}); }); runCompact( harness -> { harness.setup(); harness.initializeState(state.get()); harness.open(); harness.notifyOfCompletedCheckpoint(2); // check all temp files have been deleted
assertThat(fs.exists(f0)).isFalse(); assertThat(fs.exists(f1)).isFalse(); assertThat(fs.exists(f2)).isFalse();
assertThat(fs.exists(f3)).isFalse(); assertThat(fs.exists(f4)).isFalse(); assertThat(fs.exists(f5)).isFalse(); assertThat(fs.exists(f6)).isFalse(); }); }
/**
 * Audits a routed statement: a DML statement that touches any sharding table must produce
 * at least one sharding condition, otherwise a DMLWithoutShardingKeyException is thrown.
 * Non-DML statements are ignored.
 */
@Override public void check(final SQLStatementContext sqlStatementContext, final List<Object> params, final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database) { if (sqlStatementContext.getSqlStatement() instanceof DMLStatement) { ShardingRule rule = database.getRuleMetaData().getSingleRule(ShardingRule.class); if (((TableAvailable) sqlStatementContext).getTablesContext().getTableNames().stream().anyMatch(rule::isShardingTable)) { ShardingSpherePreconditions.checkNotEmpty( new ShardingConditionEngine(globalRuleMetaData, database, rule).createShardingConditions(sqlStatementContext, params), DMLWithoutShardingKeyException::new); } } }
// Non-DML statements must be skipped by the audit: the database rule metadata is never consulted.
@Test void assertNotDMLStatementCheck() { when(sqlStatementContext.getSqlStatement()).thenReturn(mock(DDLStatement.class)); shardingAuditAlgorithm.check(sqlStatementContext, Collections.emptyList(), mock(RuleMetaData.class), database); verify(database, times(0)).getRuleMetaData(); }
// Creates a new entity view (id absent -> CREATE permission checked) or updates an
// existing one (id present -> WRITE permission checked on the stored entity view);
// the tenant id is always taken from the current user, never from the request body.
@ApiOperation(value = "Save or update entity view (saveEntityView)", notes = ENTITY_VIEW_DESCRIPTION + MODEL_DESCRIPTION + "Remove 'id', 'tenantId' and optionally 'customerId' from the request body example (below) to create new Entity View entity." + TENANT_OR_CUSTOMER_AUTHORITY_PARAGRAPH) @PreAuthorize("hasAnyAuthority('TENANT_ADMIN', 'CUSTOMER_USER')") @RequestMapping(value = "/entityView", method = RequestMethod.POST) @ResponseBody public EntityView saveEntityView( @Parameter(description = "A JSON object representing the entity view.") @RequestBody EntityView entityView) throws Exception { entityView.setTenantId(getCurrentUser().getTenantId()); EntityView existingEntityView = null; if (entityView.getId() == null) { accessControlService .checkPermission(getCurrentUser(), Resource.ENTITY_VIEW, Operation.CREATE, null, entityView); } else { existingEntityView = checkEntityViewId(entityView.getId(), Operation.WRITE); } return tbEntityViewService.save(entityView, existingEntityView, getCurrentUser()); }
// Saves a new entity view, verifies the persisted fields and the emitted ADDED
// notifications, then renames it and verifies the UPDATED notifications; finally checks
// the old name is no longer resolvable.
@Test public void testSaveEntityView() throws Exception { String name = "Test entity view"; Mockito.reset(tbClusterService, auditLogService); EntityView savedView = getNewSavedEntityView(name); Assert.assertNotNull(savedView); Assert.assertNotNull(savedView.getId()); Assert.assertTrue(savedView.getCreatedTime() > 0); assertEquals(tenantId, savedView.getTenantId()); Assert.assertNotNull(savedView.getCustomerId()); assertEquals(NULL_UUID, savedView.getCustomerId().getId()); assertEquals(name, savedView.getName()); EntityView foundEntityView = doGet("/api/entityView/" + savedView.getId().getId().toString(), EntityView.class); assertEquals(savedView, foundEntityView); testBroadcastEntityStateChangeEventTime(foundEntityView.getId(), tenantId, 1); testNotifyManyEntityManyTimeMsgToEdgeServiceEntityEqAny(foundEntityView, foundEntityView, tenantId, tenantAdminCustomerId, tenantAdminUserId, TENANT_ADMIN_EMAIL, ActionType.ADDED, 1, 1, 1); Mockito.reset(tbClusterService, auditLogService); savedView.setName("New test entity view"); savedView = doPost("/api/entityView", savedView, EntityView.class); foundEntityView = doGet("/api/entityView/" + savedView.getId().getId().toString(), EntityView.class); assertEquals(savedView, foundEntityView); testBroadcastEntityStateChangeEventTime(foundEntityView.getId(), tenantId, 1); testNotifyManyEntityManyTimeMsgToEdgeServiceEntityEqAny(foundEntityView, foundEntityView, tenantId, tenantAdminCustomerId, tenantAdminUserId, TENANT_ADMIN_EMAIL, ActionType.UPDATED, 1, 1, 5); doGet("/api/tenant/entityViews?entityViewName=" + name) .andExpect(status().isNotFound()) .andExpect(statusReason(containsString(msgErrorNotFound))); }
/**
 * Ensures the given property exists and its value is a non-empty string; otherwise an
 * AlgorithmInitializationException is thrown for the supplied algorithm.
 */
public static void checkAtLeastOneChar(final Properties props, final String propKey, final MaskAlgorithm<?, ?> algorithm) { checkRequired(props, propKey, algorithm); ShardingSpherePreconditions.checkNotEmpty(props.getProperty(propKey), () -> new AlgorithmInitializationException(algorithm, "%s's length must be at least one", propKey)); }
// A single-character property value satisfies the at-least-one-char check without throwing.
@Test void assertCheckAtLeastOneCharSuccess() { Properties props = PropertiesBuilder.build(new Property("key", "1")); assertDoesNotThrow(() -> MaskAlgorithmPropertiesChecker.checkAtLeastOneChar(props, "key", mock(MaskAlgorithm.class))); }
/**
 * Builds the (possibly nested) date-histogram aggregation chain for a time pivot: one
 * aggregation per configured time field, each nested under the previous one. An auto
 * interval over an all-messages range uses auto_date_histogram with a bucket count scaled
 * by the interval's scaling factor; every other case uses a fixed-interval date_histogram
 * derived from the pivot's effective time range, with ordering taken from the pivot.
 */
@Nonnull @Override public CreatedAggregations<AggregationBuilder> doCreateAggregation(Direction direction, String name, Pivot pivot, Time timeSpec, OSGeneratedQueryContext queryContext, Query query) { AggregationBuilder root = null; AggregationBuilder leaf = null; final Interval interval = timeSpec.interval(); final TimeRange timerange = query.timerange(); if (interval instanceof AutoInterval autoInterval && isAllMessages(timerange)) { for (String timeField : timeSpec.fields()) { final AutoDateHistogramAggregationBuilder builder = new AutoDateHistogramAggregationBuilder(name) .field(timeField) .setNumBuckets((int) (BASE_NUM_BUCKETS / autoInterval.scaling())) .format(DATE_TIME_FORMAT); if (root == null && leaf == null) { root = builder; leaf = builder; } else { leaf.subAggregation(builder); leaf = builder; } } } else { for (String timeField : timeSpec.fields()) { final DateHistogramInterval dateHistogramInterval = new DateHistogramInterval(interval.toDateInterval(query.effectiveTimeRange(pivot)).toString()); final List<BucketOrder> ordering = orderListForPivot(pivot, queryContext, defaultOrder); final DateHistogramAggregationBuilder builder = AggregationBuilders.dateHistogram(name) .field(timeField) .order(ordering) .format(DATE_TIME_FORMAT); setInterval(builder, dateHistogramInterval); if (root == null && leaf == null) { root = builder; leaf = builder; } else { leaf.subAggregation(builder); leaf = builder; } } } return CreatedAggregations.create(root, leaf); }
// When the pivot overrides the time range, the histogram interval must be derived from the
// overridden range rather than the query's own range.
@Test public void timeSpecIntervalIsCalculatedOnPivotTimerangeIfOverridden() throws InvalidRangeParametersException { final ArgumentCaptor<TimeRange> timeRangeCaptor = ArgumentCaptor.forClass(TimeRange.class); when(interval.toDateInterval(timeRangeCaptor.capture())).thenReturn(DateInterval.days(1)); when(pivot.timerange()).thenReturn(Optional.of(DerivedTimeRange.of(RelativeRange.create(4242)))); this.osTimeHandler.doCreateAggregation(BucketSpecHandler.Direction.Row, "foobar", pivot, time, queryContext, query); final TimeRange argumentTimeRange = timeRangeCaptor.getValue(); assertThat(argumentTimeRange).isEqualTo(RelativeRange.create(4242)); }
// Returns the Base64-encoded SHA-256 digest of the certificate's DER encoding, or null
// when the certificate cannot be encoded.
// NOTE(review): requireNonNull means a missing certificate surfaces as an uncaught
// NullPointerException rather than null — confirm this is intended.
public String getFingerprint() { try { return Base64.getEncoder().encodeToString(DigestUtils.sha256(Objects.requireNonNull(getX509Certificate()).getEncoded())); } catch (CertificateEncodingException e) { return null; } }
// Verifies the SHA-256 fingerprints of the two known test certificates
// (expected values appear to be placeholder digests in this fixture).
@Test void getFingerprints() { assertEquals("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", getSslCertificateDigiDDomain().getFingerprint()); assertEquals("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", getSslCertificateEidDomain().getFingerprint()); }
// Writes the queued message to the channel (without flushing), completing the
// caller's original promise.
@Override protected void prepare(MessageTuple item) { channel.write(item.originMessage, item.channelPromise); }
// Verifies that prepare() forwards both the message and its promise to Channel.write.
@Test public void testPrepare() { Object message = new Object(); NettyBatchWriteQueue.MessageTuple messageTuple = new NettyBatchWriteQueue.MessageTuple(message, mockChannelPromise); nettyBatchWriteQueue.prepare(messageTuple); Mockito.verify(mockChannel).write(eq(message), eq(mockChannelPromise)); }
@Override public ScheduleResult schedule() { List<RemoteTask> newTasks = IntStream.range(0, partitionToNode.size()) .mapToObj(partition -> taskScheduler.scheduleTask(partitionToNode.get(partition), partition)) .filter(Optional::isPresent) .map(Optional::get) .collect(toImmutableList()); // no need to call stage.transitionToSchedulingSplits() since there is no table splits return ScheduleResult.nonBlocked(true, newTasks, 0); }
// Schedules on a single node and verifies the result is finished, unblocked, and contains
// exactly one task placed on the generated node.
@Test public void testSingleNode() { FixedCountScheduler nodeScheduler = new FixedCountScheduler( (node, partition) -> Optional.of(taskFactory.createTableScanTask( new TaskId("test", 1, 0, 1, 0), node, ImmutableList.of(), new NodeTaskMap.NodeStatsTracker(delta -> {}, delta -> {}, (age, delta) -> {}))), generateRandomNodes(1)); ScheduleResult result = nodeScheduler.schedule(); assertTrue(result.isFinished()); assertTrue(result.getBlocked().isDone()); assertEquals(result.getNewTasks().size(), 1); assertTrue(result.getNewTasks().iterator().next().getNodeId().equals("other 0")); }
@Override public ByteBuffer getHashValue(Type type) { ByteBuffer buffer; // no need to consider the overflow when cast decimal to other type, because this func only be used when querying, not storing. // e.g. For column A with type INT, the data stored certainly no overflow. switch (type.getPrimitiveType()) { case TINYINT: buffer = ByteBuffer.allocate(8); buffer.order(ByteOrder.LITTLE_ENDIAN); buffer.put(value.byteValue()); break; case SMALLINT: buffer = ByteBuffer.allocate(8); buffer.order(ByteOrder.LITTLE_ENDIAN); buffer.putShort(value.shortValue()); break; case INT: buffer = ByteBuffer.allocate(8); buffer.order(ByteOrder.LITTLE_ENDIAN); buffer.putInt(value.intValue()); break; case BIGINT: buffer = ByteBuffer.allocate(8); buffer.order(ByteOrder.LITTLE_ENDIAN); buffer.putLong(value.longValue()); break; case DECIMALV2: buffer = getHashValueOfDecimalV2(); break; case DECIMAL32: case DECIMAL64: { checkType(type); buffer = ByteBuffer.allocate(8); buffer.order(ByteOrder.LITTLE_ENDIAN); int scale = ((ScalarType) type).getScalarScale(); BigDecimal scaledValue = value.multiply(SCALE_FACTOR[scale]); if (type.getPrimitiveType() == PrimitiveType.DECIMAL32) { buffer.putInt(scaledValue.intValue()); } else { buffer.putLong(scaledValue.longValue()); } break; } case DECIMAL128: { checkType(type); int precision = ((ScalarType) type).getScalarPrecision(); int scale = ((ScalarType) type).getScalarScale(); if (precision == 27 && scale == 9) { buffer = getHashValueOfDecimalV2(); } else { BigDecimal scaledValue = value.multiply(SCALE_FACTOR[scale]); try { LargeIntLiteral largeIntLiteral = new LargeIntLiteral(scaledValue.toBigInteger().toString()); return largeIntLiteral.getHashValue(Type.LARGEINT); } catch (AnalysisException e) { throw new InternalError(e.getMessage()); } } break; } default: return super.getHashValue(type); } buffer.flip(); return buffer; }
// DECIMAL128(27, 9) hashing must be byte-identical to DECIMALV2 hashing for the same
// literal, across zero, extremes, signs, and fractional boundary values.
@Test public void testGetHashValueOfDecimal128p27s9() throws AnalysisException { String[] testCases = new String[] { "0.0", Strings.repeat("9", 18) + "." + Strings.repeat("9", 9), "+" + Strings.repeat("9", 18) + "." + Strings.repeat("9", 9), "0.1", "-0.1", "123456789123456789.9654321", "-123456789123456789.9654321", "1." + Strings.repeat("9", 9), "-1." + Strings.repeat("0", 8) + "1", "3.1415926", "-3.1415926", "0.000000001", "-0.000000001", }; for (String tc : testCases) { DecimalLiteral decimalLiteral = new DecimalLiteral(tc); ByteBuffer a = decimalLiteral.getHashValue(Type.DECIMALV2); ByteBuffer b = decimalLiteral.getHashValue(ScalarType.createDecimalV3Type(PrimitiveType.DECIMAL128, 27, 9)); Assert.assertEquals(a.limit(), 12); Assert.assertEquals(a.limit(), b.limit()); Assert.assertEquals(a.getLong(), b.getLong()); Assert.assertEquals(a.getInt(), b.getInt()); } }
/**
 * Checks whether a new config is available, blocking up to {@code timeout} ms if not.
 *
 * <p>Returns {@code true} when a user-forced reload was requested or when the backing
 * file's modification time differs from the last seen timestamp; otherwise sleeps for
 * the timeout and returns {@code false}.
 *
 * @param timeout maximum time in milliseconds to wait when nothing has changed
 * @return true if a (possibly unchanged) config was (re)loaded, false on timeout
 * @throws ConfigInterruptedException if the waiting thread is interrupted
 */
@Override
public boolean nextConfig(long timeout) {
    file.validateFile();
    if (checkReloaded()) {
        log.log(FINE, () -> "User forced config reload at " + System.currentTimeMillis());
        // User forced reload
        setConfigIfChanged(updateConfig());
        ConfigState<T> configState = getConfigState();
        log.log(FINE, () -> "Config updated at " + System.currentTimeMillis() + ", changed: " + configState.isConfigChanged());
        log.log(FINE, () -> "Config: " + configState.getConfig().toString());
        return true;
    }
    // File changed on disk since the last load: reload and bump the generation.
    if (file.getLastModified() != ts) {
        setConfigIncGen(updateConfig());
        return true;
    }
    try {
        // Nothing changed; wait out the full timeout before reporting "no new config".
        Thread.sleep(timeout);
    } catch (InterruptedException e) {
        // NOTE(review): the interrupt flag is not restored before wrapping — confirm
        // ConfigInterruptedException handling re-interrupts upstream.
        throw new ConfigInterruptedException(e);
    }
    return false;
}
/**
 * Verifies that a forced reload picks up new file contents, and that the
 * changed/generation flags and isConfigChangedAndReset() behave correctly across
 * successive reloads with and without content changes.
 */
@Test
public void require_that_new_config_is_detected_on_reload() throws IOException {
    writeConfig("intval", "23");
    ConfigSubscription<SimpletypesConfig> sub = new FileConfigSubscription<>(
            new ConfigKey<>(SimpletypesConfig.class, ""),
            new FileSource(TEST_TYPES_FILE));
    assertTrue(sub.nextConfig(1000));
    assertEquals(23, sub.getConfigState().getConfig().intval());
    writeConfig("intval", "33");
    sub.reload(1);
    assertTrue(sub.nextConfig(1000));
    ConfigSubscription.ConfigState<SimpletypesConfig> configState = sub.getConfigState();
    assertEquals(33, configState.getConfig().intval());
    assertTrue(configState.isConfigChanged());
    assertTrue(configState.isGenerationChanged());
    // Resetting with a generation lower than current (7 > 1 is false) must not clear the flags.
    assertTrue(sub.isConfigChangedAndReset(7L));
    assertSame(configState, sub.getConfigState());
    assertTrue(configState.isConfigChanged());
    assertTrue(configState.isGenerationChanged());
    // Resetting with the matching generation clears flags and produces a fresh state object.
    assertTrue(sub.isConfigChangedAndReset(1L));
    assertNotSame(configState, sub.getConfigState());
    configState = sub.getConfigState();
    assertFalse(configState.isConfigChanged());
    assertFalse(configState.isGenerationChanged());
    // Reloading the same content bumps the generation without a config change.
    sub.reload(2);
    assertTrue(sub.nextConfig(1000));
    configState = sub.getConfigState();
    assertEquals(33, configState.getConfig().intval());
    assertFalse(configState.isConfigChanged());
    assertTrue(configState.isGenerationChanged());
    assertFalse(sub.isConfigChangedAndReset(2L));
    assertNotSame(configState, sub.getConfigState());
    configState = sub.getConfigState();
    assertFalse(configState.isConfigChanged());
    assertFalse(configState.isGenerationChanged());
}
/**
 * Returns the cell at (rowIndex, columnIndex) rendered as a string.
 *
 * <p>Textual JSON nodes are returned unquoted via {@code textValue()}; every other node
 * kind (numbers, objects, arrays, booleans, null) is rendered with its JSON
 * serialization via {@code toString()}.
 *
 * @param rowIndex    zero-based row in the results array
 * @param columnIndex zero-based column within the row
 * @return the string form of the cell value
 */
@Override
public String getString(int rowIndex, int columnIndex) {
    final JsonNode cell = _resultsArray.get(rowIndex).get(columnIndex);
    return cell.isTextual() ? cell.textValue() : cell.toString();
}
/**
 * Verifies that getString returns the raw text of a textual cell in the first
 * row/column of the selection result set.
 */
@Test
public void testGetString() {
    // Run the test
    final String result = _selectionResultSetUnderTest.getString(0, 0);
    // Verify the results
    assertEquals("r1c1", result);
}
@Override public List<KsqlPartitionLocation> locate( final List<KsqlKey> keys, final RoutingOptions routingOptions, final RoutingFilterFactory routingFilterFactory, final boolean isRangeScan ) { if (isRangeScan && keys.isEmpty()) { throw new IllegalStateException("Query is range scan but found no range keys."); } final ImmutableList.Builder<KsqlPartitionLocation> partitionLocations = ImmutableList.builder(); final Set<Integer> filterPartitions = routingOptions.getPartitions(); final Optional<Set<KsqlKey>> keySet = keys.isEmpty() ? Optional.empty() : Optional.of(Sets.newHashSet(keys)); // Depending on whether this is a key-based lookup, determine which metadata method to use. // If we don't have keys, find the metadata for all partitions since we'll run the query for // all partitions of the state store rather than a particular one. //For issue #7174. Temporarily turn off metadata finding for a partition with keys //if there are more than one key. final List<PartitionMetadata> metadata; if (keys.size() == 1 && keys.get(0).getKey().size() == 1 && !isRangeScan) { metadata = getMetadataForKeys(keys, filterPartitions); } else { metadata = getMetadataForAllPartitions(filterPartitions, keySet); } if (metadata.isEmpty()) { final MaterializationException materializationException = new MaterializationException( "Cannot determine which host contains the required partitions to serve the pull query. \n" + "The underlying persistent query may be restarting (e.g. as a result of " + "ALTER SYSTEM) view the status of your by issuing <DESCRIBE foo>."); LOG.debug(materializationException.getMessage()); throw materializationException; } // Go through the metadata and group them by partition. 
for (PartitionMetadata partitionMetadata : metadata) { LOG.debug("Handling pull query for partition {} of state store {}.", partitionMetadata.getPartition(), storeName); final HostInfo activeHost = partitionMetadata.getActiveHost(); final Set<HostInfo> standByHosts = partitionMetadata.getStandbyHosts(); final int partition = partitionMetadata.getPartition(); final Optional<Set<KsqlKey>> partitionKeys = partitionMetadata.getKeys(); LOG.debug("Active host {}, standby {}, partition {}.", activeHost, standByHosts, partition); // For a given partition, find the ordered, filtered list of hosts to consider final List<KsqlNode> filteredHosts = getFilteredHosts(routingOptions, routingFilterFactory, activeHost, standByHosts, partition); partitionLocations.add(new PartitionLocation(partitionKeys, partition, filteredHosts)); } return partitionLocations.build(); }
/**
 * Verifies that locate() rejects a range scan invoked without any range keys.
 */
@Test
public void shouldThrowIfRangeScanAndKeysEmpty() {
    // Given:
    getEmtpyMetadata();
    // When:
    final Exception e = assertThrows(
        IllegalStateException.class,
        () -> locator.locate(Collections.emptyList(), routingOptions, routingFilterFactoryActive, true)
    );
    // Then:
    assertThat(e.getMessage(), containsString(
        "Query is range scan but found no range keys."));
}
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap(); Iterator subIter = subscriptions.keySet().iterator(); //initialize subscription mappings for assignment while (subIter.hasNext()) { String memberId = (String) subIter.next(); assignment.put(memberId, new ArrayList()); } ArrayList<String> consumerList = new ArrayList(Utils.sorted(subscriptions.keySet())); Iterator partIter = this.allPartitionsSorted(partitionsPerTopic, subscriptions).iterator(); //assign partitions at random while (partIter.hasNext()) { TopicPartition partition = (TopicPartition) partIter.next(); String topic = partition.topic(); int rand = ThreadLocalRandom.current().nextInt(0, consumerList.size()); while (!((Subscription) subscriptions.get(consumerList.get(rand))).topics().contains(topic)) { rand = ThreadLocalRandom.current().nextInt(0, consumerList.size()); } (assignment.get(consumerList.get(rand))).add(partition); } return assignment; }
/**
 * Verifies that a single partition is assigned to exactly one of two subscribed
 * consumers — the other consumer must receive an empty assignment. Since the assignor
 * is random, either consumer may win.
 */
@Test
public void assignmentWorksWithMultipleConsumers() {
    String topic = "testTopic";
    List<String> topicList = new ArrayList<String>();
    topicList.add(topic);
    String consumerId1 = "testConsumer1";
    String consumerId2 = "testConsumer2";
    Map<String, Integer> partitionsPerTopic = new HashMap<>();
    partitionsPerTopic.put(topic, 1);
    Map<String, Subscription> consumers = new HashMap<>();
    consumers.put(consumerId1, new Subscription(topicList));
    consumers.put(consumerId2, new Subscription(topicList));
    Map<String, List<TopicPartition>> assignment = testAssignor.assign(partitionsPerTopic, consumers);
    List<TopicPartition> testAssignment = new ArrayList<>();
    testAssignment.add(new TopicPartition(topic, 0));
    // Accept either outcome of the random choice, as long as exactly one consumer got it.
    if (assignment.get(consumerId1).equals(Collections.<String>emptyList())
            && assignment.get(consumerId2).equals(testAssignment)) {
        return;
    } else if (assignment.get(consumerId1).equals(testAssignment)
            && assignment.get(consumerId2).equals(Collections.<String>emptyList())) {
        return;
    } else {
        fail("Partition should be assigned to a single consumer");
    }
}
/**
 * Validates every module of the project reactor plus the branch/pull-request
 * parameters, collecting all problems and failing with a single aggregated message.
 *
 * @param reactor the project reactor whose modules are validated
 * @throws MessageException if any validation message was collected
 */
public void validate(ProjectReactor reactor) {
    final List<String> problems = new ArrayList<>();
    for (ProjectDefinition module : reactor.getProjects()) {
        validateModule(module, problems);
    }
    // Branch parameters are validated by the plugin when present; otherwise we must
    // reject branch/PR parameters that require the plugin.
    if (isBranchFeatureAvailable()) {
        branchParamsValidator.validate(problems);
    } else {
        validateBranchParamsWhenPluginAbsent(problems);
        validatePullRequestParamsWhenPluginAbsent(problems);
    }
    if (!problems.isEmpty()) {
        throw MessageException.of("Validation of project failed:\n o " + String.join("\n o ", problems));
    }
}
/**
 * Verifies that setting sonar.pullrequest.base without the branch plugin fails with a
 * message pointing at the required edition and documentation link.
 */
@Test
void fail_when_pull_request_base_specified_but_branch_plugin_not_present() {
    ProjectDefinition def = ProjectDefinition.create().setProperty(CoreProperties.PROJECT_KEY_PROPERTY, "foo");
    ProjectReactor reactor = new ProjectReactor(def);
    when(settings.get(ScannerProperties.PULL_REQUEST_BASE)).thenReturn(Optional.of("feature1"));
    assertThatThrownBy(() -> underTest.validate(reactor))
        .isInstanceOf(MessageException.class)
        .hasMessageContaining(format("To use the property \"sonar.pullrequest.base\" and analyze pull requests, Developer Edition or above is required. See %s for more information.", LINK_TO_DOC));
}
@Override public boolean checkIndexExists( Database database, String schemaName, String tableName, String[] idxFields ) throws KettleDatabaseException { String tablename = database.getDatabaseMeta().getQuotedSchemaTableCombination( schemaName, tableName ); boolean[] exists = new boolean[ idxFields.length]; for ( int i = 0; i < exists.length; i++ ) { exists[i] = false; } try { // Get a list of all the indexes for this table // ResultSet indexList = null; try { indexList = database.getDatabaseMetaData().getIndexInfo( null, null, tablename, false, true ); while ( indexList.next() ) { // String tablen = indexList.getString("TABLE_NAME"); // String indexn = indexList.getString("INDEX_NAME"); String column = indexList.getString( "COLUMN_NAME" ); // int pos = indexList.getShort("ORDINAL_POSITION"); // int type = indexList.getShort("TYPE"); int idx = Const.indexOfString( column, idxFields ); if ( idx >= 0 ) { exists[idx] = true; } } } finally { if ( indexList != null ) { indexList.close(); } } // See if all the fields are indexed... boolean all = true; for ( int i = 0; i < exists.length && all; i++ ) { if ( !exists[i] ) { all = false; } } return all; } catch ( Exception e ) { throw new KettleDatabaseException( "Unable to determine if indexes exists on table [" + tablename + "]", e ); } }
/**
 * Verifies checkIndexExists() against a mocked JDBC metadata cursor that yields two
 * indexed columns (ROW1COL2, ROW2COL2): all-present succeeds, any missing column fails.
 */
@Test
public void testCheckIndexExists() throws Exception {
    Database db = Mockito.mock( Database.class );
    ResultSet rs = Mockito.mock( ResultSet.class );
    DatabaseMetaData dmd = Mockito.mock( DatabaseMetaData.class );
    DatabaseMeta dm = Mockito.mock( DatabaseMeta.class );
    Mockito.when( dm.getQuotedSchemaTableCombination( "", "FOO" ) ).thenReturn( "FOO" );
    // Simulate a cursor with exactly two rows.
    Mockito.when( rs.next() ).thenAnswer( new Answer<Boolean>() {
        public Boolean answer( InvocationOnMock invocation ) throws Throwable {
            rowCnt++;
            // FIX: rely on autoboxing instead of the deprecated `new Boolean(...)` constructor.
            return rowCnt < 3;
        }
    } );
    Mockito.when( db.getDatabaseMetaData() ).thenReturn( dmd );
    Mockito.when( dmd.getIndexInfo( null, null, "FOO", false, true ) ).thenReturn( rs );
    // Each cursor row reports one indexed column name.
    Mockito.when( rs.getString( "COLUMN_NAME" ) ).thenAnswer( new Answer<String>() {
        @Override
        public String answer( InvocationOnMock invocation ) throws Throwable {
            if ( rowCnt == 1 ) {
                return "ROW1COL2";
            } else if ( rowCnt == 2 ) {
                return "ROW2COL2";
            } else {
                return null;
            }
        }
    } );
    Mockito.when( db.getDatabaseMeta() ).thenReturn( dm );
    assertTrue( odbcMeta.checkIndexExists( db, "", "FOO", new String[] { "ROW1COL2", "ROW2COL2" } ) );
    assertFalse( odbcMeta.checkIndexExists( db, "", "FOO", new String[] { "ROW2COL2", "NOTTHERE" } ) );
    assertFalse( odbcMeta.checkIndexExists( db, "", "FOO", new String[] { "NOTTHERE", "ROW1COL2" } ) );
}
/**
 * Refers to a remote service, wrapping the resulting invoker with the activated
 * {@code InvokerListener} extensions unless the URL is a registry URL or carries an
 * explicit registry cluster type.
 *
 * @param type the service interface class
 * @param url  the service URL to refer to
 * @return the (possibly listener-wrapped) invoker
 * @throws RpcException if the underlying protocol fails to refer
 */
@Override
public <T> Invoker<T> refer(Class<T> type, URL url) throws RpcException {
    // Registry URLs are passed straight through to the underlying protocol.
    if (UrlUtils.isRegistry(url)) {
        return protocol.refer(type, url);
    }
    Invoker<T> invoker = protocol.refer(type, url);
    // A configured registry cluster type opts out of listener wrapping.
    if (!StringUtils.isEmpty(url.getParameter(REGISTRY_CLUSTER_TYPE_KEY))) {
        return invoker;
    }
    List<InvokerListener> listeners = Collections.unmodifiableList(
            ScopeModelUtil.getExtensionLoader(InvokerListener.class, invoker.getUrl().getScopeModel())
                    .getActivateExtension(url, INVOKER_LISTENER_KEY));
    return new ListenerInvokerWrapper<>(invoker, listeners);
}
/**
 * Verifies listener loading for remote references: without invoker.listener no
 * listeners are attached; with invoker.listener=count the "count" listener is loaded
 * and invoked exactly once.
 */
@Test
void testLoadingListenerForRemoteReference() {
    // verify that no listener is loaded by default
    URL urlWithoutListener = URL.valueOf("dubbo://127.0.0.1:20880/DemoService")
            .addParameter(INTERFACE_KEY, DemoService.class.getName());
    AbstractInvoker<DemoService> invokerWithoutListener =
            new AbstractInvoker<DemoService>(DemoService.class, urlWithoutListener) {
                @Override
                protected Result doInvoke(Invocation invocation) throws Throwable {
                    return null;
                }
            };
    Protocol protocolWithoutListener = mock(Protocol.class);
    when(protocolWithoutListener.refer(DemoService.class, urlWithoutListener))
            .thenReturn(invokerWithoutListener);
    ProtocolListenerWrapper protocolListenerWrapperWithoutListener =
            new ProtocolListenerWrapper(protocolWithoutListener);
    Invoker<?> invoker = protocolListenerWrapperWithoutListener.refer(DemoService.class, urlWithoutListener);
    Assertions.assertTrue(invoker instanceof ListenerInvokerWrapper);
    Assertions.assertEquals(
            0, ((ListenerInvokerWrapper<?>) invoker).getListeners().size());
    // verify that if the invoker.listener is configured, then load the specified listener
    URL urlWithListener = URL.valueOf("dubbo://127.0.0.1:20880/DemoService")
            .addParameter(INTERFACE_KEY, DemoService.class.getName())
            .addParameter(INVOKER_LISTENER_KEY, "count");
    AbstractInvoker<DemoService> invokerWithListener =
            new AbstractInvoker<DemoService>(DemoService.class, urlWithListener) {
                @Override
                protected Result doInvoke(Invocation invocation) throws Throwable {
                    return null;
                }
            };
    Protocol protocol = mock(Protocol.class);
    when(protocol.refer(DemoService.class, urlWithListener)).thenReturn(invokerWithListener);
    ProtocolListenerWrapper protocolListenerWrapper = new ProtocolListenerWrapper(protocol);
    invoker = protocolListenerWrapper.refer(DemoService.class, urlWithListener);
    Assertions.assertTrue(invoker instanceof ListenerInvokerWrapper);
    // The counting listener records one referred() call.
    Assertions.assertEquals(1, CountInvokerListener.getCounter());
}
/**
 * Registers a coins-received listener to run on the default user thread.
 *
 * <p>Convenience overload that delegates to the two-argument variant with
 * {@code Threading.USER_THREAD} as the executor.
 *
 * @param listener the callback invoked when the wallet receives coins
 */
public void addCoinsReceivedEventListener(WalletCoinsReceivedEventListener listener) {
    addCoinsReceivedEventListener(Threading.USER_THREAD, listener);
}
/**
 * Verifies that an exception thrown by one coins-received listener does not prevent
 * subsequently registered listeners from running.
 */
@Test
public void exceptionsDoNotBlockAllListeners() {
    // Check that if a wallet listener throws an exception, the others still run.
    wallet.addCoinsReceivedEventListener((wallet, tx, prevBalance, newBalance) -> {
        log.info("onCoinsReceived 1");
        throw new RuntimeException("barf");
    });
    final AtomicInteger flag = new AtomicInteger();
    wallet.addCoinsReceivedEventListener((wallet, tx, prevBalance, newBalance) -> {
        log.info("onCoinsReceived 2");
        flag.incrementAndGet();
    });
    sendMoneyToWallet(AbstractBlockChain.NewBlockType.BEST_CHAIN, COIN);
    log.info("Wait for user thread");
    // Listeners run asynchronously on the user thread; drain it before asserting.
    Threading.waitForUserCode();
    log.info("... and test flag.");
    assertEquals(1, flag.get());
}
/**
 * Writes a single byte to the underlying stream and advances the tracked file position.
 *
 * <p>FIX: the position is now incremented only after the delegate write succeeds, so an
 * {@link IOException} from the underlying stream no longer leaves {@code filePosition}
 * counting a byte that was never written.
 *
 * @param b the byte to write (low 8 bits are used)
 * @throws IOException if the underlying stream fails
 */
@Override
public void write(int b) throws IOException {
    super.write(b);
    filePosition++;
}
/**
 * Verifies single-byte writes: each value 0..255 round-trips, and a value above 0xFF
 * (0x180) is truncated to its low byte (0x80).
 */
@Test
public void testWriteByte() throws IOException {
    byte[] arr = new byte[257];
    for (int i=0; i<256; i++) {
        arr[i] = (byte)i;
        writer.write(i);
    }
    // 0x180 exceeds one byte; only the low 8 bits (0x80) should be written.
    arr[256] = (byte)0x80;
    writer.write(0x180);
    expectData(arr);
}
@Override public Long createConfig(ConfigSaveReqVO createReqVO) { // 校验参数配置 key 的唯一性 validateConfigKeyUnique(null, createReqVO.getKey()); // 插入参数配置 ConfigDO config = ConfigConvert.INSTANCE.convert(createReqVO); config.setType(ConfigTypeEnum.CUSTOM.getType()); configMapper.insert(config); return config.getId(); }
/**
 * Verifies that createConfig persists the request fields and always stores the CUSTOM
 * type, returning the generated id.
 */
@Test
public void testCreateConfig_success() {
    // 准备参数
    ConfigSaveReqVO reqVO = randomPojo(ConfigSaveReqVO.class)
            .setId(null); // 防止 id 被赋值,导致唯一性校验失败
    // 调用
    Long configId = configService.createConfig(reqVO);
    // 断言
    assertNotNull(configId);
    // 校验记录的属性是否正确
    ConfigDO config = configMapper.selectById(configId);
    assertPojoEquals(reqVO, config, "id");
    assertEquals(ConfigTypeEnum.CUSTOM.getType(), config.getType());
}
/**
 * Formats a byte count as a human-readable string using binary (1024-based) units.
 *
 * <p>Values below 1024 are rendered as-is ("512 B"); larger values are scaled to the
 * largest fitting unit and printed with one decimal place ("1.7 KB", "6.8 MB", ...).
 *
 * @param locale locale used for decimal formatting
 * @param bytes  the byte count to format
 * @return the formatted size string
 */
public static String humanReadableBytes(Locale locale, long bytes) {
    final int unit = 1024;
    if (bytes < unit) {
        return bytes + " B";
    }
    // Largest power of 1024 not exceeding the value selects the unit prefix.
    final int exponent = (int) (Math.log(bytes) / Math.log(unit));
    final char prefix = "KMGTPE".charAt(exponent - 1);
    return String.format(locale, "%.1f %sB", bytes / Math.pow(unit, exponent), prefix);
}
/**
 * Verifies humanReadableBytes across the B/KB/MB/GB/TB ranges with binary (1024)
 * scaling and one-decimal formatting.
 */
@Test
public void testHumanReadableBytes() {
    assertEquals("0 B", StringHelper.humanReadableBytes(Locale.ENGLISH, 0));
    assertEquals("32 B", StringHelper.humanReadableBytes(Locale.ENGLISH, 32));
    assertEquals("1.0 KB", StringHelper.humanReadableBytes(Locale.ENGLISH, 1024));
    assertEquals("1.7 KB", StringHelper.humanReadableBytes(Locale.ENGLISH, 1730));
    assertEquals("108.0 KB", StringHelper.humanReadableBytes(Locale.ENGLISH, 110592));
    assertEquals("6.8 MB", StringHelper.humanReadableBytes(Locale.ENGLISH, 7077888));
    assertEquals("432.0 MB", StringHelper.humanReadableBytes(Locale.ENGLISH, 452984832));
    assertEquals("27.0 GB", StringHelper.humanReadableBytes(Locale.ENGLISH, 28991029248L));
    assertEquals("1.7 TB", StringHelper.humanReadableBytes(Locale.ENGLISH, 1855425871872L));
}
/**
 * Lists the contents of a directory by delegating to the prefix-based overload.
 *
 * <p>For a container (bucket) the prefix is empty; for any nested path the prefix is the
 * object key followed by the path delimiter.
 *
 * @param directory the container or directory to list
 * @param listener  progress callback for the listing
 * @return the directory contents
 * @throws BackgroundException if the listing fails
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    final String prefix;
    if (containerService.isContainer(directory)) {
        prefix = StringUtils.EMPTY;
    } else {
        prefix = containerService.getKey(directory) + Path.DELIMITER;
    }
    return this.list(directory, listener, prefix);
}
/**
 * Verifies that listing a container that does not exist raises NotfoundException.
 */
@Test(expected = NotfoundException.class)
public void testListNotfoundContainer() throws Exception {
    final Path container = new Path("notfound.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume));
    new SwiftObjectListService(session).list(container, new DisabledListProgressListener());
}
/**
 * Handles a routed HTTP request while this handler is the leader.
 *
 * <p>Registers the request with the in-flight tracker, validates the request shape and
 * any multipart file uploads, deserializes the JSON body into the expected request type
 * (an empty body maps to {@code {}}), resolves path/query parameters, and dispatches to
 * {@link #respondToRequest}. All failure paths — synchronous and asynchronous — funnel
 * through {@code handleException}, and uploaded files are always cleaned up via
 * {@code finalizeRequestProcessing} once processing completes.
 *
 * @param ctx           the Netty channel context
 * @param routedRequest the request plus its routing result (path/query params)
 * @param gateway       the leader gateway to serve the request against
 */
@Override
protected void respondAsLeader(
        ChannelHandlerContext ctx, RoutedRequest routedRequest, T gateway) {
    HttpRequest httpRequest = routedRequest.getRequest();
    if (log.isTraceEnabled()) {
        log.trace("Received request " + httpRequest.uri() + '.');
    }
    FileUploads uploadedFiles = null;
    try {
        // Refuse work if this handler instance has already been closed.
        if (!inFlightRequestTracker.registerRequest()) {
            log.debug(
                    "The handler instance for {} had already been closed.",
                    untypedResponseMessageHeaders.getTargetRestEndpointURL());
            ctx.channel().close();
            return;
        }
        if (!(httpRequest instanceof FullHttpRequest)) {
            // The RestServerEndpoint defines a HttpObjectAggregator in the pipeline that always
            // returns FullHttpRequests.
            log.error("Implementation error: Received a request that wasn't a FullHttpRequest.");
            throw new RestHandlerException(
                    "Bad request received.", HttpResponseStatus.BAD_REQUEST);
        }
        final ByteBuf msgContent = ((FullHttpRequest) httpRequest).content();
        uploadedFiles = FileUploadHandler.getMultipartFileUploads(ctx);
        // Reject uploads on endpoints that do not accept them.
        if (!untypedResponseMessageHeaders.acceptsFileUploads()
                && !uploadedFiles.getUploadedFiles().isEmpty()) {
            throw new RestHandlerException(
                    "File uploads not allowed.", HttpResponseStatus.BAD_REQUEST);
        }
        R request;
        if (msgContent.capacity() == 0) {
            // An empty body is treated as an empty JSON object.
            try {
                request = MAPPER.readValue("{}", untypedResponseMessageHeaders.getRequestClass());
            } catch (JsonParseException | JsonMappingException je) {
                throw new RestHandlerException(
                        "Bad request received. Request did not conform to expected format.",
                        HttpResponseStatus.BAD_REQUEST,
                        je);
            }
        } else {
            try {
                InputStream in = new ByteBufInputStream(msgContent);
                request = MAPPER.readValue(in, untypedResponseMessageHeaders.getRequestClass());
            } catch (JsonParseException | JsonMappingException je) {
                throw new RestHandlerException(
                        String.format(
                                "Request did not match expected format %s.",
                                untypedResponseMessageHeaders
                                        .getRequestClass()
                                        .getSimpleName()),
                        HttpResponseStatus.BAD_REQUEST,
                        je);
            }
        }
        // Bind path params, query params and uploads into the typed handler request.
        final HandlerRequest<R> handlerRequest;
        try {
            handlerRequest =
                    HandlerRequest.resolveParametersAndCreate(
                            request,
                            untypedResponseMessageHeaders.getUnresolvedMessageParameters(),
                            routedRequest.getRouteResult().pathParams(),
                            routedRequest.getRouteResult().queryParams(),
                            uploadedFiles.getUploadedFiles());
        } catch (HandlerRequestException hre) {
            log.error("Could not create the handler request.", hre);
            throw new RestHandlerException(
                    String.format(
                            "Bad request, could not parse parameters: %s", hre.getMessage()),
                    HttpResponseStatus.BAD_REQUEST,
                    hre);
        }
        log.trace("Starting request processing.");
        CompletableFuture<Void> requestProcessingFuture =
                respondToRequest(ctx, httpRequest, handlerRequest, gateway);
        final FileUploads finalUploadedFiles = uploadedFiles;
        // Route async failures through handleException, then always clean up the uploads.
        requestProcessingFuture
                .handle(
                        (Void ignored, Throwable throwable) -> {
                            if (throwable != null) {
                                return handleException(
                                        ExceptionUtils.stripCompletionException(throwable),
                                        ctx,
                                        httpRequest);
                            }
                            return CompletableFuture.<Void>completedFuture(null);
                        })
                .thenCompose(Function.identity())
                .whenComplete(
                        (Void ignored, Throwable throwable) -> {
                            if (throwable != null) {
                                log.warn(
                                        "An exception occurred while handling another exception.",
                                        throwable);
                            }
                            finalizeRequestProcessing(finalUploadedFiles);
                        });
    } catch (Throwable e) {
        // Synchronous failures also get the exception response plus upload cleanup.
        final FileUploads finalUploadedFiles = uploadedFiles;
        handleException(e, ctx, httpRequest)
                .whenComplete(
                        (Void ignored, Throwable throwable) ->
                                finalizeRequestProcessing(finalUploadedFiles));
    }
}
/**
 * Verifies that uploaded files survive while request processing is still in flight and
 * are deleted once the (asynchronous) processing future completes.
 */
@Test
void testFileCleanup(@TempDir File temporaryFolder) throws Exception {
    final Path dir = temporaryFolder.toPath();
    final Path file = dir.resolve("file");
    Files.createFile(file);
    RestfulGateway mockRestfulGateway = new TestingRestfulGateway.Builder().build();
    final GatewayRetriever<RestfulGateway> mockGatewayRetriever =
            () -> CompletableFuture.completedFuture(mockRestfulGateway);
    CompletableFuture<Void> requestProcessingCompleteFuture = new CompletableFuture<>();
    TestHandler handler = new TestHandler(requestProcessingCompleteFuture, mockGatewayRetriever);
    RouteResult<?> routeResult =
            new RouteResult<>("", "", Collections.emptyMap(), Collections.emptyMap(), "");
    HttpRequest request =
            new DefaultFullHttpRequest(
                    HttpVersion.HTTP_1_1,
                    HttpMethod.GET,
                    TestHandler.TestHeaders.INSTANCE.getTargetRestEndpointURL(),
                    Unpooled.wrappedBuffer(new byte[0]));
    RoutedRequest<?> routerRequest = new RoutedRequest<>(routeResult, request);
    // The handler fetches uploads from a channel attribute; stub it to point at our dir.
    Attribute<FileUploads> attribute = new SimpleAttribute();
    attribute.set(new FileUploads(dir));
    Channel channel = mock(Channel.class);
    when(channel.attr(any(AttributeKey.class))).thenReturn(attribute);
    ChannelHandlerContext context = mock(ChannelHandlerContext.class);
    when(context.channel()).thenReturn(channel);
    handler.respondAsLeader(context, routerRequest, mockRestfulGateway);
    // the (asynchronous) request processing is not yet complete so the files should still exist
    assertThat(Files.exists(file)).isTrue();
    requestProcessingCompleteFuture.complete(null);
    assertThat(Files.exists(file)).isFalse();
}
@Override public ExecuteContext after(ExecuteContext context) { DefaultLitePullConsumerWrapper wrapper = RocketMqPullConsumerController .getPullConsumerWrapper((DefaultLitePullConsumer) context.getObject()); if (wrapper == null) { PullConsumerLocalInfoUtils.setSubscriptionType(SubscriptionType.SUBSCRIBE); } else { wrapper.setSubscriptionType(SubscriptionType.SUBSCRIBE); } if (handler != null) { handler.doAfter(context); return context; } // After adding topic subscriptions, consumer subscription information changes, and consumers need to be enabled // or prohibited from consuming according to the prohibited topic configuration disablePullConsumption(wrapper); return context; }
/**
 * Verifies after(): with no cached wrapper the SUBSCRIBE type is stored thread-locally;
 * with a cached wrapper the type and the updated subscribed topics land on the wrapper.
 */
@Test
public void testAfter() {
    // Wrapper is null
    interceptor.after(context);
    Assert.assertEquals(PullConsumerLocalInfoUtils.getSubscriptionType().name(), "SUBSCRIBE");
    PullConsumerLocalInfoUtils.removeSubscriptionType();
    // Wrapper is not null
    pullConsumerWrapper.setSubscribedTopics(subscription.keySet());
    subscription.put("test-topic", new SubscriptionData());
    RocketMqPullConsumerController.cachePullConsumer(pullConsumer);
    interceptor.after(context);
    Assert.assertEquals(pullConsumerWrapper.getSubscriptionType().name(), "SUBSCRIBE");
    Assert.assertEquals(pullConsumerWrapper.getSubscribedTopics(), subscription.keySet());
}
/**
 * Extracts the query parameters of a URI as a name/value map.
 *
 * <p>Composite URIs (e.g. {@code static:(...)?a=b}) are parsed via
 * {@link URISupport#parseComposite}; plain URIs have their query string parsed directly.
 * A URI without parameters yields an immutable empty map.
 *
 * @param uri the URI to inspect
 * @return the parsed parameters, or an empty map when there are none
 * @throws URISyntaxException if the URI cannot be parsed
 */
public static Map<String, String> parseParameters(URI uri) throws URISyntaxException {
    if (isCompositeURI(uri)) {
        CompositeData data = URISupport.parseComposite(uri);
        Map<String, String> parameters = new HashMap<>(data.getParameters());
        return parameters.isEmpty() ? emptyMap() : parameters;
    }
    String query = uri.getQuery();
    return query == null ? emptyMap() : parseQuery(stripPrefix(query, "?"));
}
/**
 * Verifies parameter parsing for composite URIs (outer query wins), composite-style
 * plain URIs, and a URI without any query string.
 */
@Test
public void testParsingParams() throws Exception {
    URI uri = new URI("static:(http://localhost:61617?proxyHost=jo&proxyPort=90)?proxyHost=localhost&proxyPort=80");
    Map<String, String> parameters = URISupport.parseParameters(uri);
    verifyParams(parameters);
    uri = new URI("static://http://localhost:61617?proxyHost=localhost&proxyPort=80");
    parameters = URISupport.parseParameters(uri);
    verifyParams(parameters);
    uri = new URI("http://0.0.0.0:61616");
    // NOTE(review): no assertion on this last result — presumably it should verify an
    // empty map is returned for a query-less URI; confirm and add the check.
    parameters = URISupport.parseParameters(uri);
}
/**
 * Analyzes a predicate tree, computing its size, per-subtree sizes, and the minimum
 * number of features any document must match.
 *
 * <p>The min-feature estimate is rounded up, and one extra feature is added when the
 * tree contains a negation predicate.
 *
 * @param predicate the root of the predicate tree
 * @return the aggregated analysis result
 */
public static PredicateTreeAnalyzerResult analyzePredicateTree(Predicate predicate) {
    AnalyzerContext context = new AnalyzerContext();
    int treeSize = aggregatePredicateStatistics(predicate, false, context);
    double rawMinFeature = findMinFeature(predicate, false, context);
    // Negation predicates require one additional feature to be matched.
    int negationAdjustment = context.hasNegationPredicate ? 1 : 0;
    int minFeature = (int) Math.ceil(rawMinFeature) + negationAdjustment;
    return new PredicateTreeAnalyzerResult(minFeature, treeSize, context.subTreeSizes);
}
/**
 * Verifies that a feature conjunction is treated as a single leaf when computing tree
 * size and the per-subtree size map.
 */
@Test
void require_that_featureconjunctions_count_as_leaf_in_subtree_calculation() {
    Predicate p =
            and(
                    and(
                            feature("grault").inRange(0, 10),
                            feature("waldo").inRange(0, 10)),
                    conj(
                            feature("foo").inSet("bar"),
                            feature("baz").inSet("qux"),
                            feature("quux").inSet("corge")));
    PredicateTreeAnalyzerResult r = PredicateTreeAnalyzer.analyzePredicateTree(p);
    assertEquals(3, r.minFeature);
    // The conjunction counts as one leaf: 2 range leaves + 1 conjunction = 3.
    assertEquals(3, r.treeSize);
    assertEquals(4, r.sizeMap.size());
    assertSizeMapContains(r, pred(p).child(0), 2);
    assertSizeMapContains(r, pred(p).child(0).child(0), 1);
    assertSizeMapContains(r, pred(p).child(0).child(1), 1);
    assertSizeMapContains(r, pred(p).child(1), 1);
}
/**
 * Issues an OAuth2 authorization code for the given user/client/scope combination.
 *
 * <p>Delegates creation to the code service and returns only the generated code string.
 *
 * @param userId      id of the authorizing user
 * @param userType    type of the user (e.g. admin vs member)
 * @param clientId    the OAuth2 client requesting authorization
 * @param scopes      requested scopes
 * @param redirectUri redirect URI to bind to the code
 * @param state       opaque client state echoed back on redirect
 * @return the newly created authorization code
 */
@Override
public String grantAuthorizationCodeForCode(Long userId, Integer userType,
                                            String clientId, List<String> scopes,
                                            String redirectUri, String state) {
    return oauth2CodeService.createAuthorizationCode(userId, userType, clientId, scopes, redirectUri, state).getCode();
}
/**
 * Verifies that grantAuthorizationCodeForCode forwards all arguments to the code
 * service and returns the code of the created entity.
 */
@Test
public void testGrantAuthorizationCodeForCode() {
    // 准备参数
    Long userId = randomLongId();
    Integer userType = randomEle(UserTypeEnum.values()).getValue();
    String clientId = randomString();
    List<String> scopes = Lists.newArrayList("read", "write");
    String redirectUri = randomString();
    String state = randomString();
    // mock 方法
    OAuth2CodeDO codeDO = randomPojo(OAuth2CodeDO.class);
    when(oauth2CodeService.createAuthorizationCode(eq(userId), eq(userType), eq(clientId),
            eq(scopes), eq(redirectUri), eq(state))).thenReturn(codeDO);
    // 调用,并断言
    assertEquals(codeDO.getCode(), oauth2GrantService.grantAuthorizationCodeForCode(userId,
            userType, clientId, scopes, redirectUri, state));
}
/**
 * Creates a serde for session-windowed keys of the given type.
 *
 * <p>The inner serde is resolved from the class via {@code Serdes.serdeFrom}, then
 * wrapped so window metadata is (de)serialized alongside the key.
 *
 * @param type class of the inner key type (must have a built-in serde)
 * @return a serde for {@code Windowed<T>} session keys
 */
public static <T> Serde<Windowed<T>> sessionWindowedSerdeFrom(final Class<T> type) {
    return new SessionWindowedSerde<>(Serdes.serdeFrom(type));
}
/**
 * Verifies that a session-windowed key round-trips through the serde produced by
 * sessionWindowedSerdeFrom.
 */
@Test
public void testSessionWindowedSerdeFrom() {
    final Windowed<Integer> sessionWindowed = new Windowed<>(10, new SessionWindow(0, 1));
    final Serde<Windowed<Integer>> sessionWindowedSerde = WindowedSerdes.sessionWindowedSerdeFrom(Integer.class);
    final byte[] bytes = sessionWindowedSerde.serializer().serialize(topic, sessionWindowed);
    final Windowed<Integer> windowed = sessionWindowedSerde.deserializer().deserialize(topic, bytes);
    assertEquals(sessionWindowed, windowed);
}
/**
 * Parses a message stanza from the given XML parser using an empty XML environment.
 *
 * <p>Convenience overload delegating to the two-argument variant with
 * {@code XmlEnvironment.EMPTY}.
 *
 * @param parser the parser positioned at the message element
 * @return the parsed message
 * @throws XmlPullParserException if the XML is malformed
 * @throws IOException            if reading from the parser fails
 * @throws SmackParsingException  if the stanza content is invalid
 */
public static Message parseMessage(XmlPullParser parser)
        throws XmlPullParserException, IOException, SmackParsingException {
    return parseMessage(parser, XmlEnvironment.EMPTY);
}
/**
 * Verifies that a message body containing a nested XHTML tag (not allowed in a plain
 * body element) makes parsing fail with XmlPullParserException.
 */
@Test
public void invalidMessageBodyContainingTagTest() throws Exception {
    String control = XMLBuilder.create("message")
            .namespace(StreamOpen.CLIENT_NAMESPACE)
            .a("from", "romeo@montague.lit/orchard")
            .a("to", "juliet@capulet.lit/balcony")
            .a("id", "zid615d9")
            .a("type", "chat")
            .a("xml:lang", "en")
            .e("body")
                .a("xmlns", "http://www.w3.org/1999/xhtml")
                // A <span> inside <body> is invalid for a plain message body.
                .e("span")
                    .a("style", "font-weight: bold;")
                    .t("Bad Message Body")
            .asString(outputProperties);
    assertThrows(XmlPullParserException.class, () ->
        PacketParserUtils.parseMessage(TestUtils.getMessageParser(control))
    );
}
/**
 * Ensures the request carries a masked SQL string, computing it only when absent.
 *
 * <p>{@code getMaskedKsql()} throws when the masked text has not been set yet; that
 * exception is used deliberately as the "not yet masked" probe, triggering the masking
 * pass. If the getter succeeds, the request is left untouched.
 *
 * @param request the KSQL request to (lazily) mask
 */
public static void setMaskedSqlIfNeeded(final KsqlRequest request) {
    try {
        request.getMaskedKsql();
    } catch (final Exception e) {
        // Masked SQL missing — compute and attach it now.
        ApiServerUtils.setMaskedSql(request);
    }
}
/**
 * Verifies that setMaskedSqlIfNeeded populates the masked SQL (secrets replaced with
 * '[string]') on a request whose masked text was previously unset (getter throws).
 */
@Test
public void shouldMaskKsqlRequestQuery() throws ExecutionException, InterruptedException {
    // Given:
    final String query = "--this is a comment. \n"
        + "CREATE SOURCE CONNECTOR `test-connector` WITH ("
        + " \"connector.class\" = 'PostgresSource', \n"
        + " 'connection.url' = 'jdbc:postgresql://localhost:5432/my.db',\n"
        + " \"mode\"='bulk',\n"
        + " \"topic.prefix\"='jdbc-',\n"
        + " \"table.whitelist\"='users',\n"
        + " \"key\"='username');";
    final KsqlRequest req = new KsqlRequest(query, ImmutableMap.of(), ImmutableMap.of(), 1L);
    final String expected = "CREATE SOURCE CONNECTOR `test-connector` WITH "
        + "(\"connector.class\"='PostgresSource', "
        + "'connection.url'='[string]', "
        + "\"mode\"='[string]', "
        + "\"topic.prefix\"='[string]', "
        + "\"table.whitelist\"='[string]', "
        + "\"key\"='[string]');";
    // When, Then
    // Before masking, the getter throws — that is the "needs masking" signal.
    assertThrows(NullPointerException.class, req::getMaskedKsql);
    ApiServerUtils.setMaskedSqlIfNeeded(req);
    assertThat(req.getMaskedKsql(), is(expected));
}
/**
 * Returns the current write position of this stream.
 *
 * @return the position relative to the stream's own start (buffered bytes plus bytes
 *         already flushed), not an absolute offset in the physical file
 * @throws IOException declared by the stream contract; not thrown here
 */
@Override
public long getPos() throws IOException {
    // The starting position is not determined until a physical file has been assigned, so
    // we return the relative value to the starting position in this method
    return bufferPos + curPosRelative;
}
/**
 * Verifies that getPos reports the stream-relative position: byte-by-byte writes,
 * random-sized chunk writes, and a fresh stream that reuses a physical file (position
 * must restart at 0).
 */
@Test
public void testGetPos() throws Exception {
    FileMergingCheckpointStateOutputStream stream = getNewStream();
    // write one byte one time
    for (int i = 0; i < 64; ++i) {
        assertThat(stream.getPos()).isEqualTo(i);
        stream.write(0x42);
    }
    stream.closeAndGetHandle();
    // write random number of bytes one time
    stream = getNewStream();
    Random rnd = new Random();
    long expectedPos = 0;
    for (int i = 0; i < 7; ++i) {
        int numBytes = rnd.nextInt(16);
        expectedPos += numBytes;
        stream.write(new byte[numBytes]);
        assertThat(stream.getPos()).isEqualTo(expectedPos);
    }
    physicalFileCanBeReused = true;
    SegmentFileStateHandle stateHandle = stream.closeAndGetHandle();
    // reuse the last physical file
    assertThat(stateHandle).isNotNull();
    expectedPos = 0;
    stream = getNewStream(true);
    stream.flushToFile();
    // Even though the physical file already has content, the relative position is 0.
    for (int i = 0; i < 7; ++i) {
        int numBytes = rnd.nextInt(16);
        expectedPos += numBytes;
        stream.write(new byte[numBytes]);
        assertThat(stream.getPos()).isEqualTo(expectedPos);
    }
    stream.closeAndGetHandle();
}
/**
 * Registers a topic name as a system topic.
 *
 * @param systemTopic the topic name to add to the system topic set
 */
public static void addSystemTopic(String systemTopic) {
    SYSTEM_TOPIC_SET.add(systemTopic);
}
/**
 * Verifies that a topic added via addSystemTopic appears in the system topic set.
 */
@Test
public void testAddSystemTopic() {
    String topic = "SYSTEM_TOPIC_TEST";
    TopicValidator.addSystemTopic(topic);
    assertThat(TopicValidator.getSystemTopicSet()).contains(topic);
}
/**
 * Returns the report of a single container, enforcing VIEW_APP access control.
 *
 * @param request carries the container id to look up
 * @return the container report response
 * @throws ContainerNotFoundException          if the id is null or the container is unknown
 * @throws ApplicationAttemptNotFoundException if the owning attempt is unknown
 * @throws YarnException                       if the caller lacks VIEW_APP access
 * @throws IOException                         on RPC/serialization failures
 */
@Override
public GetContainerReportResponse getContainerReport(
    GetContainerReportRequest request) throws YarnException, IOException {
    ContainerId containerId = request.getContainerId();
    if (containerId == null) {
        throw new ContainerNotFoundException("Invalid container id: null");
    }
    // Derive the owning application from the container id for the ACL checks below.
    ApplicationAttemptId appAttemptId = containerId.getApplicationAttemptId();
    ApplicationId appId = appAttemptId.getApplicationId();
    UserGroupInformation callerUGI = getCallerUgi(appId,
        AuditConstants.GET_CONTAINER_REPORT);
    RMApp application = verifyUserAccessForRMApp(appId, callerUGI,
        AuditConstants.GET_CONTAINER_REPORT, ApplicationAccessType.VIEW_APP,
        false);
    boolean allowAccess = checkAccess(callerUGI, application.getUser(),
        ApplicationAccessType.VIEW_APP, application);
    GetContainerReportResponse response = null;
    if (allowAccess) {
        RMAppAttempt appAttempt = application.getAppAttempts().get(appAttemptId);
        if (appAttempt == null) {
            throw new ApplicationAttemptNotFoundException(
                "ApplicationAttempt with id '" + appAttemptId +
                "' doesn't exist in RM.");
        }
        RMContainer rmContainer = this.rmContext.getScheduler().getRMContainer(
            containerId);
        if (rmContainer == null) {
            throw new ContainerNotFoundException("Container with id '" + containerId
                + "' doesn't exist in RM.");
        }
        response = GetContainerReportResponse.newInstance(rmContainer
            .createContainerReport());
    } else {
        throw new YarnException("User " + callerUGI.getShortUserName()
            + " does not have privilege to see this application " + appId);
    }
    return response;
}
/**
 * Requests the report for a known container through ClientRMService and checks
 * that the returned report carries the requested ContainerId. An
 * ApplicationNotFoundException from the service is treated as a test failure.
 * NOTE(review): createRMService()/recordFactory come from the enclosing test
 * class and presumably pre-register application 123456 attempt 1 — confirm there.
 */
@Test
public void testGetContainerReport() throws YarnException, IOException {
    ClientRMService rmService = createRMService();
    GetContainerReportRequest request = recordFactory
        .newRecordInstance(GetContainerReportRequest.class);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(
        ApplicationId.newInstance(123456, 1), 1);
    ContainerId containerId = ContainerId.newContainerId(attemptId, 1);
    request.setContainerId(containerId);
    try {
        GetContainerReportResponse response = rmService
            .getContainerReport(request);
        Assert.assertEquals(containerId, response.getContainerReport()
            .getContainerId());
    } catch (ApplicationNotFoundException ex) {
        Assert.fail(ex.getMessage());
    }
}
/**
 * Reacts to a selection change in the pooling-parameter table: shows the
 * description of the parameter at the (clamped) row index and, when the
 * selected column is the value column (index 2), seeds the cell with "true".
 * An index of -1 (no selection) is ignored.
 */
public void poolingRowChange( int idx ) {
  if ( idx == -1 ) {
    return;
  }
  // Clamp the index into the valid range of poolingParameters.
  int clamped = Math.max( 0, Math.min( idx, BaseDatabaseMeta.poolingParameters.length - 1 ) );
  poolingDescription.setValue( BaseDatabaseMeta.poolingParameters[ clamped ].getDescription() );
  XulTreeRow row = poolParameterTree.getRootChildren().getItem( clamped ).getRow();
  if ( row.getSelectedColumnIndex() == 2 ) {
    row.addCellText( 0, "true" );
  }
}
// NOTE(review): this test body is empty, so poolingRowChange() is effectively
// untested — TODO add assertions (description update, clamping, value-cell
// seeding) or remove the placeholder.
@Test
public void testPoolingRowChange() throws Exception {
}
/**
 * Handles a client's WatchTopicList command: creates (or reuses) a
 * TopicListWatcher for {@code watcherId} and replies with the current list of
 * topics matching {@code topicsPattern} plus its hash. If the client-supplied
 * {@code topicsHash} already matches, an empty list is returned instead.
 *
 * Error paths (all release {@code lookupSemaphore} and send an error response):
 * pattern evaluation disabled / pattern too long; a duplicate in-flight or
 * failed watcher registration for the same id; watcher initialization failure.
 */
public void handleWatchTopicList(NamespaceName namespaceName, long watcherId, long requestId,
        Pattern topicsPattern, String topicsHash, Semaphore lookupSemaphore) {
    if (!enableSubscriptionPatternEvaluation
            || topicsPattern.pattern().length() > maxSubscriptionPatternLength) {
        String msg = "Unable to create topic list watcher: ";
        if (!enableSubscriptionPatternEvaluation) {
            msg += "Evaluating subscription patterns is disabled.";
        } else {
            msg += "Pattern longer than maximum: " + maxSubscriptionPatternLength;
        }
        log.warn("[{}] {} on namespace {}", connection.toString(), msg, namespaceName);
        connection.getCommandSender().sendErrorResponse(requestId, ServerError.NotAllowedError, msg);
        lookupSemaphore.release();
        return;
    }
    CompletableFuture<TopicListWatcher> watcherFuture = new CompletableFuture<>();
    // putIfAbsent guards against two concurrent registrations for the same id.
    CompletableFuture<TopicListWatcher> existingWatcherFuture =
        watchers.putIfAbsent(watcherId, watcherFuture);
    if (existingWatcherFuture != null) {
        if (existingWatcherFuture.isDone() && !existingWatcherFuture.isCompletedExceptionally()) {
            // Same id, watcher already created successfully: answer from it.
            TopicListWatcher watcher = existingWatcherFuture.getNow(null);
            log.info("[{}] Watcher with the same id is already created:"
                + " watcherId={}, watcher={}", connection.toString(), watcherId, watcher);
            watcherFuture = existingWatcherFuture;
        } else {
            // There was an early request to create a watcher with the same watcherId. This can happen
            // when client timeout is lower the broker timeouts. We need to wait until the previous
            // watcher creation request either completes or fails.
            log.warn("[{}] Watcher with id is already present on the connection,"
                + " consumerId={}", connection.toString(), watcherId);
            ServerError error;
            if (!existingWatcherFuture.isDone()) {
                error = ServerError.ServiceNotReady;
            } else {
                // Previous attempt failed: clean it up so a retry can succeed.
                error = ServerError.UnknownError;
                watchers.remove(watcherId, existingWatcherFuture);
            }
            connection.getCommandSender().sendErrorResponse(requestId, error,
                "Topic list watcher is already present on the connection");
            lookupSemaphore.release();
            return;
        }
    } else {
        initializeTopicsListWatcher(watcherFuture, namespaceName, watcherId, topicsPattern);
    }
    CompletableFuture<TopicListWatcher> finalWatcherFuture = watcherFuture;
    finalWatcherFuture.thenAccept(watcher -> {
        List<String> topicList = watcher.getMatchingTopics();
        String hash = TopicList.calculateHash(topicList);
        // Client already has this exact list: send only the hash.
        if (hash.equals(topicsHash)) {
            topicList = Collections.emptyList();
        }
        if (log.isDebugEnabled()) {
            log.debug(
                "[{}] Received WatchTopicList for namespace [//{}] by {}",
                connection.toString(), namespaceName, requestId);
        }
        connection.getCommandSender().sendWatchTopicListSuccess(requestId, watcherId, hash, topicList);
        lookupSemaphore.release();
    })
    .exceptionally(ex -> {
        log.warn("[{}] Error WatchTopicList for namespace [//{}] by {}",
            connection.toString(), namespaceName, requestId);
        connection.getCommandSender().sendErrorResponse(requestId,
            BrokerServiceException.getClientErrorCode(
                new BrokerServiceException.ServerMetadataException(ex)),
            ex.getMessage());
        // Remove the failed future so a subsequent watch attempt can retry.
        watchers.remove(watcherId, finalWatcherFuture);
        lookupSemaphore.release();
        return null;
    });
}
/**
 * Happy path for handleWatchTopicList: once the (mocked) topic-list future
 * completes, the semaphore is released, the watcher is registered as a
 * persistent-topic listener, and a success response with the list's hash and
 * contents is sent for requestId=7 / watcherId=13.
 */
@Test
public void testCommandWatchSuccessResponse() {
    topicListService.handleWatchTopicList(
        NamespaceName.get("tenant/ns"),
        13,
        7,
        Pattern.compile("persistent://tenant/ns/topic\\d"),
        null,
        lookupSemaphore);
    List<String> topics = Collections.singletonList("persistent://tenant/ns/topic1");
    String hash = TopicList.calculateHash(topics);
    topicListFuture.complete(topics);
    Assert.assertEquals(1, lookupSemaphore.availablePermits());
    verify(topicResources).registerPersistentTopicListener(
        eq(NamespaceName.get("tenant/ns")), any(TopicListService.TopicListWatcher.class));
    verify(connection.getCommandSender()).sendWatchTopicListSuccess(7, 13, hash, topics);
}
/**
 * Resolves the client address from the X-Forwarded-For values of the exchange.
 * With a non-empty forwarded list, picks the entry {@code maxTrustedIndex}
 * positions from the end (clamped to the first entry) and wraps it with port 0;
 * otherwise falls back to the default remote-address resolver.
 */
@Override
public InetSocketAddress resolve(ServerWebExchange exchange) {
    List<String> forwarded = extractXForwardedValues(exchange);
    if (forwarded.isEmpty()) {
        return defaultRemoteIpResolver.resolve(exchange);
    }
    // Trust at most maxTrustedIndex hops counted from the end of the chain.
    int trustedIndex = Math.max(0, forwarded.size() - maxTrustedIndex);
    return new InetSocketAddress(forwarded.get(trustedIndex), 0);
}
/**
 * With an unlimited trusted-hop count, the resolver must return the first
 * (left-most, i.e. originating) address of the X-Forwarded-For chain.
 */
@Test
public void trustAllReturnsFirstForwardedIp() {
    InetSocketAddress resolved = trustAll.resolve(buildExchange(oneTwoThreeBuilder()));
    assertThat(resolved.getHostName()).isEqualTo("0.0.0.1");
}
/**
 * Drops the Iceberg table from the Hive metastore; when {@code purge} is set,
 * also deletes the table's data/metadata files via CatalogUtil.dropTableData.
 *
 * @return true when the table was dropped, false for an invalid identifier or
 *         a table that does not exist
 * @throws RuntimeException wrapping TException (metastore failure) or
 *         InterruptedException (interrupt flag is restored first)
 */
@Override
public boolean dropTable(TableIdentifier identifier, boolean purge) {
    if (!isValidIdentifier(identifier)) {
        return false;
    }
    String database = identifier.namespace().level(0);
    TableOperations ops = newTableOps(identifier);
    TableMetadata lastMetadata = null;
    if (purge) {
        // Snapshot the current metadata BEFORE dropping, so the data files can
        // still be located afterwards; a missing metadata file downgrades the
        // drop to non-purge instead of failing.
        try {
            lastMetadata = ops.current();
        } catch (NotFoundException e) {
            LOG.warn(
                "Failed to load table metadata for table: {}, continuing drop without purge",
                identifier, e);
        }
    }
    try {
        clients.run(client -> {
            client.dropTable(database, identifier.name(),
                false /* do not delete data */,
                false /* throw NoSuchObjectException if the table doesn't exist */);
            return null;
        });
        if (purge && lastMetadata != null) {
            CatalogUtil.dropTableData(ops.io(), lastMetadata);
        }
        LOG.info("Dropped table: {}", identifier);
        return true;
    } catch (NoSuchTableException | NoSuchObjectException e) {
        LOG.info("Skipping drop, table does not exist: {}", identifier, e);
        return false;
    } catch (TException e) {
        throw new RuntimeException("Failed to drop " + identifier, e);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted in call to dropTable", e);
    }
}
/**
 * Checks the CURRENT_SCHEMA HMS table parameter: it is written for a
 * normal-sized schema, and omitted once the schema JSON grows past the HMS
 * parameter size limit (600 extra columns push it over 32672 chars).
 * The table is dropped in a finally block so reruns start clean.
 */
@Test
public void testSetCurrentSchema() throws Exception {
    Schema schema = getTestSchema();
    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
    try {
        Table table = catalog.buildTable(tableIdent, schema).create();
        assertThat(hmsTableParameters())
            .containsEntry(CURRENT_SCHEMA, SchemaParser.toJson(table.schema()));
        // add many new fields to make the schema json string exceed the limit
        UpdateSchema updateSchema = table.updateSchema();
        for (int i = 0; i < 600; i++) {
            updateSchema.addColumn("new_col_" + i, Types.StringType.get());
        }
        updateSchema.commit();
        assertThat(SchemaParser.toJson(table.schema()).length()).isGreaterThan(32672);
        assertThat(hmsTableParameters()).doesNotContainKey(CURRENT_SCHEMA);
    } finally {
        catalog.dropTable(tableIdent);
    }
}
public String send() throws MailException { try { return doSend(); } catch (MessagingException e) { if (e instanceof SendFailedException) { // 当地址无效时,显示更加详细的无效地址信息 final Address[] invalidAddresses = ((SendFailedException) e).getInvalidAddresses(); final String msg = StrUtil.format("Invalid Addresses: {}", ArrayUtil.toString(invalidAddresses)); throw new MailException(msg, e); } throw new MailException(e); } }
/**
 * Manual (disabled) test: sends an HTML mail with an attachment whose file
 * name exceeds 60 characters, to exercise long-attachment-name handling.
 * Needs a configured mail account and the local file below, hence @Disabled.
 */
@Test
@Disabled
public void sendWithLongNameFileTest() {
    // test with an attachment name longer than 60 characters
    JakartaMailUtil.send("hutool@foxmail.com", "测试", "<h1>邮件来自Hutool测试</h1>", true,
        FileUtil.file("d:/6-LongLong一阶段平台建设周报2018.3.12-3.16.xlsx"));
}
/**
 * Deletes the mail account with the given id and evicts its cache entry.
 *
 * Fails when the account does not exist, or when any mail template still
 * references it (MAIL_ACCOUNT_RELATE_TEMPLATE_EXISTS).
 */
@Override
@CacheEvict(value = RedisKeyConstants.MAIL_ACCOUNT, key = "#id")
public void deleteMailAccount(Long id) {
    // validate that the account exists
    validateMailAccountExists(id);
    // refuse deletion while templates still reference this account
    if (mailTemplateService.getMailTemplateCountByAccountId(id) > 0) {
        throw exception(MAIL_ACCOUNT_RELATE_TEMPLATE_EXISTS);
    }
    // delete the row
    mailAccountMapper.deleteById(id);
}
/**
 * Deleting a non-existent mail account must raise MAIL_ACCOUNT_NOT_EXISTS.
 */
@Test
public void testDeleteMailAccount_notExists() {
    // prepare parameters
    Long id = randomLongId();
    // invoke and assert the expected service exception
    assertServiceException(() -> mailAccountService.deleteMailAccount(id), MAIL_ACCOUNT_NOT_EXISTS);
}
/**
 * Loads the lightweight state (id + last-modified) of a config row keyed by
 * dataId/group/tenant. A blank tenant is normalized to the empty string.
 *
 * @return the state wrapper, or null when no matching row exists
 * @throws CannotGetJdbcConnectionException rethrown after FATAL logging so
 *         callers can distinguish connectivity loss from "not found"
 */
@Override
public ConfigInfoStateWrapper findConfigInfoState(final String dataId, final String group,
        final String tenant) {
    String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
    try {
        return this.jt.queryForObject(
            "SELECT id,data_id,group_id,tenant_id,gmt_modified FROM config_info WHERE data_id=? AND group_id=? AND tenant_id=?",
            new Object[] {dataId, group, tenantTmp}, CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER);
    } catch (EmptyResultDataAccessException e) {
        // Indicates that the data does not exist, returns null.
        return null;
    } catch (CannotGetJdbcConnectionException e) {
        LogUtil.FATAL_LOG.error("[db-error] " + e.toString(), e);
        throw e;
    }
}
/**
 * Covers findConfigInfoState's three paths: a found row (fields copied from
 * the mapper result), EmptyResultDataAccessException (returns null), and
 * CannotGetJdbcConnectionException (rethrown to the caller).
 */
@Test
void testFindConfigInfoState() {
    String dataId = "dataId1324";
    String group = "group23546";
    String tenant = "tenant13245";
    // mock select config state
    ConfigInfoStateWrapper mockedConfig = new ConfigInfoStateWrapper();
    mockedConfig.setLastModified(2345678L);
    mockedConfig.setId(23456789098765L);
    when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}),
        eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenReturn(mockedConfig);
    // execute, returns the mocked object
    ConfigInfoStateWrapper configInfoStateWrapper =
        externalConfigInfoPersistService.findConfigInfoState(dataId, group, tenant);
    // expect id and last-modified to be passed through
    assertEquals(mockedConfig.getId(), configInfoStateWrapper.getId());
    assertEquals(mockedConfig.getLastModified(), configInfoStateWrapper.getLastModified());
    // mock EmptyResultDataAccessException
    when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}),
        eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenThrow(new EmptyResultDataAccessException(1));
    // expect null when the row is missing
    assertNull(externalConfigInfoPersistService.findConfigInfoState(dataId, group, tenant));
    // mock CannotGetJdbcConnectionException
    when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}),
        eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))).thenThrow(new CannotGetJdbcConnectionException("mock exp"));
    // expect the connection exception to propagate
    try {
        externalConfigInfoPersistService.findConfigInfoState(dataId, group, tenant);
        // NOTE(review): assertFalse(true) is used as a fail(); Assertions.fail(...) would be clearer.
        assertFalse(true);
    } catch (Exception e) {
        assertTrue(e instanceof CannotGetJdbcConnectionException);
        assertTrue(e.getMessage().endsWith("mock exp"));
    }
}
/**
 * Reads the byte at the current reader index and advances the index by one.
 * checkReadableBytes0(1) validates first — presumably throwing when fewer than
 * one readable byte remains or the buffer was released (confirm in the
 * enclosing buffer class).
 */
@Override
public byte readByte() {
    checkReadableBytes0(1);
    int i = readerIndex;
    byte b = _getByte(i);
    readerIndex = i + 1;
    return b;
}
/**
 * Reading from a released buffer must fail with IllegalReferenceCountException.
 */
@Test
public void testReadByteAfterRelease() {
    assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readByte());
}
/**
 * Returns whether {@code networkService} speaks plain (non-TLS) HTTP.
 *
 * A service is plain HTTP only when it advertises no SSL versions AND either
 * (a) its lower-cased name is listed as plain-HTTP in
 * IS_PLAIN_HTTP_BY_KNOWN_WEB_SERVICE_NAME, or (b) the name is unknown to that
 * map but the service is recognized as a web service.
 */
public static boolean isPlainHttp(NetworkService networkService) {
    checkNotNull(networkService);
    boolean webService = isWebService(networkService);
    boolean doesNotSupportAnySslVersion = networkService.getSupportedSslVersionsCount() == 0;
    String lowerCaseName = Ascii.toLowerCase(networkService.getServiceName());
    if (IS_PLAIN_HTTP_BY_KNOWN_WEB_SERVICE_NAME.containsKey(lowerCaseName)) {
        // Known service name: the map decides, SSL support still disqualifies.
        return IS_PLAIN_HTTP_BY_KNOWN_WEB_SERVICE_NAME.getOrDefault(lowerCaseName, false)
            && doesNotSupportAnySslVersion;
    }
    return webService && doesNotSupportAnySslVersion;
}
/**
 * A service named "radan-http" with no SSL versions must be classified as
 * plain HTTP.
 */
@Test
public void isPlainHttp_whenRadanHttpService_returnsTrue() {
    NetworkService radanHttp =
        NetworkService.newBuilder().setServiceName("radan-http").build();
    assertThat(NetworkServiceUtils.isPlainHttp(radanHttp)).isTrue();
}
/**
 * Returns the proxy instance held in {@code _templateProxy}. Presumably the
 * proxy records method invocations for this template (see the enclosing class)
 * — confirm there; this accessor only exposes the field.
 */
public T getRecordingProxy() {
    return _templateProxy;
}
/**
 * Setting null with SetMode.REMOVE_OPTIONAL_IF_NULL on a *required* field must
 * be rejected with IllegalArgumentException (that mode only tolerates null for
 * optional fields).
 */
@Test(expectedExceptions = IllegalArgumentException.class)
public void testSimpleSetRemoveOptionalIfNullOnRequiredFieldFail() {
    makeOne().getRecordingProxy().setFooRequired(null, SetMode.REMOVE_OPTIONAL_IF_NULL);
}
/**
 * Releases (ONLINE) or withdraws (OFFLINE) a workflow definition.
 *
 * Dispatches to onlineWorkflowDefinition / offlineWorkflowDefinition based on
 * {@code releaseState}; any other state is a caller error
 * (IllegalArgumentException). Returns a successful Result(true) otherwise.
 */
@Operation(summary = "release", description = "RELEASE_PROCESS_DEFINITION_NOTES")
@Parameters({
    @Parameter(name = "name", description = "PROCESS_DEFINITION_NAME", required = true, schema = @Schema(implementation = String.class)),
    @Parameter(name = "code", description = "PROCESS_DEFINITION_CODE", required = true, schema = @Schema(implementation = long.class, example = "123456789")),
    @Parameter(name = "releaseState", description = "PROCESS_DEFINITION_RELEASE", required = true, schema = @Schema(implementation = ReleaseState.class)),
})
@PostMapping(value = "/{code}/release")
@ResponseStatus(HttpStatus.OK)
@ApiException(RELEASE_PROCESS_DEFINITION_ERROR)
public Result<Boolean> releaseProcessDefinition(
        @Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
        @Parameter(name = "projectCode", description = "PROJECT_CODE", required = true) @PathVariable long projectCode,
        @PathVariable(value = "code", required = true) long workflowDefinitionCode,
        @RequestParam(value = "releaseState", required = true) ReleaseState releaseState) {
    switch (releaseState) {
        case ONLINE:
            processDefinitionService.onlineWorkflowDefinition(loginUser, projectCode, workflowDefinitionCode);
            break;
        case OFFLINE:
            processDefinitionService.offlineWorkflowDefinition(loginUser, projectCode, workflowDefinitionCode);
            break;
        default:
            throw new IllegalArgumentException(
                "The releaseState " + releaseState + " is illegal, please check it.");
    }
    return Result.success(true);
}
/**
 * Releasing with OFFLINE must delegate to offlineWorkflowDefinition and return
 * a successful Result. (Removed an unused local `result` map that was populated
 * via putMsg but never asserted — dead code.)
 */
@Test
public void testReleaseProcessDefinition() {
    long projectCode = 1L;
    long id = 1L;
    Mockito.doNothing().when(processDefinitionService)
        .offlineWorkflowDefinition(user, projectCode, id);
    Result<Boolean> response = processDefinitionController.releaseProcessDefinition(
        user, projectCode, id, ReleaseState.OFFLINE);
    Assertions.assertTrue(response != null && response.isSuccess());
}
/**
 * Returns the UUID used for authorization checks: {@code authUuid} for
 * sub-portfolios (qualifier SUBVIEW), the entity's own {@code uuid} otherwise.
 */
public String getAuthUuid() {
    return Qualifiers.SUBVIEW.equals(qualifier) ? authUuid : uuid;
}
/**
 * For a SUBVIEW (sub-portfolio) entity, getAuthUuid() must return the
 * authUuid field rather than the entity's own uuid.
 */
@Test
void getAuthUuid_whenEntityIsSubportfolio_shouldReturnAuthUuid() {
    PortfolioDto portfolioDto = new PortfolioDto();
    portfolioDto.qualifier = Qualifiers.SUBVIEW;
    portfolioDto.authUuid = "authUuid";
    portfolioDto.setUuid("uuid");
    assertThat(portfolioDto.getAuthUuid()).isEqualTo("authUuid");
}
/**
 * Creates a serializer for protobuf message class {@code clazz} registered
 * under {@code typeId}. An anonymous subclass is instantiated — presumably
 * because ProtobufSerializer's constructor is not directly invokable /
 * captures the generic type via subclassing; confirm in ProtobufSerializer.
 */
@Nonnull
public static <T extends GeneratedMessageV3> ProtobufSerializer<T> from(@Nonnull Class<T> clazz, int typeId) {
    return new ProtobufSerializer<>(clazz, typeId) {
    };
}
/**
 * Round-trip check: a protobuf Person serialized and then deserialized through
 * a ProtobufSerializer must compare equal to the original message.
 */
@Test
public void when_serializes_then_isAbleToDeserialize() {
    // Given
    Person original = Person.newBuilder().setName("Joe").setAge(18).build();
    StreamSerializer<Person> serializer = ProtobufSerializer.from(Person.class, 1);
    // When
    Person transformed = deserialize(serializer, serialize(serializer, original));
    // Then
    assertThat(transformed).isEqualTo(original);
}
/**
 * Returns a copy of {@code elements} with the first element removed. Inputs of
 * length 0 or 1 yield a fresh empty array of the same component type.
 *
 * @throws NullPointerException if {@code elements} is null
 */
@SuppressWarnings("unchecked")
public static <T> T[] tail(final T[] elements) {
    if (elements == null) {
        throw new NullPointerException();
    }
    if (elements.length > 1) {
        return Arrays.copyOfRange(elements, 1, elements.length);
    }
    // Preserve the runtime component type for the empty result.
    return (T[]) Array.newInstance(elements.getClass().getComponentType(), 0);
}
/**
 * Iterables.tail must drop the first element, return empty arrays for
 * single-element and empty inputs, and reject null with NullPointerException.
 */
@Test
public void should_get_tail() {
    assertThat(Iterables.tail(new Integer[]{1, 2}), is(new Integer[] {2}));
    assertThat(Iterables.tail(new Integer[1]), is(new Integer[0]));
    assertThat(Iterables.tail(new Integer[0]), is(new Integer[0]));
    assertThrows(NullPointerException.class, () -> Iterables.tail(null));
}
/**
 * Handles a consumer-group heartbeat: creates/updates the member, refreshes
 * subscription metadata and bumps the group epoch when subscriptions changed
 * or the metadata refresh deadline passed, recomputes the target assignment
 * when the group epoch moved ahead of the assignment epoch, reconciles the
 * member towards that target, and builds the response (including the full
 * assignment for full requests or when the member's partitions changed).
 * All state changes are accumulated as CoordinatorRecords in the result.
 *
 * @throws ApiException e.g. when the group is full or lookups fail
 */
private CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeat(
    String groupId,
    String memberId,
    int memberEpoch,
    String instanceId,
    String rackId,
    int rebalanceTimeoutMs,
    String clientId,
    String clientHost,
    List<String> subscribedTopicNames,
    String assignorName,
    List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions
) throws ApiException {
    final long currentTimeMs = time.milliseconds();
    final List<CoordinatorRecord> records = new ArrayList<>();

    // Get or create the consumer group.
    boolean createIfNotExists = memberEpoch == 0;
    final ConsumerGroup group = getOrMaybeCreateConsumerGroup(groupId, createIfNotExists, records);
    throwIfConsumerGroupIsFull(group, memberId);

    // Get or create the member.
    if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString();
    final ConsumerGroupMember member;
    if (instanceId == null) {
        member = getOrMaybeSubscribeDynamicConsumerGroupMember(
            group,
            memberId,
            memberEpoch,
            ownedTopicPartitions,
            createIfNotExists,
            false
        );
    } else {
        member = getOrMaybeSubscribeStaticConsumerGroupMember(
            group,
            memberId,
            memberEpoch,
            instanceId,
            ownedTopicPartitions,
            createIfNotExists,
            false,
            records
        );
    }

    // 1. Create or update the member. If the member is new or has changed, a
    // ConsumerGroupMemberMetadataValue record is written to the __consumer_offsets partition to
    // persist the change. If the subscriptions have changed, the subscription metadata is updated
    // and persisted by writing a ConsumerGroupPartitionMetadataValue record to the
    // __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have
    // changed, and persisted by writing a ConsumerGroupMetadataValue record to the partition.
    ConsumerGroupMember updatedMember = new ConsumerGroupMember.Builder(member)
        .maybeUpdateInstanceId(Optional.ofNullable(instanceId))
        .maybeUpdateRackId(Optional.ofNullable(rackId))
        .maybeUpdateRebalanceTimeoutMs(ofSentinel(rebalanceTimeoutMs))
        .maybeUpdateServerAssignorName(Optional.ofNullable(assignorName))
        .maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames))
        .setClientId(clientId)
        .setClientHost(clientHost)
        .setClassicMemberMetadata(null)
        .build();

    boolean bumpGroupEpoch = hasMemberSubscriptionChanged(
        groupId,
        member,
        updatedMember,
        records
    );

    int groupEpoch = group.groupEpoch();
    Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata();
    Map<String, Integer> subscribedTopicNamesMap = group.subscribedTopicNames();
    SubscriptionType subscriptionType = group.subscriptionType();

    if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
        // The subscription metadata is updated in two cases:
        // 1) The member has updated its subscriptions;
        // 2) The refresh deadline has been reached.
        subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember);
        subscriptionMetadata = group.computeSubscriptionMetadata(
            subscribedTopicNamesMap,
            metadataImage.topics(),
            metadataImage.cluster()
        );

        // Count the heartbeating member even before it is materialized in the group.
        int numMembers = group.numMembers();
        if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) {
            numMembers++;
        }

        subscriptionType = ModernGroup.subscriptionType(
            subscribedTopicNamesMap,
            numMembers
        );

        if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
            log.info("[GroupId {}] Computed new subscription metadata: {}.",
                groupId, subscriptionMetadata);
            bumpGroupEpoch = true;
            records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
        }

        if (bumpGroupEpoch) {
            groupEpoch += 1;
            records.add(newConsumerGroupEpochRecord(groupId, groupEpoch));
            log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch);
            metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME);
        }

        group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch);
    }

    // 2. Update the target assignment if the group epoch is larger than the target assignment
    // epoch. The delta between the existing and the new target assignment is persisted to the
    // partition.
    final int targetAssignmentEpoch;
    final Assignment targetAssignment;

    if (groupEpoch > group.assignmentEpoch()) {
        targetAssignment = updateTargetAssignment(
            group,
            groupEpoch,
            member,
            updatedMember,
            subscriptionMetadata,
            subscriptionType,
            records
        );
        targetAssignmentEpoch = groupEpoch;
    } else {
        targetAssignmentEpoch = group.assignmentEpoch();
        targetAssignment = group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId());
    }

    // 3. Reconcile the member's assignment with the target assignment if the member is not
    // fully reconciled yet.
    updatedMember = maybeReconcile(
        groupId,
        updatedMember,
        group::currentPartitionEpoch,
        targetAssignmentEpoch,
        targetAssignment,
        ownedTopicPartitions,
        records
    );

    scheduleConsumerGroupSessionTimeout(groupId, memberId);

    // Prepare the response.
    ConsumerGroupHeartbeatResponseData response = new ConsumerGroupHeartbeatResponseData()
        .setMemberId(updatedMember.memberId())
        .setMemberEpoch(updatedMember.memberEpoch())
        .setHeartbeatIntervalMs(consumerGroupHeartbeatIntervalMs(groupId));

    // The assignment is only provided in the following cases:
    // 1. The member sent a full request. It does so when joining or rejoining the group with zero
    //    as the member epoch; or on any errors (e.g. timeout). We use all the non-optional fields
    //    (rebalanceTimeoutMs, subscribedTopicNames and ownedTopicPartitions) to detect a full
    //    request as those must be set in a full request.
    // 2. The member's assignment has been updated.
    boolean isFullRequest = memberEpoch == 0 || (rebalanceTimeoutMs != -1 && subscribedTopicNames != null && ownedTopicPartitions != null);
    if (isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) {
        response.setAssignment(createConsumerGroupResponseAssignment(updatedMember));
    }

    return new CoordinatorResult<>(records, response);
}
/**
 * A heartbeat that widens the member's subscription (foo -> foo,bar) must bump
 * the group epoch to 11, run the assignor, return the new two-topic assignment
 * in the response, and emit the full record sequence: member subscription,
 * subscription metadata, group epoch, target assignment + epoch, and current
 * assignment.
 */
@Test
public void testUpdatingSubscriptionTriggersNewTargetAssignment() {
    String groupId = "fooup";
    // Use a static member id as it makes the test easier.
    String memberId = Uuid.randomUuid().toString();

    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";
    Uuid barTopicId = Uuid.randomUuid();
    String barTopicName = "bar";

    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    // Group starts at epoch 10 with the member owning all six foo partitions.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .withConsumerGroupAssignors(Collections.singletonList(assignor))
        .withMetadataImage(new MetadataImageBuilder()
            .addTopic(fooTopicId, fooTopicName, 6)
            .addTopic(barTopicId, barTopicName, 3)
            .addRacks()
            .build())
        .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)
            .withMember(new ConsumerGroupMember.Builder(memberId)
                .setState(MemberState.STABLE)
                .setMemberEpoch(10)
                .setPreviousMemberEpoch(9)
                .setClientId(DEFAULT_CLIENT_ID)
                .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
                .setSubscribedTopicNames(Collections.singletonList("foo"))
                .setServerAssignorName("range")
                .setAssignedPartitions(mkAssignment(
                    mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5)))
                .build())
            .withAssignment(memberId, mkAssignment(
                mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5)))
            .withAssignmentEpoch(10))
        .build();

    assignor.prepareGroupAssignment(new GroupAssignment(
        Collections.singletonMap(memberId, new MemberAssignmentImpl(mkAssignment(
            mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5),
            mkTopicAssignment(barTopicId, 0, 1, 2)
        )))
    ));

    // Heartbeat at epoch 10 with the widened subscription.
    CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> result = context.consumerGroupHeartbeat(
        new ConsumerGroupHeartbeatRequestData()
            .setGroupId(groupId)
            .setMemberId(memberId)
            .setMemberEpoch(10)
            .setSubscribedTopicNames(Arrays.asList("foo", "bar")));

    assertResponseEquals(
        new ConsumerGroupHeartbeatResponseData()
            .setMemberId(memberId)
            .setMemberEpoch(11)
            .setHeartbeatIntervalMs(5000)
            .setAssignment(new ConsumerGroupHeartbeatResponseData.Assignment()
                .setTopicPartitions(Arrays.asList(
                    new ConsumerGroupHeartbeatResponseData.TopicPartitions()
                        .setTopicId(fooTopicId)
                        .setPartitions(Arrays.asList(0, 1, 2, 3, 4, 5)),
                    new ConsumerGroupHeartbeatResponseData.TopicPartitions()
                        .setTopicId(barTopicId)
                        .setPartitions(Arrays.asList(0, 1, 2))
                ))),
        result.response()
    );

    ConsumerGroupMember expectedMember = new ConsumerGroupMember.Builder(memberId)
        .setState(MemberState.STABLE)
        .setMemberEpoch(11)
        .setPreviousMemberEpoch(10)
        .setClientId(DEFAULT_CLIENT_ID)
        .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
        .setSubscribedTopicNames(Arrays.asList("foo", "bar"))
        .setServerAssignorName("range")
        .setAssignedPartitions(mkAssignment(
            mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5),
            mkTopicAssignment(barTopicId, 0, 1, 2)))
        .build();

    List<CoordinatorRecord> expectedRecords = Arrays.asList(
        GroupCoordinatorRecordHelpers.newConsumerGroupMemberSubscriptionRecord(groupId, expectedMember),
        GroupCoordinatorRecordHelpers.newConsumerGroupSubscriptionMetadataRecord(groupId, new HashMap<String, TopicMetadata>() {
            {
                put(fooTopicName, new TopicMetadata(fooTopicId, fooTopicName, 6, mkMapOfPartitionRacks(6)));
                put(barTopicName, new TopicMetadata(barTopicId, barTopicName, 3, mkMapOfPartitionRacks(3)));
            }
        }),
        GroupCoordinatorRecordHelpers.newConsumerGroupEpochRecord(groupId, 11),
        GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentRecord(groupId, memberId, mkAssignment(
            mkTopicAssignment(fooTopicId, 0, 1, 2, 3, 4, 5),
            mkTopicAssignment(barTopicId, 0, 1, 2)
        )),
        GroupCoordinatorRecordHelpers.newConsumerGroupTargetAssignmentEpochRecord(groupId, 11),
        GroupCoordinatorRecordHelpers.newConsumerGroupCurrentAssignmentRecord(groupId, expectedMember)
    );

    assertRecordsEquals(expectedRecords, result.records());
}
/**
 * Looks up a property under the given key, falling back first to the
 * dash-to-camelCase variant and then to the camelCase-to-dash variant, so both
 * "stream-caching-enabled" and "streamCachingEnabled" resolve to the same value.
 *
 * @return the stored value, or null when no spelling of the key is present
 */
@Override
public String getProperty(String key) {
    String value = super.getProperty(key);
    if (value != null) {
        return value;
    }
    value = super.getProperty(StringHelper.dashToCamelCase(key));
    if (value != null) {
        return value;
    }
    return super.getProperty(StringHelper.camelCaseToDash(key));
}
/**
 * CamelCaseOrderedProperties must preserve insertion order of keys/values and
 * resolve lookups under both dash-style and camelCase spellings of the same
 * key; unknown keys fall back to the supplied default.
 */
@Test
public void testOrdered() {
    Properties prop = new CamelCaseOrderedProperties();
    prop.setProperty("hello-world", "Hi Camel");
    prop.setProperty("camel.main.stream-caching-enabled", "true");
    assertEquals(2, prop.size());
    // keys iterate in insertion order
    Iterator it = prop.keySet().iterator();
    assertEquals("hello-world", it.next());
    assertEquals("camel.main.stream-caching-enabled", it.next());
    // values iterate in insertion order
    it = prop.values().iterator();
    assertEquals("Hi Camel", it.next());
    assertEquals("true", it.next());
    // dash and camelCase spellings resolve to the same entry
    assertEquals("Hi Camel", prop.getProperty("hello-world"));
    assertEquals("Hi Camel", prop.getProperty("helloWorld"));
    assertEquals("true", prop.getProperty("camel.main.stream-caching-enabled"));
    assertEquals("true", prop.getProperty("camel.main.streamCachingEnabled"));
    assertEquals("Davs", prop.getProperty("bye-world", "Davs"));
}
/**
 * Reports whether any content node's "may have merges pending in global space"
 * state differs from the previously recorded state. Returns false while
 * distributor updates are still incomplete; a node with no previous state
 * counts as changed.
 */
public boolean statsHaveChanged() {
    if (!aggregatedStats.hasUpdatesFromAllDistributors()) {
        return false;
    }
    for (ContentNodeStats nodeStats : aggregatedStats.getStats()) {
        int nodeIndex = nodeStats.getNodeIndex();
        boolean current = mayHaveMergesPendingInGlobalSpace(nodeIndex);
        Boolean previous = prevMayHaveMergesPendingInGlobalSpace(nodeIndex);
        // No prior observation, or a flipped flag, both count as a change.
        if (previous == null || previous != current) {
            return true;
        }
    }
    return false;
}
/**
 * Re-aggregating identical per-node pending-bucket stats must not report a
 * change.
 */
@Test
void stats_have_not_changed_if_no_nodes_have_changed_state() {
    Fixture f = Fixture.fromStats(stats().bucketsPending(0).bucketsPending(1));
    f.newAggregatedStats(stats().bucketsPending(0).bucketsPending(1));
    assertFalse(f.statsHaveChanged());
}
/**
 * Returns the default JDBC options for this database type, each keyed by
 * "&lt;pluginId&gt;.&lt;option&gt;": a fetch size of 500 and cursor-based fetching
 * enabled.
 */
@Override
public Map<String, String> getDefaultOptions() {
    String prefix = getPluginId() + ".";
    Map<String, String> defaults = new HashMap<>();
    defaults.put( prefix + "defaultFetchSize", "500" );
    defaults.put( prefix + "useCursorFetch", "true" );
    return defaults;
}
/**
 * getDefaultOptions() must return exactly two entries, each prefixed with the
 * configured plugin id. Fixed: the loop used the Java `assert` keyword, which
 * is a no-op unless the JVM runs with -ea, so the prefix check never actually
 * ran under a normal test JVM — replaced with assertTrue.
 */
@Test
public void testGetDefaultOptions() {
    MySQLDatabaseMeta mySQLDatabaseMeta = new MySQLDatabaseMeta();
    mySQLDatabaseMeta.setPluginId( "foobar" );
    Map<String, String> map = mySQLDatabaseMeta.getDefaultOptions();
    assertNotNull( map );
    assertEquals( 2, map.size() );
    for ( String key : map.keySet() ) {
        assertTrue( key.startsWith( "foobar." ) );
    }
}
public static int sizeOfUnsignedVarint(int value) { // Protocol buffers varint encoding is variable length, with a minimum of 1 byte // (for zero). The values themselves are not important. What's important here is // any leading zero bits are dropped from output. We can use this leading zero // count w/ fast intrinsic to calc the output length directly. // Test cases verify this matches the output for loop logic exactly. // return (38 - leadingZeros) / 7 + leadingZeros / 32; // The above formula provides the implementation, but the Java encoding is suboptimal // when we have a narrow range of integers, so we can do better manually int leadingZeros = Integer.numberOfLeadingZeros(value); int leadingZerosBelow38DividedBy7 = ((38 - leadingZeros) * 0b10010010010010011) >>> 19; return leadingZerosBelow38DividedBy7 + (leadingZeros >>> 5); }
/**
 * Cross-checks the branch-free sizeOfUnsignedVarint against the classic
 * shift-by-7 loop, sampling the full non-negative int range with stride 13
 * (~165M comparisons — this test is deliberately exhaustive and slow).
 */
@Test
public void testSizeOfUnsignedVarint() {
    // The old well-known implementation for sizeOfUnsignedVarint
    IntFunction<Integer> simpleImplementation = (int value) -> {
        int bytes = 1;
        while ((value & 0xffffff80) != 0L) {
            bytes += 1;
            value >>>= 7;
        }
        return bytes;
    };
    // compare the full range of values
    for (int i = 0; i < Integer.MAX_VALUE && i >= 0; i += 13) {
        final int actual = ByteUtils.sizeOfUnsignedVarint(i);
        final int expected = simpleImplementation.apply(i);
        assertEquals(expected, actual);
    }
}
/**
 * Publishes controller metrics for a metadata update: a delta for LOG_DELTA
 * manifests, a full snapshot for SNAPSHOT manifests. Publish failures are
 * routed to the fault handler rather than thrown, and {@code prevImage} is
 * advanced to {@code newImage} in both cases (finally), so a failed publish
 * does not stall subsequent updates.
 */
@Override
public void onMetadataUpdate(
    MetadataDelta delta,
    MetadataImage newImage,
    LoaderManifest manifest
) {
    switch (manifest.type()) {
        case LOG_DELTA:
            try {
                publishDelta(delta);
            } catch (Throwable e) {
                faultHandler.handleFault("Failed to publish controller metrics from log delta " +
                    " ending at offset " + manifest.provenance().lastContainedOffset(), e);
            } finally {
                prevImage = newImage;
            }
            break;
        case SNAPSHOT:
            try {
                publishSnapshot(newImage);
            } catch (Throwable e) {
                faultHandler.handleFault("Failed to publish controller metrics from " +
                    manifest.provenance().snapshotName(), e);
            } finally {
                prevImage = newImage;
            }
            break;
    }
}
/**
 * Feeds IMAGE1 to the publisher as a snapshot manifest and checks that the
 * controller metrics reflect its contents (topic/partition/offline/imbalance
 * counts) with no metadata errors recorded.
 */
@Test
public void testLoadSnapshot() {
    try (TestEnv env = new TestEnv()) {
        // Rewrite IMAGE1 into a delta so it can be published as a snapshot.
        MetadataDelta delta = new MetadataDelta(MetadataImage.EMPTY);
        ImageReWriter writer = new ImageReWriter(delta);
        IMAGE1.write(writer, new ImageWriterOptions.Builder().
            setMetadataVersion(delta.image().features().metadataVersion()).
            build());
        env.publisher.onMetadataUpdate(delta, IMAGE1, fakeManifest(true));
        assertEquals(0, env.metrics.activeBrokerCount());
        assertEquals(3, env.metrics.globalTopicCount());
        assertEquals(7, env.metrics.globalPartitionCount());
        assertEquals(3, env.metrics.offlinePartitionCount());
        assertEquals(4, env.metrics.preferredReplicaImbalanceCount());
        assertEquals(0, env.metrics.metadataErrorCount());
    }
}
/**
 * Returns the underlying {@code Status} instance held by this wrapper —
 * the same reference, not a copy.
 */
@Override
public Status unwrap() {
    return status;
}
/**
 * unwrap() must hand back the exact Status instance the response wraps
 * (reference identity, not just equality).
 */
@Test
void unwrap() {
    assertThat(response.unwrap()).isSameAs(status);
}
/**
 * Invokes with mock support, driven by the method-level "mock" URL parameter:
 * - empty value: plain invocation, no mocking;
 * - value starting with "force": skip the invocation and return the mock
 *   result directly;
 * - anything else (fail-mock): invoke normally and fall back to the mock
 *   result on non-business RpcExceptions (thrown, or carried in the Result —
 *   the latter per issue #4585); business exceptions always propagate.
 *
 * @throws RpcException business exceptions from the target invoker
 */
@Override
public Result invoke(Invocation invocation) throws RpcException {
    Result result;

    String value = getUrl().getMethodParameter(
            RpcUtils.getMethodName(invocation), MOCK_KEY, Boolean.FALSE.toString())
        .trim();
    if (ConfigUtils.isEmpty(value)) {
        // no mock
        result = this.invoker.invoke(invocation);
    } else if (value.startsWith(FORCE_KEY)) {
        if (logger.isWarnEnabled()) {
            logger.warn(
                CLUSTER_FAILED_MOCK_REQUEST,
                "force mock",
                "",
                "force-mock: " + RpcUtils.getMethodName(invocation) + " force-mock enabled , url : "
                    + getUrl());
        }
        // force:direct mock
        result = doMockInvoke(invocation, null);
    } else {
        // fail-mock
        try {
            result = this.invoker.invoke(invocation);

            // fix:#4585
            if (result.getException() != null && result.getException() instanceof RpcException) {
                RpcException rpcException = (RpcException) result.getException();
                if (rpcException.isBiz()) {
                    throw rpcException;
                } else {
                    result = doMockInvoke(invocation, rpcException);
                }
            }

        } catch (RpcException e) {
            // Business exceptions are never masked by the mock fallback.
            if (e.isBiz()) {
                throw e;
            }

            if (logger.isWarnEnabled()) {
                logger.warn(
                    CLUSTER_FAILED_MOCK_REQUEST,
                    "failed to mock invoke",
                    "",
                    "fail-mock: " + RpcUtils.getMethodName(invocation) + " fail-mock enabled , url : "
                        + getUrl(),
                    e);
            }
            result = doMockInvoke(invocation, e);
        }
    }
    return result;
}
/**
 * With force-mock configured for getListString as 'return ["hi","hi2"]' and
 * the real invoker set to fail, the cluster invoker must return the mocked
 * two-element list without touching the real implementation.
 */
@SuppressWarnings("unchecked")
@Test
void testMockInvokerFromOverride_Invoke_check_ListString() {
    URL url = URL.valueOf("remote://1.2.3.4/" + IHelloService.class.getName())
        .addParameter(
            REFER_KEY,
            URL.encode(PATH_KEY + "=" + IHelloService.class.getName() + "&"
                + "getListString.mock=force:return [\"hi\",\"hi2\"]"))
        .addParameter("invoke_return_error", "true");
    Invoker<IHelloService> cluster = getClusterInvoker(url);
    // Configured with mock
    RpcInvocation invocation = new RpcInvocation();
    invocation.setMethodName("getListString");
    Result ret = cluster.invoke(invocation);
    List<String> rl = (List<String>) ret.getValue();
    Assertions.assertEquals(2, rl.size());
    Assertions.assertEquals("hi", rl.get(0));
}
/**
 * Serializes this Router Advertisement into its wire format: the fixed-size
 * header followed by any neighbor-discovery options.
 */
@Override
public byte[] serialize() {
    // Serialize the options first (if any) so the total buffer size is known up front.
    final byte[] encodedOptions = this.options.hasOptions() ? this.options.serialize() : null;
    final int optionsBytes = encodedOptions == null ? 0 : encodedOptions.length;

    final byte[] buffer = new byte[HEADER_LENGTH + optionsBytes];
    final ByteBuffer writer = ByteBuffer.wrap(buffer);
    writer.put(this.currentHopLimit);
    // M flag occupies bit 7, O flag bit 6; the remaining bits stay zero.
    writer.put((byte) ((this.mFlag & 0x1) << 7 | (this.oFlag & 0x1) << 6));
    writer.putShort(routerLifetime);
    writer.putInt(reachableTime);
    writer.putInt(retransmitTimer);
    if (encodedOptions != null) {
        writer.put(encodedOptions);
    }
    return buffer;
}
// Round-trips a fully-populated Router Advertisement (flags, lifetimes, one LL-address
// option) and compares the serialized bytes against the expected wire image.
@Test public void testSerialize() { RouterAdvertisement ra = new RouterAdvertisement(); ra.setCurrentHopLimit((byte) 3); ra.setMFlag((byte) 1); ra.setOFlag((byte) 1); ra.setRouterLifetime((short) 0x258); ra.setReachableTime(0x3e8); ra.setRetransmitTimer(0x1f4); ra.addOption(NeighborDiscoveryOptions.TYPE_TARGET_LL_ADDRESS, MAC_ADDRESS.toBytes()); assertArrayEquals(ra.serialize(), bytePacket); }
// Returns a PubsubIO read that decodes each message payload as a UTF-8 string,
// using the UTF-8 string coder for the resulting PCollection.
public static Read<String> readStrings() { return Read.newBuilder( (PubsubMessage message) -> new String(message.getPayload(), StandardCharsets.UTF_8)) .setCoder(StringUtf8Coder.of()) .build(); }
// A parse function that always throws must route the raw message (annotated with the
// exception class/message attributes) to the configured dead-letter topic and leave
// the pipeline's output empty.
@Test public void testFailedParseWithDeadLetterConfigured() { ByteString data = ByteString.copyFrom("Hello, World!".getBytes(StandardCharsets.UTF_8)); RuntimeException exception = new RuntimeException("Some error message"); ImmutableList<IncomingMessage> expectedReads = ImmutableList.of( IncomingMessage.of( com.google.pubsub.v1.PubsubMessage.newBuilder().setData(data).build(), 1234L, 0, UUID.randomUUID().toString(), UUID.randomUUID().toString())); ImmutableList<OutgoingMessage> expectedWrites = ImmutableList.of( OutgoingMessage.of( com.google.pubsub.v1.PubsubMessage.newBuilder() .setData(data) .putAttributes("exceptionClassName", exception.getClass().getName()) .putAttributes("exceptionMessage", exception.getMessage()) .putAttributes("pubsubMessageId", "<null>") .build(), 1234L, null, null)); clientFactory = PubsubTestClient.createFactoryForPullAndPublish( SUBSCRIPTION, TOPIC, CLOCK, 60, expectedReads, expectedWrites, ImmutableList.of()); PCollection<String> read = pipeline.apply( PubsubIO.readStrings() .fromSubscription(SUBSCRIPTION.getPath()) .withDeadLetterTopic(TOPIC.getPath()) .withClock(CLOCK) .withClientFactory(clientFactory) .withCoderAndParseFn( StringUtf8Coder.of(), SimpleFunction.fromSerializableFunctionWithOutputType( message -> { throw exception; }, TypeDescriptors.strings()))); PAssert.that(read).empty(); pipeline.run(); }
/**
 * Feeds a job-level checkpoint failure into the tolerable-failure counter, which
 * fails the job via the callback once the threshold is exceeded.
 */
void handleJobLevelCheckpointException(
        CheckpointProperties checkpointProperties,
        CheckpointException exception,
        long checkpointId) {
    // Savepoint failures never count toward the tolerable checkpoint-failure threshold.
    if (checkpointProperties.isSavepoint()) {
        return;
    }
    checkFailureAgainstCounter(exception, checkpointId, failureCallback::failJob);
}
// Ignorable failure reasons and repeated reports for an already-counted checkpoint id
// must not increment the failure counter, so the fail-job callback never fires.
@Test void testIgnoreOneCheckpointRepeatedlyCountMultiTimes() { TestFailJobCallback callback = new TestFailJobCallback(); CheckpointFailureManager failureManager = new CheckpointFailureManager(2, callback); CheckpointProperties checkpointProperties = forCheckpoint(NEVER_RETAIN_AFTER_TERMINATION); failureManager.handleJobLevelCheckpointException( checkpointProperties, new CheckpointException(CheckpointFailureReason.CHECKPOINT_DECLINED), 1); failureManager.handleJobLevelCheckpointException( checkpointProperties, new CheckpointException(CheckpointFailureReason.CHECKPOINT_DECLINED), 2); // ignore this failureManager.handleJobLevelCheckpointException( checkpointProperties, new CheckpointException(CheckpointFailureReason.JOB_FAILOVER_REGION), 3); // ignore repeatedly report from one checkpoint failureManager.handleJobLevelCheckpointException( checkpointProperties, new CheckpointException(CheckpointFailureReason.CHECKPOINT_DECLINED), 2); assertThat(callback.getInvokeCounter()).isZero(); }
// Creates a builder for struct values conforming to the given SQL struct schema.
public static Builder builder(final SqlStruct schema) { return new Builder(schema); }
// Setting a field by a negative index must be rejected with a DataException naming the index.
@Test public void shouldThrowOnSettingNegativeFieldIndex() { // When: final DataException e = assertThrows( DataException.class, () -> KsqlStruct.builder(SCHEMA) .set(-1, Optional.empty()) ); // Then: assertThat(e.getMessage(), containsString("Invalid field index: -1")); }
// Creates (or fetches) the Hedge registered under the given name, using the registry's
// default configuration and no tags.
@Override public Hedge hedge(final String name) { return hedge(name, getDefaultConfig(), emptyMap()); }
// A null hedge name must be rejected with an NPE even when an explicit config is supplied.
@Test public void hedgeNewWithNullNameAndNonDefaultConfig() { exception.expect(NullPointerException.class); exception.expectMessage(NAME_MUST_NOT_BE_NULL); HedgeRegistry registry = HedgeRegistry.builder().withDefaultConfig(config).build(); registry.hedge(null, config); }
// Deletes every file stored for the current execution and reports their URIs in the output.
@Override public PurgeCurrentExecutionFiles.Output run(RunContext runContext) throws Exception { return Output.builder() .uris(runContext.storage().deleteExecutionFiles()) .build(); }
// Stores an execution file, runs the purge task, and checks that the deleted URIs are reported.
// NOTE(review): the assertion expects 2 URIs although only one file is explicitly stored —
// presumably the storage layer tracks an extra execution artifact; confirm against the runtime.
@Test void run() throws Exception { // create a file var flow = Flow.builder() .namespace("namespace") .id("flowId") .build(); var runContext = runContextFactory.of(flow, Map.of( "execution", Map.of("id", "executionId"), "task", Map.of("id", "taskId"), "taskrun", Map.of("id", "taskRunId") )); var file = runContext.workingDir().createFile("test.txt", "Hello World".getBytes()); runContext.storage().putFile(file.toFile()); var purge = PurgeCurrentExecutionFiles.builder() .build(); var output = purge.run(runContext); assertThat(output.getUris().size(), is(2)); }
// Looks up the client by first resolving which client manager owns the id, then delegating.
@Override public Client getClient(String clientId) { return getClientManagerById(clientId).getClient(clientId); }
// A connection-scoped id must be routed to the connection-based manager and to no other manager.
@Test void testChooseConnectionClient() { delegate.getClient(connectionId); verify(connectionBasedClientManager).getClient(connectionId); verify(ephemeralIpPortClientManager, never()).getClient(connectionId); verify(persistentIpPortClientManager, never()).getClient(connectionId); }
// Test-visible accessor for the computed file-group -> partition-index assignment map.
Map<HoodieFileGroupId, List<Integer>> getFileGroupToPartitions() { return fileGroupToPartitions; }
// Checks both the bucket counts per file group and the exact (deterministic) partition
// spread produced by the bucketized bloom-check partitioner for three file groups.
@Test public void testAssignmentCorrectness() { HoodieFileGroupId fg1 = new HoodieFileGroupId("p1", "f1"); HoodieFileGroupId fg2 = new HoodieFileGroupId("p1", "f2"); HoodieFileGroupId fg3 = new HoodieFileGroupId("p1", "f3"); Map<HoodieFileGroupId, Long> fileToComparisons = new HashMap<HoodieFileGroupId, Long>() { { put(fg1, 40L); put(fg2, 35L); put(fg3, 20L); } }; BucketizedBloomCheckPartitioner p = new BucketizedBloomCheckPartitioner(4, fileToComparisons, 10); Map<HoodieFileGroupId, List<Integer>> assignments = p.getFileGroupToPartitions(); assertEquals(4, assignments.get(fg1).size(), "f1 should have 4 buckets"); assertEquals(4, assignments.get(fg2).size(), "f2 should have 4 buckets"); assertEquals(2, assignments.get(fg3).size(), "f3 should have 2 buckets"); assertArrayEquals(new Integer[] {0, 0, 1, 3}, assignments.get(fg1).toArray(), "f1 spread across 3 partitions"); assertArrayEquals(new Integer[] {2, 2, 3, 1}, assignments.get(fg2).toArray(), "f2 spread across 3 partitions"); assertArrayEquals(new Integer[] {1, 0}, assignments.get(fg3).toArray(), "f3 spread across 2 partitions"); }
/**
 * Returns the smallest possible weight per unit distance, i.e. the weight at the
 * maximum achievable speed and maximum achievable priority, plus the constant
 * distance influence.
 */
@Override
public double calcMinWeightPerDistance() {
    final double bestSpeed = maxSpeedCalc.calcMax() / SPEED_CONV;
    final double bestPriority = maxPrioCalc.calcMax();
    return 1d / bestSpeed / bestPriority + distanceInfluence;
}
// The minimum weight must reflect the maximum achievable priority: global LIMITs are
// capped at 1, special-case MULTIPLY maxima above 1 are included, and special-case
// reductions below 1 are ignored.
@Test public void testMaxPriority() { double maxSpeed = 155; assertEquals(maxSpeed, avSpeedEnc.getMaxOrMaxStorableDecimal(), 0.1); assertEquals(1d / maxSpeed / 0.5 * 3.6, createWeighting(createSpeedCustomModel(avSpeedEnc). addToPriority(If("true", MULTIPLY, "0.5"))).calcMinWeightPerDistance(), 1.e-6); // ignore too big limit assertEquals(1d / maxSpeed / 1.0 * 3.6, createWeighting(createSpeedCustomModel(avSpeedEnc). addToPriority(If("true", LIMIT, "2.0"))).calcMinWeightPerDistance(), 1.e-6); // priority bigger 1 is fine (if CustomModel not in query) assertEquals(1d / maxSpeed / 2.0 * 3.6, createWeighting(createSpeedCustomModel(avSpeedEnc). addToPriority(If("true", MULTIPLY, "3.0")). addToPriority(If("true", LIMIT, "2.0"))).calcMinWeightPerDistance(), 1.e-6); assertEquals(1d / maxSpeed / 1.5 * 3.6, createWeighting(createSpeedCustomModel(avSpeedEnc). addToPriority(If("true", MULTIPLY, "1.5"))).calcMinWeightPerDistance(), 1.e-6); // pick maximum priority from value even if this is for a special case assertEquals(1d / maxSpeed / 3.0 * 3.6, createWeighting(createSpeedCustomModel(avSpeedEnc). addToPriority(If("road_class == SERVICE", MULTIPLY, "3.0"))).calcMinWeightPerDistance(), 1.e-6); // do NOT pick maximum priority when it is for a special case assertEquals(1d / maxSpeed / 1.0 * 3.6, createWeighting(createSpeedCustomModel(avSpeedEnc). addToPriority(If("road_class == SERVICE", MULTIPLY, "0.5"))).calcMinWeightPerDistance(), 1.e-6); }
// Returns the precomputed footprint of this merkle tree (cached at construction time).
@Override public long footprint() { return footprint; }
// Trees of equal depth must report equal footprints regardless of how many leaves were updated,
// i.e. the footprint depends on the tree structure, not on the inserted entry count.
@Test public void testFootprint() { MerkleTree merkleTree1 = new ArrayMerkleTree(3); MerkleTree merkleTree2 = new ArrayMerkleTree(3); for (int i = 0; i < 10; i++) { merkleTree1.updateAdd(i, i); } for (int i = 0; i < 100; i++) { merkleTree2.updateAdd(i, i); } assertEquals(merkleTree2.footprint(), merkleTree1.footprint()); }
@Subscribe public void onChatMessage(ChatMessage chatMessage) { if (chatMessage.getType() != ChatMessageType.TRADE && chatMessage.getType() != ChatMessageType.GAMEMESSAGE && chatMessage.getType() != ChatMessageType.SPAM && chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION) { return; } String message = chatMessage.getMessage(); Matcher matcher = KILLCOUNT_PATTERN.matcher(message); if (matcher.find()) { final String boss = matcher.group("boss"); final int kc = Integer.parseInt(matcher.group("kc")); final String pre = matcher.group("pre"); final String post = matcher.group("post"); if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post)) { unsetKc(boss); return; } String renamedBoss = KILLCOUNT_RENAMES .getOrDefault(boss, boss) // The config service doesn't support keys with colons in them .replace(":", ""); if (boss != renamedBoss) { // Unset old TOB kc unsetKc(boss); unsetPb(boss); unsetKc(boss.replace(":", ".")); unsetPb(boss.replace(":", ".")); // Unset old story mode unsetKc("Theatre of Blood Story Mode"); unsetPb("Theatre of Blood Story Mode"); } setKc(renamedBoss, kc); // We either already have the pb, or need to remember the boss for the upcoming pb if (lastPb > -1) { log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb); if (renamedBoss.contains("Theatre of Blood")) { // TOB team size isn't sent in the kill message, but can be computed from varbits int tobTeamSize = tobTeamSize(); lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players"); } else if (renamedBoss.contains("Tombs of Amascut")) { // TOA team size isn't sent in the kill message, but can be computed from varbits int toaTeamSize = toaTeamSize(); lastTeamSize = toaTeamSize == 1 ? 
"Solo" : (toaTeamSize + " players"); } final double pb = getPb(renamedBoss); // If a raid with a team size, only update the pb if it is lower than the existing pb // so that the pb is the overall lowest of any team size if (lastTeamSize == null || pb == 0 || lastPb < pb) { log.debug("Setting overall pb (old: {})", pb); setPb(renamedBoss, lastPb); } if (lastTeamSize != null) { log.debug("Setting team size pb: {}", lastTeamSize); setPb(renamedBoss + " " + lastTeamSize, lastPb); } lastPb = -1; lastTeamSize = null; } else { lastBossKill = renamedBoss; lastBossTime = client.getTickCount(); } return; } matcher = DUEL_ARENA_WINS_PATTERN.matcher(message); if (matcher.find()) { final int oldWins = getKc("Duel Arena Wins"); final int wins = matcher.group(2).equals("one") ? 1 : Integer.parseInt(matcher.group(2).replace(",", "")); final String result = matcher.group(1); int winningStreak = getKc("Duel Arena Win Streak"); int losingStreak = getKc("Duel Arena Lose Streak"); if (result.equals("won") && wins > oldWins) { losingStreak = 0; winningStreak += 1; } else if (result.equals("were defeated")) { losingStreak += 1; winningStreak = 0; } else { log.warn("unrecognized duel streak chat message: {}", message); } setKc("Duel Arena Wins", wins); setKc("Duel Arena Win Streak", winningStreak); setKc("Duel Arena Lose Streak", losingStreak); } matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message); if (matcher.find()) { int losses = matcher.group(1).equals("one") ? 
1 : Integer.parseInt(matcher.group(1).replace(",", "")); setKc("Duel Arena Losses", losses); } matcher = KILL_DURATION_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = NEW_PB_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = RAIDS_PB_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = RAIDS_DURATION_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = HS_PB_PATTERN.matcher(message); if (matcher.find()) { int floor = Integer.parseInt(matcher.group("floor")); String floortime = matcher.group("floortime"); String floorpb = matcher.group("floorpb"); String otime = matcher.group("otime"); String opb = matcher.group("opb"); String pb = MoreObjects.firstNonNull(floorpb, floortime); setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb)); if (otime != null) { pb = MoreObjects.firstNonNull(opb, otime); setPb("Hallowed Sepulchre", timeStringToSeconds(pb)); } } matcher = HS_KC_FLOOR_PATTERN.matcher(message); if (matcher.find()) { int floor = Integer.parseInt(matcher.group(1)); int kc = Integer.parseInt(matcher.group(2).replaceAll(",", "")); setKc("Hallowed Sepulchre Floor " + floor, kc); } matcher = HS_KC_GHC_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1).replaceAll(",", "")); setKc("Hallowed Sepulchre", kc); } matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1).replaceAll(",", "")); setKc("Hunter Rumours", kc); } if (lastBossKill != null && lastBossTime != client.getTickCount()) { lastBossKill = null; lastBossTime = -1; } matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message); if (matcher.find()) { String item = matcher.group(1); int petId = findPet(item); if (petId != -1) { final List<Integer> petList = new ArrayList<>(getPetList()); if (!petList.contains(petId)) { log.debug("New pet added: {}/{}", item, petId); petList.add(petId); 
setPetList(petList); } } } matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1)); setKc("Guardians of the Rift", kc); } }
// A kill-count message followed by a fight-duration message whose "Personal best" has no
// trailing period must still record the PB, for both whole-second and precise time formats.
@Test public void testPersonalBestNoTrailingPeriod() { final String FIGHT_DURATION = "Fight duration: <col=ff0000>0:59</col>. Personal best: 0:55"; final String FIGHT_DURATION_PRECISE = "Fight duration: <col=ff0000>0:59.20</col>. Personal best: 0:55.40"; // This sets lastBoss ChatMessage chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your Zulrah kill count is: <col=ff0000>4</col>.", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); chatMessage = new ChatMessage(null, GAMEMESSAGE, "", FIGHT_DURATION, null, 0); chatCommandsPlugin.onChatMessage(chatMessage); verify(configManager).setRSProfileConfiguration("personalbest", "zulrah", 55.0); // Precise times chatMessage = new ChatMessage(null, GAMEMESSAGE, "", FIGHT_DURATION_PRECISE, null, 0); chatCommandsPlugin.onChatMessage(chatMessage); verify(configManager).setRSProfileConfiguration("personalbest", "zulrah", 55.4); }
// Serializes the credentials provider to its string form via the shared serialize() helper.
public static String serializeAwsCredentialsProvider(AwsCredentialsProvider credentialsProvider) { return serialize(credentialsProvider); }
// Serializing a provider type unknown to the serializer must fail with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class) public void testFailOnAwsCredentialsProviderSerialization() { AwsCredentialsProvider awsCredentialsProvider = new UnknownAwsCredentialsProvider(); AwsSerializableUtils.serializeAwsCredentialsProvider(awsCredentialsProvider); }
// Test hook: returns the pool registered for the group, or the shared global pool
// when the group id is unknown.
@OnlyForTest protected static ThreadPoolExecutor getExecutor(String groupId) { return GROUP_THREAD_POOLS.getOrDefault(groupId, GlobalThreadPoolHolder.INSTANCE); }
// Two unregistered group ids must both resolve to the same global fallback executor.
@Test public void testInvalidGroup() { ThreadPoolExecutor executor1 = ThreadPoolsFactory.getExecutor(GROUP_ID_001); ThreadPoolExecutor executor = ThreadPoolsFactory.getExecutor("test"); Assert.assertEquals(executor1, executor); }
// Acquires a distributed lock for every e164 (in list order), runs the task, then releases all
// acquired locks best-effort in a finally block. Acquire/release run on the dedicated lock
// executor because the lock client blocks while holding locks, which would pin a virtual
// thread (see amazon-dynamodb-lock-client#97). InterruptedException during acquisition is
// surfaced as a CompletionException from join().
// NOTE(review): locks are taken in caller-supplied order — concurrent callers must pass
// consistently ordered lists to avoid deadlock; confirm callers sort the e164s.
public void withLock(final List<String> e164s, final Runnable task, final Executor lockAcquisitionExecutor) { if (e164s.isEmpty()) { throw new IllegalArgumentException("List of e164s to lock must not be empty"); } final List<LockItem> lockItems = new ArrayList<>(e164s.size()); try { // Offload the acquire/release tasks to the dedicated lock acquisition executor. The lock client performs blocking // operations while holding locks which forces thread pinning when this method runs on a virtual thread. // https://github.com/awslabs/amazon-dynamodb-lock-client/issues/97 CompletableFuture.runAsync(() -> { for (final String e164 : e164s) { try { lockItems.add(lockClient.acquireLock(AcquireLockOptions.builder(e164) .withAcquireReleasedLocksConsistently(true) .build())); } catch (final InterruptedException e) { throw new CompletionException(e); } } }, lockAcquisitionExecutor).join(); task.run(); } finally { CompletableFuture.runAsync(() -> { for (final LockItem lockItem : lockItems) { lockClient.releaseLock(ReleaseLockOptions.builder(lockItem) .withBestEffort(true) .build()); } }, lockAcquisitionExecutor).join(); } }
// An empty e164 list must be rejected up front, before the task ever runs.
@Test void withLockEmptyList() { final Runnable task = mock(Runnable.class); assertThrows(IllegalArgumentException.class, () -> accountLockManager.withLock(Collections.emptyList(), () -> {}, executor)); verify(task, never()).run(); }
// FEEL number(from, grouping separator, decimal separator): validates that the separators are
// among the allowed characters (space/dot/comma for grouping, dot/comma for decimal) and are
// distinct, strips the grouping separator, normalizes the decimal separator to '.', then
// parses the result to BigDecimal. Any failure is reported as an InvalidParametersEvent.
// NOTE(review): replaceAll is used with "\\" + separator as a regex (escaped literal); the
// final 'from != null' guard is redundant since 'from' was already null-checked above.
public FEELFnResult<BigDecimal> invoke(@ParameterName("from") String from, @ParameterName("grouping separator") String group, @ParameterName("decimal separator") String decimal) { if ( from == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null")); } if ( group != null && !group.equals( " " ) && !group.equals( "." ) && !group.equals( "," ) ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "group", "not a valid one, can only be one of: dot ('.'), comma (','), space (' ') ")); } if ( decimal != null ) { if (!decimal.equals( "." ) && !decimal.equals( "," )) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "decimal", "not a valid one, can only be one of: dot ('.'), comma (',') ")); } else if (group != null && decimal.equals( group )) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "decimal", "cannot be the same as parameter 'group' ")); } } if ( group != null ) { from = from.replaceAll( "\\" + group, "" ); } if ( decimal != null ) { from = from.replaceAll( "\\" + decimal, "." ); } BigDecimal result = NumberEvalHelper.getBigDecimalOrNull(from ); if( from != null && result == null ) { // conversion failed return FEELFnResult.ofError( new InvalidParametersEvent(Severity.ERROR, "unable to calculate final number result" ) ); } else { return FEELFnResult.ofResult( result ); } }
// Parses numbers with space/dot grouping and dot/comma decimal separator combinations.
@Test void invokeNumberWithGroupAndDecimalChar() { FunctionTestUtil.assertResult(numberFunction.invoke("9 876.124", " ", "."), BigDecimal.valueOf(9876.124)); FunctionTestUtil.assertResult(numberFunction.invoke("9 876 000.124", " ", "."), BigDecimal.valueOf(9876000.124)); FunctionTestUtil.assertResult(numberFunction.invoke("9.876.000,124", ".", ","), BigDecimal.valueOf(9876000.124)); }
/**
 * Renders this column's fully qualified name as
 * {@code [owner.]column[.nestedAttr...]}, quoting each identifier segment
 * with its original quote characters.
 */
public String getQualifiedName() {
    final StringBuilder result = new StringBuilder();
    if (null != owner) {
        result.append(owner.getIdentifier().getValueWithQuoteCharacters()).append('.');
    }
    result.append(identifier.getValueWithQuoteCharacters());
    if (null != nestedObjectAttributes && !nestedObjectAttributes.isEmpty()) {
        nestedObjectAttributes.forEach(each -> result.append('.').append(each.getValueWithQuoteCharacters()));
    }
    return result.toString();
}
// An owner-qualified column must render as "owner.column".
@Test void assertGetQualifiedNameWithOwner() { ColumnSegment actual = new ColumnSegment(0, 0, new IdentifierValue("col")); actual.setOwner(new OwnerSegment(0, 0, new IdentifierValue("tbl"))); assertThat(actual.getQualifiedName(), is("tbl.col")); }
// Customizes the inherited pipeline: swaps the stock "codec-aggregator" handler for a
// NetFlow-specific aggregation handler that carries the sender's remote address, and
// removes the "udp-datagram" handler entirely.
@Override protected LinkedHashMap<String, Callable<? extends ChannelHandler>> getChannelHandlers(MessageInput input) { final LinkedHashMap<String, Callable<? extends ChannelHandler>> handlers = new LinkedHashMap<>(super.getChannelHandlers(input)); // Replace the default "codec-aggregator" handler with one that passes the remote address final RemoteAddressCodecAggregator aggregator = (RemoteAddressCodecAggregator) getAggregator(); handlers.replace("codec-aggregator", () -> new NetflowMessageAggregationHandler(aggregator, localRegistry)); handlers.remove("udp-datagram"); return handlers; }
// The handler map must contain the NetFlow aggregation handler under "codec-aggregator"
// and must not contain the removed "udp-datagram" handler.
@Test public void getChildChannelHandlersContainsCustomCodecAggregator() throws Exception { final LinkedHashMap<String, Callable<? extends ChannelHandler>> handlers = transport.getChannelHandlers(mock(MessageInput.class)); assertThat(handlers) .containsKey("codec-aggregator") .doesNotContainKey("udp-datagram"); final ChannelHandler channelHandler = handlers.get("codec-aggregator").call(); assertThat(channelHandler).isInstanceOf(NetflowMessageAggregationHandler.class); }
/**
 * Compiles every queued schema (and the protocol interface, when one is set)
 * into output files, in queue order with the interface last.
 */
Collection<OutputFile> compile() {
    final List<OutputFile> output = new ArrayList<>(queue.size() + 1);
    queue.forEach(schema -> output.add(compile(schema)));
    if (protocol != null) {
        output.add(compileInterface(protocol));
    }
    return output;
}
// A schema declaring several logical-type fields must compile to Java that javac accepts.
@Test void logicalTypesWithMultipleFields() throws Exception { Schema logicalTypesWithMultipleFields = new Schema.Parser() .parse(new File("src/test/resources/logical_types_with_multiple_fields.avsc")); assertCompilesWithJavaCompiler(new File(OUTPUT_DIR, "testLogicalTypesWithMultipleFields"), new SpecificCompiler(logicalTypesWithMultipleFields).compile(), true); }
/**
 * Orders task IDs by job ID first, then by task type, then by the numeric task id.
 *
 * @throws ClassCastException if {@code o} is not a {@code TaskID}
 * @throws NullPointerException if {@code o} is {@code null}
 */
@Override
public int compareTo(ID o) {
    // Cast first so a foreign type fails fast with ClassCastException, as callers expect.
    TaskID that = (TaskID) o;
    int jobComp = this.jobId.compareTo(that.jobId);
    if (jobComp != 0) {
        return jobComp;
    }
    if (this.type != that.type) {
        // Task type is more significant than the numeric id.
        return this.type.compareTo(that.type);
    }
    // Use Integer.compare instead of "this.id - that.id": the subtraction can
    // overflow for ids of opposite sign and report the wrong ordering.
    return Integer.compare(this.id, that.id);
}
// compareTo must order by job, then task type (exhaustively across all type pairs), then id,
// and must reject a JobID operand with CCE and a null operand with NPE.
@Test public void testCompareTo() { JobID jobId = new JobID("1234", 1); TaskID taskId1 = new TaskID(jobId, TaskType.REDUCE, 0); TaskID taskId2 = new TaskID(jobId, TaskType.REDUCE, 0); assertEquals("The compareTo() method returned non-zero for two equal " + "task IDs", 0, taskId1.compareTo(taskId2)); taskId2 = new TaskID(jobId, TaskType.MAP, 1); assertTrue("The compareTo() method did not weigh task type more than task " + "ID", taskId1.compareTo(taskId2) > 0); TaskType[] types = TaskType.values(); for (int i = 0; i < types.length; i++) { for (int j = 0; j < types.length; j++) { taskId1 = new TaskID(jobId, types[i], 0); taskId2 = new TaskID(jobId, types[j], 0); if (i == j) { assertEquals("The compareTo() method returned non-zero for two equal " + "task IDs", 0, taskId1.compareTo(taskId2)); } else if (i < j) { assertTrue("The compareTo() method did not order " + types[i] + " before " + types[j], taskId1.compareTo(taskId2) < 0); } else { assertTrue("The compareTo() method did not order " + types[i] + " after " + types[j], taskId1.compareTo(taskId2) > 0); } } } try { taskId1.compareTo(jobId); fail("The compareTo() method allowed comparison to a JobID object"); } catch (ClassCastException ex) { // Expected } try { taskId1.compareTo(null); fail("The compareTo() method allowed comparison to a null object"); } catch (NullPointerException ex) { // Expected } }
// Builds the endpoint provider that commits the pushed blob at the given upload location.
RegistryEndpointProvider<Void> committer(URL location) { return new Committer(location); }
// The commit endpoint must use HTTP PUT.
@Test public void testCommitter_getHttpMethod() { Assert.assertEquals("PUT", testBlobPusher.committer(mockUrl).getHttpMethod()); }
// Runs a prefix scan across every underlying store registered for this store name and
// flattens the per-store iterators into a single peeking iterator. A store that migrated
// mid-query is surfaced as a fresh InvalidStateStoreException telling the caller to
// re-discover the store's location.
// NOTE(review): the original cause 'e' is not chained into the rethrown exception —
// consider passing it through if the exception type exposes a (message, cause) constructor.
@Override public <PS extends Serializer<P>, P> KeyValueIterator<K, V> prefixScan(final P prefix, final PS prefixKeySerializer) { Objects.requireNonNull(prefix); Objects.requireNonNull(prefixKeySerializer); final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = new NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>>() { @Override public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) { try { return store.prefixScan(prefix, prefixKeySerializer); } catch (final InvalidStateStoreException e) { throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata."); } } }; final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType); return new DelegatingPeekingKeyValueIterator<>( storeName, new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction)); }
// remove() on the composite prefix-scan iterator must be unsupported (read-only view).
@Test public void shouldThrowUnsupportedOperationExceptionWhilePrefixScan() { stubOneUnderlying.put("a", "1"); stubOneUnderlying.put("b", "1"); try (final KeyValueIterator<String, String> keyValueIterator = theStore.prefixScan("a", new StringSerializer())) { assertThrows(UnsupportedOperationException.class, keyValueIterator::remove); } }
// Deserializes one Avro message: reads the writer schema from the message prefix via the
// SchemaCoder, then decodes the body with the datum reader configured for writer -> reader
// schema resolution. For JSON encoding the decoder must be re-pointed at the stream per
// message. A null input maps to a null result.
@Override public T deserialize(@Nullable byte[] message) throws IOException { if (message == null) { return null; } checkAvroInitialized(); getInputStream().setBuffer(message); Schema writerSchema = schemaCoder.readSchema(getInputStream()); Schema readerSchema = getReaderSchema(); GenericDatumReader<T> datumReader = getDatumReader(); datumReader.setSchema(writerSchema); datumReader.setExpected(readerSchema); if (getEncoding() == AvroEncoding.JSON) { ((JsonDecoder) getDecoder()).configure(getInputStream()); } return datumReader.read(null, getDecoder()); }
// A record written with the full Address schema must deserialize under a compatible reader
// schema: shared fields carry over and the reader-only optional field resolves to null.
@Test void testGenericRecordReadWithCompatibleSchema() throws IOException { RegistryAvroDeserializationSchema<GenericRecord> deserializer = new RegistryAvroDeserializationSchema<>( GenericRecord.class, SchemaBuilder.record("Address") .fields() .requiredString("street") .requiredInt("num") .optionalString("country") .endRecord(), () -> new SchemaCoder() { @Override public Schema readSchema(InputStream in) { return Address.getClassSchema(); } @Override public void writeSchema(Schema schema, OutputStream out) throws IOException { // do nothing } }); GenericRecord genericRecord = deserializer.deserialize(writeRecord(address, Address.getClassSchema())); assertThat(genericRecord.get("num")).isEqualTo(address.getNum()); assertThat(genericRecord.get("street").toString()).isEqualTo(address.getStreet()); assertThat(genericRecord.get("country")).isNull(); }
/**
 * Sums the approximate entry counts of every underlying store for this store
 * name, saturating at {@link Long#MAX_VALUE} when the running total overflows.
 */
@Override
public long approximateNumEntries() {
    long sum = 0;
    for (final ReadOnlyKeyValueStore<K, V> underlying : storeProvider.stores(storeName, storeType)) {
        sum += underlying.approximateNumEntries();
        if (sum < 0) {
            // A negative total means the signed sum overflowed; saturate.
            return Long.MAX_VALUE;
        }
    }
    return sum;
}
// Querying the entry count while a rebalance is in progress must fail with
// InvalidStateStoreException rather than returning a partial count.
@Test public void shouldThrowInvalidStoreExceptionOnApproximateNumEntriesDuringRebalance() { assertThrows(InvalidStateStoreException.class, () -> rebalancing().approximateNumEntries()); }
// Degrees -> radians for INT inputs; delegates to the Double overload, propagating null.
@Udf(description = "Converts an INT value in degrees to a value in radians") public Double radians( @UdfParameter( value = "value", description = "The value in degrees to convert to radians." ) final Integer value ) { return radians(value == null ? null : value.doubleValue()); }
// Negative degree inputs must map to negative radians across the double/int/long overloads.
@Test public void shouldHandleNegative() { assertThat(udf.radians(-180.0), closeTo(-Math.PI, 0.000000000000001)); assertThat(udf.radians(-360.0), closeTo(-2 * Math.PI, 0.000000000000001)); assertThat(udf.radians(-70.73163980890013), closeTo(-1.2345, 0.000000000000001)); assertThat(udf.radians(-114), closeTo(-1.9896753472735358, 0.000000000000001)); assertThat(udf.radians(-114L), closeTo(-1.9896753472735358, 0.000000000000001)); }
// Resolves the UuidGenerator via ServiceLoader: an explicitly configured generator class
// wins; otherwise the single generator discovered on the classpath (or the default) is used.
UuidGenerator loadUuidGenerator() { Class<? extends UuidGenerator> objectFactoryClass = options.getUuidGeneratorClass(); ClassLoader classLoader = classLoaderSupplier.get(); ServiceLoader<UuidGenerator> loader = ServiceLoader.load(UuidGenerator.class, classLoader); if (objectFactoryClass == null) { return loadSingleUuidGeneratorOrDefault(loader); } return loadSelectedUuidGenerator(loader, objectFactoryClass); }
// An explicitly configured generator class must be selected even when several
// generator implementations are available on the classpath.
@Test void test_case_7() { Options options = () -> OtherGenerator.class; UuidGeneratorServiceLoader loader = new UuidGeneratorServiceLoader( () -> new ServiceLoaderTestClassLoader(UuidGenerator.class, RandomUuidGenerator.class, IncrementingUuidGenerator.class, OtherGenerator.class, YetAnotherGenerator.class), options); assertThat(loader.loadUuidGenerator(), instanceOf(OtherGenerator.class)); }
// Writes the boxed integer to the ASN.1 output stream.
@Override public void serialize(Asn1OutputStream out, Integer obj) { out.writeInt(obj); }
// 0x31abcdef must serialize to its four big-endian bytes.
@Test public void shouldSerialize() { assertArrayEquals( new byte[] { 0x31, (byte) 0xab, (byte) 0xcd, (byte) 0xef }, serialize(new IntegerConverter(), int.class, 0x31abcdef) ); }
// Returns the owner of the open file descriptor. On Windows the domain prefix is stripped
// from the returned name; on POSIX the uid -> username mapping from the native lookup is
// cached for cacheTimeout ms to avoid repeated native calls.
// NOTE(review): the uidCache get/put pair is not synchronized here — confirm the map is a
// concurrent implementation, otherwise concurrent callers may race (stale entries are benign).
public static String getOwner(FileDescriptor fd) throws IOException { ensureInitialized(); if (Shell.WINDOWS) { String owner = Windows.getOwner(fd); owner = stripDomain(owner); return owner; } else { long uid = POSIX.getUIDforFDOwnerforOwner(fd); CachedUid cUid = uidCache.get(uid); long now = System.currentTimeMillis(); if (cUid != null && (cUid.timestamp + cacheTimeout) > now) { return cUid.username; } String user = POSIX.getUserName(uid); LOG.info("Got UserName " + user + " for UID " + uid + " from the native implementation"); cUid = new CachedUid(user, now); uidCache.put(uid, cUid); return user; } }
// Hammers getFstat from ten threads for five seconds against one shared descriptor to
// shake out races in the native stat path; any throwable from a worker fails the test.
@Test (timeout = 30000) public void testMultiThreadedFstat() throws Exception { assumeNotWindows(); final FileOutputStream fos = new FileOutputStream( new File(TEST_DIR, "testfstat")); final AtomicReference<Throwable> thrown = new AtomicReference<Throwable>(); List<Thread> statters = new ArrayList<Thread>(); for (int i = 0; i < 10; i++) { Thread statter = new Thread() { @Override public void run() { long et = Time.now() + 5000; while (Time.now() < et) { try { NativeIO.POSIX.Stat stat = NativeIO.POSIX.getFstat(fos.getFD()); assertEquals(System.getProperty("user.name"), stat.getOwner()); assertNotNull(stat.getGroup()); assertTrue(!stat.getGroup().isEmpty()); assertEquals("Stat mode field should indicate a regular file", S_IFREG, stat.getMode() & S_IFMT); } catch (Throwable t) { thrown.set(t); } } } }; statters.add(statter); statter.start(); } for (Thread t : statters) { t.join(); } fos.close(); if (thrown.get() != null) { throw new RuntimeException(thrown.get()); } }
// Records the analysis project exactly once; a second call is a programming error (ISE).
@Override public MutableAnalysisMetadataHolder setProject(Project project) { checkState(!this.project.isInitialized(), "Project has already been set"); this.project.setProperty(project); return this; }
// Setting the project a second time must fail with an ISE carrying the exact message.
@Test public void setProject_throws_ISE_when_called_twice() { AnalysisMetadataHolderImpl underTest = new AnalysisMetadataHolderImpl(editionProvider); underTest.setProject(Project.from(newPrivateProjectDto())); assertThatThrownBy(() -> underTest.setProject(Project.from(newPrivateProjectDto()))) .isInstanceOf(IllegalStateException.class) .hasMessage("Project has already been set"); }
/**
 * Computes the park duration for the n-th idle iteration: once past the park
 * threshold, the minimum period is doubled per extra iteration (left shift)
 * up to maxShift, and the result is capped at the maximum park period.
 */
long parkTime(long n) {
    final long proposedShift = n - parkThreshold;
    if (proposedShift > maxShift) {
        // Shift fully saturated: always the maximum period.
        return maxParkPeriodNs;
    }
    final long allowedShift = min(maxShift, proposedShift);
    if (proposedShift < maxShift) {
        return minParkPeriodNs << allowedShift;
    }
    // proposedShift == maxShift: shifted value may still exceed the cap.
    return min(minParkPeriodNs << allowedShift, maxParkPeriodNs);
}
// Below the shift cap, each idle step doubles the park time starting from the minimum period.
@Test public void when_proposedShiftLessThanAllowed_then_shiftProposed() { final BackoffIdleStrategy strat = new BackoffIdleStrategy(0, 0, 1, 4); assertEquals(1, strat.parkTime(0)); assertEquals(2, strat.parkTime(1)); }
// Parses the date string with the given format into a DateTime.
public static DateTime parse(CharSequence dateStr, DateFormat dateFormat) { return new DateTime(dateStr, dateFormat); }
// Lenient parsing must handle the asctime-with-zone form "Tue Jun 4 16:25:15 +0800 2019".
@Test public void parseTest6() { final String str = "Tue Jun 4 16:25:15 +0800 2019"; final DateTime dateTime = DateUtil.parse(str); assert dateTime != null; assertEquals("2019-06-04 16:25:15", dateTime.toString()); }