Dataset columns:
  focal_method — string, length 13 to 60.9k characters
  test_case — string, length 25 to 109k characters
/**
 * Executes a web-service request against the server.
 * Fails fast in medium-test mode, and checks the response for
 * authorization failures and authentication warnings before returning it.
 */
public WsResponse call(WsRequest request) {
    checkState(!globalMode.isMediumTest(), "No WS call should be made in medium test mode");
    final WsResponse result = target.wsConnector().call(request);
    failIfUnauthorized(result);
    checkAuthenticationWarnings(result);
    return result;
}
// Verifies that a 401 response from the server makes call() fail with a
// MessageException whose message tells the user to check sonar.token /
// sonar.login / sonar.password.
@Test public void call_whenInvalidCredentials_shouldFailWithMsg() { WsRequest request = newRequest(); server.stubFor(get(urlEqualTo(URL_ENDPOINT)) .willReturn(aResponse() .withStatus(401) .withBody("Invalid credentials"))); DefaultScannerWsClient client = new DefaultScannerWsClient(wsClient, /* credentials are configured */true, new GlobalAnalysisMode(new ScannerProperties(Collections.emptyMap())), analysisWarnings); assertThatThrownBy(() -> client.call(request)) .isInstanceOf(MessageException.class) .hasMessage("Not authorized. Please check the user token in the property 'sonar.token' or the credentials in the properties 'sonar.login' and 'sonar.password'."); }
/** Returns the set of log directories that contained no log data. */
public Set<String> emptyLogDirs() {
    return this.emptyLogDirs;
}
// Verifies that the EMPTY instance reports an empty set of empty log dirs.
@Test public void testEmptyLogDirsForEmpty() { assertEquals(new HashSet<>(), EMPTY.emptyLogDirs()); }
/**
 * Verifies the signature of a JWS against the given JWK set.
 *
 * <p>Returns {@code false} when the key set is empty or does not contain the
 * key id referenced by the JWS header. Only ES256 is supported; any other
 * algorithm raises {@link UnsupportedOperationException}.
 *
 * @throws IllegalArgumentException when no JWKS is supplied
 */
@SuppressWarnings("java:S2583") // defensive null check kept despite the @NonNull contract
public static boolean verify(@NonNull JWKSet jwks, @NonNull JWSObject jws) {
    if (jwks == null) {
        throw new IllegalArgumentException("no JWKS provided to verify JWS");
    }
    if (jwks.getKeys() == null || jwks.getKeys().isEmpty()) {
        return false;
    }
    var header = jws.getHeader();
    if (!JWSAlgorithm.ES256.equals(header.getAlgorithm())) {
        throw new UnsupportedOperationException("only supports ES256, found: " + header.getAlgorithm());
    }
    var signingKey = jwks.getKeyByKeyId(header.getKeyID());
    if (signingKey == null) {
        return false;
    }
    try {
        var verifierFactory = new DefaultJWSVerifierFactory();
        var verifier = verifierFactory.createJWSVerifier(header, signingKey.toECKey().toPublicKey());
        return jws.verify(verifier);
    } catch (JOSEException e) {
        throw FederationExceptions.badSignature(e);
    }
}
// Verifies that a JWS signed by a key not present in JWKS (here, one with a
// tampered signature) fails verification with false rather than an exception.
@Test void verifyUnknownKey() throws ParseException, JOSEException { var signerJwks = new ECKeyGenerator(Curve.P_256).generate(); var jws = toJws(signerJwks, "test").serialize(); jws = tamperSignature(jws); var in = JWSObject.parse(jws); // when & then assertFalse(JwsVerifier.verify(JWKS, in)); }
// REST endpoint: GET returns the history-server info, rendered as
// UTF-8-encoded JSON or XML depending on content negotiation.
@GET @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 }) public HistoryInfo get() { return getHistoryInfo(); }
// Verifies the default media type of ws/v1/history is UTF-8 JSON and that the
// payload contains exactly one element: the expected historyInfo object.
@Test public void testHSDefault() throws JSONException, Exception { WebResource r = resource(); ClientResponse response = r.path("ws").path("v1").path("history/") .get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, response.getType().toString()); JSONObject json = response.getEntity(JSONObject.class); assertEquals("incorrect number of elements", 1, json.length()); verifyHSInfo(json.getJSONObject("historyInfo"), appContext); }
/**
 * Populates the given config instance from this builder, after the parent
 * builder has applied its own fields. Only values that were explicitly set
 * (non-null, or non-empty for strings) are copied, so instance defaults are
 * preserved for unset fields.
 */
@Override
public void build(T instance) {
    super.build(instance);
    if (check != null) {
        instance.setCheck(check);
    }
    if (init != null) {
        instance.setInit(init);
    }
    if (!StringUtils.isEmpty(generic)) {
        instance.setGeneric(generic);
    }
    if (injvm != null) {
        instance.setInjvm(injvm);
    }
    if (lazy != null) {
        instance.setLazy(lazy);
    }
    if (!StringUtils.isEmpty(reconnect)) {
        instance.setReconnect(reconnect);
    }
    if (sticky != null) {
        instance.setSticky(sticky);
    }
    if (!StringUtils.isEmpty(version)) {
        instance.setVersion(version);
    }
    if (!StringUtils.isEmpty(group)) {
        instance.setGroup(group);
    }
}
// Verifies that every builder setter is copied onto the built ReferenceConfig
// and that each build() call produces a distinct instance.
@Test void build() { ReferenceBuilder builder = new ReferenceBuilder(); builder.check(true) .init(false) .generic(true) .injvm(false) .lazy(true) .reconnect("reconnect") .sticky(false) .version("version") .group("group") .id("id"); ReferenceConfig config = builder.build(); ReferenceConfig config2 = builder.build(); Assertions.assertEquals("id", config.getId()); Assertions.assertTrue(config.isCheck()); Assertions.assertFalse(config.isInit()); Assertions.assertTrue(config.isGeneric()); Assertions.assertFalse(config.isInjvm()); Assertions.assertTrue(config.getLazy()); Assertions.assertFalse(config.getSticky()); Assertions.assertEquals("reconnect", config.getReconnect()); Assertions.assertEquals("version", config.getVersion()); Assertions.assertEquals("group", config.getGroup()); Assertions.assertNotSame(config, config2); }
/**
 * Looks up a public, non-static field with the given name on {@code clazz}.
 *
 * @return the field, or {@code null} when it does not exist or is not a
 *         public instance field
 */
@Nullable
public static Field findPropertyField(Class<?> clazz, String fieldName) {
    final Field candidate;
    try {
        candidate = clazz.getField(fieldName);
    } catch (NoSuchFieldException e) {
        return null;
    }
    final int mods = candidate.getModifiers();
    final boolean publicInstanceField = Modifier.isPublic(mods) && !Modifier.isStatic(mods);
    return publicInstanceField ? candidate : null;
}
// Verifies that looking up a field that does not exist returns null.
@Test public void when_findPropertyField_nonExistent_then_returnsNull() { assertNull(findPropertyField(JavaFields.class, "nonExistentField")); }
/**
 * Builds the result rows for SHOW DIST VARIABLES: configuration properties
 * (except SQL_SHOW / SQL_SIMPLE), temporary properties, the cached connection
 * count and logging properties, optionally filtered by a LIKE pattern and
 * finally sorted by variable name.
 */
@Override
public Collection<LocalDataQueryResultRow> getRows(final ShowDistVariablesStatement sqlStatement, final ContextManager contextManager) {
    ShardingSphereMetaData metaData = contextManager.getMetaDataContexts().getMetaData();
    Collection<LocalDataQueryResultRow> result = ConfigurationPropertyKey.getKeyNames().stream()
            .filter(each -> !ConfigurationPropertyKey.SQL_SHOW.name().equals(each) && !ConfigurationPropertyKey.SQL_SIMPLE.name().equals(each))
            .map(each -> new LocalDataQueryResultRow(each.toLowerCase(), getStringResult(metaData.getProps().getValue(ConfigurationPropertyKey.valueOf(each))))).collect(Collectors.toList());
    result.addAll(TemporaryConfigurationPropertyKey.getKeyNames().stream()
            .map(each -> new LocalDataQueryResultRow(each.toLowerCase(), getStringResult(metaData.getTemporaryProps().getValue(TemporaryConfigurationPropertyKey.valueOf(each)))))
            .collect(Collectors.toList()));
    result.add(new LocalDataQueryResultRow(DistSQLVariable.CACHED_CONNECTIONS.name().toLowerCase(), connectionContext.getConnectionSize()));
    addLoggingPropsRows(metaData, result);
    if (sqlStatement.getLikePattern().isPresent()) {
        // Compile the regex once; the original compiled it inside the filter
        // lambda, i.e. once per row.
        Pattern pattern = Pattern.compile(RegexUtils.convertLikePatternToRegex(sqlStatement.getLikePattern().get()), Pattern.CASE_INSENSITIVE);
        result = result.stream().filter(each -> pattern.matcher((String) each.getCell(1)).matches()).collect(Collectors.toList());
    }
    return result.stream().sorted(Comparator.comparing(each -> each.getCell(1).toString())).collect(Collectors.toList());
}
// Verifies that the LIKE pattern "sql_%" filters the variable rows down to
// sql_show and sql_simple, returned in sorted order.
@Test void assertExecuteWithLike() { when(contextManager.getMetaDataContexts().getMetaData().getProps()).thenReturn(new ConfigurationProperties(PropertiesBuilder.build(new Property("system-log-level", "INFO")))); when(contextManager.getMetaDataContexts().getMetaData().getTemporaryProps()) .thenReturn(new TemporaryConfigurationProperties(PropertiesBuilder.build(new Property("proxy-meta-data-collector-enabled", Boolean.FALSE.toString())))); when(contextManager.getMetaDataContexts().getMetaData().getGlobalRuleMetaData()) .thenReturn(new RuleMetaData(Collections.singleton(new LoggingRule(new DefaultLoggingRuleConfigurationBuilder().build())))); ShowDistVariablesExecutor executor = new ShowDistVariablesExecutor(); executor.setConnectionContext(new DistSQLConnectionContext(mock(QueryContext.class), 1, mock(DatabaseType.class), mock(DatabaseConnectionManager.class), mock(ExecutorStatementManager.class))); Collection<LocalDataQueryResultRow> actual = executor.getRows(new ShowDistVariablesStatement("sql_%"), contextManager); assertThat(actual.size(), is(2)); Iterator<LocalDataQueryResultRow> iterator = actual.iterator(); assertThat(iterator.next().getCell(1), is("sql_show")); assertThat(iterator.next().getCell(1), is("sql_simple")); }
/**
 * Reports whether another top-level JSON element is available, i.e. the
 * parser has not yet reached the end of the document. Malformed input is
 * surfaced as {@code JsonSyntaxException}, other I/O failures as
 * {@code JsonIOException}.
 */
@Override
public boolean hasNext() {
    synchronized (lock) {
        try {
            return JsonToken.END_DOCUMENT != parser.peek();
        } catch (MalformedJsonException e) {
            throw new JsonSyntaxException(e);
        } catch (IOException e) {
            throw new JsonIOException(e);
        }
    }
}
// Verifies that an incomplete document ("[") still reports hasNext() == true
// but fails with JsonSyntaxException once next() tries to read the element.
@Test public void testIncompleteInput() { JsonStreamParser parser = new JsonStreamParser("["); assertThat(parser.hasNext()).isTrue(); assertThrows(JsonSyntaxException.class, parser::next); }
/**
 * Blocks until the job reaches a terminal state.
 * Delegates to the timed overload with a negative duration, which means
 * "wait indefinitely".
 */
@Override
public @Nullable State waitUntilFinish() {
    final Duration indefinitely = Duration.millis(-1);
    return waitUntilFinish(indefinitely);
}
// Verifies that after one waitUntilFinish() the job remembers the timestamp
// of the last message, so a second waitUntilFinish() only requests messages
// newer than that timestamp (no repeated log output).
@Test public void testWaitUntilFinishNoRepeatedLogs() throws Exception { DataflowPipelineJob job = new DataflowPipelineJob(mockDataflowClient, JOB_ID, options, null); Sleeper sleeper = new ZeroSleeper(); NanoClock nanoClock = mock(NanoClock.class); Instant separatingTimestamp = new Instant(42L); JobMessage theMessage = infoMessage(separatingTimestamp, "nothing"); MonitoringUtil mockMonitor = mock(MonitoringUtil.class); when(mockMonitor.getJobMessages(anyString(), anyLong())) .thenReturn(ImmutableList.of(theMessage)); // The Job just always reports "running" across all calls Job fakeJob = new Job(); fakeJob.setCurrentState("JOB_STATE_RUNNING"); when(mockDataflowClient.getJob(anyString())).thenReturn(fakeJob); // After waitUntilFinish the DataflowPipelineJob should record the latest message timestamp when(nanoClock.nanoTime()).thenReturn(0L).thenReturn(2000000000L); job.waitUntilFinish(Duration.standardSeconds(1), mockHandler, sleeper, nanoClock, mockMonitor); verify(mockHandler).process(ImmutableList.of(theMessage)); // Second waitUntilFinish should request jobs with `separatingTimestamp` so the monitor // will only return new messages when(nanoClock.nanoTime()).thenReturn(3000000000L).thenReturn(6000000000L); job.waitUntilFinish(Duration.standardSeconds(1), mockHandler, sleeper, nanoClock, mockMonitor); verify(mockMonitor).getJobMessages(anyString(), eq(separatingTimestamp.getMillis())); }
/** Returns the configured maximum size of this file segment in bytes. */
public long getMaxSize() {
    return this.maxSize;
}
// Verifies the max size per segment type: commit log and consume queue use
// their configured limits, while index segments are unbounded (Long.MAX_VALUE).
@Test public void fileMaxSizeTest() { FileSegment fileSegment = new PosixFileSegment( storeConfig, FileSegmentType.COMMIT_LOG, MessageStoreUtil.toFilePath(mq), 100L); Assert.assertEquals(storeConfig.getTieredStoreCommitLogMaxSize(), fileSegment.getMaxSize()); fileSegment.destroyFile(); fileSegment = new PosixFileSegment( storeConfig, FileSegmentType.CONSUME_QUEUE, MessageStoreUtil.toFilePath(mq), 100L); Assert.assertEquals(storeConfig.getTieredStoreConsumeQueueMaxSize(), fileSegment.getMaxSize()); fileSegment.destroyFile(); fileSegment = new PosixFileSegment( storeConfig, FileSegmentType.INDEX, MessageStoreUtil.toFilePath(mq), 100L); Assert.assertEquals(Long.MAX_VALUE, fileSegment.getMaxSize()); fileSegment.destroyFile(); }
// Translates a FileStoreInfo protobuf into the flat property map used by the
// cloud-configuration layer. Behavior by fs type:
// - S3: region/endpoint, optional partitioned-prefix settings (only emitted
//   when enabled), and one of four credential shapes (simple key pair,
//   assume-role, instance profile, or SDK default behavior).
// - HDFS: the raw configuration map plus an optional (deprecated) username.
// - AZBLOB: endpoint plus whichever of shared key / SAS token is present.
// - anything else: an empty map.
public static Map<String, String> getParamsFromFileStoreInfo(FileStoreInfo fsInfo) { Map<String, String> params = new HashMap<>(); switch (fsInfo.getFsType()) { case S3: S3FileStoreInfo s3FileStoreInfo = fsInfo.getS3FsInfo(); params.put(CloudConfigurationConstants.AWS_S3_REGION, s3FileStoreInfo.getRegion()); params.put(CloudConfigurationConstants.AWS_S3_ENDPOINT, s3FileStoreInfo.getEndpoint()); if (s3FileStoreInfo.getPartitionedPrefixEnabled()) { // Don't show the parameters if not enabled. params.put(CloudConfigurationConstants.AWS_S3_ENABLE_PARTITIONED_PREFIX, Boolean.toString(true)); params.put(CloudConfigurationConstants.AWS_S3_NUM_PARTITIONED_PREFIX, Integer.toString(s3FileStoreInfo.getNumPartitionedPrefix())); } AwsCredentialInfo credentialInfo = s3FileStoreInfo.getCredential(); if (credentialInfo.hasSimpleCredential()) { params.put(CloudConfigurationConstants.AWS_S3_USE_INSTANCE_PROFILE, "false"); params.put(CloudConfigurationConstants.AWS_S3_USE_AWS_SDK_DEFAULT_BEHAVIOR, "false"); params.put(CloudConfigurationConstants.AWS_S3_ACCESS_KEY, credentialInfo.getSimpleCredential().getAccessKey()); params.put(CloudConfigurationConstants.AWS_S3_SECRET_KEY, credentialInfo.getSimpleCredential().getAccessKeySecret()); } else if (credentialInfo.hasAssumeRoleCredential()) { params.put(CloudConfigurationConstants.AWS_S3_USE_INSTANCE_PROFILE, "true"); params.put(CloudConfigurationConstants.AWS_S3_USE_AWS_SDK_DEFAULT_BEHAVIOR, "false"); params.put(CloudConfigurationConstants.AWS_S3_IAM_ROLE_ARN, credentialInfo.getAssumeRoleCredential().getIamRoleArn()); params.put(CloudConfigurationConstants.AWS_S3_EXTERNAL_ID, credentialInfo.getAssumeRoleCredential().getExternalId()); } else if (credentialInfo.hasProfileCredential()) { params.put(CloudConfigurationConstants.AWS_S3_USE_INSTANCE_PROFILE, "true"); params.put(CloudConfigurationConstants.AWS_S3_USE_AWS_SDK_DEFAULT_BEHAVIOR, "false"); } else if (credentialInfo.hasDefaultCredential()) { 
params.put(CloudConfigurationConstants.AWS_S3_USE_AWS_SDK_DEFAULT_BEHAVIOR, "true"); } return params; case HDFS: HDFSFileStoreInfo hdfsFileStoreInfo = fsInfo.getHdfsFsInfo(); params.putAll(hdfsFileStoreInfo.getConfigurationMap()); String userName = hdfsFileStoreInfo.getUsername(); if (!Strings.isNullOrEmpty(userName)) { params.put(CloudConfigurationConstants.HDFS_USERNAME_DEPRECATED, userName); } return params; case AZBLOB: AzBlobFileStoreInfo azBlobFileStoreInfo = fsInfo.getAzblobFsInfo(); params.put(CloudConfigurationConstants.AZURE_BLOB_ENDPOINT, azBlobFileStoreInfo.getEndpoint()); AzBlobCredentialInfo azBlobcredentialInfo = azBlobFileStoreInfo.getCredential(); String sharedKey = azBlobcredentialInfo.getSharedKey(); if (!Strings.isNullOrEmpty(sharedKey)) { params.put(CloudConfigurationConstants.AZURE_BLOB_SHARED_KEY, sharedKey); } String sasToken = azBlobcredentialInfo.getSasToken(); if (!Strings.isNullOrEmpty(sasToken)) { params.put(CloudConfigurationConstants.AZURE_BLOB_SAS_TOKEN, sasToken); } return params; default: return params; } }
// Verifies that partitioned-prefix parameters are emitted only when enabled,
// and that storage-volume locations may end in "bucket/" but not contain a
// path beyond the bucket name.
@Test public void testGetParamsFromFileStoreInfo() { AwsCredentialInfo.Builder awsCredBuilder = AwsCredentialInfo.newBuilder(); awsCredBuilder.getSimpleCredentialBuilder() .setAccessKey("ak") .setAccessKeySecret("sk") .build(); FileStoreInfo.Builder fsInfoBuilder = FileStoreInfo.newBuilder(); fsInfoBuilder.getS3FsInfoBuilder() .setBucket("bucket") .setEndpoint("endpoint") .setRegion("region") .setCredential(awsCredBuilder); fsInfoBuilder.setFsKey("0") .setFsType(FileStoreType.S3) .addLocations("s3://bucket"); { FileStoreInfo fs = fsInfoBuilder.build(); Map<String, String> params = StorageVolume.getParamsFromFileStoreInfo(fs); Assert.assertFalse(params.containsKey(CloudConfigurationConstants.AWS_S3_ENABLE_PARTITIONED_PREFIX)); Assert.assertFalse(params.containsKey(CloudConfigurationConstants.AWS_S3_NUM_PARTITIONED_PREFIX)); } fsInfoBuilder.getS3FsInfoBuilder() .setPartitionedPrefixEnabled(true) .setNumPartitionedPrefix(32); { FileStoreInfo fs = fsInfoBuilder.build(); Map<String, String> params = StorageVolume.getParamsFromFileStoreInfo(fs); Assert.assertTrue(params.containsKey(CloudConfigurationConstants.AWS_S3_ENABLE_PARTITIONED_PREFIX)); Assert.assertTrue(params.containsKey(CloudConfigurationConstants.AWS_S3_NUM_PARTITIONED_PREFIX)); Assert.assertEquals("32", params.get(CloudConfigurationConstants.AWS_S3_NUM_PARTITIONED_PREFIX)); } // It's OK to have trailing '/' after bucket name fsInfoBuilder.addLocations("s3://bucket/"); { FileStoreInfo fs = fsInfoBuilder.build(); ExceptionChecker.expectThrowsNoException(() -> StorageVolume.fromFileStoreInfo(fs)); } // can't have more after bucket name fsInfoBuilder.addLocations("s3://bucket/abc"); { FileStoreInfo fs = fsInfoBuilder.build(); Assert.assertThrows(DdlException.class, () -> StorageVolume.fromFileStoreInfo(fs)); } }
// Copies the PVC spec fields that Kubernetes treats as immutable from the
// live (current) resource onto the desired one, so that a subsequent update
// does not try to change them: volumeName, storageClassName, accessModes,
// selector and dataSource.
protected void revertImmutableChanges(PersistentVolumeClaim current, PersistentVolumeClaim desired) { desired.getSpec().setVolumeName(current.getSpec().getVolumeName()); desired.getSpec().setStorageClassName(current.getSpec().getStorageClassName()); desired.getSpec().setAccessModes(current.getSpec().getAccessModes()); desired.getSpec().setSelector(current.getSpec().getSelector()); desired.getSpec().setDataSource(current.getSpec().getDataSource()); }
// Verifies that revertImmutableChanges copies storage class, access modes,
// selector and volume name from the current PVC onto the desired one.
@Test public void testRevertingImmutableFields() { PersistentVolumeClaim desired = new PersistentVolumeClaimBuilder() .withNewMetadata() .withName("my-pvc") .withNamespace("my-namespace") .endMetadata() .withNewSpec() .withNewResources() .withRequests(Collections.singletonMap("storage", new Quantity("100", null))) .endResources() .endSpec() .build(); PersistentVolumeClaim current = new PersistentVolumeClaimBuilder() .withNewMetadata() .withName("my-pvc") .withNamespace("my-namespace") .endMetadata() .withNewSpec() .withAccessModes("ReadWriteOnce") .withNewResources() .withRequests(Collections.singletonMap("storage", new Quantity("10", null))) .endResources() .withStorageClassName("my-storage-class") .withSelector(new LabelSelector(null, Collections.singletonMap("key", "label"))) .withVolumeName("pvc-ce9ebf52-435a-11e9-8fbc-06b5ff7c7748") .endSpec() .build(); PvcOperator op = createResourceOperations(vertx, mock(KubernetesClient.class)); op.revertImmutableChanges(current, desired); assertThat(current.getSpec().getStorageClassName(), is(desired.getSpec().getStorageClassName())); assertThat(current.getSpec().getAccessModes(), is(desired.getSpec().getAccessModes())); assertThat(current.getSpec().getSelector(), is(desired.getSpec().getSelector())); assertThat(current.getSpec().getVolumeName(), is(desired.getSpec().getVolumeName())); }
// Converts an ActiveMQ (OpenWire) message into an AMQP EncodedMessage.
// Maps JMS headers to the AMQP Header/Properties sections (durability,
// priority, type/subject, message id, destination + reply-to with type
// annotations, correlation id, TTL/expiry, timestamp, delivery count, user
// id, group id/sequence), then walks the JMS properties: keys with the
// JMS_AMQP_ prefix are routed to the matching AMQP section (header flags,
// message/delivery annotations, content type/encoding, reply-to group id,
// footer), scheduler-internal keys are dropped, and everything else lands in
// the application-properties map. Finally the sections are encoded in the
// standard AMQP order. Returns null for a null input.
// NOTE(review): header.setTtl(new UnsignedInteger((int) ttl)) narrows a long
// to int — a TTL beyond Integer.MAX_VALUE ms would wrap; confirm acceptable.
@Override public EncodedMessage transform(ActiveMQMessage message) throws Exception { if (message == null) { return null; } long messageFormat = 0; Header header = null; Properties properties = null; Map<Symbol, Object> daMap = null; Map<Symbol, Object> maMap = null; Map<String,Object> apMap = null; Map<Object, Object> footerMap = null; Section body = convertBody(message); if (message.isPersistent()) { if (header == null) { header = new Header(); } header.setDurable(true); } byte priority = message.getPriority(); if (priority != Message.DEFAULT_PRIORITY) { if (header == null) { header = new Header(); } header.setPriority(UnsignedByte.valueOf(priority)); } String type = message.getType(); if (type != null) { if (properties == null) { properties = new Properties(); } properties.setSubject(type); } MessageId messageId = message.getMessageId(); if (messageId != null) { if (properties == null) { properties = new Properties(); } properties.setMessageId(getOriginalMessageId(message)); } ActiveMQDestination destination = message.getDestination(); if (destination != null) { if (properties == null) { properties = new Properties(); } properties.setTo(destination.getQualifiedName()); if (maMap == null) { maMap = new HashMap<>(); } maMap.put(JMS_DEST_TYPE_MSG_ANNOTATION, destinationType(destination)); } ActiveMQDestination replyTo = message.getReplyTo(); if (replyTo != null) { if (properties == null) { properties = new Properties(); } properties.setReplyTo(replyTo.getQualifiedName()); if (maMap == null) { maMap = new HashMap<>(); } maMap.put(JMS_REPLY_TO_TYPE_MSG_ANNOTATION, destinationType(replyTo)); } String correlationId = message.getCorrelationId(); if (correlationId != null) { if (properties == null) { properties = new Properties(); } try { properties.setCorrelationId(AMQPMessageIdHelper.INSTANCE.toIdObject(correlationId)); } catch (AmqpProtocolException e) { properties.setCorrelationId(correlationId); } } long expiration = message.getExpiration(); if (expiration != 0) { 
long ttl = expiration - System.currentTimeMillis(); if (ttl < 0) { ttl = 1; } if (header == null) { header = new Header(); } header.setTtl(new UnsignedInteger((int) ttl)); if (properties == null) { properties = new Properties(); } properties.setAbsoluteExpiryTime(new Date(expiration)); } long timeStamp = message.getTimestamp(); if (timeStamp != 0) { if (properties == null) { properties = new Properties(); } properties.setCreationTime(new Date(timeStamp)); } // JMSX Message Properties int deliveryCount = message.getRedeliveryCounter(); if (deliveryCount > 0) { if (header == null) { header = new Header(); } header.setDeliveryCount(UnsignedInteger.valueOf(deliveryCount)); } String userId = message.getUserID(); if (userId != null) { if (properties == null) { properties = new Properties(); } properties.setUserId(new Binary(userId.getBytes(StandardCharsets.UTF_8))); } String groupId = message.getGroupID(); if (groupId != null) { if (properties == null) { properties = new Properties(); } properties.setGroupId(groupId); } int groupSequence = message.getGroupSequence(); if (groupSequence > 0) { if (properties == null) { properties = new Properties(); } properties.setGroupSequence(UnsignedInteger.valueOf(groupSequence)); } final Map<String, Object> entries; try { entries = message.getProperties(); } catch (IOException e) { throw JMSExceptionSupport.create(e); } for (Map.Entry<String, Object> entry : entries.entrySet()) { String key = entry.getKey(); Object value = entry.getValue(); if (key.startsWith(JMS_AMQP_PREFIX)) { if (key.startsWith(NATIVE, JMS_AMQP_PREFIX_LENGTH)) { // skip transformer appended properties continue; } else if (key.startsWith(ORIGINAL_ENCODING, JMS_AMQP_PREFIX_LENGTH)) { // skip transformer appended properties continue; } else if (key.startsWith(MESSAGE_FORMAT, JMS_AMQP_PREFIX_LENGTH)) { messageFormat = (long) TypeConversionSupport.convert(entry.getValue(), Long.class); continue; } else if (key.startsWith(HEADER, JMS_AMQP_PREFIX_LENGTH)) { if (header == 
null) { header = new Header(); } continue; } else if (key.startsWith(PROPERTIES, JMS_AMQP_PREFIX_LENGTH)) { if (properties == null) { properties = new Properties(); } continue; } else if (key.startsWith(MESSAGE_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) { if (maMap == null) { maMap = new HashMap<>(); } String name = key.substring(JMS_AMQP_MESSAGE_ANNOTATION_PREFIX.length()); maMap.put(Symbol.valueOf(name), value); continue; } else if (key.startsWith(FIRST_ACQUIRER, JMS_AMQP_PREFIX_LENGTH)) { if (header == null) { header = new Header(); } header.setFirstAcquirer((boolean) TypeConversionSupport.convert(value, Boolean.class)); continue; } else if (key.startsWith(CONTENT_TYPE, JMS_AMQP_PREFIX_LENGTH)) { if (properties == null) { properties = new Properties(); } properties.setContentType(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class))); continue; } else if (key.startsWith(CONTENT_ENCODING, JMS_AMQP_PREFIX_LENGTH)) { if (properties == null) { properties = new Properties(); } properties.setContentEncoding(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class))); continue; } else if (key.startsWith(REPLYTO_GROUP_ID, JMS_AMQP_PREFIX_LENGTH)) { if (properties == null) { properties = new Properties(); } properties.setReplyToGroupId((String) TypeConversionSupport.convert(value, String.class)); continue; } else if (key.startsWith(DELIVERY_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) { if (daMap == null) { daMap = new HashMap<>(); } String name = key.substring(JMS_AMQP_DELIVERY_ANNOTATION_PREFIX.length()); daMap.put(Symbol.valueOf(name), value); continue; } else if (key.startsWith(FOOTER_PREFIX, JMS_AMQP_PREFIX_LENGTH)) { if (footerMap == null) { footerMap = new HashMap<>(); } String name = key.substring(JMS_AMQP_FOOTER_PREFIX.length()); footerMap.put(Symbol.valueOf(name), value); continue; } } else if (key.startsWith(AMQ_SCHEDULED_MESSAGE_PREFIX )) { // strip off the scheduled message properties continue; } // The property 
didn't map into any other slot so we store it in the // Application Properties section of the message. if (apMap == null) { apMap = new HashMap<>(); } apMap.put(key, value); int messageType = message.getDataStructureType(); if (messageType == CommandTypes.ACTIVEMQ_MESSAGE) { // Type of command to recognize advisory message Object data = message.getDataStructure(); if(data != null) { apMap.put("ActiveMqDataStructureType", data.getClass().getSimpleName()); } } } final AmqpWritableBuffer buffer = new AmqpWritableBuffer(); encoder.setByteBuffer(buffer); if (header != null) { encoder.writeObject(header); } if (daMap != null) { encoder.writeObject(new DeliveryAnnotations(daMap)); } if (maMap != null) { encoder.writeObject(new MessageAnnotations(maMap)); } if (properties != null) { encoder.writeObject(properties); } if (apMap != null) { encoder.writeObject(new ApplicationProperties(apMap)); } if (body != null) { encoder.writeObject(body); } if (footerMap != null) { encoder.writeObject(new Footer(footerMap)); } return new EncodedMessage(messageFormat, buffer.getArray(), 0, buffer.getArrayLength()); }
// Verifies that a compressed StreamMessage marked with the AMQP_SEQUENCE
// original encoding is transformed into an AmqpSequence body holding the two
// written entries.
@Test public void testConvertCompressedStreamMessageToAmqpMessageWithAmqpSequencey() throws Exception { ActiveMQStreamMessage outbound = createStreamMessage(true); outbound.setShortProperty(JMS_AMQP_ORIGINAL_ENCODING, AMQP_SEQUENCE); outbound.writeBoolean(false); outbound.writeString("test"); outbound.onSend(); outbound.storeContent(); JMSMappingOutboundTransformer transformer = new JMSMappingOutboundTransformer(); EncodedMessage encoded = transformer.transform(outbound); assertNotNull(encoded); Message amqp = encoded.decode(); assertNotNull(amqp.getBody()); assertTrue(amqp.getBody() instanceof AmqpSequence); assertTrue(((AmqpSequence) amqp.getBody()).getValue() instanceof List); @SuppressWarnings("unchecked") List<Object> amqpList = ((AmqpSequence) amqp.getBody()).getValue(); assertEquals(2, amqpList.size()); }
/**
 * Returns the KSQL configuration held by this instance.
 * Intentionally exposes the internal reference (see the SpotBugs suppression).
 */
@Override
@SuppressFBWarnings(value = "EI_EXPOSE_REP")
public KsqlConfig getKsqlConfig() {
    return this.ksqlConfig;
}
// Verifies that when multiple config messages are polled, the first one
// (savedProperties) wins and later ones (badProperties) are ignored.
@Test public void shouldUseFirstPolledConfig() { // Given: addPollResult(KafkaConfigStore.CONFIG_MSG_KEY, savedProperties, badProperties); expectRead(consumerBefore); // When: final KsqlConfig mergedConfig = getKsqlConfig(); // Then: verifyMergedConfig(mergedConfig); }
/**
 * Deserializes a plugin's JSON validation response into a ValidationResult.
 */
@Override
public ValidationResult getConfigurationValidationResultFromResponseBody(String responseBody) {
    final JSONResultMessageHandler handler = new JSONResultMessageHandler();
    return handler.toValidationResult(responseBody);
}
// Verifies that a JSON array of {message,key} objects deserializes into a
// failed ValidationResult containing the corresponding ValidationErrors.
@Test public void getConfigurationValidationResultFromResponseBody_shouldDeserializeJsonToValidationResult() { final ArtifactMessageConverterV2 converter = new ArtifactMessageConverterV2(); String responseBody = "[{\"message\":\"Url must not be blank.\",\"key\":\"Url\"},{\"message\":\"SearchBase must not be blank.\",\"key\":\"SearchBase\"}]"; ValidationResult validationResult = converter.getConfigurationValidationResultFromResponseBody(responseBody); assertThat(validationResult.isSuccessful(), is(false)); assertThat(validationResult.getErrors(), containsInAnyOrder( new ValidationError("Url", "Url must not be blank."), new ValidationError("SearchBase", "SearchBase must not be blank.") )); }
// Merges sharded DQL query results. Fast path: a single result with no
// aggregation rewrite is streamed through unmerged. Otherwise the column
// label -> index map from the first result is installed on the select
// context, the base merged result is built (group-by/order-by aware) and
// then decorated (e.g. with pagination). May throw SQLException from the
// underlying query results.
@Override public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext, final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException { if (1 == queryResults.size() && !isNeedAggregateRewrite(sqlStatementContext)) { return new IteratorStreamMergedResult(queryResults); } Map<String, Integer> columnLabelIndexMap = getColumnLabelIndexMap(queryResults.get(0)); SelectStatementContext selectStatementContext = (SelectStatementContext) sqlStatementContext; selectStatementContext.setIndexes(columnLabelIndexMap); MergedResult mergedResult = build(queryResults, selectStatementContext, columnLabelIndexMap, database); return decorate(queryResults, selectStatementContext, mergedResult); }
// Verifies that a MySQL SELECT with ORDER BY and LIMIT merges into a
// LimitDecoratorMergedResult wrapping an OrderByStreamMergedResult.
@Test void assertBuildOrderByStreamMergedResultWithMySQLLimit() throws SQLException { final ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "MySQL")); ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS); when(database.getSchema(DefaultDatabase.LOGIC_NAME)).thenReturn(mock(ShardingSphereSchema.class)); MySQLSelectStatement selectStatement = (MySQLSelectStatement) buildSelectStatement(new MySQLSelectStatement()); selectStatement.setOrderBy(new OrderBySegment(0, 0, Collections.singletonList(new IndexOrderByItemSegment(0, 0, 1, OrderDirection.DESC, NullsOrderType.FIRST)))); selectStatement.setProjections(new ProjectionsSegment(0, 0)); selectStatement.setLimit(new LimitSegment(0, 0, new NumberLiteralLimitValueSegment(0, 0, 1L), null)); SelectStatementContext selectStatementContext = new SelectStatementContext(createShardingSphereMetaData(database), Collections.emptyList(), selectStatement, DefaultDatabase.LOGIC_NAME, Collections.emptyList()); MergedResult actual = resultMerger.merge(createQueryResults(), selectStatementContext, createDatabase(), mock(ConnectionContext.class)); assertThat(actual, instanceOf(LimitDecoratorMergedResult.class)); assertThat(((LimitDecoratorMergedResult) actual).getMergedResult(), instanceOf(OrderByStreamMergedResult.class)); }
/**
 * Renames {@code oldName} to {@code newName} on the given file system.
 *
 * @throws IOException when the underlying rename reports failure
 */
public static void rename(FileSystem fs, String oldName, String newName) throws IOException {
    final Path src = new Path(oldName);
    final Path dst = new Path(newName);
    if (fs.rename(src, dst)) {
        return;
    }
    throw new IOException("Could not rename " + src + " to " + dst);
}
// Verifies that MapFile.rename throws an IOException starting with
// "Could not rename" when the file system's rename returns false.
@Test public void testRenameWithFalse() { final String ERROR_MESSAGE = "Could not rename"; final String NEW_FILE_NAME = "test-new.mapfile"; final String OLD_FILE_NAME = "test-old.mapfile"; MapFile.Writer writer = null; try { FileSystem fs = FileSystem.getLocal(conf); FileSystem spyFs = spy(fs); writer = createWriter(OLD_FILE_NAME, IntWritable.class, IntWritable.class); writer.close(); Path oldDir = new Path(TEST_DIR, OLD_FILE_NAME); Path newDir = new Path(TEST_DIR, NEW_FILE_NAME); when(spyFs.rename(oldDir, newDir)).thenReturn(false); MapFile.rename(spyFs, oldDir.toString(), newDir.toString()); fail("testRenameWithException no exception error !!!"); } catch (IOException ex) { assertTrue("testRenameWithFalse invalid IOExceptionMessage error !!!", ex .getMessage().startsWith(ERROR_MESSAGE)); } finally { IOUtils.cleanupWithLogger(LOG, writer); } }
/**
 * Reads the byte at the current offset and advances the offset by one.
 *
 * @throws EOFException when no byte remains in the buffer
 */
@Override
public byte readByte() throws EOFException {
    if (availableLong() >= 1) {
        return _dataBuffer.getByte(_currentOffset++);
    }
    throw new EOFException();
}
// Verifies readByte returns the first buffer byte and advances the offset to 1.
@Test void testReadByte() throws EOFException { int read = _dataBufferPinotInputStream.readByte(); assertEquals(read, _byteBuffer.get(0) & 0xFF); assertEquals(_dataBufferPinotInputStream.getCurrentOffset(), 1); }
/**
 * Returns a fully qualified form of this path: relative paths are resolved
 * against the file system's working directory, and a missing scheme or
 * authority is filled in from the file system's URI (an absent authority
 * becomes the empty string).
 */
public Path makeQualified(FileSystem fs) {
    final Path resolved = isAbsolute() ? this : new Path(fs.getWorkingDirectory(), this);
    final URI pathUri = resolved.toUri();
    final URI fsUri = fs.getUri();
    String scheme = pathUri.getScheme();
    String authority = pathUri.getAuthority();
    // Already qualified: has a scheme, and either has an authority or the
    // file system itself has none.
    if (scheme != null && (authority != null || fsUri.getAuthority() == null)) {
        return resolved;
    }
    if (scheme == null) {
        scheme = fsUri.getScheme();
    }
    if (authority == null) {
        authority = fsUri.getAuthority();
        if (authority == null) {
            authority = "";
        }
    }
    return new Path(scheme + ":" + "//" + authority + pathUri.getPath());
}
// Verifies makeQualified on the local file system: a relative path is
// resolved against the working directory, an absolute path is kept, and both
// get the "file" scheme with no authority.
@Test void testMakeQualified() throws IOException { // make relative path qualified String path = "test/test"; Path p = new Path(path).makeQualified(FileSystem.getLocalFileSystem()); URI u = p.toUri(); assertThat(u.getScheme()).isEqualTo("file"); assertThat(u.getAuthority()).isNull(); String q = new Path(FileSystem.getLocalFileSystem().getWorkingDirectory().getPath(), path) .getPath(); assertThat(u.getPath()).isEqualTo(q); // make absolute path qualified path = "/test/test"; p = new Path(path).makeQualified(FileSystem.getLocalFileSystem()); u = p.toUri(); assertThat(u.getScheme()).isEqualTo("file"); assertThat(u.getAuthority()).isNull(); assertThat(u.getPath()).isEqualTo(path); }
// Sends an SMS through the Tencent Cloud SendSms API and maps the response
// into an SmsSendRespDTO. A top-level "Error" object means the request itself
// failed; otherwise success is determined by the first SendStatusSet entry's
// Code matching API_CODE_SUCCESS.
@Override
public SmsSendRespDTO sendSms(Long sendLogId, String mobile, String apiTemplateId, List<KeyValue<String, Object>> templateParams) throws Throwable {
    // 1. Execute the request
    // Reference: https://cloud.tencent.com/document/product/382/55981
    TreeMap<String, Object> body = new TreeMap<>();
    body.put("PhoneNumberSet", new String[]{mobile});
    body.put("SmsSdkAppId", getSdkAppId());
    body.put("SignName", properties.getSignature());
    body.put("TemplateId",apiTemplateId);
    body.put("TemplateParamSet", ArrayUtils.toArray(templateParams, param -> String.valueOf(param.getValue())));
    JSONObject response = request("SendSms", body);
    // 2. Parse the response
    JSONObject responseResult = response.getJSONObject("Response");
    JSONObject error = responseResult.getJSONObject("Error");
    if (error != null) {
        // Request-level failure: surface the API error code and message.
        return new SmsSendRespDTO().setSuccess(false)
                .setApiRequestId(responseResult.getStr("RequestId"))
                .setApiCode(error.getStr("Code"))
                .setApiMsg(error.getStr("Message"));
    }
    // Per-number send status; only one number is sent, so index 0 is used.
    JSONObject responseData = responseResult.getJSONArray("SendStatusSet").getJSONObject(0);
    return new SmsSendRespDTO().setSuccess(Objects.equals(API_CODE_SUCCESS, responseData.getStr("Code")))
            .setApiRequestId(responseResult.getStr("RequestId"))
            .setSerialNo(responseData.getStr("SerialNo"))
            .setApiMsg(responseData.getStr("Message"));
}
// Test: mocks the HTTP layer with a canned Tencent SendSms success payload and asserts the
// parsed SmsSendRespDTO carries success, serial number, request id and message.
@Test public void testDoSendSms_success() throws Throwable { try (MockedStatic<HttpUtils> httpUtilsMockedStatic = mockStatic(HttpUtils.class)) { // 准备参数 Long sendLogId = randomLongId(); String mobile = randomString(); String apiTemplateId = randomString(); List<KeyValue<String, Object>> templateParams = Lists.newArrayList( new KeyValue<>("1", 1234), new KeyValue<>("2", "login")); // mock 方法 httpUtilsMockedStatic.when(() -> HttpUtils.post(anyString(), anyMap(), anyString())) .thenReturn("{\n" + " \"Response\": {\n" + " \"SendStatusSet\": [\n" + " {\n" + " \"SerialNo\": \"5000:1045710669157053657849499619\",\n" + " \"PhoneNumber\": \"+8618511122233\",\n" + " \"Fee\": 1,\n" + " \"SessionContext\": \"test\",\n" + " \"Code\": \"Ok\",\n" + " \"Message\": \"send success\",\n" + " \"IsoCode\": \"CN\"\n" + " },\n" + " ],\n" + " \"RequestId\": \"a0aabda6-cf91-4f3e-a81f-9198114a2279\"\n" + " }\n" + "}"); // 调用 SmsSendRespDTO result = smsClient.sendSms(sendLogId, mobile, apiTemplateId, templateParams); // 断言 assertTrue(result.getSuccess()); assertEquals("5000:1045710669157053657849499619", result.getSerialNo()); assertEquals("a0aabda6-cf91-4f3e-a81f-9198114a2279", result.getApiRequestId()); assertEquals("send success", result.getApiMsg()); } }
// Reverts a file to a previous S3 object version by server-side copying that version onto
// the current key. Preserves storage class, SSE algorithm/KMS key, and (best-effort) any
// non-standard ACL; ACL lookup failures are deliberately ignored. If the parent directory
// placeholder is a delete marker, the marker is removed as well so the folder reappears.
// Directories are ignored; service errors are mapped to Cyberduck exceptions.
@Override public void revert(final Path file) throws BackgroundException { if(file.isFile()) { try { final S3Object destination = new S3Object(containerService.getKey(file)); // Keep same storage class destination.setStorageClass(file.attributes().getStorageClass()); final Encryption.Algorithm encryption = file.attributes().getEncryption(); destination.setServerSideEncryptionAlgorithm(encryption.algorithm); // Set custom key id stored in KMS destination.setServerSideEncryptionKmsKeyId(encryption.key); try { // Apply non standard ACL final Acl list = acl.getPermission(file); if(list.isEditable()) { destination.setAcl(acl.toAcl(list)); } } catch(AccessDeniedException | InteroperabilityException e) { log.warn(String.format("Ignore failure %s", e)); } final Path bucket = containerService.getContainer(file); final String bucketname = bucket.isRoot() ? RequestEntityRestStorageService.findBucketInHostname(session.getHost()) : bucket.getName(); session.getClient().copyVersionedObject(file.attributes().getVersionId(), bucketname, containerService.getKey(file), bucketname, destination, false); if(file.getParent().attributes().getCustom().containsKey(S3VersionedObjectListService.KEY_DELETE_MARKER)) { // revert placeholder session.getClient().deleteVersionedObject( file.getParent().attributes().getVersionId(), bucketname, containerService.getKey(file.getParent())); } } catch(ServiceException e) { throw new S3ExceptionMappingService().map("Cannot revert file", e, file); } } }
// Integration test: uploads a file, overwrites it (creating a second version), reverts to the
// initial version, and asserts a new version id is minted while the listing shows both prior
// versions in the expected order. An unrelated versioned file verifies listing isolation.
@Test public void testRevert() throws Exception { final Path bucket = new Path("versioning-test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume)); final S3AccessControlListFeature acl = new S3AccessControlListFeature(session); final Path directory = new S3DirectoryFeature(session, new S3WriteFeature(session, acl), acl).mkdir(new Path( bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus()); final Path test = new S3TouchFeature(session, acl).touch(new Path(directory, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus()); final Path ignored = new S3TouchFeature(session, acl).touch(new Path(directory, String.format("%s-2", test.getName()), EnumSet.of(Path.Type.file)), new TransferStatus()); { // Make sure there is another versioned copy of a file not to be included when listing final byte[] content = RandomUtils.nextBytes(245); final TransferStatus status = new TransferStatus().withLength(content.length); final S3MultipartWriteFeature writer = new S3MultipartWriteFeature(session, acl); final HttpResponseOutputStream<StorageObject> out = writer.write(ignored, status, new DisabledConnectionCallback()); new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out); } final PathAttributes initialAttributes = new PathAttributes(test.attributes()); final String initialVersion = test.attributes().getVersionId(); final byte[] content = RandomUtils.nextBytes(32769); final TransferStatus status = new TransferStatus(); status.setLength(content.length); final S3MultipartWriteFeature writer = new S3MultipartWriteFeature(session, acl); final HttpResponseOutputStream<StorageObject> out = writer.write(test, status, new DisabledConnectionCallback()); assertNotNull(out); new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out); final PathAttributes updated = new S3AttributesFinderFeature(session, acl).find(new 
Path(test).withAttributes(PathAttributes.EMPTY)); assertNotEquals(initialVersion, updated.getVersionId()); final S3VersioningFeature feature = new S3VersioningFeature(session, acl); { final AttributedList<Path> versions = feature.list(new Path(test).withAttributes(status.getResponse()), new DisabledListProgressListener()); assertFalse(versions.isEmpty()); assertEquals(1, versions.size()); assertEquals(new Path(test).withAttributes(initialAttributes), versions.get(0)); assertTrue(new S3FindFeature(session, acl).find(versions.get(0))); assertEquals(initialVersion, new S3AttributesFinderFeature(session, acl).find(versions.get(0)).getVersionId()); } feature.revert(new Path(test).withAttributes(initialAttributes)); final PathAttributes reverted = new S3AttributesFinderFeature(session, acl).find(new Path(test).withAttributes(PathAttributes.EMPTY)); assertNotEquals(initialVersion, reverted.getVersionId()); assertEquals(test.attributes().getSize(), reverted.getSize()); { final AttributedList<Path> versions = feature.list(test, new DisabledListProgressListener()); assertFalse(versions.isEmpty()); assertEquals(2, versions.size()); assertEquals(content.length, versions.get(0).attributes().getSize()); assertEquals(updated.getVersionId(), versions.get(0).attributes().getVersionId()); assertEquals(initialVersion, versions.get(1).attributes().getVersionId()); } new S3DefaultDeleteFeature(session).delete(Arrays.asList(directory, test, ignored), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
/**
 * Strips optional whitespace (OWS, i.e. SP and HTAB per RFC 7230) from both ends of the
 * given sequence. Returns the very same instance when no trimming is necessary, otherwise
 * a sub-sequence view covering the non-OWS core.
 */
public static CharSequence trimOws(CharSequence value) {
    final int length = value.length();
    if (length == 0) {
        return value;
    }
    // Skip leading OWS characters.
    int start = 0;
    while (start < length) {
        final char c = value.charAt(start);
        if (c != ' ' && c != '\t') {
            break;
        }
        start++;
    }
    // Skip trailing OWS characters (never crossing the start index).
    int end = length - 1;
    while (end >= start) {
        final char c = value.charAt(end);
        if (c != ' ' && c != '\t') {
            break;
        }
        end--;
    }
    // Untouched on both ends: hand back the original instance, not a copy.
    return start == 0 && end == length - 1 ? value : value.subSequence(start, end + 1);
}
// Test: trimming removes only leading/trailing space and tab, preserves interior OWS, and
// returns the identical instance (assertSame) when no trimming is needed.
@Test public void trimOws() { assertSame("", StringUtil.trimOws("")); assertEquals("", StringUtil.trimOws(" \t ")); assertSame("a", StringUtil.trimOws("a")); assertEquals("a", StringUtil.trimOws(" a")); assertEquals("a", StringUtil.trimOws("a ")); assertEquals("a", StringUtil.trimOws(" a ")); assertSame("abc", StringUtil.trimOws("abc")); assertEquals("abc", StringUtil.trimOws("\tabc")); assertEquals("abc", StringUtil.trimOws("abc\t")); assertEquals("abc", StringUtil.trimOws("\tabc\t")); assertSame("a\t b", StringUtil.trimOws("a\t b")); assertEquals("", StringUtil.trimOws("\t ").toString()); assertEquals("a b", StringUtil.trimOws("\ta b \t").toString()); }
// Redirects logged-in system administrators to the plugin risk consent page while consent is
// still REQUIRED (default when the property is absent: NOT_ACCEPTED, which does not redirect).
// NOTE(review): the filter chain still runs after sendRedirect — presumably intentional so
// downstream filters see the request, but confirm the response is not written to afterwards.
@Override public void doFilter(HttpRequest request, HttpResponse response, FilterChain chain) throws IOException{ PluginRiskConsent riskConsent = PluginRiskConsent.valueOf(config.get(PLUGINS_RISK_CONSENT).orElse(NOT_ACCEPTED.name())); if (userSession.hasSession() && userSession.isLoggedIn() && userSession.isSystemAdministrator() && riskConsent == REQUIRED) { redirectTo(response, request.getContextPath() + PLUGINS_RISK_CONSENT_PATH); } chain.doFilter(request, response); }
// Test: a logged-in system administrator with consent required is redirected exactly once.
@Test public void doFilter_givenLoggedInAdminAndConsentRequired_redirect() throws Exception { PluginsRiskConsentFilter consentFilter = new PluginsRiskConsentFilter(configuration, userSession); when(userSession.hasSession()).thenReturn(true); when(userSession.isLoggedIn()).thenReturn(true); when(userSession.isSystemAdministrator()).thenReturn(true); consentFilter.doFilter(request, response, chain); verify(response, times(1)).sendRedirect(Mockito.anyString()); }
/**
 * Starts the monitoring scheduler: a single daemon thread (named after this class) runs every
 * registered {@link MonitoringTask} with the task's own initial delay and fixed delay period,
 * both expressed in milliseconds.
 */
@Override
public void start() {
    // Daemon thread so background monitoring never prevents JVM shutdown.
    java.util.concurrent.ThreadFactory threadFactory = new ThreadFactoryBuilder()
        .setDaemon(true)
        .setNameFormat(getClass().getCanonicalName() + "-thread-%d")
        .build();
    this.scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(threadFactory);
    for (MonitoringTask monitoringTask : monitoringTasks) {
        scheduledExecutorService.scheduleWithFixedDelay(
            monitoringTask, monitoringTask.getDelay(), monitoringTask.getPeriod(), MILLISECONDS);
    }
}
// Test: starting the scheduler queries each task exactly once for its delay and period.
@Test public void start_givenTwoTasks_callsGetsDelayAndPeriodFromTasks() { underTest.start(); verify(task1, times(1)).getDelay(); verify(task1, times(1)).getPeriod(); verify(task2, times(1)).getDelay(); verify(task2, times(1)).getPeriod(); }
/**
 * Opens an input stream over a byte range of the underlying OSS object.
 *
 * @param position    zero-based offset of the first byte to read
 * @param bytesToRead number of bytes the range should cover (range end is inclusive)
 * @return the content stream of the ranged object
 * @throws IOException if the OSS GetObject call fails; the cause is the original OSSException
 */
@Override
protected InputStream openObjectInputStream(
    long position, int bytesToRead) throws IOException {
    final OSSObject object;
    try {
        GetObjectRequest request = new GetObjectRequest(mBucketName, mPath);
        // OSS ranges are inclusive on both ends.
        request.setRange(position, position + bytesToRead - 1);
        object = mClient.getObject(request);
    } catch (OSSException e) {
        throw new IOException(
            String.format("Failed to get object: %s bucket: %s", mPath, mBucketName), e);
    }
    return object.getObjectContent();
}
// Test: the reader returns the mocked object's content stream on success, and wraps an
// OSSException in an IOException with the expected message on failure.
@Test public void openObjectInputStream() throws Exception { OSSObject object = Mockito.mock(OSSObject.class); InputStream inputStream = Mockito.mock(InputStream.class); Mockito.when(mClient.getObject(ArgumentMatchers.any( GetObjectRequest.class))).thenReturn(object); Mockito.when(object.getObjectContent()).thenReturn(inputStream); // test successful open object input stream long position = 0L; int bytesToRead = 10; Object objectInputStream = mOSSPositionReader.openObjectInputStream(position, bytesToRead); Assert.assertTrue(objectInputStream instanceof InputStream); // test open object input stream with exception Mockito.when(mClient.getObject(ArgumentMatchers.any(GetObjectRequest.class))) .thenThrow(OSSException.class); try { mOSSPositionReader.openObjectInputStream(position, bytesToRead); } catch (Exception e) { Assert.assertTrue(e instanceof IOException); String errorMessage = String .format("Failed to get object: %s bucket: %s", mPath, mBucketName); Assert.assertEquals(errorMessage, e.getMessage()); } }
// Records the given business exception against the current thread's Sentinel context.
// Convenience overload delegating to traceContext with the ambient context.
public static void trace(Throwable e) { traceContext(e, ContextUtil.getContext()); }
// Test: tracing must not blow up when the context-name cache is already at its size cap and
// the current entry therefore runs under an overflow/null context.
@Test public void testTraceWhenContextSizeExceedsThreshold() { int i = 0; for (; i < Constants.MAX_CONTEXT_NAME_SIZE; i++) { ContextUtil.enter("test-context-" + i); ContextUtil.exit(); } try { ContextUtil.enter("test-context-" + i); throw new RuntimeException("test"); } catch (Exception e) { Tracer.trace(e); } finally { ContextUtil.exit(); } }
// Resets the histogram to its empty state: a fresh (zeroed) bucket array plus zeroed
// counters for in-range, overflow (top) and underflow (bottom) records, their sums, the
// running mean, and the sum of squared deviations. Synchronized against concurrent record().
public synchronized void clear() { this.buckets = new long[bucketType.getNumBuckets()]; this.numBoundedBucketRecords = 0; this.numTopRecords = 0; this.topRecordsSum = 0; this.numBottomRecords = 0; this.bottomRecordsSum = 0; this.mean = 0; this.sumOfSquaredDeviations = 0; }
// Test: after clear(), both the total count and individual bucket counts drop to zero.
@Test public void testClear() { HistogramData histogramData = HistogramData.linear(0, 0.2, 50); histogramData.record(-1, 1, 2, 3); assertThat(histogramData.getTotalCount(), equalTo(4L)); assertThat(histogramData.getCount(5), equalTo(1L)); histogramData.clear(); assertThat(histogramData.getTotalCount(), equalTo(0L)); assertThat(histogramData.getCount(5), equalTo(0L)); }
// Builds the YARN ContainerLaunchContext for the Flink ApplicationMaster. The launch command
// is assembled from a configurable template with placeholders for: java binary, JVM heap
// parameters derived from the process spec, user-supplied JVM options (default + custom,
// global + JM-specific, optionally with krb5), logging options, the entrypoint class,
// dynamic config args, and stdout/stderr redirects into the YARN log directory.
ContainerLaunchContext setupApplicationMasterContainer( String yarnClusterEntrypoint, boolean hasKrb5, JobManagerProcessSpec processSpec) { // ------------------ Prepare Application Master Container ------------------------------ // respect custom JVM options in the YAML file List<ConfigOption<String>> jvmOptions = Arrays.asList( CoreOptions.FLINK_DEFAULT_JVM_OPTIONS, CoreOptions.FLINK_JVM_OPTIONS, CoreOptions.FLINK_DEFAULT_JM_JVM_OPTIONS, CoreOptions.FLINK_JM_JVM_OPTIONS); String javaOpts = Utils.generateJvmOptsString(flinkConfiguration, jvmOptions, hasKrb5); // Set up the container launch context for the application master ContainerLaunchContext amContainer = Records.newRecord(ContainerLaunchContext.class); final Map<String, String> startCommandValues = new HashMap<>(); startCommandValues.put("java", "$JAVA_HOME/bin/java"); String jvmHeapMem = JobManagerProcessUtils.generateJvmParametersStr(processSpec, flinkConfiguration); startCommandValues.put("jvmmem", jvmHeapMem); startCommandValues.put("jvmopts", javaOpts); startCommandValues.put( "logging", YarnLogConfigUtil.getLoggingYarnCommand(flinkConfiguration)); startCommandValues.put("class", yarnClusterEntrypoint); startCommandValues.put( "redirects", "1> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.out " + "2> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.err"); String dynamicParameterListStr = JobManagerProcessUtils.generateDynamicConfigsStr(processSpec); startCommandValues.put("args", dynamicParameterListStr); final String commandTemplate = flinkConfiguration.get(YARN_CONTAINER_START_COMMAND_TEMPLATE); final String amCommand = getStartCommand(commandTemplate, startCommandValues); amContainer.setCommands(Collections.singletonList(amCommand)); LOG.debug("Application Master start command: " + amCommand); return amContainer; }
// Test: exhaustively checks the generated AM launch command for every combination of logging
// backend (none/logback/log4j), krb5 on/off, custom default/JM JVM options, and two custom
// start-command templates. Relies on side effects: the descriptor keeps a live reference to
// the mutated Configuration between assertions.
@Test void testSetupApplicationMasterContainer() { Configuration cfg = new Configuration(); YarnClusterDescriptor clusterDescriptor = createYarnClusterDescriptor(cfg); final JobManagerProcessSpec jobManagerProcessSpec = createDefaultJobManagerProcessSpec(1024); final String java = "$JAVA_HOME/bin/java"; final String jvmmem = JobManagerProcessUtils.generateJvmParametersStr(jobManagerProcessSpec, cfg); final String dynamicParameters = JobManagerProcessUtils.generateDynamicConfigsStr(jobManagerProcessSpec); final String defaultJvmOpts = "-DdefaultJvm"; // if set final String jvmOpts = "-Djvm"; // if set final String defaultJmJvmOpts = "-DdefaultJmJvm"; // if set final String jmJvmOpts = "-DjmJvm"; // if set final String krb5 = "-Djava.security.krb5.conf=krb5.conf"; final String logfile = "-Dlog.file=\"" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.log\""; // if set final String logback = "-Dlogback.configurationFile=file:" + YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME; // if set final String log4j = "-Dlog4j.configuration=file:" + YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME + " -Dlog4j.configurationFile=file:" + YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME; // if set final String mainClass = clusterDescriptor.getYarnSessionClusterEntrypoint(); final String redirects = "1> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.out " + "2> " + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/jobmanager.err"; try { // no logging, with/out krb5 assertThat( clusterDescriptor .setupApplicationMasterContainer( mainClass, false, jobManagerProcessSpec) .getCommands() .get(0)) .isEqualTo( String.join( " ", java, jvmmem, YarnClusterDescriptor.IGNORE_UNRECOGNIZED_VM_OPTIONS, mainClass, dynamicParameters, redirects)); assertThat( clusterDescriptor .setupApplicationMasterContainer( mainClass, true, jobManagerProcessSpec) .getCommands() .get(0)) .isEqualTo( String.join( " ", java, jvmmem, YarnClusterDescriptor.IGNORE_UNRECOGNIZED_VM_OPTIONS, krb5, mainClass, 
dynamicParameters, redirects)); // logback only, with/out krb5 cfg.set( YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME); assertThat( clusterDescriptor .setupApplicationMasterContainer( mainClass, false, jobManagerProcessSpec) .getCommands() .get(0)) .isEqualTo( String.join( " ", java, jvmmem, YarnClusterDescriptor.IGNORE_UNRECOGNIZED_VM_OPTIONS, logfile, logback, mainClass, dynamicParameters, redirects)); cfg.set( YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME); assertThat( clusterDescriptor .setupApplicationMasterContainer( mainClass, true, jobManagerProcessSpec) .getCommands() .get(0)) .isEqualTo( String.join( " ", java, jvmmem, YarnClusterDescriptor.IGNORE_UNRECOGNIZED_VM_OPTIONS, krb5, logfile, logback, mainClass, dynamicParameters, redirects)); // log4j, with/out krb5 cfg.set( YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME); assertThat( clusterDescriptor .setupApplicationMasterContainer( mainClass, false, jobManagerProcessSpec) .getCommands() .get(0)) .isEqualTo( String.join( " ", java, jvmmem, YarnClusterDescriptor.IGNORE_UNRECOGNIZED_VM_OPTIONS, logfile, log4j, mainClass, dynamicParameters, redirects)); cfg.set( YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME); assertThat( clusterDescriptor .setupApplicationMasterContainer( mainClass, true, jobManagerProcessSpec) .getCommands() .get(0)) .isEqualTo( String.join( " ", java, jvmmem, YarnClusterDescriptor.IGNORE_UNRECOGNIZED_VM_OPTIONS, krb5, logfile, log4j, mainClass, dynamicParameters, redirects)); // logback, with/out krb5 cfg.set( YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME); assertThat( clusterDescriptor .setupApplicationMasterContainer( mainClass, false, jobManagerProcessSpec) .getCommands() .get(0)) .isEqualTo( String.join( " ", java, jvmmem, 
YarnClusterDescriptor.IGNORE_UNRECOGNIZED_VM_OPTIONS, logfile, logback, mainClass, dynamicParameters, redirects)); cfg.set( YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME); assertThat( clusterDescriptor .setupApplicationMasterContainer( mainClass, true, jobManagerProcessSpec) .getCommands() .get(0)) .isEqualTo( String.join( " ", java, jvmmem, YarnClusterDescriptor.IGNORE_UNRECOGNIZED_VM_OPTIONS, krb5, logfile, logback, mainClass, dynamicParameters, redirects)); // logback, with/out krb5, different JVM opts // IMPORTANT: Be aware that we are using side effects here to modify the created // YarnClusterDescriptor, // because we have a reference to the ClusterDescriptor's configuration which we modify // continuously cfg.set(CoreOptions.FLINK_DEFAULT_JVM_OPTIONS, defaultJvmOpts); cfg.set(CoreOptions.FLINK_JVM_OPTIONS, jvmOpts); cfg.set( YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME); assertThat( clusterDescriptor .setupApplicationMasterContainer( mainClass, false, jobManagerProcessSpec) .getCommands() .get(0)) .isEqualTo( String.join( " ", java, jvmmem, defaultJvmOpts, jvmOpts, YarnClusterDescriptor.IGNORE_UNRECOGNIZED_VM_OPTIONS, logfile, logback, mainClass, dynamicParameters, redirects)); cfg.set( YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME); assertThat( clusterDescriptor .setupApplicationMasterContainer( mainClass, true, jobManagerProcessSpec) .getCommands() .get(0)) .isEqualTo( String.join( " ", java, jvmmem, defaultJvmOpts, jvmOpts, YarnClusterDescriptor.IGNORE_UNRECOGNIZED_VM_OPTIONS, krb5, logfile, logback, mainClass, dynamicParameters, redirects)); // log4j, with/out krb5, different JVM opts // IMPORTANT: Be aware that we are using side effects here to modify the created // YarnClusterDescriptor cfg.set(CoreOptions.FLINK_DEFAULT_JM_JVM_OPTIONS, defaultJmJvmOpts); cfg.set(CoreOptions.FLINK_JM_JVM_OPTIONS, 
jmJvmOpts); cfg.set( YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME); assertThat( clusterDescriptor .setupApplicationMasterContainer( mainClass, false, jobManagerProcessSpec) .getCommands() .get(0)) .isEqualTo( String.join( " ", java, jvmmem, defaultJvmOpts, jvmOpts, defaultJmJvmOpts, jmJvmOpts, YarnClusterDescriptor.IGNORE_UNRECOGNIZED_VM_OPTIONS, logfile, log4j, mainClass, dynamicParameters, redirects)); cfg.set( YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOG4J_NAME); assertThat( clusterDescriptor .setupApplicationMasterContainer( mainClass, true, jobManagerProcessSpec) .getCommands() .get(0)) .isEqualTo( String.join( " ", java, jvmmem, defaultJvmOpts, jvmOpts, defaultJmJvmOpts, jmJvmOpts, YarnClusterDescriptor.IGNORE_UNRECOGNIZED_VM_OPTIONS, krb5, logfile, log4j, mainClass, dynamicParameters, redirects)); // now try some configurations with different yarn.container-start-command-template // IMPORTANT: Be aware that we are using side effects here to modify the created // YarnClusterDescriptor cfg.set( YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME); cfg.set( YARN_CONTAINER_START_COMMAND_TEMPLATE, "%java% 1 %jvmmem% 2 %jvmopts% 3 %logging% 4 %class% 5 %args% 6 %redirects%"); assertThat( clusterDescriptor .setupApplicationMasterContainer( mainClass, true, jobManagerProcessSpec) .getCommands() .get(0)) .isEqualTo( String.join( " ", java, "1", jvmmem, "2", defaultJvmOpts, jvmOpts, defaultJmJvmOpts, jmJvmOpts, YarnClusterDescriptor.IGNORE_UNRECOGNIZED_VM_OPTIONS, krb5, "3", logfile, logback, "4", mainClass, "5", dynamicParameters, "6", redirects)); cfg.set( YarnConfigOptionsInternal.APPLICATION_LOG_CONFIG_FILE, YarnLogConfigUtil.CONFIG_FILE_LOGBACK_NAME); cfg.set( YARN_CONTAINER_START_COMMAND_TEMPLATE, "%java% %logging% %jvmopts% %jvmmem% %class% %args% %redirects%"); // IMPORTANT: Be aware that we are using side effects here to 
modify the created // YarnClusterDescriptor assertThat( clusterDescriptor .setupApplicationMasterContainer( mainClass, true, jobManagerProcessSpec) .getCommands() .get(0)) .isEqualTo( String.join( " ", java, logfile, logback, defaultJvmOpts, jvmOpts, defaultJmJvmOpts, jmJvmOpts, YarnClusterDescriptor.IGNORE_UNRECOGNIZED_VM_OPTIONS, krb5, jvmmem, mainClass, dynamicParameters, redirects)); } finally { clusterDescriptor.close(); } }
/**
 * Serializes {@code obj} to XML on the given stream by delegating unchanged to the
 * superclass implementation.
 *
 * @deprecated kept only as a deprecated pass-through for compatibility — TODO(review):
 *             confirm the intended replacement API before pointing callers elsewhere.
 */
@Deprecated @Override public void toXML(Object obj, OutputStream out) { super.toXML(obj, out); }
// Test (JENKINS-71182): a supplementary-plane character (emoji) must round-trip through
// XStream2 serialization and deserialization without corruption.
@Issue("JENKINS-71182") @Test public void writeEmoji() throws Exception { Bar b = new Bar(); String text = "Fox 🦊"; b.s = text; StringWriter w = new StringWriter(); XStream2 xs = new XStream2(); xs.toXML(b, w); String xml = w.toString(); assertThat(xml, is("<hudson.util.XStream2Test_-Bar>\n <s>Fox 🦊</s>\n</hudson.util.XStream2Test_-Bar>")); b = (Bar) xs.fromXML(xml); assertEquals(text, b.s); }
@VisibleForTesting void validateMenu(Long parentId, String name, Long id) { MenuDO menu = menuMapper.selectByParentIdAndName(parentId, name); if (menu == null) { return; } // 如果 id 为空,说明不用比较是否为相同 id 的菜单 if (id == null) { throw exception(MENU_NAME_DUPLICATE); } if (!menu.getId().equals(id)) { throw exception(MENU_NAME_DUPLICATE); } }
// Test: validating a fresh name/id under an existing parent passes without throwing.
@Test public void testValidateMenu_success() { // mock 父子菜单 MenuDO sonMenu = createParentAndSonMenu(); // 准备参数 Long parentId = sonMenu.getParentId(); Long otherSonMenuId = randomLongId(); String otherSonMenuName = randomString(); // 调用,无需断言 menuService.validateMenu(parentId, otherSonMenuName, otherSonMenuId); }
/**
 * COUNT aggregation step: increments the running count for each non-null value; null
 * values leave the count untouched.
 */
@Override
public Long aggregate(final Object currentValue, final Long aggregateValue) {
    return currentValue == null ? aggregateValue : aggregateValue + 1;
}
// Test: null inputs never change the count, before, during, or after real values are counted.
@Test public void shouldHandleNullCount() { final CountKudaf doubleCountKudaf = getDoubleCountKudaf(); final double[] values = new double[]{3.0, 5.0, 8.0, 2.2, 3.5, 4.6, 5.0}; Long currentCount = 0L; // aggregate null before any aggregation currentCount = doubleCountKudaf.aggregate(null, currentCount); assertThat(0L, equalTo(currentCount)); // now send each value to aggregation and verify for (final double i: values) { currentCount = doubleCountKudaf.aggregate(i, currentCount); } assertThat(7L, equalTo(currentCount)); // null should not affect count currentCount = doubleCountKudaf.aggregate(null, currentCount); assertThat(7L, equalTo(currentCount)); }
// Lazily builds (and memoizes) a map from pluggable-SCM id to every (pipeline, group) pair
// that uses a material with that SCM id. Subsequent calls return the cached map instance.
// NOTE(review): this is double-checked locking on a field that is presumably not volatile —
// without volatile the publication is not safe across threads; confirm the field declaration.
public Map<String, List<Pair<PipelineConfig, PipelineConfigs>>> getPluggableSCMMaterialUsageInPipelines() { if (pluggableSCMMaterialToPipelineMap == null) { synchronized (this) { if (pluggableSCMMaterialToPipelineMap == null) { pluggableSCMMaterialToPipelineMap = new HashMap<>(); for (PipelineConfigs pipelineConfigs : this) { for (PipelineConfig pipelineConfig : pipelineConfigs) { for (PluggableSCMMaterialConfig pluggableSCMMaterialConfig : pipelineConfig.pluggableSCMMaterialConfigs()) { String scmId = pluggableSCMMaterialConfig.getScmId(); if (!pluggableSCMMaterialToPipelineMap.containsKey(scmId)) { pluggableSCMMaterialToPipelineMap.put(scmId, new ArrayList<>()); } pluggableSCMMaterialToPipelineMap.get(scmId).add(new Pair<>(pipelineConfig, pipelineConfigs)); } } } } } } return pluggableSCMMaterialToPipelineMap; }
// Test: the usage map groups pipelines by SCM id across groups, and repeated calls return the
// same cached map instance (assertSame).
@Test public void shouldGetPluggableSCMMaterialUsageInPipelines() throws Exception { PluggableSCMMaterialConfig pluggableSCMMaterialOne = new PluggableSCMMaterialConfig("scm-id-one"); PluggableSCMMaterialConfig pluggableSCMMaterialTwo = new PluggableSCMMaterialConfig("scm-id-two"); final PipelineConfig p1 = PipelineConfigMother.pipelineConfig("pipeline1", new MaterialConfigs(pluggableSCMMaterialOne, pluggableSCMMaterialTwo), new JobConfigs(new JobConfig(new CaseInsensitiveString("jobName")))); final PipelineConfig p2 = PipelineConfigMother.pipelineConfig("pipeline2", new MaterialConfigs(pluggableSCMMaterialTwo), new JobConfigs(new JobConfig(new CaseInsensitiveString("jobName")))); PipelineGroups groups = new PipelineGroups(); PipelineConfigs groupOne = new BasicPipelineConfigs(p1); PipelineConfigs groupTwo = new BasicPipelineConfigs(p2); groups.addAll(List.of(groupOne, groupTwo)); Map<String, List<Pair<PipelineConfig, PipelineConfigs>>> pluggableSCMMaterialUsageInPipelinesOne = groups.getPluggableSCMMaterialUsageInPipelines(); assertThat(pluggableSCMMaterialUsageInPipelinesOne.get("scm-id-one").size(), is(1)); assertThat(pluggableSCMMaterialUsageInPipelinesOne.get("scm-id-one"), hasItems(new Pair<>(p1, groupOne))); assertThat(pluggableSCMMaterialUsageInPipelinesOne.get("scm-id-two").size(), is(2)); assertThat(pluggableSCMMaterialUsageInPipelinesOne.get("scm-id-two"), hasItems(new Pair<>(p1, groupOne), new Pair<>(p2, groupTwo))); Map<String, List<Pair<PipelineConfig, PipelineConfigs>>> pluggableSCMMaterialUsageInPipelinesTwo = groups.getPluggableSCMMaterialUsageInPipelines(); assertSame(pluggableSCMMaterialUsageInPipelinesOne, pluggableSCMMaterialUsageInPipelinesTwo); }
// Convenience overload: replaces table placeholders in the config using the given catalog
// table with no excluded option keys (empty exclusion list).
public static ReadonlyConfig replaceTablePlaceholder( ReadonlyConfig config, CatalogTable table) { return replaceTablePlaceholder(config, table, Collections.emptyList()); }
// Test: placeholder replacement substitutes database/schema/table names per table, falling
// back to default names when the table lacks them, and expands key/field placeholders into
// both comma-joined strings and list-typed options.
@Test public void testSinkOptionsWithMultiTable() { ReadonlyConfig config = createConfig(); CatalogTable table1 = createTestTable(); CatalogTable table2 = createTestTableWithNoDatabaseAndSchemaName(); ReadonlyConfig newConfig1 = TablePlaceholder.replaceTablePlaceholder(config, table1, Arrays.asList()); ReadonlyConfig newConfig2 = TablePlaceholder.replaceTablePlaceholder(config, table2, Arrays.asList()); Assertions.assertEquals("xyz_my-database_test", newConfig1.get(DATABASE)); Assertions.assertEquals("xyz_my-schema_test", newConfig1.get(SCHEMA)); Assertions.assertEquals("xyz_my-table_test", newConfig1.get(TABLE)); Assertions.assertEquals("f1,f2", newConfig1.get(PRIMARY_KEY)); Assertions.assertEquals("f3,f4", newConfig1.get(UNIQUE_KEY)); Assertions.assertEquals("f1,f2,f3,f4,f5", newConfig1.get(FIELD_NAMES)); Assertions.assertEquals(Arrays.asList("f1", "f2"), newConfig1.get(PRIMARY_KEY_ARRAY)); Assertions.assertEquals(Arrays.asList("f3", "f4"), newConfig1.get(UNIQUE_KEY_ARRAY)); Assertions.assertEquals( Arrays.asList("f1", "f2", "f3", "f4", "f5"), newConfig1.get(FIELD_NAMES_ARRAY)); Assertions.assertEquals("xyz_default_db_test", newConfig2.get(DATABASE)); Assertions.assertEquals("xyz_default_schema_test", newConfig2.get(SCHEMA)); Assertions.assertEquals("xyz_default_table_test", newConfig2.get(TABLE)); Assertions.assertEquals("f1,f2", newConfig2.get(PRIMARY_KEY)); Assertions.assertEquals("f3,f4", newConfig2.get(UNIQUE_KEY)); Assertions.assertEquals("f1,f2,f3,f4,f5", newConfig2.get(FIELD_NAMES)); Assertions.assertEquals(Arrays.asList("f1", "f2"), newConfig2.get(PRIMARY_KEY_ARRAY)); Assertions.assertEquals(Arrays.asList("f3", "f4"), newConfig2.get(UNIQUE_KEY_ARRAY)); Assertions.assertEquals( Arrays.asList("f1", "f2", "f3", "f4", "f5"), newConfig2.get(FIELD_NAMES_ARRAY)); }
/**
 * Deletes the record for {@code key} as of {@code timestamp} via the wrapped store and
 * adapts the result: a null from the inner store stays null, otherwise the value/timestamp
 * pair is repackaged as a {@link VersionedRecord}.
 */
@Override
public VersionedRecord<V> delete(final K key, final long timestamp) {
    final ValueAndTimestamp<V> deleted = internal.delete(key, timestamp);
    if (deleted == null) {
        return null;
    }
    return new VersionedRecord<>(deleted.value(), deleted.timestamp());
}
// Test: deleting with a null key is rejected with a NullPointerException.
@Test public void shouldThrowNullPointerOnDeleteIfKeyIsNull() { assertThrows(NullPointerException.class, () -> store.delete(null, TIMESTAMP)); }
// Truth assertion: fails when the subject Optional is null (distinct message including the
// actual value) or when it is empty; passes only for a present Optional.
public void isPresent() { if (actual == null) { failWithActual(simpleFact("expected present optional")); } else if (!actual.isPresent()) { failWithoutActual(simpleFact("expected to be present")); } }
// Test: asserting isPresent() on a null Optional reports both the expectation and the actual.
@Test public void isPresentFailingNull() { expectFailureWhenTestingThat(null).isPresent(); assertFailureKeys("expected present optional", "but was"); }
/**
 * Parses a delimited string of integers into an {@code int} array.
 *
 * @param nums      the delimited numbers, e.g. {@code "1,2,3"}; null or empty yields an
 *                  empty array (previously this threw a NullPointerException for null)
 * @param separator separator characters; every character in this string delimits tokens,
 *                  and empty tokens between adjacent separators are skipped
 * @return the parsed integers, never null
 * @throws NumberFormatException if any token is not a valid int
 */
public static int[] parseInts(String nums, String separator) {
    // Guard: the previous StringUtils.split(null, ...) returned null and NPE'd below.
    if (nums == null || nums.isEmpty()) {
        return new int[0];
    }
    // StringTokenizer matches StringUtils.split semantics exactly: each character of the
    // separator string is a delimiter and runs of delimiters produce no empty tokens.
    java.util.StringTokenizer tokenizer = new java.util.StringTokenizer(nums, separator);
    int[] ints = new int[tokenizer.countTokens()];
    for (int i = 0; i < ints.length; i++) {
        ints[i] = Integer.parseInt(tokenizer.nextToken());
    }
    return ints;
}
// Test: a simple comma-separated triple parses into the expected int array.
@Test public void parseInts() { Assert.assertArrayEquals(new int[] { 1, 2, 3 }, CommonUtils.parseInts("1,2,3", ",")); }
// Returns the alias of the UPDATE statement's table source, or null when no alias was given.
@Override public String getTableAlias() { return ast.getTableSource().getAlias(); }
// Test: no alias yields null; "update t t1 ..." yields alias "t1".
@Test public void testGetTableAlias() { String sql = "update t set a = ?, b = ?, c = ?"; SQLStatement sqlStatement = getSQLStatement(sql); SqlServerUpdateRecognizer recognizer = new SqlServerUpdateRecognizer(sql, sqlStatement); Assertions.assertNull(recognizer.getTableAlias()); sql = "update t t1 set a = ?"; sqlStatement = getSQLStatement(sql); recognizer = new SqlServerUpdateRecognizer(sql, sqlStatement); Assertions.assertEquals("t1", recognizer.getTableAlias()); }
/**
 * Decides whether this runner can execute the given job: there must be an IoC job activator,
 * the job must not target a static field, and the activator must actually be able to resolve
 * an instance of the job's class.
 */
@Override
public boolean supports(Job job) {
    // No activator registered means no IoC container to resolve the job instance from.
    if (jobActivator == null) {
        return false;
    }
    JobDetails jobDetails = job.getJobDetails();
    // Static-field jobs are handled by a different runner.
    if (jobDetails.hasStaticFieldName()) {
        return false;
    }
    return jobActivator.activateJob(toClass(jobDetails.getClassName())) != null;
}
// Test: with a null activator the IoC runner declines every job.
@Test void doesNotSupportJobIfNoJobActivatorIsRegistered() { backgroundIoCJobWithIocRunner = new BackgroundJobWithIocRunner(null); Job job = anEnqueuedJob() .withJobDetails(defaultJobDetails()) .build(); assertThat(backgroundIoCJobWithIocRunner.supports(job)).isFalse(); }
/**
 * Opens the AMQP connection via the factory and creates a channel on it.
 *
 * @throws IllegalStateException if already connected, or if the connection attempt times out
 *                               (the TimeoutException is preserved as the cause)
 * @throws IOException           if the connection or channel cannot be established
 */
@Override
public void connect() throws IllegalStateException, IOException {
    if (isConnected()) {
        throw new IllegalStateException("Already connected");
    }
    try {
        connection = connectionFactory.newConnection();
    } catch (TimeoutException e) {
        throw new IllegalStateException(e);
    }
    try {
        channel = connection.createChannel();
    } catch (IOException | RuntimeException e) {
        // Fix: previously a failure here leaked the freshly opened connection.
        try {
            connection.close();
        } catch (Exception closeFailure) {
            e.addSuppressed(closeFailure);
        }
        throw e;
    }
}
// Test: connecting opens at most one connection and creates at most one channel on it.
@Test public void shouldConnectToGraphiteServer() throws Exception { graphite.connect(); verify(connectionFactory, atMost(1)).newConnection(); verify(connection, atMost(1)).createChannel(); }
// Fans the project-branches-changed event out to every registered listener. Rejects a null
// project set with an NPE, short-circuits on an empty set, and wraps each listener call via
// safelyCallListener so one failing listener does not prevent the others from being notified.
@Override public void onProjectBranchesChanged(Set<Project> projects, Set<String> impactedBranches) { checkNotNull(projects, "projects can't be null"); if (projects.isEmpty()) { return; } Arrays.stream(listeners) .forEach(safelyCallListener(listener -> listener.onProjectBranchesChanged(projects, impactedBranches))); }
// Test: a null project set triggers an NPE with the documented message.
@Test public void onProjectBranchesChanged_throws_NPE_if_set_is_null() { assertThatThrownBy(() -> underTestWithListeners.onProjectBranchesChanged(null, null)) .isInstanceOf(NullPointerException.class) .hasMessage("projects can't be null"); }
// Brave producer instrumentation for Spring AMQP: starts a PRODUCER "publish" span for the
// outgoing message and injects its context into the message headers. The span in scope takes
// precedence over any stale trace headers already on the message (which are extracted and
// cleared only when no context is current). Non-noop spans are started and finished with a
// single clock read so duration is zero-cost.
@Override public Message postProcessMessage(Message message) { MessageProducerRequest request = new MessageProducerRequest(message); TraceContext maybeParent = currentTraceContext.get(); // Unlike message consumers, we try current span before trying extraction. This is the proper // order because the span in scope should take precedence over a potentially stale header entry. // // NOTE: Brave instrumentation used properly does not result in stale header entries, as we // always clear message headers after reading. Span span; if (maybeParent == null) { TraceContextOrSamplingFlags extracted = springRabbitTracing.extractAndClearTraceIdHeaders(extractor, request, message); span = springRabbitTracing.nextMessagingSpan(sampler, request, extracted); } else { // If we have a span in scope assume headers were cleared before span = tracer.newChild(maybeParent); } if (!span.isNoop()) { span.kind(PRODUCER).name("publish"); if (remoteServiceName != null) span.remoteServiceName(remoteServiceName); // incur timestamp overhead only once long timestamp = tracing.clock(span.context()).currentTimeMicroseconds(); span.start(timestamp).finish(timestamp); } injector.inject(span.context(), request); return message; }
// With no span in scope, the b3 header on the message is used as the parent and then replaced.
@Test
void should_resume_headers() {
  Message message = MessageBuilder.withBody(new byte[0]).build();
  message.getMessageProperties().setHeader("b3", B3SingleFormat.writeB3SingleFormat(parent));

  Message postProcessMessage = tracingMessagePostProcessor.postProcessMessage(message);

  assertThat(spans.get(0).parentId()).isEqualTo(parent.spanIdString());

  Map<String, Object> headers = postProcessMessage.getMessageProperties().getHeaders();
  assertThat(headers.get("b3").toString()).endsWith("-" + spans.get(0).id() + "-1");
}
/**
 * Applies @Job annotation metadata (name, retries, labels) to a job while it
 * is being created. Each setter presumably falls back to defaults when the
 * annotation is absent — confirm against the set* helpers.
 */
@Override
public void onCreating(AbstractJob job) {
    JobDetails details = job.getJobDetails();
    Optional<Job> annotation = getJobAnnotation(details);
    setJobName(job, annotation);
    setAmountOfRetries(job, annotation);
    setLabels(job, annotation);
}
// A name set explicitly via the JobBuilder must survive onCreating (not be overwritten).
@Test
void testDisplayNameIsUsedIfProvidedByJobBuilder() {
    Job job = anEnqueuedJob()
        .withName("My job name")
        .withJobDetails(jobDetails()
            .withClassName(TestService.class)
            .withMethodName("doWork")
            .withJobParameter(2))
        .build();

    defaultJobFilter.onCreating(job);

    assertThat(job).hasJobName("My job name");
}
/**
 * Builds a qualified-data-source state-change dispatch event from a registry
 * data change. Returns empty when the event carries no value or its key does
 * not identify a qualified data source.
 */
@Override
public Optional<DispatchEvent> build(final DataChangedEvent event) {
    if (Strings.isNullOrEmpty(event.getValue())) {
        return Optional.empty();
    }
    Optional<QualifiedDataSource> dataSource = QualifiedDataSourceNode.extractQualifiedDataSource(event.getKey());
    if (!dataSource.isPresent()) {
        return Optional.empty();
    }
    // Deserialize the YAML payload and swap it into the domain state object.
    YamlQualifiedDataSourceState yamlState = YamlEngine.unmarshal(event.getValue(), YamlQualifiedDataSourceState.class);
    QualifiedDataSourceState state = new YamlQualifiedDataSourceStateSwapper().swapToObject(yamlState);
    return Optional.of(new QualifiedDataSourceStateEvent(dataSource.get(), state));
}
// An empty node value should produce no dispatch event.
@Test
void assertCreateEmptyEvent() {
    Optional<DispatchEvent> actual = new QualifiedDataSourceDispatchEventBuilder().build(
            new DataChangedEvent("/nodes/qualified_data_sources/replica_query_db.readwrite_ds.replica_ds_0", "", Type.ADDED));
    assertFalse(actual.isPresent());
}
/**
 * Parses CLI arguments and runs the fair-scheduler to capacity-scheduler
 * configuration conversion, mapping each failure category to a logged
 * message.
 *
 * @return 0 on success (or when help / no args was requested), -1 on any
 *     parsing, precondition, conversion, or verification failure
 */
int parseAndConvert(String[] args) throws Exception {
  Options opts = createOptions();
  int retVal = 0;

  try {
    if (args.length == 0) {
      LOG.info("Missing command line arguments");
      printHelp(opts);
      return 0;
    }

    CommandLine cliParser = new GnuParser().parse(opts, args);

    if (cliParser.hasOption(CliOption.HELP.shortSwitch)) {
      printHelp(opts);
      return 0;
    }

    FSConfigToCSConfigConverter converter = prepareAndGetConverter(cliParser);

    converter.convert(converterParams);

    // Validate the generated config unless there is no output directory
    // (presumably console mode) or verification was explicitly skipped.
    String outputDir = converterParams.getOutputDirectory();
    boolean skipVerification = cliParser.hasOption(CliOption.SKIP_VERIFICATION.shortSwitch);
    if (outputDir != null && !skipVerification) {
      validator.validateConvertedConfig(
          converterParams.getOutputDirectory());
    }
  } catch (ParseException e) {
    String msg = "Options parsing failed: " + e.getMessage();
    logAndStdErr(e, msg);
    printHelp(opts);
    retVal = -1;
  } catch (PreconditionException e) {
    String msg = "Cannot start FS config conversion due to the following"
        + " precondition error: " + e.getMessage();
    handleException(e, msg);
    retVal = -1;
  } catch (UnsupportedPropertyException e) {
    String msg = "Unsupported property/setting encountered during FS config "
        + "conversion: " + e.getMessage();
    handleException(e, msg);
    retVal = -1;
  } catch (ConversionException | IllegalArgumentException e) {
    String msg = "Fatal error during FS config conversion: " + e.getMessage();
    handleException(e, msg);
    retVal = -1;
  } catch (VerificationException e) {
    Throwable cause = e.getCause();
    String msg = "Verification failed: " + e.getCause().getMessage();
    conversionOptions.handleVerificationFailure(cause, msg);
    retVal = -1;
  }

  // Always signal the end of parsing, even on failure.
  conversionOptions.handleParsingFinished();

  return retVal;
}
// Missing output-dir / console-mode option should fail with -1 and an explanatory error.
@Test
public void testMissingOutputDirArgument() throws Exception {
  setupFSConfigConversionFiles(true);
  FSConfigToCSConfigArgumentHandler argumentHandler = createArgumentHandler();

  String[] args = new String[] {"-y", FSConfigConverterTestCommons.YARN_SITE_XML};
  int retVal = argumentHandler.parseAndConvert(args);

  assertEquals("Return value", -1, retVal);
  assertTrue("Error content missing", fsTestCommons.getErrContent()
      .toString()
      .contains("Output directory or console mode was not defined"));
}
/**
 * Imports every configured SARIF report. A missing report file aborts the
 * analysis; any other per-report failure is logged as a warning and the
 * remaining reports are still processed. A summary is displayed for each
 * successfully processed report.
 */
@Override
public void execute(SensorContext context) {
    Map<String, SarifImportResults> resultsByReportPath = new HashMap<>();
    for (String reportPath : loadReportPaths()) {
        try {
            resultsByReportPath.put(reportPath, processReport(context, reportPath));
        } catch (NoSuchFileException e) {
            throw MessageException.of(format("SARIF report file not found: %s", e.getFile()));
        } catch (Exception e) {
            LOG.warn("Failed to process SARIF report from file '{}', error: '{}'", reportPath, e.getMessage());
        }
    }
    resultsByReportPath.forEach(SarifIssuesImportSensor::displayResults);
}
// A report that fails to deserialize is skipped with a warning; other reports are still imported.
@Test
public void execute_whenDeserializationFails_shouldSkipReport() throws NoSuchFileException {
  sensorSettings.setProperty("sonar.sarifReportPaths", SARIF_REPORT_PATHS_PARAM);
  failDeserializingReport(FILE_1);
  ReportAndResults reportAndResults2 = mockSuccessfulReportAndResults(FILE_2);

  SarifIssuesImportSensor sensor = new SarifIssuesImportSensor(sarifSerializer, sarifImporter, sensorSettings.asConfig());
  sensor.execute(sensorContext);

  verify(sarifImporter).importSarif(reportAndResults2.getSarifReport());
  assertThat(logTester.logs(Level.WARN)).contains("Failed to process SARIF report from file 'path/to/sarif/file.sarif', error: 'deserialization failed'");
  assertSummaryIsCorrectlyDisplayedForSuccessfulFile(FILE_2, reportAndResults2.getSarifImportResults());
}
/**
 * Parses a single "field:value" filter expression against the allowed
 * attributes and converts it to its MongoDB Bson form.
 */
public Bson parseSingleExpression(final String filterExpression, final List<EntityAttribute> attributes) {
    return singleFilterParser.parseSingleExpression(filterExpression, attributes).toBson();
}
// Filtering on a field absent from the attribute list must be rejected.
@Test
void throwsExceptionOnFieldThatDoesNotExistInAttributeList() {
    assertThrows(IllegalArgumentException.class,
            () -> toTest.parseSingleExpression("strange_field:blabla",
                    List.of(EntityAttribute.builder()
                            .id("owner")
                            .title("Owner")
                            .filterable(true)
                            .build())
            ));
}
/**
 * Pages through system-scoped (NULL tenant) widget types matching the filter.
 * The deprecated filter collapses to two repository flags, and a null/empty
 * widget-type list disables type filtering.
 */
@Override
public PageData<WidgetTypeInfo> findSystemWidgetTypes(WidgetTypeFilter widgetTypeFilter, PageLink pageLink) {
    DeprecatedFilter deprecatedFilter = widgetTypeFilter.getDeprecatedFilter();
    boolean filterByDeprecated = !DeprecatedFilter.ALL.equals(deprecatedFilter);
    boolean deprecatedOnly = DeprecatedFilter.DEPRECATED.equals(deprecatedFilter);
    boolean noTypeFilter = widgetTypeFilter.getWidgetTypes() == null || widgetTypeFilter.getWidgetTypes().isEmpty();
    return DaoUtil.toPageData(widgetTypeInfoRepository.findSystemWidgetTypes(
            NULL_UUID,
            pageLink.getTextSearch(),
            widgetTypeFilter.isFullSearch(),
            filterByDeprecated,
            deprecatedOnly,
            noTypeFilter,
            widgetTypeFilter.getWidgetTypes() == null ? Collections.emptyList() : widgetTypeFilter.getWidgetTypes(),
            widgetTypeFilter.isScadaFirst(),
            DaoUtil.toPageable(pageLink, WidgetTypeInfoEntity.SEARCH_COLUMNS_MAP)));
}
// System widget types can be filtered by widget-type list and by full-text search.
@Test
public void testFindSystemWidgetTypes() {
    PageData<WidgetTypeInfo> widgetTypes = widgetTypeDao.findSystemWidgetTypes(
            WidgetTypeFilter.builder()
                    .tenantId(TenantId.SYS_TENANT_ID)
                    .fullSearch(true)
                    .deprecatedFilter(DeprecatedFilter.ALL)
                    .widgetTypes(Collections.singletonList("static")).build(),
            new PageLink(1024, 0, "TYPE_DESCRIPTION", new SortOrder("createdTime")));
    assertEquals(1, widgetTypes.getData().size());
    assertEquals(new WidgetTypeInfo(widgetTypeList.get(1)), widgetTypes.getData().get(0));

    widgetTypes = widgetTypeDao.findSystemWidgetTypes(
            WidgetTypeFilter.builder()
                    .tenantId(TenantId.SYS_TENANT_ID)
                    .fullSearch(true)
                    .deprecatedFilter(DeprecatedFilter.ALL)
                    .widgetTypes(Collections.emptyList()).build(),
            new PageLink(1024, 0, "hfgfd tag2_2 ghg", new SortOrder("createdTime")));
    assertEquals(1, widgetTypes.getData().size());
    assertEquals(new WidgetTypeInfo(widgetTypeList.get(2)), widgetTypes.getData().get(0));
}
/**
 * Writes all entries to the underlying physical store with this segment's
 * key prefix applied, so entries of different segments sharing the store
 * cannot collide.
 */
@Override
public synchronized void putAll(final List<KeyValue<Bytes, byte[]>> entries) {
    final List<KeyValue<Bytes, byte[]>> entriesWithPrefixedKeys = entries.stream()
        .map(entry -> new KeyValue<>(prefixKeyFormatter.addPrefix(entry.key), entry.value))
        .collect(Collectors.toList());
    physicalStore.putAll(entriesWithPrefixedKeys);
}
// putAll should prefix keys per segment so identical logical keys don't collide across segments.
@Test
public void shouldPutAll() {
    final List<KeyValue<Bytes, byte[]>> segment0Records = new ArrayList<>();
    segment0Records.add(new KeyValue<>(
        new Bytes(serializeBytes("shared")), serializeBytes("v1")));
    segment0Records.add(new KeyValue<>(
        new Bytes(serializeBytes("segment0_only")), serializeBytes("foo")));

    final List<KeyValue<Bytes, byte[]>> segment1Records = new ArrayList<>();
    segment1Records.add(new KeyValue<>(
        new Bytes(serializeBytes("shared")), serializeBytes("v2")));
    segment1Records.add(new KeyValue<>(
        new Bytes(serializeBytes("segment1_only")), serializeBytes("bar")));

    final List<KeyValue<Bytes, byte[]>> negativeSegmentRecords = new ArrayList<>();
    negativeSegmentRecords.add(new KeyValue<>(
        new Bytes(serializeBytes("shared")), serializeBytes("v3")));
    negativeSegmentRecords.add(new KeyValue<>(
        new Bytes(serializeBytes("negative_segment_only")), serializeBytes("baz")));

    segment0.putAll(segment0Records);
    segment1.putAll(segment1Records);
    negativeIdSegment.putAll(negativeSegmentRecords);

    // "shared" resolves to a different value in each segment.
    assertEquals("v1", getAndDeserialize(segment0, "shared"));
    assertEquals("v2", getAndDeserialize(segment1, "shared"));
    assertEquals("v3", getAndDeserialize(negativeIdSegment, "shared"));
    assertEquals("foo", getAndDeserialize(segment0, "segment0_only"));
    assertNull(getAndDeserialize(segment1, "segment0_only"));
    assertNull(getAndDeserialize(negativeIdSegment, "segment0_only"));
    assertNull(getAndDeserialize(segment0, "segment1_only"));
    assertEquals("bar", getAndDeserialize(segment1, "segment1_only"));
    assertNull(getAndDeserialize(negativeIdSegment, "segment1_only"));
    assertNull(getAndDeserialize(segment0, "negative_segment_only"));
    assertNull(getAndDeserialize(segment1, "negative_segment_only"));
    assertEquals("baz", getAndDeserialize(negativeIdSegment, "negative_segment_only"));
}
/**
 * Scans the table's ideal state for partition groups that have no CONSUMING
 * segment, and reports how many are missing, how many belong to brand-new
 * partition groups, and the longest duration a partition has gone without a
 * consuming segment. When stream offsets are available, a partition with only
 * completed segments is counted missing only if the stream still has
 * unconsumed messages past its last completed segment's end offset.
 */
@VisibleForTesting
MissingSegmentInfo findMissingSegments(Map<String, Map<String, String>> idealStateMap, Instant now) {
  // create the maps
  Map<Integer, LLCSegmentName> partitionGroupIdToLatestConsumingSegmentMap = new HashMap<>();
  Map<Integer, LLCSegmentName> partitionGroupIdToLatestCompletedSegmentMap = new HashMap<>();
  idealStateMap.forEach((segmentName, instanceToStatusMap) -> {
    LLCSegmentName llcSegmentName = LLCSegmentName.of(segmentName);
    // Skip the uploaded realtime segments that don't conform to llc naming
    if (llcSegmentName != null) {
      if (instanceToStatusMap.containsValue(SegmentStateModel.CONSUMING)) {
        updateMap(partitionGroupIdToLatestConsumingSegmentMap, llcSegmentName);
      } else if (instanceToStatusMap.containsValue(SegmentStateModel.ONLINE)) {
        updateMap(partitionGroupIdToLatestCompletedSegmentMap, llcSegmentName);
      }
    }
  });
  MissingSegmentInfo missingSegmentInfo = new MissingSegmentInfo();
  if (!_partitionGroupIdToLargestStreamOffsetMap.isEmpty()) {
    _partitionGroupIdToLargestStreamOffsetMap.forEach((partitionGroupId, largestStreamOffset) -> {
      if (!partitionGroupIdToLatestConsumingSegmentMap.containsKey(partitionGroupId)) {
        LLCSegmentName latestCompletedSegment = partitionGroupIdToLatestCompletedSegmentMap.get(partitionGroupId);
        if (latestCompletedSegment == null) {
          // There's no consuming or completed segment for this partition group. Possibilities:
          // 1) it's a new partition group that has not yet been detected
          // 2) the first consuming segment has been deleted from ideal state manually
          missingSegmentInfo._newPartitionGroupCount++;
          missingSegmentInfo._totalCount++;
        } else {
          // Completed segment is available, but there's no consuming segment.
          // Note that there is no problem in case the partition group has reached its end of life.
          SegmentZKMetadata segmentZKMetadata = _segmentMetadataFetcher
              .fetchSegmentZkMetadata(_realtimeTableName, latestCompletedSegment.getSegmentName());
          StreamPartitionMsgOffset completedSegmentEndOffset =
              _streamPartitionMsgOffsetFactory.create(segmentZKMetadata.getEndOffset());
          if (completedSegmentEndOffset.compareTo(largestStreamOffset) < 0) {
            // there are unconsumed messages available on the stream
            missingSegmentInfo._totalCount++;
            updateMaxDurationInfo(missingSegmentInfo, partitionGroupId, segmentZKMetadata.getCreationTime(), now);
          }
        }
      }
    });
  } else {
    // No stream offsets available: every partition group with only completed
    // segments is counted as missing a consuming segment.
    partitionGroupIdToLatestCompletedSegmentMap.forEach((partitionGroupId, latestCompletedSegment) -> {
      if (!partitionGroupIdToLatestConsumingSegmentMap.containsKey(partitionGroupId)) {
        missingSegmentInfo._totalCount++;
        long segmentCompletionTimeMillis = _segmentMetadataFetcher
            .fetchSegmentCompletionTime(_realtimeTableName, latestCompletedSegment.getSegmentName());
        updateMaxDurationInfo(missingSegmentInfo, partitionGroupId, segmentCompletionTimeMillis, now);
      }
    });
  }
  return missingSegmentInfo;
}
// Partitions either have a CONSUMING segment or have fully consumed the stream
// (end of life) — nothing should be reported missing.
@Test
public void noMissingConsumingSegmentsScenario3() {
  // scenario 3: no missing segments and there's no exception in connecting to stream
  // two partitions have reached end of life
  Map<String, Map<String, String>> idealStateMap = new HashMap<>();
  // partition 0
  idealStateMap.put("tableA__0__0__20220601T0900Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  idealStateMap.put("tableA__0__1__20220601T1200Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  idealStateMap.put("tableA__0__2__20220601T1500Z", ImmutableMap.of("ServerX", "CONSUMING", "ServerY", "CONSUMING"));
  // partition 1 (has reached end of life)
  idealStateMap.put("tableA__1__0__20220601T0900Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  idealStateMap.put("tableA__1__1__20220601T1200Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  // partition 2
  idealStateMap.put("tableA__2__0__20220601T0900Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  idealStateMap.put("tableA__2__1__20220601T1200Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  idealStateMap.put("tableA__2__2__20220601T1500Z", ImmutableMap.of("ServerX", "CONSUMING", "ServerY", "CONSUMING"));
  // partition 3 (has reached end of life)
  idealStateMap.put("tableA__3__0__20220601T0900Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));
  idealStateMap.put("tableA__3__1__20220601T1200Z", ImmutableMap.of("ServerX", "ONLINE", "ServerY", "ONLINE"));

  // Largest stream offsets equal the completed segments' end offsets for
  // partitions 1 and 3, so they count as fully consumed.
  Map<Integer, StreamPartitionMsgOffset> partitionGroupIdToLargestStreamOffsetMap = ImmutableMap.of(
      0, new LongMsgOffset(1000),
      1, new LongMsgOffset(701),
      2, new LongMsgOffset(1002),
      3, new LongMsgOffset(703)
  );

  // setup segment metadata fetcher
  SegmentZKMetadata m1 = mock(SegmentZKMetadata.class);
  when(m1.getEndOffset()).thenReturn("701");
  SegmentZKMetadata m3 = mock(SegmentZKMetadata.class);
  when(m3.getEndOffset()).thenReturn("703");
  MissingConsumingSegmentFinder.SegmentMetadataFetcher metadataFetcher =
      mock(MissingConsumingSegmentFinder.SegmentMetadataFetcher.class);
  when(metadataFetcher.fetchSegmentZkMetadata("tableA", "tableA__1__1__20220601T1200Z")).thenReturn(m1);
  when(metadataFetcher.fetchSegmentZkMetadata("tableA", "tableA__3__1__20220601T1200Z")).thenReturn(m3);

  Instant now = Instant.parse("2022-06-01T18:00:00.00Z");
  MissingConsumingSegmentFinder finder =
      new MissingConsumingSegmentFinder("tableA", metadataFetcher, partitionGroupIdToLargestStreamOffsetMap,
          _offsetFactory);
  MissingConsumingSegmentFinder.MissingSegmentInfo info = finder.findMissingSegments(idealStateMap, now);
  assertEquals(info._totalCount, 0);
  assertEquals(info._newPartitionGroupCount, 0);
  assertEquals(info._maxDurationInMinutes, 0);
}
/**
 * Parses command-line options into a {@link CommandLineOptions}, expanding
 * any params files first. The first argument not starting with "-" and
 * everything after it are treated as file names.
 *
 * @throws IllegalArgumentException on an unrecognized flag
 */
static CommandLineOptions parse(Iterable<String> options) {
  CommandLineOptions.Builder optionsBuilder = CommandLineOptions.builder();
  List<String> expandedOptions = new ArrayList<>();
  expandParamsFiles(options, expandedOptions);
  Iterator<String> it = expandedOptions.iterator();
  while (it.hasNext()) {
    String option = it.next();
    if (!option.startsWith("-")) {
      // First positional argument: it and all remaining arguments are files.
      optionsBuilder.filesBuilder().add(option).addAll(it);
      break;
    }
    // Split "--flag=value"; a bare flag takes its value from the next
    // argument when required (see getValue).
    String flag;
    String value;
    int idx = option.indexOf('=');
    if (idx >= 0) {
      flag = option.substring(0, idx);
      value = option.substring(idx + 1);
    } else {
      flag = option;
      value = null;
    }
    // NOTE: update usage information in UsageException when new flags are added
    switch (flag) {
      case "-i":
      case "-r":
      case "-replace":
      case "--replace":
        optionsBuilder.inPlace(true);
        break;
      case "--lines":
      case "-lines":
      case "--line":
      case "-line":
        parseRangeSet(optionsBuilder.linesBuilder(), getValue(flag, it, value));
        break;
      case "--offset":
      case "-offset":
        optionsBuilder.addOffset(parseInteger(it, flag, value));
        break;
      case "--length":
      case "-length":
        optionsBuilder.addLength(parseInteger(it, flag, value));
        break;
      case "--aosp":
      case "-aosp":
      case "-a":
        optionsBuilder.aosp(true);
        break;
      case "--version":
      case "-version":
      case "-v":
        optionsBuilder.version(true);
        break;
      case "--help":
      case "-help":
      case "-h":
        optionsBuilder.help(true);
        break;
      case "--fix-imports-only":
        optionsBuilder.fixImportsOnly(true);
        break;
      case "--skip-sorting-imports":
        optionsBuilder.sortImports(false);
        break;
      case "--skip-removing-unused-imports":
        optionsBuilder.removeUnusedImports(false);
        break;
      case "--skip-reflowing-long-strings":
        optionsBuilder.reflowLongStrings(false);
        break;
      case "--skip-javadoc-formatting":
        optionsBuilder.formatJavadoc(false);
        break;
      case "-":
        // Read source from standard input.
        optionsBuilder.stdin(true);
        break;
      case "-n":
      case "--dry-run":
        optionsBuilder.dryRun(true);
        break;
      case "--set-exit-if-changed":
        optionsBuilder.setExitIfChanged(true);
        break;
      case "-assume-filename":
      case "--assume-filename":
        optionsBuilder.assumeFilename(getValue(flag, it, value));
        break;
      default:
        throw new IllegalArgumentException("unexpected flag: " + flag);
    }
  }
  return optionsBuilder.build();
}
// "-v" should enable the version flag.
@Test
public void version() {
  assertThat(CommandLineOptionsParser.parse(Arrays.asList("-v")).version()).isTrue();
}
/**
 * Creates an evaluator for the given applied transform; the input bundle is
 * not consulted. The raw-type cast bridges the wildcard application type to
 * createEvaluator's generic signature — hence the suppressed warnings.
 */
@SuppressWarnings({"unchecked", "rawtypes"})
@Override
public @Nullable <InputT> TransformEvaluator<InputT> forApplication(
    AppliedPTransform<?, ?, ?> application, CommittedBundle<?> inputBundle) {
  return createEvaluator((AppliedPTransform) application);
}
// With reuse factor 1.0, exactly one reader is created, reused across bundles,
// and closed once the source is exhausted.
@Test
public void evaluatorReusesReaderAndClosesAtTheEnd() throws Exception {
  int numElements = 1000;
  ContiguousSet<Long> elems =
      ContiguousSet.create(Range.openClosed(0L, (long) numElements), DiscreteDomain.longs());
  TestUnboundedSource<Long> source =
      new TestUnboundedSource<>(BigEndianLongCoder.of(), elems.toArray(new Long[0]));
  source.advanceWatermarkToInfinity = true;

  PCollection<Long> pcollection = p.apply(Read.from(source));
  SplittableParDo.convertReadBasedSplittableDoFnsToPrimitiveReads(p);
  DirectGraph graph = DirectGraphs.getGraph(p);
  AppliedPTransform<?, ?, ?> sourceTransform = graph.getProducer(pcollection);

  when(context.createRootBundle()).thenReturn(bundleFactory.createRootBundle());
  UncommittedBundle<Long> output = mock(UncommittedBundle.class);
  when(context.createBundle(pcollection)).thenReturn(output);

  WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>> shard =
      WindowedValue.valueInGlobalWindow(
          UnboundedSourceShard.unstarted(source, NeverDeduplicator.create()));
  CommittedBundle<UnboundedSourceShard<Long, TestCheckpointMark>> inputBundle =
      bundleFactory
          .<UnboundedSourceShard<Long, TestCheckpointMark>>createRootBundle()
          .add(shard)
          .commit(Instant.now());
  UnboundedReadEvaluatorFactory factory =
      new UnboundedReadEvaluatorFactory(context, p.getOptions(), 1.0 /* Always reuse */);
  new UnboundedReadEvaluatorFactory.InputProvider(context, p.getOptions())
      .getInitialInputs(sourceTransform, 1);
  CommittedBundle<UnboundedSourceShard<Long, TestCheckpointMark>> residual = inputBundle;
  // Keep re-evaluating the residual shard until the source is drained.
  do {
    TransformEvaluator<UnboundedSourceShard<Long, TestCheckpointMark>> evaluator =
        factory.forApplication(sourceTransform, residual);
    evaluator.processElement(Iterables.getOnlyElement(residual.getElements()));
    TransformResult<UnboundedSourceShard<Long, TestCheckpointMark>> result =
        evaluator.finishBundle();
    residual =
        inputBundle.withElements(
            (Iterable<WindowedValue<UnboundedSourceShard<Long, TestCheckpointMark>>>)
                result.getUnprocessedElements());
  } while (!Iterables.isEmpty(residual.getElements()));

  verify(output, times(numElements)).add(any());
  assertThat(TestUnboundedSource.readerCreatedCount, equalTo(1));
  assertThat(TestUnboundedSource.readerClosedCount, equalTo(1));
}
/**
 * Two CreateConnector statements are equal when name, config, notExists and
 * type all match; source location is deliberately not part of equality.
 */
@Override
public boolean equals(final Object o) {
    if (this == o) {
        return true;
    }
    // Exact-class comparison (not instanceof) keeps equality symmetric with subclasses.
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    final CreateConnector other = (CreateConnector) o;
    return Objects.equals(name, other.name)
        && Objects.equals(config, other.config)
        && Objects.equals(notExists, other.notExists)
        && Objects.equals(type, other.type);
}
// Location is ignored; name, config, type and notExists all participate in equality.
@Test
public void testEquals() {
  new EqualsTester()
      .addEqualityGroup(
          new CreateConnector(Optional.of(SOME_LOCATION), NAME, CONFIG, CreateConnector.Type.SOURCE, false),
          new CreateConnector(Optional.of(OTHER_LOCATION), NAME, CONFIG, CreateConnector.Type.SOURCE, false),
          new CreateConnector(NAME, CONFIG, CreateConnector.Type.SOURCE, false)
      )
      .addEqualityGroup(
          new CreateConnector(OTHER_NAME, CONFIG, CreateConnector.Type.SOURCE, false)
      )
      .addEqualityGroup(
          new CreateConnector(NAME, OTHER_CONFIG, CreateConnector.Type.SOURCE, false)
      )
      .addEqualityGroup(
          new CreateConnector(NAME, CONFIG, CreateConnector.Type.SINK, false)
      )
      .addEqualityGroup(
          new CreateConnector(NAME, CONFIG, CreateConnector.Type.SOURCE, true)
      )
      .testEquals();
}
/**
 * Injects trace headers into an outgoing AMQP message and records a
 * zero-duration PRODUCER span named "publish" for it. The parent is the span
 * currently in scope when one exists; otherwise it is extracted from (and
 * cleared out of) the message's own headers.
 */
@Override
public Message postProcessMessage(Message message) {
  MessageProducerRequest request = new MessageProducerRequest(message);

  TraceContext maybeParent = currentTraceContext.get();
  // Unlike message consumers, we try current span before trying extraction. This is the proper
  // order because the span in scope should take precedence over a potentially stale header entry.
  //
  // NOTE: Brave instrumentation used properly does not result in stale header entries, as we
  // always clear message headers after reading.
  Span span;
  if (maybeParent == null) {
    TraceContextOrSamplingFlags extracted =
        springRabbitTracing.extractAndClearTraceIdHeaders(extractor, request, message);
    span = springRabbitTracing.nextMessagingSpan(sampler, request, extracted);
  } else {
    // If we have a span in scope assume headers were cleared before
    span = tracer.newChild(maybeParent);
  }

  if (!span.isNoop()) {
    span.kind(PRODUCER).name("publish");
    if (remoteServiceName != null) span.remoteServiceName(remoteServiceName);
    // incur timestamp overhead only once
    long timestamp = tracing.clock(span.context()).currentTimeMicroseconds();
    span.start(timestamp).finish(timestamp);
  }

  injector.inject(span.context(), request);
  return message;
}
// The span currently in scope must take precedence over a (stale) b3 header on the message.
@Test
void should_prefer_current_span() {
  // Will be either a bug, or a missing processor stage which can result in an old span in headers
  Message message = MessageBuilder.withBody(new byte[0]).build();
  message.getMessageProperties().setHeader("b3", B3SingleFormat.writeB3SingleFormat(grandparent));

  Message postProcessMessage;
  try (Scope scope = tracing.currentTraceContext().newScope(parent)) {
    postProcessMessage = tracingMessagePostProcessor.postProcessMessage(message);
  }

  assertThat(spans.get(0).parentId()).isEqualTo(parent.spanIdString());

  Map<String, Object> headers = postProcessMessage.getMessageProperties().getHeaders();
  assertThat(headers.get("b3").toString()).endsWith("-" + spans.get(0).id() + "-1");
}
/**
 * Resolves the function variant matching the supplied argument types by
 * delegating to the UDF index. Synchronized — presumably to guard the index
 * against concurrent lookup/registration; confirm against other
 * synchronized members of this class.
 */
public synchronized KsqlScalarFunction getFunction(final List<SqlArgument> argTypes) {
  return udfIndex.getFunction(argTypes);
}
// An unsupported parameter combination should produce a KafkaException with a descriptive message.
@Test
public void shouldThrowIfNoVariantFoundThatAcceptsSuppliedParamTypes() {
  // When:
  final Exception e = assertThrows(
      KafkaException.class,
      () -> factory.getFunction(of(SqlArgument.of(STRING), SqlArgument.of(BIGINT)))
  );

  // Then:
  assertThat(e.getMessage(), containsString(
      "Function 'TestFunc' does not accept parameters (STRING, BIGINT)"));
}
/**
 * Maps an OpenWire ActiveMQ message onto an encoded AMQP message: JMS
 * headers become the AMQP Header/Properties sections, "JMS_AMQP_*" message
 * properties are routed back into their native AMQP slots (header fields,
 * annotations, footer, ...), scheduled-message properties are stripped, and
 * every remaining property lands in the Application Properties section.
 * Returns null for a null input.
 */
@Override
public EncodedMessage transform(ActiveMQMessage message) throws Exception {
    if (message == null) {
        return null;
    }

    long messageFormat = 0;
    // All sections are created lazily, only when a value requires them.
    Header header = null;
    Properties properties = null;
    Map<Symbol, Object> daMap = null;
    Map<Symbol, Object> maMap = null;
    Map<String, Object> apMap = null;
    Map<Object, Object> footerMap = null;

    Section body = convertBody(message);

    if (message.isPersistent()) {
        if (header == null) {
            header = new Header();
        }
        header.setDurable(true);
    }
    byte priority = message.getPriority();
    if (priority != Message.DEFAULT_PRIORITY) {
        if (header == null) {
            header = new Header();
        }
        header.setPriority(UnsignedByte.valueOf(priority));
    }
    String type = message.getType();
    if (type != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setSubject(type);
    }
    MessageId messageId = message.getMessageId();
    if (messageId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setMessageId(getOriginalMessageId(message));
    }
    ActiveMQDestination destination = message.getDestination();
    if (destination != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setTo(destination.getQualifiedName());
        // Record the destination kind as a message annotation.
        if (maMap == null) {
            maMap = new HashMap<>();
        }
        maMap.put(JMS_DEST_TYPE_MSG_ANNOTATION, destinationType(destination));
    }
    ActiveMQDestination replyTo = message.getReplyTo();
    if (replyTo != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setReplyTo(replyTo.getQualifiedName());
        if (maMap == null) {
            maMap = new HashMap<>();
        }
        maMap.put(JMS_REPLY_TO_TYPE_MSG_ANNOTATION, destinationType(replyTo));
    }
    String correlationId = message.getCorrelationId();
    if (correlationId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        // Prefer the typed AMQP id form; fall back to the raw string on parse failure.
        try {
            properties.setCorrelationId(AMQPMessageIdHelper.INSTANCE.toIdObject(correlationId));
        } catch (AmqpProtocolException e) {
            properties.setCorrelationId(correlationId);
        }
    }
    long expiration = message.getExpiration();
    if (expiration != 0) {
        long ttl = expiration - System.currentTimeMillis();
        // Clamp already-expired messages to the minimum positive TTL.
        if (ttl < 0) {
            ttl = 1;
        }
        if (header == null) {
            header = new Header();
        }
        header.setTtl(new UnsignedInteger((int) ttl));
        if (properties == null) {
            properties = new Properties();
        }
        properties.setAbsoluteExpiryTime(new Date(expiration));
    }
    long timeStamp = message.getTimestamp();
    if (timeStamp != 0) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setCreationTime(new Date(timeStamp));
    }
    // JMSX Message Properties
    int deliveryCount = message.getRedeliveryCounter();
    if (deliveryCount > 0) {
        if (header == null) {
            header = new Header();
        }
        header.setDeliveryCount(UnsignedInteger.valueOf(deliveryCount));
    }
    String userId = message.getUserID();
    if (userId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setUserId(new Binary(userId.getBytes(StandardCharsets.UTF_8)));
    }
    String groupId = message.getGroupID();
    if (groupId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setGroupId(groupId);
    }
    int groupSequence = message.getGroupSequence();
    if (groupSequence > 0) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setGroupSequence(UnsignedInteger.valueOf(groupSequence));
    }

    final Map<String, Object> entries;
    try {
        entries = message.getProperties();
    } catch (IOException e) {
        throw JMSExceptionSupport.create(e);
    }

    for (Map.Entry<String, Object> entry : entries.entrySet()) {
        String key = entry.getKey();
        Object value = entry.getValue();
        if (key.startsWith(JMS_AMQP_PREFIX)) {
            if (key.startsWith(NATIVE, JMS_AMQP_PREFIX_LENGTH)) {
                // skip transformer appended properties
                continue;
            } else if (key.startsWith(ORIGINAL_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
                // skip transformer appended properties
                continue;
            } else if (key.startsWith(MESSAGE_FORMAT, JMS_AMQP_PREFIX_LENGTH)) {
                messageFormat = (long) TypeConversionSupport.convert(entry.getValue(), Long.class);
                continue;
            } else if (key.startsWith(HEADER, JMS_AMQP_PREFIX_LENGTH)) {
                if (header == null) {
                    header = new Header();
                }
                continue;
            } else if (key.startsWith(PROPERTIES, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                continue;
            } else if (key.startsWith(MESSAGE_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (maMap == null) {
                    maMap = new HashMap<>();
                }
                String name = key.substring(JMS_AMQP_MESSAGE_ANNOTATION_PREFIX.length());
                maMap.put(Symbol.valueOf(name), value);
                continue;
            } else if (key.startsWith(FIRST_ACQUIRER, JMS_AMQP_PREFIX_LENGTH)) {
                if (header == null) {
                    header = new Header();
                }
                header.setFirstAcquirer((boolean) TypeConversionSupport.convert(value, Boolean.class));
                continue;
            } else if (key.startsWith(CONTENT_TYPE, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                properties.setContentType(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
                continue;
            } else if (key.startsWith(CONTENT_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                properties.setContentEncoding(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
                continue;
            } else if (key.startsWith(REPLYTO_GROUP_ID, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                properties.setReplyToGroupId((String) TypeConversionSupport.convert(value, String.class));
                continue;
            } else if (key.startsWith(DELIVERY_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (daMap == null) {
                    daMap = new HashMap<>();
                }
                String name = key.substring(JMS_AMQP_DELIVERY_ANNOTATION_PREFIX.length());
                daMap.put(Symbol.valueOf(name), value);
                continue;
            } else if (key.startsWith(FOOTER_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (footerMap == null) {
                    footerMap = new HashMap<>();
                }
                String name = key.substring(JMS_AMQP_FOOTER_PREFIX.length());
                footerMap.put(Symbol.valueOf(name), value);
                continue;
            }
        } else if (key.startsWith(AMQ_SCHEDULED_MESSAGE_PREFIX)) {
            // strip off the scheduled message properties
            continue;
        }

        // The property didn't map into any other slot so we store it in the
        // Application Properties section of the message.
        if (apMap == null) {
            apMap = new HashMap<>();
        }
        apMap.put(key, value);

        // NOTE(review): this advisory-type lookup appears loop-invariant yet
        // runs once per remaining property — confirm whether it was meant to
        // live outside the loop.
        int messageType = message.getDataStructureType();
        if (messageType == CommandTypes.ACTIVEMQ_MESSAGE) {
            // Type of command to recognize advisory message
            Object data = message.getDataStructure();
            if (data != null) {
                apMap.put("ActiveMqDataStructureType", data.getClass().getSimpleName());
            }
        }
    }

    // Encode the populated sections in AMQP's mandated order.
    final AmqpWritableBuffer buffer = new AmqpWritableBuffer();
    encoder.setByteBuffer(buffer);
    if (header != null) {
        encoder.writeObject(header);
    }
    if (daMap != null) {
        encoder.writeObject(new DeliveryAnnotations(daMap));
    }
    if (maMap != null) {
        encoder.writeObject(new MessageAnnotations(maMap));
    }
    if (properties != null) {
        encoder.writeObject(properties);
    }
    if (apMap != null) {
        encoder.writeObject(new ApplicationProperties(apMap));
    }
    if (body != null) {
        encoder.writeObject(body);
    }
    if (footerMap != null) {
        encoder.writeObject(new Footer(footerMap));
    }
    return new EncodedMessage(messageFormat, buffer.getArray(), 0, buffer.getArrayLength());
}
// An empty ObjectMessage should convert to an AMQP message with an empty Data body.
@Test
public void testConvertEmptyObjectMessageToAmqpMessageWithDataBody() throws Exception {
    ActiveMQObjectMessage outbound = createObjectMessage();
    outbound.onSend();
    outbound.storeContent();

    JMSMappingOutboundTransformer transformer = new JMSMappingOutboundTransformer();
    EncodedMessage encoded = transformer.transform(outbound);
    assertNotNull(encoded);

    Message amqp = encoded.decode();
    assertNotNull(amqp.getBody());
    assertTrue(amqp.getBody() instanceof Data);
    assertEquals(0, ((Data) amqp.getBody()).getValue().getLength());
}
/**
 * Entry point of the fluent query builder: starts a SELECT clause for the
 * given field name (e.g. "*" or a projection field) and returns the
 * {@link Select} builder to continue the query.
 */
public static Select select(String fieldName) {
    return new Select(fieldName);
}
// Each numeric comparison operator should render its YQL counterpart; inRange renders range().
@Test
void numeric_operations() {
    String q = Q.select("*")
        .from("sd1")
        .where("f1").le(1)
        .and("f2").lt(2)
        .and("f3").ge(3)
        .and("f4").gt(4)
        .and("f5").eq(5)
        .and("f6").inRange(6, 7)
        .build();

    assertEquals(q, "yql=select * from sd1 where f1 <= 1 and f2 < 2 and f3 >= 3 and f4 > 4 and f5 = 5 and range(f6, 6, 7)");
}
/**
 * Deletes the user row with the given username through the embedded-storage
 * update path, always clearing the thread-bound SQL context afterwards.
 */
@Override
public void deleteUser(String username) {
    try {
        EmbeddedStorageContextHolder.addSqlContext("DELETE FROM users WHERE username=?", username);
        databaseOperate.blockUpdate();
    } finally {
        EmbeddedStorageContextHolder.cleanAllContext();
    }
}
/** deleteUser should delegate the staged DELETE to a blocking database update. */
@Test
void testDeleteUser() {
    embeddedUserPersistService.deleteUser("username");
    Mockito.verify(databaseOperate).blockUpdate();
}
/**
 * Converts a scanner-report measure into a core {@link Measure}, dispatching on the metric's
 * value type. Returns {@link Optional#empty()} when the report measure is {@code null}.
 *
 * @throws IllegalArgumentException for value types this converter does not support
 */
public Optional<Measure> toMeasure(@Nullable ScannerReport.Measure batchMeasure, Metric metric) {
    Objects.requireNonNull(metric);
    if (batchMeasure == null) {
        return Optional.empty();
    }
    Measure.NewMeasureBuilder builder = Measure.newMeasureBuilder();
    switch (metric.getType().getValueType()) {
        case INT:
            return toIntegerMeasure(builder, batchMeasure);
        case LONG:
            return toLongMeasure(builder, batchMeasure);
        case DOUBLE:
            return toDoubleMeasure(builder, batchMeasure);
        case BOOLEAN:
            return toBooleanMeasure(builder, batchMeasure);
        case STRING:
            return toStringMeasure(builder, batchMeasure);
        case LEVEL:
            return toLevelMeasure(builder, batchMeasure);
        case NO_VALUE:
            return toNoValueMeasure(builder);
        default:
            throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType());
    }
}
/** An INT report measure maps its value and attached data string onto the core Measure. */
@Test
public void toMeasure_maps_data_and_alert_properties_in_dto_for_Int_Metric() {
    ScannerReport.Measure batchMeasure = ScannerReport.Measure.newBuilder()
        .setIntValue(IntValue.newBuilder().setValue(10).setData(SOME_DATA))
        .build();

    Optional<Measure> measure = underTest.toMeasure(batchMeasure, SOME_INT_METRIC);
    assertThat(measure).isPresent();

    assertThat(measure.get().getValueType()).isEqualTo(Measure.ValueType.INT);
    assertThat(measure.get().getIntValue()).isEqualTo(10);
    assertThat(measure.get().getData()).isEqualTo(SOME_DATA);
}
/**
 * Compares {@code length} bytes of {@code a} (from {@code aStartIndex}) with {@code length}
 * bytes of {@code b} (from {@code bStartIndex}).
 *
 * <p>Returns {@code false} (rather than throwing) when either requested range extends past the
 * corresponding buffer's writer index. Bulk comparison is done 8 bytes at a time, swapping byte
 * order when the two buffers have different endianness, with a byte-wise tail for the remainder.
 */
public static boolean equals(ByteBuf a, int aStartIndex, ByteBuf b, int bStartIndex, int length) {
    checkNotNull(a, "a");
    checkNotNull(b, "b");
    // All indexes and lengths must be non-negative
    checkPositiveOrZero(aStartIndex, "aStartIndex");
    checkPositiveOrZero(bStartIndex, "bStartIndex");
    checkPositiveOrZero(length, "length");

    // Fail fast when either range would read past the buffer's written content.
    if (a.writerIndex() - length < aStartIndex || b.writerIndex() - length < bStartIndex) {
        return false;
    }

    final int longCount = length >>> 3; // number of 8-byte words to compare
    final int byteCount = length & 7;   // remaining tail bytes

    if (a.order() == b.order()) {
        for (int i = longCount; i > 0; i --) {
            if (a.getLong(aStartIndex) != b.getLong(bStartIndex)) {
                return false;
            }
            aStartIndex += 8;
            bStartIndex += 8;
        }
    } else {
        // Different endianness: normalize b's words before comparing.
        for (int i = longCount; i > 0; i --) {
            if (a.getLong(aStartIndex) != swapLong(b.getLong(bStartIndex))) {
                return false;
            }
            aStartIndex += 8;
            bStartIndex += 8;
        }
    }

    for (int i = byteCount; i > 0; i --) {
        if (a.getByte(aStartIndex) != b.getByte(bStartIndex)) {
            return false;
        }
        aStartIndex ++;
        bStartIndex ++;
    }

    return true;
}
/** A length larger than either buffer must make equals() return false, not read out of bounds. */
@Test
public void notEqualsBufferOverflow() {
    byte[] b1 = new byte[8];
    byte[] b2 = new byte[16];
    Random rand = new Random();
    rand.nextBytes(b1);
    rand.nextBytes(b2);
    final int iB1 = b1.length / 2;
    final int iB2 = iB1 + b1.length;
    final int length = b1.length - iB1;
    System.arraycopy(b1, iB1, b2, iB2, length - 1);
    // Deliberately oversized length: the range check must reject it.
    assertFalse(ByteBufUtil.equals(Unpooled.wrappedBuffer(b1), iB1, Unpooled.wrappedBuffer(b2), iB2,
            Math.max(b1.length, b2.length) * 2));
}
/**
 * Deserializes the given repository data node into a freshly created {@link JobMeta}.
 *
 * @param rootNode the persisted representation of the job
 * @return the populated job metadata
 * @throws KettleException if deserialization fails
 */
public RepositoryElementInterface dataNodeToElement( final DataNode rootNode ) throws KettleException {
    final JobMeta result = new JobMeta();
    dataNodeToElement( rootNode, result );
    return result;
}
/** Round-trips a JobMeta through the data-node form and checks entries keep their parent link. */
@Test
public void testDataNodeToElement() throws KettleException {
    DataNode dataNode = jobDelegate.elementToDataNode( mockJobMeta );
    setIds( dataNode );
    JobMeta jobMeta = new JobMeta();
    jobDelegate.dataNodeToElement( dataNode, jobMeta );
    assertThat( jobMeta.getJobCopies().size(), equalTo( 1 ) );
    assertThat( jobMeta.getJobEntry( 0 ).getName(), equalTo( "MOCK_NAME" ) );
    // Identity check on purpose: the entry must point at the exact same JobMeta instance.
    assertTrue( "Job Entry should have link back to parent job meta.",
        jobMeta.getJobEntry( 0 ).getParentJobMeta() == jobMeta );
}
/**
 * Executes {@code callable} as a Shiro subject pre-authenticated as {@code username},
 * with session creation disabled so no session state is left behind.
 *
 * @return whatever the callable returns
 */
public static <T> T runAs(String username, Callable<T> callable) {
    final Subject subject = new Subject.Builder()
            .principals(new SimplePrincipalCollection(username, "runAs-context"))
            .authenticated(true)
            .sessionCreationEnabled(false)
            .buildSubject();
    return subject.execute(callable);
}
/** Inside runAs, a UserContext must resolve to the impersonated user id and user object. */
@Test
void runAs() {
    // Simulate what we do in the DefaultSecurityManagerProvider
    DefaultSecurityManager sm = new DefaultSecurityManager();
    SecurityUtils.setSecurityManager(sm);
    final DefaultSubjectDAO subjectDAO = new DefaultSubjectDAO();
    final DefaultSessionStorageEvaluator sessionStorageEvaluator = new DefaultSessionStorageEvaluator() {
        @Override
        public boolean isSessionStorageEnabled(Subject subject) {
            // save to session if we already have a session. do not create on just for saving the subject
            return subject.getSession(false) != null;
        }
    };
    sessionStorageEvaluator.setSessionStorageEnabled(false);
    subjectDAO.setSessionStorageEvaluator(sessionStorageEvaluator);
    sm.setSubjectDAO(subjectDAO);

    final User user = new UserImpl(mock(PasswordAlgorithmFactory.class), mock(Permissions.class),
            mock(ClusterConfigService.class), ImmutableMap.of());
    when(userService.load(anyString())).thenReturn(user);
    when(userService.loadById(anyString())).thenReturn(user);

    final String USERID = "123456";
    UserContext.<Void>runAs(USERID, () -> {
        final UserContext userContext = new UserContext.Factory(userService).create();
        assertThat(userContext.getUserId()).isEqualTo(USERID);
        assertThat(userContext.getUser()).isEqualTo(user);
        return null;
    });
}
/**
 * Builds an HTTP {@link ConnectionConfig} from the REST client properties.
 *
 * @return the config with any configured connect/socket timeouts applied, or {@code null}
 *         when neither timeout property is set (letting the client use its defaults)
 */
@VisibleForTesting
static ConnectionConfig configureConnectionConfig(Map<String, String> properties) {
    Long connectionTimeoutMillis =
        PropertyUtil.propertyAsNullableLong(properties, REST_CONNECTION_TIMEOUT_MS);
    Integer socketTimeoutMillis =
        PropertyUtil.propertyAsNullableInt(properties, REST_SOCKET_TIMEOUT_MS);

    if (connectionTimeoutMillis == null && socketTimeoutMillis == null) {
        return null;
    }

    ConnectionConfig.Builder connConfigBuilder = ConnectionConfig.custom();

    if (connectionTimeoutMillis != null) {
        connConfigBuilder.setConnectTimeout(connectionTimeoutMillis, TimeUnit.MILLISECONDS);
    }

    if (socketTimeoutMillis != null) {
        connConfigBuilder.setSocketTimeout(socketTimeoutMillis, TimeUnit.MILLISECONDS);
    }

    return connConfigBuilder.build();
}
/** Both timeout properties set: both must appear on the built ConnectionConfig. */
@Test
public void testSocketAndConnectionTimeoutSet() {
    long connectionTimeoutMs = 10L;
    int socketTimeoutMs = 10;
    Map<String, String> properties =
        ImmutableMap.of(
            HTTPClient.REST_CONNECTION_TIMEOUT_MS, String.valueOf(connectionTimeoutMs),
            HTTPClient.REST_SOCKET_TIMEOUT_MS, String.valueOf(socketTimeoutMs));

    ConnectionConfig connectionConfig = HTTPClient.configureConnectionConfig(properties);
    assertThat(connectionConfig).isNotNull();
    assertThat(connectionConfig.getConnectTimeout().getDuration()).isEqualTo(connectionTimeoutMs);
    assertThat(connectionConfig.getSocketTimeout().getDuration()).isEqualTo(socketTimeoutMs);
}
/**
 * Writes the analysis metadata section of the scanner report: project identity, branch and
 * new-code reference info, SCM data, quality profiles, and installed plugins.
 */
@Override
public void publish(ScannerReportWriter writer) {
    AbstractProjectOrModule rootProject = moduleHierarchy.root();
    ScannerReport.Metadata.Builder builder = ScannerReport.Metadata.newBuilder()
        .setAnalysisDate(projectInfo.getAnalysisDate().getTime())
        // Here we want key without branch
        .setProjectKey(rootProject.key())
        .setCrossProjectDuplicationActivated(cpdSettings.isCrossProjectDuplicationEnabled())
        .setRootComponentRef(rootProject.scannerId());
    projectInfo.getProjectVersion().ifPresent(builder::setProjectVersion);
    projectInfo.getBuildString().ifPresent(builder::setBuildString);

    // Branch details are only written for branch/PR analyses.
    if (branchConfiguration.branchName() != null) {
        addBranchInformation(builder);
    }

    String newCodeReferenceBranch = referenceBranchSupplier.getFromProperties();
    if (newCodeReferenceBranch != null) {
        builder.setNewCodeReferenceBranch(newCodeReferenceBranch);
    }

    addScmInformation(builder);
    addNotAnalyzedFileCountsByLanguage(builder);

    // One QProfile entry per analyzed language.
    for (QProfile qp : qProfiles.findAll()) {
        builder.putQprofilesPerLanguage(qp.getLanguage(), ScannerReport.Metadata.QProfile.newBuilder()
            .setKey(qp.getKey())
            .setLanguage(qp.getLanguage())
            .setName(qp.getName())
            .setRulesUpdatedAt(qp.getRulesUpdatedAt().getTime()).build());
    }
    for (Entry<String, ScannerPlugin> pluginEntry : pluginRepository.getPluginsByKey().entrySet()) {
        builder.putPluginsByKey(pluginEntry.getKey(), ScannerReport.Metadata.Plugin.newBuilder()
            .setKey(pluginEntry.getKey())
            .setUpdatedAt(pluginEntry.getValue().getUpdatedAt()).build());
    }

    addRelativePathFromScmRoot(builder);

    writer.writeMetadata(builder.build());
}
/** Branch analyses write name, type and target branch; reference branch stays empty. */
@Test
public void write_branch_info() {
    String branchName = "name";
    String targetName = "target";

    when(branches.branchName()).thenReturn(branchName);
    when(branches.branchType()).thenReturn(BranchType.BRANCH);
    when(branches.targetBranchName()).thenReturn(targetName);

    underTest.publish(writer);

    ScannerReport.Metadata metadata = reader.readMetadata();
    assertThat(metadata.getBranchName()).isEqualTo(branchName);
    assertThat(metadata.getBranchType()).isEqualTo(ScannerReport.Metadata.BranchType.BRANCH);
    assertThat(metadata.getReferenceBranchName()).isEmpty();
    assertThat(metadata.getTargetBranchName()).isEqualTo(targetName);
}
/**
 * Plans splits for continuous enumeration, always refreshing table metadata first.
 * A null position means this is the first invocation, so the initial discovery path runs;
 * otherwise only changes since {@code lastPosition} are discovered.
 */
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
    table.refresh();
    if (lastPosition == null) {
        return discoverInitialSplits();
    }
    return discoverIncrementalSplits(lastPosition);
}
/** Starting from a snapshot id that an empty table never had must fail fast. */
@Test
public void testIncrementalFromSnapshotIdWithEmptyTable() {
    ScanContext scanContextWithInvalidSnapshotId =
        ScanContext.builder()
            .startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_SNAPSHOT_ID)
            .startSnapshotId(1L)
            .build();

    ContinuousSplitPlannerImpl splitPlanner =
        new ContinuousSplitPlannerImpl(
            TABLE_RESOURCE.tableLoader().clone(), scanContextWithInvalidSnapshotId, null);

    assertThatThrownBy(() -> splitPlanner.planSplits(null))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Start snapshot id not found in history: 1");
}
/**
 * Estimates stats for the rows of the left side that do NOT match the join criteria
 * (the "join complement" — the null-extended rows produced by an outer join).
 */
@VisibleForTesting
PlanNodeStatsEstimate calculateJoinComplementStats(
        Optional<RowExpression> filter,
        List<EquiJoinClause> criteria,
        PlanNodeStatsEstimate leftStats,
        PlanNodeStatsEstimate rightStats)
{
    if (rightStats.getOutputRowCount() == 0) {
        // no left side rows are matched
        return leftStats;
    }

    if (criteria.isEmpty()) {
        // TODO: account for non-equi conditions
        if (filter.isPresent()) {
            return PlanNodeStatsEstimate.unknown();
        }

        // Cross join with a non-empty right side: every left row matches, complement is empty.
        return normalizer.normalize(leftStats.mapOutputRowCount(rowCount -> 0.0));
    }

    // TODO: add support for non-equality conditions (e.g: <=, !=, >)
    int numberOfFilterClauses = filter.map(expression -> extractConjuncts(expression).size()).orElse(0);

    // Heuristics: select the most selective criteria for join complement clause.
    // Principals behind this heuristics is the same as in computeInnerJoinStats:
    // select "driving join clause" that reduces matched rows the most.
    return criteria.stream()
            .map(drivingClause -> calculateJoinComplementStats(leftStats, rightStats, drivingClause,
                    criteria.size() - 1 + numberOfFilterClauses))
            .filter(estimate -> !estimate.isOutputRowCountUnknown())
            .max(comparingDouble(PlanNodeStatsEstimate::getOutputRowCount))
            .map(estimate -> normalizer.normalize(estimate))
            .orElse(PlanNodeStatsEstimate.unknown());
}
/** With multiple equi-clauses, the most selective driving clause determines the complement stats. */
@Test
public void testLeftJoinComplementStatsWithMultipleClauses() {
    PlanNodeStatsEstimate expected = planNodeStats(
            LEFT_ROWS_COUNT * (LEFT_JOIN_COLUMN_NULLS + LEFT_JOIN_COLUMN_NON_NULLS / 4),
            variableStatistics(LEFT_JOIN_COLUMN, 0.0, 20.0,
                    LEFT_JOIN_COLUMN_NULLS / (LEFT_JOIN_COLUMN_NULLS + LEFT_JOIN_COLUMN_NON_NULLS / 4), 5),
            LEFT_OTHER_COLUMN_STATS)
            .mapOutputRowCount(rowCount -> rowCount / UNKNOWN_FILTER_COEFFICIENT);
    PlanNodeStatsEstimate actual = JOIN_STATS_RULE.calculateJoinComplementStats(
            Optional.empty(),
            ImmutableList.of(
                    new EquiJoinClause(LEFT_JOIN_COLUMN, RIGHT_JOIN_COLUMN),
                    new EquiJoinClause(LEFT_OTHER_COLUMN, RIGHT_OTHER_COLUMN)),
            LEFT_STATS,
            RIGHT_STATS);
    assertEquals(actual, expected);
}
/**
 * Returns the applications whose current deployment is stale enough to need redeployment,
 * keyed by application id with a human-readable reason.
 * Returns an empty map while the deployer is still bootstrapping.
 */
@Override
protected Map<ApplicationId, String> applicationsNeedingMaintenance() {
    if (deployer().bootstrapping()) return Map.of();

    return nodesNeedingMaintenance().stream()
            .map(node -> node.allocation().get().owner())
            .distinct()
            .filter(this::shouldMaintain)
            .filter(this::canDeployNow)
            .collect(toMap(applicationId -> applicationId,
                           applicationId -> "current deployment being too old"));
}
/**
 * Eligible applications are queued for deployment, pending deployments are not re-queued,
 * and recently deployed applications are not eligible again immediately.
 */
@Test(timeout = 60_000)
public void queues_all_eligible_applications_for_deployment() throws Exception {
    fixture.activate();

    // Exhaust initial wait period and set bootstrapping to be done
    clock.advance(Duration.ofMinutes(30).plus(Duration.ofSeconds(1)));
    fixture.setBootstrapping(false);

    // Lock deployer to simulate slow deployments
    fixture.deployer.lock().lockInterruptibly();
    try {
        // Queues all eligible applications
        assertEquals(2, fixture.maintainer.applicationsNeedingMaintenance().size());
        fixture.runApplicationMaintainer(false);
        assertEquals(2, fixture.maintainer.pendingDeployments());

        // Enough time passes to make applications eligible for another periodic deployment
        clock.advance(Duration.ofMinutes(30).plus(Duration.ofSeconds(1)));
        fixture.runApplicationMaintainer(false);

        // Deployments are not re-queued as previous deployments are still pending
        assertEquals(2, fixture.maintainer.pendingDeployments());

        // Slow deployments complete
        fixture.deployer.lock().unlock();
        fixture.runApplicationMaintainer();
        Instant deployTime = clock.instant();
        assertEquals(deployTime, fixture.deployer.deployTime(fixture.app1).get());
        assertEquals(deployTime, fixture.deployer.deployTime(fixture.app2).get());

        // Too soon: Already deployed recently
        clock.advance(Duration.ofMinutes(5));
        assertEquals(0, fixture.maintainer.applicationsNeedingMaintenance().size());
    } finally {
        // Release the lock if an assertion failed before the explicit unlock above.
        if (fixture.deployer.lock().isHeldByCurrentThread()) {
            fixture.deployer.lock().unlock();
        }
    }
}
/**
 * Computes the initial great-circle bearing from this point to {@code other},
 * in degrees in the range [0, 360), measured clockwise from north.
 */
public double bearingTo(final IGeoPoint other) {
    final double fromLat = Math.toRadians(this.mLatitude);
    final double fromLon = Math.toRadians(this.mLongitude);
    final double toLat = Math.toRadians(other.getLatitude());
    final double toLon = Math.toRadians(other.getLongitude());
    final double lonDelta = toLon - fromLon;

    // Standard forward-azimuth formula: atan2(y, x).
    final double y = Math.sin(lonDelta) * Math.cos(toLat);
    final double x = Math.cos(fromLat) * Math.sin(toLat)
            - Math.sin(fromLat) * Math.cos(toLat) * Math.cos(lonDelta);

    // atan2 yields (-180, 180]; shift into [0, 360).
    return (Math.toDegrees(Math.atan2(y, x)) + 360) % 360;
}
/** A point due east on the equator should bear exactly 90 degrees. */
@Test
public void test_bearingTo_east() {
    final GeoPoint target = new GeoPoint(0.0, 0.0);
    final GeoPoint other = new GeoPoint(0.0, 10.0);
    assertEquals("directly east", 90, Math.round(target.bearingTo(other)));
}
/**
 * Creates a lossless PDF image XObject from a BufferedImage, choosing the most compact
 * encoding: gray images take a dedicated path; otherwise a predictor-based encoding is
 * attempted and, for very small RGB images, compared against the classic RGB encoding.
 * Falls back to 8-bit sRGB when predictor encoding is not possible.
 */
public static PDImageXObject createFromImage(PDDocument document, BufferedImage image)
        throws IOException
{
    if (isGrayImage(image))
    {
        return createFromGrayImage(image, document);
    }

    // We try to encode the image with predictor
    if (USE_PREDICTOR_ENCODER)
    {
        PDImageXObject pdImageXObject = new PredictorEncoder(document, image).encode();
        if (pdImageXObject != null)
        {
            if (pdImageXObject.getColorSpace() == PDDeviceRGB.INSTANCE &&
                pdImageXObject.getBitsPerComponent() < 16 &&
                image.getWidth() * image.getHeight() <= 50 * 50)
            {
                // also create classic compressed image, compare sizes
                PDImageXObject pdImageXObjectClassic = createFromRGBImage(image, document);
                if (pdImageXObjectClassic.getCOSObject().getLength() <
                        pdImageXObject.getCOSObject().getLength())
                {
                    // Classic encoding won: discard the predictor stream.
                    pdImageXObject.getCOSObject().close();
                    return pdImageXObjectClassic;
                }
                else
                {
                    pdImageXObjectClassic.getCOSObject().close();
                }
            }
            return pdImageXObject;
        }
    }

    // Fallback: We export the image as 8-bit sRGB and might lose color information
    return createFromRGBImage(image, document);
}
/** An USHORT_555_RGB source image must round-trip losslessly and produce no soft mask. */
@Test
void testCreateLosslessFromImageUSHORT_555_RGB() throws IOException {
    PDDocument document = new PDDocument();
    BufferedImage image = ImageIO.read(this.getClass().getResourceAsStream("png.png"));

    // create an USHORT_555_RGB image
    int w = image.getWidth();
    int h = image.getHeight();
    BufferedImage rgbImage = new BufferedImage(w, h, BufferedImage.TYPE_USHORT_555_RGB);
    Graphics ag = rgbImage.getGraphics();
    ag.drawImage(image, 0, 0, null);
    ag.dispose();

    // Vary the (ignored) alpha band to make sure it does not leak into the output.
    for (int x = 0; x < rgbImage.getWidth(); ++x) {
        for (int y = 0; y < rgbImage.getHeight(); ++y) {
            rgbImage.setRGB(x, y, (rgbImage.getRGB(x, y) & 0xFFFFFF) | ((y / 10 * 10) << 24));
        }
    }

    PDImageXObject ximage = LosslessFactory.createFromImage(document, rgbImage);

    validate(ximage, 8, w, h, "png", PDDeviceRGB.INSTANCE.getName());
    checkIdent(rgbImage, ximage.getImage());
    checkIdentRGB(rgbImage, ximage.getOpaqueImage(null, 1));

    assertNull(ximage.getSoftMask());

    doWritePDF(document, ximage, TESTRESULTSDIR, "ushort555rgb.pdf");
}
/**
 * DELETE /rule: removes the rule-level data permission described by the request body.
 *
 * @param dataPermissionDTO the permission to delete (validated, must not be null)
 * @return a success result wrapping the number of affected rows
 */
@DeleteMapping("/rule")
public ShenyuAdminResult deleteRule(@RequestBody @Valid @NotNull final DataPermissionDTO dataPermissionDTO) {
    return ShenyuAdminResult.success(ShenyuResultMessage.DELETE_SUCCESS,
            dataPermissionService.deleteRule(dataPermissionDTO));
}
/** DELETE /data-permission/rule returns the service's row count with a success message. */
@Test
public void deleteRule() throws Exception {
    DataPermissionDTO dataPermissionDTO = new DataPermissionDTO();
    dataPermissionDTO.setDataId("testDataId");
    dataPermissionDTO.setUserId("testUserId");
    given(this.dataPermissionService.deleteRule(dataPermissionDTO)).willReturn(1);
    this.mockMvc.perform(MockMvcRequestBuilders.delete("/data-permission/rule")
            .contentType(MediaType.APPLICATION_JSON)
            .content(GsonUtils.getInstance().toJson(dataPermissionDTO)))
            .andExpect(status().isOk())
            .andExpect(jsonPath("$.message", is(ShenyuResultMessage.DELETE_SUCCESS)))
            .andExpect(jsonPath("$.data", is(1)))
            .andReturn();
}
@SuppressWarnings("unused") // Part of required API. public void execute( final ConfiguredStatement<InsertValues> statement, final SessionProperties sessionProperties, final KsqlExecutionContext executionContext, final ServiceContext serviceContext ) { final InsertValues insertValues = statement.getStatement(); final MetaStore metaStore = executionContext.getMetaStore(); final KsqlConfig config = statement.getSessionConfig().getConfig(true); final DataSource dataSource = getDataSource(config, metaStore, insertValues); validateInsert(insertValues.getColumns(), dataSource); final ProducerRecord<byte[], byte[]> record = buildRecord(statement, metaStore, dataSource, serviceContext); try { producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps()); } catch (final TopicAuthorizationException e) { // TopicAuthorizationException does not give much detailed information about why it failed, // except which topics are denied. Here we just add the ACL to make the error message // consistent with other authorization error messages. final Exception rootCause = new KsqlTopicAuthorizationException( AclOperation.WRITE, e.unauthorizedTopics() ); throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause); } catch (final ClusterAuthorizationException e) { // ClusterAuthorizationException is thrown when using idempotent producers // and either a topic write permission or a cluster-level idempotent write // permission (only applicable for broker versions no later than 2.8) is // missing. In this case, we include additional context to help the user // distinguish this type of failure from other permissions exceptions // such as the ones thrown above when TopicAuthorizationException is caught. 
throw new KsqlException( createInsertFailedExceptionMessage(insertValues), createClusterAuthorizationExceptionRootCause(dataSource) ); } catch (final KafkaException e) { if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) { // The error message thrown when an idempotent producer is missing permissions // is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException, // as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException. // ksqlDB handles these two the same way, accordingly. // See https://issues.apache.org/jira/browse/KAFKA-14138 for more. throw new KsqlException( createInsertFailedExceptionMessage(insertValues), createClusterAuthorizationExceptionRootCause(dataSource) ); } else { throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e); } } catch (final Exception e) { throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e); } }
/** A key-serialization failure must surface as a KsqlException with a descriptive cause. */
@Test
public void shouldThrowOnSerializingKeyError() {
    // Given:
    final ConfiguredStatement<InsertValues> statement = givenInsertValues(
        allAndPseudoColumnNames(SCHEMA),
        ImmutableList.of(
            new LongLiteral(1L),
            new StringLiteral("str"),
            new StringLiteral("str"),
            new LongLiteral(2L))
    );
    when(keySerializer.serialize(any(), any())).thenThrow(new SerializationException("Jibberish!"));

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> executor.execute(statement, mock(SessionProperties.class), engine, serviceContext)
    );

    // Then:
    assertThat(e.getCause(), (hasMessage(containsString("Could not serialize key"))));
}
/**
 * Counts non-global (local) inputs assigned to the given node id.
 *
 * <p>NOTE(review): the {@code $or} wraps a single node-id clause — presumably kept so further
 * alternatives can be added later; confirm before simplifying the query structure.
 */
@Override
public long localCountForNode(String nodeId) {
    final List<BasicDBObject> forThisNode = ImmutableList.of(new BasicDBObject(MessageInput.FIELD_NODE_ID, nodeId));

    final List<BasicDBObject> query = ImmutableList.of(
            new BasicDBObject(MessageInput.FIELD_GLOBAL, false),
            new BasicDBObject("$or", forThisNode));

    return count(InputImpl.class, new BasicDBObject("$and", query));
}
/** Counts only local inputs on the matching node; an unknown node yields zero. */
@Test
@MongoDBFixtures("InputServiceImplTest.json")
public void localCountForNodeReturnsNumberOfLocalInputs() {
    assertThat(inputService.localCountForNode("cd03ee44-b2a7-cafe-babe-0000deadbeef")).isEqualTo(2);
    assertThat(inputService.localCountForNode("cd03ee44-b2a7-0000-0000-000000000000")).isEqualTo(0);
}
/**
 * Determines whether (and how) this group's committed offsets may be expired.
 *
 * @return the expiration condition supplying the base timestamp for retention checks,
 *         or empty when offsets must not be expired in the group's current state
 */
@Override
public Optional<OffsetExpirationCondition> offsetExpirationCondition() {
    if (protocolType.isPresent()) {
        if (isInState(EMPTY)) {
            // No members exist in the group =>
            // - If current state timestamp exists and retention period has passed since group became Empty,
            //   expire all offsets with no pending offset commit;
            // - If there is no current state timestamp (old group metadata schema) and retention period has passed
            //   since the last commit timestamp, expire the offset
            return Optional.of(new OffsetExpirationConditionImpl(
                offsetAndMetadata -> currentStateTimestamp.orElse(offsetAndMetadata.commitTimestampMs))
            );
        } else if (usesConsumerGroupProtocol() && subscribedTopics.isPresent() && isInState(STABLE)) {
            // Consumers exist in the group and group is Stable =>
            // - If the group is aware of the subscribed topics and retention period has passed since the
            //   last commit timestamp, expire the offset.
            return Optional.of(new OffsetExpirationConditionImpl(offsetAndMetadata -> offsetAndMetadata.commitTimestampMs));
        }
    } else {
        // protocolType is None => standalone (simple) consumer, that uses Kafka for offset storage. Only
        // expire offsets where retention period has passed since their last commit.
        return Optional.of(new OffsetExpirationConditionImpl(offsetAndMetadata -> offsetAndMetadata.commitTimestampMs));
    }

    // If none of the conditions above are met, do not expire any offsets.
    return Optional.empty();
}
/**
 * Walks the five cases of offsetExpirationCondition(): simple consumer, Empty state,
 * non-Empty non-consumer, Stable consumer with subscriptions, and rebalancing consumer.
 */
@Test
public void testOffsetExpirationCondition() {
    long currentTimestamp = 30000L;
    long commitTimestamp = 20000L;
    long offsetsRetentionMs = 10000L;
    OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(15000L, OptionalInt.empty(), "", commitTimestamp, OptionalLong.empty());
    MockTime time = new MockTime();
    long currentStateTimestamp = time.milliseconds();
    ClassicGroup group = new ClassicGroup(new LogContext(), "groupId", EMPTY, time, mock(GroupCoordinatorMetricsShard.class));

    // 1. Test no protocol type. Simple consumer case, Base timestamp based off of last commit timestamp.
    Optional<OffsetExpirationCondition> offsetExpirationCondition = group.offsetExpirationCondition();
    assertTrue(offsetExpirationCondition.isPresent());

    OffsetExpirationConditionImpl condition = (OffsetExpirationConditionImpl) offsetExpirationCondition.get();
    assertEquals(commitTimestamp, condition.baseTimestamp().apply(offsetAndMetadata));
    assertTrue(condition.isOffsetExpired(offsetAndMetadata, currentTimestamp, offsetsRetentionMs));

    // 2. Test non-consumer protocol type + Empty state. Base timestamp based off of current state timestamp.
    JoinGroupRequestProtocolCollection protocols = new JoinGroupRequestProtocolCollection();
    protocols.add(new JoinGroupRequestProtocol()
        .setName("range")
        .setMetadata(ConsumerProtocol.serializeSubscription(
            new ConsumerPartitionAssignor.Subscription(Collections.singletonList("topic"))).array()));

    ClassicGroupMember memberWithNonConsumerProtocol = new ClassicGroupMember(
        "memberWithNonConsumerProtocol",
        Optional.empty(),
        clientId,
        clientHost,
        rebalanceTimeoutMs,
        sessionTimeoutMs,
        "My Protocol",
        protocols
    );

    group.add(memberWithNonConsumerProtocol);
    assertEquals("My Protocol", group.protocolType().get());

    offsetExpirationCondition = group.offsetExpirationCondition();
    assertTrue(offsetExpirationCondition.isPresent());

    condition = (OffsetExpirationConditionImpl) offsetExpirationCondition.get();
    assertEquals(currentStateTimestamp, condition.baseTimestamp().apply(offsetAndMetadata));
    assertTrue(condition.isOffsetExpired(offsetAndMetadata, currentStateTimestamp + offsetsRetentionMs, offsetsRetentionMs));

    // 3. Test non-consumer protocol type + non-Empty state. Do not expire any offsets.
    group.transitionTo(PREPARING_REBALANCE);
    offsetExpirationCondition = group.offsetExpirationCondition();
    assertFalse(offsetExpirationCondition.isPresent());

    // 4. Test consumer protocol type + subscribed topics + Stable state. Base timestamp based off of last commit timestamp.
    group.remove("memberWithNonConsumerProtocol");
    ClassicGroupMember memberWithConsumerProtocol = new ClassicGroupMember(
        "memberWithConsumerProtocol",
        Optional.empty(),
        clientId,
        clientHost,
        rebalanceTimeoutMs,
        sessionTimeoutMs,
        "consumer",
        protocols
    );

    group.add(memberWithConsumerProtocol);
    group.initNextGeneration();
    group.transitionTo(STABLE);
    assertTrue(group.subscribedTopics().get().contains("topic"));

    offsetExpirationCondition = group.offsetExpirationCondition();
    assertTrue(offsetExpirationCondition.isPresent());

    condition = (OffsetExpirationConditionImpl) offsetExpirationCondition.get();
    assertEquals(commitTimestamp, condition.baseTimestamp().apply(offsetAndMetadata));
    assertTrue(condition.isOffsetExpired(offsetAndMetadata, currentTimestamp, offsetsRetentionMs));

    // 5. Test consumer protocol type + subscribed topics + non-Stable state. Do not expire any offsets.
    group.transitionTo(PREPARING_REBALANCE);
    offsetExpirationCondition = group.offsetExpirationCondition();
    assertFalse(offsetExpirationCondition.isPresent());
}
/** Static factory: returns a new key/value {@code Reshuffle} transform. */
public static <K, V> Reshuffle<K, V> of() {
    return new Reshuffle<>();
}
/** With an old update-compatibility version, Reshuffle must expand to the legacy transform. */
@Test
public void testRequestVeryOldUpdateCompatibility() {
    pipeline.enableAbandonedNodeEnforcement(false);
    pipeline.getOptions().as(StreamingOptions.class).setUpdateCompatibilityVersion("2.46.0");
    pipeline.apply(Create.of(KV.of("arbitrary", "kv"))).apply(Reshuffle.of());

    // Traverse the pipeline graph looking for the legacy transform implementation.
    OldTransformSeeker seeker = new OldTransformSeeker();
    pipeline.traverseTopologically(seeker);
    assertTrue(seeker.isOldTransformFound);
}
/**
 * Returns the names of the resources configured with the given capacity type.
 * A defensive copy is returned so callers cannot mutate internal state;
 * an unknown capacity type yields an empty set.
 */
public Set<String> getResourceNamesByCapacityType(
        ResourceUnitCapacityType capacityType) {
    final Set<String> names =
        capacityTypePerResource.getOrDefault(capacityType, Collections.emptySet());
    return new HashSet<>(names);
}
/** Resources partition by capacity type; unset resources default to ABSOLUTE with value 0. */
@Test
public void getResourceNamesByCapacityType() {
    QueueCapacityVector capacityVector = QueueCapacityVector.newInstance();

    capacityVector.setResource(MEMORY_URI, 10, ResourceUnitCapacityType.PERCENTAGE);
    capacityVector.setResource(VCORES_URI, 6, ResourceUnitCapacityType.PERCENTAGE);

    // custom is not set, defaults to 0
    Assert.assertEquals(1, capacityVector.getResourceNamesByCapacityType(
        ResourceUnitCapacityType.ABSOLUTE).size());
    Assert.assertTrue(capacityVector.getResourceNamesByCapacityType(
        ResourceUnitCapacityType.ABSOLUTE).contains(CUSTOM_RESOURCE));

    Assert.assertEquals(2, capacityVector.getResourceNamesByCapacityType(
        ResourceUnitCapacityType.PERCENTAGE).size());
    Assert.assertTrue(capacityVector.getResourceNamesByCapacityType(
        ResourceUnitCapacityType.PERCENTAGE).contains(VCORES_URI));
    Assert.assertTrue(capacityVector.getResourceNamesByCapacityType(
        ResourceUnitCapacityType.PERCENTAGE).contains(MEMORY_URI));

    Assert.assertEquals(10, capacityVector.getResource(MEMORY_URI).getResourceValue(), EPSILON);
    Assert.assertEquals(6, capacityVector.getResource(VCORES_URI).getResourceValue(), EPSILON);
}
/**
 * Picks the highest extension version supported by both the plugin and this Go server.
 *
 * <p>NOTE(review): versions are compared via {@code parseDouble}, which misorders multi-digit
 * minors (e.g. "1.10" parses smaller than "1.9"); this is only safe while extension versions
 * stay single-segment like "1.0"/"2.0" — confirm before relying on more complex versions.
 *
 * @throws RuntimeException when plugin and server share no common extension version
 */
@Override
public String resolveExtensionVersion(String pluginId, String extensionType,
                                      final List<String> goSupportedExtensionVersions) {
    List<String> pluginSupportedVersions = getRequiredExtensionVersionsByPlugin(pluginId, extensionType);
    String resolvedExtensionVersion = "0"; // sentinel: "no version found yet"
    for (String pluginSupportedVersion : pluginSupportedVersions) {
        if (goSupportedExtensionVersions.contains(pluginSupportedVersion)
                && parseDouble(resolvedExtensionVersion) < parseDouble(pluginSupportedVersion)) {
            resolvedExtensionVersion = pluginSupportedVersion;
        }
    }
    if ("0".equals(resolvedExtensionVersion)) {
        throw new RuntimeException(String.format(
                "Could not find matching extension version between Plugin[%s] and Go", pluginId));
    }
    return resolvedExtensionVersion;
}
/** Disjoint plugin/server version sets must raise a "no matching extension version" error. */
@Test
void shouldThrowExceptionIfMatchingExtensionVersionNotFound() {
    String pluginId = "plugin-id";
    String extensionType = "sample-extension";
    GoPlugin goPlugin = mock(GoPlugin.class);
    GoPlugginOSGiFrameworkStub osGiFrameworkStub = new GoPlugginOSGiFrameworkStub(goPlugin);
    osGiFrameworkStub.addHasReferenceFor(GoPlugin.class, pluginId, extensionType, true);
    when(goPlugin.pluginIdentifier()).thenReturn(new GoPluginIdentifier(extensionType, List.of("1.0", "2.0")));

    DefaultPluginManager pluginManager = new DefaultPluginManager(monitor, registry, osGiFrameworkStub,
            jarChangeListener, pluginRequestProcessorRegistry, systemEnvironment, pluginLoader);
    try {
        pluginManager.resolveExtensionVersion(pluginId, extensionType, List.of("3.0", "4.0"));
        fail("should have thrown exception for not finding matching extension version");
    } catch (Exception e) {
        assertThat(e.getMessage()).isEqualTo("Could not find matching extension version between Plugin[plugin-id] and Go");
    }
}
/**
 * Dumps the JVM heap to the default local dump folder.
 *
 * @return the file the heap dump was written to
 */
public File dumpHeap()
        throws MalformedObjectNameException, InstanceNotFoundException, ReflectionException,
        MBeanException, IOException {
    return dumpHeap(localDumpFolder);
}
/** Two consecutive heap dumps into the same folder must both succeed (no name collision). */
@Test
public void heapDumpTwice() throws Exception {
    File folder = tempFolder.newFolder();

    File dump1 = MemoryMonitor.dumpHeap(folder);
    assertNotNull(dump1);
    assertTrue(dump1.exists());
    assertThat(dump1.getParentFile(), Matchers.equalTo(folder));

    File dump2 = MemoryMonitor.dumpHeap(folder);
    assertNotNull(dump2);
    assertTrue(dump2.exists());
    assertThat(dump2.getParentFile(), Matchers.equalTo(folder));
}
/**
 * Dead-reckons {@code point} to {@code newTime} along its current course and speed.
 * Falls back to only changing the timestamp when speed or course data is missing.
 * A {@code newTime} before the point's time projects backwards along the course.
 */
static Point projectPointAtNewTime(Point point, Instant newTime) {

    //skip projection when data doesn't support it
    if (point.speed() == null || point.course() == null) {
        return new PointBuilder(point).time(newTime).build();
    }

    Duration timeDelta = Duration.between(point.time(), newTime); //can be negative....but that's ok..

    double distanceInNM = distTraveledInNM(point.speed().inKnots(), timeDelta);

    LatLong startPoint = point.latLong();
    LatLong endPoint = startPoint.projectOut(point.course().inDegrees(), distanceInNM);

    return new PointBuilder(point)
            .latLong(endPoint)
            .time(newTime)
            .build();
}
@Test public void testProjectPointAtNewTime_backwardInTime() { //1 knot -- due east Point testPoint = (new PointBuilder()) .time(Instant.EPOCH) .latLong(0.0, 0.0) .altitude(Distance.ofFeet(0.0)) .speedInKnots(1.0) .courseInDegrees(90.0) .build(); Instant newTime = Instant.EPOCH.minusSeconds(60 * 60); Point projection = Distances.projectPointAtNewTime(testPoint, newTime); assertTrue(projection.time().equals(newTime)); assertTrue(projection != testPoint); assertEquals(0.0, projection.latLong().latitude(), 0.00001, "Latitude should be 0"); assertTrue(projection.latLong().longitude() < 0.0, "Longitude should be positive"); assertTrue(projection.altitude().inFeet() == 0.0); LatLong start = new LatLong(0.0, 0.0); LatLong end = projection.latLong(); assertEquals( 1.0, start.distanceInNM(end), 0.0001, "Traveling 1 hour at 1 knot should move 1 NM" ); }
/**
 * Reads the configured JMX attribute from the MBean server connection.
 *
 * @return the attribute value, or {@code null} if the attribute cannot be read
 *         (connection failure or any JMX error such as a missing MBean)
 */
@Override
public Object getValue() {
    try {
        return mBeanServerConn.getAttribute(getObjectName(), attributeName);
    } catch (IOException | JMException e) {
        // Fix: collapsed two identical catch blocks into a single multi-catch.
        // Any connection or JMX failure is treated as "no value" rather than propagated.
        return null;
    }
}
/** Looking up an attribute on a nonexistent MBean yields null instead of an exception. */
@Test
public void returnsNullIfMBeanNotFound() throws Exception {
    ObjectName objectName = new ObjectName("foo.bar:type=NoSuchMBean");
    JmxAttributeGauge gauge = new JmxAttributeGauge(mBeanServer, objectName, "LoadedClassCount");
    assertThat(gauge.getValue()).isNull();
}
/**
 * Sets the token and returns this builder (as its concrete subtype) so
 * calls can be chained fluently.
 */
public B token(String token) {
    this.token = token;
    return getThis();
}
@Test
void token() {
    // The token handed to the builder must survive into the built service.
    ServiceBuilder serviceBuilder = new ServiceBuilder();
    serviceBuilder.token("token");

    Assertions.assertEquals("token", serviceBuilder.build().getToken());
}
@Override
public void accept(final MeterEntity entity, final DataTable value) {
    // Record which entity the samples belong to, then fold the incoming
    // table into the running one via append.
    this.entityId = entity.id();
    this.serviceId = entity.serviceId();
    this.value.append(value);
}
@Test
public void testAccept() {
    // Feed two sample tables for the same service; the function must merge
    // them into a single per-status-code count table.
    function.accept(
        MeterEntity.newService("service-test", Layer.GENERAL),
        HTTP_CODE_COUNT_1
    );
    function.accept(
        MeterEntity.newService("service-test", Layer.GENERAL),
        HTTP_CODE_COUNT_2
    );
    // JUnit's assertEquals takes (expected, actual); the original call had
    // the arguments reversed, which produces misleading failure messages.
    Assertions.assertEquals(new DataTable("200,3|301,2|404,7|502,9|505,1"), function.getValue());
}
/**
 * Rejects range endpoints that are neither numbers nor dates: notifies an
 * error event on the context and throws.
 */
static void valueMustBeValid(EvaluationContext ctx, Object value) {
    boolean validEndpoint = value instanceof BigDecimal || value instanceof LocalDate;
    if (validEndpoint) {
        return;
    }
    ctx.notifyEvt(() -> new ASTEventBase(
            FEELEvent.Severity.ERROR,
            Msg.createMessage(Msg.VALUE_X_NOT_A_VALID_ENDPOINT_FOR_RANGE_BECAUSE_NOT_A_NUMBER_NOT_A_DATE, value),
            null));
    throw new EndpointOfRangeNotValidTypeException();
}
@Test
void valueMustBeValidTrueTest() {
    // Numbers and dates are both legal range endpoints, so neither call
    // may emit a FEEL event.
    valueMustBeValid(ctx, BigDecimal.valueOf(1));
    verify(listener, never()).onEvent(any(FEELEvent.class));

    valueMustBeValid(ctx, LocalDate.of(2021, 1, 3));
    verify(listener, never()).onEvent(any(FEELEvent.class));
}
/**
 * Scans the output of {@code java -version} for a version line, logs it,
 * and fails with an IOException when the version is older than 1.8 or no
 * version line is found at all.
 */
protected static void checkJavaVersion(final PrintStream logger, String javaCommand,
                                       final BufferedReader r) throws IOException {
    // Matches e.g.  java version "1.8.0_291"  or  openjdk version "11.0.2"
    // and captures the leading dotted-numeric part of the version string.
    Pattern versionLine = Pattern.compile("(?i)(?:java|openjdk) version \"([0-9.]+).*\".*");

    for (String line = r.readLine(); line != null; line = r.readLine()) {
        Matcher m = versionLine.matcher(line);
        if (!m.matches()) {
            continue;
        }
        final String versionStr = m.group(1);
        logger.println(Messages.ComputerLauncher_JavaVersionResult(javaCommand, versionStr));
        try {
            // Anything older than Java 8 is rejected.
            if (new VersionNumber(versionStr).isOlderThan(new VersionNumber("1.8"))) {
                throw new IOException(Messages.ComputerLauncher_NoJavaFound(line));
            }
        } catch (NumberFormatException x) {
            // The captured text was not a parseable version number.
            throw new IOException(Messages.ComputerLauncher_NoJavaFound(line), x);
        }
        return;
    }

    // Ran out of output without ever seeing a version line.
    logger.println(Messages.ComputerLauncher_UnknownJavaVersion(javaCommand));
    throw new IOException(Messages.ComputerLauncher_UnknownJavaVersion(javaCommand));
}
@Test
public void jdk5() {
    // A JVM reporting Java 1.5 must be rejected with an IOException.
    String javaFiveBanner =
        "java version \"1.5.0_22\"\n"
            + "Java(TM) 2 Runtime Environment, Standard Edition (build 1.5.0_22-b03)\n"
            + "Java HotSpot(TM) Server VM (build 1.5.0_22-b03, mixed mode)\n";
    assertThrows(
            IOException.class,
            () -> ComputerLauncher.checkJavaVersion(
                    new PrintStream(OutputStream.nullOutputStream()),
                    "-",
                    new BufferedReader(new StringReader(javaFiveBanner))));
}
/**
 * Rewrites the agent config file so the auto-registration properties are
 * commented out (via the filtering writer), then reloads the in-memory
 * properties.  Failures are logged, never propagated.
 */
@Override
public void scrubRegistrationProperties() {
    // Nothing to scrub when the config file does not exist.
    if (!exist()) {
        return;
    }
    try {
        PropertiesConfiguration config = new PropertiesConfiguration();
        // The custom IO factory is what actually comments out the
        // auto-registration keys while the layout is written back.
        config.setIOFactory(new FilteringOutputWriterFactory());
        PropertiesConfigurationLayout layout = new PropertiesConfigurationLayout();
        layout.setLineSeparator("\n");
        // Load the current file (the layout preserves comments/ordering)...
        layout.load(config, reader());
        // ...then rewrite the file through the filtering writer.
        // NOTE(review): FileWriter uses the platform default charset here --
        // confirm this matches the charset reader() uses.
        try (FileWriter out = new FileWriter(this.configFile)) {
            layout.save(config, out);
        }
        // Refresh the in-memory view from the rewritten file.
        loadProperties();
    } catch (ConfigurationException | IOException e) {
        // Best effort: scrubbing failures are logged, not propagated.
        LOG.warn("[Agent Auto Registration] Unable to scrub registration key.", e);
    }
}
@Test
void shouldScrubTheAutoRegistrationProperties() throws Exception {
    // A typical provisioned config file: comments plus the four
    // auto-registration properties.
    String originalContents = """
            #
            # file autogenerated by chef, any changes will be lost
            #
            # the registration key
            agent.auto.register.key = some secret key
            # the resources on this agent
            agent.auto.register.resources = some,resources
            # The hostname of this agent
            agent.auto.register.hostname = agent42.example.com
            # The environments this agent belongs to
            agent.auto.register.environments = production,blue
            """;
    FileUtils.writeStringToFile(configFile, originalContents, UTF_8);
    AgentAutoRegistrationProperties properties = new AgentAutoRegistrationPropertiesImpl(configFile);
    properties.scrubRegistrationProperties();
    // Scrubbing must comment out every auto-registration property, each
    // preceded by an explanatory note, while leaving the pre-existing
    // comments untouched.
    String newContents = """
            #
            # file autogenerated by chef, any changes will be lost
            #
            # the registration key
            # The autoregister key has been intentionally removed by Go as a security measure.
            # agent.auto.register.key = some secret key
            # the resources on this agent
            # This property has been removed by Go after attempting to auto-register with the Go server.
            # agent.auto.register.resources = some,resources
            # The hostname of this agent
            # This property has been removed by Go after attempting to auto-register with the Go server.
            # agent.auto.register.hostname = agent42.example.com
            # The environments this agent belongs to
            # This property has been removed by Go after attempting to auto-register with the Go server.
            # agent.auto.register.environments = production,blue
            """;
    assertThat(FileUtils.readFileToString(configFile, UTF_8)).isEqualTo(newContents);
}
/**
 * Opens an output stream for {@code file}; the actual B2 store call runs on
 * a background thread once the HTTP entity is available.
 */
@Override
public HttpResponseOutputStream<BaseB2Response> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    // Submit store call to background thread
    final DelayedHttpEntityCallable<BaseB2Response> command = new DelayedHttpEntityCallable<BaseB2Response>(file) {
        /**
         * @return The SHA-1 returned by the server for the uploaded object
         */
        @Override
        public BaseB2Response call(final HttpEntity entity) throws BackgroundException {
            try {
                final Checksum checksum = status.getChecksum();
                if(status.isSegment()) {
                    // Part of a large-file (multipart) upload: fetch a part
                    // upload URL for the file id carried in the status parameters.
                    final B2GetUploadPartUrlResponse uploadUrl = session.getClient().getUploadPartUrl(status.getParameters().get("fileId"));
                    return session.getClient().uploadLargeFilePart(uploadUrl, status.getPart(), entity, checksum.hash);
                }
                else {
                    if(null == urls.get()) {
                        // No cached upload URL yet: request one for the bucket
                        // and cache it for subsequent uploads.
                        final B2GetUploadUrlResponse uploadUrl = session.getClient().getUploadUrl(fileid.getVersionId(containerService.getContainer(file)));
                        if(log.isDebugEnabled()) {
                            log.debug(String.format("Obtained upload URL %s for file %s", uploadUrl, file));
                        }
                        urls.set(uploadUrl);
                        return this.upload(uploadUrl, entity, checksum);
                    }
                    else {
                        final B2GetUploadUrlResponse uploadUrl = urls.get();
                        if(log.isDebugEnabled()) {
                            log.debug(String.format("Use cached upload URL %s for file %s", uploadUrl, file));
                        }
                        try {
                            return this.upload(uploadUrl, entity, checksum);
                        }
                        catch(IOException | B2ApiException e) {
                            // Upload many files to the same upload_url until that URL gives an error
                            log.warn(String.format("Remove cached upload URL after failure %s", e));
                            urls.remove();
                            // Retry
                            // NOTE(review): the retry reuses the SAME uploadUrl
                            // that just failed -- confirm whether a fresh URL
                            // should be fetched here instead.
                            return this.upload(uploadUrl, entity, checksum);
                        }
                    }
                }
            }
            catch(B2ApiException e) {
                throw new B2ExceptionMappingService(fileid).map("Upload {0} failed", e, file);
            }
            catch(IOException e) {
                throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
            }
        }

        private BaseB2Response upload(final B2GetUploadUrlResponse uploadUrl, final HttpEntity entity, final Checksum checksum) throws B2ApiException,
                IOException {
            // Forward source timestamps as B2 file-info attributes when known.
            final Map<String, String> fileinfo = new HashMap<>(status.getMetadata());
            if(null != status.getModified()) {
                fileinfo.put(X_BZ_INFO_SRC_LAST_MODIFIED_MILLIS, String.valueOf(status.getModified()));
            }
            if(null != status.getCreated()) {
                fileinfo.put(X_BZ_INFO_SRC_CREATION_DATE_MILLIS, String.valueOf(status.getCreated()));
            }
            // Only a SHA-1 checksum can be verified server-side; any other
            // algorithm disables verification.
            final B2FileResponse response = session.getClient().uploadFile(uploadUrl, containerService.getKey(file), entity, checksum.algorithm == HashAlgorithm.sha1 ? checksum.hash : "do_not_verify", status.getMime(), fileinfo);
            // Remember the server-assigned file id for later lookups.
            fileid.cache(file, response.getFileId());
            return response;
        }

        @Override
        public long getContentLength() {
            return status.getLength();
        }
    };
    return this.write(file, status, command);
}
@Test
public void testWriteChecksumFailure() throws Exception {
    final Path bucket = new Path("test-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path file = new Path(bucket, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));

    // Upload one random byte but advertise the SHA-1 of the EMPTY string,
    // so server-side checksum verification is guaranteed to fail.
    final byte[] content = RandomUtils.nextBytes(1);
    final TransferStatus status = new TransferStatus();
    status.setLength(content.length);
    status.setChecksum(Checksum.parse("da39a3ee5e6b4b0d3255bfef95601890afd80709"));

    final HttpResponseOutputStream<BaseB2Response> out =
            new B2WriteFeature(session, new B2VersionIdProvider(session))
                    .write(file, status, new DisabledConnectionCallback());
    IOUtils.write(content, out);
    try {
        out.close();
        fail();
    }
    catch(IOException e) {
        // The checksum mismatch must surface as the cause of the close failure.
        assertTrue(e.getCause() instanceof ChecksumException);
    }
}
/**
 * Averages the in-memory data size over the partitions that actually report
 * one; partitions without a size are ignored.  Empty when no partition
 * reports a size.
 */
@VisibleForTesting
static OptionalDouble calculateAverageSizePerPartition(Collection<PartitionStatistics> statistics)
{
    long total = 0;
    long counted = 0;
    for (PartitionStatistics partition : statistics) {
        OptionalLong size = partition.getBasicStatistics().getInMemoryDataSizeInBytes();
        if (size.isPresent()) {
            long bytes = size.getAsLong();
            verify(bytes >= 0, "size must be greater than or equal to zero");
            total += bytes;
            counted++;
        }
    }
    return counted == 0
            ? OptionalDouble.empty()
            : OptionalDouble.of((double) total / counted);
}
@Test
public void testCalculateAverageSizePerPartition()
{
    // Partitions without size statistics contribute nothing at all.
    assertThat(calculateAverageSizePerPartition(ImmutableList.of())).isEmpty();
    assertThat(calculateAverageSizePerPartition(ImmutableList.of(PartitionStatistics.empty()))).isEmpty();
    assertThat(calculateAverageSizePerPartition(ImmutableList.of(PartitionStatistics.empty(), PartitionStatistics.empty()))).isEmpty();

    // Only partitions that report a size take part in the average; empty
    // ones are skipped, not counted as zero.
    assertEquals(calculateAverageSizePerPartition(ImmutableList.of(inMemorySize(10))), OptionalDouble.of(10));
    assertEquals(calculateAverageSizePerPartition(ImmutableList.of(inMemorySize(10), PartitionStatistics.empty())), OptionalDouble.of(10));
    assertEquals(calculateAverageSizePerPartition(ImmutableList.of(inMemorySize(10), inMemorySize(20))), OptionalDouble.of(15));
    assertEquals(calculateAverageSizePerPartition(ImmutableList.of(inMemorySize(10), inMemorySize(20), PartitionStatistics.empty())), OptionalDouble.of(15));
}
@Override
public long getMax() {
    // An empty snapshot reports 0 by convention; otherwise the maximum is
    // the last entry (assumes values[] is sorted ascending, as the original
    // implementation did).
    return values.length == 0 ? 0 : values[values.length - 1];
}
@Test
public void calculatesAMaxOfZeroForAnEmptySnapshot() throws Exception {
    // A snapshot built from no samples must report a maximum of zero.
    Snapshot emptySnapshot =
        new WeightedSnapshot(WeightedArray(new long[]{}, new double[]{}));

    assertThat(emptySnapshot.getMax()).isZero();
}
@Override
public StageBundleFactory forStage(ExecutableStage executableStage) {
    // Each stage gets its own bundle factory instance.
    SimpleStageBundleFactory stageBundleFactory = new SimpleStageBundleFactory(executableStage);
    return stageBundleFactory;
}
@Test
public void createsMultipleEnvironmentOfSingleType() throws Exception {
    ServerFactory serverFactory = ServerFactory.createDefault();

    // Two environments sharing the same URN ("env:urn:a") but with different
    // payloads: they must be treated as distinct environments even though
    // they resolve to the same provider.
    Environment environmentA =
        Environment.newBuilder()
            .setUrn("env:urn:a")
            .setPayload(ByteString.copyFrom(new byte[1]))
            .build();
    Environment environmentAA =
        Environment.newBuilder()
            .setUrn("env:urn:a")
            .setPayload(ByteString.copyFrom(new byte[2]))
            .build();
    EnvironmentFactory envFactoryA = mock(EnvironmentFactory.class);
    when(envFactoryA.createEnvironment(eq(environmentA), any())).thenReturn(remoteEnvironment);
    when(envFactoryA.createEnvironment(eq(environmentAA), any())).thenReturn(remoteEnvironment);
    EnvironmentFactory.Provider environmentProviderFactoryA = mock(EnvironmentFactory.Provider.class);
    when(environmentProviderFactoryA.createEnvironmentFactory(
            any(), any(), any(), any(), any(), any()))
        .thenReturn(envFactoryA);
    when(environmentProviderFactoryA.getServerFactory()).thenReturn(serverFactory);

    // A second URN with its own provider, set up only to verify that it is
    // never invoked for the "a" environments.
    Environment environmentB = Environment.newBuilder().setUrn("env:urn:b").build();
    EnvironmentFactory envFactoryB = mock(EnvironmentFactory.class);
    when(envFactoryB.createEnvironment(eq(environmentB), any())).thenReturn(remoteEnvironment);
    EnvironmentFactory.Provider environmentProviderFactoryB = mock(EnvironmentFactory.Provider.class);
    when(environmentProviderFactoryB.createEnvironmentFactory(
            any(), any(), any(), any(), any(), any()))
        .thenReturn(envFactoryB);
    when(environmentProviderFactoryB.getServerFactory()).thenReturn(serverFactory);

    Map<String, Provider> environmentFactoryProviderMap =
        ImmutableMap.of(
            environmentA.getUrn(), environmentProviderFactoryA,
            environmentB.getUrn(), environmentProviderFactoryB);
    try (DefaultJobBundleFactory bundleFactory =
        createDefaultJobBundleFactory(environmentFactoryProviderMap)) {
        // First stage: provider A creates its factory once and environment A
        // is instantiated exactly once; provider B stays untouched.
        bundleFactory.forStage(getExecutableStage(environmentA));
        verify(environmentProviderFactoryA, Mockito.times(1))
            .createEnvironmentFactory(any(), any(), any(), any(), any(), any());
        verify(environmentProviderFactoryB, Mockito.times(0))
            .createEnvironmentFactory(any(), any(), any(), any(), any(), any());
        verify(envFactoryA, Mockito.times(1)).createEnvironment(eq(environmentA), any());
        verify(envFactoryA, Mockito.times(0)).createEnvironment(eq(environmentAA), any());

        // Second stage with the same URN but a different payload: a NEW
        // environment factory is created (2 total) and environment AA is
        // instantiated, while provider B is still never touched.
        bundleFactory.forStage(getExecutableStage(environmentAA));
        verify(environmentProviderFactoryA, Mockito.times(2))
            .createEnvironmentFactory(any(), any(), any(), any(), any(), any());
        verify(environmentProviderFactoryB, Mockito.times(0))
            .createEnvironmentFactory(any(), any(), any(), any(), any(), any());
        verify(envFactoryA, Mockito.times(1)).createEnvironment(eq(environmentA), any());
        verify(envFactoryA, Mockito.times(1)).createEnvironment(eq(environmentAA), any());
    }
}
/**
 * Returns the longest common prefix of the two sequences; empty when either
 * input is null or empty.
 */
public static CharSequence commonPrefix(CharSequence str1, CharSequence str2) {
    // No common prefix is possible when either input is missing or empty.
    if (isEmpty(str1) || isEmpty(str2)) {
        return EMPTY;
    }
    // Walk both sequences in lock-step until the characters diverge or the
    // shorter one is exhausted.
    final int limit = Math.min(str1.length(), str2.length());
    int matched = 0;
    while (matched < limit && str1.charAt(matched) == str2.charAt(matched)) {
        matched++;
    }
    return str1.subSequence(0, matched);
}
@Test public void commonPrefixTest() throws Exception{ // -------------------------- None match ----------------------- assertEquals("", CharSequenceUtil.commonPrefix("", "abc")); assertEquals("", CharSequenceUtil.commonPrefix(null, "abc")); assertEquals("", CharSequenceUtil.commonPrefix("abc", null)); assertEquals("", CharSequenceUtil.commonPrefix("abc", "")); assertEquals("", CharSequenceUtil.commonPrefix("azzzj", "bzzzj")); assertEquals("", CharSequenceUtil.commonPrefix("english中文", "french中文")); // -------------------------- Matched ----------------------- assertEquals("name_", CharSequenceUtil.commonPrefix("name_abc", "name_efg")); assertEquals("zzzj", CharSequenceUtil.commonPrefix("zzzja", "zzzjb")); assertEquals("中文", CharSequenceUtil.commonPrefix("中文english", "中文french")); // { space * 10 } + "abc" final String str1 = CharSequenceUtil.repeat(CharSequenceUtil.SPACE, 10) + "abc"; // { space * 5 } + "efg" final String str2 = CharSequenceUtil.repeat(CharSequenceUtil.SPACE, 5) + "efg"; // Expect common prefix: { space * 5 } assertEquals(CharSequenceUtil.repeat(CharSequenceUtil.SPACE, 5), CharSequenceUtil.commonPrefix(str1, str2)); }
/**
 * Strips the resolved provider's scheme prefix (e.g. {@code "zk:"}) from the
 * metadata URL; URLs without that prefix are returned unchanged.
 */
public static String removeIdentifierFromMetadataURL(String metadataURL) {
    MetadataStoreProvider provider = findProvider(metadataURL);
    String prefix = provider.urlScheme() + ":";
    return metadataURL.startsWith(prefix)
            ? metadataURL.substring(prefix.length())
            : metadataURL;
}
@Test
public void testRemoveIdentifierFromMetadataURL() {
    // Known schemes are stripped off the front...
    assertEquals(MetadataStoreFactoryImpl.removeIdentifierFromMetadataURL("zk:host:port"), "host:port");
    assertEquals(MetadataStoreFactoryImpl.removeIdentifierFromMetadataURL("rocksdb:/data/dir"), "/data/dir");
    assertEquals(MetadataStoreFactoryImpl.removeIdentifierFromMetadataURL("etcd:host:port"), "host:port");
    assertEquals(MetadataStoreFactoryImpl.removeIdentifierFromMetadataURL("memory:name"), "name");
    // ...while URLs whose scheme has no matching provider prefix pass
    // through untouched.
    assertEquals(MetadataStoreFactoryImpl.removeIdentifierFromMetadataURL("http://unknown/url/scheme"),
            "http://unknown/url/scheme");
    assertEquals(MetadataStoreFactoryImpl.removeIdentifierFromMetadataURL("custom:suffix"), "suffix");
}
public static boolean isBasicInfoChanged(Member actual, Member expected) { if (null == expected) { return null != actual; } if (!expected.getIp().equals(actual.getIp())) { return true; } if (expected.getPort() != actual.getPort()) { return true; } if (!expected.getAddress().equals(actual.getAddress())) { return true; } if (!expected.getState().equals(actual.getState())) { return true; } // if change if (expected.isGrpcReportEnabled() != actual.isGrpcReportEnabled()) { return true; } return isBasicInfoChangedInExtendInfo(expected, actual); }
@Test
void testIsBasicInfoChangedNoChangeWithExtendInfo() {
    // Adding an extend-info entry that is not part of the "basic" set must
    // not count as a basic-info change.
    Member extendedCopy = buildMember();
    extendedCopy.setExtendVal("test", "test");

    assertFalse(MemberUtil.isBasicInfoChanged(extendedCopy, originalMember));
}
/**
 * Translates the job's distributed-cache configuration (cache archives and
 * cache files) into YARN {@code LocalResource} entries, adding them to
 * {@code localResources}.
 */
@SuppressWarnings("deprecation")
public static void setupDistributedCache(Configuration conf,
    Map<String, LocalResource> localResources) throws IOException {
    // One builder is reused for both resource types; each section below
    // fully re-populates it before calling createLocalResources.
    LocalResourceBuilder lrb = new LocalResourceBuilder();
    lrb.setConf(conf);

    // Cache archives
    lrb.setType(LocalResourceType.ARCHIVE);
    lrb.setUris(JobContextImpl.getCacheArchives(conf));
    lrb.setTimestamps(JobContextImpl.getArchiveTimestamps(conf));
    lrb.setSizes(getFileSizes(conf, MRJobConfig.CACHE_ARCHIVES_SIZES));
    lrb.setVisibilities(DistributedCache.getArchiveVisibilities(conf));
    lrb.setSharedCacheUploadPolicies(
        Job.getArchiveSharedCacheUploadPolicies(conf));
    lrb.createLocalResources(localResources);

    // Cache files
    lrb.setType(LocalResourceType.FILE);
    lrb.setUris(JobContextImpl.getCacheFiles(conf));
    lrb.setTimestamps(JobContextImpl.getFileTimestamps(conf));
    lrb.setSizes(getFileSizes(conf, MRJobConfig.CACHE_FILES_SIZES));
    lrb.setVisibilities(DistributedCache.getFileVisibilities(conf));
    lrb.setSharedCacheUploadPolicies(
        Job.getFileSharedCacheUploadPolicies(conf));
    lrb.createLocalResources(localResources);
}
@SuppressWarnings("deprecation")
@Test
@Timeout(30000)
public void testSetupDistributedCache() throws Exception {
    Configuration conf = new Configuration();
    // Route "mockfs://" URIs to the mock file system so resolvePath can be
    // stubbed below.
    conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
    URI mockUri = URI.create("mockfs://mock/");
    FileSystem mockFs = ((FilterFileSystem) FileSystem.get(mockUri, conf))
        .getRawFileSystem();

    // One cache archive, and one cache file whose "#something" fragment
    // becomes its localized name.
    URI archive = new URI("mockfs://mock/tmp/something.zip");
    Path archivePath = new Path(archive);
    URI file = new URI("mockfs://mock/tmp/something.txt#something");
    Path filePath = new Path(file);
    when(mockFs.resolvePath(archivePath)).thenReturn(archivePath);
    when(mockFs.resolvePath(filePath)).thenReturn(filePath);
    Job.addCacheArchive(archive, conf);
    conf.set(MRJobConfig.CACHE_ARCHIVES_TIMESTAMPS, "10");
    conf.set(MRJobConfig.CACHE_ARCHIVES_SIZES, "10");
    conf.set(MRJobConfig.CACHE_ARCHIVES_VISIBILITIES, "true");
    Job.addCacheFile(file, conf);
    conf.set(MRJobConfig.CACHE_FILE_TIMESTAMPS, "11");
    conf.set(MRJobConfig.CACHE_FILES_SIZES, "11");
    conf.set(MRJobConfig.CACHE_FILE_VISIBILITIES, "true");
    Map<String, LocalResource> localResources =
        new HashMap<String, LocalResource>();
    MRApps.setupDistributedCache(conf, localResources);
    assertEquals(2, localResources.size());
    // The archive keeps its file name; size/timestamp come from the conf above.
    LocalResource lr = localResources.get("something.zip");
    assertNotNull(lr);
    assertEquals(10l, lr.getSize());
    assertEquals(10l, lr.getTimestamp());
    assertEquals(LocalResourceType.ARCHIVE, lr.getType());
    // The file is registered under its URI fragment ("something").
    lr = localResources.get("something");
    assertNotNull(lr);
    assertEquals(11l, lr.getSize());
    assertEquals(11l, lr.getTimestamp());
    assertEquals(LocalResourceType.FILE, lr.getType());
}
/**
 * Grants each role in {@code parentRoleName} to {@code user}, enforcing the
 * per-user predecessor-role limit, then persists the change to the edit log
 * and invalidates the user's cached authorization state.
 *
 * @throws PrivilegeException if a role is PUBLIC or the role limit is exceeded
 */
protected void grantRoleToUser(List<String> parentRoleName, UserIdentity user) throws PrivilegeException {
    // Lock order: user write lock first, then role read lock.
    userWriteLock();
    try {
        UserPrivilegeCollectionV2 userPrivilegeCollection = getUserPrivilegeCollectionUnlocked(user);
        roleReadLock();
        try {
            for (String parentRole : parentRoleName) {
                long roleId = getRoleIdByNameNoLock(parentRole);

                // public cannot be revoked!
                if (roleId == PrivilegeBuiltinConstants.PUBLIC_ROLE_ID) {
                    throw new PrivilegeException("Granting role PUBLIC has no effect. " +
                            "Every user and role has role PUBLIC implicitly granted.");
                }

                // temporarily add parent role to user to verify predecessors
                userPrivilegeCollection.grantRole(roleId);
                boolean verifyDone = false;
                try {
                    Set<Long> result = getAllPredecessorRoleIdsUnlocked(userPrivilegeCollection);
                    if (result.size() > Config.privilege_max_total_roles_per_user) {
                        LOG.warn("too many predecessor roles {} for user {}", result, user);
                        throw new PrivilegeException(String.format(
                                "%s has total %d predecessor roles > %d!",
                                user, result.size(), Config.privilege_max_total_roles_per_user));
                    }
                    verifyDone = true;
                } finally {
                    // Roll back the tentative grant if the predecessor check
                    // did not complete successfully.
                    if (!verifyDone) {
                        userPrivilegeCollection.revokeRole(roleId);
                    }
                }
            }
        } finally {
            roleReadUnlock();
        }
        // Persist the updated privilege collection and drop cached
        // authorization state for this user.
        globalStateMgr.getEditLog().logUpdateUserPrivilege(
                user, userPrivilegeCollection, provider.getPluginId(), provider.getPluginVersion());
        invalidateUserInCache(user);
        LOG.info("grant role {} to user {}", Joiner.on(", ").join(parentRoleName), user);
    } finally {
        userWriteUnlock();
    }
}
@Test
public void testGrantRoleToUser() throws Exception {
    CreateUserStmt createUserStmt = (CreateUserStmt) UtFrameUtils.parseStmtWithNewParser(
            "create user test_role_user", ctx);
    ctx.getGlobalStateMgr().getAuthenticationMgr().createUser(createUserStmt);
    UserIdentity testUser = createUserStmt.getUserIdentity();
    AuthorizationMgr manager = ctx.getGlobalStateMgr().getAuthorizationMgr();
    setCurrentUserAndRoles(ctx, UserIdentity.ROOT);

    // grant create table on database db to role1
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "create role role1;", ctx), ctx);
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "grant create table on database db to role role1;", ctx), ctx);
    // can't create table
    assertDbActionsOnTest(false, false, testUser);

    // grant role1 to test_user
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "grant role1 to test_role_user", ctx), ctx);
    // can create table but can't drop
    assertDbActionsOnTest(true, false, testUser);

    // grant role2 to test_user
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "create role role2;", ctx), ctx);
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "grant drop on database db to role role2;", ctx), ctx);
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "grant role2 to test_role_user;", ctx), ctx);
    // can create table & drop
    assertDbActionsOnTest(true, true, testUser);

    // grant drop to test_user directly: role-based and direct privileges
    // must combine.
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "grant drop on database db to test_role_user;", ctx), ctx);
    // still, can create table & drop
    assertDbActionsOnTest(true, true, testUser);

    // revoke role1 from test_user
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "revoke role1 from test_role_user;", ctx), ctx);
    // can drop but can't create table
    assertDbActionsOnTest(false, true, testUser);

    // grant role1 to test_user; revoke create table from role1 -- revoking
    // a privilege from a role affects users already holding that role.
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "grant role1 to test_role_user", ctx), ctx);
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "revoke create table on database db from role role1", ctx), ctx);
    // can drop but can't create table
    assertDbActionsOnTest(false, true, testUser);

    // revoke empty role role1
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "revoke role1 from test_role_user;", ctx), ctx);
    // can drop but can't create table
    assertDbActionsOnTest(false, true, testUser);

    // revoke role2 from test_user -- the direct "drop" grant still applies.
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "revoke role2 from test_role_user;", ctx), ctx);
    // can drop
    assertDbActionsOnTest(false, true, testUser);

    // grant role2 to test_user; revoke drop from role2
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "grant role2 to test_role_user", ctx), ctx);
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "revoke drop on database db from role role2", ctx), ctx);
    // can drop
    assertDbActionsOnTest(false, true, testUser);

    // grant drop on role2; revoke drop from user -- the role-based grant
    // keeps drop usable after the direct grant is revoked.
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "grant drop on database db to role role2", ctx), ctx);
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "revoke drop on database db from test_role_user", ctx), ctx);
    assertDbActionsOnTest(false, true, testUser);

    // revoke role2 from test_user
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "revoke role2 from test_role_user;", ctx), ctx);

    // Multi-role grant/revoke in a single statement must round-trip.
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "grant role1, role2 to test_role_user;", ctx), ctx);
    List<String> expected = Arrays.asList("role1", "role2");
    expected.sort(null);
    List<String> result = manager.getRoleNamesByUser(
            UserIdentity.createAnalyzedUserIdentWithIp("test_role_user", "%"));
    result.sort(null);
    Assert.assertEquals(expected, result);
    DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(
            "revoke role1, role2 from test_role_user;", ctx), ctx);
    Assert.assertEquals("[]", manager.getRoleNamesByUser(
            UserIdentity.createAnalyzedUserIdentWithIp("test_role_user", "%")).toString());
    // can't drop
    assertDbActionsOnTest(false, false, testUser);
}
@Override
@TpsControl(pointName = "ConfigQuery")
@Secured(action = ActionTypes.READ, signType = SignType.CONFIG)
@ExtractorManager.Extractor(rpcExtractor = ConfigRequestParamExtractor.class)
public ConfigQueryResponse handle(ConfigQueryRequest request, RequestMeta meta) throws NacosException {
    try {
        boolean notify = request.isNotify();
        return getContext(request, meta, notify);
    } catch (Exception e) {
        // Any failure is converted into an error response instead of
        // propagating to the RPC layer.
        return ConfigQueryResponse.buildFailResponse(ResponseCode.FAIL.getCode(), e.getMessage());
    }
}
@Test
void testGetConfigNotExistAndConflict() throws Exception {
    // Unique keys per run so cached state from other tests cannot interfere.
    String dataId = "dataId" + System.currentTimeMillis();
    String group = "group" + System.currentTimeMillis();
    String tenant = "tenant" + System.currentTimeMillis();
    //test config not exist
    // Read lock acquired (0) but no cached content -> the handler must
    // answer CONFIG_NOT_FOUND with an empty body.
    configCacheServiceMockedStatic.when(() -> ConfigCacheService.tryConfigReadLock(GroupKey2.getKey(dataId, group, tenant)))
            .thenReturn(0);
    final String groupKey = GroupKey2.getKey(dataId, group, tenant);
    when(ConfigCacheService.getContentCache(eq(groupKey))).thenReturn(null);
    ConfigQueryRequest configQueryRequest = new ConfigQueryRequest();
    configQueryRequest.setDataId(dataId);
    configQueryRequest.setGroup(group);
    configQueryRequest.setTenant(tenant);
    RequestMeta requestMeta = new RequestMeta();
    requestMeta.setClientIp("127.0.0.1");
    ConfigQueryResponse response = configQueryRequestHandler.handle(configQueryRequest, requestMeta);
    assertEquals(CONFIG_NOT_FOUND, response.getErrorCode());
    assertNull(response.getContent());
    assertNull(response.getMd5());
    assertFalse(response.isBeta());
    assertNull(response.getTag());
    //test config conflict
    // Cache entry exists but the read lock cannot be taken (-1) -> the
    // handler must answer CONFIG_QUERY_CONFLICT, again with an empty body.
    when(ConfigCacheService.getContentCache(eq(groupKey))).thenReturn(new CacheItem(groupKey));
    configCacheServiceMockedStatic.when(() -> ConfigCacheService.tryConfigReadLock(GroupKey2.getKey(dataId, group, tenant)))
            .thenReturn(-1);
    ConfigQueryResponse responseConflict = configQueryRequestHandler.handle(configQueryRequest, requestMeta);
    assertEquals(ConfigQueryResponse.CONFIG_QUERY_CONFLICT, responseConflict.getErrorCode());
    assertNull(responseConflict.getContent());
    assertNull(responseConflict.getMd5());
    assertFalse(responseConflict.isBeta());
    assertNull(responseConflict.getTag());
}
/**
 * Convenience overload: sets the request body with no explicit content type.
 */
public HttpRequest body(String body) {
    return this.body(body, null);
}
// Manual/networked check only (hits baijiahao.baidu.com), hence @Disabled:
// issues a GET carrying a request body and dumps the response for inspection.
@Test
@Disabled
public void bodyTest() {
    final String ddddd1 = HttpRequest.get("https://baijiahao.baidu.com/s").body("id=1625528941695652600").execute().body();
    Console.log(ddddd1);
}
@Override
public void execute(Context context) {
    editionProvider.get().ifPresent(currentEdition -> {
        // Only Community Edition triggers this step.
        if (!EditionProvider.Edition.COMMUNITY.equals(currentEdition)) {
            return;
        }
        // Per-language counts of files the scanner could not analyze;
        // zero-count entries are dropped.
        Map<String, Integer> unanalyzedByLanguage = reportReader.readMetadata()
            .getNotAnalyzedFilesByLanguageMap()
            .entrySet()
            .stream()
            .filter(languageCount -> languageCount.getValue() > 0)
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        if (unanalyzedByLanguage.isEmpty()) {
            return;
        }
        // Surface a task-level warning and record the per-language measures.
        ceTaskMessages.add(constructMessage(unanalyzedByLanguage));
        computeMeasures(unanalyzedByLanguage);
    });
}
@Test
public void add_warning_and_measures_in_SQ_community_edition_if_there_are_c_or_cpp_files() {
    when(editionProvider.get()).thenReturn(Optional.of(EditionProvider.Edition.COMMUNITY));
    // Pre-existing analysis warnings are set up only to show the step does
    // not depend on them.
    ScannerReport.AnalysisWarning warning1 = ScannerReport.AnalysisWarning.newBuilder().setText("warning 1").build();
    ScannerReport.AnalysisWarning warning2 = ScannerReport.AnalysisWarning.newBuilder().setText("warning 2").build();
    ImmutableList<ScannerReport.AnalysisWarning> warnings = of(warning1, warning2);
    reportReader.setAnalysisWarnings(warnings);
    // Scanner metadata reports unanalyzed files for C, C++ and a third language.
    reportReader.setMetadata(ScannerReport.Metadata.newBuilder()
        .putNotAnalyzedFilesByLanguage("C++", 20)
        .putNotAnalyzedFilesByLanguage("C", 10)
        .putNotAnalyzedFilesByLanguage("SomeLang", 1000)
        .build());

    underTest.execute(new TestComputationStepContext());

    // Exactly one upgrade-suggestion message listing all three languages...
    verify(ceTaskMessages, times(1)).add(argumentCaptor.capture());
    assertThat(argumentCaptor.getAllValues())
        .extracting(Message::getText, Message::getType)
        .containsExactly(tuple(
            "10 unanalyzed C, 20 unanalyzed C++ and 1000 unanalyzed SomeLang files were detected in this project during the last analysis. C,"
                + " C++ and SomeLang cannot be analyzed with your current SonarQube edition. Please consider"
                + " <a target=\"_blank\" href=\"https://www.sonarsource.com/plans-and-pricing/developer/?referrer=sonarqube-cpp\">upgrading to Developer Edition</a> to find Bugs,"
                + " Code Smells, Vulnerabilities and Security Hotspots in these files.",
            MessageType.SUGGEST_DEVELOPER_EDITION_UPGRADE));
    // ...plus measures only for the dedicated C and C++ metrics.
    assertThat(measureRepository.getAddedRawMeasure(PROJECT_REF, UNANALYZED_C_KEY).get().getIntValue()).isEqualTo(10);
    assertThat(measureRepository.getAddedRawMeasure(PROJECT_REF, UNANALYZED_CPP_KEY).get().getIntValue()).isEqualTo(20);
}
/**
 * Normalises {@code null} to the shared empty byte array; any non-null
 * input is returned as-is (no defensive copy).
 */
public static byte[] nullToEmpty(final byte[] bytes) {
    if (bytes == null) {
        return EMPTY_BYTES;
    }
    return bytes;
}
@Test
public void testNullToEmpty() {
    // null collapses to the empty array...
    Assert.assertArrayEquals(new byte[] {}, BytesUtil.nullToEmpty(null));
    // ...while real content passes through unchanged.
    Assert.assertArrayEquals(new byte[] { 1, 2 }, BytesUtil.nullToEmpty(new byte[] { 1, 2 }));
}
/**
 * Returns the accumulated result messages.
 * NOTE(review): exposes the internal list directly (no defensive copy).
 */
public List<InterpreterResultMessage> message() {
    return this.msg;
}
@Test
void testComplexMagicType() {
    InterpreterResult result = null;
    // A magic word ("%table") in the MIDDLE of a line does not switch type:
    // the whole output stays one TEXT message.
    result = new InterpreterResult(InterpreterResult.Code.SUCCESS,
        "some text before %table col1\tcol2\naaa\t123\n");
    assertEquals(InterpreterResult.Type.TEXT, result.message().get(0).getType(),
        "some text before magic return magic");
    // A magic word at the START of a new line opens a second message of
    // that type; the leading text remains message 0 (TEXT).
    result = new InterpreterResult(InterpreterResult.Code.SUCCESS,
        "some text before\n%table col1\tcol2\naaa\t123\n");
    assertEquals(InterpreterResult.Type.TEXT, result.message().get(0).getType(),
        "some text before magic return magic");
    assertEquals(InterpreterResult.Type.TABLE, result.message().get(1).getType(),
        "some text before magic return magic");
    // Magic B appearing after magic A: message 0 keeps type A (HTML here).
    result = new InterpreterResult(InterpreterResult.Code.SUCCESS,
        "%html <h3> This is a hack </h3> %table\n col1\tcol2\naaa\t123\n");
    assertEquals(InterpreterResult.Type.HTML, result.message().get(0).getType(),
        "magic A before magic B return magic A");
    // Plain text followed by magic A then magic B: message 0 stays TEXT.
    result = new InterpreterResult(InterpreterResult.Code.SUCCESS,
        "some text before magic word %table col1\tcol2\naaa\t123\n %html " + "<h3> This is a hack </h3>");
    assertEquals(InterpreterResult.Type.TEXT, result.message().get(0).getType(),
        "text & magic A before magic B return magic A");
    // Several magics in sequence: the first determines message 0's type.
    result = new InterpreterResult(InterpreterResult.Code.SUCCESS,
        "%table col1\tcol2\naaa\t123\n %html <h3> This is a hack </h3> %table col1\naaa\n123\n");
    assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType(),
        "magic A, magic B, magic a' return magic A");
}
/**
 * Two-argument INSTR: searches from the beginning of the string
 * (one-based start position).
 */
@Udf
public int instr(final String str, final String substring) {
    final int fromBeginning = 1;
    return instr(str, substring, fromBeginning);
}
@Test
public void shouldReturnZeroWhenSubstringNotFound() {
    // A substring that never occurs must yield position 0.
    assertThat(udf.instr("CORPORATE FLOOR", "ABC"), is(0));
}
@Override
public Set<Path> getPaths(ElementId src, ElementId dst) {
    // A typed null selects the LinkWeigher overload of the superclass method.
    final LinkWeigher noWeigher = null;
    return super.getPaths(src, dst, noWeigher);
}
@Test(expected = NullPointerException.class)
public void testGetPathsWithNullDest() {
    // Asking for paths to a null destination must throw NullPointerException.
    VirtualNetwork virtualNetwork = setupVnet();
    PathService paths = manager.get(virtualNetwork.id(), PathService.class);
    paths.getPaths(DID1, null);
}
/**
 * CRC-16/KERMIT: polynomial 0x1021, initial value 0x0000, input and output
 * reflected, final XOR 0x0000.
 */
public static int CCITT_Kermit(@NonNull final byte[] data, final int offset, final int length) {
    final int polynomial = 0x1021;
    final int initialValue = 0x0000;
    final int finalXor = 0x0000;
    return CRC(polynomial, initialValue, data, offset, length, true, true, finalXor);
}
@Test
public void CCITT_Kermit_empty() {
    // An empty message must yield the initial/final value 0x0000.
    // The original test passed length = 1 for a zero-length array, which
    // points past the buffer instead of exercising the empty-input case;
    // length = 0 is what "empty" actually means here.
    final byte[] data = new byte[0];
    assertEquals(0x0000, CRC16.CCITT_Kermit(data, 0, 0));
}