focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override
public void run() {
    // Delegates to a pre-built processor when one was injected; otherwise runs
    // the feature lifecycle directly: before-hook, per-scenario processing, after-hook.
    if (processor != null) {
        processor.execute();
    } else {
        if (!beforeHook()) {
            // A 'false' return from the before-feature hook aborts all scenarios.
            logger.info("before-feature hook returned [false], aborting: {}", this);
        } else {
            scenarios.forEachRemaining(this::processScenario);
        }
        // afterFeature() runs regardless of whether the before-hook aborted.
        afterFeature();
    }
}
@Test
void testIgnoreStepFailure() {
    // Force step failures so the ignore-step-failure behavior is exercised.
    fail = true;
    run("ignore-step-failure.feature");
    Report report = SuiteReports.DEFAULT.featureReport(fr.suite, fr.result);
    report.render("target/report-test");
    // error log should have entries for all failures
}
/**
 * Resolves the inter-node listener URL using the default logger.
 *
 * @param portResolver maps the configured listener URL to a concrete port.
 * @return the resolved inter-node listener URL.
 */
public URL getInterNodeListener(
    final Function<URL, Integer> portResolver
) {
  return getInterNodeListener(portResolver, LOGGER);
}
@Test
public void shouldResolveInterNodeListenerToInternalListenerWithAutoPortAssignment() {
    // Given: an internal listener configured with port 0 (auto-assign) and a
    // resolver that reports the actually-bound port.
    final URL autoPort = url("https://example.com:0");
    when(portResolver.apply(any())).thenReturn(2222);
    final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
        .put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092")
        .put(INTERNAL_LISTENER_CONFIG, autoPort.toString())
        .build()
    );
    // When:
    final URL actual = config.getInterNodeListener(portResolver, logger);
    // Then: the resolved port replaces the 0 placeholder and the choice is logged once.
    final URL expected = url("https://example.com:2222");
    assertThat(actual, is(expected));
    verifyLogsInterNodeListener(expected, QUOTED_INTERNAL_LISTENER_CONFIG);
    verifyNoMoreInteractions(logger);
}
/**
 * Assigns an active task to this client.
 *
 * @param task the task to assign.
 * @throws IllegalArgumentException if the task is already assigned
 *         (enforced by {@code assertNotAssigned}).
 */
public void assignActive(final TaskId task) {
    assertNotAssigned(task);
    assignedActiveTasks.taskIds().add(task);
}
@Test
public void shouldRefuseDoubleActiveTask() {
    // Assigning the same task twice must be rejected.
    final ClientState clientState = new ClientState(1);
    clientState.assignActive(TASK_0_0);
    assertThrows(IllegalArgumentException.class, () -> clientState.assignActive(TASK_0_0));
}
/**
 * Validates that exactly one of the given expressions is {@code true}.
 *
 * @param message     the error message used when validation fails.
 * @param expressions the boolean expressions to check.
 * @throws IllegalArgumentException unless exactly one expression is true.
 */
public static void onlyOneIsTrue(final String message, final boolean... expressions) {
    if (onlyOneIsTrueNonThrow(expressions)) {
        return;
    }
    throw new IllegalArgumentException(message);
}
@Test
public void testOnlyOneIsTrueThrow1() {
    // Zero true expressions must be rejected, not just "more than one".
    Assertions.assertThrows(IllegalArgumentException.class, () -> Utils.onlyOneIsTrue("foo", false, false));
}
/**
 * Wraps already-serialized bytes in a {@link SerializedValue}.
 *
 * @param serializedData the serialized payload; null-handling is delegated to the
 *        constructor (the paired test expects a NullPointerException for null input —
 *        presumably the constructor rejects null; confirm against SerializedValue).
 * @return a value backed directly by the given bytes.
 */
public static <T> SerializedValue<T> fromBytes(byte[] serializedData) {
    return new SerializedValue<>(serializedData);
}
@Test
void testFromNullBytes() {
    // Null byte arrays must be rejected eagerly rather than failing on later access.
    assertThatThrownBy(() -> SerializedValue.fromBytes(null))
            .isInstanceOf(NullPointerException.class);
}
/**
 * Creates a transformer that decorates a reactive stream with the given retry policy.
 *
 * @param retry the retry policy to apply.
 * @return a new {@link RetryTransformer} bound to {@code retry}.
 */
public static <T> RetryTransformer<T> of(Retry retry) {
    return new RetryTransformer<>(retry);
}
@Test
public void doNotRetryFromPredicateUsingObservable() {
    // Retry only on IOException; HelloWorldException must NOT trigger a retry.
    RetryConfig config = RetryConfig.custom()
        .retryOnException(t -> t instanceof IOException)
        .waitDuration(Duration.ofMillis(50))
        .maxAttempts(3).build();
    Retry retry = Retry.of("testName", config);
    given(helloWorldService.returnHelloWorld())
        .willThrow(new HelloWorldException());
    Observable.fromCallable(helloWorldService::returnHelloWorld)
        .compose(RetryTransformer.of(retry))
        .test()
        .assertError(HelloWorldException.class)
        .assertNotComplete();
    // The service is called exactly once: the predicate rejected the exception.
    then(helloWorldService).should().returnHelloWorld();
    Retry.Metrics metrics = retry.getMetrics();
    // Metrics must record a failure WITHOUT a retry attempt.
    assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isEqualTo(1);
    assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isZero();
}
/**
 * Validates an update to an existing source config and returns the merged result.
 * Identity fields (tenant, namespace, name), the processing guarantees, and the
 * regular-vs-batch nature of the source may not change; every other non-empty /
 * non-null field of {@code newConfig} overrides the stored value.
 *
 * @param existingConfig the currently stored source config.
 * @param newConfig      the requested update (may be sparse).
 * @return a clone of {@code existingConfig} with the updated fields applied.
 * @throws IllegalArgumentException if an immutable field differs between the two configs.
 */
public static SourceConfig validateUpdate(SourceConfig existingConfig, SourceConfig newConfig) {
    // Work on a copy so the stored config is never mutated.
    SourceConfig mergedConfig = clone(existingConfig);
    if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
        throw new IllegalArgumentException("Tenants differ");
    }
    if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
        throw new IllegalArgumentException("Namespaces differ");
    }
    if (!existingConfig.getName().equals(newConfig.getName())) {
        throw new IllegalArgumentException("Function Names differ");
    }
    // String fields: only a non-empty value in the update overrides the stored one.
    if (!StringUtils.isEmpty(newConfig.getClassName())) {
        mergedConfig.setClassName(newConfig.getClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getTopicName())) {
        mergedConfig.setTopicName(newConfig.getTopicName());
    }
    if (!StringUtils.isEmpty(newConfig.getSerdeClassName())) {
        mergedConfig.setSerdeClassName(newConfig.getSerdeClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getSchemaType())) {
        mergedConfig.setSchemaType(newConfig.getSchemaType());
    }
    // Map/object fields: a non-null value replaces the stored one wholesale.
    if (newConfig.getConfigs() != null) {
        mergedConfig.setConfigs(newConfig.getConfigs());
    }
    if (newConfig.getSecrets() != null) {
        mergedConfig.setSecrets(newConfig.getSecrets());
    }
    if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
        mergedConfig.setLogTopic(newConfig.getLogTopic());
    }
    if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
            .equals(existingConfig.getProcessingGuarantees())) {
        throw new IllegalArgumentException("Processing Guarantees cannot be altered");
    }
    if (newConfig.getParallelism() != null) {
        mergedConfig.setParallelism(newConfig.getParallelism());
    }
    // Resources are merged field-by-field rather than replaced.
    if (newConfig.getResources() != null) {
        mergedConfig
                .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
    }
    if (!StringUtils.isEmpty(newConfig.getArchive())) {
        mergedConfig.setArchive(newConfig.getArchive());
    }
    if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
        mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
    }
    if
    (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
        mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
    }
    // A source cannot switch between regular and batch mode after creation.
    if (isBatchSource(existingConfig) != isBatchSource(newConfig)) {
        throw new IllegalArgumentException("Sources cannot be update between regular sources and batchsource");
    }
    if (newConfig.getBatchSourceConfig() != null) {
        // Batch-source sub-config has its own update validation rules.
        validateBatchSourceConfigUpdate(existingConfig.getBatchSourceConfig(), newConfig.getBatchSourceConfig());
        mergedConfig.setBatchSourceConfig(newConfig.getBatchSourceConfig());
    }
    if (newConfig.getProducerConfig() != null) {
        mergedConfig.setProducerConfig(newConfig.getProducerConfig());
    }
    return mergedConfig;
}
@Test
public void testMergeDifferentClassName() {
    SourceConfig sourceConfig = createSourceConfig();
    SourceConfig newSourceConfig = createUpdatedSourceConfig("className", "Different");
    SourceConfig mergedConfig = SourceConfigUtils.validateUpdate(sourceConfig, newSourceConfig);
    // The new class name must be taken over...
    assertEquals(
        mergedConfig.getClassName(),
        "Different"
    );
    // ...and reverting it must make the merged config identical to the original,
    // proving no other field was touched (compared via JSON serialization).
    mergedConfig.setClassName(sourceConfig.getClassName());
    assertEquals(
        new Gson().toJson(sourceConfig),
        new Gson().toJson(mergedConfig)
    );
}
/**
 * Evaluates an interpolation template against the given event.
 * Only the internal {@code Event} implementation is supported; any other
 * implementation of the API interface is rejected.
 *
 * @param event    the event providing field values; must be the concrete Event class.
 * @param template the template string to evaluate.
 * @return the interpolated string.
 * @throws IllegalStateException   if {@code event} is not the expected concrete class.
 * @throws JsonProcessingException propagated from template evaluation.
 */
public static String evaluate(final co.elastic.logstash.api.Event event, final String template) throws JsonProcessingException {
    if (!(event instanceof Event)) {
        throw new IllegalStateException("Unknown event concrete class: " + event.getClass().getName());
    }
    return evaluate((Event) event, template);
}
@Test
public void TestMixDateAndFieldsJavaSyntax() throws IOException {
    // Mixes a Java-syntax date pattern (%{{...}}) with a plain field reference (%{bar}).
    Event event = getTestEvent();
    String path = "/full/%{{YYYY-DDD}}/weeee/%{bar}";
    // YYYY-DDD renders year plus day-of-year for the test event's timestamp.
    assertEquals("/full/2015-274/weeee/foo", StringInterpolation.evaluate(event, path));
}
@Override
public void upgrade() {
    // Migration: convert plain-text secret fields of input configurations into
    // EncryptedValue documents. Idempotent: skipped once the recorded migration
    // state matches the current field mapping.
    Map<String, Set<String>> encryptedFieldsByInputType = getEncryptedFieldsByInputType();
    if (getMigratedField().equals(encryptedFieldsByInputType)) {
        LOG.debug("Migration already completed.");
        return;
    }
    final MongoCollection<Document> collection = getCollection();
    // Only documents whose input type has at least one encrypted field are affected.
    final FindIterable<Document> documents = collection.find(in(FIELD_TYPE, encryptedFieldsByInputType.keySet()));
    documents.forEach(doc -> {
        @SuppressWarnings("unchecked")
        final Map<String, Object> config = new HashMap<>((Map<String, Object>) doc.getOrDefault(FIELD_CONFIGURATION, Map.of()));
        final Set<String> encryptedFields = encryptedFieldsByInputType.getOrDefault((String) doc.get(FIELD_TYPE), Set.of());
        encryptedFields.forEach(fieldName -> {
            final Object value = config.get(fieldName);
            // Assume that in case of a Map, the value is already encrypted and doesn't need conversion.
            if (config.containsKey(fieldName) && !(value instanceof Map)) {
                final EncryptedValue encryptedValue = objectMapper.convertValue(value, EncryptedValue.class);
                config.put(fieldName, dbObjectMapper.convertValue(encryptedValue, TypeReferences.MAP_STRING_OBJECT));
            }
        });
        // Write back the whole configuration sub-document in one update.
        collection.updateOne(eq(FIELD_ID, doc.getObjectId(FIELD_ID)), Updates.set(FIELD_CONFIGURATION, config));
    });
    // Record completion so re-runs short-circuit above.
    saveMigrationCompleted(encryptedFieldsByInputType);
}
@SuppressWarnings("unchecked")
@Test
public void migrateEmptySecret() {
    // An empty secret must survive migration as an EncryptedValue that reports "not set".
    migration.upgrade();
    final Document migrated = collection.find(Filters.eq(FIELD_TITLE, "empty-secret")).first();
    assertThat(migrated).isNotNull().satisfies(doc ->
            assertThat((Map<String, Object>) doc.get(FIELD_CONFIGURATION)).satisfies(config -> {
                final EncryptedValue encryptedValue = dbObjectMapper.convertValue(config.get(ENCRYPTED_FIELD), EncryptedValue.class);
                assertThat(encryptedValue.isSet()).isFalse();
            })
    );
}
/**
 * Sends an HTTP GET request.
 *
 * @param url     the target URL.
 * @param request the request metadata (headers, auth, ...).
 * @return the response; callers are responsible for closing it.
 * @throws IOException if the call fails.
 */
public Response get(URL url, Request request) throws IOException {
    return call(HttpMethods.GET, url, request);
}
@Test
public void testGet_sendCredentialsOverHttp() throws IOException {
    // With sendCredentialsOverHttp enabled, Basic auth must be sent even over plain HTTP.
    FailoverHttpClient insecureHttpClient = newHttpClient(true, true); // sendCredentialsOverHttp
    try (Response response = insecureHttpClient.get(new URL("http://plain.http"), fakeRequest(null))) {
        // intentionally empty
    }
    // Exactly one request was made, carrying the fake credentials ("fake-username:fake-secret" base64).
    Assert.assertEquals(1, urlCaptor.getAllValues().size());
    Assert.assertEquals(
        "Basic ZmFrZS11c2VybmFtZTpmYWtlLXNlY3JldA==",
        httpHeadersCaptor.getValue().getAuthorization());
}
@Override public String toString() { StringBuilder b = new StringBuilder(); if (StringUtils.isNotBlank(protocol)) { b.append(protocol); b.append("://"); } if (StringUtils.isNotBlank(host)) { b.append(host); } if (!isPortDefault() && port != -1) { b.append(':'); b.append(port); } if (StringUtils.isNotBlank(path)) { // If no scheme/host/port, leave the path as is if (b.length() > 0 && !path.startsWith("/")) { b.append('/'); } b.append(encodePath(path)); } if (queryString != null && !queryString.isEmpty()) { b.append(queryString.toString()); } if (fragment != null) { b.append("#"); b.append(encodePath(fragment)); } return b.toString(); }
@Test
public void testHttpsProtocolNoPort() {
    // An https URL without an explicit port must round-trip unchanged
    // (the default port must not be appended).
    s = "https://www.example.com/blah";
    t = "https://www.example.com/blah";
    assertEquals(t, new HttpURL(s).toString());
}
/**
 * Checks whether the message field named by the rule contains the rule's value.
 * The rule's inverted flag flips the outcome (XOR). A missing field counts as
 * "no match", which an inverted rule turns into a match.
 */
@Override
public boolean match(Message msg, StreamRule rule) {
    final boolean inverted = rule.getInverted();
    final Object field = msg.getField(rule.getField());
    if (field == null) {
        return inverted;
    }
    return inverted ^ field.toString().contains(rule.getValue());
}
@Test
public void testNonExistentField() {
    // The rule's field is absent from the message, so a non-inverted rule must not match.
    msg.addField("someother", "hello foo");
    StreamRuleMatcher matcher = getMatcher(rule);
    assertFalse(matcher.match(msg, rule));
}
/**
 * MyBatis update interceptor: before delegating to the executor, walks the
 * parameter object's class hierarchy and stamps every field selected by
 * {@code matchParam} with the current time.
 *
 * @param invocation the intercepted Executor.update(...) invocation.
 * @return the executor's update result (affected row count).
 * @throws Throwable propagated from the underlying executor.
 */
@Override
public Object intercept(final Invocation invocation) throws Throwable {
    Object[] args = invocation.getArgs();
    MappedStatement ms = (MappedStatement) args[0];
    Object parameter = args[1];
    Executor executor = (Executor) invocation.getTarget();
    // Include inherited fields by climbing the hierarchy up to (excluding) Object.
    for (Class<?> superClass = parameter.getClass(); superClass != Object.class; superClass = superClass.getSuperclass()) {
        Arrays.stream(superClass.getDeclaredFields())
                // matchParam presumably selects timestamp-like fields — confirm against its definition.
                .filter(f -> matchParam(parameter, f))
                .forEach(f -> ReflectUtils.setFieldValue(parameter, f.getName(), new Timestamp(System.currentTimeMillis())));
    }
    return executor.update(ms, parameter);
}
@Test
public void interceptTest() throws SQLException {
    // Smoke test: intercepting a mocked update invocation must complete without throwing.
    final OpenGaussSqlUpdateInterceptor openGaussSqlUpdateInterceptor = new OpenGaussSqlUpdateInterceptor();
    final Invocation invocation = mock(Invocation.class);
    Object[] args = new Object[2];
    args[0] = mock(MappedStatement.class);
    args[1] = mock(RuleData.class);
    final Executor executor = mock(Executor.class);
    when(invocation.getTarget()).thenReturn(executor);
    when(invocation.getArgs()).thenReturn(args);
    when(executor.update(any(), any())).thenReturn(1);
    Assertions.assertDoesNotThrow(() -> openGaussSqlUpdateInterceptor.intercept(invocation));
}
/**
 * Maps a single replacement output back onto a single original output.
 * Both the original map and the expanded replacement must contain exactly one
 * entry; {@code Iterables.getOnlyElement} throws otherwise.
 *
 * @param original    the original transform's single tagged output.
 * @param replacement the replacement transform's output (expanded to one entry).
 * @return a one-entry map from the replacement PCollection to its ReplacementOutput.
 */
public static Map<PCollection<?>, ReplacementOutput> singleton(
    Map<TupleTag<?>, PCollection<?>> original, POutput replacement) {
  Entry<TupleTag<?>, PCollection<?>> originalElement = Iterables.getOnlyElement(original.entrySet());
  Entry<TupleTag<?>, PCollection<?>> replacementElement =
      Iterables.getOnlyElement(PValues.expandOutput(replacement).entrySet());
  return Collections.singletonMap(
      replacementElement.getValue(),
      ReplacementOutput.of(
          TaggedPValue.of(originalElement.getKey(), originalElement.getValue()),
          TaggedPValue.of(replacementElement.getKey(), replacementElement.getValue())));
}
@Test
public void singletonMultipleOriginalsThrows() {
    // Passing more than one original output to singleton(...) must be rejected.
    thrown.expect(IllegalArgumentException.class);
    ReplacementOutputs.singleton(
        ImmutableMap.<TupleTag<?>, PCollection<?>>builder()
            .putAll(PValues.expandValue(ints))
            .putAll(PValues.fullyExpand(moreInts.expand()))
            .build(),
        replacementInts);
}
/**
 * Creates a producer for posting pull-request comments. The GitHub services may
 * be overridden by registering instances in the Camel registry under their
 * shared constant names; otherwise default instances are created.
 *
 * @param endpoint the owning GitHub endpoint.
 * @throws Exception propagated from service initialization.
 */
public PullRequestCommentProducer(GitHubEndpoint endpoint) throws Exception {
    super(endpoint);
    Registry registry = endpoint.getCamelContext().getRegistry();
    Object service = registry.lookupByName(GitHubConstants.GITHUB_PULL_REQUEST_SERVICE);
    if (service != null) {
        LOG.debug("Using PullRequestService found in registry {}", service.getClass().getCanonicalName());
        pullRequestService = (PullRequestService) service;
    } else {
        pullRequestService = new PullRequestService();
    }
    initService(pullRequestService);
    // Fix: look up the issue service under the shared constant instead of the
    // misspelled literal "githbIssueService", matching the pull-request lookup above.
    service = registry.lookupByName(GitHubConstants.GITHUB_ISSUE_SERVICE);
    if (service != null) {
        LOG.debug("Using IssueService found in registry {}", service.getClass().getCanonicalName());
        issueService = (IssueService) service;
    } else {
        issueService = new IssueService();
    }
    initService(issueService);
}
@Test
public void testPullRequestCommentProducer() throws Exception {
    PullRequest pullRequest = pullRequestService.addPullRequest("testPullRequestCommentProducer");
    latestPullRequestId = pullRequest.getId();
    Endpoint commentProducerEndpoint = getMandatoryEndpoint("direct:validPullRequest");
    Exchange exchange = commentProducerEndpoint.createExchange();
    String commentText = "Pushed this comment at " + new Date();
    exchange.getIn().setBody(commentText);
    template.send(commentProducerEndpoint, exchange);
    // NOTE(review): fixed sleep to let the producer finish — flaky-by-design; a
    // latch/await would be more reliable.
    Thread.sleep(1 * 1000);
    // Verify that the mock pull request service received this comment.
    List<CommitComment> commitComments = pullRequestService.getComments(null, (int) pullRequest.getId());
    assertEquals(1, commitComments.size());
    CommitComment commitComment = commitComments.get(0);
    assertEquals(Long.toString(pullRequest.getId()), commitComment.getCommitId(), "Commit IDs did not match");
    assertEquals(commentText, commitComment.getBodyText(), "Comment text did not match");
}
/**
 * Writes this object's formatted string representation to a PDF output stream,
 * encoded as ISO 8859-1 as required for PDF content.
 *
 * @param output the stream to write to; not closed by this method.
 * @throws IOException if writing fails.
 */
public void writePDF( OutputStream output ) throws IOException {
    output.write(formatString().getBytes(StandardCharsets.ISO_8859_1));
}
@Test
void testWritePDF() {
    WritePDFTester writePDFTester = new WritePDFTester();
    writePDFTester.runTests();
    // test a corner case as described in PDFBOX-1778
    writePDFTester.runTest(0.000000000000000000000000000000001f);
}
public void send(SlackMessage message, String webhookUrl) throws TemporaryEventNotificationException, PermanentEventNotificationException, JsonProcessingException { final Request request = new Request.Builder() .url(webhookUrl) .post(RequestBody.create(MediaType.parse(APPLICATION_JSON), objectMapper.writeValueAsString(message))) .build(); LOG.debug("Posting to webhook url <{}> the payload is <{}>", webhookUrl, ""); try (final Response r = httpClient.newCall(request).execute()) { if (!r.isSuccessful()) { //ideally this should not happen and the user is expected to fill the //right configuration , while setting up a notification throw new PermanentEventNotificationException( "Expected successful HTTP response [2xx] but got [" + r.code() + "]. " + webhookUrl); } } catch (IOException e) { throw new TemporaryEventNotificationException("Unable to send the slack Message. " + e.getMessage()); } }
@Test(expected = TemporaryEventNotificationException.class)
public void sendThrowsTempNotifException_whenHttpClientThrowsIOException() throws Exception {
    // A 1ms read timeout forces an IOException, which must surface as a
    // TemporaryEventNotificationException (retryable), not a permanent one.
    final OkHttpClient httpClient = this.httpClient.newBuilder().readTimeout(1, TimeUnit.MILLISECONDS).build();
    SlackClient slackClient = new SlackClient(httpClient, objectMapper);
    slackClient.send(getMessage(), server.url("/").toString());
}
/**
 * Creates a connection spec whose local and ensemble parts are the same string.
 *
 * @param spec the connection string, used for both local and ensemble.
 * @return the new spec.
 */
public static ConnectionSpec create(String spec) {
    return create(spec, spec);
}
@Test
public void create() {
    // Pretend we are running on host2 so local-affinity resolution is deterministic.
    HostName.setHostNameForTestingOnly("host2");
    Config config = new Config(List.of(new Config.Server("host1", 10001),
            new Config.Server("host2", 10002),
            new Config.Server("host3", 10003)));
    {
        // Without local affinity, local() equals the full ensemble string.
        ConnectionSpec spec = ConnectionSpec.create(config.servers, Config.Server::hostname, Config.Server::port, false);
        assertEquals("host1:10001,host2:10002,host3:10003", spec.local());
        assertEquals("host1:10001,host2:10002,host3:10003", spec.ensemble());
        assertEquals(3, spec.ensembleSize());
    }
    {
        // With local affinity, local() narrows to this host's own server entry.
        ConnectionSpec specLocalAffinity = ConnectionSpec.create(config.servers, Config.Server::hostname, Config.Server::port, true);
        assertEquals("host2:10002", specLocalAffinity.local());
        assertEquals("host1:10001,host2:10002,host3:10003", specLocalAffinity.ensemble());
        assertEquals(3, specLocalAffinity.ensembleSize());
    }
    {
        // String-based factory keeps local and ensemble independent.
        ConnectionSpec specFromString = ConnectionSpec.create("host1:10001", "host1:10001,host2:10002");
        assertEquals("host1:10001", specFromString.local());
        assertEquals("host1:10001,host2:10002", specFromString.ensemble());
        assertEquals(2, specFromString.ensembleSize());
    }
}
/**
 * Generates Java source for a single-argument lambda.
 *
 * @param argName    the lambda parameter name.
 * @param argType    the lambda parameter type.
 * @param lambdaBody the body expression.
 * @return Java source code for the lambda.
 */
public static String toJavaCode(
    final String argName,
    final Class<?> argType,
    final String lambdaBody
) {
  return toJavaCode(ImmutableList.of(new Pair<>(argName, argType)), lambdaBody);
}
@Test
public void shouldGenerateTriFunction() {
    // Given: three typed lambda arguments.
    final Pair<String, Class<?>> argName1 = new Pair<>("fred", Long.class);
    final Pair<String, Class<?>> argName2 = new Pair<>("bob", Long.class);
    final Pair<String, Class<?>> argName3 = new Pair<>("tim", Long.class);
    final List<Pair<String, Class<?>>> argList = ImmutableList.of(argName1, argName2, argName3);
    // When: generating and compiling the lambda source.
    final String javaCode = LambdaUtil.toJavaCode(argList, "fred + bob + tim + 1");
    // Then: it compiles to a TriFunction and evaluates correctly (10 + 15 + 3 + 1 = 29).
    final Object result = CodeGenTestUtil.cookAndEval(javaCode, TriFunction.class);
    assertThat(result, is(instanceOf(TriFunction.class)));
    assertThat(((TriFunction<Object, Object, Object, Object>) result).apply(10L, 15L, 3L), is(29L));
}
/**
 * Converts an ActiveMQ (JMS) message into an AMQP-encoded message.
 * JMS headers map to the AMQP Header/Properties sections; JMS properties with
 * the JMS_AMQP_ prefix are routed to their corresponding AMQP sections; all
 * other properties land in the Application Properties section. Sections are
 * created lazily and written in the AMQP-mandated order at the end.
 *
 * @param message the source message; null yields null.
 * @return the encoded AMQP message.
 * @throws Exception on conversion or property-read failure.
 */
@Override
public EncodedMessage transform(ActiveMQMessage message) throws Exception {
    if (message == null) {
        return null;
    }
    long messageFormat = 0;
    Header header = null;
    Properties properties = null;
    Map<Symbol, Object> daMap = null;
    Map<Symbol, Object> maMap = null;
    Map<String,Object> apMap = null;
    Map<Object, Object> footerMap = null;
    Section body = convertBody(message);
    // --- JMS headers -> AMQP Header/Properties (sections created only when needed) ---
    if (message.isPersistent()) {
        if (header == null) {
            header = new Header();
        }
        header.setDurable(true);
    }
    byte priority = message.getPriority();
    if (priority != Message.DEFAULT_PRIORITY) {
        if (header == null) {
            header = new Header();
        }
        header.setPriority(UnsignedByte.valueOf(priority));
    }
    String type = message.getType();
    if (type != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setSubject(type);
    }
    MessageId messageId = message.getMessageId();
    if (messageId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setMessageId(getOriginalMessageId(message));
    }
    ActiveMQDestination destination = message.getDestination();
    if (destination != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setTo(destination.getQualifiedName());
        // Destination type (queue/topic/...) is carried as a message annotation.
        if (maMap == null) {
            maMap = new HashMap<>();
        }
        maMap.put(JMS_DEST_TYPE_MSG_ANNOTATION, destinationType(destination));
    }
    ActiveMQDestination replyTo = message.getReplyTo();
    if (replyTo != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setReplyTo(replyTo.getQualifiedName());
        if (maMap == null) {
            maMap = new HashMap<>();
        }
        maMap.put(JMS_REPLY_TO_TYPE_MSG_ANNOTATION, destinationType(replyTo));
    }
    String correlationId = message.getCorrelationId();
    if (correlationId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        try {
            // Prefer the typed AMQP id form; fall back to the raw string on parse failure.
            properties.setCorrelationId(AMQPMessageIdHelper.INSTANCE.toIdObject(correlationId));
        } catch (AmqpProtocolException e) {
            properties.setCorrelationId(correlationId);
        }
    }
    long expiration = message.getExpiration();
    if (expiration != 0) {
        // Already-expired messages get a minimal 1ms TTL rather than a negative one.
        long ttl = expiration - System.currentTimeMillis();
        if (ttl < 0) {
            ttl = 1;
        }
        if (header == null) {
            header = new Header();
        }
        header.setTtl(new UnsignedInteger((int) ttl));
        if (properties == null) {
            properties = new Properties();
        }
        properties.setAbsoluteExpiryTime(new Date(expiration));
    }
    long timeStamp = message.getTimestamp();
    if (timeStamp != 0) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setCreationTime(new Date(timeStamp));
    }
    // JMSX Message Properties
    int deliveryCount = message.getRedeliveryCounter();
    if (deliveryCount > 0) {
        if (header == null) {
            header = new Header();
        }
        header.setDeliveryCount(UnsignedInteger.valueOf(deliveryCount));
    }
    String userId = message.getUserID();
    if (userId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setUserId(new Binary(userId.getBytes(StandardCharsets.UTF_8)));
    }
    String groupId = message.getGroupID();
    if (groupId != null) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setGroupId(groupId);
    }
    int groupSequence = message.getGroupSequence();
    if (groupSequence > 0) {
        if (properties == null) {
            properties = new Properties();
        }
        properties.setGroupSequence(UnsignedInteger.valueOf(groupSequence));
    }
    final Map<String, Object> entries;
    try {
        entries = message.getProperties();
    } catch (IOException e) {
        throw JMSExceptionSupport.create(e);
    }
    // --- JMS properties -> AMQP sections; JMS_AMQP_-prefixed keys are routed
    // to the section named by their suffix, everything else goes to app properties.
    for (Map.Entry<String, Object> entry : entries.entrySet()) {
        String key = entry.getKey();
        Object value = entry.getValue();
        if (key.startsWith(JMS_AMQP_PREFIX)) {
            if (key.startsWith(NATIVE, JMS_AMQP_PREFIX_LENGTH)) {
                // skip transformer appended properties
                continue;
            } else if (key.startsWith(ORIGINAL_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
                // skip transformer appended properties
                continue;
            } else if (key.startsWith(MESSAGE_FORMAT, JMS_AMQP_PREFIX_LENGTH)) {
                messageFormat = (long) TypeConversionSupport.convert(entry.getValue(), Long.class);
                continue;
            } else if (key.startsWith(HEADER, JMS_AMQP_PREFIX_LENGTH)) {
                if (header ==
                null) {
                    header = new Header();
                }
                continue;
            } else if (key.startsWith(PROPERTIES, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                continue;
            } else if (key.startsWith(MESSAGE_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (maMap == null) {
                    maMap = new HashMap<>();
                }
                String name = key.substring(JMS_AMQP_MESSAGE_ANNOTATION_PREFIX.length());
                maMap.put(Symbol.valueOf(name), value);
                continue;
            } else if (key.startsWith(FIRST_ACQUIRER, JMS_AMQP_PREFIX_LENGTH)) {
                if (header == null) {
                    header = new Header();
                }
                header.setFirstAcquirer((boolean) TypeConversionSupport.convert(value, Boolean.class));
                continue;
            } else if (key.startsWith(CONTENT_TYPE, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                properties.setContentType(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
                continue;
            } else if (key.startsWith(CONTENT_ENCODING, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                properties.setContentEncoding(Symbol.getSymbol((String) TypeConversionSupport.convert(value, String.class)));
                continue;
            } else if (key.startsWith(REPLYTO_GROUP_ID, JMS_AMQP_PREFIX_LENGTH)) {
                if (properties == null) {
                    properties = new Properties();
                }
                properties.setReplyToGroupId((String) TypeConversionSupport.convert(value, String.class));
                continue;
            } else if (key.startsWith(DELIVERY_ANNOTATION_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (daMap == null) {
                    daMap = new HashMap<>();
                }
                String name = key.substring(JMS_AMQP_DELIVERY_ANNOTATION_PREFIX.length());
                daMap.put(Symbol.valueOf(name), value);
                continue;
            } else if (key.startsWith(FOOTER_PREFIX, JMS_AMQP_PREFIX_LENGTH)) {
                if (footerMap == null) {
                    footerMap = new HashMap<>();
                }
                String name = key.substring(JMS_AMQP_FOOTER_PREFIX.length());
                footerMap.put(Symbol.valueOf(name), value);
                continue;
            }
        } else if (key.startsWith(AMQ_SCHEDULED_MESSAGE_PREFIX )) {
            // strip off the scheduled message properties
            continue;
        }
        // The property
        // didn't map into any other slot so we store it in the
        // Application Properties section of the message.
        if (apMap == null) {
            apMap = new HashMap<>();
        }
        apMap.put(key, value);
        // NOTE(review): this data-structure-type check is re-run for every
        // unmatched property; the repeated put is idempotent but could be
        // hoisted out of the loop.
        int messageType = message.getDataStructureType();
        if (messageType == CommandTypes.ACTIVEMQ_MESSAGE) {
            // Type of command to recognize advisory message
            Object data = message.getDataStructure();
            if(data != null) {
                apMap.put("ActiveMqDataStructureType", data.getClass().getSimpleName());
            }
        }
    }
    // --- Encode the populated sections in AMQP order. ---
    final AmqpWritableBuffer buffer = new AmqpWritableBuffer();
    encoder.setByteBuffer(buffer);
    if (header != null) {
        encoder.writeObject(header);
    }
    if (daMap != null) {
        encoder.writeObject(new DeliveryAnnotations(daMap));
    }
    if (maMap != null) {
        encoder.writeObject(new MessageAnnotations(maMap));
    }
    if (properties != null) {
        encoder.writeObject(properties);
    }
    if (apMap != null) {
        encoder.writeObject(new ApplicationProperties(apMap));
    }
    if (body != null) {
        encoder.writeObject(body);
    }
    if (footerMap != null) {
        encoder.writeObject(new Footer(footerMap));
    }
    return new EncodedMessage(messageFormat, buffer.getArray(), 0, buffer.getArrayLength());
}
@Test
public void testConvertTextMessageContentNotStoredCreatesBodyUsingOriginalEncodingWithDataSection() throws Exception {
    // A text message flagged with the original AMQP_DATA encoding must round-trip
    // into a Data section containing the UTF-8 bytes of the text.
    String contentString = "myTextMessageContent";
    ActiveMQTextMessage outbound = createTextMessage(contentString);
    outbound.setShortProperty(JMS_AMQP_ORIGINAL_ENCODING, AMQP_DATA);
    outbound.onSend();
    JMSMappingOutboundTransformer transformer = new JMSMappingOutboundTransformer();
    EncodedMessage encoded = transformer.transform(outbound);
    assertNotNull(encoded);
    Message amqp = encoded.decode();
    assertNotNull(amqp.getBody());
    assertTrue(amqp.getBody() instanceof Data);
    assertTrue(((Data) amqp.getBody()).getValue() instanceof Binary);
    Binary data = ((Data) amqp.getBody()).getValue();
    String contents = new String(data.getArray(), data.getArrayOffset(), data.getLength(), StandardCharsets.UTF_8);
    assertEquals(contentString, contents);
}
/**
 * Adds the target field's rescale constant to the raw prediction.
 * A rescale constant of 0 (or the field's default) leaves the value unchanged —
 * see the paired test.
 *
 * @param predictionDouble the raw prediction value.
 * @return the rescaled prediction.
 */
Double applyRescaleConstant(double predictionDouble) {
    return predictionDouble + targetField.getRescaleConstant();
}
@Test
void applyRescaleConstant() {
    // No rescale constant configured: the prediction passes through unchanged.
    TargetField targetField = new TargetField(Collections.emptyList(), null, "string", null, null, null, null, null);
    KiePMMLTarget kiePMMLTarget = getBuilder(targetField).build();
    assertThat(kiePMMLTarget.applyRescaleConstant(6.0)).isCloseTo(6.0, Offset.offset(0.0));
    // Rescale constant 2.0: added to the prediction (6.0 -> 8.0).
    targetField = new TargetField(Collections.emptyList(), null, "string", null, null, null, 2.0, null);
    kiePMMLTarget = getBuilder(targetField).build();
    assertThat(kiePMMLTarget.applyRescaleConstant(6.0)).isCloseTo(8.0, Offset.offset(0.0));
}
/**
 * Builds a URI for a file in this file system: the given file system URI with
 * its path replaced by the joined root + name elements.
 *
 * @param fileSystemUri the base URI supplying scheme/user-info/host/port.
 * @param root          the root component (e.g. "/" or a drive root).
 * @param names         the path name elements.
 * @param directory     whether the path denotes a directory (affects trailing slash).
 * @return the constructed URI.
 */
public final URI toUri(
    URI fileSystemUri, String root, Iterable<String> names, boolean directory) {
  String path = toUriPath(root, names, directory);
  try {
    // it should not suck this much to create a new URI that's the same except with a path set =(
    // need to do it this way for automatic path escaping
    return new URI(
        fileSystemUri.getScheme(),
        fileSystemUri.getUserInfo(),
        fileSystemUri.getHost(),
        fileSystemUri.getPort(),
        path,
        null,
        null);
  } catch (URISyntaxException e) {
    // The inputs come from a valid URI plus a well-formed path, so this is unreachable.
    throw new AssertionError(e);
  }
}
@Test
public void testToUri() {
    // A file URI carries no trailing slash...
    URI fileUri = type.toUri(fileSystemUri, "$", ImmutableList.of("foo", "bar"), false);
    assertThat(fileUri.toString()).isEqualTo("jimfs://foo/$/foo/bar");
    assertThat(fileUri.getPath()).isEqualTo("/$/foo/bar");
    // ...a directory URI does...
    URI directoryUri = type.toUri(fileSystemUri, "$", ImmutableList.of("foo", "bar"), true);
    assertThat(directoryUri.toString()).isEqualTo("jimfs://foo/$/foo/bar/");
    assertThat(directoryUri.getPath()).isEqualTo("/$/foo/bar/");
    // ...and the bare root resolves to just the root with a trailing slash.
    URI rootUri = type.toUri(fileSystemUri, "$", ImmutableList.<String>of(), true);
    assertThat(rootUri.toString()).isEqualTo("jimfs://foo/$/");
    assertThat(rootUri.getPath()).isEqualTo("/$/");
}
/**
 * Reconciles all entity-operator resources in sequence: service account, RBAC
 * roles and bindings, network policy, logging config maps, secrets (old secret
 * deleted before new ones are issued), then the deployment itself, finally
 * waiting for the deployment to become ready. Each step runs only after the
 * previous one succeeds.
 *
 * @param isOpenShift      whether the cluster is OpenShift.
 * @param imagePullPolicy  the image pull policy for the deployment.
 * @param imagePullSecrets image pull secrets for the deployment.
 * @param clock            clock used for certificate/secret timestamps.
 * @return a future completed when all resources are reconciled and ready.
 */
public Future<Void> reconcile(boolean isOpenShift, ImagePullPolicy imagePullPolicy, List<LocalObjectReference> imagePullSecrets, Clock clock) {
    return serviceAccount()
            .compose(i -> entityOperatorRole())
            .compose(i -> topicOperatorRole())
            .compose(i -> userOperatorRole())
            .compose(i -> networkPolicy())
            .compose(i -> topicOperatorRoleBindings())
            .compose(i -> userOperatorRoleBindings())
            .compose(i -> topicOperatorConfigMap())
            .compose(i -> userOperatorConfigMap())
            .compose(i -> topicOperatorCruiseControlApiSecret())
            .compose(i -> deleteOldEntityOperatorSecret())
            .compose(i -> topicOperatorSecret(clock))
            .compose(i -> userOperatorSecret(clock))
            .compose(i -> deployment(isOpenShift, imagePullPolicy, imagePullSecrets))
            .compose(i -> waitForDeploymentReadiness());
}
// Verifies reconcile() when only the User Operator is configured: shared resources
// (service account, role, network policy, deployment, user-operator secret/role-binding/config-map)
// are created (captors non-null), while topic-operator resources and the legacy combined secret are
// reconciled to null (deleted). Also checks CA generation and cert-hash annotations on the Deployment.
@Test public void reconcileWithUoOnly(VertxTestContext context) { ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false); DeploymentOperator mockDepOps = supplier.deploymentOperations; SecretOperator mockSecretOps = supplier.secretOperations; ServiceAccountOperator mockSaOps = supplier.serviceAccountOperations; RoleOperator mockRoleOps = supplier.roleOperations; RoleBindingOperator mockRoleBindingOps = supplier.roleBindingOperations; NetworkPolicyOperator mockNetPolicyOps = supplier.networkPolicyOperator; ConfigMapOperator mockCmOps = supplier.configMapOperations; ArgumentCaptor<ServiceAccount> saCaptor = ArgumentCaptor.forClass(ServiceAccount.class); when(mockSaOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), saCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockSecretOps.getAsync(eq(NAMESPACE), eq(KafkaResources.entityUserOperatorSecretName(NAME)))).thenReturn(Future.succeededFuture()); ArgumentCaptor<Secret> operatorSecretCaptor = ArgumentCaptor.forClass(Secret.class); when(mockSecretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorSecretName(NAME)), operatorSecretCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<Secret> toSecretCaptor = ArgumentCaptor.forClass(Secret.class); when(mockSecretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityTopicOperatorSecretName(NAME)), toSecretCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<Secret> uoSecretCaptor = ArgumentCaptor.forClass(Secret.class); when(mockSecretOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityUserOperatorSecretName(NAME)), uoSecretCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<Role> operatorRoleCaptor = ArgumentCaptor.forClass(Role.class); when(mockRoleOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), operatorRoleCaptor.capture())).thenReturn(Future.succeededFuture());
ArgumentCaptor<RoleBinding> toRoleBindingCaptor = ArgumentCaptor.forClass(RoleBinding.class); when(mockRoleBindingOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityTopicOperatorRoleBinding(NAME)), toRoleBindingCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<RoleBinding> uoRoleBindingCaptor = ArgumentCaptor.forClass(RoleBinding.class); when(mockRoleBindingOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityUserOperatorRoleBinding(NAME)), uoRoleBindingCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<NetworkPolicy> netPolicyCaptor = ArgumentCaptor.forClass(NetworkPolicy.class); when(mockNetPolicyOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), netPolicyCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<ConfigMap> toCmCaptor = ArgumentCaptor.forClass(ConfigMap.class); when(mockCmOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityTopicOperatorLoggingConfigMapName(NAME)), toCmCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<ConfigMap> uoCmCaptor = ArgumentCaptor.forClass(ConfigMap.class); when(mockCmOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityUserOperatorLoggingConfigMapName(NAME)), uoCmCaptor.capture())).thenReturn(Future.succeededFuture()); ArgumentCaptor<Deployment> depCaptor = ArgumentCaptor.forClass(Deployment.class); when(mockDepOps.reconcile(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), depCaptor.capture())).thenReturn(Future.succeededFuture()); when(mockDepOps.waitForObserved(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); when(mockDepOps.readiness(any(), eq(NAMESPACE), eq(KafkaResources.entityOperatorDeploymentName(NAME)), anyLong(), anyLong())).thenReturn(Future.succeededFuture()); Kafka kafka = new KafkaBuilder(KAFKA) .editSpec() .withNewEntityOperator()
.withNewUserOperator() .endUserOperator() .endEntityOperator() .endSpec() .build(); EntityOperatorReconciler rcnclr = new EntityOperatorReconciler( Reconciliation.DUMMY_RECONCILIATION, ResourceUtils.dummyClusterOperatorConfig(), supplier, kafka, CLUSTER_CA ); Checkpoint async = context.checkpoint(); rcnclr.reconcile(false, null, null, Clock.systemUTC()) .onComplete(context.succeeding(v -> context.verify(() -> { assertThat(saCaptor.getAllValues().size(), is(1)); assertThat(saCaptor.getValue(), is(notNullValue())); assertThat(operatorSecretCaptor.getAllValues().size(), is(1)); assertThat(operatorSecretCaptor.getAllValues().get(0), is(nullValue())); assertThat(toSecretCaptor.getAllValues().size(), is(1)); assertThat(toSecretCaptor.getAllValues().get(0), is(nullValue())); assertThat(uoSecretCaptor.getAllValues().size(), is(1)); assertThat(uoSecretCaptor.getAllValues().get(0), is(notNullValue())); assertThat(netPolicyCaptor.getAllValues().size(), is(1)); assertThat(netPolicyCaptor.getValue(), is(notNullValue())); assertThat(operatorRoleCaptor.getAllValues().size(), is(1)); assertThat(operatorRoleCaptor.getValue(), is(notNullValue())); assertThat(toRoleBindingCaptor.getAllValues().size(), is(1)); assertThat(toRoleBindingCaptor.getAllValues().get(0), is(nullValue())); assertThat(uoRoleBindingCaptor.getAllValues().size(), is(1)); assertThat(uoRoleBindingCaptor.getAllValues().get(0), is(notNullValue())); assertThat(toCmCaptor.getAllValues().size(), is(1)); assertThat(toCmCaptor.getValue(), is(nullValue())); assertThat(uoCmCaptor.getAllValues().size(), is(1)); assertThat(uoCmCaptor.getValue(), is(notNullValue())); assertThat(depCaptor.getAllValues().size(), is(1)); assertThat(depCaptor.getValue(), is(notNullValue())); assertThat(depCaptor.getValue().getSpec().getTemplate().getMetadata().getAnnotations().get(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_CERT_GENERATION), is("0"));
assertThat(depCaptor.getValue().getSpec().getTemplate().getMetadata().getAnnotations().get(Ca.ANNO_STRIMZI_IO_CLUSTER_CA_KEY_GENERATION), is("0")); assertThat(depCaptor.getValue().getSpec().getTemplate().getMetadata().getAnnotations().get(Annotations.ANNO_STRIMZI_SERVER_CERT_HASH), is("4d715cdd")); async.flag(); }))); }
/**
 * Reports whether the given class is treated as a "primitive-like" value type:
 * JVM primitives, {@code String}, {@code Boolean}, {@code Character},
 * any {@code Number} subclass, or any {@code Date} subclass.
 *
 * @param cls class to inspect; must not be null
 * @return true if the class is one of the primitive-like types above
 */
public static boolean isPrimitive(Class<?> cls) {
    if (cls.isPrimitive()) {
        return true;
    }
    // The boxed/simple value types handled explicitly.
    if (cls == String.class || cls == Boolean.class || cls == Character.class) {
        return true;
    }
    // Any numeric wrapper (Integer, Double, BigDecimal, ...) or date subclass counts too.
    return Number.class.isAssignableFrom(cls) || Date.class.isAssignableFrom(cls);
}
// Verifies isPrimitive accepts primitives, String, boxed Boolean/Character, Number and Date
// hierarchies, and rejects non-value types such as Map.
@Test void testIsPrimitive() { assertTrue(ReflectUtils.isPrimitive(boolean.class)); assertTrue(ReflectUtils.isPrimitive(String.class)); assertTrue(ReflectUtils.isPrimitive(Boolean.class)); assertTrue(ReflectUtils.isPrimitive(Character.class)); assertTrue(ReflectUtils.isPrimitive(Number.class)); assertTrue(ReflectUtils.isPrimitive(Date.class)); assertFalse(ReflectUtils.isPrimitive(Map.class)); }
/**
 * Flattens an encoded block: dictionary and run-length-encoded blocks are expanded via the
 * dedicated helpers; any other block is already flat and is returned wrapped in a new lease.
 *
 * @param block block to flatten; must not be null
 * @return lease over the flattened (or original, already-flat) block
 */
public BlockLease flatten(Block block) { requireNonNull(block, "block is null"); if (block instanceof DictionaryBlock) { return flattenDictionaryBlock((DictionaryBlock) block); } if (block instanceof RunLengthEncodedBlock) { return flattenRunLengthEncodedBlock((RunLengthEncodedBlock) block); } return newLease(block); }
// Verifies flatten() returns the very same instance (identity) for a block that is already flat.
@Test public void testShortArrayIdentityDecode() { Block block = createShortArrayBlock(1, 2, 3, 4); try (BlockLease blockLease = flattener.flatten(block)) { Block flattenedBlock = blockLease.get(); assertSame(flattenedBlock, block); } }
/**
 * Populates the ZooKeeper server config from the configured config servers.
 * Validates that host and zookeeper-id arrays are the same length and that ids are non-negative,
 * sets {@code myid} when one of the hosts matches the local hostname, registers each server,
 * and applies the optional client port plus hosted-Vespa-specific settings
 * (TLS config file, dynamic reconfiguration on / ensemble reconfiguration off).
 */
@Override public void getConfig(ZookeeperServerConfig.Builder builder) { ConfigServer[] configServers = getConfigServers(); int[] zookeeperIds = getConfigServerZookeeperIds(); if (configServers.length != zookeeperIds.length) { throw new IllegalArgumentException(String.format("Number of provided config server hosts (%d) must be the " + "same as number of provided config server zookeeper ids (%d)", configServers.length, zookeeperIds.length)); } String myhostname = HostName.getLocalhost(); // TODO: Server index should be in interval [1, 254] according to doc, // however, we cannot change this id for an existing server
for (int i = 0; i < configServers.length; i++) { if (zookeeperIds[i] < 0) { throw new IllegalArgumentException(String.format("Zookeeper ids cannot be negative, was %d for %s", zookeeperIds[i], configServers[i].hostName)); } if (configServers[i].hostName.equals(myhostname)) { builder.myid(zookeeperIds[i]); } builder.server(getZkServer(configServers[i], zookeeperIds[i])); } if (options.zookeeperClientPort().isPresent()) { builder.clientPort(options.zookeeperClientPort().get()); } if (options.hostedVespa().orElse(false)) { builder.vespaTlsConfigFile(Defaults.getDefaults().underVespaHome("var/zookeeper/conf/tls.conf.json")); } boolean isHostedVespa = options.hostedVespa().orElse(false); builder.dynamicReconfiguration(isHostedVespa); builder.reconfigureEnsemble(!isHostedVespa); builder.snapshotMethod(options.zooKeeperSnapshotMethod()); builder.juteMaxBuffer(options.zookeeperJuteMaxBuffer()); }
// Verifies the model-evaluation/model-integration bundles are NOT listed in PlatformBundlesConfig,
// since they must be pre-installed (used by config-model itself).
@Test void model_evaluation_bundles_are_not_installed_via_config() { // These bundles must be pre-installed because they are used by config-model.
PlatformBundlesConfig config = getConfig(PlatformBundlesConfig.class); assertFalse(config.bundlePaths().contains(ContainerModelEvaluation.MODEL_INTEGRATION_BUNDLE_FILE.toString())); assertFalse(config.bundlePaths().contains(ContainerModelEvaluation.MODEL_EVALUATION_BUNDLE_FILE.toString())); }
/**
 * Creates a Splunk HEC endpoint from the URI remainder.
 * Rejects URIs whose remainder contains a path segment (only "host:port" is allowed),
 * applies the query parameters, and records the Splunk URL on the endpoint.
 *
 * @throws IllegalArgumentException if the remainder contains a '/'-separated path
 */
@Override protected Endpoint createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
    // The remainder must be a bare "host:port"; any extra path segment is invalid.
    if (remaining.split("/").length > 1) {
        throw new IllegalArgumentException("Invalid URI: " + URISupport.sanitizeUri(uri));
    }
    final SplunkHECEndpoint endpoint = new SplunkHECEndpoint(uri, this, new SplunkHECConfiguration());
    setProperties(endpoint, parameters);
    endpoint.setSplunkURL(remaining);
    return endpoint;
}
// Verifies a valid URI with token and index options: the splunk URL, token and index are all parsed.
@Test public void testValidWithOptions() throws Exception { SplunkHECEndpoint endpoint = (SplunkHECEndpoint) component.createEndpoint( "splunk-hec:localhost:18808?token=11111111-1111-1111-1111-111111111111&index=foo"); endpoint.init(); assertEquals("localhost:18808", endpoint.getSplunkURL()); assertEquals("11111111-1111-1111-1111-111111111111", endpoint.getConfiguration().getToken()); assertEquals("foo", endpoint.getConfiguration().getIndex()); }
/**
 * Returns the number of calendar days between two dates in the system default time zone.
 * Counts calendar-day boundaries crossed, not 24-hour periods: 23:55 to 01:00 the next day is 1.
 * Negative when {@code b} is before {@code a}.
 *
 * @param a start date (inclusive)
 * @param b end date (exclusive)
 * @return signed number of calendar days from a to b
 */
@Restricted(NoExternalUse.class) public static long daysBetween(@NonNull Date a, @NonNull Date b) {
    final ZoneId zone = ZoneId.systemDefault();
    final LocalDate start = a.toInstant().atZone(zone).toLocalDate();
    final LocalDate end = b.toInstant().atZone(zone).toLocalDate();
    return ChronoUnit.DAYS.between(start, end);
}
// Verifies daysBetween counts calendar-day boundaries (not 24h periods): same-day is 0,
// crossing midnight is 1 even for a 65-minute gap, larger spans and a year boundary are exact,
// and reversed arguments yield a negative count.
@Test public void testDifferenceDays() throws Exception { Date may_6_10am = parseDate("2018-05-06 10:00:00"); Date may_6_11pm55 = parseDate("2018-05-06 23:55:00"); Date may_7_01am = parseDate("2018-05-07 01:00:00"); Date may_7_11pm = parseDate("2018-05-07 11:00:00"); Date may_8_08am = parseDate("2018-05-08 08:00:00"); Date june_3_08am = parseDate("2018-06-03 08:00:00"); Date june_9_08am = parseDate("2018-06-09 08:00:00"); Date june_9_08am_nextYear = parseDate("2019-06-09 08:00:00"); assertEquals(0, Util.daysBetween(may_6_10am, may_6_11pm55)); assertEquals(1, Util.daysBetween(may_6_10am, may_7_01am)); assertEquals(1, Util.daysBetween(may_6_11pm55, may_7_01am)); assertEquals(2, Util.daysBetween(may_6_10am, may_8_08am)); assertEquals(1, Util.daysBetween(may_7_11pm, may_8_08am)); // larger scale
assertEquals(28, Util.daysBetween(may_6_10am, june_3_08am)); assertEquals(34, Util.daysBetween(may_6_10am, june_9_08am)); assertEquals(365 + 34, Util.daysBetween(may_6_10am, june_9_08am_nextYear)); // reverse order
assertEquals(-1, Util.daysBetween(may_8_08am, may_7_11pm)); }
/**
 * Recursively deletes a file or directory tree.
 * If {@code File#delete()} fails and failures are not ignored, retries via
 * {@code Files.delete(Path)} so that the actual cause (permissions, non-empty dir, ...)
 * is surfaced as an exception instead of a silent boolean.
 *
 * @param file           file or directory to delete; a non-existent file is a no-op
 * @param ignoreFailures when true, deletion failures are silently ignored
 */
public static void delete(final File file, final boolean ignoreFailures) {
    if (!file.exists()) {
        return;
    }
    if (file.isDirectory()) {
        final File[] children = file.listFiles();
        if (null != children) {
            // Depth-first: children must go before the directory itself can be removed.
            for (final File child : children) {
                delete(child, ignoreFailures);
            }
        }
    }
    if (!file.delete() && !ignoreFailures) {
        try {
            Files.delete(file.toPath());
        } catch (final IOException ex) {
            LangUtil.rethrowUnchecked(ex);
        }
    }
}
// Verifies the ErrorHandler overload of IoUtil.delete routes the failure (here an NPE from the
// mocked File's null toPath()) to the handler instead of propagating it.
@Test void deleteErrorHandlerShouldCatchExceptionIfDeleteOfAFileFails() { final ErrorHandler errorHandler = mock(ErrorHandler.class); final File file = mock(File.class); when(file.exists()).thenReturn(true); when(file.delete()).thenReturn(false); IoUtil.delete(file, errorHandler); verify(errorHandler).onError(isA(NullPointerException.class)); }
/**
 * Returns the maximum non-null element of the given list, or null when the list
 * is null or contains no non-null elements. Null entries are skipped.
 *
 * @param input list of comparable values; may be null or contain nulls
 * @return the largest non-null value, or null
 */
@Udf public <T extends Comparable<? super T>> T arrayMax(@UdfParameter( description = "Array of values from which to find the maximum") final List<T> input) {
    if (input == null) {
        return null;
    }
    T max = null;
    for (final T value : input) {
        if (value == null) {
            continue; // nulls never win
        }
        if (max == null || value.compareTo(max) > 0) {
            max = value;
        }
    }
    return max;
}
// Verifies string max uses case-sensitive natural ordering: lower-case "foo" sorts after "Food".
@Test public void shouldFindStringMaxMixedCase() { final List<String> input = Arrays.asList("foo", "Food", "bar"); assertThat(udf.arrayMax(input), is("foo")); }
/**
 * Rate-limits sampling to at most {@code maxSamplesPerPeriod} per fixed time period.
 * The current period is derived from the nano clock; entering a new period resets the
 * per-period counter and always admits the first sample. The counter update is synchronized
 * so concurrent callers cannot exceed the limit.
 *
 * @return true if this call is admitted as a sample, false if the period's quota is exhausted
 */
@Override public boolean shouldSample() { long now = nanoClock.nanoTimeNow(); long period = now / periodLengthInNanos; synchronized (this) { if (period != currentSamplingPeriod) { // first call in a fresh period: reset the counter and admit
currentSamplingPeriod = period; samplesInCurrentPeriod = 1; return true; } if (samplesInCurrentPeriod >= maxSamplesPerPeriod) { return false; } ++samplesInCurrentPeriod; return true; } }
// Verifies that with a limit of 1 sample per 1000ns period, the second call in a period is
// rejected and the first call of the next period is admitted again.
@Test void sample_in_new_period_returns_true() { var clock = MockUtils.mockedClockReturning(1000L, 1900L, 2000L, 2900L); var sampler = new MaxSamplesPerPeriod(clock, 1000L, 1L); assertTrue(sampler.shouldSample()); assertFalse(sampler.shouldSample()); assertTrue(sampler.shouldSample()); assertFalse(sampler.shouldSample()); }
/**
 * Static factory: returns a RowCoder for the given schema.
 *
 * @param schema schema describing the rows this coder encodes/decodes
 */
public static RowCoder of(Schema schema) { return new RowCoder(schema); }
// Verifies round-trip encode/decode of a row whose array field uses a logical (enumeration) type.
@Test public void testLogicalTypeInCollection() throws Exception { EnumerationType enumeration = EnumerationType.create("one", "two", "three"); Schema schema = Schema.builder().addArrayField("f_enum_array", FieldType.logicalType(enumeration)).build(); Row row = Row.withSchema(schema) .addArray(enumeration.valueOf("two"), enumeration.valueOf("three")) .build(); CoderProperties.coderDecodeEncodeEqual(RowCoder.of(schema), row); }
/**
 * Custom equality for WorkerResources used when deciding whether assignments changed.
 * Null on either side is unequal; reference or Thrift equals() equality short-circuits to true;
 * otherwise cpu/heap/off-heap/shared-memory fields are compared individually, and the generic
 * resource maps are compared with customResourceMapEquality (which tolerates null-vs-empty and
 * zero-valued entries — see the corresponding test).
 */
@VisibleForTesting static boolean customWorkerResourcesEquality(WorkerResources first, WorkerResources second) { if (first == null) { return false; } if (second == null) { return false; } if (first == second) { return true; } if (first.equals(second)) { return true; } if (first.get_cpu() != second.get_cpu()) { return false; } if (first.get_mem_on_heap() != second.get_mem_on_heap()) { return false; } if (first.get_mem_off_heap() != second.get_mem_off_heap()) { return false; } if (first.get_shared_mem_off_heap() != second.get_shared_mem_off_heap()) { return false; } if (first.get_shared_mem_on_heap() != second.get_shared_mem_on_heap()) { return false; } if (!customResourceMapEquality(first.get_resources(), second.get_resources())) { return false; } if (!customResourceMapEquality(first.get_shared_resources(), second.get_shared_resources())) { return false; } return true; }
// Verifies the lenient equality semantics: null map == empty map, zero-valued resource entries are
// ignored, equal non-zero entries match, differing non-zero entries do not; likewise null and 0.0
// are equivalent for cpu/on-heap/off-heap scalars, and comparing against null is always false.
@Test public void testWorkerResourceEquality() { WorkerResources resourcesRNull = mkWorkerResources(100.0, 100.0, 100.0, null); WorkerResources resourcesREmpty = mkWorkerResources(100.0, 100.0, 100.0, Maps.newHashMap()); assertTrue(EquivalenceUtils.customWorkerResourcesEquality(resourcesRNull,resourcesREmpty)); Map<String, Double> resources = new HashMap<>(); resources.put("network.resource.units", 0.0); WorkerResources resourcesRNetwork = mkWorkerResources(100.0, 100.0, 100.0,resources); assertTrue(EquivalenceUtils.customWorkerResourcesEquality(resourcesREmpty, resourcesRNetwork)); Map<String, Double> resourcesNetwork = new HashMap<>(); resourcesNetwork.put("network.resource.units", 50.0); WorkerResources resourcesRNetworkNonZero = mkWorkerResources(100.0, 100.0, 100.0,resourcesNetwork); assertFalse(EquivalenceUtils.customWorkerResourcesEquality(resourcesREmpty, resourcesRNetworkNonZero)); Map<String, Double> resourcesNetworkOne = new HashMap<>(); resourcesNetworkOne.put("network.resource.units", 50.0); WorkerResources resourcesRNetworkOne = mkWorkerResources(100.0, 100.0, 100.0,resourcesNetworkOne); assertTrue(EquivalenceUtils.customWorkerResourcesEquality(resourcesRNetworkOne, resourcesRNetworkNonZero)); Map<String, Double> resourcesNetworkTwo = new HashMap<>(); resourcesNetworkTwo.put("network.resource.units", 100.0); WorkerResources resourcesRNetworkTwo = mkWorkerResources(100.0, 100.0, 100.0,resourcesNetworkTwo); assertFalse(EquivalenceUtils.customWorkerResourcesEquality(resourcesRNetworkOne, resourcesRNetworkTwo)); WorkerResources resourcesCpuNull = mkWorkerResources(null, 100.0,100.0); WorkerResources resourcesCPUZero = mkWorkerResources(0.0, 100.0,100.0); assertTrue(EquivalenceUtils.customWorkerResourcesEquality(resourcesCpuNull, resourcesCPUZero)); WorkerResources resourcesOnHeapMemNull = mkWorkerResources(100.0, null,100.0); WorkerResources resourcesOnHeapMemZero = mkWorkerResources(100.0, 0.0,100.0);
assertTrue(EquivalenceUtils.customWorkerResourcesEquality(resourcesOnHeapMemNull, resourcesOnHeapMemZero)); WorkerResources resourcesOffHeapMemNull = mkWorkerResources(100.0, 100.0,null); WorkerResources resourcesOffHeapMemZero = mkWorkerResources(100.0, 100.0,0.0); assertTrue(EquivalenceUtils.customWorkerResourcesEquality(resourcesOffHeapMemNull, resourcesOffHeapMemZero)); assertFalse(EquivalenceUtils.customWorkerResourcesEquality(resourcesOffHeapMemNull, null)); }
/**
 * Builds the socket address for a chosen server. If the host string is already an IP literal
 * it is used directly (no DNS lookup); otherwise the "unresolvedDiscoveryHost" counter is bumped
 * and an unresolved/hostname-based InetSocketAddress is attempted as a fallback. If that fallback
 * also fails, its exception is attached as suppressed and the original parse error is rethrown.
 *
 * @param chosenServer resolved host/port pair
 * @param originName   origin used to tag the metric; "unknownOrigin" when null
 */
@Visible
ForTesting static SocketAddress pickAddressInternal(ResolverResult chosenServer, @Nullable OriginName originName) { String rawHost; int port; rawHost = chosenServer.getHost(); port = chosenServer.getPort(); InetSocketAddress serverAddr; try { InetAddress ipAddr = InetAddresses.forString(rawHost); serverAddr = new InetSocketAddress(ipAddr, port); } catch (IllegalArgumentException e1) { LOG.warn("NettyClientConnectionFactory got an unresolved address, addr: {}", rawHost); Counter unresolvedDiscoveryHost = SpectatorUtils.newCounter( "unresolvedDiscoveryHost", originName == null ? "unknownOrigin" : originName.getTarget()); unresolvedDiscoveryHost.increment(); try { serverAddr = new InetSocketAddress(rawHost, port); } catch (RuntimeException e2) { e1.addSuppressed(e2); throw e1; } } return serverAddr; }
// Verifies the non-IP-literal fallback path: hostname "localhost" is not an IP literal, so the
// method falls back to a hostname-based InetSocketAddress which resolves to loopback with port 443.
@Test void pickAddressInternal_discovery_unresolved() { InstanceInfo instanceInfo = Builder.newBuilder() .setAppName("app") .setHostName("localhost") .setPort(443) .build(); DiscoveryResult s = DiscoveryResult.from(instanceInfo, true); SocketAddress addr = DefaultClientChannelManager.pickAddressInternal(s, OriginName.fromVip("vip")); Truth.assertThat(addr).isInstanceOf(InetSocketAddress.class); InetSocketAddress socketAddress = (InetSocketAddress) addr; assertTrue(socketAddress.getAddress().isLoopbackAddress(), socketAddress.toString()); assertEquals(443, socketAddress.getPort()); }
/**
 * Compares two checkpoint collections by their "interesting" fields only:
 * (checkpoint id, job id) pairs in iteration order. Sizes must match and the
 * projected pair lists must be equal element-by-element.
 *
 * @return true if both collections describe the same checkpoints in the same order
 */
public static boolean checkpointsMatch(
        Collection<CompletedCheckpoint> first, Collection<CompletedCheckpoint> second) {
    if (first.size() != second.size()) {
        return false;
    }
    // Project each side onto (checkpointId, jobId) and compare the projections.
    final List<Tuple2<Long, JobID>> lhs = new ArrayList<>(first.size());
    for (CompletedCheckpoint checkpoint : first) {
        lhs.add(new Tuple2<>(checkpoint.getCheckpointID(), checkpoint.getJobId()));
    }
    final List<Tuple2<Long, JobID>> rhs = new ArrayList<>(second.size());
    for (CompletedCheckpoint checkpoint : second) {
        rhs.add(new Tuple2<>(checkpoint.getCheckpointID(), checkpoint.getJobId()));
    }
    return lhs.equals(rhs);
}
// Verifies checkpointsMatch returns false when two checkpoints share a job id but differ in
// checkpoint id (0 vs 1).
@Test void testCompareCheckpointsWithSameJobID() { JobID jobID = new JobID(); CompletedCheckpoint checkpoint1 = new CompletedCheckpoint( jobID, 0, 0, 1, new HashMap<>(), Collections.emptyList(), CheckpointProperties.forCheckpoint( CheckpointRetentionPolicy.RETAIN_ON_FAILURE), new TestCompletedCheckpointStorageLocation(), null); CompletedCheckpoint checkpoint2 = new CompletedCheckpoint( jobID, 1, 0, 1, new HashMap<>(), Collections.emptyList(), CheckpointProperties.forCheckpoint( CheckpointRetentionPolicy.RETAIN_ON_FAILURE), new TestCompletedCheckpointStorageLocation(), null); List<CompletedCheckpoint> checkpoints1 = new ArrayList<>(); checkpoints1.add(checkpoint1); List<CompletedCheckpoint> checkpoints2 = new ArrayList<>(); checkpoints2.add(checkpoint2); assertThat(CompletedCheckpoint.checkpointsMatch(checkpoints1, checkpoints2)).isFalse(); }
/**
 * Convenience overload: extracts the view's content with the boolean flag defaulting to false.
 * See the two-argument overload for the flag's semantics.
 */
public static String getViewContent(View view) { return getViewContent(view, false); }
// Verifies getViewContent returns a TextView's text content.
@Test public void getViewContent() { TextView textView1 = new TextView(mApplication); textView1.setText("child1"); Assert.assertEquals("child1", SAViewUtils.getViewContent(textView1)); }
/**
 * Convenience overload: extracts the named attribute from the target, delegating to the
 * four-argument overload with the final boolean flag defaulting to true.
 */
public Object extract(Object target, String attributeName, Object metadata) { return extract(target, attributeName, metadata, true); }
// Verifies reflective extraction follows a dotted path ("car.power") through nested objects.
@Test public void when_extractByReflection_then_correctValue() { // WHEN
Object power = createExtractors(null).extract(bond, "car.power", null); // THEN
assertThat((Integer) power).isEqualTo(550); }
/**
 * Returns the currently used heap memory (total minus free) of the given runtime.
 * Re-reads total memory before and after sampling free memory and retries until both
 * reads agree, so the subtraction is not skewed by a concurrent heap resize.
 *
 * @param runtime runtime to sample
 * @return used memory in bytes
 */
public static long usedMemory(final Runtime runtime) {
    long totalBefore;
    long totalAfter;
    long inUse;
    do {
        totalBefore = runtime.totalMemory();
        inUse = totalBefore - runtime.freeMemory();
        totalAfter = runtime.totalMemory();
    } while (totalBefore != totalAfter); // heap resized mid-sample: measure again
    return inUse;
}
// Sanity-checks that a running JVM always reports some positive amount of used memory.
@Test public void testUsedMemory() { Assert.assertTrue(JVMUtil.usedMemory(Runtime.getRuntime()) > 0); }
/**
 * Runs one reindexing pass for this cluster. Refuses to run after shutdown (terminated phaser).
 * Reads the shared reindexing state, dumps metrics (kept in sync across cluster controller
 * containers), then, under the cluster-wide reindexing lock: merges the configured ready-triggers
 * into the persisted state, writes it back, and processes each trigger — triggers ready in the
 * future are only logged, triggers with positive speed are progressed. Stops early if shut down.
 *
 * @throws ReindexingLockException if the cluster reindexing lock cannot be taken
 */
void reindex() throws ReindexingLockException { if (phaser.isTerminated()) throw new IllegalStateException("Already shut down"); // Keep metrics in sync across cluster controller containers.
AtomicReference<Reindexing> reindexing = new AtomicReference<>(database.readReindexing(cluster.name())); metrics.dump(reindexing.get()); try (Lock lock = database.lockReindexing(cluster.name())) { reindexing.set(updateWithReady(ready, database.readReindexing(cluster.name()), clock.instant())); database.writeReindexing(reindexing.get(), cluster.name()); metrics.dump(reindexing.get()); // We consider only document types for which we have config.
for (Trigger trigger : ready) { if (trigger.readyAt().isAfter(clock.instant())) log.log(INFO, "Received config for reindexing which is ready in the future — will process later " + "(" + trigger.readyAt() + " is after " + clock.instant() + ")"); else if (trigger.speed() > 0) progress(trigger.type(), trigger.speed(), reindexing, new AtomicReference<>(reindexing.get().status().get(trigger.type()))); if (phaser.isTerminated()) break; } } }
// Verifies reindex() with no configured triggers performs no visits (failIfCalled) and emits no metrics.
@Test @Timeout(10) void nothingToDoWithEmptyConfig() throws ReindexingLockException { new Reindexer(cluster, triggers(), database, ReindexerTest::failIfCalled, metric, clock).reindex(); assertEquals(Map.of(), metric.metrics()); }
/**
 * Decodes the whole byte array to a String using the first configured codec.
 */
public String decode(byte[] val) { return codecs[0].decode(val, 0, val.length); }
// Verifies decoding of an ASCII-encoded Japanese person name under the JIS X 0208 codec setup.
@Test public void testDecodeJapanesePersonNameASCII() { assertEquals(JAPANESE_PERSON_NAME_ASCII, jisX0208().decode(JAPANESE_PERSON_NAME_ASCII_BYTES)); }
/**
 * Opens a PostgreSQL logical replication stream on the given connection.
 * Unwraps the JDBC connection to PGConnection, starts a logical stream from the given LSN on the
 * named replication slot, and enables the "include-xids" and "skip-empty-xacts" slot options.
 *
 * @param connection    JDBC connection able to unwrap to PGConnection
 * @param slotName      replication slot to stream from
 * @param startPosition log sequence number to start at (wraps a pgjdbc LogSequenceNumber)
 * @throws SQLException on any JDBC/replication-API failure
 */
public PGReplicationStream createReplicationStream(final Connection connection, final String slotName, final BaseLogSequenceNumber startPosition) throws SQLException { return connection.unwrap(PGConnection.class).getReplicationAPI() .replicationStream() .logical() .withStartPosition((LogSequenceNumber) startPosition.get()) .withSlotName(slotName) .withSlotOption("include-xids", true) .withSlotOption("skip-empty-xacts", true) .start(); }
// Verifies createReplicationStream wires the pgjdbc builder chain (logical, start position,
// slot name, slot options) and ultimately calls start() on the builder.
@Test void assertCreateReplicationStreamSuccess() throws SQLException { LogSequenceNumber startPosition = LogSequenceNumber.valueOf(100L); when(connection.unwrap(PGConnection.class)).thenReturn(connection); when(connection.getReplicationAPI()).thenReturn(pgReplicationConnection); when(pgReplicationConnection.replicationStream()).thenReturn(chainedStreamBuilder); when(chainedStreamBuilder.logical()).thenReturn(chainedLogicalStreamBuilder); when(chainedLogicalStreamBuilder.withStartPosition(startPosition)).thenReturn(chainedLogicalStreamBuilder); when(chainedLogicalStreamBuilder.withSlotName("")).thenReturn(chainedLogicalStreamBuilder); when(chainedLogicalStreamBuilder.withSlotOption(anyString(), eq(true))).thenReturn(chainedLogicalStreamBuilder, chainedLogicalStreamBuilder); BaseLogSequenceNumber basePosition = new PostgreSQLLogSequenceNumber(startPosition); logicalReplication.createReplicationStream(connection, "", basePosition); verify(chainedLogicalStreamBuilder).start(); }
/**
 * Returns the sample value at the given quantile.
 * Rejects quantiles outside [0, 1] (including NaN); returns 0.0 for an empty snapshot.
 * Looks up the quantile in the precomputed {@code quantiles} array and returns the value at or
 * below it, clamped to the first/last sample at the edges.
 *
 * @param quantile requested quantile in [0..1]
 * @throws IllegalArgumentException if the quantile is NaN or outside [0..1]
 */
@Override public double getValue(double quantile) {
    if (quantile < 0.0 || quantile > 1.0 || Double.isNaN(quantile)) {
        throw new IllegalArgumentException(quantile + " is not in [0..1]");
    }
    if (values.length == 0) {
        return 0.0;
    }
    int index = Arrays.binarySearch(quantiles, quantile);
    if (index < 0) {
        // Not an exact hit: binarySearch returns (-(insertionPoint) - 1);
        // step back to the entry at or below the requested quantile.
        index = ((-index) - 1) - 1;
    }
    if (index < 1) {
        return values[0];
    }
    if (index >= values.length) {
        return values[values.length - 1];
    }
    return values[index];
}
// Verifies NaN is rejected as a quantile with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class) public void disallowsNotANumberQuantile() { snapshot.getValue( Double.NaN ); }
/**
 * Updates a product brand.
 * First validates that the brand exists and that its (possibly changed) name is unique among
 * other brands, then converts the request VO to the DO and persists the update by id.
 */
@Override public void updateBrand(ProductBrandUpdateReqVO updateReqVO) { // 校验存在 (validate the brand exists)
validateBrandExists(updateReqVO.getId()); validateBrandNameUnique(updateReqVO.getId(), updateReqVO.getName()); // 更新 (perform the update)
ProductBrandDO updateObj = ProductBrandConvert.INSTANCE.convert(updateReqVO); brandMapper.updateById(updateObj); }
// Happy-path update: insert a brand, update it with a request carrying the same id,
// then assert the stored row matches the request fields.
@Test public void testUpdateBrand_success() { // mock 数据 (seed an existing row)
ProductBrandDO dbBrand = randomPojo(ProductBrandDO.class); brandMapper.insert(dbBrand);// @Sql: 先插入出一条存在的数据
// 准备参数 (build the update request targeting that row)
ProductBrandUpdateReqVO reqVO = randomPojo(ProductBrandUpdateReqVO.class, o -> { o.setId(dbBrand.getId()); // 设置更新的 ID
}); // 调用 (invoke)
brandService.updateBrand(reqVO); // 校验是否更新正确 (verify persisted state)
ProductBrandDO brand = brandMapper.selectById(reqVO.getId()); // 获取最新的
assertPojoEquals(reqVO, brand); }
/**
 * Returns true if at least one delegate reports the instance as externally managed.
 */
public final boolean isInstanceExternallyManaged() {
    // Short-circuit on the first delegate that says "externally managed".
    for (ManagedInstanceService delegate : delegates) {
        if (delegate.isInstanceExternallyManaged()) {
            return true;
        }
    }
    return false;
}
// With zero delegates registered, the aggregate answer must be false.
@Test public void isInstanceExternallyManaged_whenNoManagedInstanceService_returnsFalse() { assertThat(NO_MANAGED_SERVICES.isInstanceExternallyManaged()).isFalse(); }
/**
 * Returns true if LLDP/BDDP emission should be suppressed for the device, either because
 * its device type is in the suppressed-type set or because its annotations carry a
 * suppression marker (see containsSuppressionAnnotation).
 */
public boolean isSuppressed(Device device) {
    if (suppressedDeviceType.contains(device.type())) {
        return true;
    }
    return containsSuppressionAnnotation(device.annotations());
}
// A device whose type is not suppressed is still suppressed when annotated with sendLLDP=false.
@Test public void testSuppressedDeviceAnnotationExact() { Annotations annotation = DefaultAnnotations.builder() .set("sendLLDP", "false") .build(); Device device = new DefaultDevice(PID, NON_SUPPRESSED_DID, Device.Type.SWITCH, MFR, HW, SW1, SN, CID, annotation); assertTrue(rules.isSuppressed(device)); }
/**
 * REST handler: GET /{id} — returns the device key with the given id as JSON.
 * Responds 404 (via nullIsNotFound) when no key exists for the id.
 */
@GET @Path("{id}") @Produces(MediaType.APPLICATION_JSON) public Response getDeviceKey(@PathParam("id") String id) { DeviceKey deviceKey = nullIsNotFound(get(DeviceKeyService.class).getDeviceKey(DeviceKeyId.deviceKeyId(id)), DEVICE_KEY_NOT_FOUND); return ok(codec(DeviceKey.class).encode(deviceKey, this)).build(); }
// Deleting a key that the (mocked) service does not know about must surface HTTP 404.
@Test public void testDeleteNonExistentDeviceKey() { expect(mockDeviceKeyService.getDeviceKey(anyObject())) .andReturn(null) .anyTimes(); expectLastCall(); replay(mockDeviceKeyService); replay(mockDeviceKeyAdminService); WebTarget wt = target(); try { wt.path("keys/" + "NON_EXISTENT_DEVICE_KEY").request() .delete(String.class); fail("Delete of a non-existent device key did not throw an exception"); } catch (NotFoundException ex) { assertThat(ex.getMessage(), containsString("HTTP 404 Not Found")); } verify(mockDeviceKeyService); verify(mockDeviceKeyAdminService); }
/**
 * Static factory: returns the CountIf combine function (counts true inputs into a long).
 */
public static Combine.CombineFn<Boolean, ?, Long> combineFn() { return new CountIfFn(); }
// A freshly created accumulator must extract to a zero count.
@Test public void testExtractsOutput() { Combine.CombineFn countIfFn = CountIf.combineFn(); assertEquals(0L, countIfFn.extractOutput(countIfFn.createAccumulator())); }
/**
 * Lists files matching a (possibly wildcarded) path via the broker's backing file system.
 * A glob with no matches yields an empty list. Directories are marked non-splitable with size -1;
 * regular files report their length and are splitable. With {@code fileNameOnly} only the last
 * path element is returned, otherwise the full path string.
 * Errors are mapped to broker status codes: FileNotFoundException -> FILE_NOT_FOUND,
 * IllegalArgumentException (typically bad S3/Azure credentials/region) and any other failure ->
 * TARGET_STORAGE_SERVICE_ERROR.
 *
 * @param path         path or glob pattern to list
 * @param fileNameOnly whether to return only file names instead of full paths
 * @param properties   connection properties (credentials etc.) used to obtain the file system
 */
public List<TBrokerFileStatus> listPath(String path, boolean fileNameOnly, Map<String, String> properties) { List<TBrokerFileStatus> resultFileStatus = null; WildcardURI pathUri = new WildcardURI(path); BrokerFileSystem fileSystem = getFileSystem(path, properties); Path pathPattern = new Path(pathUri.getPath()); try { FileStatus[] files = fileSystem.getDFSFileSystem().globStatus(pathPattern); if (files == null) { resultFileStatus = new ArrayList<>(0); return resultFileStatus; } resultFileStatus = new ArrayList<>(files.length); for (FileStatus fileStatus : files) { TBrokerFileStatus brokerFileStatus = new TBrokerFileStatus(); brokerFileStatus.setIsDir(fileStatus.isDirectory()); if (fileStatus.isDirectory()) { brokerFileStatus.setIsSplitable(false); brokerFileStatus.setSize(-1); } else { brokerFileStatus.setSize(fileStatus.getLen()); brokerFileStatus.setIsSplitable(true); } if (fileNameOnly) { // return like this: file.txt
brokerFileStatus.setPath(fileStatus.getPath().getName()); } else { // return like this: //path/to/your/file.txt
brokerFileStatus.setPath(fileStatus.getPath().toString()); } resultFileStatus.add(brokerFileStatus); } } catch (FileNotFoundException e) { logger.info("file not found: " + e.getMessage()); throw new BrokerException(TBrokerOperationStatusCode.FILE_NOT_FOUND, e, "file not found"); } catch (IllegalArgumentException e) { logger.error("The arguments of blob store(S3/Azure) may be wrong. You can check " + "the arguments like region, IAM, instance profile and so on."); throw new BrokerException(TBrokerOperationStatusCode.TARGET_STORAGE_SERVICE_ERROR, e, "The arguments of blob store(S3/Azure) may be wrong. " + "You can check the arguments like region, IAM, " + "instance profile and so on."); } catch (Exception e) { logger.error("errors while get file status ", e); throw new BrokerException(TBrokerOperationStatusCode.TARGET_STORAGE_SERVICE_ERROR, e, "unknown error when get file status"); } return resultFileStatus; }
// Verifies glob listing ("*.out") against the test HDFS host returns the expected two entries.
@Test public void testListPaths() { Map<String, String> properties = new HashMap<String, String>(); properties.put("username", "user"); properties.put("password", "passwd"); List<TBrokerFileStatus> files2 = fileSystemManager.listPath(testHdfsHost + "/data/abc/logs/*.out", false, properties); assertEquals(files2.size(), 2); }
/**
 * Static factory: returns a new AsyncBuilder with the caller-chosen context type {@code C}.
 */
public static <C> AsyncBuilder<C> builder() { return new AsyncBuilder<>(); }
// Targeting a concrete class (not an interface) must be rejected with IllegalArgumentException.
@Test void nonInterface() { assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(() -> { AsyncFeign.builder().target(NonInterface.class, "http://localhost"); }); }
/**
 * Compares two half-precision (IEEE 754 binary16) values stored as shorts.
 * Non-NaN values are mapped to a monotonically ordered integer key (negative values are
 * mirrored around 0x8000 so the unsigned bit patterns sort like signed reals) and compared.
 * NaNs are collapsed to the canonical NaN bit pattern and compared by (signed) bits, which
 * orders NaN above positive infinity and keeps -0.0 below +0.0.
 *
 * @return negative, zero, or positive as x is less than, equal to, or greater than y
 */
public static int compare(short x, short y) { boolean xIsNaN = isNaN(x); boolean yIsNaN = isNaN(y); if (!xIsNaN && !yIsNaN) { int first = ((x & SIGN_MASK) != 0 ? 0x8000 - (x & 0xffff) : x & 0xffff); int second = ((y & SIGN_MASK) != 0 ? 0x8000 - (y & 0xffff) : y & 0xffff); // Returns true if the first half-precision float value is less // (smaller toward negative infinity) than the second half-precision float value.
if (first < second) { return -1; } // Returns true if the first half-precision float value is greater // (larger toward positive infinity) than the second half-precision float value.
if (first > second) { return 1; } } // Collapse NaNs, akin to halfToIntBits(), but we want to keep // (signed) short value types to preserve the ordering of -0.0 // and +0.0
short xBits = xIsNaN ? NaN : x; short yBits = yIsNaN ? NaN : y; return (xBits == yBits ? 0 : (xBits < yBits ? -1 : 1)); }
// Exercises the total ordering of Float16.compare: NaN collapses to one value and sorts above
// +Infinity; infinities, signed zeros (+0 > -0 here), and ordinary values compare as expected.
@Test public void testCompare() { assertEquals(0, Float16.compare(NaN, NaN)); assertEquals(0, Float16.compare(NaN, (short) 0xfc98)); assertEquals(1, Float16.compare(NaN, POSITIVE_INFINITY)); assertEquals(-1, Float16.compare(POSITIVE_INFINITY, NaN)); assertEquals(0, Float16.compare(POSITIVE_INFINITY, POSITIVE_INFINITY)); assertEquals(0, Float16.compare(NEGATIVE_INFINITY, NEGATIVE_INFINITY)); assertEquals(1, Float16.compare(POSITIVE_INFINITY, NEGATIVE_INFINITY)); assertEquals(-1, Float16.compare(NEGATIVE_INFINITY, POSITIVE_INFINITY)); assertEquals(0, Float16.compare(POSITIVE_ZERO, POSITIVE_ZERO)); assertEquals(0, Float16.compare(NEGATIVE_ZERO, NEGATIVE_ZERO)); assertEquals(1, Float16.compare(POSITIVE_ZERO, NEGATIVE_ZERO)); assertEquals(-1, Float16.compare(NEGATIVE_ZERO, POSITIVE_ZERO)); assertEquals(0, Float16.compare(Float16.toFloat16(12.462f), Float16.toFloat16(12.462f))); assertEquals(0, Float16.compare(Float16.toFloat16(-12.462f), Float16.toFloat16(-12.462f))); assertEquals(1, Float16.compare(Float16.toFloat16(12.462f), Float16.toFloat16(-12.462f))); assertEquals(-1, Float16.compare(Float16.toFloat16(-12.462f), Float16.toFloat16(12.462f))); }
/**
 * Builds a {@link BlockLocation} from the given coordinates and resolves it
 * through the cache-aware {@code getCached(BlockLocation)} overload.
 *
 * @param workerId id of the worker holding the block
 * @param tierAlias storage tier alias
 * @param mediumType storage medium type
 * @return the cached (canonical) location instance
 */
public static BlockLocation getCached(
    long workerId, String tierAlias, String mediumType) {
  return getCached(BlockLocation
      .newBuilder()
      .setWorkerId(workerId)
      .setTier(tierAlias)
      .setMediumType(mediumType)
      .build());
}
/** An unrecognized tier alias must be rejected when building a cached location. */
@Test(expected = IllegalStateException.class)
public void testInvalidValue() {
    BlockLocationUtils.getCached(1, "INVALID", "SSD");
}
/**
 * Creates a new organization from the request body and returns the persisted
 * entity with HTTP 201.
 *
 * @param organization payload to persist; its roles are re-linked to it first
 * @return the organization that was handed to the service layer
 */
@Operation(summary = "Create the organisation")
@PostMapping(value = "", consumes = "application/json")
@ResponseStatus(HttpStatus.CREATED)
public Organization create(@RequestBody Organization organization) {
    // Fix up the bidirectional association: each deserialized role must point
    // back at its owning organization before saving.
    organization.getOrganizationRoles().forEach(role -> role.setOrganization(organization));
    organizationService.saveOrganization(organization);
    return organization;
}
/** The controller must delegate persistence exactly once and echo the entity back. */
@Test
public void createOrganization() {
    // Given: the service layer accepts any organization.
    doNothing().when(organizationServiceMock).saveOrganization(any(Organization.class));
    // When
    Organization result = controllerMock.create(new Organization());
    // Then
    verify(organizationServiceMock, times(1)).saveOrganization(any(Organization.class));
    assertNotNull(result);
}
/**
 * Decides whether a request origin may talk to this Zeppelin instance.
 *
 * <p>Allowed when the configuration whitelists all origins ("*"), the origin
 * host equals this machine's hostname, the origin host is "localhost", or the
 * raw origin string is explicitly whitelisted.
 *
 * @param sourceHost the Origin header value; may be null or empty
 * @param zConf configuration providing the allowed-origins list
 * @return true if the origin is acceptable
 * @throws UnknownHostException if the local hostname cannot be resolved
 * @throws URISyntaxException if {@code sourceHost} is not a valid URI
 */
public static boolean isValidOrigin(String sourceHost, ZeppelinConfiguration zConf)
    throws UnknownHostException, URISyntaxException {
  String sourceUriHost = "";

  if (sourceHost != null && !sourceHost.isEmpty()) {
    sourceUriHost = new URI(sourceHost).getHost();
    sourceUriHost = (sourceUriHost == null) ? "" : sourceUriHost.toLowerCase();
  }
  // sourceUriHost is guaranteed lower-case (or empty) at this point, so the
  // redundant second toLowerCase() that used to live here has been removed.

  String currentHost = InetAddress.getLocalHost().getHostName().toLowerCase();

  return zConf.getAllowedOrigins().contains("*")
      || currentHost.equals(sourceUriHost)
      || "localhost".equals(sourceUriHost)
      || zConf.getAllowedOrigins().contains(sourceHost);
}
/** An origin absent from the configured allowed-origins list must be rejected. */
@Test
void isInvalidFromConfig() throws URISyntaxException, UnknownHostException {
    assertFalse(CorsUtils.isValidOrigin("http://otherinvalidhost.com",
        ZeppelinConfiguration.load("zeppelin-site.xml")));
}
/**
 * Fetches all windows for the given key on one partition, filtered to those
 * whose start time lies in {@code windowStart} AND end time in
 * {@code windowEnd}. Uses Kafka Streams IQv2 with an optional position bound.
 */
@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key,
    final int partition,
    final Range<Instant> windowStart,
    final Range<Instant> windowEnd,
    final Optional<Position> position
) {
  try {
    // IQv2 window-range query: returns every window for the key; time
    // filtering happens below on the result.
    final WindowRangeQuery<GenericKey, GenericRow> query = WindowRangeQuery.withKey(key);

    StateQueryRequest<KeyValueIterator<Windowed<GenericKey>, GenericRow>> request =
        inStore(stateStore.getStateStoreName()).withQuery(query);
    if (position.isPresent()) {
      // Restrict the query to the supplied position bound, if any.
      request = request.withPositionBound(PositionBound.at(position.get()));
    }
    final StateQueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> result =
        stateStore.getKafkaStreams().query(request);

    final QueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> queryResult =
        result.getPartitionResults().get(partition);

    if (queryResult.isFailure()) {
      throw failedQueryException(queryResult);
    }

    // try-with-resources: the store iterator must be closed.
    try (KeyValueIterator<Windowed<GenericKey>, GenericRow> it = queryResult.getResult()) {
      final Builder<WindowedRow> builder = ImmutableList.builder();

      while (it.hasNext()) {
        final KeyValue<Windowed<GenericKey>, GenericRow> next = it.next();
        final Window wnd = next.key.window();

        // Keep only windows whose start AND end fall within the requested bounds.
        if (!windowStart.contains(wnd.startTime())) {
          continue;
        }

        if (!windowEnd.contains(wnd.endTime())) {
          continue;
        }

        // Rows are timestamped with the window end time.
        final long rowTime = wnd.end();

        final WindowedRow row = WindowedRow.of(
            stateStore.schema(),
            next.key,
            next.value,
            rowTime
        );

        builder.add(row);
      }

      return KsMaterializedQueryResult.rowIteratorWithPosition(
          builder.build().iterator(), queryResult.getPosition());
    }
  } catch (final NotUpToBoundException | MaterializationException e) {
    // Already-meaningful exceptions pass through unchanged.
    throw e;
  } catch (final Exception e) {
    throw new MaterializationException("Failed to get value from materialized table", e);
  }
}
/**
 * A session whose end time falls inside the end bounds must be returned (the
 * start-time filter here is unbounded), keyed on the window with the session's
 * end as the row time.
 */
@Test
public void shouldReturnValueIfSessionEndsBetweenBounds() {
    // Given:
    final Instant wstart = LOWER_INSTANT.minusMillis(5);
    final Instant wend = UPPER_INSTANT.minusMillis(1);
    givenSingleSession(wstart, wend);

    // When:
    final KsMaterializedQueryResult<WindowedRow> result =
        table.get(A_KEY, PARTITION, Range.all(), WINDOW_END_BOUNDS);

    // Then:
    final Iterator<WindowedRow> rowIterator = result.getRowIterator();
    assertThat(rowIterator.hasNext(), is(true));
    assertThat(rowIterator.next(), is(
        WindowedRow.of(
            SCHEMA,
            sessionKey(wstart, wend),
            A_VALUE,
            wend.toEpochMilli()
        )
    ));
    assertThat(result.getPosition(), not(Optional.empty()));
    assertThat(result.getPosition().get(), is(POSITION));
}
/**
 * Spring bean: builds the {@link BulkheadRegistry} from configuration
 * properties, wires the event consumers, and eagerly initializes the
 * configured bulkhead instances.
 *
 * @param bulkheadConfigurationProperties bulkhead instance/config definitions
 * @param bulkheadEventConsumerRegistry registry receiving bulkhead events
 * @param bulkheadRegistryEventConsumer consumer of registry lifecycle events
 * @param compositeBulkheadCustomizer customizers applied while building configs
 * @return the fully initialized registry
 */
@Bean
public BulkheadRegistry bulkheadRegistry(
    BulkheadConfigurationProperties bulkheadConfigurationProperties,
    EventConsumerRegistry<BulkheadEvent> bulkheadEventConsumerRegistry,
    RegistryEventConsumer<Bulkhead> bulkheadRegistryEventConsumer,
    @Qualifier("compositeBulkheadCustomizer") CompositeCustomizer<BulkheadConfigCustomizer> compositeBulkheadCustomizer) {
    // Create the registry first so event wiring and instance creation both
    // operate on the same registry instance.
    BulkheadRegistry bulkheadRegistry = createBulkheadRegistry(bulkheadConfigurationProperties,
        bulkheadRegistryEventConsumer, compositeBulkheadCustomizer);
    registerEventConsumer(bulkheadRegistry, bulkheadEventConsumerRegistry,
        bulkheadConfigurationProperties);
    initBulkheadRegistry(bulkheadConfigurationProperties, compositeBulkheadCustomizer, bulkheadRegistry);
    return bulkheadRegistry;
}
/** Referencing a baseConfig that was never defined must fail fast with a clear error. */
@Test
public void testCreateBulkHeadRegistryWithUnknownConfig() {
    BulkheadConfigurationProperties bulkheadConfigurationProperties = new BulkheadConfigurationProperties();

    // Instance "backend" points at a base config that does not exist.
    io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties instanceProperties =
        new io.github.resilience4j.common.bulkhead.configuration.CommonBulkheadConfigurationProperties.InstanceProperties();
    instanceProperties.setBaseConfig("unknownConfig");
    bulkheadConfigurationProperties.getInstances().put("backend", instanceProperties);

    BulkheadConfiguration bulkheadConfiguration = new BulkheadConfiguration();
    DefaultEventConsumerRegistry<BulkheadEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>();

    //When
    assertThatThrownBy(() -> bulkheadConfiguration
        .bulkheadRegistry(bulkheadConfigurationProperties, eventConsumerRegistry,
            new CompositeRegistryEventConsumer<>(emptyList()),
            new CompositeCustomizer<>(Collections.emptyList())))
        .isInstanceOf(ConfigurationNotFoundException.class)
        .hasMessage("Configuration with name 'unknownConfig' does not exist");
}
/**
 * Returns the greatest of the supplied long values.
 *
 * @param first the first value; guarantees at least one argument at compile time
 * @param rest any additional values
 * @return the maximum across {@code first} and every element of {@code rest}
 */
public static long max(long first, long... rest) {
    long result = first;
    for (long candidate : rest) {
        result = Math.max(result, candidate);
    }
    return result;
}
/** max() must handle a single argument, duplicates, all-negative input, and zero. */
@Test
public void testMax() {
    assertEquals(1, Utils.max(1));
    assertEquals(3, Utils.max(1, 2, 3));
    assertEquals(3, Utils.max(2, 1, 3, 3));
    assertEquals(100, Utils.max(0, 2, 2, 100));
    assertEquals(-1, Utils.max(-1, -2, -2, -10, -100, -1000));
    assertEquals(0, Utils.max(-1, -2, -2, -10, -150, -1800, 0));
}
@Override public abstract String toString();
/** The composed correspondence must render its own descriptive toString(). */
@Test
public void testTransforming_both_toString() {
    assertThat(HYPHENS_MATCH_COLONS.toString())
        .isEqualTo("has a hyphen at the same index as the colon in");
}
/**
 * Expands a legacy composite type (wrapped in a {@code LegacyTypeInformationType})
 * into a physical {@link ResolvedSchema}, converting each field's legacy
 * TypeInformation into a DataType.
 *
 * @param dataType a data type whose logical type wraps a legacy CompositeType
 * @return schema with one physical column per composite field
 */
private static ResolvedSchema expandLegacyCompositeType(DataType dataType) {
    // legacy composite type
    CompositeType<?> compositeType =
        (CompositeType<?>)
            ((LegacyTypeInformationType<?>) dataType.getLogicalType())
                .getTypeInformation();

    String[] fieldNames = compositeType.getFieldNames();
    // Convert each legacy field type to the new DataType world, preserving order.
    DataType[] fieldTypes =
        Arrays.stream(fieldNames)
            .map(compositeType::getTypeAt)
            .map(TypeConversions::fromLegacyInfoToDataType)
            .toArray(DataType[]::new);

    return ResolvedSchema.physical(fieldNames, fieldTypes);
}
/** A legacy tuple expands to f0..fn columns with legacy-bridged data types. */
@Test
void testExpandLegacyCompositeType() {
    DataType dataType =
        TypeConversions.fromLegacyInfoToDataType(
            new TupleTypeInfo<>(Types.STRING, Types.INT, Types.SQL_TIMESTAMP));

    ResolvedSchema schema = DataTypeUtils.expandCompositeTypeToSchema(dataType);

    assertThat(schema)
        .isEqualTo(
            ResolvedSchema.of(
                Column.physical("f0", STRING()),
                Column.physical("f1", INT()),
                // SQL_TIMESTAMP bridges to java.sql.Timestamp with millisecond precision.
                Column.physical("f2", TIMESTAMP(3).bridgedTo(Timestamp.class))));
}
/**
 * @return the algorithm identifier: bidirectional Dijkstra on a
 *         contraction-hierarchies prepared graph
 */
@Override
public String getName() {
    return "dijkstrabi|ch";
}
/**
 * Verifies stall-on-demand (SOD): with stalling enabled the bidirectional CH
 * search settles fewer nodes (7 vs 11), while both variants find the same
 * shortest path 1-8-9-0 with distance 102.
 */
@Test
public void testStallingNodesReducesNumberOfVisitedNodes() {
    BaseGraph graph = createGHStorage();
    // Build a graph where node 3 can be stalled: the cheap detour via 8
    // dominates the expensive 2-3 / 3-9 edges.
    graph.edge(8, 9).setDistance(100).set(carSpeedEnc, 60, 0);
    graph.edge(8, 3).setDistance(2).set(carSpeedEnc, 60, 0);
    graph.edge(8, 5).setDistance(1).set(carSpeedEnc, 60, 0);
    graph.edge(8, 6).setDistance(1).set(carSpeedEnc, 60, 0);
    graph.edge(8, 7).setDistance(1).set(carSpeedEnc, 60, 0);
    graph.edge(1, 2).setDistance(2).set(carSpeedEnc, 60, 0);
    graph.edge(1, 8).setDistance(1).set(carSpeedEnc, 60, 0);
    graph.edge(2, 3).setDistance(3).set(carSpeedEnc, 60, 0);
    for (int i = 3; i < 7; ++i)
        graph.edge(i, i + 1).setDistance(1).set(carSpeedEnc, 60, 0);
    graph.edge(9, 0).setDistance(1).set(carSpeedEnc, 60, 0);
    graph.edge(3, 9).setDistance(200).set(carSpeedEnc, 60, 0);
    graph.freeze();
    Weighting weighting = new SpeedWeighting(carSpeedEnc);
    CHConfig chConfig = CHConfig.nodeBased(weighting.getName(), weighting);
    CHStorage store = CHStorage.fromGraph(graph, chConfig);
    // explicitly set the node levels equal to the node ids
    // the graph contraction with this ordering yields no shortcuts
    new CHStorageBuilder(store).setIdentityLevels();
    RoutingCHGraph routingCHGraph = RoutingCHGraphImpl.fromGraph(graph, store, chConfig);
    RoutingAlgorithm algo = createCHAlgo(routingCHGraph, true);
    Path p = algo.calcPath(1, 0);
    // node 3 will be stalled and nodes 4-7 won't be explored --> we visit 7 nodes
    // note that node 9 will be visited by both forward and backward searches
    assertEquals(7, algo.getVisitedNodes());
    assertEquals(102, p.getDistance(), 1.e-3);
    assertEquals(IntArrayList.from(1, 8, 9, 0), p.calcNodes(), p.toString());

    // without stalling we visit 11 nodes
    RoutingAlgorithm algoNoSod = createCHAlgo(routingCHGraph, false);
    Path pNoSod = algoNoSod.calcPath(1, 0);
    assertEquals(11, algoNoSod.getVisitedNodes());
    assertEquals(102, pNoSod.getDistance(), 1.e-3);
    assertEquals(IntArrayList.from(1, 8, 9, 0), pNoSod.calcNodes(), pNoSod.toString());
}
/**
 * Creates the SparkR interpreter, delegating all configuration to the base class.
 *
 * @param property interpreter settings
 */
public SparkRInterpreter(Properties property) {
    super(property);
}
/**
 * End-to-end SparkR exercise: simple evaluation, SparkR API calls, job URL
 * propagation, cancellation of a running job, PNG plot output with a custom
 * width, ggplot2 rendering, and detection of a timed-out (dead) R backend.
 */
@Test
void testSparkRInterpreter() throws InterpreterException, InterruptedException {
    // Basic arithmetic works.
    InterpreterResult result = sparkRInterpreter.interpret("1+1", getInterpreterContext());
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    assertTrue(result.message().get(0).getData().contains("2"));

    result = sparkRInterpreter.interpret("sparkR.version()", getInterpreterContext());
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());

    // DataFrame API round trip.
    result = sparkRInterpreter.interpret("df <- as.DataFrame(faithful)\nhead(df)", getInterpreterContext());
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    assertTrue(result.message().get(0).getData().contains("eruptions waiting"), result.toString());

    // spark job url is sent
    verify(mockRemoteIntpEventClient, atLeastOnce()).onParaInfosReceived(any(Map.class));

    // cancel
    InterpreterContext context = getInterpreterContext();
    InterpreterContext finalContext = context;
    Thread thread = new Thread() {
        @Override
        public void run() {
            try {
                // Long-running job (Sys.sleep) that we cancel from the main thread.
                InterpreterResult result = sparkRInterpreter.interpret("ldf <- dapplyCollect(\n" +
                        " df,\n" +
                        " function(x) {\n" +
                        " Sys.sleep(3)\n" +
                        " x <- cbind(x, \"waiting_secs\" = x$waiting * 60)\n" +
                        " })\n" +
                        "head(ldf, 3)", finalContext);
                assertTrue(result.message().get(0).getData().contains("cancelled"));
            } catch (InterpreterException e) {
                fail("Should not throw InterpreterException");
            }
        }
    };
    thread.setName("Cancel-Thread");
    thread.start();
    Thread.sleep(1000);
    sparkRInterpreter.cancel(context);

    // plotting
    context = getInterpreterContext();
    context.getLocalProperties().put("imageWidth", "100");
    result = sparkRInterpreter.interpret("hist(mtcars$mpg)", context);
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    assertEquals(1, result.message().size());
    assertEquals(InterpreterResult.Type.HTML, result.message().get(0).getType());
    assertTrue(result.message().get(0).getData().contains("<img src="));
    assertTrue(result.message().get(0).getData().contains("width=\"100\""));

    result = sparkRInterpreter.interpret("library(ggplot2)\n" +
            "ggplot(diamonds, aes(x=carat, y=price, color=cut)) + geom_point()", getInterpreterContext());
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    assertEquals(1, result.message().size());
    assertEquals(InterpreterResult.Type.HTML, result.message().get(0).getType());
    assertTrue(result.message().get(0).getData().contains("<img src="));

    // sparkr backend would be timeout after 10 seconds
    Thread.sleep(15 * 1000);
    result = sparkRInterpreter.interpret("1+1", getInterpreterContext());
    assertEquals(InterpreterResult.Code.ERROR, result.code());
    assertTrue(result.message().get(0).getData().contains("sparkR backend is dead"));
}
/**
 * Fetches the session windows for a key while bypassing the read cache:
 * resolves the underlying stores and queries them directly via
 * {@code fetchUncached}, returning via {@code findFirstNonEmptyIterator}.
 *
 * @param store the (possibly composite) read-only session store
 * @param key the key to look up; must not be null
 * @return an iterator over the windowed entries for the key
 * @throws NullPointerException if {@code key} is null
 */
public static KeyValueIterator<Windowed<GenericKey>, GenericRow> fetch(
    final ReadOnlySessionStore<GenericKey, GenericRow> store,
    final GenericKey key
) {
  Objects.requireNonNull(key, "key can't be null");
  final List<ReadOnlySessionStore<GenericKey, GenericRow>> stores = getStores(store);
  final Function<ReadOnlySessionStore<GenericKey, GenericRow>,
      KeyValueIterator<Windowed<GenericKey>, GenericRow>> fetchFunc =
          sessionStore -> fetchUncached(sessionStore, key);
  return findFirstNonEmptyIterator(stores, fetchFunc);
}
/** A provider returning an unexpected store implementation must be rejected. */
@Test
public void shouldThrowException_wrongStateStore() {
    when(provider.stores(any(), any())).thenReturn(ImmutableList.of(sessionStore));

    final Exception e = assertThrows(
        IllegalStateException.class,
        () -> SessionStoreCacheBypass.fetch(store, SOME_KEY)
    );

    assertThat(e.getMessage(), containsString("Expecting a MeteredSessionStore"));
}
/**
 * Handles a ConsumerGroupHeartbeat for the new consumer rebalance protocol.
 *
 * <p>High-level flow: (1) create or update the member and bump the group epoch
 * when subscriptions or subscription metadata change; (2) recompute the target
 * assignment when the group epoch has moved past the assignment epoch;
 * (3) reconcile the member's owned partitions towards the target assignment.
 * Every state change is captured as a record in the returned result.
 *
 * @param memberEpoch 0 means the member is (re)joining, which also implies
 *                    the group may be created on demand
 * @param instanceId non-null selects the static-membership path
 * @throws ApiException e.g. when the group is full or the member is unknown
 */
private CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeat(
    String groupId,
    String memberId,
    int memberEpoch,
    String instanceId,
    String rackId,
    int rebalanceTimeoutMs,
    String clientId,
    String clientHost,
    List<String> subscribedTopicNames,
    String assignorName,
    List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions
) throws ApiException {
    final long currentTimeMs = time.milliseconds();
    final List<CoordinatorRecord> records = new ArrayList<>();

    // Get or create the consumer group.
    boolean createIfNotExists = memberEpoch == 0;
    final ConsumerGroup group = getOrMaybeCreateConsumerGroup(groupId, createIfNotExists, records);
    throwIfConsumerGroupIsFull(group, memberId);

    // Get or create the member. A joining member without an id gets a fresh one.
    if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString();
    final ConsumerGroupMember member;
    if (instanceId == null) {
        member = getOrMaybeSubscribeDynamicConsumerGroupMember(
            group,
            memberId,
            memberEpoch,
            ownedTopicPartitions,
            createIfNotExists,
            false
        );
    } else {
        member = getOrMaybeSubscribeStaticConsumerGroupMember(
            group,
            memberId,
            memberEpoch,
            instanceId,
            ownedTopicPartitions,
            createIfNotExists,
            false,
            records
        );
    }

    // 1. Create or update the member. If the member is new or has changed, a ConsumerGroupMemberMetadataValue
    // record is written to the __consumer_offsets partition to persist the change. If the subscriptions have
    // changed, the subscription metadata is updated and persisted by writing a ConsumerGroupPartitionMetadataValue
    // record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have
    // changed, and persisted by writing a ConsumerGroupMetadataValue record to the partition.
    ConsumerGroupMember updatedMember = new ConsumerGroupMember.Builder(member)
        .maybeUpdateInstanceId(Optional.ofNullable(instanceId))
        .maybeUpdateRackId(Optional.ofNullable(rackId))
        .maybeUpdateRebalanceTimeoutMs(ofSentinel(rebalanceTimeoutMs))
        .maybeUpdateServerAssignorName(Optional.ofNullable(assignorName))
        .maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames))
        .setClientId(clientId)
        .setClientHost(clientHost)
        .setClassicMemberMetadata(null)
        .build();

    boolean bumpGroupEpoch = hasMemberSubscriptionChanged(
        groupId,
        member,
        updatedMember,
        records
    );

    int groupEpoch = group.groupEpoch();
    Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata();
    Map<String, Integer> subscribedTopicNamesMap = group.subscribedTopicNames();
    SubscriptionType subscriptionType = group.subscriptionType();

    if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
        // The subscription metadata is updated in two cases:
        // 1) The member has updated its subscriptions;
        // 2) The refresh deadline has been reached.
        subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember);
        subscriptionMetadata = group.computeSubscriptionMetadata(
            subscribedTopicNamesMap,
            metadataImage.topics(),
            metadataImage.cluster()
        );

        // Count this member as new when neither its id nor its static instance is known yet.
        int numMembers = group.numMembers();
        if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) {
            numMembers++;
        }

        subscriptionType = ModernGroup.subscriptionType(
            subscribedTopicNamesMap,
            numMembers
        );

        if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
            log.info("[GroupId {}] Computed new subscription metadata: {}.",
                groupId, subscriptionMetadata);
            bumpGroupEpoch = true;
            records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
        }

        if (bumpGroupEpoch) {
            groupEpoch += 1;
            records.add(newConsumerGroupEpochRecord(groupId, groupEpoch));
            log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch);
            metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME);
        }

        group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch);
    }

    // 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between
    // the existing and the new target assignment is persisted to the partition.
    final int targetAssignmentEpoch;
    final Assignment targetAssignment;

    if (groupEpoch > group.assignmentEpoch()) {
        targetAssignment = updateTargetAssignment(
            group,
            groupEpoch,
            member,
            updatedMember,
            subscriptionMetadata,
            subscriptionType,
            records
        );
        targetAssignmentEpoch = groupEpoch;
    } else {
        targetAssignmentEpoch = group.assignmentEpoch();
        targetAssignment = group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId());
    }

    // 3. Reconcile the member's assignment with the target assignment if the member is not
    // fully reconciled yet.
    updatedMember = maybeReconcile(
        groupId,
        updatedMember,
        group::currentPartitionEpoch,
        targetAssignmentEpoch,
        targetAssignment,
        ownedTopicPartitions,
        records
    );

    scheduleConsumerGroupSessionTimeout(groupId, memberId);

    // Prepare the response.
    ConsumerGroupHeartbeatResponseData response = new ConsumerGroupHeartbeatResponseData()
        .setMemberId(updatedMember.memberId())
        .setMemberEpoch(updatedMember.memberEpoch())
        .setHeartbeatIntervalMs(consumerGroupHeartbeatIntervalMs(groupId));

    // The assignment is only provided in the following cases:
    // 1. The member sent a full request. It does so when joining or rejoining the group with zero
    //    as the member epoch; or on any errors (e.g. timeout). We use all the non-optional fields
    //    (rebalanceTimeoutMs, subscribedTopicNames and ownedTopicPartitions) to detect a full request
    //    as those must be set in a full request.
    // 2. The member's assignment has been updated.
    boolean isFullRequest = memberEpoch == 0 || (rebalanceTimeoutMs != -1 && subscribedTopicNames != null && ownedTopicPartitions != null);
    if (isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) {
        response.setAssignment(createConsumerGroupResponseAssignment(updatedMember));
    }

    return new CoordinatorResult<>(records, response);
}
/**
 * A static member presenting an unknown member id with a non-zero epoch must
 * be rejected — only epoch 0 may create/replace a static member.
 */
@Test
public void testShouldThrownUnknownMemberIdExceptionWhenUnknownStaticMemberJoins() {
    String groupId = "fooup";
    // Use a static member id as it makes the test easier.
    String memberId1 = Uuid.randomUuid().toString();
    String memberId2 = Uuid.randomUuid().toString();

    Uuid fooTopicId = Uuid.randomUuid();
    String fooTopicName = "foo";

    MockPartitionAssignor assignor = new MockPartitionAssignor("range");

    // Consumer group with one static member.
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .withConsumerGroupAssignors(Collections.singletonList(assignor))
        .withMetadataImage(new MetadataImageBuilder()
            .addTopic(fooTopicId, fooTopicName, 6)
            .build())
        .withConsumerGroup(new ConsumerGroupBuilder(groupId, 10)
            .withMember(new ConsumerGroupMember.Builder(memberId1)
                .setState(MemberState.STABLE)
                .setInstanceId(memberId1)
                .setMemberEpoch(10)
                .setPreviousMemberEpoch(9)
                .setClientId(DEFAULT_CLIENT_ID)
                .setClientHost(DEFAULT_CLIENT_ADDRESS.toString())
                .setSubscribedTopicNames(Arrays.asList("foo", "bar"))
                .setServerAssignorName("range")
                .setAssignedPartitions(mkAssignment(
                    mkTopicAssignment(fooTopicId, 0, 1, 2)))
                .build())
            .withAssignment(memberId1, mkAssignment(
                mkTopicAssignment(fooTopicId, 0, 1, 2)))
            .withAssignmentEpoch(10))
        .build();

    // Member 2 joins the consumer group with a non-zero epoch
    assertThrows(UnknownMemberIdException.class, () -> context.consumerGroupHeartbeat(
        new ConsumerGroupHeartbeatRequestData()
            .setGroupId(groupId)
            .setMemberId(memberId2)
            .setInstanceId(memberId2)
            .setMemberEpoch(10)
            .setRebalanceTimeoutMs(5000)
            .setServerAssignor("range")
            .setSubscribedTopicNames(Arrays.asList("foo", "bar"))
            .setTopicPartitions(Collections.emptyList())));
}
/**
 * Deregisters this emitter's gauges from the metrics registry.
 * A no-op when no registry was supplied.
 */
@Override
public void close() {
    registry.ifPresent(r -> {
        r.removeMetric(LATEST_SNAPSHOT_GENERATED_BYTES);
        r.removeMetric(LATEST_SNAPSHOT_GENERATED_AGE_MS);
    });
}
/** The emitter registers exactly its two gauges and removes them all on close(). */
@Test
public void testMetricNames() {
    try (SnapshotEmitterMetricsTestContext ctx = new SnapshotEmitterMetricsTestContext()) {
        ControllerMetricsTestUtils.assertMetricsForTypeEqual(ctx.registry, "kafka.server:",
            new HashSet<>(Arrays.asList(
                "kafka.server:type=SnapshotEmitter,name=LatestSnapshotGeneratedBytes",
                "kafka.server:type=SnapshotEmitter,name=LatestSnapshotGeneratedAgeMs"
            )));
        ctx.metrics.close();
        // After close() no metrics of this type should remain.
        ControllerMetricsTestUtils.assertMetricsForTypeEqual(ctx.registry, "KafkaController",
            Collections.emptySet());
    }
}
/**
 * A pipeline exists in this merged configuration when any constituent part
 * declares it.
 *
 * @param pipelineName the pipeline to look for
 * @return true if at least one part contains the pipeline
 */
@Override
public boolean hasPipeline(CaseInsensitiveString pipelineName) {
    return this.parts.stream().anyMatch(part -> part.hasPipeline(pipelineName));
}
/** A merged config that does not contain the pipeline must report it as absent. */
@Test
public void shouldReturnFalseIfPipelineNotExist_When2ConfigParts() {
    // NOTE(review): part1 is created but never passed to the merge below —
    // possibly leftover from a copy/paste; the assertion only needs part2. Confirm intent.
    PipelineConfigs part1 = new BasicPipelineConfigs(PipelineConfigMother.pipelineConfig("pipeline1"));
    PipelineConfigs part2 = new BasicPipelineConfigs(PipelineConfigMother.pipelineConfig("pipeline2"));
    MergePipelineConfigs merge = new MergePipelineConfigs(part2);
    assertThat("shouldReturnFalseIfPipelineNotExist",
        merge.hasPipeline(new CaseInsensitiveString("not-exist")), is(false));
}
/**
 * Executes a notification once against a dummy event context so a user can
 * verify its configuration.
 *
 * @param notificationDto the notification configuration to test
 * @param userName the user triggering the test; embedded in the dummy event
 * @throws NotFoundException if no factory exists for the notification type
 * @throws InternalServerErrorException if the notification execution fails
 */
public void test(NotificationDto notificationDto, String userName) throws NotFoundException, InternalServerErrorException {
    NotificationDto dto = prepareUpdate(notificationDto);
    final EventNotification.Factory eventNotificationFactory = eventNotificationFactories.get(dto.config().type());
    if (eventNotificationFactory == null) {
        throw new NotFoundException("Couldn't find factory for notification type <" + dto.config().type() + ">");
    }
    final EventNotificationContext notificationContext = NotificationTestData.getDummyContext(dto, userName);
    final EventNotification eventNotification = eventNotificationFactory.create();
    try {
        eventNotification.execute(notificationContext);
    } catch (EventNotificationException e) {
        // Surface the most specific message available: prefer the root cause's.
        if (e.getCause() != null) {
            throw new InternalServerErrorException(e.getCause().getMessage(), e);
        } else {
            throw new InternalServerErrorException(e.getMessage(), e);
        }
    }
}
/** Testing a notification executes it once with a dummy context carrying the triggering user. */
@Test
public void testExecution() throws EventNotificationException {
    notificationResourceHandler.test(getHttpNotification(), "testUser");
    ArgumentCaptor<EventNotificationContext> captor = ArgumentCaptor.forClass(EventNotificationContext.class);
    verify(eventNotification, times(1)).execute(captor.capture());
    assertThat(captor.getValue()).satisfies(ctx -> {
        assertThat(ctx.event().message()).isEqualTo("Notification test message triggered from user <testUser>");
        assertThat(ctx.notificationId()).isEqualTo(NotificationTestData.TEST_NOTIFICATION_ID);
        assertThat(ctx.notificationConfig().type()).isEqualTo(HTTPEventNotificationConfig.TYPE_NAME);
        assertThat(ctx.eventDefinition().get().title()).isEqualTo("Event Definition Test Title");
    });
}
/**
 * Offers a task while cooperating with the attached thread pool's growth
 * policy: queue when a worker is idle, reject (return {@code false}) to make
 * the pool spawn a new worker while below its maximum, and queue once the
 * pool is saturated.
 *
 * @param runnable the task to enqueue
 * @return true if the task was queued; false to trigger worker creation
 * @throws RejectedExecutionException if no executor has been attached yet
 */
@Override
public boolean offer(Runnable runnable) {
    if (executor == null) {
        throw new RejectedExecutionException("The task queue does not have executor!");
    }

    // NOTE: pool size / active count are racy snapshots; this heuristic is
    // best-effort by design.
    int currentPoolThreadSize = executor.getPoolSize();
    // have free worker. put task into queue to let the worker deal with task.
    if (executor.getActiveCount() < currentPoolThreadSize) {
        return super.offer(runnable);
    }

    // return false to let executor create new worker.
    if (currentPoolThreadSize < executor.getMaximumPoolSize()) {
        return false;
    }

    // currentPoolThreadSize >= max
    return super.offer(runnable);
}
/** Offering a task before an executor is attached must be rejected immediately. */
@Test
void testOffer1() throws Exception {
    Assertions.assertThrows(RejectedExecutionException.class, () -> {
        TaskQueue<Runnable> queue = new TaskQueue<Runnable>(1);
        queue.offer(mock(Runnable.class));
    });
}
/**
 * Collects all live frontend nodes into {@code frontends} as Thrift
 * descriptors. Dead frontends are skipped; the list stays {@code null} when
 * no frontend is alive.
 */
public void computeFeNodes() {
    for (Frontend fe : GlobalStateMgr.getCurrentState().getNodeMgr().getFrontends(null /* all */)) {
        if (!fe.isAlive()) {
            continue;
        }
        // Lazily create the list so it remains null when nothing is added.
        if (frontends == null) {
            frontends = Lists.newArrayList();
        }
        TFrontend feInfo = new TFrontend();
        feInfo.setId(fe.getNodeName());
        feInfo.setIp(fe.getHost());
        // NOTE(review): uses this node's Config.http_port for every FE —
        // assumes all frontends share the same HTTP port; confirm.
        feInfo.setHttp_port(Config.http_port);
        frontends.add(feInfo);
    }
}
/** Only live frontends must appear in the computed FE node list. */
@Test
public void testComputeFeNodes(@Mocked GlobalStateMgr globalStateMgr) {
    // One dead (default) and one alive frontend.
    List<Frontend> frontends = new ArrayList<>();
    frontends.add(new Frontend());
    Frontend frontend = new Frontend();
    frontend.setAlive(true);
    frontends.add(frontend);
    new Expectations() {
        {
            GlobalStateMgr.getCurrentState();
            minTimes = 0;
            result = globalStateMgr;
            globalStateMgr.getNodeMgr().getFrontends(null);
            minTimes = 0;
            result = frontends;
        }
    };
    TupleDescriptor desc = new TupleDescriptor(new TupleId(0));
    SystemTable table = new SystemTable(0, "fe_metrics", null, null, null);
    desc.setTable(table);
    SchemaScanNode scanNode = new SchemaScanNode(new PlanNodeId(0), desc);
    scanNode.computeFeNodes();
    Assert.assertNotNull(scanNode.getFrontends());
}
/**
 * (Re-)indexes the search entry for a single extension, retrying on transient
 * data-access failures. No-op when search indexing is disabled.
 *
 * @param extension the extension whose search entry should be refreshed
 */
@Retryable(DataAccessResourceFailureException.class)
public void updateSearchEntry(Extension extension) {
    if (!isEnabled()) {
        return;
    }
    // Acquire the lock BEFORE the try block: if lock() were inside the try and
    // failed, the finally clause would unlock a lock this thread never held.
    rwLock.writeLock().lock();
    try {
        var stats = new SearchStats(repositories);
        var indexQuery = new IndexQueryBuilder()
                .withObject(relevanceService.toSearchEntry(extension, stats))
                .build();

        var indexOps = searchOperations.indexOps(ExtensionSearch.class);
        searchOperations.index(indexQuery, indexOps.getIndexCoordinates());
    } finally {
        rwLock.writeLock().unlock();
    }
}
/** All else equal, the more recently updated extension must score higher relevance. */
@Test
public void testRelevanceTimestamp() {
    var index = mockIndex(true);
    // Identical extensions except for their update timestamps (Feb vs Oct 2020).
    var ext1 = mockExtension("foo", "n2", "u2", 0.0, 0, 0, LocalDateTime.parse("2020-02-01T00:00"), false, false);
    var ext2 = mockExtension("bar", "n1", "u1", 0.0, 0, 0, LocalDateTime.parse("2020-10-01T00:00"), false, false);
    search.updateSearchEntry(ext1);
    search.updateSearchEntry(ext2);

    assertThat(index.entries).hasSize(2);
    assertThat(index.entries.get(0).relevance).isLessThan(index.entries.get(1).relevance);
}
/**
 * Removes all elements whose score falls inside the given range, blocking
 * until the operation completes.
 *
 * @param startScore lower score bound
 * @param startScoreInclusive whether the lower bound is inclusive
 * @param endScore upper score bound
 * @param endScoreInclusive whether the upper bound is inclusive
 * @return the number of elements removed
 */
@Override
public int removeRangeByScore(double startScore, boolean startScoreInclusive, double endScore, boolean endScoreInclusive) {
    // Synchronous facade over the async variant.
    return get(removeRangeByScoreAsync(startScore, startScoreInclusive, endScore, endScoreInclusive));
}
/** (exclusive, inclusive] score bounds must remove exactly the in-range members. */
@Test
public void testRemoveRangeByScore() {
    RScoredSortedSet<String> set = redisson.getScoredSortedSet("simple");
    set.add(0.1, "a");
    set.add(0.2, "b");
    set.add(0.3, "c");
    set.add(0.4, "d");
    set.add(0.5, "e");
    set.add(0.6, "f");
    set.add(0.7, "g");

    // (0.1, 0.3] covers the scores of "b" and "c" only.
    Assertions.assertEquals(2, set.removeRangeByScore(0.1, false, 0.3, true));
    assertThat(set).containsExactly("a", "d", "e", "f", "g");
}
/**
 * Loads an existing Logstash keystore from the configured location.
 *
 * <p>Throws a {@code LoadException} when the file is missing, is not a valid
 * keystore, or lacks the Logstash marker secret; an {@code AccessException}
 * when the keystore password or file permissions are wrong. The supplied
 * config's secret values are cleared on exit.
 *
 * @param config secure configuration holding "keystore.file" and credentials
 * @return this store, initialized and verified
 */
@Override
public JavaKeyStore load(SecureConfig config) {
    if (!exists(config)) {
        throw new SecretStoreException.LoadException(
            String.format("Can not find Logstash keystore at %s. Please verify this file exists and is a valid Logstash keystore.",
                config.getPlainText("keystore.file") == null ? "<undefined>" : new String(config.getPlainText("keystore.file"))));
    }
    try {
        init(config);
        lock.lock();
        try (final InputStream is = Files.newInputStream(keyStorePath)) {
            try {
                keyStore.load(is, this.keyStorePass);
            } catch (IOException ioe) {
                // An UnrecoverableKeyException cause indicates a bad password or
                // permissions; anything else means the file isn't a keystore.
                if (ioe.getCause() instanceof UnrecoverableKeyException) {
                    throw new SecretStoreException.AccessException(
                        String.format("Can not access Logstash keystore at %s. Please verify correct file permissions and keystore password.",
                            keyStorePath.toAbsolutePath()), ioe);
                } else {
                    throw new SecretStoreException.LoadException(String.format("Found a file at %s, but it is not a valid Logstash keystore.",
                        keyStorePath.toAbsolutePath().toString()), ioe);
                }
            }
            // The marker secret distinguishes a Logstash keystore from any other JKS.
            byte[] marker = retrieveSecret(LOGSTASH_MARKER);
            if (marker == null) {
                throw new SecretStoreException.LoadException(String.format("Found a keystore at %s, but it is not a Logstash keystore.",
                    keyStorePath.toAbsolutePath().toString()));
            }
            LOGGER.debug("Using existing keystore at {}", keyStorePath.toAbsolutePath());
            return this;
        }
    } catch (SecretStoreException sse) {
        throw sse;
    } catch (Exception e) {
        //should never happen
        throw new SecretStoreException.UnknownException("Error while trying to load the Logstash keystore", e);
    } finally {
        releaseLock(lock);
        config.clearValues();
    }
}
/** Loading from a path where no keystore file exists must fail with LoadException. */
@Test
public void testLoadNotCreated() throws IOException {
    Path altPath = folder.newFolder().toPath().resolve("alt.logstash.keystore");
    SecureConfig secureConfig = new SecureConfig();
    secureConfig.add("keystore.file", altPath.toString().toCharArray());
    assertThrows(SecretStoreException.LoadException.class, () -> {
        new JavaKeyStore().load(secureConfig.clone());
    });
}
/**
 * Returns the nested child data types of this type — e.g. the field types of
 * a row type, in declaration order.
 *
 * @return list of child data types
 */
public abstract List<DataType> getChildren();
/** A ROW type's children must be its field types, in declaration order. */
@Test
void testFields() {
    assertThat(ROW(FIELD("field1", CHAR(2)), FIELD("field2", BOOLEAN())))
        .getChildren()
        .containsExactly(CHAR(2), BOOLEAN());
}
/**
 * Validates that a value is a non-empty identifier within the given maximum
 * length and matching the identifier pattern.
 *
 * @param value the candidate identifier
 * @param maxLen maximum permitted length
 * @param name parameter name used in error messages
 * @return the validated value, unchanged
 * @throws IllegalArgumentException if too long or not matching the pattern
 */
public static String validIdentifier(String value, int maxLen, String name) {
    Check.notEmpty(value, name);
    if (value.length() > maxLen) {
        String message = MessageFormat.format("[{0}] = [{1}] exceeds max len [{2}]", name, value, maxLen);
        throw new IllegalArgumentException(message);
    }
    if (!IDENTIFIER_PATTERN.matcher(value).find()) {
        String message = MessageFormat.format("[{0}] = [{1}] must be \"{2}\"", name, value, IDENTIFIER_PATTERN_STR);
        throw new IllegalArgumentException(message);
    }
    return value;
}
/** Identifiers within the length limit and matching the pattern pass through unchanged. */
@Test
public void validIdentifierValid() throws Exception {
    assertEquals(Check.validIdentifier("a", 1, ""), "a");
    assertEquals(Check.validIdentifier("a1", 2, ""), "a1");
    assertEquals(Check.validIdentifier("a_", 3, ""), "a_");
    assertEquals(Check.validIdentifier("_", 1, ""), "_");
}
/**
 * Executes the health check registered under the given name.
 *
 * @param name the registered health check's name
 * @return the check's execution result
 * @throws NoSuchElementException if no check is registered under that name
 */
public HealthCheck.Result runHealthCheck(String name) throws NoSuchElementException {
    final HealthCheck healthCheck = healthChecks.get(name);
    if (healthCheck != null) {
        return healthCheck.execute();
    }
    throw new NoSuchElementException("No health check named " + name + " exists");
}
/** A health check registered under a name can be executed by that name. */
@Test
public void runsHealthChecksByName() {
    assertThat(registry.runHealthCheck("hc1")).isEqualTo(r1);
}
/**
 * Parses command line arguments into runtime options.
 *
 * @param args raw command line arguments
 * @return a builder populated from the parsed arguments
 */
public RuntimeOptionsBuilder parse(String... args) {
    // Delegate to the List-based overload.
    return parse(Arrays.asList(args));
}
/** With no feature paths given, the parser falls back to the classpath root and no line filters. */
@Test
void scans_class_path_root_for_features_by_default() {
    RuntimeOptions options = parser
        .parse()
        .addDefaultFeaturePathIfAbsent()
        .build();

    assertThat(options.getFeaturePaths(), is(singletonList(rootPackageUri())));
    assertThat(options.getLineFilters(), is(emptyMap()));
}
/**
 * Appends a snapshot entry to the recording log. First attempts to restore a
 * previously invalidated snapshot entry matching the same coordinates (see
 * {@code restoreInvalidSnapshot}); only when that fails is a fresh
 * {@code ENTRY_TYPE_SNAPSHOT} entry appended.
 *
 * @param recordingId archive recording id backing the snapshot
 * @param leadershipTermId leadership term the snapshot was taken in
 * @param termBaseLogPosition base log position of that term
 * @param logPosition log position the snapshot covers
 * @param timestamp timestamp of the snapshot
 * @param serviceId id of the clustered service (or consensus module marker)
 */
public void appendSnapshot(
    final long recordingId,
    final long leadershipTermId,
    final long termBaseLogPosition,
    final long logPosition,
    final long timestamp,
    final int serviceId) {
    validateRecordingId(recordingId);

    if (!restoreInvalidSnapshot(
        recordingId, leadershipTermId, termBaseLogPosition, logPosition, timestamp, serviceId)) {
        append(
            ENTRY_TYPE_SNAPSHOT,
            recordingId,
            leadershipTermId,
            termBaseLogPosition,
            logPosition,
            timestamp,
            serviceId,
            null);
    }
}
/** Invalidating the latest snapshot must fail when no term entry matches its leadership term. */
@Test
void shouldNotAllowInvalidateOfSnapshotWithoutParentTerm() {
    try (RecordingLog recordingLog = new RecordingLog(tempDir, true)) {
        // Snapshots for leadershipTermId=1 exist, but no term entry was ever appended.
        recordingLog.appendSnapshot(-10, 1L, 0, 777L, 0, 0);
        recordingLog.appendSnapshot(-11, 1L, 0, 777L, 0, SERVICE_ID);

        final ClusterException ex =
            assertThrows(ClusterException.class, recordingLog::invalidateLatestSnapshot);
        assertEquals("ERROR - no matching term for snapshot: leadershipTermId=1", ex.getMessage());
    }
}
/**
 * Creates an Elasticsearch index with the given name.
 *
 * <p>IO failures are logged and swallowed, so callers receive no signal on
 * failure. NOTE(review): confirm this best-effort behavior is intended.
 *
 * @param indexName name of the index to create
 */
public void createIndex(final String indexName) {
    try {
        client.indices().create(c -> c.index(indexName));
    } catch (IOException e) {
        LogUtils.error(LOG, "create index error:", e);
    }
}
// Smoke test: initializing the client from config and creating an index completes without error.
@Test public void testCreateIndex() { ElasticSearchLogCollectConfig.INSTANCE.setElasticSearchLogConfig(elasticSearchLogConfig); elasticSearchLogCollectClient.initClient(elasticSearchLogConfig); elasticSearchLogCollectClient.createIndex("test"); }
// Builds a lazy deserializer: extracts the embedded schema id from the payload,
// resolves the message format and its registry-aware formatter, and returns the
// JSON rendering together with schemaId/type metadata.
@Override
public Deserializer deserializer(String topic, Target type) {
    return (headers, data) -> {
        var schemaId = extractSchemaIdFromMsg(data);
        SchemaType format = getMessageFormatBySchemaId(schemaId);
        MessageFormatter formatter = schemaRegistryFormatters.get(format);
        return new DeserializeResult(
            formatter.format(topic, data),
            DeserializeResult.Type.JSON,
            Map.of(
                "schemaId", schemaId,
                "type", format.name()
            )
        );
    };
}
// Registers an Avro schema, serializes a JSON value with the magic byte and
// schema id, and verifies the deserializer yields the JSON representation plus
// "type"=AVRO and the schema id in the additional properties.
@Test void deserializeReturnsJsonAvroMsgJsonRepresentation() throws RestClientException, IOException { AvroSchema schema = new AvroSchema( "{" + " \"type\": \"record\"," + " \"name\": \"TestAvroRecord1\"," + " \"fields\": [" + " {" + " \"name\": \"field1\"," + " \"type\": \"string\"" + " }," + " {" + " \"name\": \"field2\"," + " \"type\": \"int\"" + " }" + " ]" + "}" ); String jsonValue = "{ \"field1\":\"testStr\", \"field2\": 123 }"; String topic = "test"; int schemaId = registryClient.register(topic + "-value", schema); byte[] data = toBytesWithMagicByteAndSchemaId(schemaId, jsonValue, schema); var result = serde.deserializer(topic, Serde.Target.VALUE).deserialize(null, data); assertJsonsEqual(jsonValue, result.getResult()); assertThat(result.getType()).isEqualTo(DeserializeResult.Type.JSON); assertThat(result.getAdditionalProperties()) .contains(Map.entry("type", "AVRO")) .contains(Map.entry("schemaId", schemaId)); }
// Generates the code artifacts for the given table. For master/sub templates the
// sub-tables and their columns are loaded and validated first. Returns a map of
// generated file path -> content.
@Override
public Map<String, String> generationCodes(Long tableId) {
    // validate the code-gen table and its columns exist
    CodegenTableDO table = codegenTableMapper.selectById(tableId);
    if (table == null) {
        throw exception(CODEGEN_TABLE_NOT_EXISTS);
    }
    List<CodegenColumnDO> columns = codegenColumnMapper.selectListByTableId(tableId);
    if (CollUtil.isEmpty(columns)) {
        throw exception(CODEGEN_COLUMN_NOT_EXISTS);
    }
    // for a master table, load the corresponding sub-table information
    List<CodegenTableDO> subTables = null;
    List<List<CodegenColumnDO>> subColumnsList = null;
    if (CodegenTemplateTypeEnum.isMaster(table.getTemplateType())) {
        // validate that sub-tables exist
        subTables = codegenTableMapper.selectListByTemplateTypeAndMasterTableId(
            CodegenTemplateTypeEnum.SUB.getType(), tableId);
        if (CollUtil.isEmpty(subTables)) {
            throw exception(CODEGEN_MASTER_GENERATION_FAIL_NO_SUB_TABLE);
        }
        // validate that each sub-table's join column exists among its columns
        subColumnsList = new ArrayList<>();
        for (CodegenTableDO subTable : subTables) {
            List<CodegenColumnDO> subColumns = codegenColumnMapper.selectListByTableId(subTable.getId());
            if (CollUtil.findOne(subColumns, column -> column.getId().equals(subTable.getSubJoinColumnId())) == null) {
                throw exception(CODEGEN_SUB_COLUMN_NOT_EXISTS, subTable.getId());
            }
            subColumnsList.add(subColumns);
        }
    }
    // run the generation engine
    return codegenEngine.execute(table, columns, subTables, subColumnsList);
}
@Test public void testGenerationCodes_master_success() { // mock 数据(CodegenTableDO) CodegenTableDO table = randomPojo(CodegenTableDO.class, o -> o.setScene(CodegenSceneEnum.ADMIN.getScene()) .setTemplateType(CodegenTemplateTypeEnum.MASTER_NORMAL.getType())); codegenTableMapper.insert(table); // mock 数据(CodegenColumnDO) CodegenColumnDO column01 = randomPojo(CodegenColumnDO.class, o -> o.setTableId(table.getId())); codegenColumnMapper.insert(column01); CodegenColumnDO column02 = randomPojo(CodegenColumnDO.class, o -> o.setTableId(table.getId())); codegenColumnMapper.insert(column02); // mock 数据(sub CodegenTableDO) CodegenTableDO subTable = randomPojo(CodegenTableDO.class, o -> o.setScene(CodegenSceneEnum.ADMIN.getScene()) .setTemplateType(CodegenTemplateTypeEnum.SUB.getType()) .setMasterTableId(table.getId()) .setSubJoinColumnId(1024L)); codegenTableMapper.insert(subTable); // mock 数据(sub CodegenColumnDO) CodegenColumnDO subColumn01 = randomPojo(CodegenColumnDO.class, o -> o.setId(1024L).setTableId(subTable.getId())); codegenColumnMapper.insert(subColumn01); // mock 执行生成 Map<String, String> codes = MapUtil.of(randomString(), randomString()); when(codegenEngine.execute(eq(table), argThat(columns -> { assertEquals(2, columns.size()); assertEquals(column01, columns.get(0)); assertEquals(column02, columns.get(1)); return true; }), argThat(tables -> { assertEquals(1, tables.size()); assertPojoEquals(subTable, tables.get(0)); return true; }), argThat(columns -> { assertEquals(1, columns.size()); assertPojoEquals(subColumn01, columns.size()); return true; }))).thenReturn(codes); // 准备参数 Long tableId = table.getId(); // 调用 Map<String, String> result = codegenService.generationCodes(tableId); // 断言 assertSame(codes, result); }
// Decides whether an HTTP status counts as a reportable failure. Reads the
// reporter properties from the application context (falling back to defaults on
// lookup failure). User-configured statuses take precedence over series config.
static boolean apply(@Nullable HttpStatus httpStatus) {
    if (Objects.isNull(httpStatus)) {
        return false;
    }
    RpcEnhancementReporterProperties reportProperties;
    try {
        reportProperties = ApplicationContextAwareUtils.getApplicationContext()
            .getBean(RpcEnhancementReporterProperties.class);
    } catch (BeansException e) {
        LOG.error("get RpcEnhancementReporterProperties bean err", e);
        reportProperties = new RpcEnhancementReporterProperties();
    }
    // statuses > series
    List<HttpStatus> status = reportProperties.getStatuses();
    if (status.isEmpty()) {
        List<HttpStatus.Series> series = reportProperties.getSeries();
        // Check INTERNAL_SERVER_ERROR (500) status.
        if (reportProperties.isIgnoreInternalServerError() && Objects.equals(httpStatus, INTERNAL_SERVER_ERROR)) {
            return false;
        }
        if (series.isEmpty()) {
            return HTTP_STATUSES.contains(httpStatus);
        }
        return series.contains(httpStatus.series());
    }
    // Use the user-specified fuse status code.
    return status.contains(httpStatus);
}
// With an explicit status list configured, only those statuses are reportable;
// null and unlisted statuses (200) are not.
@Test
public void testApplyWithHttpStatus() {
    RpcEnhancementReporterProperties properties = new RpcEnhancementReporterProperties();
    properties.setStatuses(Arrays.asList(HttpStatus.BAD_GATEWAY, HttpStatus.INTERNAL_SERVER_ERROR));
    ApplicationContext applicationContext = mock(ApplicationContext.class);
    doReturn(properties)
        .when(applicationContext).getBean(RpcEnhancementReporterProperties.class);
    mockedApplicationContextAwareUtils.when(ApplicationContextAwareUtils::getApplicationContext)
        .thenReturn(applicationContext);
    // Assert
    assertThat(PolarisEnhancedPluginUtils.apply(null)).isEqualTo(false);
    assertThat(PolarisEnhancedPluginUtils.apply(HttpStatus.OK)).isEqualTo(false);
    assertThat(PolarisEnhancedPluginUtils.apply(HttpStatus.INTERNAL_SERVER_ERROR)).isEqualTo(true);
    assertThat(PolarisEnhancedPluginUtils.apply(HttpStatus.BAD_GATEWAY)).isEqualTo(true);
}
/**
 * Creates an IPv6 prefix from a raw 16-byte address and a mask length.
 *
 * @param address the raw address bytes
 * @param prefixLength number of significant leading bits
 * @return the IPv6 prefix
 */
public static Ip6Prefix valueOf(byte[] address, int prefixLength) {
    final Ip6Address ip = Ip6Address.valueOf(address);
    return new Ip6Prefix(ip, prefixLength);
}
// valueOf(String) masks host bits to the prefix length and canonicalizes the
// textual form (e.g. /120 zeroes the last byte, /0 collapses to "::/0").
@Test public void testValueOfStringIPv6() { Ip6Prefix ipPrefix; ipPrefix = Ip6Prefix.valueOf("1111:2222:3333:4444:5555:6666:7777:8888/120"); assertThat(ipPrefix.toString(), is("1111:2222:3333:4444:5555:6666:7777:8800/120")); ipPrefix = Ip6Prefix.valueOf("1111:2222:3333:4444:5555:6666:7777:8888/128"); assertThat(ipPrefix.toString(), is("1111:2222:3333:4444:5555:6666:7777:8888/128")); ipPrefix = Ip6Prefix.valueOf("::/0"); assertThat(ipPrefix.toString(), is("::/0")); ipPrefix = Ip6Prefix.valueOf("::/128"); assertThat(ipPrefix.toString(), is("::/128")); ipPrefix = Ip6Prefix.valueOf("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/0"); assertThat(ipPrefix.toString(), is("::/0")); ipPrefix = Ip6Prefix.valueOf("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/64"); assertThat(ipPrefix.toString(), is("ffff:ffff:ffff:ffff::/64")); ipPrefix = Ip6Prefix.valueOf("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128"); assertThat(ipPrefix.toString(), is("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff/128")); }
/**
 * Renders this template against the given context object.
 *
 * @param o the object providing values for the template
 * @return the rendered text
 */
public String render(Object o) {
    // Pre-size the buffer to the template length as a lower-bound estimate.
    final StringBuilder out = new StringBuilder(template.length());
    render(o, out);
    return out.toString();
}
// A template without placeholders renders as its literal text, regardless of context.
@Test public void plainTextRenderedAsIs() { Template template = new Template("Hello World"); assertEquals("Hello World", template.render(foo)); }
// Returns a view of the double[] under test as an iterable whose elements are
// compared using exact (bitwise) equality rather than a tolerance.
public DoubleArrayAsIterable usingExactEquality() { return new DoubleArrayAsIterable(EXACT_EQUALITY_CORRESPONDENCE, iterableSubject()); }
// Exact-equality containsExactly must fail when the subject holds an extra
// element, reporting 3.3 as the unexpected value.
@Test public void usingExactEquality_containsExactly_primitiveDoubleArray_failure() { expectFailureWhenTestingThat(array(1.1, 2.2, 3.3)) .usingExactEquality() .containsExactly(array(2.2, 1.1)); assertFailureKeys( "value of", "unexpected (1)", "---", "expected", "testing whether", "but was"); assertFailureValue("unexpected (1)", "3.3"); }
// DEL over multiple keys. In transaction/pipeline mode each DEL is queued and
// null is returned (per-key results surface when the pipeline is closed);
// otherwise the DELs run as one batch and the per-key delete counts are summed.
@Override
public Long del(byte[]... keys) {
    if (isQueueing() || isPipelined()) {
        for (byte[] key: keys) {
            write(key, LongCodec.INSTANCE, RedisCommands.DEL, key);
        }
        return null;
    }
    CommandBatchService es = new CommandBatchService(executorService);
    for (byte[] key: keys) {
        es.writeAsync(key, StringCodec.INSTANCE, RedisCommands.DEL, key);
    }
    BatchResult<Long> b = (BatchResult<Long>) es.execute();
    return b.getResponses().stream().collect(Collectors.summarizingLong(v -> v)).getSum();
}
// In pipeline mode GET/DEL return null immediately; closePipeline yields the
// stored value and a delete count of 1.
@Test public void testDelPipeline() { testInCluster(connection -> { byte[] k = "key".getBytes(); byte[] v = "val".getBytes(); connection.set(k, v); connection.openPipeline(); connection.get(k); connection.del(k); List<Object> results = connection.closePipeline(); byte[] val = (byte[])results.get(0); assertThat(val).isEqualTo(v); Long res = (Long) results.get(1); assertThat(res).isEqualTo(1); }); }
/**
 * Maps the service's canary distribution to its JMX integer code:
 * STABLE -> 0, CANARY -> 1, anything else -> -1.
 *
 * @return the integer code of the current distribution policy
 */
@Override
public int getCanaryDistributionPolicy() {
    switch (_serviceProperties.getDistribution()) {
        case CANARY:
            return 1;
        case STABLE:
            return 0;
        default:
            return -1;
    }
}
// Data-provider driven: each distribution value maps to its expected JMX integer code.
@Test(dataProvider = "getCanaryDistributionPoliciesTestData") public void testGetCanaryDistributionPolicy(CanaryDistributionProvider.Distribution distribution, int expectedValue) { ServicePropertiesJmx servicePropertiesJmx = new ServicePropertiesJmx( new LoadBalancerStateItem<>( new ServiceProperties("Foo", "Bar", "/", Collections.singletonList("Random")), 0, 0, distribution ) ); Assert.assertEquals(servicePropertiesJmx.getCanaryDistributionPolicy(), expectedValue); }
public static boolean isBasicInfoChanged(Member actual, Member expected) { if (null == expected) { return null != actual; } if (!expected.getIp().equals(actual.getIp())) { return true; } if (expected.getPort() != actual.getPort()) { return true; } if (!expected.getAddress().equals(actual.getAddress())) { return true; } if (!expected.getState().equals(actual.getState())) { return true; } // if change if (expected.isGrpcReportEnabled() != actual.isGrpcReportEnabled()) { return true; } return isBasicInfoChangedInExtendInfo(expected, actual); }
// Two identically-built members (no extend info) must not be reported as changed.
@Test void testIsBasicInfoChangedNoChangeWithoutExtendInfo() { Member newMember = buildMember(); assertFalse(MemberUtil.isBasicInfoChanged(newMember, originalMember)); }
public static List<Descriptor> getDescriptors(Class<?> clz) { // TODO(chaokunyang) add cache by weak class key, see java.io.ObjectStreamClass.WeakClassKey. SortedMap<Field, Descriptor> allDescriptorsMap = getAllDescriptorsMap(clz); Map<String, List<Field>> duplicateNameFields = getDuplicateNameFields(allDescriptorsMap); checkArgument( duplicateNameFields.size() == 0, "%s has duplicate fields %s", clz, duplicateNameFields); return new ArrayList<>(allDescriptorsMap.values()); }
// Smoke test: Descriptor.getDescriptors must complete without throwing for a
// regular bean class (no duplicate field names).
// NOTE(review): the `descriptors` list built via Introspector below is never
// asserted against — presumably a leftover fixture; confirm intent or remove.
@Test
public void getDescriptorsTest() throws IntrospectionException {
    Class<?> clz = BeanA.class;
    TypeRef<?> typeRef = TypeRef.of(clz);
    // sort to fix field order
    List<?> descriptors = Arrays.stream(Introspector.getBeanInfo(clz).getPropertyDescriptors())
        .filter(d -> !d.getName().equals("class"))
        .filter(d -> !d.getName().equals("declaringClass"))
        .filter(d -> d.getReadMethod() != null && d.getWriteMethod() != null)
        .map(
            p -> {
                TypeRef<?> returnType = TypeRef.of(p.getReadMethod().getReturnType());
                return Arrays.asList(
                    p.getName(),
                    returnType,
                    p.getReadMethod().getName(),
                    p.getWriteMethod().getName());
            })
        .collect(Collectors.toList());
    Descriptor.getDescriptors(clz);
}
// Replaces the view with the given id after checking the user's edit permission
// and the view's integrity, then records the update as recent activity
// (dashboard vs. search GRN type).
@PUT
@Path("{id}")
@ApiOperation("Update view")
@AuditEvent(type = ViewsAuditEventTypes.VIEW_UPDATE)
public ViewDTO update(@ApiParam(name = "id") @PathParam("id") @NotEmpty String id,
                      @ApiParam @Valid ViewDTO dto,
                      @Context SearchUser searchUser) {
    // force the path id onto the payload so the body cannot target a different view
    final ViewDTO updatedDTO = dto.toBuilder().id(id).build();
    if (!searchUser.canUpdateView(updatedDTO)) {
        throw new ForbiddenException("Not allowed to edit " + summarize(updatedDTO) + ".");
    }
    validateIntegrity(updatedDTO, searchUser, false);
    var result = dbService.update(updatedDTO);
    recentActivityService.update(result.id(), result.type().equals(ViewDTO.Type.DASHBOARD) ? GRNTypes.DASHBOARD : GRNTypes.SEARCH, searchUser);
    return result;
}
// Updating a dashboard that references a search filter the user cannot see must
// be rejected with a BadRequestException naming the invisible filter.
@Test public void throwsExceptionWhenUpdatingDashboardWithFilterThatUserIsNotAllowedToSee() { final ViewsResource viewsResource = createViewsResource( mockViewService(TEST_DASHBOARD_VIEW), mock(StartPageService.class), mock(RecentActivityService.class), mock(ClusterEventBus.class), new ReferencedSearchFiltersHelper(), searchFilterVisibilityChecker(Collections.singletonList("<<You cannot see this filter>>")), EMPTY_VIEW_RESOLVERS, SEARCH ); Assertions.assertThatThrownBy(() -> viewsResource.update(VIEW_ID, TEST_DASHBOARD_VIEW, SEARCH_USER)) .isInstanceOf(BadRequestException.class) .hasMessageContaining("View cannot be saved, as it contains Search Filters which you are not privileged to view : [<<You cannot see this filter>>]"); }
// Returns this object's numeric identifier.
public long getId() { return id; }
// getId returns the id the fixture event was constructed with.
@Test
public void testGetId() {
    // Test the getId method
    assertEquals(123456789L, event.getId());
}
/**
 * FEEL string() function: null maps to a null result, any other value to its
 * FEEL string representation.
 *
 * @param val the value to convert (may be null)
 * @return the string form wrapped in a successful FEEL result
 */
public FEELFnResult<String> invoke(@ParameterName("from") Object val) {
    final String formatted = (val == null) ? null : TypeUtil.formatValue(val, false);
    return FEELFnResult.ofResult(formatted);
}
// string(LocalDate) renders the date via its ISO-8601 toString form.
@Test void invokeLocalDate() { final LocalDate localDate = LocalDate.now(); FunctionTestUtil.assertResult(stringFunction.invoke(localDate), localDate.toString()); }
/**
 * Caches the given plugin configuration under its name.
 * A null payload is silently ignored.
 *
 * @param pluginData the plugin data to cache (may be null)
 */
public void cachePluginData(final PluginData pluginData) {
    if (pluginData != null) {
        PLUGIN_MAP.put(pluginData.getName(), pluginData);
    }
}
// Caching a plugin stores it in PLUGIN_MAP under its name (the cache map is
// obtained via reflection to observe the internal state).
@Test public void testCachePluginData() throws NoSuchFieldException, IllegalAccessException { PluginData pluginData = PluginData.builder().name(mockName1).build(); ConcurrentHashMap<String, PluginData> pluginMap = getFieldByName(pluginMapStr); assertNull(pluginMap.get(mockName1)); BaseDataCache.getInstance().cachePluginData(pluginData); assertNotNull(pluginMap.get(mockName1)); assertEquals(pluginData, pluginMap.get(mockName1)); }
// Clears the registration lock for the authenticated primary device's account:
// loads the account (UNAUTHENTICATED if absent), persists a null lock, and
// returns an empty response.
@Override
public Mono<ClearRegistrationLockResponse> clearRegistrationLock(final ClearRegistrationLockRequest request) {
    final AuthenticatedDevice authenticatedDevice = AuthenticationUtil.requireAuthenticatedPrimaryDevice();
    return Mono.fromFuture(() -> accountsManager.getByAccountIdentifierAsync(authenticatedDevice.accountIdentifier()))
        .map(maybeAccount -> maybeAccount.orElseThrow(Status.UNAUTHENTICATED::asRuntimeException))
        .flatMap(account -> Mono.fromFuture(() -> accountsManager.updateAsync(account, a -> a.setRegistrationLock(null, null))))
        .map(ignored -> ClearRegistrationLockResponse.newBuilder().build());
}
// clearRegistrationLock must reset the authenticated account's lock to (null, null).
@Test void clearRegistrationLock() { final Account account = mock(Account.class); when(accountsManager.getByAccountIdentifierAsync(AUTHENTICATED_ACI)) .thenReturn(CompletableFuture.completedFuture(Optional.of(account))); final ClearRegistrationLockResponse ignored = authenticatedServiceStub().clearRegistrationLock(ClearRegistrationLockRequest.newBuilder().build()); verify(account).setRegistrationLock(null, null); }
/**
 * Groups the stream by the key produced by {@code keySelector}, keeping this
 * stream's value serde and leaving the key serde to be inferred downstream.
 *
 * @param keySelector maps each record to its grouping key
 * @return the grouped stream
 */
@Override
public <KR> KGroupedStream<KR, V> groupBy(final KeyValueMapper<? super K, ? super V, KR> keySelector) {
    final Grouped<KR, V> grouped = Grouped.with(null, valueSerde);
    return groupBy(keySelector, grouped);
}
// groupBy(null) must be rejected with an NPE whose message names keySelector.
@Test public void shouldNotAllowNullSelectorOnGroupBy() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.groupBy(null)); assertThat(exception.getMessage(), equalTo("keySelector can't be null")); }
// Returns the raw internal byte array without a defensive copy — callers share
// (and can mutate) the backing buffer.
public byte[] getData() { return this.data; }
// Deserializing a slice of the buffer yields data of exactly DATA_LENGTH bytes.
@Test public void testDeserializePartOfBuffer() throws Exception { Data data = deserializer.deserialize(dataBuffer, 0, DATA_LENGTH); assertEquals(DATA_LENGTH, data.getData().length); }
// Partitions the page, spills rows whose partition matches the mask, and returns
// the rows retained in memory plus a future tracking the in-flight spill writes.
// Must not be called after reading has started.
@Override
public synchronized PartitioningSpillResult partitionAndSpill(Page page, IntPredicate spillPartitionMask) {
    requireNonNull(page, "page is null");
    requireNonNull(spillPartitionMask, "spillPartitionMask is null");
    checkArgument(page.getChannelCount() == types.size(), "Wrong page channel count, expected %s but got %s", types.size(), page.getChannelCount());
    checkState(!readingStarted, "reading already started");
    IntArrayList unspilledPositions = partitionPage(page, spillPartitionMask);
    ListenableFuture<?> future = flushFullBuilders();
    return new PartitioningSpillResult(future, page.getPositions(unspilledPositions.elements(), 0, unspilledPositions.size()));
}
// Rows for masked partitions (1, 2) are spilled while the rest are retained;
// the spilled pages must match the per-partition expectations.
@Test public void testFileSpiller() throws Exception { try (PartitioningSpiller spiller = factory.create( TYPES, new FourFixedPartitionsPartitionFunction(0), mockSpillContext(), mockMemoryContext(scheduledExecutor))) { RowPagesBuilder builder = RowPagesBuilder.rowPagesBuilder(TYPES); builder.addSequencePage(10, SECOND_PARTITION_START, 5, 10, 15); builder.addSequencePage(10, FIRST_PARTITION_START, -5, 0, 5); List<Page> firstSpill = builder.build(); builder = RowPagesBuilder.rowPagesBuilder(TYPES); builder.addSequencePage(10, THIRD_PARTITION_START, 15, 20, 25); builder.addSequencePage(10, FOURTH_PARTITION_START, 25, 30, 35); List<Page> secondSpill = builder.build(); IntPredicate spillPartitionMask = ImmutableSet.of(1, 2)::contains; PartitioningSpillResult result = spiller.partitionAndSpill(firstSpill.get(0), spillPartitionMask); result.getSpillingFuture().get(); assertEquals(result.getRetained().getPositionCount(), 0); result = spiller.partitionAndSpill(firstSpill.get(1), spillPartitionMask); result.getSpillingFuture().get(); assertEquals(result.getRetained().getPositionCount(), 10); result = spiller.partitionAndSpill(secondSpill.get(0), spillPartitionMask); result.getSpillingFuture().get(); assertEquals(result.getRetained().getPositionCount(), 0); result = spiller.partitionAndSpill(secondSpill.get(1), spillPartitionMask); result.getSpillingFuture().get(); assertEquals(result.getRetained().getPositionCount(), 10); builder = RowPagesBuilder.rowPagesBuilder(TYPES); builder.addSequencePage(10, SECOND_PARTITION_START, 5, 10, 15); List<Page> secondPartition = builder.build(); builder = RowPagesBuilder.rowPagesBuilder(TYPES); builder.addSequencePage(10, THIRD_PARTITION_START, 15, 20, 25); List<Page> thirdPartition = builder.build(); assertSpilledPages( TYPES, spiller, ImmutableList.of(ImmutableList.of(), secondPartition, thirdPartition, ImmutableList.of())); } }
// Returns whether an upgrade is available for the given connected edge.
// Requires edges support to be enabled and the upgrade service to be present;
// otherwise fails with a ThingsboardException.
@ApiOperation(value = "Is edge upgrade enabled (isEdgeUpgradeAvailable)", notes = "Returns 'true' if upgrade available for connected edge, 'false' - otherwise.")
@PreAuthorize("hasAnyAuthority('TENANT_ADMIN')")
@GetMapping(value = "/edge/{edgeId}/upgrade/available")
public boolean isEdgeUpgradeAvailable(
        @Parameter(description = EDGE_ID_PARAM_DESCRIPTION, required = true)
        @PathVariable("edgeId") String strEdgeId) throws Exception {
    if (isEdgesEnabled() && edgeUpgradeServiceOpt.isPresent()) {
        // toUUID validates the textual id; `new EdgeId(...)` can never be null, so the
        // former `edgeId = checkNotNull(edgeId)` self-assignment was dead code and is removed.
        EdgeId edgeId = new EdgeId(toUUID(strEdgeId));
        Edge edge = checkEdgeId(edgeId, Operation.READ);
        return edgeUpgradeServiceOpt.get().isUpgradeAvailable(edge.getTenantId(), edge.getId());
    } else {
        throw new ThingsboardException("Edges support disabled", ThingsboardErrorCode.GENERAL);
    }
}
// Walks server/edge version combinations: an upgrade is only offered when the
// running server version is strictly ahead of the edge version along a supported
// upgrade path (3.5.0 has none; 3.6.0 and 3.6.1 can upgrade to newer 3.6.x).
@Test
public void testIsEdgeUpgradeAvailable() throws Exception {
    Edge edge = constructEdge("Edge Upgrade Available", "default");
    Edge savedEdge = doPost("/api/edge", edge, Edge.class);
    // Test 3.5.0 Edge - upgrade not available
    String body = "{\"edgeVersion\": \"V_3_5_0\"}";
    doPostAsync("/api/plugins/telemetry/EDGE/" + savedEdge.getId().getId() + "/attributes/SERVER_SCOPE", body, String.class, status().isOk());
    edgeUpgradeInstructionsService.setAppVersion("3.6.0");
    Assert.assertFalse(edgeUpgradeInstructionsService.isUpgradeAvailable(savedEdge.getTenantId(), savedEdge.getId()));
    edgeUpgradeInstructionsService.setAppVersion("3.6.2");
    Assert.assertFalse(edgeUpgradeInstructionsService.isUpgradeAvailable(savedEdge.getTenantId(), savedEdge.getId()));
    edgeUpgradeInstructionsService.setAppVersion("3.6.2.7");
    Assert.assertFalse(edgeUpgradeInstructionsService.isUpgradeAvailable(savedEdge.getTenantId(), savedEdge.getId()));
    // Test 3.6.0 Edge - upgrade available
    body = "{\"edgeVersion\": \"V_3_6_0\"}";
    doPostAsync("/api/plugins/telemetry/EDGE/" + savedEdge.getId().getId() + "/attributes/SERVER_SCOPE", body, String.class, status().isOk());
    edgeUpgradeInstructionsService.setAppVersion("3.6.0");
    Assert.assertFalse(edgeUpgradeInstructionsService.isUpgradeAvailable(savedEdge.getTenantId(), savedEdge.getId()));
    edgeUpgradeInstructionsService.setAppVersion("3.6.1.5");
    Assert.assertTrue(edgeUpgradeInstructionsService.isUpgradeAvailable(savedEdge.getTenantId(), savedEdge.getId()));
    edgeUpgradeInstructionsService.setAppVersion("3.6.2");
    Assert.assertTrue(edgeUpgradeInstructionsService.isUpgradeAvailable(savedEdge.getTenantId(), savedEdge.getId()));
    // Test 3.6.1 Edge - upgrade available
    body = "{\"edgeVersion\": \"V_3_6_1\"}";
    doPostAsync("/api/plugins/telemetry/EDGE/" + savedEdge.getId().getId() + "/attributes/SERVER_SCOPE", body, String.class, status().isOk());
    edgeUpgradeInstructionsService.setAppVersion("3.6.1");
    Assert.assertFalse(edgeUpgradeInstructionsService.isUpgradeAvailable(savedEdge.getTenantId(), savedEdge.getId()));
    edgeUpgradeInstructionsService.setAppVersion("3.6.2");
    Assert.assertTrue(edgeUpgradeInstructionsService.isUpgradeAvailable(savedEdge.getTenantId(), savedEdge.getId()));
    edgeUpgradeInstructionsService.setAppVersion("3.6.2.6");
    Assert.assertTrue(edgeUpgradeInstructionsService.isUpgradeAvailable(savedEdge.getTenantId(), savedEdge.getId()));
}
// Ensures no other config already uses the given key. An id of null means this
// is a create, so any existing config with the same key is a duplicate.
@VisibleForTesting
public void validateConfigKeyUnique(Long id, String key) {
    ConfigDO config = configMapper.selectByKey(key);
    if (config == null) {
        return;
    }
    // id is null on create, so there is no "same id" config to exempt — duplicate
    if (id == null) {
        throw exception(CONFIG_KEY_DUPLICATE);
    }
    if (!config.getId().equals(id)) {
        throw exception(CONFIG_KEY_DUPLICATE);
    }
}
// A random key that is absent from the table passes the uniqueness check without throwing.
@Test
public void testValidateConfigKeyUnique_success() {
    // invoke, expect success
    configService.validateConfigKeyUnique(randomLongId(), randomString());
}
/**
 * Parses an ISO-8601 date/date-time string starting at {@code pos}.
 * Supported shapes: yyyy[-]MM[-]dd, optionally followed by 'T'HH[:]mm[[:]ss[.SSS]]
 * and a mandatory time-zone indicator ('Z', '+hh[:mm]' or '-hh[:mm]') when a
 * time component is present. {@code pos} is advanced past the consumed text.
 *
 * @param date the text to parse
 * @param pos  the start position; updated to the end of the parsed region
 * @return the parsed {@link Date}
 * @throws ParseException if the text is not a valid ISO-8601 date/time
 */
public static Date parse(String date, ParsePosition pos) throws ParseException {
    Exception fail = null;
    try {
        int offset = pos.getIndex();
        // extract year
        int year = parseInt(date, offset, offset += 4);
        if (checkOffset(date, offset, '-')) {
            offset += 1;
        }
        // extract month
        int month = parseInt(date, offset, offset += 2);
        if (checkOffset(date, offset, '-')) {
            offset += 1;
        }
        // extract day
        int day = parseInt(date, offset, offset += 2);
        // default time value
        int hour = 0;
        int minutes = 0;
        int seconds = 0;
        // always use 0 otherwise returned date will include millis of current time
        int milliseconds = 0;
        // if the value has no time component (and no time zone), we are done
        boolean hasT = checkOffset(date, offset, 'T');
        if (!hasT && (date.length() <= offset)) {
            Calendar calendar = new GregorianCalendar(year, month - 1, day);
            // lenient=false so out-of-range fields (e.g. month 14) throw instead of rolling over
            calendar.setLenient(false);
            pos.setIndex(offset);
            return calendar.getTime();
        }
        if (hasT) {
            // extract hours, minutes, seconds and milliseconds
            hour = parseInt(date, offset += 1, offset += 2);
            if (checkOffset(date, offset, ':')) {
                offset += 1;
            }
            minutes = parseInt(date, offset, offset += 2);
            if (checkOffset(date, offset, ':')) {
                offset += 1;
            }
            // second and milliseconds can be optional
            if (date.length() > offset) {
                char c = date.charAt(offset);
                if (c != 'Z' && c != '+' && c != '-') {
                    seconds = parseInt(date, offset, offset += 2);
                    if (seconds > 59 && seconds < 63) {
                        seconds = 59; // truncate up to 3 leap seconds
                    }
                    // milliseconds can be optional in the format
                    if (checkOffset(date, offset, '.')) {
                        offset += 1;
                        int endOffset = indexOfNonDigit(date, offset + 1); // assume at least one digit
                        int parseEndOffset = Math.min(endOffset, offset + 3); // parse up to 3 digits
                        int fraction = parseInt(date, offset, parseEndOffset);
                        // compensate for "missing" digits
                        switch (parseEndOffset - offset) { // number of digits parsed
                            case 2:
                                milliseconds = fraction * 10;
                                break;
                            case 1:
                                milliseconds = fraction * 100;
                                break;
                            default:
                                milliseconds = fraction;
                        }
                        offset = endOffset;
                    }
                }
            }
        }
        // extract timezone
        if (date.length() <= offset) {
            throw new IllegalArgumentException("No time zone indicator");
        }
        TimeZone timezone = null;
        char timezoneIndicator = date.charAt(offset);
        if (timezoneIndicator == 'Z') {
            timezone = TIMEZONE_UTC;
            offset += 1;
        } else if (timezoneIndicator == '+' || timezoneIndicator == '-') {
            String timezoneOffset = date.substring(offset);
            // When timezone has no minutes, we should append it, valid timezones are, for example:
            // +00:00, +0000 and +00
            timezoneOffset = timezoneOffset.length() >= 5 ? timezoneOffset : timezoneOffset + "00";
            offset += timezoneOffset.length();
            // 18-Jun-2015, tatu: Minor simplification, skip offset of "+0000"/"+00:00"
            if (timezoneOffset.equals("+0000") || timezoneOffset.equals("+00:00")) {
                timezone = TIMEZONE_UTC;
            } else {
                // 18-Jun-2015, tatu: Looks like offsets only work from GMT, not UTC...
                //    not sure why, but that's the way it looks. Further, Javadocs for
                //    `java.util.TimeZone` specifically instruct use of GMT as base for
                //    custom timezones... odd.
                String timezoneId = "GMT" + timezoneOffset;
                // String timezoneId = "UTC" + timezoneOffset;
                timezone = TimeZone.getTimeZone(timezoneId);
                String act = timezone.getID();
                if (!act.equals(timezoneId)) {
                    /* 22-Jan-2015, tatu: Looks like canonical version has colons, but we may be given
                     *    one without. If so, don't sweat.
                     *    Yes, very inefficient. Hopefully not hit often.
                     *    If it becomes a perf problem, add 'loose' comparison instead.
                     */
                    String cleaned = act.replace(":", "");
                    if (!cleaned.equals(timezoneId)) {
                        throw new IndexOutOfBoundsException(
                            "Mismatching time zone indicator: " + timezoneId + " given, resolves to " + timezone.getID());
                    }
                }
            }
        } else {
            throw new IndexOutOfBoundsException(
                "Invalid time zone indicator '" + timezoneIndicator + "'");
        }
        Calendar calendar = new GregorianCalendar(timezone);
        calendar.setLenient(false);
        calendar.set(Calendar.YEAR, year);
        calendar.set(Calendar.MONTH, month - 1);
        calendar.set(Calendar.DAY_OF_MONTH, day);
        calendar.set(Calendar.HOUR_OF_DAY, hour);
        calendar.set(Calendar.MINUTE, minutes);
        calendar.set(Calendar.SECOND, seconds);
        calendar.set(Calendar.MILLISECOND, milliseconds);
        pos.setIndex(offset);
        return calendar.getTime();
        // If we get a ParseException it'll already have the right message/offset.
        // Other exception types can convert here.
    } catch (IndexOutOfBoundsException | IllegalArgumentException e) {
        fail = e;
    }
    String input = (date == null) ? null : ('"' + date + '"');
    String msg = fail.getMessage();
    if (msg == null || msg.isEmpty()) {
        msg = "(" + fail.getClass().getName() + ")";
    }
    ParseException ex = new ParseException("Failed to parse date [" + input + "]: " + msg, pos.getIndex());
    ex.initCause(fail);
    throw ex;
}
// An out-of-range month (14) must be rejected: the lenient=false calendar throws,
// which parse() converts into a ParseException.
@Test public void testDateParseInvalidMonth() { String dateStr = "2022-14-30"; assertThrows(ParseException.class, () -> ISO8601Utils.parse(dateStr, new ParsePosition(0))); }
// Convenience overload: an increasing byte buffer of the given length starting at 0.
public static ByteBuffer getIncreasingByteBuffer(int len) { return getIncreasingByteBuffer(0, len); }
// getIncreasingByteBuffer(start, length) yields `length` bytes counting up from
// `start`; table-driven over empty, single-byte and multi-byte cases.
@Test public void getIncreasingByteBuffer() { class TestCase { ByteBuffer mExpected; int mLength; int mStart; public TestCase(ByteBuffer expected, int length, int start) { mExpected = expected; mLength = length; mStart = start; } } ArrayList<TestCase> testCases = new ArrayList<>(); testCases.add(new TestCase(ByteBuffer.wrap(new byte[] {}), 0, 0)); testCases.add(new TestCase(ByteBuffer.wrap(new byte[] {}), 0, 3)); testCases.add(new TestCase(ByteBuffer.wrap(new byte[] {0}), 1, 0)); testCases.add(new TestCase(ByteBuffer.wrap(new byte[] {0, 1, 2}), 3, 0)); testCases.add(new TestCase(ByteBuffer.wrap(new byte[] {3}), 1, 3)); testCases.add(new TestCase(ByteBuffer.wrap(new byte[] {3, 4, 5}), 3, 3)); for (TestCase testCase : testCases) { ByteBuffer result = BufferUtils.getIncreasingByteBuffer(testCase.mStart, testCase.mLength); assertEquals(testCase.mExpected.capacity(), result.capacity()); for (int k = 0; k < result.capacity(); k++) { assertEquals(testCase.mExpected.get(k), result.get(k)); } } }
// Hashes the token with SHA-384 and returns the 96-character lowercase hex digest.
@Override public String hash(String token) { return DigestUtils.sha384Hex(token); }
// SHA-384 of a fixed 40-char token is a stable, 96-char lowercase hex digest.
@Test public void hash_token() { String hash = underTest.hash("1234567890123456789012345678901234567890"); assertThat(hash) .hasSize(96) .isEqualTo("b2501fc3833ae6feba7dc8a973a22d709b7c796ee97cbf66db2c22df873a9fa147b1b630878f771457b7769efd9ffa0d") .matches("[0-9a-f]+"); }
// Writes the <server> configuration: root element with the versioned default
// namespace, then interfaces, socket bindings, security, data sources and endpoints.
@Override
public void serialize(ConfigurationWriter writer, ServerConfiguration configuration) {
    writer.writeStartElement(Element.SERVER);
    writer.writeDefaultNamespace(NAMESPACE + Version.getMajorMinor());
    writeInterfaces(writer, configuration.interfaces);
    writeSocketBindings(writer, configuration.socketBindings);
    writeSecurity(writer, configuration.security());
    writeDataSources(writer, configuration.dataSources());
    writeEndpoints(writer, configuration.endpoints());
    writer.writeEndElement();
}
// Round-trip test: parse the server config, serialize it, re-parse the output,
// then compare every server subsection (interfaces, bindings, data sources,
// security, transport, endpoints) between the two configurations.
@Test public void testConfigurationSerialization() throws IOException { Properties properties = new Properties(); properties.put("infinispan.server.config.path", config.getParent().getParent().toString()); properties.setProperty(Server.INFINISPAN_SERVER_HOME_PATH, Paths.get(System.getProperty("build.directory")).toString()); properties.setProperty("org.infinispan.test.host.address", "127.0.0.1"); ParserRegistry registry = new ParserRegistry(Thread.currentThread().getContextClassLoader(), false, properties); ConfigurationBuilderHolder holderBefore = registry.parse(config); ByteArrayOutputStream baos = new ByteArrayOutputStream(); Map<String, Configuration> configurations = new HashMap<>(); for (Map.Entry<String, ConfigurationBuilder> configuration : holderBefore.getNamedConfigurationBuilders().entrySet()) { configurations.put(configuration.getKey(), configuration.getValue().build()); } try (ConfigurationWriter writer = ConfigurationWriter.to(baos).withType(type).clearTextSecrets(true).build()) { registry.serialize(writer, holderBefore.getGlobalConfigurationBuilder().build(), configurations); } log.debug(baos); ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); ConfigurationBuilderHolder holderAfter = registry.parse(bais, ConfigurationResourceResolvers.DEFAULT, type); GlobalConfiguration globalConfigurationBefore = holderBefore.getGlobalConfigurationBuilder().build(); GlobalConfiguration globalConfigurationAfter = holderAfter.getGlobalConfigurationBuilder().build(); ServerConfiguration serverBefore = globalConfigurationBefore.module(ServerConfiguration.class); ServerConfiguration serverAfter = globalConfigurationAfter.module(ServerConfiguration.class); compare(serverBefore.interfaces.interfaces(), serverAfter.interfaces.interfaces()); compare(serverBefore.socketBindings, serverAfter.socketBindings); compare(serverBefore.dataSources, serverAfter.dataSources); compare(serverBefore.security.credentialStores(), serverAfter.security.credentialStores()); compare(serverBefore.security.realms().realms(), serverAfter.security.realms().realms()); compare(serverBefore.transport(), serverAfter.transport(), org.infinispan.server.configuration.Attribute.SECURITY_REALM.toString()); compare(serverBefore.endpoints.endpoints(), serverAfter.endpoints.endpoints()); for (int i = 0; i < serverBefore.endpoints.endpoints().size(); i++) { EndpointConfiguration endpointBefore = serverBefore.endpoints.endpoints().get(i); EndpointConfiguration endpointAfter = serverAfter.endpoints.endpoints().get(i); compare(endpointBefore, endpointAfter); compare(endpointBefore.connectors(), endpointAfter.connectors()); compare(endpointBefore.singlePortRouter(), endpointAfter.singlePortRouter()); } }