focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Sends a message, qualifying its topic with the configured namespace first.
 * Single messages are routed through the accumulator when auto-batching is
 * enabled; explicit batches always go out directly.
 */
@Override
public SendResult send(Message msg)
        throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
    msg.setTopic(withNamespace(msg.getTopic()));
    // A MessageBatch is already batched — never re-accumulate it.
    final boolean useAccumulator = this.getAutoBatch() && !(msg instanceof MessageBatch);
    return useAccumulator
            ? sendByAccumulator(msg, null, null)
            : sendDirect(msg, null, null);
}
/** Sending a message with an empty body must fail client-side validation. */
@Test
public void testSendMessage_ZeroMessage() throws InterruptedException, RemotingException, MQBrokerException {
    try {
        producer.send(zeroMsg);
        // Reaching this line means validation did not reject the empty body.
        failBecauseExceptionWasNotThrown(MQClientException.class);
    } catch (MQClientException e) {
        assertThat(e).hasMessageContaining("message body length is zero");
    }
}
/** Static factory: entry point for configuring and building a Fury instance. */
public static FuryBuilder builder() {
    return new FuryBuilder();
}
/** Fields f1/f2 are expected to be skipped by serialization (left at default 0); f3 round-trips. */
@Test
public void testIgnoreFields() {
    Fury fury = Fury.builder().requireClassRegistration(false).build();
    IgnoreFields o = serDe(fury, new IgnoreFields(1, 2, 3));
    assertEquals(0, o.f1);
    assertEquals(0, o.f2);
    assertEquals(3, o.f3);
}
/**
 * Overload accepting a legacy {@code TypeInformation}: bridges it into a
 * {@code DataType} and delegates to the {@code DataType}-based variant.
 */
public static ConsumingResult createConsumingResult(
        DataTypeFactory dataTypeFactory,
        TypeInformation<?> inputTypeInfo,
        @Nullable Schema declaredSchema) {
    final DataType bridgedType =
            TypeInfoDataTypeConverter.toDataType(dataTypeFactory, inputTypeInfo);
    return createConsumingResult(dataTypeFactory, bridgedType, declaredSchema, true);
}
/**
 * Declared schema carries only computed columns and a primary key; the
 * physical columns (f0, f1) must be derived from the input type info, with f0
 * becoming NOT NULL because it is part of the primary key.
 */
@Test
void testInputFromRowWithNonPhysicalDeclaredSchema() {
    final TypeInformation<?> inputTypeInfo = Types.ROW(Types.INT, Types.LONG);
    final ConsumingResult result =
            SchemaTranslator.createConsumingResult(
                    dataTypeFactory(),
                    inputTypeInfo,
                    Schema.newBuilder()
                            .columnByExpression("computed", "f1 + 42")
                            .columnByExpression("computed2", "f1 - 1")
                            .primaryKeyNamed("pk", "f0")
                            .build());
    assertThat(result.getPhysicalDataType())
            .isEqualTo(ROW(FIELD("f0", INT()), FIELD("f1", BIGINT())).notNull());
    assertThat(result.isTopLevelRecord()).isTrue();
    assertThat(result.getSchema())
            .isEqualTo(
                    Schema.newBuilder()
                            .column("f0", INT().notNull()) // not null due to primary key
                            .column("f1", BIGINT())
                            .columnByExpression("computed", "f1 + 42")
                            .columnByExpression("computed2", "f1 - 1")
                            .primaryKeyNamed("pk", "f0")
                            .build());
    assertThat(result.getProjections()).isNull();
}
/**
 * Reads attributes for a path, with special handling for the root (empty),
 * buckets (region only), in-progress multipart uploads (uploaded size), and
 * versioned objects (delete markers, duplicate/latest detection).
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    // The root has no attributes of its own.
    if(file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    if(containerService.isContainer(file)) {
        // For buckets only the region is resolved here.
        final PathAttributes attributes = new PathAttributes();
        if(log.isDebugEnabled()) {
            log.debug(String.format("Read location for bucket %s", file));
        }
        attributes.setRegion(new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getLocation(file).getIdentifier());
        return attributes;
    }
    if(file.getType().contains(Path.Type.upload)) {
        // In-progress multipart upload: report bytes already uploaded as the size.
        final Write.Append append = new S3MultipartUploadService(session, new S3WriteFeature(session, acl), acl).append(file, new TransferStatus());
        if(append.append) {
            return new PathAttributes().withSize(append.offset);
        }
        throw new NotfoundException(file.getAbsolute());
    }
    try {
        PathAttributes attr;
        final Path bucket = containerService.getContainer(file);
        try {
            attr = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getVersionedObjectDetails(
                file.attributes().getVersionId(), bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
        }
        catch(ServiceException e) {
            switch(e.getResponseCode()) {
                case 405:
                    // HTTP 405 on a versioned HEAD/GET indicates a delete marker.
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Mark file %s as delete marker", file));
                    }
                    // Only DELETE method is allowed for delete markers
                    attr = new PathAttributes();
                    attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, Boolean.TRUE.toString()));
                    attr.setDuplicate(true);
                    return attr;
            }
            throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
        }
        if(StringUtils.isNotBlank(attr.getVersionId())) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Determine if %s is latest version for %s", attr.getVersionId(), file));
            }
            // Determine if latest version
            try {
                // Unversioned lookup returns the current version's id.
                final String latest = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getObjectDetails(
                    bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file))).getVersionId();
                if(null != latest) {
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Found later version %s for %s", latest, file));
                    }
                    // Duplicate if not latest version
                    attr.setDuplicate(!latest.equals(attr.getVersionId()));
                }
            }
            catch(ServiceException e) {
                final BackgroundException failure = new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
                if(failure instanceof NotfoundException) {
                    // No current (unversioned) object found — mark this version as duplicate.
                    attr.setDuplicate(true);
                }
                else {
                    throw failure;
                }
            }
        }
        return attr;
    }
    catch(NotfoundException e) {
        if(file.isDirectory()) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Search for common prefix %s", file));
            }
            // File may be marked as placeholder but no placeholder file exists. Check for common prefix returned.
            try {
                new S3ObjectListService(session, acl).list(file, new CancellingListProgressListener(), String.valueOf(Path.DELIMITER), 1);
            }
            catch(ListCanceledException l) {
                // Found common prefix
                return PathAttributes.EMPTY;
            }
            catch(NotfoundException n) {
                // No prefix either — re-throw the original not-found error.
                throw e;
            }
            // Found common prefix
            return PathAttributes.EMPTY;
        }
        throw e;
    }
}
/** Looking up attributes of a random, nonexistent key must raise NotfoundException. */
@Test(expected = NotfoundException.class)
public void testFindNotFound() throws Exception {
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path test = new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final S3AttributesFinderFeature f = new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session));
    f.find(test);
}
/**
 * Range containment check that tolerates mixed numeric types: when a direct
 * {@code contains} throws {@code ClassCastException}, both endpoints and the
 * probe value are widened to a common numeric type and the check is retried.
 *
 * @throws ClassCastException if no common numeric target type can be found
 */
public static boolean safeContains(final Range<Comparable<?>> range, final Comparable<?> endpoint) {
    try {
        return range.contains(endpoint);
    } catch (final ClassCastException ex) {
        final Comparable<?> upper = range.hasUpperBound() ? range.upperEndpoint() : null;
        final Comparable<?> lower = range.hasLowerBound() ? range.lowerEndpoint() : null;
        final Optional<Class<?>> targetType = getTargetNumericType(Arrays.asList(lower, upper, endpoint));
        if (!targetType.isPresent()) {
            // Not a numeric mismatch we can repair — surface the original failure.
            throw ex;
        }
        final Range<Comparable<?>> widenedRange = createTargetNumericTypeRange(range, targetType.get());
        return widenedRange.contains(parseNumberByClazz(endpoint.toString(), targetType.get()));
    }
}
/**
 * A Float-bounded range probed with a double literal exercises the
 * type-widening fallback path of safeContains.
 */
@Test
void assertSafeContainsForFloat() {
    Range<Comparable<?>> range = Range.closed(123.11F, 9999.123F);
    assertTrue(SafeNumberOperationUtils.safeContains(range, 510.12));
}
/**
 * Drives one state transition of the confirm-session flow: validates the
 * transition, executes the step, and on success advances the session state
 * and persists the authenticator/session.
 */
@Override
public AppResponse processAction(String flowType, BaseAction action, AppRequest request, AppSession appSession) throws FlowStateNotDefinedException, FlowNotDefinedException, SharedServiceClientException, NoSuchAlgorithmException, IOException {
    Flow flow = flowFactoryFactory.getFactory(flowType).getFlow(ConfirmSessionFlow.NAME);
    AbstractFlowStep flowStep = flow.validateStateTransition(stateValueOf(appSession.getState().toUpperCase()), action);
    if (flowStep == null) {
        // Transition not permitted from the session's current state.
        logger.error("nl.logius.digid.app.domain.shared.flow transition not allowed:{} - {} -> {}", flow.getClass(), appSession.getState(), action);
        return new NokResponse("nl.logius.digid.app.domain.shared.flow transition not allowed");
    }
    flowStep.setAppSession(appSession);
    if (flowStep.expectAppAuthenticator()) {
        flowStep.setAppAuthenticator(getAppAuthenticator(appSession));
    }
    AppResponse appResponse = flow.processState(flowStep, request);
    if (appResponse instanceof NokResponse || !flowStep.isValid()) {
        // Failed or invalid step: leave session state untouched.
        return appResponse;
    }
    appSession.setState(getStateName(flow.getNextState(stateValueOf(appSession.getState().toUpperCase()), action)));
    if (flowStep.getAppAuthenticator() != null) {
        appAuthenticatorService.save(flowStep.getAppAuthenticator());
        if (appSession.getDeviceName() == null) {
            // First time the device is known: copy identifying fields onto the session.
            appSession.setDeviceName(flowStep.getAppAuthenticator().getDeviceName());
            appSession.setAppCode(flowStep.getAppAuthenticator().getAppCode());
        }
    }
    appSessionService.save(appSession);
    return appResponse;
}
/**
 * Smoke test of the happy path: processAction with a valid step.
 * NOTE(review): the test has no assertions or mock verifications — the
 * returned response is unused. TODO: assert the expected response type/state
 * change so a regression actually fails this test.
 */
@Test
public void processActionOkResponseTest() throws FlowNotDefinedException, SharedServiceClientException, IOException, NoSuchAlgorithmException, FlowStateNotDefinedException {
    //given
    when(abstractFlowStep.isValid()).thenReturn(true);
    //when
    AppResponse response = confirmationFlowService.processAction("confirm", Action.CONFIRM, confirmRequest, appSession);
}
/**
 * Analyzes the statement; the boolean argument of the delegated overload is
 * fixed to {@code false} here (its exact meaning is defined by that overload
 * — presumably a describe/explain toggle; confirm at the two-arg variant).
 */
public Analysis analyze(Statement statement) {
    return analyze(statement, false);
}
@Test(enabled = false) // TODO: need to support widening conversion for numbers public void testInWithNumericTypes() { analyze("SELECT * FROM t1 WHERE 1 IN (1, 2, 3.5)"); }
/**
 * Parses a JSON validation response of the form {"errors":{key:message,...}}
 * into a ValidationResult. Non-String error messages are collected and
 * reported together as a RuntimeException.
 *
 * @param responseBody JSON body returned by the plugin; a null/empty JSON
 *                     object yields an empty (successful) result
 * @throws RuntimeException if the JSON is malformed or contains non-String
 *                          error messages
 */
@Override
public ValidationResult toValidationResult(String responseBody) {
    ValidationResult validationResult = new ValidationResult();
    ArrayList<String> exceptions = new ArrayList<>();
    try {
        Map result = (Map) GSON.fromJson(responseBody, Object.class);
        // "null" / empty body parses to null — treat as no errors.
        if (result == null) return validationResult;
        final Map<String, Object> errors = (Map<String, Object>) result.get("errors");
        if (errors != null) {
            for (Map.Entry<String, Object> entry : errors.entrySet()) {
                if (!(entry.getValue() instanceof String)) {
                    exceptions.add(String.format("Key: '%s' - The Json for Validation Request must contain a not-null error message of type String", entry.getKey()));
                } else {
                    validationResult.addError(new ValidationError(entry.getKey(), entry.getValue().toString()));
                }
            }
        }
        if (!exceptions.isEmpty()) {
            throw new RuntimeException(StringUtils.join(exceptions, ", "));
        }
        return validationResult;
    } catch (Exception e) {
        LOGGER.error("Error occurred while converting the Json to Validation Result. Error: {}. The Json received was '{}'.", e.getMessage(), responseBody);
        // Fix: preserve the original exception as the cause so the stack trace
        // is not lost when this is rethrown (previously only the message survived).
        throw new RuntimeException(String.format("Error occurred while converting the Json to Validation Result. Error: %s.", e.getMessage()), e);
    }
}
/**
 * Two error entries in the JSON must map to two ValidationErrors in order.
 * NOTE(review): the TaskConfig/TaskConfigProperty setup below is never passed
 * to the handler and appears unused — confirm and remove if so.
 */
@Test
public void shouldConvertJsonResponseToValidationResultWhenValidationFails() {
    String jsonResponse = "{\"errors\":{\"key1\":\"err1\",\"key2\":\"err2\"}}";
    TaskConfig configuration = new TaskConfig();
    TaskConfigProperty property = new TaskConfigProperty("URL", "http://foo");
    property.with(Property.SECURE, false);
    property.with(Property.REQUIRED, true);
    configuration.add(property);
    ValidationResult result = new JsonBasedTaskExtensionHandler_V1().toValidationResult(jsonResponse);
    assertThat(result.isSuccessful(), is(false));
    ValidationError error1 = result.getErrors().get(0);
    ValidationError error2 = result.getErrors().get(1);
    assertThat(error1.getKey(), is("key1"));
    assertThat(error1.getMessage(), is("err1"));
    assertThat(error2.getKey(), is("key2"));
    assertThat(error2.getMessage(), is("err2"));
}
/**
 * Issues CONFIG SET {@code param} {@code value} against the given cluster
 * node and blocks until the command completes.
 */
@Override
public void setConfig(RedisClusterNode node, String param, String value) {
    final RedisClient client = getEntry(node);
    final RFuture<Void> future =
            executorService.writeAsync(client, StringCodec.INSTANCE, RedisCommands.CONFIG_SET, param, value);
    // Synchronous API: wait for the async write to finish.
    syncFuture(future);
}
/** Smoke test: setting a config parameter on the first master must not throw. */
@Test
public void testSetConfig() {
    RedisClusterNode master = getFirstMaster();
    connection.setConfig(master, "timeout", "10");
}
/**
 * Fails when the subject is the very same instance (reference equality) as
 * {@code unexpected}; value-equal but distinct objects pass.
 */
public final void isNotSameInstanceAs(@Nullable Object unexpected) {
    if (actual == unexpected) {
        /*
         * We use actualCustomStringRepresentation() because it might be overridden to be better than
         * actual.toString()/unexpected.toString().
         */
        failWithoutActual(
            fact("expected not to be specific instance", actualCustomStringRepresentation()));
    }
}
/** Aliasing the same reference must trigger the not-same-instance failure. */
@Test
public void isNotSameInstanceAsFailureWithSameObject() {
    Object a = OBJECT_1;
    Object b = a; // same instance on purpose
    expectFailure.whenTesting().that(a).isNotSameInstanceAs(b);
    assertFailureKeys("expected not to be specific instance");
    assertFailureValue("expected not to be specific instance", "Object 1");
}
/**
 * Returns an iterator over the posting list for {@code term}, or an empty
 * iterator when the term is not indexed. The list is copied so the returned
 * iterator cannot mutate the underlying inverted file.
 */
@Override
public Iterator<Text> search(String term) {
    if (!invertedFile.containsKey(term)) {
        return Collections.emptyIterator();
    }
    final ArrayList<Text> snapshot = new ArrayList<>(invertedFile.get(term));
    return snapshot.iterator();
}
/**
 * Searching a term with no hits must yield an empty iterator.
 */
@Test
public void testSearchNoResult() {
    // Fixed: the log line previously said 'romantic' while the query was "find".
    System.out.println("search 'find'");
    Iterator<Relevance> hits = corpus.search(new BM25(), "find");
    // Fixed: assert emptiness via hasNext() instead of comparing against
    // Collections.emptyIterator() — that comparison only passed because the
    // JDK happens to return a shared singleton (an implementation detail).
    assertEquals(false, hits.hasNext());
}
/**
 * Convenience overload delegating to the four-argument variant with its
 * boolean fixed to {@code true} (presumably marking the upper bound inclusive
 * — confirm at the four-argument overload).
 */
public MemoryLRUCacheBytesIterator range(final String namespace, final Bytes from, final Bytes to) {
    return this.range(namespace, from, to, true);
}
/** peekNextKey must be idempotent: two consecutive peeks return the same key. */
@Test
public void shouldPeekNextKey() {
    final ThreadCache cache = setupThreadCache(0, 1, 10000L, false);
    final Bytes theByte = Bytes.wrap(new byte[]{0});
    final ThreadCache.MemoryLRUCacheBytesIterator iterator = cache.range(namespace, theByte, Bytes.wrap(new byte[]{1}));
    assertEquals(theByte, iterator.peekNextKey());
    // Second peek must not advance the iterator.
    assertEquals(theByte, iterator.peekNextKey());
}
/**
 * Verifies the CSRF state of a request against the value carried in the CSRF
 * header; requests exempt from checking are ignored. On mismatch an
 * AuthenticationException tagged with the login and JWT source is thrown.
 */
public void verifyState(HttpRequest request, @Nullable String csrfState, @Nullable String login) {
    if (!shouldRequestBeChecked(request)) {
        // Exempt request — nothing to verify.
        return;
    }
    final String failureCause = checkCsrf(csrfState, request.getHeader(CSRF_HEADER));
    if (failureCause == null) {
        return;
    }
    throw AuthenticationException.newBuilder()
        .setSource(Source.local(Method.JWT))
        .setLogin(login)
        .setMessage(failureCause)
        .build();
}
/** A null reference CSRF state must be rejected with an AuthenticationException. */
@Test
public void fail_with_AuthenticationException_when_state_is_null() {
    mockRequestCsrf(CSRF_STATE);
    mockPostJavaWsRequest();
    assertThatThrownBy(() -> underTest.verifyState(request, null, LOGIN))
        .hasMessage("Missing reference CSRF value")
        .isInstanceOf(AuthenticationException.class)
        .hasFieldOrPropertyWithValue("login", LOGIN)
        .hasFieldOrPropertyWithValue("source", Source.local(Method.JWT));
}
/**
 * Restores the URL/action saved in the session before authentication and
 * clears it; falls back to a redirect to {@code defaultUrl} when nothing was
 * saved.
 */
@Override
public HttpAction restore(final CallContext ctx, final String defaultUrl) {
    val webContext = ctx.webContext();
    val sessionStore = ctx.sessionStore();
    val optRequestedUrl = sessionStore.get(webContext, Pac4jConstants.REQUESTED_URL);
    HttpAction requestedAction = null;
    if (optRequestedUrl.isPresent()) {
        // Consume the stored value so it is only replayed once.
        sessionStore.set(webContext, Pac4jConstants.REQUESTED_URL, null);
        val requestedUrl = optRequestedUrl.get();
        if (requestedUrl instanceof String) {
            requestedAction = new FoundAction((String) requestedUrl);
        } else if (requestedUrl instanceof RedirectionAction) {
            requestedAction = (RedirectionAction) requestedUrl;
        }
    }
    if (requestedAction == null) {
        requestedAction = new FoundAction(defaultUrl);
    }
    LOGGER.debug("requestedAction: {}", requestedAction.getMessage());
    if (requestedAction instanceof FoundAction) {
        return HttpActionHelper.buildRedirectUrlAction(webContext, ((FoundAction) requestedAction).getLocation());
    } else {
        // NOTE(review): assumes any non-Found action stored here is an OkAction;
        // another RedirectionAction subtype would throw ClassCastException — confirm callers.
        return HttpActionHelper.buildFormPostContentAction(webContext, ((OkAction) requestedAction).getContent());
    }
}
/** A stored FoundAction is replayed as a redirect and removed from the session. */
@Test
public void testRestoreFoundAction() {
    val context = MockWebContext.create();
    val sessionStore = new MockSessionStore();
    sessionStore.set(context, Pac4jConstants.REQUESTED_URL, new FoundAction(PAC4J_URL));
    val action = handler.restore(new CallContext(context, sessionStore), LOGIN_URL);
    assertTrue(action instanceof FoundAction);
    assertEquals(PAC4J_URL, ((FoundAction) action).getLocation());
    // The stored URL must be consumed (single-use).
    assertFalse(sessionStore.get(context, Pac4jConstants.REQUESTED_URL).isPresent());
}
/**
 * Lazily creates and returns the shared scheduled executor. Synchronized on
 * the class to make the lazy initialization thread-safe.
 */
@NonNull
public static synchronized ScheduledExecutorService get() {
    if (executorService == null) {
        // corePoolSize is set to 10, but will only be created if needed.
        // ScheduledThreadPoolExecutor "acts as a fixed-sized pool using corePoolSize threads"
        // TODO consider also wrapping in ContextResettingExecutorService
        executorService = new ImpersonatingScheduledExecutorService(
            new ErrorLoggingScheduledThreadPoolExecutor(10,
                new NamingThreadFactory(
                    new ClassLoaderSanityThreadFactory(new DaemonThreadFactory()),
                    "jenkins.util.Timer")),
            ACL.SYSTEM2);
    }
    return executorService;
}
/**
 * Regression test for JENKINS-19622: a long-running task must not block
 * other scheduled tasks. task1 blocks until task2 (scheduled later) releases
 * it — only possible if both run concurrently.
 */
@Test
@Issue("JENKINS-19622")
public void timersArentBlocked() throws InterruptedException {
    final CountDownLatch startLatch = new CountDownLatch(1);
    final CountDownLatch stopLatch = new CountDownLatch(1);
    SafeTimerTask task1 = new SafeTimerTask() {
        @Override
        protected void doRun() throws Exception {
            startLatch.countDown();
            // Parks until task2 runs — deadlocks if tasks are serialized.
            stopLatch.await();
        }
    };
    SafeTimerTask task2 = new SafeTimerTask() {
        @Override
        protected void doRun() {
            stopLatch.countDown();
        }
    };
    Timer.get().schedule(task1, 1, TimeUnit.MILLISECONDS);
    startLatch.await();
    Timer.get().schedule(task2, 2, TimeUnit.MILLISECONDS);
    if (! stopLatch.await(10000, TimeUnit.MILLISECONDS)) {
        fail("Failed to run the two tasks simultaneously");
    }
}
/**
 * Builds an IPv4 prefix from a raw 32-bit address value and a prefix length.
 */
public static Ip4Prefix valueOf(int address, int prefixLength) {
    final Ip4Address ip = Ip4Address.valueOf(address);
    return new Ip4Prefix(ip, prefixLength);
}
/** A negative prefix length in the string form must be rejected. */
@Test(expected = IllegalArgumentException.class)
public void testInvalidValueOfStringNegativePrefixLengthIPv4() {
    Ip4Prefix ipPrefix;
    ipPrefix = Ip4Prefix.valueOf("1.2.3.4/-1");
}
/**
 * Static factory for a TimePeriod; each component is passed through
 * {@code unsignedInt} (presumably validating/narrowing the long to an
 * unsigned-int range — confirm at its definition).
 */
public static TimePeriod of(long months, long days, long millis) {
    return new TimePeriod(unsignedInt(months), unsignedInt(days), unsignedInt(millis));
}
/**
 * TimePeriod must interoperate with java.time plus/minus on LocalDateTime,
 * LocalDate (month/day parts) and LocalTime (millisecond part).
 */
@Test
void checkAddingToTemporalItems() {
    TimePeriod monthAndTwoDays = TimePeriod.of(1, 2, 0);
    TimePeriod threeMillis = TimePeriod.of(0, 0, 3);
    TimePeriod complexTimePeriod = TimePeriod.of(1, 2, 3);
    LocalDateTime localDateTime = LocalDateTime.of(2001, 2, 3, 4, 5, 6, 7_000_000);
    LocalDate localDate = LocalDate.of(2001, 2, 3);
    LocalTime localTime = LocalTime.of(4, 5, 6, 7_000_000);
    // Addition
    assertEquals(localDateTime.plusMonths(1).plusDays(2), localDateTime.plus(monthAndTwoDays));
    assertEquals(localDateTime.plus(3, MILLIS), localDateTime.plus(threeMillis));
    assertEquals(localDateTime.plusMonths(1).plusDays(2).plus(3, MILLIS), localDateTime.plus(complexTimePeriod));
    assertEquals(localDate.plusMonths(1).plusDays(2), localDate.plus(monthAndTwoDays));
    assertEquals(localTime.plus(3, MILLIS), localTime.plus(threeMillis));
    // Subtraction
    assertEquals(localDateTime.minusMonths(1).minusDays(2), localDateTime.minus(monthAndTwoDays));
    assertEquals(localDateTime.minus(3, MILLIS), localDateTime.minus(threeMillis));
    assertEquals(localDateTime.minusMonths(1).minusDays(2).minus(3, MILLIS), localDateTime.minus(complexTimePeriod));
    assertEquals(localDate.minusMonths(1).minusDays(2), localDate.minus(monthAndTwoDays));
    assertEquals(localTime.minus(3, MILLIS), localTime.minus(threeMillis));
}
/**
 * Returns the underlying byte array directly (no defensive copy); may be null.
 */
@Nullable
public byte[] getValue() {
    return mValue;
}
/**
 * Writing 0xFEFDFD as a 24-bit little-endian value must produce FD FD FE.
 * NOTE(review): the method name says SINT24 but the format used is
 * FORMAT_UINT24_LE — confirm intent and rename or switch format.
 */
@Test
public void setValue_SINT24() {
    final MutableData data = new MutableData(new byte[3]);
    data.setValue(0xfefdfd, Data.FORMAT_UINT24_LE, 0);
    assertArrayEquals(new byte[] { (byte) 0xFD, (byte) 0xFD, (byte) 0xFE } , data.getValue());
}
static void createCompactedTopic(String topicName, short partitions, short replicationFactor, Admin admin) { NewTopic topicDescription = TopicAdmin.defineTopic(topicName). compacted(). partitions(partitions). replicationFactor(replicationFactor). build(); CreateTopicsOptions args = new CreateTopicsOptions().validateOnly(false); try { admin.createTopics(singleton(topicDescription), args).values().get(topicName).get(); log.info("Created topic '{}'", topicName); } catch (InterruptedException e) { Thread.interrupted(); throw new ConnectException("Interrupted while attempting to create/find topic '" + topicName + "'", e); } catch (ExecutionException e) { Throwable cause = e.getCause(); if (cause instanceof TopicExistsException) { log.debug("Unable to create topic '{}' since it already exists.", topicName); return; } if (cause instanceof UnsupportedVersionException) { log.debug("Unable to create topic '{}' since the brokers do not support the CreateTopics API." + " Falling back to assume topic exists or will be auto-created by the broker.", topicName); return; } if (cause instanceof TopicAuthorizationException) { log.debug("Not authorized to create topic(s) '{}' upon the brokers." + " Falling back to assume topic(s) exist or will be auto-created by the broker.", topicName); return; } if (cause instanceof ClusterAuthorizationException) { log.debug("Not authorized to create topic '{}'." + " Falling back to assume topic exists or will be auto-created by the broker.", topicName); return; } if (cause instanceof InvalidConfigurationException) { throw new ConnectException("Unable to create topic '" + topicName + "': " + cause.getMessage(), cause); } if (cause instanceof TimeoutException) { // Timed out waiting for the operation to complete throw new ConnectException("Timed out while checking for or creating topic '" + topicName + "'." 
+ " This could indicate a connectivity issue, unavailable topic partitions, or if" + " this is your first use of the topic it may have taken too long to create.", cause); } throw new ConnectException("Error while attempting to create/find topic '" + topicName + "'", e); } }
/** Happy path: creation succeeds and the admin client is invoked exactly once. */
@Test
public void testCreateCompactedTopic() throws Exception {
    Map<String, KafkaFuture<Void>> values = Collections.singletonMap(TOPIC, future);
    when(future.get()).thenReturn(null);
    when(ctr.values()).thenReturn(values);
    when(admin.createTopics(any(), any())).thenReturn(ctr);
    MirrorUtils.createCompactedTopic(TOPIC, (short) 1, (short) 1, admin);
    verify(future).get();
    verify(ctr).values();
    verify(admin).createTopics(any(), any());
}
/**
 * Turns an incoming rule-engine message into a device RPC request. The
 * originator must be a device and the payload must carry "method" and
 * "params"; optional settings (oneway, persistence, request UUID, origin
 * service, expiration, retries) are read from message metadata. The RPC
 * response is forwarded on SUCCESS or wrapped as an error on failure.
 */
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
    JsonObject json = JsonParser.parseString(msg.getData()).getAsJsonObject();
    String tmp;
    if (msg.getOriginator().getEntityType() != EntityType.DEVICE) {
        ctx.tellFailure(msg, new RuntimeException("Message originator is not a device entity!"));
    } else if (!json.has("method")) {
        ctx.tellFailure(msg, new RuntimeException("Method is not present in the message!"));
    } else if (!json.has("params")) {
        ctx.tellFailure(msg, new RuntimeException("Params are not present in the message!"));
    } else {
        // Random request id when the payload does not supply one.
        int requestId = json.has("requestId") ? json.get("requestId").getAsInt() : random.nextInt();
        boolean restApiCall = msg.isTypeOf(TbMsgType.RPC_CALL_FROM_SERVER_TO_DEVICE);
        tmp = msg.getMetaData().getValue("oneway");
        boolean oneway = !StringUtils.isEmpty(tmp) && Boolean.parseBoolean(tmp);
        tmp = msg.getMetaData().getValue(DataConstants.PERSISTENT);
        boolean persisted = !StringUtils.isEmpty(tmp) && Boolean.parseBoolean(tmp);
        tmp = msg.getMetaData().getValue("requestUUID");
        UUID requestUUID = !StringUtils.isEmpty(tmp) ? UUID.fromString(tmp) : Uuids.timeBased();
        tmp = msg.getMetaData().getValue("originServiceId");
        String originServiceId = !StringUtils.isEmpty(tmp) ? tmp : null;
        tmp = msg.getMetaData().getValue(DataConstants.EXPIRATION_TIME);
        // Default expiration: now + configured timeout.
        long expirationTime = !StringUtils.isEmpty(tmp) ? Long.parseLong(tmp) : (System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(config.getTimeoutInSeconds()));
        tmp = msg.getMetaData().getValue(DataConstants.RETRIES);
        Integer retries = !StringUtils.isEmpty(tmp) ? Integer.parseInt(tmp) : null;
        String params = parseJsonData(json.get("params"));
        String additionalInfo = parseJsonData(json.get(DataConstants.ADDITIONAL_INFO));
        RuleEngineDeviceRpcRequest request = RuleEngineDeviceRpcRequest.builder()
            .oneway(oneway)
            .method(json.get("method").getAsString())
            .body(params)
            .tenantId(ctx.getTenantId())
            .deviceId(new DeviceId(msg.getOriginator().getId()))
            .requestId(requestId)
            .requestUUID(requestUUID)
            .originServiceId(originServiceId)
            .expirationTime(expirationTime)
            .retries(retries)
            .restApiCall(restApiCall)
            .persisted(persisted)
            .additionalInfo(additionalInfo)
            .build();
        ctx.getRpcService().sendRpcRequestToDevice(request, ruleEngineDeviceRpcResponse -> {
            if (ruleEngineDeviceRpcResponse.getError().isEmpty()) {
                // Success: forward the device response (or an empty JSON object).
                TbMsg next = ctx.newMsg(msg.getQueueName(), msg.getType(), msg.getOriginator(), msg.getCustomerId(), msg.getMetaData(), ruleEngineDeviceRpcResponse.getResponse().orElse(TbMsg.EMPTY_JSON_OBJECT));
                ctx.enqueueForTellNext(next, TbNodeConnectionType.SUCCESS);
            } else {
                // Failure: wrap the error name into the outgoing payload.
                TbMsg next = ctx.newMsg(msg.getQueueName(), msg.getType(), msg.getOriginator(), msg.getCustomerId(), msg.getMetaData(), wrap("error", ruleEngineDeviceRpcResponse.getError().get().name()));
                ctx.enqueueForTellFailure(next, ruleEngineDeviceRpcResponse.getError().get().name());
            }
        });
        // Ack immediately; the RPC response is delivered asynchronously above.
        ctx.ack(msg);
    }
}
/**
 * An RPC response carrying an error must be routed through
 * enqueueForTellFailure with the error name, and the input acked.
 */
@Test
public void givenRpcResponseWithError_whenOnMsg_thenTellFailure() {
    TbMsg outMsg = TbMsg.newMsg(TbMsgType.RPC_CALL_FROM_SERVER_TO_DEVICE, DEVICE_ID, TbMsgMetaData.EMPTY, TbMsg.EMPTY_JSON_OBJECT);
    given(ctxMock.getRpcService()).willReturn(rpcServiceMock);
    given(ctxMock.getTenantId()).willReturn(TENANT_ID);
    // TODO: replace deprecated method newMsg()
    given(ctxMock.newMsg(any(), any(String.class), any(), any(), any(), any())).willReturn(outMsg);
    willAnswer(invocation -> {
        // Simulate the async RPC callback with a NO_ACTIVE_CONNECTION error.
        Consumer<RuleEngineDeviceRpcResponse> consumer = invocation.getArgument(1);
        RuleEngineDeviceRpcResponse rpcResponseMock = mock(RuleEngineDeviceRpcResponse.class);
        given(rpcResponseMock.getError()).willReturn(Optional.of(RpcError.NO_ACTIVE_CONNECTION));
        consumer.accept(rpcResponseMock);
        return null;
    }).given(rpcServiceMock).sendRpcRequestToDevice(any(RuleEngineDeviceRpcRequest.class), any(Consumer.class));
    TbMsg msg = TbMsg.newMsg(TbMsgType.RPC_CALL_FROM_SERVER_TO_DEVICE, DEVICE_ID, TbMsgMetaData.EMPTY, MSG_DATA);
    node.onMsg(ctxMock, msg);
    then(ctxMock).should().enqueueForTellFailure(outMsg, RpcError.NO_ACTIVE_CONNECTION.name());
    then(ctxMock).should().ack(msg);
}
/**
 * Removes the object, delegating to the hash-aware overload. Throws
 * NullPointerException for a null argument (hashCode() is dereferenced),
 * matching the original behavior.
 */
@Override
public boolean remove(Object objectToRemove) {
    final int hash = objectToRemove.hashCode();
    return remove(objectToRemove, hash);
}
/** Removing one element (5) must leave every other populated element (0-4, 6-9) intact. */
@Test
public void testRemove() {
    final OAHashSet<Integer> set = new OAHashSet<>(8);
    populateSet(set, 10);
    final boolean removed = set.remove(5);
    assertTrue(removed);
    assertFalse("Element 5 should not be contained", set.contains(5));
    // Elements below the removed one are untouched.
    for (int i = 0; i < 5; i++) {
        final boolean contained = set.contains(i);
        assertTrue("Element " + i + " should be contained", contained);
    }
    // Elements above the removed one are untouched.
    for (int i = 6; i < 10; i++) {
        final boolean contained = set.contains(i);
        assertTrue("Element " + i + " should be contained", contained);
    }
}
/**
 * Handles a batch CreatePartitions request. Per-topic failures are captured
 * as ApiError results rather than aborting the batch; the resulting records
 * are applied atomically.
 */
ControllerResult<List<CreatePartitionsTopicResult>> createPartitions(
    ControllerRequestContext context,
    List<CreatePartitionsTopic> topics
) {
    List<ApiMessageAndVersion> records = BoundedList.newArrayBacked(MAX_RECORDS_PER_USER_OP);
    List<CreatePartitionsTopicResult> results = BoundedList.newArrayBacked(MAX_RECORDS_PER_USER_OP);
    for (CreatePartitionsTopic topic : topics) {
        ApiError apiError = ApiError.NONE;
        try {
            createPartitions(context, topic, records);
        } catch (ApiException e) {
            // Expected validation failures map directly onto API errors.
            apiError = ApiError.fromThrowable(e);
        } catch (Exception e) {
            // Unexpected failures are logged but still reported per topic.
            log.error("Unexpected createPartitions error for {}", topic, e);
            apiError = ApiError.fromThrowable(e);
        }
        results.add(new CreatePartitionsTopicResult().
            setName(topic.name()).
            setErrorCode(apiError.error().code()).
            setErrorMessage(apiError.message()));
    }
    // Atomic: either all partition records apply or none do.
    return ControllerResult.atomicOf(records, results);
}
/**
 * End-to-end createPartitions scenarios: valid increases, non-increasing
 * counts, unknown topics, inconsistent/short/invalid manual assignments, and
 * directory assignment of the newly added partition.
 */
@Test
public void testCreatePartitions() {
    ReplicationControlTestContext ctx = new ReplicationControlTestContext.Builder().build();
    ReplicationControlManager replicationControl = ctx.replicationControl;
    CreateTopicsRequestData request = new CreateTopicsRequestData();
    request.topics().add(new CreatableTopic().setName("foo").
        setNumPartitions(3).setReplicationFactor((short) 2));
    request.topics().add(new CreatableTopic().setName("bar").
        setNumPartitions(4).setReplicationFactor((short) 2));
    request.topics().add(new CreatableTopic().setName("quux").
        setNumPartitions(2).setReplicationFactor((short) 2));
    request.topics().add(new CreatableTopic().setName("foo2").
        setNumPartitions(2).setReplicationFactor((short) 2));
    // Brokers 0, 1, 3 registered (note: broker 2 deliberately missing).
    ctx.registerBrokersWithDirs(
        0, Collections.emptyList(),
        1, asList(Uuid.fromString("QMzamNQVQ7GnJK9DwQHG7Q"), Uuid.fromString("loDxEBLETdedNnQGOKKENw")),
        3, Collections.singletonList(Uuid.fromString("dxCDSgNjQvS4WuyqEKoCwA")));
    ctx.unfenceBrokers(0, 1, 3);
    ControllerRequestContext requestContext = anonymousContextFor(ApiKeys.CREATE_TOPICS);
    ControllerResult<CreateTopicsResponseData> createTopicResult = replicationControl.
        createTopics(requestContext, request, new HashSet<>(Arrays.asList("foo", "bar", "quux", "foo2")));
    ctx.replay(createTopicResult.records());
    // First batch: automatic assignments only.
    List<CreatePartitionsTopic> topics = new ArrayList<>();
    topics.add(new CreatePartitionsTopic().
        setName("foo").setCount(5).setAssignments(null));
    topics.add(new CreatePartitionsTopic().
        setName("bar").setCount(3).setAssignments(null));
    topics.add(new CreatePartitionsTopic().
        setName("baz").setCount(3).setAssignments(null));
    topics.add(new CreatePartitionsTopic().
        setName("quux").setCount(2).setAssignments(null));
    ControllerResult<List<CreatePartitionsTopicResult>> createPartitionsResult =
        replicationControl.createPartitions(requestContext, topics);
    assertEquals(asList(new CreatePartitionsTopicResult().
        setName("foo").
        setErrorCode(NONE.code()).
        setErrorMessage(null),
        new CreatePartitionsTopicResult().
        setName("bar").
        setErrorCode(INVALID_PARTITIONS.code()).
        setErrorMessage("The topic bar currently has 4 partition(s); 3 would not be an increase."),
        new CreatePartitionsTopicResult().
        setName("baz").
        setErrorCode(UNKNOWN_TOPIC_OR_PARTITION.code()).
        setErrorMessage(null),
        new CreatePartitionsTopicResult().
        setName("quux").
        setErrorCode(INVALID_PARTITIONS.code()).
        setErrorMessage("Topic already has 2 partition(s).")),
        createPartitionsResult.response());
    ctx.replay(createPartitionsResult.records());
    // Second batch: manual assignments with various invalid shapes.
    List<CreatePartitionsTopic> topics2 = new ArrayList<>();
    topics2.add(new CreatePartitionsTopic().
        setName("foo").setCount(6).setAssignments(singletonList(
            new CreatePartitionsAssignment().setBrokerIds(asList(1, 3)))));
    topics2.add(new CreatePartitionsTopic().
        setName("bar").setCount(5).setAssignments(singletonList(
            new CreatePartitionsAssignment().setBrokerIds(singletonList(1)))));
    topics2.add(new CreatePartitionsTopic().
        setName("quux").setCount(4).setAssignments(singletonList(
            new CreatePartitionsAssignment().setBrokerIds(asList(1, 0)))));
    topics2.add(new CreatePartitionsTopic().
        setName("foo2").setCount(3).setAssignments(singletonList(
            new CreatePartitionsAssignment().setBrokerIds(asList(2, 0)))));
    ControllerResult<List<CreatePartitionsTopicResult>> createPartitionsResult2 =
        replicationControl.createPartitions(requestContext, topics2);
    assertEquals(asList(new CreatePartitionsTopicResult().
        setName("foo").
        setErrorCode(NONE.code()).
        setErrorMessage(null),
        new CreatePartitionsTopicResult().
        setName("bar").
        setErrorCode(INVALID_REPLICA_ASSIGNMENT.code()).
        setErrorMessage("The manual partition assignment includes a partition " +
            "with 1 replica(s), but this is not consistent with previous " +
            "partitions, which have 2 replica(s)."),
        new CreatePartitionsTopicResult().
        setName("quux").
        setErrorCode(INVALID_REPLICA_ASSIGNMENT.code()).
        setErrorMessage("Attempted to add 2 additional partition(s), but only 1 assignment(s) were specified."),
        new CreatePartitionsTopicResult().
        setName("foo2").
        setErrorCode(INVALID_REPLICA_ASSIGNMENT.code()).
        setErrorMessage("The manual partition assignment includes broker 2, but " +
            "no such broker is registered.")),
        createPartitionsResult2.response());
    ctx.replay(createPartitionsResult2.records());
    // The new foo partition 5 lands on brokers 1 and 3 with these directories.
    assertArrayEquals(
        new Uuid[] {DirectoryId.UNASSIGNED, Uuid.fromString("dxCDSgNjQvS4WuyqEKoCwA")},
        replicationControl.getPartition(replicationControl.getTopicId("foo"), 5).directories);
}
/**
 * Evaluates whether data written with {@code writer} can be decoded with
 * {@code reader}, attaching a human-readable message to the result.
 *
 * @throws AvroRuntimeException if the checker reports an unknown compatibility state
 */
public static SchemaPairCompatibility checkReaderWriterCompatibility(final Schema reader, final Schema writer) {
    final SchemaCompatibilityResult compatibility =
        new ReaderWriterCompatibilityChecker().getCompatibility(reader, writer);
    final String message;
    switch (compatibility.getCompatibility()) {
        case INCOMPATIBLE: {
            message = String.format(
                "Data encoded using writer schema:%n%s%n" +
                "will or may fail to decode using reader schema:%n%s%n",
                writer.toString(true), reader.toString(true));
            break;
        }
        case COMPATIBLE: {
            message = READER_WRITER_COMPATIBLE_MESSAGE;
            break;
        }
        default:
            // Defensive: the enum only has the two states handled above.
            throw new AvroRuntimeException("Unknown compatibility: " + compatibility);
    }
    return new SchemaPairCompatibility(compatibility, reader, writer, message);
}
/**
 * STRING reader/writer must be compatible; INT reader vs STRING writer must
 * yield a TYPE_MISMATCH incompatibility with the expected message.
 */
@Test
void validatePrimitiveWriterSchema() {
    final Schema validReader = Schema.create(Schema.Type.STRING);
    final SchemaCompatibility.SchemaPairCompatibility validResult =
        new SchemaCompatibility.SchemaPairCompatibility(
            SchemaCompatibility.SchemaCompatibilityResult.compatible(),
            validReader,
            STRING_SCHEMA,
            SchemaCompatibility.READER_WRITER_COMPATIBLE_MESSAGE);
    final SchemaCompatibility.SchemaPairCompatibility invalidResult =
        new SchemaCompatibility.SchemaPairCompatibility(
            SchemaCompatibility.SchemaCompatibilityResult.incompatible(
                SchemaIncompatibilityType.TYPE_MISMATCH, INT_SCHEMA, STRING_SCHEMA,
                "reader type: INT not compatible with writer type: STRING",
                Collections.singletonList("")),
            INT_SCHEMA,
            STRING_SCHEMA,
            String.format(
                "Data encoded using writer schema:%n%s%n" +
                "will or may fail to decode using reader schema:%n%s%n",
                STRING_SCHEMA.toString(true), INT_SCHEMA.toString(true)));
    assertEquals(validResult, checkReaderWriterCompatibility(validReader, STRING_SCHEMA));
    assertEquals(invalidResult, checkReaderWriterCompatibility(INT_SCHEMA, STRING_SCHEMA));
}
/**
 * Serializes this quality-gate detail as a JSON string: the overall level,
 * one object per evaluated condition, and the ignored-conditions flag.
 */
public String toJson() {
    // Build the per-condition array first.
    JsonArray evaluatedConditions = new JsonArray();
    for (EvaluatedCondition evaluatedCondition : this.conditions) {
        evaluatedConditions.add(toJson(evaluatedCondition));
    }
    // Assemble the root object; property insertion order matters for the output.
    JsonObject root = new JsonObject();
    root.addProperty(FIELD_LEVEL, level.toString());
    root.add("conditions", evaluatedConditions);
    root.addProperty(FIELD_IGNORED_CONDITIONS, ignoredConditions);
    return root.toString();
}
@Test
public void verify_json_for_condition_on_leak_metric() {
  // Two evaluations (OK and ERROR) of the same condition on a "new_"-prefixed
  // (leak period) metric: the JSON must carry "period":1 and one entry per evaluation.
  String value = "actualValue";
  Condition condition = new Condition(new MetricImpl("1", "new_key1", "name1", Metric.MetricType.STRING),
      Condition.Operator.GREATER_THAN.getDbValue(), "errorTh");
  ImmutableList<EvaluatedCondition> evaluatedConditions = ImmutableList.of(
      new EvaluatedCondition(condition, Measure.Level.OK, value),
      new EvaluatedCondition(condition, Measure.Level.ERROR, value));

  String actualJson = new QualityGateDetailsData(Measure.Level.OK, evaluatedConditions, false).toJson();

  JsonAssert.assertJson(actualJson).isSimilarTo("{" +
    "\"level\":\"OK\"," +
    "\"conditions\":[" +
    "  {" +
    "    \"metric\":\"new_key1\"," +
    "    \"op\":\"GT\"," +
    "    \"error\":\"errorTh\"," +
    "    \"actual\":\"actualValue\"," +
    "    \"period\":1," +
    "    \"level\":\"OK\"" +
    "  }," +
    "  {" +
    "    \"metric\":\"new_key1\"," +
    "    \"op\":\"GT\"," +
    "    \"error\":\"errorTh\"," +
    "    \"actual\":\"actualValue\"," +
    "    \"period\":1," +
    "    \"level\":\"ERROR\"" +
    "  }" +
    "]" +
    "}");
}
public static String generateResourceId( String baseString, Pattern illegalChars, String replaceChar, int targetLength, DateTimeFormatter timeFormat) { // first, make sure the baseString, typically the test ID, is not empty checkArgument(baseString.length() != 0, "baseString cannot be empty."); // next, replace all illegal characters from given string with given replacement character String illegalCharsRemoved = illegalChars.matcher(baseString.toLowerCase()).replaceAll(replaceChar); // finally, append the date/time and return the substring that does not exceed the length limit LocalDateTime localDateTime = LocalDateTime.now(ZoneId.of(TIME_ZONE)); String timeAddOn = localDateTime.format(timeFormat); return illegalCharsRemoved.subSequence( 0, min(targetLength - timeAddOn.length() - 1, illegalCharsRemoved.length())) + replaceChar + localDateTime.format(timeFormat); }
@Test
public void testGenerateResourceIdShouldThrowErrorWithEmptyInput() {
  // An empty base string must be rejected up front with IllegalArgumentException.
  String testBaseString = "";
  assertThrows(
      IllegalArgumentException.class,
      () ->
          generateResourceId(
              testBaseString,
              ILLEGAL_INSTANCE_CHARS,
              REPLACE_INSTANCE_CHAR,
              MAX_INSTANCE_ID_LENGTH,
              TIME_FORMAT));
}
/**
 * Replaces table placeholders in {@code config} using metadata from {@code table},
 * with no option keys excluded from replacement.
 *
 * @param config configuration possibly containing table placeholders
 * @param table  catalog table supplying the replacement values
 * @return a configuration with the placeholders resolved
 */
public static ReadonlyConfig replaceTablePlaceholder(
        ReadonlyConfig config, CatalogTable table) {
    // Delegate with an empty exclusion list.
    return replaceTablePlaceholder(config, table, Collections.emptyList());
}
@Test
public void testSinkOptions() {
    // Placeholders in every sink option (database/schema/table names, key lists,
    // field lists — both string and array forms) must be resolved from the table.
    ReadonlyConfig config = createConfig();
    CatalogTable table = createTestTable();
    ReadonlyConfig newConfig = TablePlaceholder.replaceTablePlaceholder(config, table);
    Assertions.assertEquals("xyz_my-database_test", newConfig.get(DATABASE));
    Assertions.assertEquals("xyz_my-schema_test", newConfig.get(SCHEMA));
    Assertions.assertEquals("xyz_my-table_test", newConfig.get(TABLE));
    Assertions.assertEquals("f1,f2", newConfig.get(PRIMARY_KEY));
    Assertions.assertEquals("f3,f4", newConfig.get(UNIQUE_KEY));
    Assertions.assertEquals("f1,f2,f3,f4,f5", newConfig.get(FIELD_NAMES));
    Assertions.assertEquals(Arrays.asList("f1", "f2"), newConfig.get(PRIMARY_KEY_ARRAY));
    Assertions.assertEquals(Arrays.asList("f3", "f4"), newConfig.get(UNIQUE_KEY_ARRAY));
    Assertions.assertEquals(
            Arrays.asList("f1", "f2", "f3", "f4", "f5"), newConfig.get(FIELD_NAMES_ARRAY));
}
/**
 * Returns the interface name for the given invoker without any resource-name prefix.
 *
 * @param invoker the Dubbo invoker
 * @return the plain interface name
 */
public static String getInterfaceName(Invoker invoker) {
    // Delegate with prefix disabled.
    return getInterfaceName(invoker, false);
}
@Test
public void testGetInterfaceNameWithPrefix() throws NoSuchMethodException {
    // Build an invoker for DemoService with version/group parameters; verify the
    // resource name with the default provider/consumer prefixes, then with
    // custom prefixes injected through SentinelConfig.
    URL url = URL.valueOf("dubbo://127.0.0.1:2181")
            .addParameter(CommonConstants.VERSION_KEY, "1.0.0")
            .addParameter(CommonConstants.GROUP_KEY, "grp1")
            .addParameter(CommonConstants.INTERFACE_KEY, DemoService.class.getName());
    Invoker invoker = mock(Invoker.class);
    when(invoker.getUrl()).thenReturn(url);
    when(invoker.getInterface()).thenReturn(DemoService.class);

    //test with default prefix
    String resourceName = DubboUtils.getInterfaceName(invoker, DubboAdapterGlobalConfig.getDubboProviderResNamePrefixKey());
    assertEquals("dubbo:provider:com.alibaba.csp.sentinel.adapter.dubbo3.provider.DemoService", resourceName);
    resourceName = DubboUtils.getInterfaceName(invoker, DubboAdapterGlobalConfig.getDubboConsumerResNamePrefixKey());
    assertEquals("dubbo:consumer:com.alibaba.csp.sentinel.adapter.dubbo3.provider.DemoService", resourceName);

    //test with custom prefix
    SentinelConfig.setConfig(DubboAdapterGlobalConfig.DUBBO_PROVIDER_RES_NAME_PREFIX_KEY, "my:dubbo:provider:");
    SentinelConfig.setConfig(DubboAdapterGlobalConfig.DUBBO_CONSUMER_RES_NAME_PREFIX_KEY, "my:dubbo:consumer:");
    resourceName = DubboUtils.getInterfaceName(invoker, DubboAdapterGlobalConfig.getDubboProviderResNamePrefixKey());
    assertEquals("my:dubbo:provider:com.alibaba.csp.sentinel.adapter.dubbo3.provider.DemoService", resourceName);
    resourceName = DubboUtils.getInterfaceName(invoker, DubboAdapterGlobalConfig.getDubboConsumerResNamePrefixKey());
    assertEquals("my:dubbo:consumer:com.alibaba.csp.sentinel.adapter.dubbo3.provider.DemoService", resourceName);
}
/**
 * Executes an HTTP request with a per-request client configuration and returns
 * the deserialized result.
 *
 * @param url          target URL
 * @param config       per-request HTTP client configuration (e.g. timeouts)
 * @param header       request headers
 * @param query        query parameters
 * @param body         request body object
 * @param httpMethod   HTTP method name (e.g. "PUT")
 * @param responseType type the response body is deserialized into
 * @return the HTTP result wrapping status, headers, and deserialized body
 * @throws Exception if the request or deserialization fails
 */
public <T> HttpRestResult<T> exchange(String url, HttpClientConfig config, Header header, Query query,
        Object body, String httpMethod, Type responseType) throws Exception {
    // Bundle config/header/query/body into a single request entity, then delegate.
    RequestHttpEntity requestHttpEntity = new RequestHttpEntity(config, header, query, body);
    return execute(url, httpMethod, requestHttpEntity, responseType);
}
@Test
void testExchange() throws Exception {
    // Mock a 200 response with body "test" for a PUT and verify the template
    // surfaces status, (empty) headers, and deserialized body.
    when(requestClient.execute(any(), eq("PUT"), any())).thenReturn(mockResponse);
    when(mockResponse.getStatusCode()).thenReturn(200);
    when(mockResponse.getBody()).thenReturn(new ByteArrayInputStream("test".getBytes()));
    HttpClientConfig config = HttpClientConfig.builder().setConTimeOutMillis(1000).build();
    HttpRestResult<String> result = restTemplate.exchange("http://127.0.0.1:8848/nacos/test", config,
            Header.EMPTY, Query.EMPTY, new Object(), "PUT", String.class);
    assertTrue(result.ok());
    assertEquals(Header.EMPTY, result.getHeader());
    assertEquals("test", result.getData());
}
/**
 * Returns whether a repository field value matches a filter value.
 * A {@code null} filter imposes no constraint and matches anything.
 *
 * @param repoObj   value from the repository (may be null)
 * @param filterObj filter value, or null for "no filter"
 * @return true if the filter is null or equals the repository value
 */
static boolean fieldMatch(Object repoObj, Object filterObj) {
    // A null filter matches anything (no constraint on this field).
    if (filterObj == null) {
        return true;
    }
    // Compare from the non-null filter side so a null repository value
    // yields false instead of a NullPointerException.
    return filterObj.equals(repoObj);
}
@Test
public void testFieldMatchWithNonStringEqualObjectsShouldReturnTrue() {
    // Equal non-String objects (autoboxed Integers) must match.
    assertTrue(Utilities.fieldMatch(42, 42));
}
/**
 * Fails unless the map under test contains exactly the given key/value entry.
 * On failure the message distinguishes four diagnoses: the key is present with a
 * different value; a key with a matching toString() exists; the expected value is
 * present under other keys; or a value with a matching toString() exists.
 *
 * @param key   expected entry key (may be null)
 * @param value expected entry value (may be null)
 */
public final void containsEntry(@Nullable Object key, @Nullable Object value) {
  Map.Entry<@Nullable Object, @Nullable Object> entry = immutableEntry(key, value);
  checkNotNull(actual);
  if (!actual.entrySet().contains(entry)) {
    List<@Nullable Object> keyList = singletonList(key);
    List<@Nullable Object> valueList = singletonList(value);
    if (actual.containsKey(key)) {
      // Key present, value differs: report as an equality failure on get(key).
      Object actualValue = actual.get(key);
      /*
       * In the case of a null expected or actual value, clarify that the key *is* present and
       * *is* expected to be present. That is, get() isn't returning null to indicate that the key
       * is missing, and the user isn't making an assertion that the key is missing.
       */
      StandardSubjectBuilder check = check("get(%s)", key);
      if (value == null || actualValue == null) {
        check = check.withMessage("key is present but with a different value");
      }
      // See the comment on IterableSubject's use of failEqualityCheckForEqualsWithoutDescription.
      check.that(actualValue).failEqualityCheckForEqualsWithoutDescription(value);
    } else if (hasMatchingToStringPair(actual.keySet(), keyList)) {
      // A different key renders identically to the expected key: likely a type mismatch.
      failWithoutActual(
          fact("expected to contain entry", entry),
          fact("an instance of", objectToTypeName(entry)),
          simpleFact("but did not"),
          fact(
              "though it did contain keys",
              countDuplicatesAndAddTypeInfo(
                  retainMatchingToString(actual.keySet(), /* itemsToCheck= */ keyList))),
          fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
    } else if (actual.containsValue(value)) {
      // Expected value exists under other keys; list those keys in the message.
      Set<@Nullable Object> keys = new LinkedHashSet<>();
      for (Map.Entry<?, ?> actualEntry : actual.entrySet()) {
        if (Objects.equal(actualEntry.getValue(), value)) {
          keys.add(actualEntry.getKey());
        }
      }
      failWithoutActual(
          fact("expected to contain entry", entry),
          simpleFact("but did not"),
          fact("though it did contain keys with that value", keys),
          fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
    } else if (hasMatchingToStringPair(actual.values(), valueList)) {
      // A different value renders identically to the expected value: likely a type mismatch.
      failWithoutActual(
          fact("expected to contain entry", entry),
          fact("an instance of", objectToTypeName(entry)),
          simpleFact("but did not"),
          fact(
              "though it did contain values",
              countDuplicatesAndAddTypeInfo(
                  retainMatchingToString(actual.values(), /* itemsToCheck= */ valueList))),
          fact("full contents", actualCustomStringRepresentationForPackageMembersToCall()));
    } else {
      failWithActual("expected to contain entry", entry);
    }
  }
}
@Test
public void containsNullEntryKey() {
  // Null key present but mapped to null, not the expected value: the failure must
  // take the "key is present with a different value" path.
  Map<String, String> actual = Maps.newHashMap();
  actual.put(null, null);
  expectFailureWhenTestingThat(actual).containsEntry(null, "kluever");
  assertFailureValue("value of", "map.get(null)");
  assertFailureValue("expected", "kluever");
  assertFailureValue("but was", "null");
  assertFailureValue("map was", "{null=null}");
  assertThat(expectFailure.getFailure())
      .hasMessageThat()
      .contains(KEY_IS_PRESENT_WITH_DIFFERENT_VALUE);
}
/**
 * Produces a one-line summary of an exception: "Name: first message line" when a
 * message exists, otherwise "Name at topStackFrame", otherwise just the class name.
 *
 * @param e exception to summarize
 * @return short human-readable description
 */
static String prettifyException(Exception e) {
    String name = e.getClass().getSimpleName();
    String message = e.getLocalizedMessage();
    if (message != null) {
        // Keep only the first line of a possibly multi-line message.
        return name + ": " + message.split("\n")[0];
    }
    StackTraceElement[] trace = e.getStackTrace();
    if (trace != null && trace.length > 0) {
        return name + " at " + trace[0];
    }
    return name;
}
@Test
public void prettifyException() {
  // Only the outer exception's class name and message appear; the cause is omitted.
  String pretty = AdminHelper.prettifyException(
      new IllegalArgumentException("Something is wrong",
          new IllegalArgumentException("Something is illegal")));
  Assert.assertEquals(
      "IllegalArgumentException: Something is wrong", pretty);
}
/**
 * Handles a menu click by dispatching a menu-item-selected action through the
 * singleton dispatcher.
 *
 * @param item the menu item that was clicked
 */
public void itemClicked(MenuItem item) {
    Dispatcher.getInstance().menuItemSelected(item);
}
@Test
void testItemClicked() {
    // Register a mock store, click a menu item, and verify the store sees two
    // dispatched actions (menu click + content change).
    final var store = mock(Store.class);
    Dispatcher.getInstance().registerStore(store);
    final var view = new MenuView();
    view.itemClicked(MenuItem.PRODUCTS);
    // We should receive a menu click action and a content changed action
    verify(store, times(2)).onAction(any(Action.class));
}
/**
 * Reads a transaction from the payload buffer using the current protocol version.
 *
 * @param payload buffer positioned at the start of the serialized transaction
 * @return the parsed transaction
 * @throws BufferUnderflowException if the buffer ends before the transaction does
 * @throws ProtocolException        if the payload is malformed
 */
public static Transaction read(ByteBuffer payload) throws BufferUnderflowException, ProtocolException {
    return Transaction.read(payload, ProtocolVersion.CURRENT.intValue());
}
@Test
public void parseTransactionWithHugeDeclaredWitnessPushCountSize() {
    // A transaction declaring an absurd witness push count must fail with
    // BufferUnderflowException rather than attempt a huge allocation (OOM).
    Transaction tx = new HugeDeclaredSizeTransaction(false, false, true);
    byte[] serializedTx = tx.serialize();
    try {
        Transaction.read(ByteBuffer.wrap(serializedTx));
        fail("We expect BufferUnderflowException with the fixed code and OutOfMemoryError with the buggy code, so this is weird");
    } catch (BufferUnderflowException e) {
        //Expected, do nothing
    }
}
/**
 * Appends a method configuration to this builder, lazily creating the backing
 * list on first use.
 *
 * @param method method configuration to add
 * @return this builder, for chaining
 */
public ReferenceBuilder<T> addMethod(MethodConfig method) {
    if (methods == null) {
        // First method added: allocate the list.
        methods = new ArrayList<>();
    }
    methods.add(method);
    return getThis();
}
@Test
void addMethod() {
    // The added MethodConfig must be the sole element of the built config's method list.
    MethodConfig method = new MethodConfig();
    ReferenceBuilder builder = new ReferenceBuilder();
    builder.addMethod(method);
    Assertions.assertTrue(builder.build().getMethods().contains(method));
    Assertions.assertEquals(1, builder.build().getMethods().size());
}
/**
 * Generates the records and activation log message for a controller starting with an
 * empty metadata log. Appends the bootstrap records (metadata.version, SCRAM init, etc.),
 * wrapping them in a metadata transaction when supported, and — when migrations are
 * supported — records the ZK migration state (PRE_MIGRATION or NONE).
 *
 * @param activationMessageConsumer receives the assembled human-readable activation message
 * @param transactionStartOffset    offset of a partial in-flight bootstrap transaction, or -1 if none
 * @param zkMigrationEnabled        whether ZK-to-KRaft migration mode is enabled
 * @param bootstrapMetadata         bootstrap records and their source description
 * @param metadataVersion           the metadata.version governing feature support
 * @return the records to append; atomic when transactions are unsupported, non-atomic otherwise
 * @throws RuntimeException if a partial transaction exists but transactions are unsupported,
 *                          or if migrations are enabled but unsupported at this version
 */
static ControllerResult<Void> recordsForEmptyLog(
    Consumer<String> activationMessageConsumer,
    long transactionStartOffset,
    boolean zkMigrationEnabled,
    BootstrapMetadata bootstrapMetadata,
    MetadataVersion metadataVersion
) {
    StringBuilder logMessageBuilder = new StringBuilder("Performing controller activation. ");
    List<ApiMessageAndVersion> records = new ArrayList<>();
    if (transactionStartOffset != -1L) {
        // In-flight bootstrap transaction
        if (!metadataVersion.isMetadataTransactionSupported()) {
            throw new RuntimeException("Detected partial bootstrap records transaction at " +
                transactionStartOffset + ", but the metadata.version " + metadataVersion +
                " does not support transactions. Cannot continue.");
        } else {
            // Abort the partial transaction, then start a fresh one for re-appending.
            logMessageBuilder
                .append("Aborting partial bootstrap records transaction at offset ")
                .append(transactionStartOffset)
                .append(". Re-appending ")
                .append(bootstrapMetadata.records().size())
                .append(" bootstrap record(s) in new metadata transaction at metadata.version ")
                .append(metadataVersion)
                .append(" from bootstrap source '")
                .append(bootstrapMetadata.source())
                .append("'. ");
            records.add(new ApiMessageAndVersion(
                new AbortTransactionRecord().setReason("Controller failover"), (short) 0));
            records.add(new ApiMessageAndVersion(
                new BeginTransactionRecord().setName("Bootstrap records"), (short) 0));
        }
    } else {
        // No in-flight transaction
        logMessageBuilder
            .append("The metadata log appears to be empty. ")
            .append("Appending ")
            .append(bootstrapMetadata.records().size())
            .append(" bootstrap record(s) ");
        if (metadataVersion.isMetadataTransactionSupported()) {
            records.add(new ApiMessageAndVersion(
                new BeginTransactionRecord().setName("Bootstrap records"), (short) 0));
            logMessageBuilder.append("in metadata transaction ");
        }
        logMessageBuilder
            .append("at metadata.version ")
            .append(metadataVersion)
            .append(" from bootstrap source '")
            .append(bootstrapMetadata.source())
            .append("'. ");
    }

    // If no records have been replayed, we need to write out the bootstrap records.
    // This will include the new metadata.version, as well as things like SCRAM
    // initialization, etc.
    records.addAll(bootstrapMetadata.records());

    if (metadataVersion.isMigrationSupported()) {
        if (zkMigrationEnabled) {
            logMessageBuilder.append("Putting the controller into pre-migration mode. No metadata updates " +
                "will be allowed until the ZK metadata has been migrated. ");
            records.add(ZkMigrationState.PRE_MIGRATION.toRecord());
        } else {
            logMessageBuilder.append("Setting the ZK migration state to NONE since this is a de-novo " +
                "KRaft cluster. ");
            records.add(ZkMigrationState.NONE.toRecord());
        }
    } else {
        if (zkMigrationEnabled) {
            throw new RuntimeException("The bootstrap metadata.version " + bootstrapMetadata.metadataVersion() +
                " does not support ZK migrations. Cannot continue with ZK migrations enabled.");
        }
    }

    activationMessageConsumer.accept(logMessageBuilder.toString().trim());
    if (metadataVersion.isMetadataTransactionSupported()) {
        // Close the transaction; the batch need not be appended atomically.
        records.add(new ApiMessageAndVersion(new EndTransactionRecord(), (short) 0));
        return ControllerResult.of(records, null);
    } else {
        return ControllerResult.atomicOf(records, null);
    }
}
@Test
public void testActivationMessageForEmptyLog() {
    // Exercises every empty-log activation path: pre-migration-support versions,
    // migration-supporting versions with migration on/off, transaction-supporting
    // versions, aborted partial transactions, and the two failure modes.
    ControllerResult<Void> result;

    result = ActivationRecordsGenerator.recordsForEmptyLog(
        logMsg -> assertEquals("Performing controller activation. The metadata log appears to be empty. " +
            "Appending 1 bootstrap record(s) at metadata.version 3.0-IV1 from bootstrap source 'test'.", logMsg),
        -1L,
        false,
        BootstrapMetadata.fromVersion(MetadataVersion.MINIMUM_BOOTSTRAP_VERSION, "test"),
        MetadataVersion.MINIMUM_KRAFT_VERSION
    );
    assertTrue(result.isAtomic());
    assertEquals(1, result.records().size());

    result = ActivationRecordsGenerator.recordsForEmptyLog(
        logMsg -> assertEquals("Performing controller activation. The metadata log appears to be empty. " +
            "Appending 1 bootstrap record(s) at metadata.version 3.4-IV0 from bootstrap " +
            "source 'test'. Setting the ZK migration state to NONE since this is a de-novo KRaft cluster.", logMsg),
        -1L,
        false,
        BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_4_IV0, "test"),
        MetadataVersion.IBP_3_4_IV0
    );
    assertTrue(result.isAtomic());
    assertEquals(2, result.records().size());

    result = ActivationRecordsGenerator.recordsForEmptyLog(
        logMsg -> assertEquals("Performing controller activation. The metadata log appears to be empty. " +
            "Appending 1 bootstrap record(s) at metadata.version 3.4-IV0 from bootstrap " +
            "source 'test'. Putting the controller into pre-migration mode. No metadata updates will be allowed " +
            "until the ZK metadata has been migrated.", logMsg),
        -1L,
        true,
        BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_4_IV0, "test"),
        MetadataVersion.IBP_3_4_IV0
    );
    assertTrue(result.isAtomic());
    assertEquals(2, result.records().size());

    assertEquals(
        "The bootstrap metadata.version 3.3-IV2 does not support ZK migrations. Cannot continue with ZK migrations enabled.",
        assertThrows(RuntimeException.class, () ->
            ActivationRecordsGenerator.recordsForEmptyLog(
                logMsg -> fail(),
                -1L,
                true,
                BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_3_IV2, "test"),
                MetadataVersion.IBP_3_3_IV2
            )).getMessage()
    );

    result = ActivationRecordsGenerator.recordsForEmptyLog(
        logMsg -> assertEquals("Performing controller activation. The metadata log appears to be empty. " +
            "Appending 1 bootstrap record(s) in metadata transaction at metadata.version 3.6-IV1 from bootstrap " +
            "source 'test'. Setting the ZK migration state to NONE since this is a de-novo KRaft cluster.", logMsg),
        -1L,
        false,
        BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_6_IV1, "test"),
        MetadataVersion.IBP_3_6_IV1
    );
    assertFalse(result.isAtomic());
    assertEquals(4, result.records().size());

    result = ActivationRecordsGenerator.recordsForEmptyLog(
        logMsg -> assertEquals("Performing controller activation. The metadata log appears to be empty. " +
            "Appending 1 bootstrap record(s) in metadata transaction at metadata.version 3.6-IV1 from bootstrap " +
            "source 'test'. Putting the controller into pre-migration mode. No metadata updates will be allowed " +
            "until the ZK metadata has been migrated.", logMsg),
        -1L,
        true,
        BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_6_IV1, "test"),
        MetadataVersion.IBP_3_6_IV1
    );
    assertFalse(result.isAtomic());
    assertEquals(4, result.records().size());

    result = ActivationRecordsGenerator.recordsForEmptyLog(
        logMsg -> assertEquals("Performing controller activation. Aborting partial bootstrap records " +
            "transaction at offset 0. Re-appending 1 bootstrap record(s) in new metadata transaction at " +
            "metadata.version 3.6-IV1 from bootstrap source 'test'. Setting the ZK migration state to NONE " +
            "since this is a de-novo KRaft cluster.", logMsg),
        0L,
        false,
        BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_6_IV1, "test"),
        MetadataVersion.IBP_3_6_IV1
    );
    assertFalse(result.isAtomic());
    assertEquals(5, result.records().size());

    result = ActivationRecordsGenerator.recordsForEmptyLog(
        logMsg -> assertEquals("Performing controller activation. Aborting partial bootstrap records " +
            "transaction at offset 0. Re-appending 1 bootstrap record(s) in new metadata transaction at " +
            "metadata.version 3.6-IV1 from bootstrap source 'test'. Putting the controller into pre-migration " +
            "mode. No metadata updates will be allowed until the ZK metadata has been migrated.", logMsg),
        0L,
        true,
        BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_6_IV1, "test"),
        MetadataVersion.IBP_3_6_IV1
    );
    assertFalse(result.isAtomic());
    assertEquals(5, result.records().size());

    assertEquals(
        "Detected partial bootstrap records transaction at 0, but the metadata.version 3.6-IV0 does not " +
            "support transactions. Cannot continue.",
        assertThrows(RuntimeException.class, () ->
            ActivationRecordsGenerator.recordsForEmptyLog(
                logMsg -> assertEquals("", logMsg),
                0L,
                true,
                BootstrapMetadata.fromVersion(MetadataVersion.IBP_3_6_IV0, "test"),
                MetadataVersion.IBP_3_6_IV0
            )).getMessage()
    );
}
/**
 * Writes character data after running the non-XML-character filter over the range.
 * NOTE(review): the filter appears to sanitize {@code text} in place before the
 * same array is forwarded to the underlying writer — confirm against the filterer.
 *
 * @param text  character buffer to write from
 * @param start offset of the first character to write
 * @param len   number of characters to write
 * @throws XMLStreamException if the underlying writer fails
 */
@Override
public void writeCharacters(char[] text, int start, int len) throws XMLStreamException {
    nonXmlCharFilterer.filter(text, start, len);
    writer.writeCharacters(text, start, len);
}
@Test
public void testWriteCharacters1Arg() throws XMLStreamException {
    // The String overload must pass the filtered value to the wrapped writer.
    filteringXmlStreamWriter.writeCharacters("value");
    verify(xmlStreamWriterMock).writeCharacters("filteredValue");
}
/**
 * Polls up to {@code count} tail elements from the first non-empty of the named
 * queues, blocking up to {@code duration}; delegates to the wrapped blocking queue.
 *
 * @param duration   maximum time to wait for an element
 * @param count      maximum number of elements to take
 * @param queueNames queues to poll, in priority order
 * @return mapping of queue name to the elements taken from it
 * @throws InterruptedException if interrupted while waiting
 */
@Override
public Map<String, List<V>> pollLastFromAny(Duration duration, int count, String... queueNames) throws InterruptedException {
    return blockingQueue.pollLastFromAny(duration, count, queueNames);
}
@Test
public void testPollLastFromAny() throws InterruptedException {
    // Elements are pushed onto three deques after a 3s delay; pollLastFromAny must
    // block until then and take from the first named queue that has data.
    final RBlockingDeque<Integer> queue1 = redisson.getBlockingDeque("deque:pollany");
    Executors.newSingleThreadScheduledExecutor().schedule(() -> {
        RBlockingDeque<Integer> queue2 = redisson.getBlockingDeque("deque:pollany1");
        RBlockingDeque<Integer> queue3 = redisson.getBlockingDeque("deque:pollany2");
        try {
            queue3.put(2);
            queue1.put(1);
            queue2.put(3);
        } catch (InterruptedException e) {
            Assertions.fail();
        }
    }, 3, TimeUnit.SECONDS);

    long s = System.currentTimeMillis();
    int l = queue1.pollLastFromAny(4, TimeUnit.SECONDS, "deque:pollany1", "deque:pollany2");

    assertThat(l).isEqualTo(2);
    // Verify the call actually blocked until the producer ran (~3s).
    assertThat(System.currentTimeMillis() - s).isGreaterThan(2000);
}
/**
 * Not supported: the cache exposes no pre-shutdown event, so the entry cannot be
 * scheduled for removal on exit.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public void deleteOnExit() {
    // there exists no pre-CacheShutdown event, so unable to remove the entry
    throw new UnsupportedOperationException("Not implemented");
}
// deleteOnExit is unsupported on cache-backed files and must throw.
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testDeleteOnExit() {
    fs.getFile("nonsuch.txt").deleteOnExit();
}
/**
 * Replaces this collection's contents from a UI attribute map: each entry becomes a
 * test artifact, a build artifact, or a pluggable artifact depending on its declared
 * type. A null {@code attributes} simply clears the collection. For pluggable
 * artifacts, per-key configuration is parsed from metadata maps (honoring the
 * isSecure flag) unless a plugin id is supplied, in which case raw values are stored.
 *
 * @param attributes a {@code List<Map>} of artifact attribute maps, or null
 */
@Override
public void setConfigAttributes(Object attributes) {
    clear();
    if (attributes == null) {
        return;
    }
    List<Map> attrList = (List<Map>) attributes;
    for (Map attrMap : attrList) {
        String type = (String) attrMap.get("artifactTypeValue");
        if (TestArtifactConfig.TEST_PLAN_DISPLAY_NAME.equals(type) || BuildArtifactConfig.ARTIFACT_PLAN_DISPLAY_NAME.equals(type)) {
            String source = (String) attrMap.get(BuiltinArtifactConfig.SRC);
            String destination = (String) attrMap.get(BuiltinArtifactConfig.DEST);
            // Skip rows the user left entirely blank.
            if (source.trim().isEmpty() && destination.trim().isEmpty()) {
                continue;
            }
            if (TestArtifactConfig.TEST_PLAN_DISPLAY_NAME.equals(type)) {
                this.add(new TestArtifactConfig(source, destination));
            } else {
                this.add(new BuildArtifactConfig(source, destination));
            }
        } else {
            // Pluggable artifact: create it first, then attach its configuration.
            String artifactId = (String) attrMap.get(PluggableArtifactConfig.ID);
            String storeId = (String) attrMap.get(PluggableArtifactConfig.STORE_ID);
            String pluginId = (String) attrMap.get("pluginId");
            Map<String, Object> userSpecifiedConfiguration = (Map<String, Object>) attrMap.get("configuration");
            PluggableArtifactConfig pluggableArtifactConfig = new PluggableArtifactConfig(artifactId, storeId);
            this.add(pluggableArtifactConfig);
            // NOTE(review): this `return` exits the whole method, silently dropping any
            // remaining attribute entries — a `continue` looks intended; confirm.
            if (userSpecifiedConfiguration == null) {
                return;
            }

            if (StringUtils.isBlank(pluginId)) {
                // No plugin id: values arrive wrapped in metadata maps carrying an
                // isSecure flag; store secure values encrypted.
                Configuration configuration = pluggableArtifactConfig.getConfiguration();

                for (String key : userSpecifiedConfiguration.keySet()) {
                    Map<String, String> configurationMetadata = (Map<String, String>) userSpecifiedConfiguration.get(key);
                    if (configurationMetadata != null) {
                        boolean isSecure = Boolean.parseBoolean(configurationMetadata.get("isSecure"));
                        if (configuration.getProperty(key) == null) {
                            configuration.addNewConfiguration(key, isSecure);
                        }

                        if (isSecure) {
                            configuration.getProperty(key).setEncryptedValue(new EncryptedConfigurationValue(configurationMetadata.get("value")));
                        } else {
                            configuration.getProperty(key).setConfigurationValue(new ConfigurationValue(configurationMetadata.get("value")));
                        }
                    }
                }
            } else {
                // Plugin id supplied: values are plain and stored unencrypted.
                for (Map.Entry<String, Object> configuration : userSpecifiedConfiguration.entrySet()) {
                    pluggableArtifactConfig.getConfiguration().addNewConfigurationWithValue(configuration.getKey(), String.valueOf(configuration.getValue()), false);
                }
            }
        }
    }
}
@Test
public void shouldClearAllArtifactsWhenTheMapIsNull() {
    // Passing null must clear existing artifacts rather than throw or keep them.
    ArtifactTypeConfigs artifactTypeConfigs = new ArtifactTypeConfigs();
    artifactTypeConfigs.add(new BuildArtifactConfig("src", "dest"));

    artifactTypeConfigs.setConfigAttributes(null);

    assertThat(artifactTypeConfigs.size(), is(0));
}
/**
 * Decides whether a class should be loaded by the isolated plugin classloader.
 * A name is loaded by the parent only when it matches the exclusion pattern
 * without being explicitly re-included.
 *
 * @param name fully-qualified class (or package) name
 * @return true if the class should be loaded in isolation
 */
public static boolean shouldLoadInIsolation(String name) {
    // De Morgan form of: not (excluded and not re-included).
    return !EXCLUDE.matcher(name).matches() || INCLUDE.matcher(name).matches();
}
@Test
public void testKafkaDependencyClasses() {
    // Kafka client/common classes are framework dependencies and must always be
    // loaded by the parent classloader, never in plugin isolation.
    assertFalse(PluginUtils.shouldLoadInIsolation("org.apache.kafka.common."));
    assertFalse(PluginUtils.shouldLoadInIsolation(
            "org.apache.kafka.common.config.AbstractConfig")
    );
    assertFalse(PluginUtils.shouldLoadInIsolation(
            "org.apache.kafka.common.config.ConfigDef$Type")
    );
    assertFalse(PluginUtils.shouldLoadInIsolation(
            "org.apache.kafka.common.serialization.Deserializer")
    );
    assertFalse(PluginUtils.shouldLoadInIsolation(
            "org.apache.kafka.clients.producer.ProducerConfig")
    );
    assertFalse(PluginUtils.shouldLoadInIsolation(
            "org.apache.kafka.clients.consumer.ConsumerConfig")
    );
    assertFalse(PluginUtils.shouldLoadInIsolation(
            "org.apache.kafka.clients.admin.KafkaAdminClient")
    );
}
/**
 * Validates that {@code value} conforms to {@code schema}, without a field name
 * for error-message context.
 *
 * @param schema schema to validate against
 * @param value  value to validate
 */
public static void validateValue(Schema schema, Object value) {
    validateValue(null, schema, value);
}
@Test
public void testValidateValueMismatchDouble() {
    // A float is not accepted where a FLOAT64 (double) is required.
    assertThrows(DataException.class,
        () -> ConnectSchema.validateValue(Schema.FLOAT64_SCHEMA, 1.f));
}
/**
 * Parses a raw User-Agent header string into a structured {@link UserAgent}.
 *
 * @param userAgentString the User-Agent header value
 * @return the parsed user agent information
 */
public static UserAgent parse(String userAgentString) {
    return UserAgentParser.parse(userAgentString);
}
@Test
public void parseAlipayTest() {
    // An Alipay in-app UA on Android must be detected as the Alipay browser with
    // the AlipayClient version, Webkit engine, and mobile Android platform.
    final String uaString = "Mozilla/5.0 (Linux; U; Android 7.0; zh-CN; FRD-AL00 Build/HUAWEIFRD-AL00) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/40.0.2214.89 UCBrowser/11.3.8.909 UWS/2.10.2.5 Mobile Safari/537.36 UCBS/2.10.2.5 Nebula AlipayDefined(nt:WIFI,ws:360|0|3.0) AliApp(AP/10.0.18.062203) AlipayClient/10.0.18.062203 Language/zh-Hans useStatusBar/true";

    final UserAgent ua = UserAgentUtil.parse(uaString);
    assertEquals("Alipay", ua.getBrowser().toString());
    assertEquals("10.0.18.062203", ua.getVersion());
    assertEquals("Webkit", ua.getEngine().toString());
    assertEquals("537.36", ua.getEngineVersion());
    assertEquals("Android", ua.getOs().toString());
    assertEquals("7.0", ua.getOsVersion());
    assertEquals("Android", ua.getPlatform().toString());
    assertTrue(ua.isMobile());
}
/**
 * Creates a kubernetes port: validates the port and its identifiers, persists it
 * in the network store, and logs the creation.
 *
 * @param port port to create; must be non-null with non-empty port and network IDs
 * @throws NullPointerException     if {@code port} is null
 * @throws IllegalArgumentException if the port ID or network ID is null/empty
 */
@Override
public void createPort(K8sPort port) {
    checkNotNull(port, ERR_NULL_PORT);
    checkArgument(!Strings.isNullOrEmpty(port.portId()), ERR_NULL_PORT_ID);
    checkArgument(!Strings.isNullOrEmpty(port.networkId()), ERR_NULL_PORT_NET_ID);

    k8sNetworkStore.createPort(port);
    log.info(String.format(MSG_PORT, port.portId(), MSG_CREATED));
}
// A null port must be rejected by the precondition check.
@Test(expected = NullPointerException.class)
public void testCreateNullPort() {
    target.createPort(null);
}
/**
 * Resolves the effective TLS client-authentication mode. The newer string-valued
 * config takes precedence; the deprecated boolean config is honored (with a
 * deprecation warning) only when the newer one was not explicitly supplied.
 *
 * @return the resolved client authentication mode
 */
public ClientAuth getClientAuth() {
    String clientAuth = getString(SSL_CLIENT_AUTHENTICATION_CONFIG);
    if (originals().containsKey(SSL_CLIENT_AUTH_CONFIG)) {
        if (originals().containsKey(SSL_CLIENT_AUTHENTICATION_CONFIG)) {
            // Both supplied: the newer config wins; warn about the deprecated one.
            log.warn(
                "The {} configuration is deprecated. Since a value has been supplied for the {} "
                    + "configuration, that will be used instead",
                SSL_CLIENT_AUTH_CONFIG,
                SSL_CLIENT_AUTHENTICATION_CONFIG
            );
        } else {
            // Only the deprecated boolean supplied: map true -> REQUIRED, false -> NONE.
            log.warn(
                "The configuration {} is deprecated and should be replaced with {}",
                SSL_CLIENT_AUTH_CONFIG,
                SSL_CLIENT_AUTHENTICATION_CONFIG
            );
            clientAuth = getBoolean(SSL_CLIENT_AUTH_CONFIG)
                ? SSL_CLIENT_AUTHENTICATION_REQUIRED
                : SSL_CLIENT_AUTHENTICATION_NONE;
        }
    }

    return getClientAuth(clientAuth);
}
@Test
public void shouldResolveClientAuthenticationNone() {
    // Given: only the newer string-valued config, set to NONE.
    final KsqlRestConfig config = new KsqlRestConfig(ImmutableMap.<String, Object>builder()
        .put(KsqlRestConfig.SSL_CLIENT_AUTHENTICATION_CONFIG,
            KsqlRestConfig.SSL_CLIENT_AUTHENTICATION_NONE)
        .build()
    );

    // When:
    final ClientAuth clientAuth = config.getClientAuth();

    // Then:
    assertThat(clientAuth, is(ClientAuth.NONE));
}
/**
 * Hash code derived solely from the backing wrapper map, keeping it consistent
 * with equality on the wrapped contents.
 */
@Override
public int hashCode() {
    return Objects.hashCode(wrapperMap);
}
@Test
public void testHashCode() {
    // Maps keyed by different CharSequence implementations with equal contents
    // must produce equal hash codes.
    CharSequenceMap<String> map1 = CharSequenceMap.create();
    map1.put(new StringBuilder("key"), "value");

    CharSequenceMap<String> map2 = CharSequenceMap.create();
    map2.put("key", "value");

    assertThat(map1.hashCode()).isEqualTo(map2.hashCode());
}
/**
 * Returns all menus, unfiltered, straight from the mapper.
 *
 * @return every menu record
 */
@Override
public List<MenuDO> getMenuList() {
    return menuMapper.selectList();
}
@Test
public void testGetMenuList() {
    // mock 数据 (seed one matching menu, plus rows that fail the status or name filter)
    MenuDO menuDO = randomPojo(MenuDO.class, o -> o.setName("芋艿").setStatus(CommonStatusEnum.ENABLE.getStatus()));
    menuMapper.insert(menuDO);
    // 测试 status 不匹配 (status mismatch)
    menuMapper.insert(cloneIgnoreId(menuDO, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
    // 测试 name 不匹配 (name mismatch)
    menuMapper.insert(cloneIgnoreId(menuDO, o -> o.setName("艿")));
    // 准备参数 (prepare the filter: name substring + enabled status)
    MenuListReqVO reqVO = new MenuListReqVO().setName("芋").setStatus(CommonStatusEnum.ENABLE.getStatus());

    // 调用 (invoke)
    List<MenuDO> result = menuService.getMenuList(reqVO);
    // 断言 (only the fully matching record is returned)
    assertEquals(1, result.size());
    assertPojoEquals(menuDO, result.get(0));
}
/**
 * Appends a registry configuration to this builder, lazily creating the backing
 * list on first use.
 *
 * @param registry registry configuration to add
 * @return this builder, for chaining
 */
public B addRegistry(RegistryConfig registry) {
    if (registries == null) {
        // First registry added: allocate the list.
        registries = new ArrayList<>();
    }
    registries.add(registry);
    return getThis();
}
@Test
void addRegistry() {
    // The single added registry must be both the list's sole element and the
    // default registry of the built config.
    RegistryConfig registryConfig = new RegistryConfig();
    InterfaceBuilder builder = new InterfaceBuilder();
    builder.addRegistry(registryConfig);

    Assertions.assertEquals(1, builder.build().getRegistries().size());
    Assertions.assertSame(registryConfig, builder.build().getRegistries().get(0));
    Assertions.assertSame(registryConfig, builder.build().getRegistry());
}
/**
 * Returns the mapping from the requested hostnames to their node attributes, as
 * maintained by the RM's NodeAttributesManager.
 *
 * @param request carries the hostnames to look up (per the accompanying tests,
 *                null or empty hostnames return the mapping for all nodes —
 *                semantics live in the attributes manager)
 * @return response wrapping the node-to-attributes mapping
 * @throws YarnException on YARN-level failure
 * @throws IOException   on communication failure
 */
@Override
public GetNodesToAttributesResponse getNodesToAttributes(
    GetNodesToAttributesRequest request) throws YarnException, IOException {
  NodeAttributesManager attributesManager = rmContext.getNodeAttributesManager();
  GetNodesToAttributesResponse response = GetNodesToAttributesResponse
      .newInstance(
          attributesManager.getNodesToAttributes(request.getHostNames()));
  return response;
}
@Test(timeout = 120000)
public void testGetNodesToAttributes() throws IOException, YarnException {
  // Seed two nodes with centralized and distributed attributes, then query the
  // client RPC with null, a specific host, an empty set, and an unknown host.
  Configuration newConf = NodeAttributeTestUtils.getRandomDirConf(null);
  MockRM rm = new MockRM(newConf) {
    protected ClientRMService createClientRMService() {
      return new ClientRMService(this.rmContext, scheduler, this.rmAppManager,
          this.applicationACLsManager, this.queueACLsManager,
          this.getRMContext().getRMDelegationTokenSecretManager());
    }
  };
  resourceManager = rm;
  rm.start();

  NodeAttributesManager mgr = rm.getRMContext().getNodeAttributesManager();
  String node1 = "host1";
  String node2 = "host2";
  NodeAttribute gpu = NodeAttribute
      .newInstance(NodeAttribute.PREFIX_CENTRALIZED, "GPU",
          NodeAttributeType.STRING, "nvida");
  NodeAttribute os = NodeAttribute
      .newInstance(NodeAttribute.PREFIX_CENTRALIZED, "OS",
          NodeAttributeType.STRING, "windows64");
  NodeAttribute docker = NodeAttribute
      .newInstance(NodeAttribute.PREFIX_DISTRIBUTED, "DOCKER",
          NodeAttributeType.STRING, "docker0");
  NodeAttribute dist = NodeAttribute
      .newInstance(NodeAttribute.PREFIX_DISTRIBUTED, "VERSION",
          NodeAttributeType.STRING, "3_0_2");
  Map<String, Set<NodeAttribute>> nodes = new HashMap<>();
  nodes.put(node1, ImmutableSet.of(gpu, os, dist));
  nodes.put(node2, ImmutableSet.of(docker, dist));
  mgr.addNodeAttributes(nodes);
  // Create a client.
  conf = new Configuration();
  rpc = YarnRPC.create(conf);
  InetSocketAddress rmAddress = rm.getClientRMService().getBindAddress();
  LOG.info("Connecting to ResourceManager at " + rmAddress);
  client = (ApplicationClientProtocol) rpc.getProxy(
      ApplicationClientProtocol.class, rmAddress, conf);

  // Specify null for hostnames.
  GetNodesToAttributesRequest request1 =
      GetNodesToAttributesRequest.newInstance(null);
  GetNodesToAttributesResponse response1 =
      client.getNodesToAttributes(request1);
  Map<String, Set<NodeAttribute>> hostToAttrs =
      response1.getNodeToAttributes();
  Assert.assertEquals(2, hostToAttrs.size());

  Assert.assertTrue(hostToAttrs.get(node2).contains(dist));
  Assert.assertTrue(hostToAttrs.get(node2).contains(docker));
  Assert.assertTrue(hostToAttrs.get(node1).contains(dist));

  // Specify particular node
  GetNodesToAttributesRequest request2 =
      GetNodesToAttributesRequest.newInstance(ImmutableSet.of(node1));
  GetNodesToAttributesResponse response2 =
      client.getNodesToAttributes(request2);
  hostToAttrs = response2.getNodeToAttributes();
  Assert.assertEquals(1, response2.getNodeToAttributes().size());
  Assert.assertTrue(hostToAttrs.get(node1).contains(dist));

  // Test query with empty set (behaves like null: returns all nodes)
  GetNodesToAttributesRequest request3 =
      GetNodesToAttributesRequest.newInstance(Collections.emptySet());
  GetNodesToAttributesResponse response3 =
      client.getNodesToAttributes(request3);
  hostToAttrs = response3.getNodeToAttributes();
  Assert.assertEquals(2, hostToAttrs.size());

  Assert.assertTrue(hostToAttrs.get(node2).contains(dist));
  Assert.assertTrue(hostToAttrs.get(node2).contains(docker));
  Assert.assertTrue(hostToAttrs.get(node1).contains(dist));

  // test invalid hostname
  GetNodesToAttributesRequest request4 =
      GetNodesToAttributesRequest.newInstance(ImmutableSet.of("invalid"));
  GetNodesToAttributesResponse response4 =
      client.getNodesToAttributes(request4);
  hostToAttrs = response4.getNodeToAttributes();
  Assert.assertEquals(0, hostToAttrs.size());
}
@Override public String getSinkTableName(Table table) { String tableName = table.getName(); Map<String, String> sink = config.getSink(); // Add table name mapping logic String mappingRoute = sink.get(FlinkCDCConfig.TABLE_MAPPING_ROUTES); if (mappingRoute != null) { Map<String, String> mappingRules = parseMappingRoute(mappingRoute); if (mappingRules.containsKey(tableName)) { tableName = mappingRules.get(tableName); } } tableName = sink.getOrDefault(FlinkCDCConfig.TABLE_PREFIX, "") + tableName + sink.getOrDefault(FlinkCDCConfig.TABLE_SUFFIX, ""); // table.lower and table.upper can not be true at the same time if (Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_LOWER)) && Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_UPPER))) { throw new IllegalArgumentException("table.lower and table.upper can not be true at the same time"); } if (Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_UPPER))) { tableName = tableName.toUpperCase(); } if (Boolean.parseBoolean(sink.get(FlinkCDCConfig.TABLE_LOWER))) { tableName = tableName.toLowerCase(); } // Implement regular expressions to replace table names through // sink.table.replace.pattern and table.replace.with String replacePattern = sink.get(FlinkCDCConfig.TABLE_REPLACE_PATTERN); String replaceWith = sink.get(FlinkCDCConfig.TABLE_REPLACE_WITH); if (replacePattern != null && replaceWith != null) { Pattern pattern = Pattern.compile(replacePattern); Matcher matcher = pattern.matcher(tableName); tableName = matcher.replaceAll(replaceWith); } // add schema if (Boolean.parseBoolean(sink.get("table.prefix.schema"))) { tableName = table.getSchema() + "_" + tableName; } return tableName; }
@Test public void testGetSinkTableNameWithSchemaPrefixEnabled() { Map<String, String> sinkConfig = new HashMap<>(); sinkConfig.put("table.prefix.schema", "true"); sinkConfig.put("table.prefix", ""); sinkConfig.put("table.suffix", ""); sinkConfig.put("table.lower", "false"); sinkConfig.put("table.upper", "false"); when(config.getSink()).thenReturn(sinkConfig); Table table = new Table("testTable", "testSchema", null); String expectedTableName = "testSchema_testTable"; Assert.assertEquals(expectedTableName, sinkBuilder.getSinkTableName(table)); }
@DeleteMapping("by-product-id/{productId:\\d+}") public Mono<ResponseEntity<Void>> removeProductFromFavourites(Mono<JwtAuthenticationToken> authenticationTokenMono, @PathVariable("productId") int productId) { return authenticationTokenMono .flatMap(token -> this.favouriteProductsService .removeProductFromFavourites(productId, token.getToken().getSubject())) .then(Mono.just(ResponseEntity.noContent().build())); }
@Test void removeProductFromFavourites_ReturnsNoContent() { // given doReturn(Mono.empty()).when(this.favouriteProductsService) .removeProductFromFavourites(1, "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c"); // when StepVerifier.create(this.controller.removeProductFromFavourites(Mono.just(new JwtAuthenticationToken(Jwt.withTokenValue("e30.e30") .headers(headers -> headers.put("foo", "bar")) .claim("sub", "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c").build())), 1)) // then .expectNext(ResponseEntity.noContent().build()) .verifyComplete(); verify(this.favouriteProductsService) .removeProductFromFavourites(1, "5f1d5cf8-cbd6-11ee-9579-cf24d050b47c"); }
@Override public TableCellByTypeTransformer tableCellByTypeTransformer() { return transformer; }
@Test void can_transform_null_while_using_replacement_patterns() throws Throwable { Method method = JavaDefaultDataTableCellTransformerDefinitionTest.class.getMethod("transform_string_to_type", String.class, Type.class); JavaDefaultDataTableCellTransformerDefinition definition = new JavaDefaultDataTableCellTransformerDefinition( method, lookup, new String[] { "[empty]" }); Object transformed = definition.tableCellByTypeTransformer().transform(null, String.class); assertThat(transformed, is("transform_string_to_type=null")); }
@Override public RFuture<Boolean> addAsync(V e) { String name = getRawName(e); return commandExecutor.writeAsync(name, codec, RedisCommands.SADD_SINGLE, name, encode(e)); }
@Test public void testAddAsync() throws InterruptedException, ExecutionException { RSet<Integer> set = redisson.getSet("simple"); RFuture<Boolean> future = set.addAsync(2); Assertions.assertTrue(future.get()); Assertions.assertTrue(set.contains(2)); }
@Override public void close() throws IOException { if (closed) { return; } closed = true; blockData = null; reader.close(); reader = null; remoteObject = null; fpos.invalidate(); try { client.close(); } finally { streamStatistics.close(); } client = null; }
@Test public void testClose() throws Exception { S3ARemoteInputStream inputStream = S3APrefetchFakes.createS3InMemoryInputStream(futurePool, "bucket", "key", 9); testCloseHelper(inputStream, 9); inputStream = S3APrefetchFakes.createS3CachingInputStream(futurePool, "bucket", "key", 9, 5, 3); testCloseHelper(inputStream, 5); }
@Override public List<?> deserialize(final String topic, final byte[] bytes) { if (bytes == null) { return null; } final Object single = inner.deserialize(topic, bytes); return Collections.singletonList(single); }
@Test public void shouldDeserializeNewStyle() { // When: final List<?> result = deserializer.deserialize(TOPIC, HEADERS, SERIALIZED); // Then: verify(inner).deserialize(TOPIC, HEADERS, SERIALIZED); assertThat(result, contains(DESERIALIZED)); }
public static DataCleaner<Track<NopHit>> coreSmoothing() { return CompositeCleaner.of( //removes error-prone synthetic "assumed" points from Nop data new CoastedPointRemover<>(), //remove both points if any two sequential points are within 500 Milliseconds new HighFrequencyPointRemover<>(Duration.ofMillis(500)), //remove tracks with small just a handful of points, new SmallTrackRemover(9), /* * ensure any two sequential points have at least 4 seconds between them (by removing * only the trailing points) */ new TimeDownSampler<>(Duration.ofMillis(4_000)), //removes near-stationary Tracks produces by "radar mirages" off of skyscrapers and such new RemoveLowVariabilityTracks<>(), //removes near-duplicate points when a track is stationary. new DistanceDownSampler<>(), //forces 000 altitudes to null new ZeroAltitudeToNull<>(), //correct missing altitude values new FillMissingAltitudes<>(), //correct the altitude values for outlying Points new VerticalOutlierDetector<>(), //remove points with inconsistent LatLong values new LateralOutlierDetector<>(), //remove radar noise using polynomial fitting new TrackFilter<>() ); }
@Disabled @Test public void baseLineSmoothing() { /* * This "test" is actually a demo that prints some "smoothed track data" for making graphics */ Track<NopHit> baseLineTrack = createTrackFromResource( TrackSmoothing.class, "curvyTrack.txt" ); Optional<Track<NopHit>> result = coreSmoothing().clean(baseLineTrack); assertDoesNotThrow(() -> result.get()); }
public static TransactionInput coinbaseInput(Transaction parentTransaction, byte[] scriptBytes) { Objects.requireNonNull(parentTransaction); checkArgument(scriptBytes.length >= 2 && scriptBytes.length <= 100, () -> "script must be between 2 and 100 bytes: " + scriptBytes.length); return new TransactionInput(parentTransaction, scriptBytes, TransactionOutPoint.UNCONNECTED); }
@Test public void coinbaseInput() { TransactionInput coinbaseInput = TransactionInput.coinbaseInput(new Transaction(), new byte[2]); assertTrue(coinbaseInput.isCoinBase()); }
public FEELFnResult<Object> invoke(@ParameterName("list") List list) { if ( list == null || list.isEmpty() ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null or empty")); } else { try { return FEELFnResult.ofResult(Collections.max(list, new InterceptNotComparableComparator())); } catch (ClassCastException e) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "contains items that are not comparable")); } } }
@Test void invokeListOfChronoPeriods() { final ChronoPeriod p1Period = Period.parse("P1Y"); final ChronoPeriod p1Comparable = ComparablePeriod.parse("P1Y"); final ChronoPeriod p2Period = Period.parse("P1M"); final ChronoPeriod p2Comparable = ComparablePeriod.parse("P1M"); Predicate<ChronoPeriod> assertion = i -> i.get(ChronoUnit.YEARS) == 1 && i.get(ChronoUnit.MONTHS) == 0; FunctionTestUtil.assertPredicateOnResult(maxFunction.invoke(Collections.singletonList(p1Period)), ChronoPeriod.class, assertion); FunctionTestUtil.assertPredicateOnResult(maxFunction.invoke(Collections.singletonList(p1Comparable)), ChronoPeriod.class, assertion); FunctionTestUtil.assertPredicateOnResult(maxFunction.invoke(Arrays.asList(p1Period, p2Period)), ChronoPeriod.class, assertion); FunctionTestUtil.assertPredicateOnResult(maxFunction.invoke(Arrays.asList(p1Comparable, p2Period)), ChronoPeriod.class, assertion); FunctionTestUtil.assertPredicateOnResult(maxFunction.invoke(Arrays.asList(p1Period, p2Comparable)), ChronoPeriod.class, assertion); FunctionTestUtil.assertPredicateOnResult(maxFunction.invoke(Arrays.asList(p1Comparable, p2Comparable)), ChronoPeriod.class, assertion); }
@Udf public String rpad( @UdfParameter(description = "String to be padded") final String input, @UdfParameter(description = "Target length") final Integer targetLen, @UdfParameter(description = "Padding string") final String padding) { if (input == null) { return null; } if (padding == null || padding.isEmpty() || targetLen == null || targetLen < 0) { return null; } final StringBuilder sb = new StringBuilder(targetLen + padding.length()); sb.append(input); final int padChars = Math.max(targetLen - input.length(), 0); for (int i = 0; i < padChars; i += padding.length()) { sb.append(padding); } sb.setLength(targetLen); return sb.toString(); }
@Test public void shouldPadEmptyInputBytes() { final ByteBuffer result = udf.rpad(EMPTY_BYTES, 4, BYTES_45); assertThat(result, is(ByteBuffer.wrap(new byte[]{4,5,4,5}))); }
@Override public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException { ReflectionUtils.doWithMethods(bean.getClass(), recurringJobFinderMethodCallback); return bean; }
@Test void beansWithMethodsAnnotatedWithRecurringAnnotationHasDisabledCronExpressionButNotSpecifiedIdShouldBeOmitted() { new ApplicationContextRunner() .withBean(RecurringJobPostProcessor.class) .withBean(JobScheduler.class, () -> jobScheduler) .withPropertyValues("my-job.id=") .withPropertyValues("my-job.cron=-") .withPropertyValues("my-job.zone-id=Asia/Taipei") .run(context -> { context.getBean(RecurringJobPostProcessor.class) .postProcessAfterInitialization(new MyServiceWithRecurringAnnotationContainingPropertyPlaceholder(), "not important"); verifyNoInteractions(jobScheduler); }); }
private boolean healthCheck() { HealthCheckRequest healthCheckRequest = new HealthCheckRequest(); if (this.currentConnection == null) { return false; } int reTryTimes = rpcClientConfig.healthCheckRetryTimes(); Random random = new Random(); while (reTryTimes >= 0) { reTryTimes--; try { if (reTryTimes > 1) { Thread.sleep(random.nextInt(500)); } Response response = this.currentConnection .request(healthCheckRequest, rpcClientConfig.healthCheckTimeOut()); // not only check server is ok, also check connection is register. return response != null && response.isSuccess(); } catch (Exception e) { // ignore } } return false; }
@Test void testHealthCheck() throws IllegalAccessException, NacosException { Random random = new Random(); int retry = random.nextInt(10); when(rpcClientConfig.healthCheckRetryTimes()).thenReturn(retry); rpcClient.rpcClientStatus.set(RpcClientStatus.RUNNING); rpcClient.currentConnection = connection; doThrow(new NacosException()).when(connection).request(any(), anyLong()); try { healthCheck.invoke(rpcClient); } catch (InvocationTargetException e) { e.printStackTrace(); } verify(connection, times(retry + 1)).request(any(), anyLong()); }
void start() { Scheduler scheduler = threadPoolScheduler(threads, threadPoolQueueSize); messageSource.getMessageBatches() .subscribeOn(Schedulers.from(Executors.newSingleThreadExecutor())) .doOnNext(batch -> logger.log(batch.toString())) .flatMap(batch -> Flowable.fromIterable(batch.getMessages())) .flatMapSingle(m -> Single.defer(() -> Single.just(m) .map(messageHandler::handleMessage)) .subscribeOn(scheduler)) .subscribeWith(new SimpleSubscriber<>(threads, 1)); }
@Test void allMessagesAreProcessedOnMultipleThreads() { int batches = 10; int batchSize = 3; int threads = 2; int threadPoolQueueSize = 10; MessageSource messageSource = new TestMessageSource(batches, batchSize); TestMessageHandler messageHandler = new TestMessageHandler(); ReactiveBatchProcessor processor = new ReactiveBatchProcessor( messageSource, messageHandler, threads, threadPoolQueueSize); processor.start(); await() .atMost(10, TimeUnit.SECONDS) .pollInterval(1, TimeUnit.SECONDS) .untilAsserted(() -> assertEquals(batches * batchSize, messageHandler.getProcessedMessages())); assertEquals(threads, messageHandler.threadNames().size()); }
void decode(int streamId, ByteBuf in, Http2Headers headers, boolean validateHeaders) throws Http2Exception { Http2HeadersSink sink = new Http2HeadersSink( streamId, headers, maxHeaderListSize, validateHeaders); // Check for dynamic table size updates, which must occur at the beginning: // https://www.rfc-editor.org/rfc/rfc7541.html#section-4.2 decodeDynamicTableSizeUpdates(in); decode(in, sink); // Now that we've read all of our headers we can perform the validation steps. We must // delay throwing until this point to prevent dynamic table corruption. sink.finish(); }
@Test public void pseudoHeaderAfterRegularHeader() throws Exception { final ByteBuf in = Unpooled.buffer(200); try { HpackEncoder hpackEncoder = new HpackEncoder(true); Http2Headers toEncode = new InOrderHttp2Headers(); toEncode.add("test", "1"); toEncode.add(":method", "GET"); hpackEncoder.encodeHeaders(1, in, toEncode, NEVER_SENSITIVE); final Http2Headers decoded = new DefaultHttp2Headers(); Http2Exception.StreamException e = assertThrows(Http2Exception.StreamException.class, new Executable() { @Override public void execute() throws Throwable { hpackDecoder.decode(3, in, decoded, true); } }); assertThat(e.streamId(), is(3)); assertThat(e.error(), is(PROTOCOL_ERROR)); } finally { in.release(); } }
/**
 * Runs the containerization, writes the requested digest/id/metadata output
 * files, and translates each known failure mode into a
 * BuildStepsExecutionException carrying a helpful suggestion.
 */
public JibContainer runBuild() throws BuildStepsExecutionException, IOException, CacheDirectoryCreationException {
    try {
        logger.accept(LogEvent.lifecycle(""));
        logger.accept(LogEvent.lifecycle(startupMessage));
        JibContainer jibContainer = jibContainerBuilder.containerize(containerizer);
        logger.accept(LogEvent.lifecycle(""));
        logger.accept(LogEvent.lifecycle(successMessage));

        // when an image is built, write out the digest and id
        if (imageDigestOutputPath != null) {
            String imageDigest = jibContainer.getDigest().toString();
            Files.write(imageDigestOutputPath, imageDigest.getBytes(StandardCharsets.UTF_8));
        }
        if (imageIdOutputPath != null) {
            String imageId = jibContainer.getImageId().toString();
            Files.write(imageIdOutputPath, imageId.getBytes(StandardCharsets.UTF_8));
        }
        if (imageJsonOutputPath != null) {
            ImageMetadataOutput metadataOutput = ImageMetadataOutput.fromJibContainer(jibContainer);
            String imageJson = metadataOutput.toJson();
            Files.write(imageJsonOutputPath, imageJson.getBytes(StandardCharsets.UTF_8));
        }
        return jibContainer;

    } catch (HttpHostConnectException ex) {
        // Failed to connect to registry.
        throw new BuildStepsExecutionException(helpfulSuggestions.forHttpHostConnect(), ex);

    } catch (RegistryUnauthorizedException ex) {
        handleRegistryUnauthorizedException(ex, helpfulSuggestions);

    } catch (RegistryCredentialsNotSentException ex) {
        throw new BuildStepsExecutionException(helpfulSuggestions.forCredentialsNotSent(), ex);

    } catch (RegistryAuthenticationFailedException ex) {
        if (ex.getCause() instanceof ResponseException) {
            // Re-wrap auth failures with an HTTP response as "unauthorized".
            handleRegistryUnauthorizedException(
                new RegistryUnauthorizedException(
                    ex.getServerUrl(), ex.getImageName(), (ResponseException) ex.getCause()),
                helpfulSuggestions);
        } else {
            // Unknown cause
            throw new BuildStepsExecutionException(helpfulSuggestions.none(), ex);
        }

    } catch (UnknownHostException ex) {
        throw new BuildStepsExecutionException(helpfulSuggestions.forUnknownHost(), ex);

    } catch (InsecureRegistryException ex) {
        throw new BuildStepsExecutionException(helpfulSuggestions.forInsecureRegistry(), ex);

    } catch (RegistryException ex) {
        String message = Verify.verifyNotNull(ex.getMessage()); // keep null-away happy
        throw new BuildStepsExecutionException(message, ex);

    } catch (ExecutionException ex) {
        String message = ex.getCause().getMessage();
        throw new BuildStepsExecutionException(
            message == null ? "(null exception message)" : message, ex.getCause());

    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        throw new BuildStepsExecutionException(helpfulSuggestions.none(), ex);
    }
    // Reached only if handleRegistryUnauthorizedException returns instead of
    // throwing — NOTE(review): it appears intended to always throw; confirm.
    throw new IllegalStateException("unreachable");
}
@Test public void testBuildImage_insecureRegistryException() throws InterruptedException, IOException, CacheDirectoryCreationException, RegistryException, ExecutionException { InsecureRegistryException mockInsecureRegistryException = Mockito.mock(InsecureRegistryException.class); Mockito.doThrow(mockInsecureRegistryException) .when(mockJibContainerBuilder) .containerize(mockContainerizer); try { testJibBuildRunner.runBuild(); Assert.fail(); } catch (BuildStepsExecutionException ex) { Assert.assertEquals(TEST_HELPFUL_SUGGESTIONS.forInsecureRegistry(), ex.getMessage()); } }
public boolean add(final Integer value) { return add(value.intValue()); }
@Test void consecutiveValuesShouldBeCorrectlyStored() { for (int i = 0; i < 10_000; i++) { testSet.add(i); } assertThat(testSet, hasSize(10_000)); }
public T getValue() { return this.value; }
@Test @SuppressWarnings("unchecked") void testValue() { Pane<LongAdder> pane = mock(Pane.class); LongAdder count = new LongAdder(); when(pane.getValue()).thenReturn(count); assertEquals(count, pane.getValue()); when(pane.getValue()).thenReturn(null); assertNotEquals(count, pane.getValue()); }
@Bean @ConditionalOnProperty(prefix = "shenyu.discovery", name = "enable", matchIfMissing = false, havingValue = "true") @ConfigurationProperties(prefix = "shenyu.discovery") public ShenyuDiscoveryConfig shenyuDiscoveryConfig() { return new ShenyuDiscoveryConfig(); }
@Test public void testShenyuDiscoveryConfig() { MockedStatic<RegisterUtils> registerUtilsMockedStatic = mockStatic(RegisterUtils.class); registerUtilsMockedStatic.when(() -> RegisterUtils.doLogin(any(), any(), any())).thenReturn(Optional.ofNullable("token")); applicationContextRunner.run(context -> { ShenyuDiscoveryConfig config = context.getBean("shenyuDiscoveryConfig", ShenyuDiscoveryConfig.class); assertNotNull(config); assertThat(config.getType()).isEqualTo("local"); }); registerUtilsMockedStatic.close(); }
@Override public void check(final SQLStatement sqlStatement) { ShardingSpherePreconditions.checkState(judgeEngine.isSupported(sqlStatement), () -> new ClusterStateException(getType(), sqlStatement)); }
@Test void assertExecuteWithSupportedSQL() { new UnavailableProxyState().check(mock(ImportMetaDataStatement.class)); }
public static CsvWriter getWriter(String filePath, Charset charset) { return new CsvWriter(filePath, charset); }
@Test @Disabled public void writeBeansTest() { @Data class Student { Integer id; String name; Integer age; } String path = FileUtil.isWindows() ? "d:/test/testWriteBeans.csv" : "~/test/testWriteBeans.csv"; CsvWriter writer = CsvUtil.getWriter(path, CharsetUtil.CHARSET_UTF_8); List<Student> students = new ArrayList<>(); Student student1 = new Student(); student1.setId(1); student1.setName("张三"); student1.setAge(18); Student student2 = new Student(); student2.setId(2); student2.setName("李四"); student2.setAge(22); Student student3 = new Student(); student3.setId(3); student3.setName("王五"); student3.setAge(31); students.add(student1); students.add(student2); students.add(student3); writer.writeBeans(students); writer.close(); }
public static TriggerStateMachine stateMachineForTrigger(RunnerApi.Trigger trigger) { switch (trigger.getTriggerCase()) { case AFTER_ALL: return AfterAllStateMachine.of( stateMachinesForTriggers(trigger.getAfterAll().getSubtriggersList())); case AFTER_ANY: return AfterFirstStateMachine.of( stateMachinesForTriggers(trigger.getAfterAny().getSubtriggersList())); case AFTER_END_OF_WINDOW: return stateMachineForAfterEndOfWindow(trigger.getAfterEndOfWindow()); case ELEMENT_COUNT: return AfterPaneStateMachine.elementCountAtLeast( trigger.getElementCount().getElementCount()); case AFTER_SYNCHRONIZED_PROCESSING_TIME: return AfterSynchronizedProcessingTimeStateMachine.ofFirstElement(); case DEFAULT: return DefaultTriggerStateMachine.of(); case NEVER: return NeverStateMachine.ever(); case ALWAYS: return ReshuffleTriggerStateMachine.create(); case OR_FINALLY: return stateMachineForTrigger(trigger.getOrFinally().getMain()) .orFinally(stateMachineForTrigger(trigger.getOrFinally().getFinally())); case REPEAT: return RepeatedlyStateMachine.forever( stateMachineForTrigger(trigger.getRepeat().getSubtrigger())); case AFTER_EACH: return AfterEachStateMachine.inOrder( stateMachinesForTriggers(trigger.getAfterEach().getSubtriggersList())); case AFTER_PROCESSING_TIME: return stateMachineForAfterProcessingTime(trigger.getAfterProcessingTime()); case TRIGGER_NOT_SET: throw new IllegalArgumentException( String.format("Required field 'trigger' not set on %s", trigger)); default: throw new IllegalArgumentException(String.format("Unknown trigger type %s", trigger)); } }
@Test public void testStateMachineForAfterWatermark() { RunnerApi.Trigger trigger = RunnerApi.Trigger.newBuilder() .setAfterEndOfWindow(RunnerApi.Trigger.AfterEndOfWindow.getDefaultInstance()) .build(); assertThat( TriggerStateMachines.stateMachineForTrigger(trigger), instanceOf(AfterWatermarkStateMachine.FromEndOfWindow.class)); }
public static Getter newMethodGetter(Object object, Getter parent, Method method, String modifier) throws Exception { return newGetter(object, parent, modifier, method.getReturnType(), method::invoke, (t, et) -> new MethodGetter(parent, method, modifier, t, et)); }
@Test public void newMethodGetter_whenExtractingFromNull_Collection_FieldAndParentIsNonEmptyMultiResult_thenInferReturnType() throws Exception { OuterObject object = new OuterObject("name", InnerObject.nullInner("inner")); Getter parentGetter = GetterFactory.newMethodGetter(object, null, innersCollectionMethod, "[any]"); Getter innerObjectNameGetter = GetterFactory.newMethodGetter(object, parentGetter, innerAttributesCollectionMethod, "[any]"); assertSame(NullMultiValueGetter.NULL_MULTIVALUE_GETTER, innerObjectNameGetter); }
@Override public void execute(final ConnectionSession connectionSession) throws SQLException { MetaDataContexts metaDataContexts = ProxyContext.getInstance().getContextManager().getMetaDataContexts(); JDBCExecutor jdbcExecutor = new JDBCExecutor(BackendExecutorContext.getInstance().getExecutorEngine(), connectionSession.getConnectionContext()); try (SQLFederationEngine sqlFederationEngine = new SQLFederationEngine(databaseName, PG_CATALOG, metaDataContexts.getMetaData(), metaDataContexts.getStatistics(), jdbcExecutor)) { DriverExecutionPrepareEngine<JDBCExecutionUnit, Connection> prepareEngine = createDriverExecutionPrepareEngine(metaDataContexts, connectionSession); SQLFederationContext context = new SQLFederationContext(false, new QueryContext(sqlStatementContext, sql, parameters, SQLHintUtils.extractHint(sql), connectionSession.getConnectionContext(), metaDataContexts.getMetaData()), metaDataContexts.getMetaData(), connectionSession.getProcessId()); ShardingSphereDatabase database = metaDataContexts.getMetaData().getDatabase(databaseName); ResultSet resultSet = sqlFederationEngine.executeQuery(prepareEngine, createOpenGaussSystemCatalogAdminQueryCallback(database.getProtocolType(), database.getResourceMetaData(), sqlStatementContext.getSqlStatement()), context); queryResultMetaData = new JDBCQueryResultMetaData(resultSet.getMetaData()); mergedResult = new IteratorStreamMergedResult(Collections.singletonList(new JDBCMemoryQueryResult(resultSet, connectionSession.getProtocolType()))); } }
@Test
void assertExecuteSelectGsPasswordDeadlineAndIntervalToNum() throws SQLException {
    // Federated execution of "select intervaltonum(gs_password_deadline())"
    // must yield a single INTEGER column whose value is 90.
    when(ProxyContext.getInstance()).thenReturn(mock(ProxyContext.class, RETURNS_DEEP_STUBS));
    RuleMetaData ruleMetaData = mock(RuleMetaData.class);
    when(ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData().getGlobalRuleMetaData()).thenReturn(ruleMetaData);
    ConfigurationProperties props = new ConfigurationProperties(new Properties());
    when(ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData().getProps()).thenReturn(props);
    Map<String, ShardingSphereDatabase> databases = createShardingSphereDatabaseMap();
    SQLFederationRule sqlFederationRule = new SQLFederationRule(new SQLFederationRuleConfiguration(false, false, new CacheOption(1, 1L)), databases);
    OpenGaussSelectStatement sqlStatement = createSelectStatementForGsPasswordDeadlineAndIntervalToNum();
    ShardingSphereMetaData metaData = new ShardingSphereMetaData(databases, mock(ResourceMetaData.class, RETURNS_DEEP_STUBS),
        new RuleMetaData(Collections.singletonList(sqlFederationRule)), props);
    when(ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData()).thenReturn(metaData);
    SelectStatementContext sqlStatementContext = new SelectStatementContext(metaData, Collections.emptyList(), sqlStatement, "sharding_db", Collections.emptyList());
    OpenGaussSystemCatalogAdminQueryExecutor executor = new OpenGaussSystemCatalogAdminQueryExecutor(sqlStatementContext,
        "select intervaltonum(gs_password_deadline())", "sharding_db", Collections.emptyList());
    ConnectionSession connectionSession = mock(ConnectionSession.class);
    when(connectionSession.getProtocolType()).thenReturn(TypedSPILoader.getService(DatabaseType.class, "openGauss"));
    ConnectionContext connectionContext = mockConnectionContext();
    when(connectionSession.getConnectionContext()).thenReturn(connectionContext);
    executor.execute(connectionSession);
    // Metadata: exactly one INTEGER column.
    QueryResultMetaData actualMetaData = executor.getQueryResultMetaData();
    assertThat(actualMetaData.getColumnCount(), is(1));
    assertThat(actualMetaData.getColumnType(1), is(Types.INTEGER));
    // Result: one row with the value 90.
    MergedResult actualResult = executor.getMergedResult();
    assertTrue(actualResult.next());
    assertThat(actualResult.getValue(1, Integer.class), is(90));
}
@Override public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer, final Merger<? super K, V> sessionMerger) { return aggregate(initializer, sessionMerger, Materialized.with(null, null)); }
@Test public void shouldNotHaveNullInitializer2OnAggregate() { assertThrows(NullPointerException.class, () -> windowedCogroupedStream.aggregate(null, sessionMerger, Materialized.as("test"))); }
@Override public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { try { if(!session.getClient().setFileType(FTP.BINARY_FILE_TYPE)) { throw new FTPException(session.getClient().getReplyCode(), session.getClient().getReplyString()); } if(status.isAppend()) { session.getClient().setRestartOffset(status.getOffset()); } final InputStream in = new DataConnectionActionExecutor(session).data(new DataConnectionAction<InputStream>() { @Override public InputStream execute() throws BackgroundException { try { return session.getClient().retrieveFileStream(file.getAbsolute()); } catch(IOException e) { throw new FTPExceptionMappingService().map(e); } } }); return new ReadReplyInputStream(in, status); } catch(IOException e) { throw new FTPExceptionMappingService().map("Download {0} failed", e, file); } }
@Test public void testAbortPartialRead() throws Exception { final Path test = new Path(new FTPWorkdirService(session).find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); new FTPTouchFeature(session).touch(test, new TransferStatus()); final OutputStream out = new FTPWriteFeature(session).write(test, new TransferStatus().withLength(20L), new DisabledConnectionCallback()); assertNotNull(out); final byte[] content = RandomUtils.nextBytes(2048); new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(content), out); out.close(); final TransferStatus status = new TransferStatus(); status.setLength(20L); final Path workdir = new FTPWorkdirService(session).find(); final InputStream in = new FTPReadFeature(session).read(test, status, new DisabledConnectionCallback()); assertNotNull(in); assertTrue(in.read() > 0); // Send ABOR because stream was not read completly in.close(); // Make sure subsequent PWD command works assertEquals(workdir, new FTPWorkdirService(session).find()); new FTPDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
@SuppressFBWarnings(value = "EI_EXPOSE_REP", justification = "requiredColumns is ImmutableSet") public Collection<? extends ColumnReferenceExp> get() { return requiredColumns; }
@Test public void shouldAddColRef() { // When: builder.add(COL0_REF).add(EXP0); // Then: assertThat(builder.build().get(), is(ImmutableSet.of(COL0_REF, COL1_REF, COL2_REF, COL3_REF))); }
@Override protected Drive connect(final ProxyFinder proxy, final HostKeyCallback callback, final LoginCallback prompt, final CancelCallback cancel) throws HostParserException, ConnectionCanceledException { final HttpClientBuilder configuration = builder.build(proxy, this, prompt); authorizationService = new OAuth2RequestInterceptor(builder.build(proxy, this, prompt).build(), host, prompt) .withRedirectUri(host.getProtocol().getOAuthRedirectUrl()); configuration.addInterceptorLast(authorizationService); configuration.setServiceUnavailableRetryStrategy(new CustomServiceUnavailableRetryStrategy(host, new ExecutionCountServiceUnavailableRetryStrategy(new OAuth2ErrorResponseInterceptor(host, authorizationService)))); if(new HostPreferences(host).getBoolean("googledrive.limit.requests.enable")) { configuration.addInterceptorLast(new RateLimitingHttpRequestInterceptor(new DefaultHttpRateLimiter( new HostPreferences(host).getInteger("googledrive.limit.requests.second") ))); } transport = new ApacheHttpTransport(configuration.build()); final UseragentProvider ua = new PreferencesUseragentProvider(); return new Drive.Builder(transport, new GsonFactory(), new UserAgentHttpRequestInitializer(ua)) .setApplicationName(ua.get()) .build(); }
@Test(expected = LoginCanceledException.class) public void testConnectInvalidKey() throws Exception { final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new DriveProtocol()))); final Profile profile = new ProfilePlistReader(factory).read( this.getClass().getResourceAsStream("/Google Drive.cyberduckprofile")); final Host host = new Host(profile, "www.googleapis.com", new Credentials()); final DriveSession session = new DriveSession(host, new DefaultX509TrustManager(), new DefaultX509KeyManager()); new LoginConnectionService(new DisabledLoginCallback() { @Override public Credentials prompt(final Host bookmark, final String username, final String title, final String reason, final LoginOptions options) throws LoginCanceledException { if("https://accounts.google.com/o/oauth2/auth?client_id=996125414232.apps.googleusercontent.com&redirect_uri=urn:ietf:wg:oauth:2.0:oob&response_type=code&scope=https://www.googleapis.com/auth/drive".equals(reason)) { return new VaultCredentials("t"); } throw new LoginCanceledException(); } }, new DisabledHostKeyCallback(), new DisabledPasswordStore(), new DisabledProgressListener() ).connect(session, new DisabledCancelCallback()); assertTrue(session.isConnected()); session.close(); assertFalse(session.isConnected()); }
public File savePublic(String filename) throws IOException { return publicConfig().save(filename); }
@Test public void testSavePublicFile() throws IOException { ZCert cert = new ZCert(); cert.savePublic(CERT_LOCATION + "/test.cert"); File file = new File(CERT_LOCATION + "/test.cert"); assertThat(file.exists(), is(true)); }
/**
 * FEEL coincides() for two scalar points: true when the points compare equal.
 * Produces an InvalidParametersEvent error when either point is null or the
 * points are not mutually comparable.
 */
public FEELFnResult<Boolean> invoke(@ParameterName( "point1" ) Comparable point1, @ParameterName( "point2" ) Comparable point2) {
    if ( point1 == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point1", "cannot be null"));
    }
    if ( point2 == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point2", "cannot be null"));
    }
    try {
        boolean result = point1.compareTo( point2 ) == 0;
        return FEELFnResult.ofResult( result );
    } catch( Exception e ) {
        // points are not comparable
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point1", "cannot be compared to point2"));
    }
}
// coincides(range, range): true only when both endpoints and both boundary
// kinds match exactly; any endpoint or boundary-openness mismatch yields false.
@Test
void invokeParamRangeAndRange() {
    FunctionTestUtil.assertResult(
            coincidesFunction.invoke(
                    new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ),
                    new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
            Boolean.TRUE );
    FunctionTestUtil.assertResult(
            coincidesFunction.invoke(
                    new RangeImpl( Range.RangeBoundary.OPEN, "a", "f", Range.RangeBoundary.OPEN ),
                    new RangeImpl( Range.RangeBoundary.OPEN, "a", "f", Range.RangeBoundary.OPEN ) ),
            Boolean.TRUE );
    FunctionTestUtil.assertResult(
            coincidesFunction.invoke(
                    new RangeImpl( Range.RangeBoundary.CLOSED, "g", "k", Range.RangeBoundary.CLOSED ),
                    new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
            Boolean.FALSE );
    FunctionTestUtil.assertResult(
            coincidesFunction.invoke(
                    new RangeImpl( Range.RangeBoundary.CLOSED, "f", "k", Range.RangeBoundary.CLOSED ),
                    new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
            Boolean.FALSE );
    FunctionTestUtil.assertResult(
            coincidesFunction.invoke(
                    new RangeImpl( Range.RangeBoundary.OPEN, "f", "k", Range.RangeBoundary.CLOSED ),
                    new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.CLOSED ) ),
            Boolean.FALSE );
    FunctionTestUtil.assertResult(
            coincidesFunction.invoke(
                    new RangeImpl( Range.RangeBoundary.CLOSED, "f", "k", Range.RangeBoundary.CLOSED ),
                    new RangeImpl( Range.RangeBoundary.CLOSED, "a", "f", Range.RangeBoundary.OPEN ) ),
            Boolean.FALSE );
}
/**
 * Handles eth_getBlockByHash: reads the block hash and full-transaction flag
 * from headers (falling back to the endpoint configuration), sends the request,
 * and sets the resulting block as the message body unless the node returned an
 * error (checkForError handles error propagation).
 */
@InvokeOnHeader(Web3jConstants.ETH_GET_BLOCK_BY_HASH)
void ethGetBlockByHash(Message message) throws IOException {
    Boolean fullTransactionObjects = message.getHeader(Web3jConstants.FULL_TRANSACTION_OBJECTS, configuration::isFullTransactionObjects, Boolean.class);
    String blockHash = message.getHeader(Web3jConstants.BLOCK_HASH, configuration::getBlockHash, String.class);
    Request<?, EthBlock> request = web3j.ethGetBlockByHash(blockHash, fullTransactionObjects);
    setRequestId(message, request);
    EthBlock response = request.send();
    boolean hasError = checkForError(message, response);
    if (!hasError) {
        message.setBody(response.getBlock());
    }
}
// eth_getBlockByHash: a successful mocked response must populate the exchange body with the block.
@Test
public void ethGetBlockByHashTest() throws Exception {
    EthBlock response = Mockito.mock(EthBlock.class);
    Mockito.when(mockWeb3j.ethGetBlockByHash(any(), anyBoolean())).thenReturn(request);
    Mockito.when(request.send()).thenReturn(response);
    Mockito.when(response.getBlock()).thenReturn(Mockito.mock(EthBlock.Block.class));
    Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_GET_BLOCK_BY_HASH);
    template.send(exchange);
    EthBlock.Block body = exchange.getIn().getBody(EthBlock.Block.class);
    assertNotNull(body);
}
/**
 * Resolves an instance of the requested type via an unannotated binding key.
 *
 * @param type the class to resolve
 * @return the resolved instance
 */
@Nonnull
public <T> T getInstance(@Nonnull Class<T> type) {
    return getInstance(new Key<>(type));
}
// Requesting a factory interface should yield an assisted-injection factory that
// produces fresh (non-singleton) Foo instances sharing injector-managed deps.
@Test
public void whenFactoryRequested_createsInjectedFactory() throws Exception {
    injector = builder.bind(Umm.class, MyUmm.class).build();
    FooFactory factory = injector.getInstance(FooFactory.class);
    Foo chauncey = factory.create("Chauncey");
    assertThat(chauncey.name).isEqualTo("Chauncey");
    Foo anotherChauncey = factory.create("Chauncey");
    assertThat(anotherChauncey).isNotSameInstanceAs(chauncey);
    assertThat(chauncey.thing).isSameInstanceAs(injector.getInstance(Thing.class));
}
/**
 * Validates this answer: the inline query id must be non-empty, every result
 * must itself validate, and the optional button is validated when present.
 *
 * @throws TelegramApiValidationException if any constraint is violated
 */
@Override
public void validate() throws TelegramApiValidationException {
    if (inlineQueryId.isEmpty()) {
        throw new TelegramApiValidationException("InlineQueryId can't be empty", this);
    }
    for (InlineQueryResult result : results) {
        result.validate();
    }
    if (button != null) {
        button.validate();
    }
}
// A 65-character start parameter (one over the 64-char maximum) must fail validation.
@Test
void testSwitchPmParameterContainsUpTo64Chars() {
    answerInlineQuery.setInlineQueryId("RANDOMEID");
    answerInlineQuery.setResults(new ArrayList<>());
    answerInlineQuery.setButton(InlineQueryResultsButton
            .builder()
            .text("Test Text")
            .startParameter("2AAQlw4BwzXwFNXMk5rReQC3YbhbgNqq4BGqyozjRTtrsok4shsB8u4NXeslfpOsL")
            .build());
    try {
        answerInlineQuery.validate();
        // Bug fix: previously the test passed silently when no exception was thrown.
        fail("Expected TelegramApiValidationException for a start parameter longer than 64 chars");
    } catch (TelegramApiValidationException e) {
        assertEquals("SwitchPmParameter can't be empty or longer than 64 chars", e.getMessage());
    }
}
/**
 * Number of elements removed from this queue so far.
 * NOTE(review): returns {@code head} directly — assumes the enclosing class
 * advances {@code head} by one per successful removal; confirm there.
 */
@Override
public long removedCount() {
    return head;
}
// peek() must not count as a removal; poll() must increment removedCount by one.
@Test
public void testRemovedCount() {
    queue.offer(1);
    queue.peek();
    assertEquals(0, queue.removedCount());
    queue.poll();
    assertEquals(1, queue.removedCount());
}
/**
 * Null-safe accessor for an enum constant's declared name.
 * Yields the literal string {@code "null"} when the reference is absent,
 * otherwise the constant's {@code name()}.
 */
static <E extends Enum<E>> String enumName(final E state) {
    if (state == null) {
        return "null";
    }
    return state.name();
}
// enumName(null) must yield the literal string "null" rather than throwing NPE.
@Test
void stateNameReturnsNullIfNull() {
    assertEquals("null", enumName(null));
}
/**
 * Removes every action of the given type from this item's action list.
 *
 * @param clazz action type to remove; must be non-null
 * @return true if at least one matching action was removed
 * @throws IllegalArgumentException if clazz is null
 */
@SuppressWarnings("ConstantConditions")
public boolean removeActions(@NonNull Class<? extends Action> clazz) {
    if (clazz == null) {
        throw new IllegalArgumentException("Action type must be non-null");
    }
    // CopyOnWriteArrayList does not support Iterator.remove, so need to do it this way:
    // collect the matches first, then remove them in one bulk operation.
    List<Action> old = new ArrayList<>();
    List<Action> current = getActions();
    for (Action a : current) {
        if (clazz.isInstance(a)) {
            old.add(a);
        }
    }
    return current.removeAll(old);
}
// removeActions removes all actions of the given type and reports whether
// anything changed; a second call with no matches must return false.
@SuppressWarnings("deprecation")
@Test
public void removeActions() {
    CauseAction a1 = new CauseAction();
    ParametersAction a2 = new ParametersAction();
    thing.addAction(a1);
    thing.addAction(a2);
    assertEquals(Arrays.asList(a1, a2), thing.getActions());
    assertThat(thing.removeActions(CauseAction.class), is(true));
    assertEquals(List.of(a2), thing.getActions());
    assertThat(thing.removeActions(CauseAction.class), is(false));
    assertEquals(List.of(a2), thing.getActions());
}
/**
 * Validates a connector configuration against the config definition of the
 * named plugin. Rejects requests where the embedded connector.class does not
 * match the pluginName path parameter, then delegates validation to the herder
 * and waits up to the configured request timeout for the result.
 *
 * @throws BadRequestException if connector.class disagrees with pluginName
 * @throws ConnectRestException (500) on validation timeout or interruption
 */
@PUT
@Path("/{pluginName}/config/validate")
@Operation(summary = "Validate the provided configuration against the configuration definition for the specified pluginName")
public ConfigInfos validateConfigs(
        final @PathParam("pluginName") String pluginName,
        final Map<String, String> connectorConfig
) throws Throwable {
    String includedConnType = connectorConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG);
    if (includedConnType != null
            && !normalizedPluginName(includedConnType).endsWith(normalizedPluginName(pluginName))) {
        throw new BadRequestException(
                "Included connector type " + includedConnType + " does not match request type " + pluginName
        );
    }
    // the validated configs don't need to be logged
    FutureCallback<ConfigInfos> validationCallback = new FutureCallback<>();
    herder.validateConnectorConfig(connectorConfig, validationCallback, false);
    try {
        return validationCallback.get(requestTimeout.timeoutMs(), TimeUnit.MILLISECONDS);
    } catch (StagedTimeoutException e) {
        // Staged timeout carries progress information; surface it in the error message.
        Stage stage = e.stage();
        String message;
        if (stage.completed() != null) {
            message = "Request timed out. The last operation the worker completed was "
                    + stage.description() + ", which began at "
                    + Instant.ofEpochMilli(stage.started()) + " and completed at "
                    + Instant.ofEpochMilli(stage.completed());
        } else {
            message = "Request timed out. The worker is currently "
                    + stage.description() + ", which began at "
                    + Instant.ofEpochMilli(stage.started());
        }
        // This timeout is for the operation itself. None of the timeout error codes are
        // relevant, so internal server error is the best option
        throw new ConnectRestException(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), message);
    } catch (TimeoutException e) {
        // This timeout is for the operation itself. None of the timeout error codes are
        // relevant, so internal server error is the best option
        throw new ConnectRestException(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), "Request timed out");
    } catch (InterruptedException e) {
        throw new ConnectRestException(Response.Status.INTERNAL_SERVER_ERROR.getStatusCode(), "Request interrupted");
    }
}
// Missing connector.class: validation still runs (herder is mocked) and must
// yield the expected partial ConfigInfos, including its error count.
@Test
public void testValidateConfigWithSingleErrorDueToMissingConnectorClassname() throws Throwable {
    @SuppressWarnings("unchecked")
    ArgumentCaptor<Callback<ConfigInfos>> configInfosCallback = ArgumentCaptor.forClass(Callback.class);
    doAnswer(invocation -> {
        ConfigDef connectorConfigDef = ConnectorConfig.configDef();
        List<ConfigValue> connectorConfigValues = connectorConfigDef.validate(PARTIAL_PROPS);
        Connector connector = new ConnectorPluginsResourceTestConnector();
        Config config = connector.validate(PARTIAL_PROPS);
        ConfigDef configDef = connector.config();
        Map<String, ConfigDef.ConfigKey> configKeys = configDef.configKeys();
        List<ConfigValue> configValues = config.configValues();
        Map<String, ConfigDef.ConfigKey> resultConfigKeys = new HashMap<>(configKeys);
        resultConfigKeys.putAll(connectorConfigDef.configKeys());
        configValues.addAll(connectorConfigValues);
        ConfigInfos configInfos = AbstractHerder.generateResult(
                ConnectorPluginsResourceTestConnector.class.getName(),
                resultConfigKeys,
                configValues,
                Collections.singletonList("Test")
        );
        configInfosCallback.getValue().onCompletion(null, configInfos);
        return null;
    }).when(herder).validateConnectorConfig(eq(PARTIAL_PROPS), configInfosCallback.capture(), anyBoolean());
    // This call to validateConfigs does not throw a BadRequestException because we've mocked
    // validateConnectorConfig.
    ConfigInfos configInfos = connectorPluginsResource.validateConfigs(
            ConnectorPluginsResourceTestConnector.class.getSimpleName(),
            PARTIAL_PROPS
    );
    assertEquals(PARTIAL_CONFIG_INFOS.name(), configInfos.name());
    assertEquals(PARTIAL_CONFIG_INFOS.errorCount(), configInfos.errorCount());
    assertEquals(PARTIAL_CONFIG_INFOS.groups(), configInfos.groups());
    assertEquals(
            new HashSet<>(PARTIAL_CONFIG_INFOS.values()),
            new HashSet<>(configInfos.values())
    );
    verify(herder).validateConnectorConfig(eq(PARTIAL_PROPS), any(), anyBoolean());
}
public static short translateBucketAcl(AccessControlList acl, String userId) { short mode = (short) 0; for (Grant grant : acl.getGrantsAsList()) { Permission perm = grant.getPermission(); Grantee grantee = grant.getGrantee(); if (perm.equals(Permission.Read)) { if (isUserIdInGrantee(grantee, userId)) { // If the bucket is readable by the user, add r and x to the owner mode. mode |= (short) 0500; } } else if (perm.equals(Permission.Write)) { if (isUserIdInGrantee(grantee, userId)) { // If the bucket is writable by the user, +w to the owner mode. mode |= (short) 0200; } } else if (perm.equals(Permission.FullControl)) { if (isUserIdInGrantee(grantee, userId)) { // If the user has full control to the bucket, +rwx to the owner mode. mode |= (short) 0700; } } } return mode; }
// A Read grant to the AuthenticatedUsers group must confer r-x on any user id.
@Test
public void translateAuthenticatedUserReadPermission() {
    GroupGrantee authenticatedUsersGrantee = GroupGrantee.AuthenticatedUsers;
    mAcl.grantPermission(authenticatedUsersGrantee, Permission.Read);
    Assert.assertEquals((short) 0500, S3AUtils.translateBucketAcl(mAcl, ID));
    Assert.assertEquals((short) 0500, S3AUtils.translateBucketAcl(mAcl, OTHER_ID));
}
/**
 * Renames the given group and returns the refreshed group information.
 * Default groups are rejected up front (see checkGroupIsNotDefault).
 *
 * @param newName the new group name, or null to keep the current name
 */
public GroupInformation updateGroup(DbSession dbSession, GroupDto group, @Nullable String newName) {
    checkGroupIsNotDefault(dbSession, group);
    return groupDtoToGroupInformation(updateName(dbSession, group, newName), dbSession);
}
// Updating the default group — with or without a description — must be rejected.
@Test
public void updateGroup_whenGroupIsDefault_throws() {
    GroupDto defaultGroup = mockDefaultGroup();
    when(dbClient.groupDao().selectByName(dbSession, DEFAULT_GROUP_NAME)).thenReturn(Optional.of(defaultGroup));
    assertThatExceptionOfType(IllegalArgumentException.class)
            .isThrownBy(() -> groupService.updateGroup(dbSession, defaultGroup, "new-name", "New Description"))
            .withMessage("Default group 'sonar-users' cannot be used to perform this action");
    assertThatExceptionOfType(IllegalArgumentException.class)
            .isThrownBy(() -> groupService.updateGroup(dbSession, defaultGroup, "new-name"))
            .withMessage("Default group 'sonar-users' cannot be used to perform this action");
}
/**
 * FEEL after() for two scalar points: true when point1 compares strictly
 * greater than point2. Produces an InvalidParametersEvent error when either
 * point is null or the points are not mutually comparable.
 */
public FEELFnResult<Boolean> invoke(@ParameterName( "point1" ) Comparable point1, @ParameterName( "point2" ) Comparable point2) {
    if ( point1 == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point1", "cannot be null"));
    }
    if ( point2 == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point2", "cannot be null"));
    }
    try {
        boolean result = point1.compareTo( point2 ) > 0;
        return FEELFnResult.ofResult( result );
    } catch( Exception e ) {
        // points are not comparable
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point1", "cannot be compared to point2"));
    }
}
// Null points must produce InvalidParametersEvent errors, not NPEs.
@Test
void invokeParamIsNull() {
    FunctionTestUtil.assertResultError(afterFunction.invoke((Comparable) null, "b"), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(afterFunction.invoke("a", (Comparable) null), InvalidParametersEvent.class);
}
/**
 * Renders a compact stack trace: for the throwable and each cause that has a
 * non-blank message, emit the message line, the first stack frame, and a
 * "... N more" marker for the elided frames. Causes after the first are
 * prefixed with "Caused by: ".
 *
 * @param t the throwable to summarize
 * @return a multi-line shortened trace string
 */
public static String getShortenedStackTrace(Throwable t) {
    StringBuilder trace = new StringBuilder();
    final List<Throwable> causalChain = Throwables.getCausalChain(t)
            .stream()
            .filter(c -> StringUtils.isNotBlank(c.getMessage()))
            .collect(Collectors.toList());
    int position = 0;
    for (Throwable c : causalChain) {
        if (position > 0) {
            trace.append("Caused by: ");
        }
        appendWithNewline(trace, c);
        // Only the top frame is printed; the rest collapse into "... N more".
        Arrays.stream(c.getStackTrace()).findFirst().ifPresent(firstStackElement -> {
            trace.append("\tat ");
            appendWithNewline(trace, firstStackElement);
            final int more = c.getStackTrace().length - 1;
            if (more > 0) {
                trace.append("\t... ").append(more);
                appendWithNewline(trace, " more");
            }
        });
        position++;
    }
    return trace.toString();
}
// A three-level causal chain must render message + first frame + "... N more"
// per cause, with "Caused by:" prefixes from the second cause onward.
@Test
public void getShortenedStackTrace() {
    final IOException ioException = new IOException("io message");
    setTestStackTrace(ioException, "FileReader", "process", 42);
    final RuntimeException ex1 = new RuntimeException("socket message", ioException);
    setTestStackTrace(ex1, "TCPSocket", "read", 23);
    final RuntimeException ex0 = new RuntimeException("parent message", ex1);
    setTestStackTrace(ex0, "Main", "loop", 78);
    final String shortTrace = ExceptionUtils.getShortenedStackTrace(ex0);
    final String expected = "java.lang.RuntimeException: parent message\n"
            + "\tat Main.loop(Main.java:78)\n"
            + "\t... 1 more\n"
            + "Caused by: java.lang.RuntimeException: socket message\n"
            + "\tat TCPSocket.read(TCPSocket.java:23)\n"
            + "\t... 1 more\n"
            + "Caused by: java.io.IOException: io message\n"
            + "\tat FileReader.process(FileReader.java:42)\n"
            + "\t... 1 more\n";
    assertThat(shortTrace).isEqualTo(expected);
}
/**
 * Reads the next item for the batch by receiving one message body from the
 * configured Camel endpoint via the consumer template.
 */
@Override
@SuppressWarnings("unchecked")
public I read() throws Exception {
    LOG.debug("reading new item...");
    I item = (I) consumerTemplate.receiveBody(endpointUri);
    LOG.debug("read item [{}]", item);
    return item;
}
// read() must return the message body received from the configured endpoint.
@Test
public void shouldReadMessage() throws Exception {
    // When
    String messageRead = camelItemReader.read();
    // Then
    assertEquals(message, messageRead);
}
/** Reports whether no configuration property overrides have been registered. */
public boolean isEmpty() {
    return this.overridingProperties.isEmpty();
}
// A freshly constructed overrides instance must report empty.
@Test
public final void testIfIsEmptyRecognizesThatConfigurationPropertiesOverridesAreEmpty() {
    final ConfigurationPropertiesOverrides objectUnderTest = new ConfigurationPropertiesOverrides();
    assertTrue(
            "isEmpty() should have noticed that the ConfigurationPropertiesOverrides instance is indeed empty. However, it didn't.",
            objectUnderTest.isEmpty());
}
/**
 * Drains available elements into the given collection by blocking on the
 * asynchronous drainToAsync result.
 */
@Override
public int drainTo(Collection<? super V> c) {
    return get(drainToAsync(c));
}
// drainTo(col, max) must remove up to max elements and free capacity accordingly.
@Test
public void testDrainTo() {
    RBoundedBlockingQueue<Integer> queue = redisson.getBoundedBlockingQueue("queue");
    queue.trySetCapacity(100);
    for (int i = 0 ; i < 100; i++) {
        assertThat(queue.offer(i)).isTrue();
    }
    Assertions.assertEquals(100, queue.size());
    Set<Integer> batch = new HashSet<Integer>();
    assertThat(queue.remainingCapacity()).isEqualTo(0);
    int count = queue.drainTo(batch, 10);
    assertThat(queue.remainingCapacity()).isEqualTo(10);
    Assertions.assertEquals(10, count);
    Assertions.assertEquals(10, batch.size());
    Assertions.assertEquals(90, queue.size());
    queue.drainTo(batch, 10);
    assertThat(queue.remainingCapacity()).isEqualTo(20);
    queue.drainTo(batch, 20);
    assertThat(queue.remainingCapacity()).isEqualTo(40);
    queue.drainTo(batch, 60);
    assertThat(queue.remainingCapacity()).isEqualTo(100);
    Assertions.assertEquals(0, queue.size());
}
/**
 * Adds the given providers to the model and selects the first model object.
 * No-op for a null or empty list.
 */
public void addProviders( List<NamedProvider> providers ) {
    if ( providers == null || providers.isEmpty() ) {
        return;
    }
    for ( NamedProvider provider : providers ) {
        model.add( provider );
    }
    // Index 0 is safe here: at least one provider was just added above.
    model.setSelectedItem( model.getModelObjects().get( 0 ) );
}
// After adding the fixture providers the model must contain all 8 entries.
@Test
public void testAddProviders() {
    controller.addProviders( providers );
    assertEquals( 8, controller.getModel().getModelObjects().size() );
}
/**
 * Returns true when the given (metrics-escaped) UFS path corresponds to one of
 * the master's mount points. Both sides are normalized with a trailing
 * separator before comparison, and the mount-table UFS URIs are escaped via
 * MetricsSystem.escape to match the caller-supplied form.
 */
@VisibleForTesting
boolean isMounted(String ufs) {
    ufs = PathUtils.normalizePath(ufs, AlluxioURI.SEPARATOR);
    for (Map.Entry<String, MountPointInfo> entry
            : mFileSystemMaster.getMountPointInfoSummary(false).entrySet()) {
        String escaped = MetricsSystem.escape(new AlluxioURI(entry.getValue().getUfsUri()));
        escaped = PathUtils.normalizePath(escaped, AlluxioURI.SEPARATOR);
        if (escaped.equals(ufs)) {
            return true;
        }
    }
    return false;
}
// Only the escaped Alluxio-URI form of an actually mounted UFS path counts as
// mounted; raw URIs and unmounted paths must report false.
@Test
public void isMounted() {
    String s3Uri = "s3a://test/dir_1/dir-2";
    String hdfsUri = "hdfs://test";
    Map<String, MountPointInfo> mountTable = new HashMap<>();
    mountTable.put("/s3", new MountPointInfo().setUfsUri(s3Uri));
    FileSystemMaster mockMaster = mock(FileSystemMaster.class);
    when(mockMaster.getMountPointInfoSummary(false)).thenReturn(mountTable);
    AlluxioMasterProcess masterProcess = PowerMockito.mock(AlluxioMasterProcess.class);
    when(masterProcess.getMaster(FileSystemMaster.class)).thenReturn(mockMaster);
    ServletContext context = mock(ServletContext.class);
    when(context.getAttribute(MasterWebServer.ALLUXIO_MASTER_SERVLET_RESOURCE_KEY)).thenReturn(
        masterProcess);
    AlluxioMasterRestServiceHandler handler = new AlluxioMasterRestServiceHandler(context);
    assertFalse(handler.isMounted(s3Uri));
    assertTrue(handler.isMounted(MetricsSystem.escape(new AlluxioURI(s3Uri))));
    assertTrue(handler.isMounted(MetricsSystem.escape(new AlluxioURI(s3Uri + "/"))));
    assertFalse(handler.isMounted(hdfsUri));
    assertFalse(handler.isMounted(MetricsSystem.escape(new AlluxioURI(hdfsUri))));
}
List<Token> tokenize() throws ScanException { List<Token> tokenList = new ArrayList<Token>(); StringBuilder buf = new StringBuilder(); while (pointer < patternLength) { char c = pattern.charAt(pointer); pointer++; switch (state) { case LITERAL_STATE: handleLiteralState(c, tokenList, buf); break; case START_STATE: handleStartState(c, tokenList, buf); break; case DEFAULT_VAL_STATE: handleDefaultValueState(c, tokenList, buf); default: } } // EOS switch (state) { case LITERAL_STATE: addLiteralToken(tokenList, buf); break; case DEFAULT_VAL_STATE: // trailing colon. see also LOGBACK-1140 buf.append(CoreConstants.COLON_CHAR); addLiteralToken(tokenList, buf); break; case START_STATE: // trailing $. see also LOGBACK-1149 buf.append(CoreConstants.DOLLAR); addLiteralToken(tokenList, buf); break; } return tokenList; }
// A plain literal input must tokenize to a single LITERAL token.
@Test
public void literalOnly() throws ScanException {
    String input = "abc";
    Tokenizer tokenizer = new Tokenizer(input);
    List<Token> tokenList = tokenizer.tokenize();
    witnessList.add(new Token(Token.Type.LITERAL, input));
    assertEquals(witnessList, tokenList);
}
/**
 * Filters service instances by the configured routing rules for the target
 * service, then passes the narrowed list down the handler chain. Instances are
 * returned unchanged when this handler should not apply.
 */
@Override
public List<Object> handle(String targetName, List<Object> instances, RequestData requestData) {
    if (!shouldHandle(instances)) {
        return instances;
    }
    List<Object> result = getTargetInstancesByRules(targetName, instances);
    return super.handle(targetName, result, requestData);
}
// With a tag-match rule loaded and group=red app metadata, only the 1.0.1
// instance must survive routing; the rule is reset afterwards.
@Test
public void testGetTargetInstancesByTagRules() {
    RuleInitializationUtils.initTagMatchRule();
    List<Object> instances = new ArrayList<>();
    ServiceInstance instance1 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.0");
    instances.add(instance1);
    ServiceInstance instance2 = TestDefaultServiceInstance.getTestDefaultServiceInstance("1.0.1");
    instances.add(instance2);
    Map<String, String> metadata = new HashMap<>();
    metadata.put("group", "red");
    AppCache.INSTANCE.setMetadata(metadata);
    List<Object> targetInstances = tagRouteHandler.handle("foo", instances, new RequestData(null, null, null));
    Assert.assertEquals(1, targetInstances.size());
    Assert.assertEquals(instance2, targetInstances.get(0));
    ConfigCache.getLabel(RouterConstant.SPRING_CACHE_NAME).resetRouteRule(Collections.emptyMap());
}
/**
 * Signs the given claims into a compact JWT (JWS, RS256) using the private key
 * configured in security.yml, prefixing the kid header with a two-digit
 * provider id when one is configured.
 *
 * @param claims the JWT claims to sign
 * @return the compact JWS serialization (Header.Payload.Signature)
 * @throws JoseException if signing fails
 */
@Deprecated
public static String getJwt(JwtClaims claims) throws JoseException {
    String jwt;
    RSAPrivateKey privateKey = (RSAPrivateKey) getPrivateKey(
            jwtConfig.getKey().getFilename(),jwtConfig.getKey().getPassword(), jwtConfig.getKey().getKeyName());
    // A JWT is a JWS and/or a JWE with JSON claims as the payload.
    // In this example it is a JWS nested inside a JWE
    // So we first create a JsonWebSignature object.
    JsonWebSignature jws = new JsonWebSignature();
    // The payload of the JWS is JSON content of the JWT Claims
    jws.setPayload(claims.toJson());
    // The JWT is signed using the sender's private key
    jws.setKey(privateKey);
    // Get provider from security config file, it should be two digit
    // And the provider id will set as prefix for keyid in the token header, for example: 05100
    // NOTE(review): when no provider id is configured the prefix is the empty string,
    // not "00" as an earlier comment suggested — confirm intended behavior.
    String provider_id = "";
    if (jwtConfig.getProviderId() != null) {
        provider_id = jwtConfig.getProviderId();
        if (provider_id.length() == 1) {
            // Left-pad a single digit to two characters.
            provider_id = "0" + provider_id;
        } else if (provider_id.length() > 2) {
            // Over-long ids are truncated (with an error log) rather than rejected.
            logger.error("provider_id defined in the security.yml file is invalid; the length should be 2");
            provider_id = provider_id.substring(0, 2);
        }
    }
    jws.setKeyIdHeaderValue(provider_id + jwtConfig.getKey().getKid());
    // Set the signature algorithm on the JWT/JWS that will integrity protect the claims
    jws.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256);
    // Sign the JWS and produce the compact serialization, which will be the inner JWT/JWS
    // representation, which is a string consisting of three dot ('.') separated
    // base64url-encoded parts in the form Header.Payload.Signature
    jwt = jws.getCompactSerialization();
    return jwt;
}
// Issues a long-lived (10-year) portal token for manual use and prints it.
// Fix: assert the issued token so a signing failure actually fails the test —
// previously the test only printed and could never fail.
@Test
public void longlivedLightPortalLightapi() throws Exception {
    JwtClaims claims = ClaimsUtil.getTestClaims("stevehu@gmail.com", "EMPLOYEE", "f7d42348-c647-4efb-a52d-4c5787421e72", Arrays.asList("portal.r", "portal.w"), "user lightapi.net admin");
    claims.setExpirationTimeMinutesInTheFuture(5256000);
    String jwt = JwtIssuer.getJwt(claims, long_kid, KeyUtil.deserializePrivateKey(long_key, KeyUtil.RSA));
    assertNotNull(jwt);
    System.out.println("***Long lived token for portal lightapi***: " + jwt);
}
/** Returns the root concept of this taxonomy. */
public Concept getRoot() {
    return root;
}
// getPathFromRoot must return the chain ordered root -> ... -> the node itself.
@Test
public void testGetPathFromRoot() {
    System.out.println("getPathToRoot");
    LinkedList<Concept> expResult = new LinkedList<>();
    expResult.add(taxonomy.getRoot());
    expResult.add(ad);
    expResult.add(a);
    expResult.add(c);
    expResult.add(f);
    List<Concept> result = f.getPathFromRoot();
    assertEquals(expResult, result);
}
/**
 * Applies the given mappings to every JVM class in the workspace's primary
 * resource, fanning the per-class work out across the applier thread pool and
 * collecting outcomes into a MappingResults preview. Nothing is written back
 * to the workspace here; callers invoke results.apply() for that.
 */
@Nonnull
public MappingResults applyToPrimaryResource(@Nonnull Mappings mappings) {
    mappings = enrich(mappings);
    WorkspaceResource resource = workspace.getPrimaryResource();
    MappingResults results = new MappingResults(mappings, listeners.createBundledMappingApplicationListener())
            .withAggregateManager(aggregateMappingManager);
    // Apply mappings to all classes in the primary resource, collecting into the results model.
    // (Effectively-final copy needed so the lambda below can capture it.)
    Mappings finalMappings = mappings;
    ExecutorService service = ThreadUtil.phasingService(applierThreadPool);
    Stream.concat(resource.jvmClassBundleStream(), resource.versionedJvmClassBundleStream()).forEach(bundle -> {
        bundle.forEach(classInfo -> {
            service.execute(() -> dumpIntoResults(results, workspace, resource, bundle, classInfo, finalMappings));
        });
    });
    ThreadUtil.blockUntilComplete(service);
    // Yield results
    return results;
}
// Generating mappings for everything except AnonymousLambda must still rewrite
// AnonymousLambda's references to the renamed StringSupplier, leave JDK
// references intact, and only feed the aggregate mappings after results.apply().
@Test
void applyAnonymousLambda() {
    String stringSupplierName = StringSupplier.class.getName().replace('.', '/');
    String anonymousLambdaName = AnonymousLambda.class.getName().replace('.', '/');
    // Create mappings for all classes but the runner 'AnonymousLambda'
    Mappings mappings = mappingGenerator.generate(workspace, resource, inheritanceGraph, nameGenerator,
            new NameGeneratorFilter(null, true) {
        @Override
        public boolean shouldMapClass(@Nonnull ClassInfo info) {
            return !info.getName().equals(anonymousLambdaName);
        }

        @Override
        public boolean shouldMapMethod(@Nonnull ClassInfo owner, @Nonnull MethodMember method) {
            return shouldMapClass(owner);
        }
    });
    // Preview the mapping operation
    MappingResults results = mappingApplier.applyToPrimaryResource(mappings);
    // The supplier class we define should be remapped.
    // The runner class (AnonymousLambda) itself should not be remapped, but should be updated to point to
    // the new StringSupplier class name.
    String mappedStringSupplierName = mappings.getMappedClassName(stringSupplierName);
    assertNotNull(mappedStringSupplierName, "StringSupplier should be remapped");
    assertNull(mappings.getMappedClassName(anonymousLambdaName), "AnonymousLambda should not be remapped");
    assertTrue(results.wasMapped(stringSupplierName), "StringSupplier should have updated");
    assertTrue(results.wasMapped(anonymousLambdaName), "AnonymousLambda should have updated");
    // Verify that the original name is stored as a property.
    ClassPathNode classPath = results.getPostMappingPath(stringSupplierName);
    assertNotNull(classPath, "Could not find mapped StringSupplier in workspace");
    JvmClassInfo mappedStringSupplier = classPath.getValue().asJvmClass();
    assertEquals(stringSupplierName, OriginalClassNameProperty.get(mappedStringSupplier),
            "Did not record original name after applying mappings");
    // Assert that the method is still runnable.
    String result = runMapped(AnonymousLambda.class, "run");
    assertTrue(result.contains("One: java.util.function.Supplier"), "JDK class reference should not be mapped");
    assertFalse(result.contains(stringSupplierName),
            "Class reference to '" + stringSupplierName + "' should have been remapped");
    // Assert aggregate updated too.
    // We will validate this is only done AFTER 'results.apply()' is run.
    // For future tests we will skip this since if it works here, it works there.
    AggregatedMappings aggregatedMappings = aggregateMappingManager.getAggregatedMappings();
    assertNull(aggregatedMappings.getMappedClassName(stringSupplierName),
            "StringSupplier should not yet be tracked in aggregate");
    results.apply();
    assertNotNull(aggregatedMappings.getMappedClassName(stringSupplierName),
            "StringSupplier should be tracked in aggregate");
}
/**
 * Flood protection: counts consecutive empty DATA frames without the
 * end_of_stream flag. Once the count exceeds maxConsecutiveEmptyFrames, the
 * connection is failed with ENHANCE_YOUR_CALM (only once — violationDetected
 * latches). Any readable or end-of-stream frame resets the counter.
 */
@Override
public int onDataRead(ChannelHandlerContext ctx, int streamId, ByteBuf data, int padding, boolean endOfStream) throws Http2Exception {
    if (endOfStream || data.isReadable()) {
        emptyDataFrames = 0;
    } else if (emptyDataFrames++ == maxConsecutiveEmptyFrames && !violationDetected) {
        violationDetected = true;
        throw Http2Exception.connectionError(Http2Error.ENHANCE_YOUR_CALM,
                "Maximum number %d of empty data frames without end_of_stream flag received",
                maxConsecutiveEmptyFrames);
    }
    return super.onDataRead(ctx, streamId, data, padding, endOfStream);
}
// An end_of_stream frame must reset the empty-frame counter: one empty, then
// EOS, then two more empties pass through; the next empty exceeds the limit of
// 2 and must fail the connection.
@Test
public void testEmptyDataFramesWithEndOfStreamInBetween() throws Http2Exception {
    final Http2EmptyDataFrameListener listener = new Http2EmptyDataFrameListener(frameListener, 2);
    listener.onDataRead(ctx, 1, Unpooled.EMPTY_BUFFER, 0, false);
    listener.onDataRead(ctx, 1, Unpooled.EMPTY_BUFFER, 0, true);
    listener.onDataRead(ctx, 1, Unpooled.EMPTY_BUFFER, 0, false);
    listener.onDataRead(ctx, 1, Unpooled.EMPTY_BUFFER, 0, false);
    assertThrows(Http2Exception.class, new Executable() {
        @Override
        public void execute() throws Throwable {
            listener.onDataRead(ctx, 1, Unpooled.EMPTY_BUFFER, 0, false);
        }
    });
    verify(frameListener, times(1)).onDataRead(eq(ctx), eq(1), any(ByteBuf.class), eq(0), eq(true));
    verify(frameListener, times(3)).onDataRead(eq(ctx), eq(1), any(ByteBuf.class), eq(0), eq(false));
}
/**
 * Returns a new MetricName with the given tags merged into this name's tags.
 * NOTE(review): this.tags are copied last, so on key collisions the existing
 * tags take precedence over entries in {@code add} — confirm this is intended.
 */
public MetricName tagged(Map<String, String> add) {
    final Map<String, String> tags = new HashMap<>(add);
    tags.putAll(this.tags);
    return new MetricName(key, tags);
}
// tagged(varargs) must reject an odd number of arguments (unpaired key).
@Test(expected=IllegalArgumentException.class)
public void testTaggedNotPairs2() {
    MetricName.EMPTY.tagged("foo", "bar", "baz");
}