Dataset columns:
  focal_method — string (lengths 13 to 60.9k)
  test_case — string (lengths 25 to 109k)
/**
 * Hash code over table name, columns, indexes and case sensitivity.
 */
@Override
public int hashCode() {
    // Objects.hash combines the fields with an order-sensitive polynomial,
    // giving better distribution than the previous plain sum of the four
    // individual hashes, while staying consistent with any equals() built
    // on the same four fields.
    return Objects.hash(tableName, allColumns, allIndexes, isCaseSensitive);
}
// Equal metas must hash equal; changing tableName must change the hash.
@Test public void testHashCode() { assertEquals(tableMeta.hashCode(), tableMeta2.hashCode()); tableMeta2.setTableName("different_table"); assertNotEquals(tableMeta.hashCode(), tableMeta2.hashCode()); }
/**
 * Validates user-supplied extension metadata, collecting one Issue per
 * violation: field size limits, character-set checks, URL shape, and
 * membership in enumerated value sets (markdown, gallery theme, qna).
 * The whole validation runs inside an Observation for instrumentation.
 * Returns the (possibly empty) list of issues; never throws for invalid
 * metadata.
 */
public List<Issue> validateMetadata(ExtensionVersion extVersion) { return Observation.createNotStarted("ExtensionValidator#validateMetadata", observations).observe(() -> { var issues = new ArrayList<Issue>(); checkVersion(extVersion.getVersion(), issues); checkTargetPlatform(extVersion.getTargetPlatform(), issues); checkCharacters(extVersion.getDisplayName(), "displayName", issues); checkFieldSize(extVersion.getDisplayName(), DEFAULT_STRING_SIZE, "displayName", issues); checkCharacters(extVersion.getDescription(), "description", issues); checkFieldSize(extVersion.getDescription(), DESCRIPTION_SIZE, "description", issues); checkCharacters(extVersion.getCategories(), "categories", issues); checkFieldSize(extVersion.getCategories(), DEFAULT_STRING_SIZE, "categories", issues); checkCharacters(extVersion.getTags(), "keywords", issues); checkFieldSize(extVersion.getTags(), DEFAULT_STRING_SIZE, "keywords", issues); checkCharacters(extVersion.getLicense(), "license", issues); checkFieldSize(extVersion.getLicense(), DEFAULT_STRING_SIZE, "license", issues); checkURL(extVersion.getHomepage(), "homepage", issues); checkFieldSize(extVersion.getHomepage(), DEFAULT_STRING_SIZE, "homepage", issues); checkURL(extVersion.getRepository(), "repository", issues); checkFieldSize(extVersion.getRepository(), DEFAULT_STRING_SIZE, "repository", issues); checkURL(extVersion.getBugs(), "bugs", issues); checkFieldSize(extVersion.getBugs(), DEFAULT_STRING_SIZE, "bugs", issues); checkInvalid(extVersion.getMarkdown(), s -> !MARKDOWN_VALUES.contains(s), "markdown", issues, MARKDOWN_VALUES.toString()); checkCharacters(extVersion.getGalleryColor(), "galleryBanner.color", issues); checkFieldSize(extVersion.getGalleryColor(), GALLERY_COLOR_SIZE, "galleryBanner.color", issues); checkInvalid(extVersion.getGalleryTheme(), s -> !GALLERY_THEME_VALUES.contains(s), "galleryBanner.theme", issues, GALLERY_THEME_VALUES.toString()); checkFieldSize(extVersion.getLocalizedLanguages(), DEFAULT_STRING_SIZE, 
"localizedLanguages", issues); checkInvalid(extVersion.getQna(), s -> !QNA_VALUES.contains(s) && isInvalidURL(s), "qna", issues, QNA_VALUES.toString() + " or a URL"); checkFieldSize(extVersion.getQna(), DEFAULT_STRING_SIZE, "qna", issues); return issues; }); }
// Repository URLs using the mailto: scheme must pass metadata validation.
@Test public void testMailtoURL() { var extension = new ExtensionVersion(); extension.setTargetPlatform(TargetPlatform.NAME_UNIVERSAL); extension.setVersion("1.0.0"); extension.setRepository("mailto:foo@bar.net"); var issues = validator.validateMetadata(extension); assertThat(issues).isEmpty(); }
// Creates or updates the active alarm described by the request. Null
// customer/edge ids fall back to CustomerId.NULL_UUID / a random UUID;
// propagation settings are normalized via getSafePropagationInfo before
// the flattened arguments are handed to the repository.
@Override public AlarmApiCallResult createOrUpdateActiveAlarm(AlarmCreateOrUpdateActiveRequest request, boolean alarmCreationEnabled) { UUID tenantUUID = request.getTenantId().getId(); log.debug("[{}] createOrUpdateActiveAlarm [{}] {}", tenantUUID, alarmCreationEnabled, request); AlarmPropagationInfo ap = getSafePropagationInfo(request.getPropagation()); return toAlarmApiResult(alarmRepository.createOrUpdateActiveAlarm( tenantUUID, request.getCustomerId() != null ? request.getCustomerId().getId() : CustomerId.NULL_UUID, request.getEdgeAlarmId() != null ? request.getEdgeAlarmId().getId() : UUID.randomUUID(), System.currentTimeMillis(), request.getOriginator().getId(), request.getOriginator().getEntityType().ordinal(), request.getType(), request.getSeverity().name(), request.getStartTs(), request.getEndTs(), getDetailsAsString(request.getDetails()), ap.isPropagate(), ap.isPropagateToOwner(), ap.isPropagateToTenant(), getPropagationTypes(ap), alarmCreationEnabled )); }
// With alarm creation disabled, the call for a not-yet-existing alarm must not report success.
@Test public void testCantCreateAlarmIfCreateIsDisabled() { TenantId tenantId = TenantId.fromUUID(UUID.randomUUID()); DeviceId deviceId = new DeviceId(UUID.randomUUID()); AlarmCreateOrUpdateActiveRequest request = AlarmCreateOrUpdateActiveRequest.builder() .tenantId(tenantId) .originator(deviceId) .type("ALARM_TYPE") .severity(AlarmSeverity.MAJOR) .build(); AlarmApiCallResult result = alarmDao.createOrUpdateActiveAlarm(request, false); assertFalse(result.isSuccessful()); }
// Builds a SAML ArtifactResponse for the given ArtifactResolve request:
// SUCCESS status code, InResponseTo wired to the resolve request's ID,
// issuer set to entityId, the built Response as the message payload, and
// a signature of the requested SignType.
public ArtifactResponse buildArtifactResponse(ArtifactResolveRequest artifactResolveRequest, String entityId, SignType signType) throws InstantiationException, ValidationException, ArtifactBuildException, BvdException { final var artifactResponse = OpenSAMLUtils.buildSAMLObject(ArtifactResponse.class); final var status = OpenSAMLUtils.buildSAMLObject(Status.class); final var statusCode = OpenSAMLUtils.buildSAMLObject(StatusCode.class); final var issuer = OpenSAMLUtils.buildSAMLObject(Issuer.class); return ArtifactResponseBuilder .newInstance(artifactResponse) .addID() .addIssueInstant() .addInResponseTo(artifactResolveRequest.getArtifactResolve().getID()) .addStatus(StatusBuilder .newInstance(status) .addStatusCode(statusCode, StatusCode.SUCCESS) .build()) .addIssuer(issuer, entityId) .addMessage(buildResponse(artifactResolveRequest, entityId, signType)) .addSignature(signatureService, signType) .build(); }
// An invalid requester id should produce a Requester status with a nested NoSupportedIDP status code.
@Test void parseArtifactResolveInvalidRequesterId() throws ValidationException, SamlParseException, ArtifactBuildException, BvdException, InstantiationException { ArtifactResponse artifactResponse = artifactResponseService.buildArtifactResponse(getArtifactResolveRequest("invalid", true, false, SAML_COMBICONNECT, EncryptionType.BSN, ENTRANCE_ENTITY_ID), ENTRANCE_ENTITY_ID, TD); assertEquals("urn:oasis:names:tc:SAML:2.0:status:Requester", ((Response) artifactResponse.getMessage()).getStatus().getStatusCode().getValue()); assertEquals("urn:oasis:names:tc:SAML:2.0:status:NoSupportedIDP", ((Response) artifactResponse.getMessage()).getStatus().getStatusCode().getStatusCode().getValue()); }
/**
 * Returns true if any throwable in {@code t}'s causal chain is an
 * interruption, as determined by {@code isInterrupted}.
 */
public static boolean containsInterruptedException(Throwable t) {
  // Iterables.any short-circuits on the first match, avoiding the
  // filter-then-isEmpty round trip of the previous implementation.
  return Iterables.any(Throwables.getCausalChain(t), x -> isInterrupted(x));
}
// Interruption is detected anywhere in the causal chain (t1 nested, t2 InterruptedIOException, t3 direct) but not for plain IOExceptions (t4).
@Test public void containsInterruptedException() { Throwable t1 = new IOException(new RuntimeException(new InterruptedException())); Throwable t2 = new IOException(new InterruptedIOException()); Throwable t3 = new InterruptedException(); Throwable t4 = new IOException(new RuntimeException(new IOException())); assertTrue(ExceptionUtils.containsInterruptedException(t1)); assertTrue(ExceptionUtils.containsInterruptedException(t2)); assertTrue(ExceptionUtils.containsInterruptedException(t3)); assertFalse(ExceptionUtils.containsInterruptedException(t4)); }
/**
 * Tags the span with the handling controller's class name and, when the
 * handler is a HandlerMethod, the handling method's name as well.
 */
protected void preHandle(HttpServletRequest request, Object handler, SpanCustomizer customizer) {
  if (!WebMvcRuntime.get().isHandlerMethod(handler)) {
    // Not a HandlerMethod: the handler's own type is the best class tag available.
    customizer.tag(CONTROLLER_CLASS, handler.getClass().getSimpleName());
    return;
  }
  HandlerMethod method = (HandlerMethod) handler;
  customizer.tag(CONTROLLER_CLASS, method.getBeanType().getSimpleName());
  customizer.tag(CONTROLLER_METHOD, method.getMethod().getName());
}
// Expects exactly one tag (the controller class) and no other interactions with the request or customizer.
@Test void preHandle_Handler_addsClassTag() { parser.preHandle(request, controller, customizer); verify(customizer).tag("mvc.controller.class", "TestController"); verifyNoMoreInteractions(request, customizer); }
/**
 * Before an action runs: logs a deprecation message when the endpoint
 * itself is deprecated, then logs a message for each declared parameter.
 */
@Override
public void preAction(WebService.Action action, Request request) {
  final Level level = getLogLevel();
  final String deprecatedSince = action.deprecatedSince();
  if (deprecatedSince != null) {
    logWebServiceMessage(level, deprecatedSince);
  }
  for (WebService.Param param : action.params()) {
    logParamMessage(request, level, param);
  }
}
// A deprecated parameter (with a replacement key) present in the request must log the
// deprecation warning at the level implied by the session type (data-provided).
@Test @UseDataProvider("userSessions") public void preAction_whenParameterIsDeprecatedAndHasReplacementAndBrowserSession_shouldLogWarning(boolean isLoggedIn, boolean isAuthenticatedBrowserSession, Level expectedLogLevel) { when(userSession.hasSession()).thenReturn(true); when(userSession.isLoggedIn()).thenReturn(isLoggedIn); when(userSession.isAuthenticatedBrowserSession()).thenReturn(isAuthenticatedBrowserSession); WebService.Action action = mock(WebService.Action.class); when(action.path()).thenReturn("api/issues/search"); when(action.deprecatedSince()).thenReturn(null); WebService.Param mockParam = mock(WebService.Param.class); when(mockParam.deprecatedKeySince()).thenReturn("9.6"); when(mockParam.deprecatedKey()).thenReturn("sansTop25"); when(mockParam.key()).thenReturn("sansTop25New"); when(action.params()).thenReturn(List.of(mockParam)); when(action.param("sansTop25")).thenReturn(mockParam); Request request = mock(Request.class); Request.StringParam stringParam = mock(Request.StringParam.class); when(stringParam.isPresent()).thenReturn(true); when(request.hasParam("sansTop25")).thenReturn(true); when(request.getParams()).thenReturn(Map.of("sansTop25", new String[]{})); underTest.preAction(action, request); assertThat(logTester.logs(expectedLogLevel)) .contains("Parameter 'sansTop25' is deprecated since 9.6 and will be removed in a future version."); }
// Lists reservations across federated sub-clusters: rejects null/incomplete
// requests, invokes listReservations concurrently on all sub-clusters,
// records failure/success metrics and audit entries, and merges the
// per-cluster responses into one.
@Override public ReservationListResponse listReservations( ReservationListRequest request) throws YarnException, IOException { if (request == null || request.getReservationId() == null) { routerMetrics.incrListReservationsFailedRetrieved(); String msg = "Missing listReservations request."; RouterAuditLogger.logFailure(user.getShortUserName(), LIST_RESERVATIONS, UNKNOWN, TARGET_CLIENT_RM_SERVICE, msg); RouterServerUtil.logAndThrowException(msg, null); } long startTime = clock.getTime(); ClientMethod remoteMethod = new ClientMethod("listReservations", new Class[] {ReservationListRequest.class}, new Object[] {request}); Collection<ReservationListResponse> listResponses = null; try { listResponses = invokeConcurrent(remoteMethod, ReservationListResponse.class); } catch (Exception ex) { String msg = "Unable to list reservations node due to exception."; routerMetrics.incrListReservationsFailedRetrieved(); RouterAuditLogger.logFailure(user.getShortUserName(), LIST_RESERVATIONS, UNKNOWN, TARGET_CLIENT_RM_SERVICE, msg); RouterServerUtil.logAndThrowException(msg, ex); } long stopTime = clock.getTime(); routerMetrics.succeededListReservationsRetrieved(stopTime - startTime); RouterAuditLogger.logSuccess(user.getShortUserName(), LIST_RESERVATIONS, TARGET_CLIENT_RM_SERVICE); // Merge the ReservationListResponse return RouterYarnClientUtils.mergeReservationsList(listResponses); }
// Null requests are rejected with a YarnException; a well-formed request merges to an empty allocation list.
@Test public void testListReservations() throws Exception { LOG.info("Test FederationClientInterceptor : Get ListReservations request."); // null request LambdaTestUtils.intercept(YarnException.class, "Missing listReservations request.", () -> interceptor.listReservations(null)); // normal request ReservationId reservationId = ReservationId.newInstance(1653487680L, 1L); ReservationListResponse response = interceptor.listReservations( ReservationListRequest.newInstance("root.decided", reservationId.toString())); Assert.assertNotNull(response); Assert.assertEquals(0, response.getReservationAllocationState().size()); }
/**
 * Returns a traced wrapper around the given XA topic connection. Per the
 * doesntDoubleWrap test, TracingXAConnection.create returns the same
 * instance when handed an already-traced connection.
 */
public XATopicConnection xaTopicConnection(XATopicConnection connection) {
  final XATopicConnection traced = TracingXAConnection.create(connection, this);
  return traced;
}
// Wrapping an already-traced connection must return the same instance, not a nested wrapper.
@Test void xaTopicConnection_doesntDoubleWrap() { XATopicConnection wrapped = jmsTracing.xaTopicConnection(mock(XATopicConnection.class)); assertThat(jmsTracing.xaTopicConnection(wrapped)) .isSameAs(wrapped); }
/**
 * Returns true iff the input parses as JSON to a present (non-missing)
 * node. Null input and unparseable input both yield false.
 */
@Udf
public boolean check(@UdfParameter(description = "The input JSON string") final String input) {
  if (input == null) {
    return false;
  }
  try {
    final boolean missing = UdfJsonMapper.parseJson(input).isMissingNode();
    return !missing;
  } catch (final KsqlFunctionException e) {
    // Unparseable input is simply not valid JSON.
    return false;
  }
}
// A JSON array literal parses to a present node, so check(...) returns true.
@Test public void shouldInterpretArray() { assertTrue(udf.check("[1, 2, 3]")); }
/**
 * Resolves the file pattern — the context configuration's "pattern"
 * property when present, otherwise the default — and returns the matching
 * files under the checkout directory.
 */
public File[] getFiles(File configRepoCheckoutDirectory, PartialConfigLoadContext context) {
  final Configuration configuration = context.configuration();
  String pattern = defaultPattern;
  if (configuration != null) {
    final ConfigurationProperty overridden = configuration.getProperty("pattern");
    if (overridden != null) {
      pattern = overridden.getValue();
    }
  }
  return getFiles(configRepoCheckoutDirectory, pattern);
}
// Only files matching the default pattern are returned (pipe1.gcd.xml excluded), including files in subdirectories.
@Test public void shouldGetFilesToLoadMatchingPattern() throws Exception { GoConfigMother mother = new GoConfigMother(); PipelineConfig pipe1 = mother.cruiseConfigWithOnePipelineGroup().getAllPipelineConfigs().get(0); File file1 = helper.addFileWithPipeline("pipe1.gocd.xml", pipe1); File file2 = helper.addFileWithPipeline("pipe1.gcd.xml", pipe1); File file3 = helper.addFileWithPipeline("subdir/pipe1.gocd.xml", pipe1); File file4 = helper.addFileWithPipeline("subdir/sub/pipe1.gocd.xml", pipe1); File[] matchingFiles = xmlPartialProvider.getFiles(tmpFolder, mock(PartialConfigLoadContext.class)); File[] expected = new File[] {file1, file3, file4}; assertArrayEquals(expected, matchingFiles, "Matched files are: " + List.of(matchingFiles)); }
// Test hook: clears stale state, then runs full initialization with the port-resolved config.
@VisibleForTesting void startKsql(final KsqlConfig ksqlConfigWithPort) { cleanupOldState(); initialize(ksqlConfigWithPort); }
// Prior commands must be replayed before the server state is marked ready (strict ordering).
@Test public void shouldReplayCommandsBeforeSettingReady() { // When: app.startKsql(ksqlConfig); // Then: final InOrder inOrder = Mockito.inOrder(commandRunner, serverState); inOrder.verify(commandRunner).processPriorCommands(queryCleanupArgumentCaptor.capture()); inOrder.verify(serverState).setReady(); }
// Delegates variable copying to the backing VariableSpace.
@Override public void copyVariablesFrom( VariableSpace space ) { variables.copyVariablesFrom( space ); }
// Variables copied from another space become readable; the supplied default "y" must be ignored for an existing variable.
@Test public void testCopyVariablesFrom() { assertNull( meta.getVariable( "var1" ) ); VariableSpace vars = mock( VariableSpace.class ); when( vars.getVariable( "var1" ) ).thenReturn( "x" ); when( vars.listVariables() ).thenReturn( new String[]{ "var1" } ); meta.copyVariablesFrom( vars ); assertEquals( "x", meta.getVariable( "var1", "y" ) ); }
/**
 * Assembles the Controller: defaults nowSupplier (Instant::now), minDelay
 * (5 ms) and maxDelay (1000 s) when unset or non-positive, validates the
 * delay ordering and required collaborators, then wires
 * queue -> matchers -> watcher -> synchronizer into a DefaultController.
 */
public Controller build() { if (nowSupplier == null) { nowSupplier = Instant::now; } if (minDelay == null || minDelay.isNegative() || minDelay.isZero()) { minDelay = Duration.ofMillis(5); } if (maxDelay == null || maxDelay.isNegative() || maxDelay.isZero()) { maxDelay = Duration.ofSeconds(1000); } Assert.isTrue(minDelay.compareTo(maxDelay) <= 0, "Min delay must be less than or equal to max delay"); Assert.notNull(extension, "Extension must not be null"); Assert.notNull(reconciler, "Reconciler must not be null"); var queue = new DefaultQueue<Request>(nowSupplier, minDelay); var extensionMatchers = WatcherExtensionMatchers.builder(client, extension.groupVersionKind()) .onAddMatcher(onAddMatcher) .onUpdateMatcher(onUpdateMatcher) .onDeleteMatcher(onDeleteMatcher) .build(); var watcher = new ExtensionWatcher(queue, extensionMatchers); var synchronizer = new RequestSynchronizer(syncAllOnStart, client, extension, watcher, determineSyncAllListOptions()); return new DefaultController<>(name, reconciler, queue, synchronizer, minDelay, maxDelay, workerCount); }
// A null reconciler must fail fast at build() time with an IllegalArgumentException.
@Test void buildWithNullReconciler() { assertThrows(IllegalArgumentException.class, () -> new ControllerBuilder(null, client).build(), "Reconciler must not be null"); }
// Schema builder for this logical type: int32 carrying the logical name at version 1.
public static SchemaBuilder builder() { return SchemaBuilder.int32() .name(LOGICAL_NAME) .version(1); }
// The shared Date.SCHEMA must carry the logical name and version 1.
@Test public void testBuilder() { Schema plain = Date.SCHEMA; assertEquals(Date.LOGICAL_NAME, plain.name()); assertEquals(1, (Object) plain.version()); }
// Executes a prepared plan. KsqlStatementException passes through untouched
// (it already carries statement context); any other KsqlException is
// re-thrown as a KsqlStatementException with the offending statement text
// attached for better error reporting.
@Override public ExecuteResult execute(final ServiceContext serviceContext, final ConfiguredKsqlPlan plan, final boolean restoreInProgress) { try { final ExecuteResult result = EngineExecutor .create(primaryContext, serviceContext, plan.getConfig()) .execute(plan.getPlan(), restoreInProgress); return result; } catch (final KsqlStatementException e) { throw e; } catch (final KsqlException e) { // add the statement text to the KsqlException throw new KsqlStatementException( e.getMessage(), e.getMessage(), plan.getPlan().getStatementText(), e.getCause() ); } }
// An evolvable (nullable-field) Avro value schema on the topic must not block CTAS execution; the source T must be registered.
@Test public void shouldNotFailIfAvroSchemaEvolvable() { // Given: setupKsqlEngineWithSharedRuntimeEnabled(); final Schema evolvableSchema = SchemaBuilder .record("KsqlDataSourceSchema").fields() .nullableInt("f1", 1) .endRecord(); givenTopicWithValueSchema("T", evolvableSchema); // When: KsqlEngineTestUtil.execute( serviceContext, ksqlEngine, "CREATE TABLE T WITH(VALUE_FORMAT='AVRO') AS SELECT * FROM TEST2;", ksqlConfig, Collections.emptyMap() ); // Then: assertThat(metaStore.getSource(SourceName.of("T")), is(notNullValue())); }
// Merges otherState into state. When state holds no decimal yet it adopts
// otherState's Slice directly (NOTE(review): this shares the Slice rather
// than copying it — confirm callers never mutate otherState afterwards);
// otherwise the 128-bit add may carry, and that carry plus otherState's
// overflow counter are accumulated into state's overflow.
public static void combine(LongDecimalWithOverflowState state, LongDecimalWithOverflowState otherState) { long overflowToAdd = otherState.getOverflow(); Slice currentState = state.getLongDecimal(); Slice otherDecimal = otherState.getLongDecimal(); if (currentState == null) { state.setLongDecimal(otherDecimal); } else { overflowToAdd += UnscaledDecimal128Arithmetic.addWithOverflow(currentState, otherDecimal, currentState); } state.addOverflow(overflowToAdd); }
// Combining two states each holding 2^125 + 2^126 must overflow once, leaving 2^126 as the wrapped remainder.
@Test public void testCombineOverflow() { addToState(state, TWO.pow(125)); addToState(state, TWO.pow(126)); LongDecimalWithOverflowState otherState = new LongDecimalWithOverflowStateFactory().createSingleState(); addToState(otherState, TWO.pow(125)); addToState(otherState, TWO.pow(126)); DecimalSumAggregation.combine(state, otherState); assertEquals(state.getOverflow(), 1); assertEquals(state.getLongDecimal(), unscaledDecimal(TWO.pow(126))); }
/**
 * Returns true when the requested continuous amount fits within what is
 * left of the original resource after all continuous allocations.
 */
boolean hasEnoughResource(ContinuousResource request) {
  final double consumed = allocations.stream()
      .map(allocation -> allocation.resource())
      .filter(resource -> resource instanceof ContinuousResource)
      .mapToDouble(resource -> ((ContinuousResource) resource).value())
      .sum();
  final double remaining = original.value() - consumed;
  return request.value() <= remaining;
}
// 200 Mbps fits in the 500 Mbps left over from a 1 Gbps original with 500 Mbps already allocated.
@Test public void testHasEnoughResourceWhenSmallResourceIsRequested() { ContinuousResource original = Resources.continuous(DID, PN1, Bandwidth.class).resource(Bandwidth.gbps(1).bps()); ContinuousResource allocated = Resources.continuous(DID, PN1, Bandwidth.class).resource(Bandwidth.mbps(500).bps()); ResourceConsumer consumer = IntentId.valueOf(1); ContinuousResourceAllocation sut = new ContinuousResourceAllocation(original, ImmutableList.of(new ResourceAllocation(allocated, consumer))); ContinuousResource request = Resources.continuous(DID, PN1, Bandwidth.class).resource(Bandwidth.mbps(200).bps()); assertThat(sut.hasEnoughResource(request), is(true)); }
// Builder setter for the shutwait attribute; returns this for chaining.
public ApplicationBuilder shutwait(String shutwait) { this.shutwait = shutwait; return getThis(); }
// A value set through the builder must surface on the built application config.
@Test void shutwait() { ApplicationBuilder builder = new ApplicationBuilder(); builder.shutwait("shutwait"); Assertions.assertEquals("shutwait", builder.build().getShutwait()); }
/**
 * Returns the cell at the given 1-based column index.
 */
public Object getCell(final int columnIndex) {
  // Valid 1-based indexes are 1..data.size(); spelled as inclusive bounds
  // for readability (equivalent to index > 0 && index < size + 1).
  Preconditions.checkArgument(1 <= columnIndex && columnIndex <= data.size());
  return data.get(columnIndex - 1);
}
// Cells are addressed 1-based; long values (primitive or boxed) come back as their string form.
@SuppressWarnings("UnnecessaryBoxing") @Test void assertGetCellWithLongValue() { LocalDataQueryResultRow actual = new LocalDataQueryResultRow(1L, Long.valueOf(2L)); assertThat(actual.getCell(1), is("1")); assertThat(actual.getCell(2), is("2")); }
/**
 * Maps an ActiveMQ destination to its STOMP name: converter-created temp
 * destinations keep their original name; everything else gets a
 * /queue/, /topic/, /remote-temp-queue/ or /remote-temp-topic/ prefix
 * followed by the physical name. Null in, null out.
 */
@Override
public String convertDestination(ProtocolConverter converter, Destination d) {
  if (d == null) {
    return null;
  }
  ActiveMQDestination destination = (ActiveMQDestination) d;
  // Temp destinations created through this converter keep their original names.
  String tempName = converter.getCreatedTempDestinationName(destination);
  if (tempName != null) {
    return tempName;
  }
  final String prefix;
  if (destination.isQueue()) {
    prefix = destination.isTemporary() ? "/remote-temp-queue/" : "/queue/";
  } else {
    prefix = destination.isTemporary() ? "/remote-temp-topic/" : "/topic/";
  }
  return prefix + destination.getPhysicalName();
}
// "/topic/test" converts back to a non-composite TOPIC destination with physical name "test".
@Test(timeout = 10000) public void testConvertTopic() throws Exception { ActiveMQDestination destination = translator.convertDestination(converter, "/topic/test", false); assertFalse(destination.isComposite()); assertEquals("test", destination.getPhysicalName()); assertEquals(ActiveMQDestination.TOPIC_TYPE, destination.getDestinationType()); }
// One poll cycle: prepares fetch requests and delegates, routing success/failure to the handlers.
@Override public PollResult poll(long currentTimeMs) { return pollInternal( prepareFetchRequests(), this::handleFetchSuccess, this::handleFetchFailure ); }
// End-to-end incremental fetch session: the first response buffers records for tp0
// (tp1 empty), the buffered remainder is drained without a new fetch, an empty
// incremental response adds nothing, and a later response resumes delivery on tp0
// while tp1's position stays at 1.
@Test public void testConsumingViaIncrementalFetchRequests() { buildFetcher(2); assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); subscriptions.seekValidated(tp0, new SubscriptionState.FetchPosition(0, Optional.empty(), metadata.currentLeader(tp0))); subscriptions.seekValidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1))); // Fetch some records and establish an incremental fetch session. LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions1 = new LinkedHashMap<>(); partitions1.put(tidp0, new FetchResponseData.PartitionData() .setPartitionIndex(tp0.partition()) .setHighWatermark(2) .setLastStableOffset(2) .setLogStartOffset(0) .setRecords(records)); partitions1.put(tidp1, new FetchResponseData.PartitionData() .setPartitionIndex(tp1.partition()) .setHighWatermark(100) .setLogStartOffset(0) .setRecords(emptyRecords)); FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, 123, partitions1); client.prepareResponse(resp1); assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); networkClientDelegate.poll(time.timer(0)); assertTrue(fetcher.hasCompletedFetches()); Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords(); assertFalse(fetchedRecords.containsKey(tp1)); List<ConsumerRecord<byte[], byte[]>> recordsToTest = fetchedRecords.get(tp0); assertEquals(2, recordsToTest.size()); assertEquals(3L, subscriptions.position(tp0).offset); assertEquals(1L, subscriptions.position(tp1).offset); assertEquals(1, recordsToTest.get(0).offset()); assertEquals(2, recordsToTest.get(1).offset()); // There is still a buffered record. 
assertEquals(0, sendFetches()); fetchedRecords = fetchRecords(); assertFalse(fetchedRecords.containsKey(tp1)); recordsToTest = fetchedRecords.get(tp0); assertEquals(1, recordsToTest.size()); assertEquals(3, recordsToTest.get(0).offset()); assertEquals(4L, subscriptions.position(tp0).offset); // The second response contains no new records. LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions2 = new LinkedHashMap<>(); FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, partitions2); client.prepareResponse(resp2); assertEquals(1, sendFetches()); networkClientDelegate.poll(time.timer(0)); fetchedRecords = fetchRecords(); assertTrue(fetchedRecords.isEmpty()); assertEquals(4L, subscriptions.position(tp0).offset); assertEquals(1L, subscriptions.position(tp1).offset); // The third response contains some new records for tp0. LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions3 = new LinkedHashMap<>(); partitions3.put(tidp0, new FetchResponseData.PartitionData() .setPartitionIndex(tp0.partition()) .setHighWatermark(100) .setLastStableOffset(4) .setLogStartOffset(0) .setRecords(nextRecords)); FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, 123, partitions3); client.prepareResponse(resp3); assertEquals(1, sendFetches()); networkClientDelegate.poll(time.timer(0)); fetchedRecords = fetchRecords(); assertFalse(fetchedRecords.containsKey(tp1)); recordsToTest = fetchedRecords.get(tp0); assertEquals(2, recordsToTest.size()); assertEquals(6L, subscriptions.position(tp0).offset); assertEquals(1L, subscriptions.position(tp1).offset); assertEquals(4, recordsToTest.get(0).offset()); assertEquals(5, recordsToTest.get(1).offset()); }
// Returns the current index, then advances it (post-increment semantics).
public int getAndAdvanceCurrentIndex() { int idx = this.getCurrentIndex(); this.advanceIndex(); return idx; }
// Weighted round-robin must honor per-index weights from configuration and repeat the pattern indefinitely.
@Test public void testCustomPattern() { // 1x0 1x1 Configuration conf = new Configuration(); conf.setStrings("test.custom." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY, "1", "1"); mux = new WeightedRoundRobinMultiplexer(2, "test.custom", conf); assertThat(mux.getAndAdvanceCurrentIndex()).isZero(); assertThat(mux.getAndAdvanceCurrentIndex()).isOne(); assertThat(mux.getAndAdvanceCurrentIndex()).isZero(); assertThat(mux.getAndAdvanceCurrentIndex()).isOne(); // 1x0 3x1 2x2 conf.setStrings("test.custom." + IPC_CALLQUEUE_WRRMUX_WEIGHTS_KEY, "1", "3", "2"); mux = new WeightedRoundRobinMultiplexer(3, "test.custom", conf); for(int i = 0; i < 5; i++) { assertThat(mux.getAndAdvanceCurrentIndex()).isZero(); assertThat(mux.getAndAdvanceCurrentIndex()).isOne(); assertThat(mux.getAndAdvanceCurrentIndex()).isOne(); assertThat(mux.getAndAdvanceCurrentIndex()).isOne(); assertThat(mux.getAndAdvanceCurrentIndex()).isEqualTo(2); assertThat(mux.getAndAdvanceCurrentIndex()).isEqualTo(2); } // Ensure pattern repeats }
// Records the observed read transfer rate, computed from bytes read and duration in nanoseconds.
public static void addTransferRateMetric(final DataNodeMetrics metrics, final long read, final long durationInNS) { metrics.addReadTransferRate(getTransferRateInBytesPerSecond(read, durationInNS)); }
// A negative byte count must clamp the reported transfer rate to 0.
@Test public void testAddTransferRateMetricNegativeTransferBytes() { DataNodeMetrics mockMetrics = mock(DataNodeMetrics.class); DFSUtil.addTransferRateMetric(mockMetrics, -1L, 0); verify(mockMetrics).addReadTransferRate(0L); }
/**
 * If {@code type} wraps a reference type (e.g. {@code AtomicReference<T>}),
 * returns a new AnnotatedType for the contained type {@code T}, carrying
 * over all resolution context from the original. Returns {@code null}
 * when the type is not a reference type or its underlying Type is null;
 * a null input is returned as-is.
 */
public static AnnotatedType unwrapReference(AnnotatedType type) {
    if (type == null) {
        return type;
    } else if (type.getType() == null) {
        return null;
    }
    try {
        final JavaType jtype;
        if (type.getType() instanceof JavaType) {
            jtype = (JavaType) type.getType();
        } else {
            jtype = Json.mapper().constructType(type.getType());
        }
        if (_isReferenceType(jtype)) {
            // Rebuild the annotated type around the single contained type,
            // preserving every piece of resolution context.
            return new AnnotatedType()
                    .type(jtype.containedType(0))
                    .name(type.getName())
                    .parent(type.getParent())
                    .jsonUnwrappedHandler(type.getJsonUnwrappedHandler())
                    .skipOverride(type.isSkipOverride())
                    .schemaProperty(type.isSchemaProperty())
                    .ctxAnnotations(type.getCtxAnnotations())
                    .resolveAsRef(type.isResolveAsRef())
                    .jsonViewAnnotation(type.getJsonViewAnnotation())
                    .skipSchemaName(type.isSkipSchemaName())
                    .skipJsonIdentity(type.isSkipJsonIdentity())
                    .components(type.getComponents())
                    .propertyName(type.getPropertyName());
        } else {
            return null;
        }
    } catch (Exception e) {
        // Fixed message: the previous text said "optional", but this method
        // unwraps reference types.
        LOGGER.error("Error unwrapping reference type", e);
        return null;
    }
}
// An AtomicReference<BigDecimal> bean member's generic type should unwrap to BigDecimal.
@Test(description = "AtomicReference should be unwrapped when read from Java bean") public void testUnwrapWithAtomicReferenceMemberFromJavaBean() throws Exception { final JavaType expectedReferredType = TypeFactory.defaultInstance().constructType(BigDecimal.class); final Type genericType = TypeWithAtomicReferenceMember.class.getDeclaredField("member").getGenericType(); final AnnotatedType actualUnwrappedType = ReferenceTypeUtils.unwrapReference(new AnnotatedType(genericType)); Assert.assertEquals(actualUnwrappedType.getType(), expectedReferredType, genericType.getTypeName() + "Reference type not correctly unwrapped"); }
// Entry point: converts the table to the target type without transposition.
@Override public <T> T convert(DataTable dataTable, Type type) { return convert(dataTable, type, false); }
// An empty table must convert to null for a boxed-primitive target type.
@Test void convert_to_primitive__empty_table_to_null() { DataTable table = emptyDataTable(); assertNull(converter.convert(table, Integer.class)); }
// Creates a CeWorker with a fresh UUID and records it by rebuilding the
// ceWorkers set via stream concat (copy-on-write: readers keep seeing the
// previous set instance).
@Override public CeWorker create(int ordinal) { String uuid = uuidFactory.create(); CeWorkerImpl ceWorker = new CeWorkerImpl(ordinal, uuid, queue, taskProcessorRepository, ceWorkerController, executionListeners); ceWorkers = Stream.concat(ceWorkers.stream(), Stream.of(ceWorker)).collect(Collectors.toSet()); return ceWorker; }
// The ordinal passed to create() must surface on the returned worker.
@Test public void create_return_CeWorker_object_with_specified_ordinal() { CeWorker ceWorker = underTest.create(randomOrdinal); assertThat(ceWorker.getOrdinal()).isEqualTo(randomOrdinal); }
/**
 * Trims the line and delegates parsing to the wrapped parser.
 */
@Override
public ParsedLine parse(final String line, final int cursor, final ParseContext context) {
  final String trimmedLine = line.trim();
  // Trimming shifts character positions, so the cursor must be re-based
  // against the trimmed text before delegating.
  final int cursorInTrimmed = adjustCursor(line, trimmedLine, cursor);
  return delegate.parse(trimmedLine, cursorInTrimmed, context);
}
// A line without surrounding whitespace must be delegated unchanged, cursor untouched.
@Test public void shouldPassThroughAnyLineThatDoesNotNeedTrimming() { expect(delegate.parse("line without spaces at ends", 4, ACCEPT_LINE)) .andReturn(parsedLine); replay(delegate); final ParsedLine line = parser.parse("line without spaces at ends", 4, ACCEPT_LINE); assertThat(line, is(parsedLine)); }
// Publishes config data for the key/group through the ZooKeeper client; returns its success flag.
public boolean updateNode(String key, String group, String data) { return zkClient.publishConfig(key, group, data); }
// updateNode should report success when publishing content to the parent path (null group).
@Test public void testUpdateNode() { boolean result = zooKeeperBufferedClient.updateNode(PARENT_PATH, null, NODE_CONTENT); Assert.assertTrue(result); }
/**
 * Schedules movements of active tasks that currently sit on clients that
 * are not caught up while a more caught-up candidate exists. Movements are
 * resolved from a priority queue (fewest caught-up clients first, then
 * task id) by trying, in order: swapping with a caught-up standby, moving
 * to a caught-up client (with a warm-up on the original owner), or moving
 * to the most caught-up client. Throws IllegalStateException if a
 * scheduled movement has no viable target. Returns the number of
 * movements performed.
 */
static int assignActiveTaskMovements(final Map<TaskId, SortedSet<ProcessId>> tasksToCaughtUpClients, final Map<TaskId, SortedSet<ProcessId>> tasksToClientByLag, final Map<ProcessId, ClientState> clientStates, final Map<ProcessId, Set<TaskId>> warmups, final AtomicInteger remainingWarmupReplicas) { final BiFunction<ProcessId, TaskId, Boolean> caughtUpPredicate = (client, task) -> taskIsCaughtUpOnClient(task, client, tasksToCaughtUpClients); final ConstrainedPrioritySet caughtUpClientsByTaskLoad = new ConstrainedPrioritySet( caughtUpPredicate, client -> clientStates.get(client).assignedTaskLoad() ); final Queue<TaskMovement> taskMovements = new PriorityQueue<>( Comparator.comparing(TaskMovement::numCaughtUpClients).thenComparing(TaskMovement::task) ); for (final Map.Entry<ProcessId, ClientState> clientStateEntry : clientStates.entrySet()) { final ProcessId client = clientStateEntry.getKey(); final ClientState state = clientStateEntry.getValue(); for (final TaskId task : state.activeTasks()) { // if the desired client is not caught up, and there is another client that _is_ more caught up, then // we schedule a movement, so we can move the active task to a more caught-up client. We'll try to // assign a warm-up to the desired client so that we can move it later on. if (taskIsNotCaughtUpOnClientAndOtherMoreCaughtUpClientsExist(task, client, clientStates, tasksToCaughtUpClients, tasksToClientByLag)) { taskMovements.add(new TaskMovement(task, client, tasksToCaughtUpClients.get(task))); } } caughtUpClientsByTaskLoad.offer(client); } final int movementsNeeded = taskMovements.size(); while (!taskMovements.isEmpty()) { final TaskMovement movement = taskMovements.poll(); // Attempt to find a caught up standby, otherwise find any caught up client, failing that use the most // caught up client. 
final boolean moved = tryToSwapStandbyAndActiveOnCaughtUpClient(clientStates, caughtUpClientsByTaskLoad, movement) || tryToMoveActiveToCaughtUpClientAndTryToWarmUp(clientStates, warmups, remainingWarmupReplicas, caughtUpClientsByTaskLoad, movement) || tryToMoveActiveToMostCaughtUpClient(tasksToClientByLag, clientStates, warmups, remainingWarmupReplicas, caughtUpClientsByTaskLoad, movement); if (!moved) { throw new IllegalStateException("Tried to move task to more caught-up client as scheduled before but none exist"); } } return movementsNeeded; }
/**
 * When no client is caught up on any task (all caught-up sets are empty), no movement
 * can be scheduled, so {@code assignActiveTaskMovements} must report 0 movements.
 */
@Test
public void shouldAssignAllTasksToClientsAndReturnFalseIfNoClientsAreCaughtUp() {
    final int maxWarmupReplicas = Integer.MAX_VALUE;
    final Set<TaskId> allTasks = mkSet(TASK_0_0, TASK_0_1, TASK_0_2, TASK_1_0, TASK_1_1, TASK_1_2);
    // Three clients, each already holding two active tasks.
    final ClientState client1 = getClientStateWithActiveAssignment(mkSet(TASK_0_0, TASK_1_0), mkSet(), allTasks);
    final ClientState client2 = getClientStateWithActiveAssignment(mkSet(TASK_0_1, TASK_1_1), mkSet(), allTasks);
    final ClientState client3 = getClientStateWithActiveAssignment(mkSet(TASK_0_2, TASK_1_2), mkSet(), allTasks);
    // Nobody is caught up on anything.
    final Map<TaskId, SortedSet<ProcessId>> tasksToCaughtUpClients = mkMap(
        mkEntry(TASK_0_0, emptySortedSet()),
        mkEntry(TASK_0_1, emptySortedSet()),
        mkEntry(TASK_0_2, emptySortedSet()),
        mkEntry(TASK_1_0, emptySortedSet()),
        mkEntry(TASK_1_1, emptySortedSet()),
        mkEntry(TASK_1_2, emptySortedSet())
    );
    final Map<TaskId, SortedSet<ProcessId>> tasksToClientByLag = mkMap(
        mkEntry(TASK_0_0, mkOrderedSet(PID_1, PID_2, PID_3)),
        mkEntry(TASK_0_1, mkOrderedSet(PID_1, PID_2, PID_3)),
        mkEntry(TASK_0_2, mkOrderedSet(PID_1, PID_2, PID_3)),
        mkEntry(TASK_1_0, mkOrderedSet(PID_1, PID_2, PID_3)),
        mkEntry(TASK_1_1, mkOrderedSet(PID_1, PID_2, PID_3)),
        mkEntry(TASK_1_2, mkOrderedSet(PID_1, PID_2, PID_3))
    );
    assertThat(
        assignActiveTaskMovements(
            tasksToCaughtUpClients,
            tasksToClientByLag,
            getClientStatesMap(client1, client2, client3),
            new TreeMap<>(),
            new AtomicInteger(maxWarmupReplicas)
        ),
        is(0)
    );
}
/**
 * Stages the given classpath elements to the staging path.
 *
 * <p>Convenience overload that delegates to the full variant using
 * {@code DEFAULT_SLEEPER}.
 */
List<DataflowPackage> stageClasspathElements(
    Collection<StagedFile> classpathElements, String stagingPath, CreateOptions createOptions) {
  return stageClasspathElements(classpathElements, stagingPath, DEFAULT_SLEEPER, createOptions);
}
/**
 * Staging an empty directory should succeed: the directory is zipped into a .jar
 * with no entries and uploaded once to the staging path.
 */
@Test
public void testPackageUploadWithEmptyDirectorySucceeds() throws Exception {
  Pipe pipe = Pipe.open();
  File tmpDirectory = tmpFolder.newFolder("folder");
  // Object is reported missing remotely, forcing an upload.
  when(mockGcsUtil.getObjects(anyListOf(GcsPath.class)))
      .thenReturn(
          ImmutableList.of(
              StorageObjectOrIOException.create(new FileNotFoundException("some/path"))));
  when(mockGcsUtil.create(any(GcsPath.class), any(GcsUtil.CreateOptions.class)))
      .thenReturn(pipe.sink());

  List<DataflowPackage> targets =
      defaultPackageUtil.stageClasspathElements(
          ImmutableList.of(makeStagedFile(tmpDirectory.getAbsolutePath())), STAGING_PATH, createOptions);
  DataflowPackage target = Iterables.getOnlyElement(targets);

  // Exactly one existence check and one upload, nothing else.
  verify(mockGcsUtil).getObjects(anyListOf(GcsPath.class));
  verify(mockGcsUtil).create(any(GcsPath.class), any(GcsUtil.CreateOptions.class));
  verifyNoMoreInteractions(mockGcsUtil);

  assertThat(target.getName(), endsWith(".jar"));
  assertThat(target.getLocation(), equalTo(STAGING_PATH + target.getName()));
  // The uploaded zip must be empty.
  try (ZipInputStream zipInputStream = new ZipInputStream(Channels.newInputStream(pipe.source()))) {
    assertNull(zipInputStream.getNextEntry());
  }
}
/**
 * Decodes a value from the stream, delegating to the context-aware overload with
 * {@code Context.NESTED}.
 *
 * @throws IOException    propagated from the underlying decode
 * @throws CoderException propagated from the underlying decode
 */
@Override
public @Nullable T decode(InputStream inStream) throws IOException, CoderException {
  return decode(inStream, Context.NESTED);
}
/**
 * A marker byte other than 0 (null) or 1 (present) must be rejected with a
 * descriptive {@link CoderException}.
 */
@Test
public void testDecodingError() throws Exception {
  thrown.expect(CoderException.class);
  thrown.expectMessage(
      equalTo("NullableCoder expects either a byte valued 0 (null) " + "or 1 (present), got 5"));

  // 5 is not a valid nullability marker.
  InputStream input = new ByteArrayInputStream(new byte[] {5});
  TEST_CODER.decode(input, Coder.Context.OUTER);
}
/**
 * Opens a download stream for the given file.
 *
 * <p>Generates a one-time download URL through the Nodes API, optionally adds a
 * {@code Range} header when resuming, and returns the response body stream on
 * 200/206. On 404 the cached node id for the file is invalidated and the error is
 * mapped like any other failure status (note the deliberate switch fall-through).
 *
 * @throws BackgroundException mapped from API, HTTP or I/O failures
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        final SDSApiClient client = session.getClient();
        // Obtain a short-lived download URL for the node.
        final DownloadTokenGenerateResponse token = new NodesApi(session.getClient()).generateDownloadUrl(Long.valueOf(nodeid.getVersionId(file)), StringUtils.EMPTY);
        final HttpUriRequest request = new HttpGet(token.getDownloadUrl());
        request.addHeader("X-Sds-Auth-Token", StringUtils.EMPTY);
        if(status.isAppend()) {
            // Resume: request only the remaining byte range.
            final HttpRange range = HttpRange.withStatus(status);
            final String header;
            if(TransferStatus.UNKNOWN_LENGTH == range.getEnd()) {
                header = String.format("bytes=%d-", range.getStart());
            }
            else {
                header = String.format("bytes=%d-%d", range.getStart(), range.getEnd());
            }
            if(log.isDebugEnabled()) {
                log.debug(String.format("Add range header %s for file %s", header, file));
            }
            request.addHeader(new BasicHeader(HttpHeaders.RANGE, header));
            // Disable compression
            request.addHeader(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, "identity"));
        }
        final HttpResponse response = client.getClient().execute(request);
        switch(response.getStatusLine().getStatusCode()) {
            case HttpStatus.SC_OK:
            case HttpStatus.SC_PARTIAL_CONTENT:
                // Stream releases the connection when closed.
                return new HttpMethodReleaseInputStream(response, status);
            case HttpStatus.SC_NOT_FOUND:
                // Invalidate stale node id mapping, then fall through to error mapping.
                nodeid.cache(file, null);
                // Break through
            default:
                throw new DefaultHttpResponseExceptionMappingService().map("Download {0} failed", new HttpResponseException(
                    response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file);
        }
    }
    catch(ApiException e) {
        throw new SDSExceptionMappingService(nodeid).map("Download {0} failed", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Download {0} failed", e, file);
    }
}
/**
 * Reading a file name that does not exist inside a freshly created room must raise
 * {@link NotfoundException}; the room is cleaned up afterwards.
 */
@Test(expected = NotfoundException.class)
public void testReadNotFound() throws Exception {
    final TransferStatus status = new TransferStatus();
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    try {
        new SDSReadFeature(session, nodeid).read(new Path(room, "nosuchname", EnumSet.of(Path.Type.file)), status, new DisabledConnectionCallback());
    }
    finally {
        // Always delete the temporary room, even when the expected exception is thrown.
        new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback());
    }
}
/**
 * Removes the given rule from the cached rule list of its selector.
 *
 * <p>A {@code null} argument is ignored. The lookup and the removal both happen
 * while holding the {@code RULE_MAP} lock so the removal cannot operate on a list
 * that a concurrent writer has already replaced in the map (the previous version
 * fetched the list before acquiring the lock).
 *
 * @param ruleData the rule to remove; matched by id within its selector's list
 */
public void removeRuleData(final RuleData ruleData) {
    Optional.ofNullable(ruleData).ifPresent(data -> {
        synchronized (RULE_MAP) {
            // Resolve the list under the lock to avoid acting on a stale snapshot.
            final List<RuleData> ruleDataList = RULE_MAP.get(data.getSelectorId());
            Optional.ofNullable(ruleDataList).ifPresent(list -> list.removeIf(rule -> rule.getId().equals(data.getId())));
        }
    });
}
/**
 * After removing a cached rule, the selector's rule list must be empty
 * (the list itself remains in the map, emptied in place).
 */
@Test
public void testRemoveRuleData() throws NoSuchFieldException, IllegalAccessException {
    RuleData ruleData = RuleData.builder().id("1").selectorId(mockSelectorId1).build();
    // Access the private cache map via reflection and seed it with one rule.
    ConcurrentHashMap<String, List<RuleData>> ruleMap = getFieldByName(ruleMapStr);
    ruleMap.put(mockSelectorId1, Lists.newArrayList(ruleData));
    BaseDataCache.getInstance().removeRuleData(ruleData);
    assertEquals(Lists.newArrayList(), ruleMap.get(mockSelectorId1));
}
/**
 * Returns the smaller of two byte arrays according to the default byte-array
 * comparator; when they compare equal, {@code b} is returned.
 *
 * @param a first candidate
 * @param b second candidate
 * @return whichever array orders first under the default comparator
 */
public static byte[] min(final byte[] a, final byte[] b) {
    final int order = getDefaultByteArrayComparator().compare(a, b);
    if (order < 0) {
        return a;
    }
    return b;
}
/**
 * min() must return the lexicographically smaller array, and an array compared
 * with itself yields that same array.
 */
@Test
public void testMin() {
    byte[] array = new byte[] { 1, 2 };
    Assert.assertArrayEquals(array, BytesUtil.min(array, array));
    Assert.assertArrayEquals(array, BytesUtil.min(array, new byte[] { 3, 4 }));
}
/**
 * Intentionally a no-op: this HTTP-based client implementation does not react to
 * server-list changes.
 */
@Override
public void onEvent(ServerListChangedEvent event) {
    // do nothing in http client
}
/**
 * onEvent is documented as a no-op for the HTTP client; calling it must simply
 * not throw.
 */
@Test
void testOnEvent() {
    clientProxy.onEvent(new ServerListChangedEvent());
    // Do nothing
}
/**
 * Exposes every pipelined region of this topology.
 *
 * @return an unmodifiable view over the precomputed pipelined regions
 */
@Override
public Iterable<DefaultSchedulingPipelinedRegion> getAllPipelinedRegions() {
    // Fail fast if the regions were never computed, then hand out a read-only view.
    return Collections.unmodifiableCollection(checkNotNull(pipelinedRegions));
}
/**
 * The adapter under test is built from a single pipelined job, so exactly one
 * pipelined region must be reported.
 */
@Test
void testGetAllPipelinedRegions() {
    final Iterable<DefaultSchedulingPipelinedRegion> allPipelinedRegions = adapter.getAllPipelinedRegions();
    assertThat(allPipelinedRegions).hasSize(1);
}
/**
 * Renders a logged network frame into human-readable text.
 *
 * <p>Appends the common log header and the socket address, then dispatches on the
 * frame type to the matching flyweight/dissector. Unknown types are rendered as
 * {@code type=UNKNOWN(n)}.
 *
 * @param eventCode event that produced the log entry
 * @param buffer    buffer containing the encoded log record
 * @param offset    start of the record within {@code buffer}
 * @param builder   destination for the rendered text
 */
static void dissectFrame(
    final DriverEventCode eventCode,
    final MutableDirectBuffer buffer,
    final int offset,
    final StringBuilder builder) {
    int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);

    builder.append(": address=");
    encodedLength += dissectSocketAddress(buffer, offset + encodedLength, builder);
    builder.append(" ");

    // The raw frame begins after the log header and the encoded address.
    final int frameOffset = offset + encodedLength;
    final int frameType = frameType(buffer, frameOffset);
    switch (frameType) {
        case HeaderFlyweight.HDR_TYPE_PAD:
        case HeaderFlyweight.HDR_TYPE_DATA:
            // Padding frames share the data-frame layout.
            DATA_HEADER.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectDataFrame(builder);
            break;

        case HeaderFlyweight.HDR_TYPE_SM:
            SM_HEADER.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectStatusFrame(builder);
            break;

        case HeaderFlyweight.HDR_TYPE_NAK:
            NAK_HEADER.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectNakFrame(builder);
            break;

        case HeaderFlyweight.HDR_TYPE_SETUP:
            SETUP_HEADER.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectSetupFrame(builder);
            break;

        case HeaderFlyweight.HDR_TYPE_RTTM:
            RTT_MEASUREMENT.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectRttFrame(builder);
            break;

        case HeaderFlyweight.HDR_TYPE_RES:
            dissectResFrame(buffer, frameOffset, builder);
            break;

        case HeaderFlyweight.HDR_TYPE_RSP_SETUP:
            RSP_SETUP.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectRspSetupFrame(builder);
            break;

        default:
            builder.append("type=UNKNOWN(").append(frameType).append(")");
            break;
    }
}
/**
 * A SETUP frame following the log header and socket address must be fully
 * dissected, rendering every flyweight field in the expected text format.
 */
@Test
void dissectFrameTypeSetup() {
    internalEncodeLogHeader(buffer, 0, 3, 3, () -> 3_000_000_000L);
    final int socketAddressOffset = encodeSocketAddress(
        buffer, LOG_HEADER_LENGTH, new InetSocketAddress("localhost", 8888));
    // Populate a SETUP frame with distinctive values so each field is verifiable.
    final SetupFlyweight flyweight = new SetupFlyweight();
    flyweight.wrap(buffer, LOG_HEADER_LENGTH + socketAddressOffset, 300);
    flyweight.headerType(HDR_TYPE_SETUP);
    flyweight.flags((short)200);
    flyweight.frameLength(1);
    flyweight.sessionId(15);
    flyweight.streamId(18);
    flyweight.activeTermId(81);
    flyweight.initialTermId(69);
    flyweight.termOffset(10);
    flyweight.termLength(444);
    flyweight.mtuLength(8096);
    flyweight.ttl(20_000);

    dissectFrame(FRAME_IN, buffer, 0, builder);

    assertEquals("[3.000000000] " + CONTEXT + ": " + FRAME_IN.name() +
        " [3/3]: address=127.0.0.1:8888 type=SETUP flags=11001000 frameLength=1 sessionId=15 streamId=18 " +
        "activeTermId=81 initialTermId=69 termOffset=10 termLength=444 mtu=8096 ttl=20000",
        builder.toString());
}
/**
 * Writes one message through the outbound pipeline without flushing, using a
 * freshly created promise.
 *
 * @param msg the message to write
 * @return the future for the write operation
 */
public ChannelFuture writeOneOutbound(Object msg) {
    return writeOneOutbound(msg, newPromise());
}
/**
 * writeOneOutbound must propagate the message through the pipeline's write path
 * without triggering a flush.
 */
@Test
public void testWriteOneOutbound() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    final AtomicInteger flushCount = new AtomicInteger(0);

    EmbeddedChannel channel = new EmbeddedChannel(new ChannelOutboundHandlerAdapter() {
        @Override
        public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
            ctx.write(msg, promise);
            latch.countDown();
        }

        @Override
        public void flush(ChannelHandlerContext ctx) throws Exception {
            flushCount.incrementAndGet();
        }
    });

    // This shouldn't trigger a #flush()
    channel.writeOneOutbound("Hello, Netty!");

    if (!latch.await(1L, TimeUnit.SECONDS)) {
        fail("Nobody called #write() in time.");
    }

    channel.close().syncUninterruptibly();

    // There was no #flushOutbound() call so nobody should have called #flush()
    assertEquals(0, flushCount.get());
}
/**
 * Checks whether the given placement constraint can be satisfied on a node.
 *
 * <p>A {@code null} constraint is trivially satisfied. Otherwise the constraint is
 * normalized to a {@code SingleConstraint} form and dispatched on its expression
 * type (single / and / or).
 *
 * @return {@code true} if the node satisfies the constraint
 * @throws InvalidAllocationTagsQueryException for unsupported expression types or
 *         failed tag queries
 */
private static boolean canSatisfyConstraints(ApplicationId appId,
    PlacementConstraint constraint, SchedulerNode node,
    AllocationTagsManager atm,
    Optional<DiagnosticsCollector> dcOpt)
    throws InvalidAllocationTagsQueryException {
    if (constraint == null) {
        LOG.debug("Constraint is found empty during constraint validation for" + " app:{}", appId);
        return true;
    }

    // If this is a single constraint, transform to SingleConstraint
    SingleConstraintTransformer singleTransformer =
        new SingleConstraintTransformer(constraint);
    constraint = singleTransformer.transform();
    AbstractConstraint sConstraintExpr = constraint.getConstraintExpr();

    // TODO handle other type of constraints, e.g CompositeConstraint
    if (sConstraintExpr instanceof SingleConstraint) {
        SingleConstraint single = (SingleConstraint) sConstraintExpr;
        return canSatisfySingleConstraint(appId, single, node, atm, dcOpt);
    } else if (sConstraintExpr instanceof And) {
        And and = (And) sConstraintExpr;
        return canSatisfyAndConstraint(appId, and, node, atm, dcOpt);
    } else if (sConstraintExpr instanceof Or) {
        Or or = (Or) sConstraintExpr;
        return canSatisfyOrConstraint(appId, or, node, atm, dcOpt);
    } else {
        throw new InvalidAllocationTagsQueryException(
            "Unsupported type of constraint: "
                + sConstraintExpr.getClass().getSimpleName());
    }
}
/**
 * OR constraints: a node qualifies when at least one of the OR'd target
 * expressions is satisfied by its allocation tags.
 */
@Test
public void testORConstraintAssignment()
    throws InvalidAllocationTagsQueryException {
    AllocationTagsManager tm = new AllocationTagsManager(rmContext);
    PlacementConstraintManagerService pcm =
        new MemoryPlacementConstraintManager();
    // Register App1 with anti-affinity constraint map.
    pcm.registerApplication(appId1, constraintMap4);

    RMNode n0r1 = rmNodes.get(0);
    RMNode n1r1 = rmNodes.get(1);
    RMNode n2r2 = rmNodes.get(2);
    RMNode n3r2 = rmNodes.get(3);

    /**
     * Place container:
     *  n0: hbase-m(1)
     *  n1: ""
     *  n2: hbase-rs(1)
     *  n3: ""
     */
    tm.addContainer(n0r1.getNodeID(), newContainerId(appId1, 1),
        ImmutableSet.of("hbase-m"));
    tm.addContainer(n2r2.getNodeID(), newContainerId(appId1, 2),
        ImmutableSet.of("hbase-rs"));
    Assert.assertEquals(1L, tm.getAllocationTagsWithCount(n0r1.getNodeID())
        .get("hbase-m").longValue());
    Assert.assertEquals(1L, tm.getAllocationTagsWithCount(n2r2.getNodeID())
        .get("hbase-rs").longValue());

    SchedulerNode schedulerNode0 = newSchedulerNode(n0r1.getHostName(),
        n0r1.getRackName(), n0r1.getNodeID());
    SchedulerNode schedulerNode1 = newSchedulerNode(n1r1.getHostName(),
        n1r1.getRackName(), n1r1.getNodeID());
    SchedulerNode schedulerNode2 = newSchedulerNode(n2r2.getHostName(),
        n2r2.getRackName(), n2r2.getNodeID());
    SchedulerNode schedulerNode3 = newSchedulerNode(n3r2.getHostName(),
        n3r2.getRackName(), n3r2.getNodeID());

    // n0 and n2 should be qualified for allocation as
    // they either have hbase-m or hbase-rs tag
    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
        createSchedulingRequest(sourceTag1), schedulerNode0, pcm, tm));
    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
        createSchedulingRequest(sourceTag1), schedulerNode1, pcm, tm));
    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
        createSchedulingRequest(sourceTag1), schedulerNode2, pcm, tm));
    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
        createSchedulingRequest(sourceTag1), schedulerNode3, pcm, tm));

    /**
     * Place container:
     *  n0: hbase-m(1)
     *  n1: ""
     *  n2: hbase-rs(1)
     *  n3: hbase-rs(1)
     */
    tm.addContainer(n3r2.getNodeID(), newContainerId(appId1, 2),
        ImmutableSet.of("hbase-rs"));

    // n3 is qualified now because it is allocated with hbase-rs tag
    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
        createSchedulingRequest(sourceTag1), schedulerNode3, pcm, tm));

    /**
     * Place container:
     *  n0: hbase-m(1)
     *  n1: ""
     *  n2: hbase-rs(1), spark(1)
     *  n3: hbase-rs(1)
     */
    // Place
    tm.addContainer(n2r2.getNodeID(), newContainerId(appId1, 3),
        ImmutableSet.of("spark"));

    // According to constraint, "zk" is allowed to be placed on a node
    // has "hbase-m" tag OR a node has both "hbase-rs" and "spark" tags.
    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
        createSchedulingRequest(sourceTag2), schedulerNode0, pcm, tm));
    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
        createSchedulingRequest(sourceTag2), schedulerNode1, pcm, tm));
    Assert.assertTrue(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
        createSchedulingRequest(sourceTag2), schedulerNode2, pcm, tm));
    Assert.assertFalse(PlacementConstraintsUtil.canSatisfyConstraints(appId1,
        createSchedulingRequest(sourceTag2), schedulerNode3, pcm, tm));
}
/**
 * Creates the command executor for an incoming OpenGauss packet.
 *
 * <p>Plain packets map directly to a single executor. Aggregated packets that
 * contain batched statements (and no OpenGauss batch-bind packets) are executed
 * through the batched-statements path; otherwise each contained packet gets its
 * own executor, wrapped in an aggregated executor.
 *
 * @throws SQLException propagated from executor construction
 */
public static CommandExecutor newInstance(final CommandPacketType commandPacketType, final PostgreSQLCommandPacket commandPacket,
                                          final ConnectionSession connectionSession, final PortalContext portalContext) throws SQLException {
    if (commandPacket instanceof SQLReceivedPacket) {
        log.debug("Execute packet type: {}, sql: {}", commandPacketType, ((SQLReceivedPacket) commandPacket).getSQL());
    } else {
        log.debug("Execute packet type: {}", commandPacketType);
    }
    if (!(commandPacket instanceof PostgreSQLAggregatedCommandPacket)) {
        return getCommandExecutor(commandPacketType, commandPacket, connectionSession, portalContext);
    }
    PostgreSQLAggregatedCommandPacket aggregatedCommandPacket = (PostgreSQLAggregatedCommandPacket) commandPacket;
    // Batched statements take the optimized path only when no OpenGauss batch-bind packet is present.
    if (aggregatedCommandPacket.isContainsBatchedStatements() && aggregatedCommandPacket.getPackets().stream().noneMatch(OpenGaussComBatchBindPacket.class::isInstance)) {
        return new PostgreSQLAggregatedCommandExecutor(getExecutorsOfAggregatedBatchedStatements(aggregatedCommandPacket, connectionSession, portalContext));
    }
    List<CommandExecutor> result = new ArrayList<>(aggregatedCommandPacket.getPackets().size());
    for (PostgreSQLCommandPacket each : aggregatedCommandPacket.getPackets()) {
        result.add(getCommandExecutor((CommandPacketType) each.getIdentifier(), each, connectionSession, portalContext));
    }
    return new PostgreSQLAggregatedCommandExecutor(result);
}
/**
 * An aggregated packet flagged as containing batched statements must collapse the
 * batched range (indexes 1..6) into a single batched-statements executor, while the
 * surrounding packets keep their individual executors, in order.
 */
@Test
void assertAggregatedPacketIsBatchedStatements() throws SQLException {
    PostgreSQLComParsePacket parsePacket = mock(PostgreSQLComParsePacket.class);
    when(parsePacket.getIdentifier()).thenReturn(PostgreSQLCommandPacketType.PARSE_COMMAND);
    PostgreSQLComBindPacket bindPacket = mock(PostgreSQLComBindPacket.class);
    PostgreSQLComDescribePacket describePacket = mock(PostgreSQLComDescribePacket.class);
    PostgreSQLComExecutePacket executePacket = mock(PostgreSQLComExecutePacket.class);
    PostgreSQLComClosePacket closePacket = mock(PostgreSQLComClosePacket.class);
    when(closePacket.getIdentifier()).thenReturn(PostgreSQLCommandPacketType.CLOSE_COMMAND);
    PostgreSQLComSyncPacket syncPacket = mock(PostgreSQLComSyncPacket.class);
    when(syncPacket.getIdentifier()).thenReturn(PostgreSQLCommandPacketType.SYNC_COMMAND);
    PostgreSQLComTerminationPacket terminationPacket = mock(PostgreSQLComTerminationPacket.class);
    when(terminationPacket.getIdentifier()).thenReturn(PostgreSQLCommandPacketType.TERMINATE);
    PostgreSQLAggregatedCommandPacket packet = mock(PostgreSQLAggregatedCommandPacket.class);
    when(packet.isContainsBatchedStatements()).thenReturn(true);
    // parse, [bind..execute]x2 (the batched range), close, sync, terminate.
    when(packet.getPackets()).thenReturn(
        Arrays.asList(parsePacket, bindPacket, describePacket, executePacket, bindPacket, describePacket, executePacket, closePacket, syncPacket, terminationPacket));
    when(packet.getBatchPacketBeginIndex()).thenReturn(1);
    when(packet.getBatchPacketEndIndex()).thenReturn(6);
    CommandExecutor actual = OpenGaussCommandExecutorFactory.newInstance(null, packet, connectionSession, portalContext);
    assertThat(actual, instanceOf(PostgreSQLAggregatedCommandExecutor.class));
    Iterator<CommandExecutor> actualPacketsIterator = getExecutorsFromAggregatedCommandExecutor((PostgreSQLAggregatedCommandExecutor) actual).iterator();
    assertThat(actualPacketsIterator.next(), instanceOf(PostgreSQLComParseExecutor.class));
    assertThat(actualPacketsIterator.next(), instanceOf(PostgreSQLAggregatedBatchedStatementsCommandExecutor.class));
    assertThat(actualPacketsIterator.next(), instanceOf(PostgreSQLComCloseExecutor.class));
    assertThat(actualPacketsIterator.next(), instanceOf(PostgreSQLComSyncExecutor.class));
    assertThat(actualPacketsIterator.next(), instanceOf(PostgreSQLComTerminationExecutor.class));
    assertFalse(actualPacketsIterator.hasNext());
}
/**
 * Removes duplicate suggestions in place, keeping the first occurrence of each.
 *
 * <p>Pairwise O(n²) comparison via {@link TextUtils#equals}; removed entries are
 * recycled through {@code stringsPool} by {@code removeSuggestion}.
 *
 * @param suggestions list to de-duplicate, mutated in place
 * @param stringsPool pool receiving recycled removed entries
 */
public static void removeDupes(
    final List<CharSequence> suggestions, List<CharSequence> stringsPool) {
    if (suggestions.size() < 2) return;
    int i = 1;
    // Don't cache suggestions.size(), since we may be removing items
    while (i < suggestions.size()) {
        final CharSequence cur = suggestions.get(i);
        // Compare each suggestion with each previous suggestion
        for (int j = 0; j < i; j++) {
            CharSequence previous = suggestions.get(j);
            if (TextUtils.equals(cur, previous)) {
                removeSuggestion(suggestions, i, stringsPool);
                // Compensate for the removal so the element shifted into slot i is re-checked.
                i--;
                break;
            }
        }
        i++;
    }
}
/**
 * With several distinct values each repeated, de-duplication must keep exactly the
 * first occurrence of each value, preserving first-seen order.
 */
@Test
public void testRemoveDupesOnlyDupesMultipleTypes() throws Exception {
    ArrayList<CharSequence> list =
        new ArrayList<>(
            Arrays.<CharSequence>asList(
                "typed", "something", "something", "typed", "banana", "banana", "something", "typed", "car", "typed"));
    IMEUtil.removeDupes(list, mStringPool);
    Assert.assertEquals(4, list.size());
    Assert.assertEquals("typed", list.get(0));
    Assert.assertEquals("something", list.get(1));
    Assert.assertEquals("banana", list.get(2));
    Assert.assertEquals("car", list.get(3));
}
/**
 * Builds a {@link HashMap} from an alternating sequence of keys and values:
 * {@code ofHashMap(k1, v1, k2, v2, ...)}.
 *
 * @param kv alternating keys and values; must be non-null, non-empty, of even length
 * @return a mutable {@code HashMap} containing the given pairs
 * @throws IllegalArgumentException if {@code kv} is null, empty, or of odd length
 */
@SuppressWarnings("unchecked")
public static <K, V> HashMap<K, V> ofHashMap(Object... kv) {
    if (kv == null || kv.length == 0) {
        throw new IllegalArgumentException("entries got no objects, which aren't pairs");
    }
    if ((kv.length & 1) != 0) {
        throw new IllegalArgumentException(
            String.format("entries got %d objects, which aren't pairs", kv.length));
    }
    int size = kv.length >> 1;
    // Size for `size` entries under the default 0.75 load factor so the map
    // never rehashes while being populated (new HashMap<>(size) would).
    HashMap<K, V> map = new HashMap<>((int) (size / 0.75f) + 1);
    for (int i = 0; i < kv.length; i += 2) {
        // Unchecked by construction: callers pass untyped varargs.
        map.put((K) kv[i], (V) kv[i + 1]);
    }
    return map;
}
/**
 * ofHashMap must produce the same mapping as building a HashMap from the
 * equivalent key/value pairs.
 */
@Test
public void testOfHashMap() {
    assertEquals(Collections.ofHashMap(1, 2, 3, 4), new HashMap<>(ImmutableMap.of(1, 2, 3, 4)));
}
/**
 * Starts the consumer: creates a Service Bus processor client wired to this
 * consumer's message and error callbacks, then starts it.
 *
 * @throws Exception propagated from the superclass start
 */
@Override
protected void doStart() throws Exception {
    super.doStart();
    LOG.debug("Creating connection to Azure ServiceBus");
    client = getEndpoint().getServiceBusClientFactory().createServiceBusProcessorClient(getConfiguration(), this::processMessage, this::processError);
    client.start();
}
/**
 * A dead-letter message delivered to the consumer's callback must be converted into
 * an Exchange carrying the message body and the expected dead-letter headers, and
 * handed to the processor.
 */
@Test
void consumerSubmitsDeadLetterExchangeToProcessor() throws Exception {
    try (ServiceBusConsumer consumer = new ServiceBusConsumer(endpoint, processor)) {
        consumer.doStart();
        verify(client).start();
        verify(clientFactory).createServiceBusProcessorClient(any(), any(), any());

        when(messageContext.getMessage()).thenReturn(message);
        configureMockDeadLetterMessage();

        // Simulate the Service Bus client invoking the registered message callback.
        processMessageCaptor.getValue().accept(messageContext);
        verify(processor).process(any(Exchange.class), any(AsyncCallback.class));

        Exchange exchange = exchangeCaptor.getValue();
        assertThat(exchange).isNotNull();
        Message inMessage = exchange.getIn();
        assertThat(inMessage).isNotNull();
        assertThat(inMessage.getBody()).isInstanceOf(BinaryData.class);
        assertThat(inMessage.getBody(BinaryData.class).toString()).isEqualTo(MESSAGE_BODY);
        assertThat(inMessage.getHeaders()).isEqualTo(createExpectedDeadLetterMessageHeaders());
    }
}
/**
 * Identifies the plugin this data handler belongs to.
 *
 * @return the logging-rocketmq plugin name
 */
@Override
public String pluginNamed() {
    final String pluginName = PluginEnum.LOGGING_ROCKETMQ.getName();
    return pluginName;
}
/**
 * The handler must identify itself with the "loggingRocketMQ" plugin name.
 */
@Test
public void testPluginNamed() {
    Assertions.assertEquals(loggingRocketMQPluginDataHandler.pluginNamed(), "loggingRocketMQ");
}
/**
 * Determines whether the given path exists on the Spectra server.
 *
 * <p>The root always exists. Buckets are probed with a HEAD bucket request, other
 * paths with a HEAD object request; only a DOESNTEXIST status yields {@code false}.
 *
 * @throws BackgroundException mapped from request or I/O failures
 */
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        return true;
    }
    try {
        final Ds3Client client = new SpectraClientBuilder().wrap(session.getClient(), session.getHost());
        if(containerService.isContainer(file)) {
            final HeadBucketResponse response = client.headBucket(new HeadBucketRequest(containerService.getContainer(file).getName()));
            switch(response.getStatus()) {
                case DOESNTEXIST:
                    return false;
            }
            // Any other status is treated as existing.
            return true;
        }
        else {
            final HeadObjectResponse response = client.headObject(new HeadObjectRequest(containerService.getContainer(file).getName(), containerService.getKey(file)));
            switch(response.getStatus()) {
                case DOESNTEXIST:
                    return false;
            }
            // Any other status is treated as existing.
            return true;
        }
    }
    catch(FailedRequestException e) {
        throw new SpectraExceptionMappingService().map("Failure to read attributes of {0}", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Failure to read attributes of {0}", e, file);
    }
}
/**
 * A bucket with a random (never created) name must be reported as not found.
 */
@Test
public void testFindUnknownBucket() throws Exception {
    final Path test = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.volume, Path.Type.directory));
    assertFalse(new SpectraFindFeature(session).find(test));
}
/**
 * Returns the classic group for {@code groupId}, optionally creating an empty one.
 *
 * @param groupId           the group id to look up
 * @param createIfNotExists whether a missing group should be created as EMPTY
 * @return the existing or newly created classic group
 * @throws GroupIdNotFoundException if the group is missing and creation is not
 *         requested, or if a group with this id exists but is not a classic group
 */
ClassicGroup getOrMaybeCreateClassicGroup(
    String groupId,
    boolean createIfNotExists
) throws GroupIdNotFoundException {
    Group group = groups.get(groupId);

    if (group == null && !createIfNotExists) {
        throw new GroupIdNotFoundException(String.format("Classic group %s not found.", groupId));
    }

    if (group == null) {
        // Create a fresh EMPTY group and record the state transition in the metrics.
        ClassicGroup classicGroup = new ClassicGroup(logContext, groupId, ClassicGroupState.EMPTY, time, metrics);
        groups.put(groupId, classicGroup);
        metrics.onClassicGroupStateTransition(null, classicGroup.currentState());
        return classicGroup;
    } else {
        if (group.type() == CLASSIC) {
            return (ClassicGroup) group;
        } else {
            // We don't support upgrading/downgrading between protocols at the moment so
            // we throw an exception if a group exists with the wrong type.
            throw new GroupIdNotFoundException(String.format("Group %s is not a classic group.", groupId));
        }
    }
}
/**
 * When the leader's session expires mid-rebalance, the leader is evicted and the
 * remaining (new) member completes the rebalance alone in a new generation.
 */
@Test
public void testClassicGroupMemberSessionTimeoutDuringRebalance() throws Exception {
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .build();
    JoinGroupResponseData leaderJoinResponse =
        context.joinClassicGroupAsDynamicMemberAndCompleteRebalance("group-id");

    ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup("group-id", false);

    // Add a new member. This should trigger a rebalance. The new member has the
    // 'classicGroupNewMemberJoinTimeoutMs` session timeout, so it has a longer expiration than the existing member.
    GroupMetadataManagerTestContext.JoinResult otherJoinResult = context.sendClassicGroupJoin(
        new GroupMetadataManagerTestContext.JoinGroupRequestBuilder()
            .withGroupId("group-id")
            .withMemberId(UNKNOWN_MEMBER_ID)
            .withDefaultProtocolTypeAndProtocols()
            .withRebalanceTimeoutMs(10000)
            .withSessionTimeoutMs(5000)
            .build()
    );

    assertTrue(otherJoinResult.records.isEmpty());
    assertFalse(otherJoinResult.joinFuture.isDone());
    assertTrue(group.isInState(PREPARING_REBALANCE));

    // Advance clock by 1/2 of session timeout.
    GroupMetadataManagerTestContext.assertNoOrEmptyResult(context.sleep(2500));

    HeartbeatRequestData heartbeatRequest = new HeartbeatRequestData()
        .setGroupId("group-id")
        .setMemberId(leaderJoinResponse.memberId())
        .setGenerationId(leaderJoinResponse.generationId());

    // Leader heartbeat during rebalance: acknowledged with REBALANCE_IN_PROGRESS.
    HeartbeatResponseData heartbeatResponse = context.sendClassicGroupHeartbeat(heartbeatRequest).response();
    assertEquals(Errors.REBALANCE_IN_PROGRESS.code(), heartbeatResponse.errorCode());

    // Advance clock by first member's session timeout.
    GroupMetadataManagerTestContext.assertNoOrEmptyResult(context.sleep(5000));

    // The leader has now been expired and is no longer known to the group.
    assertThrows(UnknownMemberIdException.class, () -> context.sendClassicGroupHeartbeat(heartbeatRequest));

    // Advance clock by remaining rebalance timeout to complete join phase.
    GroupMetadataManagerTestContext.assertNoOrEmptyResult(context.sleep(2500));

    assertTrue(otherJoinResult.joinFuture.isDone());
    assertEquals(Errors.NONE.code(), otherJoinResult.joinFuture.get().errorCode());
    assertEquals(1, group.numMembers());
    assertEquals(2, group.generationId());
    assertTrue(group.isInState(COMPLETING_REBALANCE));
}
/**
 * Handles clicks on the wizard-page views: start setup, open the privacy policy,
 * or skip the wizard entirely.
 *
 * @throws IllegalArgumentException for a view id this fragment does not handle
 */
@Override
public void onClick(View v) {
    switch (v.getId()) {
        case R.id.go_to_start_setup:
            // Mark setup as started and move the pager forward.
            mSharedPrefs.edit().putBoolean(STARTED_PREF_KEY, true).apply();
            refreshWizardPager();
            break;
        case R.id.setup_wizard_welcome_privacy_action:
            String privacyUrl = getString(R.string.privacy_policy);
            startActivity(new Intent(Intent.ACTION_VIEW, Uri.parse(privacyUrl)));
            break;
        case R.id.skip_setup_wizard:
            startActivity(new Intent(getContext(), MainSettingsActivity.class));
            // not returning to this Activity any longer.
            requireActivity().finish();
            break;
        default:
            throw new IllegalArgumentException(
                "Failed to handle " + v.getId() + " in WizardPageDoneAndMoreSettingsFragment");
    }
}
/**
 * Clicking the "skip setup wizard" link must launch the main settings activity.
 */
@Test
public void testClickedSkipped() {
    var fragment = startFragment();
    final View link = fragment.getView().findViewById(R.id.skip_setup_wizard);
    // Robolectric shadow exposes the registered click listener.
    var linkClickHandler = Shadows.shadowOf(link).getOnClickListener();
    Assert.assertNotNull(linkClickHandler);
    linkClickHandler.onClick(link);

    final Intent nextStartedActivity =
        Shadows.shadowOf((Application) ApplicationProvider.getApplicationContext())
            .getNextStartedActivity();
    Assert.assertEquals(
        MainSettingsActivity.class.getName(), nextStartedActivity.getComponent().getClassName());
}
/**
 * Tests whether {@code value} falls inside this half-open range
 * {@code [minInclusive, maxExclusive)}.
 *
 * @param value the value to test
 * @return {@code true} when the value is within the range
 */
public boolean contains(BigInteger value) {
    // Below the inclusive lower bound?
    if (value.compareTo(minInclusive) < 0) {
        return false;
    }
    // Inside only when strictly below the exclusive upper bound.
    return value.compareTo(maxExclusive) < 0;
}
/**
 * Verifies the half-open semantics [1000, 2000): the lower bound is included,
 * the upper bound and anything outside are excluded.
 */
@Test
public void contains() {
    HashRange range = HashRange.range(1000, 2000);
    assertFalse(range.contains(new BigInteger("999")));
    assertTrue(range.contains(new BigInteger("1000")));
    assertTrue(range.contains(new BigInteger("1999")));
    assertFalse(range.contains(new BigInteger("2000")));
    assertFalse(range.contains(new BigInteger("2001")));
}
/**
 * FEEL three-valued logical negation: not(true) = false, not(false) = true,
 * not(null) = null. Any non-boolean argument is reported as an invalid parameter.
 *
 * @param negand the value to negate; expected to be a Boolean or null
 * @return the negated value, null for null input, or an invalid-parameters error
 */
public FEELFnResult<Boolean> invoke(@ParameterName("negand") Object negand) {
    if (negand == null) {
        // Unknown stays unknown under FEEL ternary logic.
        return FEELFnResult.ofResult(null);
    }
    if (negand instanceof Boolean) {
        return FEELFnResult.ofResult(!((Boolean) negand));
    }
    return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "negand", "must be a boolean value"));
}
/**
 * not(null) must evaluate to null under FEEL three-valued logic.
 */
@Test
void invokeNull() {
    FunctionTestUtil.assertResultNull(notFunction.invoke(null));
}
/**
 * Returns a list containing the integers {@code 0, 1, ..., size - 1};
 * delegates to {@code range(0, size)}.
 */
public static IntArrayList iota(int size) {
    return range(0, size);
}
/**
 * iota(15) must yield exactly the integers 0..14: correct length, count, and
 * arithmetic-series sum.
 */
@Test
public void testIota() {
    IntArrayList list = ArrayUtil.iota(15);
    assertEquals(15, list.buffer.length);
    assertEquals(15, list.elementsCount);
    // Sum of 0..14 equals n*(n+1)/2 with n = 14.
    assertEquals(14 / 2.0 * (14 + 1), Arrays.stream(list.buffer).sum());
}
/**
 * Constant-folding implementation of {@code str_to_date(str, format)}.
 *
 * <p>Strips surrounding whitespace from the input, then parses strictly with the
 * MySQL-style format. Formats containing a time part yield a full datetime (with a
 * prefix-reparse fallback for incomplete formats); date-only formats are completed
 * with a 00:00:00 time.
 */
@ConstantFunction(name = "str_to_date", argTypes = {VARCHAR, VARCHAR}, returnType = DATETIME)
public static ConstantOperator dateParse(ConstantOperator date, ConstantOperator fmtLiteral) {
    DateTimeFormatter builder = DateUtils.unixDatetimeFormatter(fmtLiteral.getVarchar(), false);
    String dateStr = StringUtils.strip(date.getVarchar(), "\r\n\t ");
    if (HAS_TIME_PART.matcher(fmtLiteral.getVarchar()).matches()) {
        LocalDateTime ldt;
        try {
            ldt = LocalDateTime.from(builder.withResolverStyle(ResolverStyle.STRICT).parse(dateStr));
        } catch (DateTimeParseException e) {
            // If parsing fails, it can be re-parsed from the position of the successful prefix string.
            // This way datetime string can use incomplete format
            // eg. str_to_date('2022-10-18 00:00:00','%Y-%m-%d %H:%s');
            ldt = LocalDateTime.from(builder.withResolverStyle(ResolverStyle.STRICT)
                    .parse(dateStr.substring(0, e.getErrorIndex())));
        }
        return ConstantOperator.createDatetimeOrNull(ldt);
    } else {
        // Date-only format: midnight is implied.
        LocalDate ld = LocalDate.from(builder.withResolverStyle(ResolverStyle.STRICT).parse(dateStr));
        return ConstantOperator.createDatetimeOrNull(ld.atTime(0, 0, 0));
    }
}
/**
 * End-to-end checks for {@code str_to_date} constant folding: happy paths,
 * partial/incomplete formats, whitespace handling, and error conditions.
 */
@Test
public void dateParse() {
    // Date-only formats resolve to midnight.
    assertEquals("2013-05-10T00:00", ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("2013,05,10"), ConstantOperator.createVarchar("%Y,%m,%d"))
            .getDatetime().toString());
    // Surrounding spaces in the input are stripped before parsing.
    assertEquals("2013-05-10T00:00", ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar(" 2013,05,10 "), ConstantOperator.createVarchar("%Y,%m,%d"))
            .getDatetime().toString());
    assertEquals("2013-05-17T12:35:10", ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("2013-05-17 12:35:10"),
                    ConstantOperator.createVarchar("%Y-%m-%d %H:%i:%s")).getDatetime().toString());
    // Single-digit month/day still parse.
    assertEquals("2013-01-17T00:00", ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("2013-1-17"),
                    ConstantOperator.createVarchar("%Y-%m-%d")).getDatetime().toString());
    assertEquals("2013-12-01T00:00", ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("2013121"),
                    ConstantOperator.createVarchar("%Y%m%d")).getDatetime().toString());
    // Fractional seconds (%f) down to microseconds; trailing zeros drop out.
    assertEquals("2013-05-17T12:35:10.000123", ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("2013-05-17 12:35:10.000123"),
                    ConstantOperator.createVarchar("%Y-%m-%d %H:%i:%s.%f")).getDatetime().toString());
    assertEquals("2013-05-17T12:35:10.000001", ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("2013-05-17 12:35:10.000001"),
                    ConstantOperator.createVarchar("%Y-%m-%d %H:%i:%s.%f")).getDatetime().toString());
    assertEquals("2013-05-17T12:35:10", ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("2013-05-17 12:35:10.00000"),
                    ConstantOperator.createVarchar("%Y-%m-%d %H:%i:%s.%f")).getDatetime().toString());
    assertEquals("2013-05-17T00:35:10", ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("2013-05-17 00:35:10"),
                    ConstantOperator.createVarchar("%Y-%m-%d %H:%i:%s")).getDatetime().toString());
    // Literal text in the format must match the input verbatim.
    assertEquals("2013-05-17T23:35:10", ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("abc 2013-05-17 fff 23:35:10 xyz"),
                    ConstantOperator.createVarchar("abc %Y-%m-%d fff %H:%i:%s xyz")).getDatetime().toString());
    // %j: day of year (129 -> May 9th).
    assertEquals("2019-05-09T00:00", ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("2019,129"), ConstantOperator.createVarchar("%Y,%j"))
            .getDatetime().toString());
    // %T: full time; %k: hour without leading zero.
    assertEquals("2019-05-09T12:10:45", ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("12:10:45-20190509"),
                    ConstantOperator.createVarchar("%T-%Y%m%d")).getDatetime().toString());
    assertEquals("2019-05-09T09:10:45", ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("20190509-9:10:45"),
                    ConstantOperator.createVarchar("%Y%m%d-%k:%i:%S")).getDatetime().toString());
    assertEquals("2020-02-21 00:00:00",
            ScalarOperatorFunctions.dateParse(ConstantOperator.createVarchar("2020-02-21"),
                    ConstantOperator.createVarchar("%Y-%m-%d")).toString());
    // %y: two-digit year with pivot (20 -> 2020, 98 -> 1998).
    assertEquals("2020-02-21 00:00:00",
            ScalarOperatorFunctions.dateParse(ConstantOperator.createVarchar("20-02-21"),
                    ConstantOperator.createVarchar("%y-%m-%d")).toString());
    assertEquals("1998-02-21 00:00:00",
            ScalarOperatorFunctions.dateParse(ConstantOperator.createVarchar("98-02-21"),
                    ConstantOperator.createVarchar("%y-%m-%d")).toString());
    // Year-month alone cannot be resolved into a datetime.
    Assert.assertThrows(DateTimeException.class, () -> ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("201905"),
                    ConstantOperator.createVarchar("%Y%m")).getDatetime());
    Assert.assertThrows(DateTimeException.class, () -> ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("20190507"),
                    ConstantOperator.createVarchar("%Y%m")).getDatetime());
    // Strict resolution rejects impossible dates (2019 is not a leap year).
    Assert.assertThrows(DateTimeParseException.class, () -> ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("2019-02-29"),
                    ConstantOperator.createVarchar("%Y-%m-%d")).getDatetime());
    Assert.assertThrows(DateTimeParseException.class, () -> ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("2019-02-29 11:12:13"),
                    ConstantOperator.createVarchar("%Y-%m-%d %H:%i:%s")).getDatetime());
    // %w is not an accepted parse directive.
    Assert.assertThrows(IllegalArgumentException.class,
            () -> ScalarOperatorFunctions.dateParse(ConstantOperator.createVarchar("2020-2-21"),
                    ConstantOperator.createVarchar("%w")).getVarchar());
    Assert.assertThrows(IllegalArgumentException.class,
            () -> ScalarOperatorFunctions.dateParse(ConstantOperator.createVarchar("2020-02-21"),
                    ConstantOperator.createVarchar("%w")).getVarchar());
    // Leading \t, \n and \r are stripped; \f is not and fails the parse.
    assertEquals("2013-01-17T00:00", ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("\t 2013-1-17"),
                    ConstantOperator.createVarchar("%Y-%m-%d")).getDate().toString());
    assertEquals("2013-01-17T00:00", ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("\n 2013-1-17"),
                    ConstantOperator.createVarchar("%Y-%m-%d")).getDate().toString());
    assertEquals("2013-01-17T00:00", ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("\r 2013-1-17"),
                    ConstantOperator.createVarchar("%Y-%m-%d")).getDate().toString());
    Assert.assertThrows(DateTimeParseException.class,
            () -> ScalarOperatorFunctions.dateParse(ConstantOperator.createVarchar("\f 2020-02-21"),
                    ConstantOperator.createVarchar("%Y-%m-%d")).getVarchar());
    // %h is a 12-hour field; without AM/PM the datetime cannot be resolved.
    Assert.assertThrows("Unable to obtain LocalDateTime", DateTimeException.class, () -> ScalarOperatorFunctions
            .dateParse(ConstantOperator.createVarchar("2013-05-17 12:35:10"),
                    ConstantOperator.createVarchar("%Y-%m-%d %h:%i:%s")).getDatetime());
    assertEquals("2022-10-18T01:02:03", ScalarOperatorFunctions.dateParse(
            ConstantOperator.createVarchar("2022-10-18 01:02:03"),
            ConstantOperator.createVarchar("%Y-%m-%d %H:%i:%s")).getDatetime().toString());
    // An incomplete format parses the matching prefix of the input.
    assertEquals("2022-10-18T01:02", ScalarOperatorFunctions.dateParse(
            ConstantOperator.createVarchar("2022-10-18 01:02:03"),
            ConstantOperator.createVarchar("%Y-%m-%d %H:%i")).getDatetime().toString());
    // A format whose fields cannot match the input prefix still fails.
    Assert.assertThrows("Unable to obtain LocalDateTime", DateTimeException.class,
            () -> ScalarOperatorFunctions.dateParse(
                    ConstantOperator.createVarchar("2022-10-18 01:02:03"),
                    ConstantOperator.createVarchar("%Y-%m-%d %H:%s")).getDatetime());
}
/**
 * Converts {@code date} to a {@link Date}, passing the current time as the
 * default to the two-argument overload.
 * NOTE(review): presumably the overload returns the default when conversion
 * is not possible (its null test suggests so) — confirm against its code.
 */
public static Date getDate(Object date) {
    return getDate(date, Calendar.getInstance().getTime());
}
/** getDate(null, default) must return the supplied default date. */
@Test
@SuppressWarnings({ "UndefinedEquals", "JavaUtilDate" })
public void testGetDateObjectDateWithNullAndDateAsDefault() {
    Date date = new Date();
    assertEquals(date, Converter.getDate(null, date));
}
/**
 * REST endpoint returning all application-attempt timeline entities for the
 * given application in the default cluster context.
 * All query parameters are optional filters/projections; they are forwarded
 * verbatim to the overload that also takes an explicit cluster id (passed as
 * {@code null} here, i.e. the default cluster).
 */
@GET
@Path("/apps/{appid}/appattempts")
@Produces(MediaType.APPLICATION_JSON)
public Set<TimelineEntity> getAppAttempts(@Context HttpServletRequest req,
    @Context HttpServletResponse res,
    @PathParam("appid") String appId,
    @QueryParam("userid") String userId,
    @QueryParam("flowname") String flowName,
    @QueryParam("flowrunid") String flowRunId,
    @QueryParam("limit") String limit,
    @QueryParam("createdtimestart") String createdTimeStart,
    @QueryParam("createdtimeend") String createdTimeEnd,
    @QueryParam("relatesto") String relatesTo,
    @QueryParam("isrelatedto") String isRelatedTo,
    @QueryParam("infofilters") String infofilters,
    @QueryParam("conffilters") String conffilters,
    @QueryParam("metricfilters") String metricfilters,
    @QueryParam("eventfilters") String eventfilters,
    @QueryParam("confstoretrieve") String confsToRetrieve,
    @QueryParam("metricstoretrieve") String metricsToRetrieve,
    @QueryParam("fields") String fields,
    @QueryParam("metricslimit") String metricsLimit,
    @QueryParam("metricstimestart") String metricsTimeStart,
    @QueryParam("metricstimeend") String metricsTimeEnd,
    @QueryParam("fromid") String fromId) {
    // Delegate with a null cluster id (default cluster).
    return getAppAttempts(req, res, null, appId, userId, flowName, flowRunId,
        limit, createdTimeStart, createdTimeEnd, relatesTo, isRelatedTo,
        infofilters, conffilters, metricfilters, eventfilters, confsToRetrieve,
        metricsToRetrieve, fields, metricsLimit, metricsTimeStart,
        metricsTimeEnd, fromId);
}
/**
 * Application attempts must be retrievable both through the generic
 * entity-type endpoint and the dedicated {@code /appattempts} path, and both
 * must return the same set of attempt entities.
 * Fix: the assertion messages for the app-attempt-1 checks wrongly referred
 * to "app-attempt-2" (copy-paste error).
 */
@Test
void testGetAppAttempts() throws Exception {
    Client client = createClient();
    try {
        // Fetch attempts via the generic entity-type endpoint.
        URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
            "timeline/clusters/cluster1/apps/app1/" +
            "entities/YARN_APPLICATION_ATTEMPT");
        ClientResponse resp = getResponse(client, uri);
        Set<TimelineEntity> entities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {
        });
        assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
            resp.getType().toString());
        assertNotNull(entities);
        int totalEntities = entities.size();
        assertEquals(2, totalEntities);
        assertTrue(
            entities.contains(
                newEntity(TimelineEntityType.YARN_APPLICATION_ATTEMPT.toString(), "app-attempt-1")),
            "Entity with app-attempt-1 should have been present in response.");
        assertTrue(
            entities.contains(
                newEntity(TimelineEntityType.YARN_APPLICATION_ATTEMPT.toString(), "app-attempt-2")),
            "Entity with app-attempt-2 should have been present in response.");

        // Fetch the same attempts via the dedicated /appattempts path.
        uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" +
            "timeline/clusters/cluster1/apps/app1/appattempts");
        resp = getResponse(client, uri);
        entities = resp.getEntity(new GenericType<Set<TimelineEntity>>() {
        });
        assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType());
        assertNotNull(entities);
        int retrievedEntity = entities.size();
        assertEquals(2, retrievedEntity);
        assertTrue(
            entities.contains(
                newEntity(TimelineEntityType.YARN_APPLICATION_ATTEMPT.toString(), "app-attempt-1")),
            "Entity with app-attempt-1 should have been present in response.");
        assertTrue(
            entities.contains(
                newEntity(TimelineEntityType.YARN_APPLICATION_ATTEMPT.toString(), "app-attempt-2")),
            "Entity with app-attempt-2 should have been present in response.");
        // Both endpoints must agree on the number of attempts.
        assertEquals(totalEntities, retrievedEntity);
    } finally {
        client.destroy();
    }
}
/**
 * Builds fetch requests for all fetchable partitions and sends them.
 * NOTE(review): the success/failure handlers re-synchronize on
 * {@code Fetcher.this} — presumably they run on another thread after this
 * method has returned; confirm against sendFetchesInternal's threading.
 *
 * @return the number of fetch requests that were sent
 */
public synchronized int sendFetches() {
    final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
    sendFetchesInternal(
        fetchRequests,
        (fetchTarget, data, clientResponse) -> {
            synchronized (Fetcher.this) {
                handleFetchSuccess(fetchTarget, data, clientResponse);
            }
        },
        (fetchTarget, data, error) -> {
            synchronized (Fetcher.this) {
                handleFetchFailure(fetchTarget, data, error);
            }
        });
    return fetchRequests.size();
}
/**
 * Corrupts the CRC of a v2 record batch and verifies that every subsequent
 * collectFetch() raises a KafkaException without advancing the position.
 */
@Test
public void testInvalidDefaultRecordBatch() {
    buildFetcher();
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    ByteBufferOutputStream out = new ByteBufferOutputStream(buffer);
    MemoryRecordsBuilder builder = new MemoryRecordsBuilder(out,
        DefaultRecordBatch.CURRENT_MAGIC_VALUE,
        Compression.NONE,
        TimestampType.CREATE_TIME,
        0L, 10L, 0L, (short) 0, 0, false, false, 0, 1024);
    builder.append(10L, "key".getBytes(), "value".getBytes());
    builder.close();
    buffer.flip();
    // Garble the CRC
    buffer.position(17);
    buffer.put("beef".getBytes());
    buffer.position(0);
    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    // normal fetch
    assertEquals(1, sendFetches());
    client.prepareResponse(fullFetchResponse(tidp0, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    for (int i = 0; i < 2; i++) {
        // collectFetch() should always throw exception due to the bad batch.
        assertThrows(KafkaException.class, this::collectFetch);
        // The position must stay at 0 — the corrupt batch is never consumed.
        assertEquals(0, subscriptions.position(tp0).offset);
    }
}
/**
 * Validates that {@code collection} has at least one element.
 * Delegates the failure path to {@code isTrue}, which raises the
 * validation error carrying {@code message}.
 */
public static void notEmpty(final Collection<?> collection, final String message) {
    final boolean hasElements = !CollectionUtils.isEmpty(collection);
    isTrue(hasElements, message);
}
/** notEmpty: a non-empty collection passes; an empty one fails validation. */
@Test
public void notEmptyTest() {
    Assertions.assertDoesNotThrow(() -> Assert.notEmpty(Collections.singleton(1), "error message"));
    Assertions.assertThrows(ValidFailException.class,
        () -> Assert.notEmpty(Collections.emptyList(), "error message"));
}
/**
 * Extracts markdown text from a schema docstring: drops the 3-character
 * opening and 2-character closing delimiters (presumably "/**" and the
 * closing star-slash), strips the per-line margin, and unescapes the body.
 */
public static String extractMarkdown(String schemaDoc) {
    final String trimmed = schemaDoc.trim();
    // Cut off the surrounding docstring delimiters.
    final String body = trimmed.substring(3, trimmed.length() - 2);
    return unescapeDocstring(stripMargin(body));
}
/** Data-driven check: docstring delimiters and margins are stripped as expected. */
@Test(dataProvider = "schemaDoc")
public void testExtractMarkdown(String schemaDoc, String expectedDoc) {
    String extracted = PdlParseUtils.extractMarkdown(schemaDoc);
    assertEquals(extracted, expectedDoc);
}
/**
 * Reports whether NONE of the four Shenyu config data ids (plugin, auth,
 * metadata, proxy selector) exist yet in the config source.
 * Returns false as soon as any one of them is found.
 */
@Override
protected boolean notExist() {
    final String[] dataIds = {
        ApolloPathConstants.PLUGIN_DATA_ID,
        ApolloPathConstants.AUTH_DATA_ID,
        ApolloPathConstants.META_DATA_ID,
        ApolloPathConstants.PROXY_SELECTOR_DATA_ID,
    };
    for (final String dataId : dataIds) {
        // Any present data id means initialization data already exists.
        if (!dataIdNotExist(NodeDataPathUtils.appendListStuff(dataId))) {
            return false;
        }
    }
    return true;
}
/**
 * notExist() must report false as soon as any one of the four config data
 * ids (plugin, auth, metadata, proxy selector) is present in Apollo.
 */
@Test
public void testNotExist() {
    // Only the plugin data id is present.
    when(apolloClient.getItemValue(join(PLUGIN_DATA_ID))).thenReturn(PLUGIN_DATA_ID);
    boolean pluginExist = apolloDataChangedInit.notExist();
    Assertions.assertFalse(pluginExist, "plugin exist.");
    // Only the auth data id is present.
    when(apolloClient.getItemValue(join(PLUGIN_DATA_ID))).thenReturn(null);
    when(apolloClient.getItemValue(join(AUTH_DATA_ID))).thenReturn(AUTH_DATA_ID);
    boolean authExist = apolloDataChangedInit.notExist();
    Assertions.assertFalse(authExist, "auth exist.");
    // Only the metadata id is present.
    when(apolloClient.getItemValue(join(AUTH_DATA_ID))).thenReturn(null);
    when(apolloClient.getItemValue(join(META_DATA_ID))).thenReturn(META_DATA_ID);
    boolean metaDataExist = apolloDataChangedInit.notExist();
    Assertions.assertFalse(metaDataExist, "metadata exist.");
    // Only the proxy-selector id is present.
    when(apolloClient.getItemValue(join(META_DATA_ID))).thenReturn(null);
    when(apolloClient.getItemValue(join(PROXY_SELECTOR_DATA_ID))).thenReturn(PROXY_SELECTOR_DATA_ID);
    boolean selectorDataExist = apolloDataChangedInit.notExist();
    Assertions.assertFalse(selectorDataExist, "selector exist.");
}
public static final MessageId getMessageId(long sequenceId) { // Demultiplex ledgerId and entryId from offset long ledgerId = sequenceId >>> 28; long entryId = sequenceId & 0x0F_FF_FF_FFL; return new MessageIdImpl(ledgerId, entryId, -1); }
/** Round-trip: ledger/entry ids packed into a sequence id are recovered intact. */
@Test
public void testGetMessageId() {
    long lid = 12345L;
    long eid = 34566L;
    // Pack: ledger id in the high bits, entry id in the low 28 bits.
    long sequenceId = (lid << 28) | eid;
    MessageIdImpl id = (MessageIdImpl) FunctionCommon.getMessageId(sequenceId);
    assertEquals(lid, id.getLedgerId());
    assertEquals(eid, id.getEntryId());
}
/**
 * Applies this type's transformer to the raw table cells.
 * Any failure is wrapped in a CucumberDataTableException whose message
 * includes the type's canonical name and a rendering of the offending table.
 */
public Object transform(List<List<String>> raw) {
    try {
        return transformer.transform(raw);
    } catch (Throwable cause) {
        final String details = String.format("'%s' could not transform%n%s", toCanonical(), DataTable.create(raw));
        throw new CucumberDataTableException(details, cause);
    }
}
/** A table with a header row is transformed row-by-row into typed entries. */
@Test
void shouldTransformATableEntry() {
    DataTableType tableType = new DataTableType(
        Place.class,
        (Map<String, String> entry) -> new Place(entry.get("place")));
    String here = "here";
    // noinspection unchecked
    List<Place> transform = (List<Place>) tableType
        .transform(Arrays.asList(singletonList("place"), singletonList(here)));
    assertEquals(1, transform.size());
    assertEquals(here, transform.get(0).name);
}
/**
 * Sets the number of asynchronous backups, after validating it together with
 * the current synchronous backup count.
 *
 * @param asyncBackupCount the async backup count (negative values are rejected)
 * @return this config, for chaining
 * @throws IllegalArgumentException if the count fails validation
 */
public MapConfig setAsyncBackupCount(int asyncBackupCount) {
    this.asyncBackupCount = checkAsyncBackupCount(backupCount, asyncBackupCount);
    return this;
}
/** A negative async backup count must be rejected. */
@Test(expected = IllegalArgumentException.class)
public void setAsyncBackupCount_whenItsNegative() {
    MapConfig config = new MapConfig();
    config.setAsyncBackupCount(-1);
}
/**
 * Lists the pods of the configured namespace via the Kubernetes API and
 * enriches the resulting endpoints with public addresses.
 * Known REST failures are delegated to {@code handleKnownException} instead
 * of being propagated.
 */
List<Endpoint> endpoints() {
    try {
        String urlString = String.format("%s/api/v1/namespaces/%s/pods", kubernetesMaster, namespace);
        return enrichWithPublicAddresses(parsePodsList(callGet(urlString)));
    } catch (RestClientException e) {
        return handleKnownException(e);
    }
}
/**
 * Pods fronted by LoadBalancer services must be reported with both their
 * private pod addresses and the load balancer's external IP / service port.
 */
@Test
public void endpointsByNamespaceWithLoadBalancerPublicIp() throws JsonProcessingException {
    // given
    stub(String.format("/api/v1/namespaces/%s/pods", NAMESPACE), podsListResponse());
    stub(String.format("/api/v1/namespaces/%s/endpoints", NAMESPACE), endpointsListResponse());
    stub(String.format("/api/v1/namespaces/%s/services/hazelcast-0", NAMESPACE),
        serviceLb(servicePort(32123, 5701, 31916), "35.232.226.200"));
    stub(String.format("/api/v1/namespaces/%s/services/service-1", NAMESPACE),
        serviceLb(servicePort(32124, 5701, 31917), "35.232.226.201"));
    // when
    List<Endpoint> result = kubernetesClient.endpoints();
    // then
    assertThat(formatPrivate(result)).containsExactlyInAnyOrder(ready("192.168.0.25", 5701), ready("172.17.0.5", 5702));
    assertThat(formatPublic(result)).containsExactlyInAnyOrder(ready("35.232.226.200", 32123), ready("35.232.226.201", 32124));
}
/**
 * Parses the content of {@code reader} into a single JsonElement.
 * A non-null element followed by more tokens means the document contained
 * more than one top-level value, which is rejected.
 * Improvement: the two catch clauses that both mapped to JsonSyntaxException
 * are merged into one multi-catch.
 *
 * @throws JsonSyntaxException if the content is not syntactically valid JSON
 * @throws JsonIOException if reading from {@code reader} fails
 */
public static JsonElement parseReader(Reader reader) throws JsonIOException, JsonSyntaxException {
    try {
        JsonReader jsonReader = new JsonReader(reader);
        JsonElement element = parseReader(jsonReader);
        if (!element.isJsonNull() && jsonReader.peek() != JsonToken.END_DOCUMENT) {
            throw new JsonSyntaxException("Did not consume the entire document.");
        }
        return element;
    } catch (MalformedJsonException | NumberFormatException e) {
        // Both indicate syntactically invalid JSON content.
        throw new JsonSyntaxException(e);
    } catch (IOException e) {
        throw new JsonIOException(e);
    }
}
/**
 * In STRICT mode a mixed-case literal like "faLsE" is malformed: parsing
 * must fail, and the reader's configured strictness must be left untouched.
 */
@Test
public void testStrict() {
    JsonReader reader = new JsonReader(new StringReader("faLsE"));
    Strictness strictness = Strictness.STRICT;
    reader.setStrictness(strictness);
    var e = assertThrows(JsonSyntaxException.class, () -> JsonParser.parseReader(reader));
    assertThat(e)
        .hasCauseThat()
        .hasMessageThat()
        .startsWith("Use JsonReader.setStrictness(Strictness.LENIENT) to accept malformed JSON");
    // Original strictness was kept
    assertThat(reader.getStrictness()).isEqualTo(strictness);
}
/**
 * Async getChildren that transparently resolves symlinks in {@code path}.
 * Paths without symlinks go straight to the underlying ZooKeeper client;
 * symlinked paths are resolved through a composite callback, which also
 * serves as the watcher when {@code watch} is requested.
 */
@Override
public void getChildren(final String path, final boolean watch, final AsyncCallback.ChildrenCallback cb, final Object ctx) {
    if (!SymlinkUtil.containsSymlink(path)) {
        _zk.getChildren(path, watch, cb, ctx);
    } else {
        SymlinkChildrenCallback compositeCallback = new SymlinkChildrenCallback(path, _defaultWatcher, cb);
        // Pass the composite callback as the watcher only when watching.
        getChildren0(path, watch ? compositeCallback : null, compositeCallback, ctx);
    }
}
/**
 * Children of a path reached through two chained symlinks
 * (/$bar -> /foo, then /foo/$link -> /foo/bar) must resolve correctly.
 * NOTE(review): latch.await's boolean result is ignored — a timeout (and the
 * assertions running on the callback thread) could pass silently; consider
 * asserting the await result.
 */
@Test
public void testMultiSymlink() throws InterruptedException {
    final CountDownLatch latch = new CountDownLatch(1);
    AsyncCallback.ChildrenCallback callback = new AsyncCallback.ChildrenCallback() {
        @Override
        public void processResult(int rc, String path, Object ctx, List<String> children) {
            KeeperException.Code result = KeeperException.Code.get(rc);
            Assert.assertEquals(result, KeeperException.Code.OK);
            Assert.assertEquals(path, "/$bar/$link");
            Assert.assertEquals(children.size(), 10);
            latch.countDown();
        }
    };
    // symlink: /$bar -> /foo
    // symlink: /foo/$link -> /foo/bar
    _zkClient.getZooKeeper().getChildren("/$bar/$link", null, callback, null);
    latch.await(30, TimeUnit.SECONDS);
}
/**
 * Convenience wrapper that builds a QuotaRequest and queries the limit API.
 * Fail-open: any failure is logged and mapped to a permissive QuotaResultOk
 * response, so callers are never blocked by limiter errors.
 * NOTE(review): marked deprecated; the intended replacement is not visible
 * here — presumably calling LimitAPI with a QuotaRequest directly.
 */
@Deprecated
public static QuotaResponse getQuota(LimitAPI limitAPI, String namespace, String service, int count,
        Map<String, String> labels, String method) {
    // build quota request
    QuotaRequest quotaRequest = new QuotaRequest();
    quotaRequest.setNamespace(namespace);
    quotaRequest.setService(service);
    quotaRequest.setCount(count);
    quotaRequest.setLabels(labels);
    quotaRequest.setMethod(method);
    try {
        return limitAPI.getQuota(quotaRequest);
    } catch (Throwable throwable) {
        _logNote: ;
        LOG.error("fail to invoke getQuota of LimitAPI with QuotaRequest[{}].", quotaRequest, throwable);
        // Fail open: report success with zero wait time.
        return new QuotaResponse(new QuotaResult(QuotaResult.Code.QuotaResultOk, 0, "get quota failed"));
    }
}
/**
 * Exercises the fail-open quota wrapper against four stubbed services:
 * immediate pass, unirate wait, rate limited, and an internal exception.
 */
@Test
public void testGetQuota2() {
    // Pass
    String serviceName = "TestApp1";
    QuotaResponse quotaResponse = QuotaCheckUtils.getQuota(limitAPI, null, serviceName, 1, new HashSet<>(), null);
    assertThat(quotaResponse.getCode()).isEqualTo(QuotaResultCode.QuotaResultOk);
    assertThat(quotaResponse.getWaitMs()).isEqualTo(0);
    assertThat(quotaResponse.getInfo()).isEqualTo("QuotaResultOk");
    // Unirate waiting 1000ms
    serviceName = "TestApp2";
    quotaResponse = QuotaCheckUtils.getQuota(limitAPI, null, serviceName, 1, new HashSet<>(), null);
    assertThat(quotaResponse.getCode()).isEqualTo(QuotaResultCode.QuotaResultOk);
    assertThat(quotaResponse.getWaitMs()).isEqualTo(1000);
    assertThat(quotaResponse.getInfo()).isEqualTo("QuotaResultOk");
    // Rate limited
    serviceName = "TestApp3";
    quotaResponse = QuotaCheckUtils.getQuota(limitAPI, null, serviceName, 1, new HashSet<>(), null);
    assertThat(quotaResponse.getCode()).isEqualTo(QuotaResultCode.QuotaResultLimited);
    assertThat(quotaResponse.getWaitMs()).isEqualTo(0);
    assertThat(quotaResponse.getInfo()).isEqualTo("QuotaResultLimited");
    // Exception — the wrapper fails open with its sentinel message.
    serviceName = "TestApp4";
    quotaResponse = QuotaCheckUtils.getQuota(limitAPI, null, serviceName, 1, new HashSet<>(), null);
    assertThat(quotaResponse.getCode()).isEqualTo(QuotaResultCode.QuotaResultOk);
    assertThat(quotaResponse.getWaitMs()).isEqualTo(0);
    assertThat(quotaResponse.getInfo()).isEqualTo("get quota failed");
}
/**
 * Resolves the singleton registered for the generic key
 * {@code interface<type>} (e.g. "com.x.Foo<com.x.Bar>").
 * When several implementations were registered as an array, the first entry
 * is returned as the default.
 * Improvement: replaced the reflective {@code Array.get} on a value already
 * known to be an {@code Object[]} with a direct cast and index, and braced
 * the single-line conditionals.
 *
 * @return the registered bean, or {@code null} if none was registered
 */
public static <T> T getBean(Class<T> interfaceClass, Class typeClass) {
    Object object = serviceMap.get(interfaceClass.getName() + "<" + typeClass.getName() + ">");
    if (object == null) {
        return null;
    }
    if (object instanceof Object[]) {
        // Multiple implementations registered; the first one is the default.
        return (T) ((Object[]) object)[0];
    }
    return (T) object;
}
/** A registered InjectedBean implementation must be resolvable by interface. */
@Test
public void testInjectedBean() {
    InjectedBean injectedBean = SingletonServiceFactory.getBean(InjectedBean.class);
    Assert.assertEquals("Injected Bean", injectedBean.name());
}
public static String stripMargin(String schemadoc) { char marginChar = '*'; StringBuilder buf = new StringBuilder(); String[] schemadocByLine = schemadoc.split(System.lineSeparator()); for (int i = 0; i < schemadocByLine.length; i++) { String lineWithoutSeparator = schemadocByLine[i]; // Skip the first and last line if empty/whitespace. if ((i == 0 || i == schemadocByLine.length - 1) && (lineWithoutSeparator.trim().isEmpty())) { continue; } String line = lineWithoutSeparator + System.lineSeparator(); int len = line.length(); int index = 0; // Iterate past the leading whitespace. while (index < len && line.charAt(index) <= ' ') { index++; } // If at margin char, trim the leading whitespace // and also trim the one extra space which is after the margin char. if (index < len && line.charAt(index) == marginChar) { if (index + 1 < len && line.charAt(index + 1) == ' ') { index++; } buf.append(line.substring(index + 1)); } else { buf.append(line); } } String withoutMargin = buf.toString(); // Trim the line separator in the last line. if (withoutMargin.endsWith(System.lineSeparator())) { withoutMargin = withoutMargin.substring(0, withoutMargin.lastIndexOf(System.lineSeparator())); } return withoutMargin; }
/** The leading " * " margin must be stripped from every docstring line. */
@Test
public void testStripMargin() {
    String docString = PdlParseUtils.stripMargin(
        " * The quick\n" +
        " * brown fox\n");
    assertEquals(docString, "The quick\n" + "brown fox");
}
/**
 * INT overload of sqrt: widens the value to DOUBLE and delegates to the
 * DOUBLE overload. A null input is forwarded as null to the delegate.
 */
@Udf(description = "Returns the correctly rounded positive square root of a DOUBLE value")
public Double sqrt(
    @UdfParameter(
        value = "value",
        description = "The value to get the square root of."
    ) final Integer value
) {
    return sqrt(value == null ? null : value.doubleValue());
}
/** sqrt(0) is 0.0 across the INT, BIGINT and DOUBLE overloads. */
@Test
public void shouldHandleZero() {
    assertThat(udf.sqrt(0), is(0.0));
    assertThat(udf.sqrt(0L), is(0.0));
    assertThat(udf.sqrt(0.0), is(0.0));
}
public static Optional<DefaultMqPushConsumerWrapper> wrapPushConsumer(DefaultMQPushConsumer pushConsumer) { DefaultMQPushConsumerImpl pushConsumerImpl = pushConsumer.getDefaultMQPushConsumerImpl(); MQClientInstance mqClientFactory = pushConsumerImpl.getmQClientFactory(); // Obtain the defaultMQPushConsumerImpl and mQClientFactory attribute values related to the consumer. If the // attribute value is null, do not cache the consumer if (pushConsumerImpl != null && mqClientFactory != null) { DefaultMqPushConsumerWrapper wrapper = new DefaultMqPushConsumerWrapper(pushConsumer, pushConsumerImpl, mqClientFactory); initWrapperServiceMeta(wrapper); return Optional.of(wrapper); } return Optional.empty(); }
/** Wrapping a fully initialized push consumer yields a wrapper holding it. */
@Test
public void testWrapPushConsumer() {
    Optional<DefaultMqPushConsumerWrapper> pushConsumerWrapperOptional = RocketMqWrapperUtils
        .wrapPushConsumer(pushConsumer);
    Assert.assertTrue(pushConsumerWrapperOptional.isPresent());
    Assert.assertEquals(pushConsumerWrapperOptional.get().getPushConsumer(), pushConsumer);
}
/**
 * Serializes the full telemetry payload as one JSON object: server identity,
 * edition/gate settings, database and plugin info, optional flags, and the
 * per-domain sections written by the helper methods, followed by any
 * registered extensions.
 */
public void writeTelemetryData(JsonWriter json, TelemetryData telemetryData) {
    json.beginObject();
    // Identity and versioning.
    json.prop("id", telemetryData.getServerId());
    json.prop(VERSION, telemetryData.getVersion());
    json.prop("messageSequenceNumber", telemetryData.getMessageSequenceNumber());
    json.prop("localTimestamp", toUtc(system2.now()));
    json.prop(NCD_ID, telemetryData.getNcdId());
    // Edition is optional; the enum name is emitted lower-cased.
    telemetryData.getEdition().ifPresent(e -> json.prop("edition", e.name().toLowerCase(Locale.ENGLISH)));
    json.prop("defaultQualityGate", telemetryData.getDefaultQualityGate());
    json.prop("sonarway_quality_gate_uuid", telemetryData.getSonarWayQualityGate());
    // Nested database object.
    json.name("database");
    json.beginObject();
    json.prop("name", telemetryData.getDatabase().name());
    json.prop(VERSION, telemetryData.getDatabase().version());
    json.endObject();
    // One object per installed plugin.
    json.name("plugins");
    json.beginArray();
    telemetryData.getPlugins().forEach((plugin, version) -> {
        json.beginObject();
        json.prop("name", plugin);
        json.prop(VERSION, version);
        json.endObject();
    });
    json.endArray();
    // Optional sections are omitted entirely when empty/absent.
    if (!telemetryData.getCustomSecurityConfigs().isEmpty()) {
        json.name("customSecurityConfig");
        json.beginArray();
        json.values(telemetryData.getCustomSecurityConfigs());
        json.endArray();
    }
    telemetryData.hasUnanalyzedC().ifPresent(hasUnanalyzedC -> json.prop("hasUnanalyzedC", hasUnanalyzedC));
    telemetryData.hasUnanalyzedCpp().ifPresent(hasUnanalyzedCpp -> json.prop("hasUnanalyzedCpp", hasUnanalyzedCpp));
    if (telemetryData.getInstallationDate() != null) {
        json.prop("installationDate", toUtc(telemetryData.getInstallationDate()));
    }
    if (telemetryData.getInstallationVersion() != null) {
        json.prop("installationVersion", telemetryData.getInstallationVersion());
    }
    json.prop("container", telemetryData.isInContainer());
    // Per-domain sections.
    writeUserData(json, telemetryData);
    writeProjectData(json, telemetryData);
    writeProjectStatsData(json, telemetryData);
    writeBranches(json, telemetryData);
    writeNewCodeDefinitions(json, telemetryData);
    writeQualityGates(json, telemetryData);
    writeQualityProfiles(json, telemetryData);
    writeManagedInstanceInformation(json, telemetryData.getManagedInstanceInformation());
    writeCloudUsage(json, telemetryData.getCloudUsage());
    // Registered extensions append their own properties last.
    extensions.forEach(e -> e.write(json));
    json.endObject();
}
/** installationVersion is serialized when it has been set. */
@Test
void write_installation_version() {
    String installationVersion = randomAlphabetic(5);
    TelemetryData data = telemetryBuilder()
        .setInstallationVersion(installationVersion)
        .build();
    String json = writeTelemetryData(data);
    assertJson(json).isSimilarTo("""
        {
          "installationVersion": "%s"
        }
        """.formatted(installationVersion));
}

/** The container flag is always serialized, for both true and false. */
@ParameterizedTest
@MethodSource("getFeatureFlagEnabledStates")
void write_container_flag(boolean isIncontainer) {
    TelemetryData data = telemetryBuilder()
        .setInContainer(isIncontainer)
        .build();
    String json = writeTelemetryData(data);
    assertJson(json).isSimilarTo("""
        {
          "container": %s
        }
        """.formatted(isIncontainer));
}

/** Provider: (isManaged, provider-name) pairs, including the unmanaged case. */
@DataProvider
public static Object[][] getManagedInstanceData() {
    return new Object[][] {
        {true, "scim"},
        {true, "github"},
        {true, "gitlab"},
        {false, null},
    };
}
/**
 * Entry point for multiplexed REST requests: validates the HTTP method,
 * unpacks and filters the individual requests, runs them in parallel, then
 * aggregates the individual responses into one response via {@code callback}.
 */
@Override
public void handleRequest(RestRequest request, RequestContext requestContext, final Callback<RestResponse> callback) {
    // Only POST is accepted for multiplexed requests.
    if (HttpMethod.POST != HttpMethod.valueOf(request.getMethod())) {
        _log.error("POST is expected, but " + request.getMethod() + " received");
        callback.onError(RestException.forError(HttpStatus.S_405_METHOD_NOT_ALLOWED.getCode(), "Invalid method"));
        return;
    }
    // Disable server-side latency instrumentation for multiplexed requests
    requestContext.putLocalAttr(TimingContextUtil.TIMINGS_DISABLED_KEY_NAME, true);
    IndividualRequestMap individualRequests;
    try {
        individualRequests = extractIndividualRequests(request);
        if (_multiplexerSingletonFilter != null) {
            individualRequests = _multiplexerSingletonFilter.filterRequests(individualRequests);
        }
    } catch (RestException e) {
        // Already carries an HTTP status; propagate as-is.
        _log.error("Invalid multiplexed request", e);
        callback.onError(e);
        return;
    } catch (Exception e) {
        // Any other failure maps to a 400.
        _log.error("Invalid multiplexed request", e);
        callback.onError(RestException.forError(HttpStatus.S_400_BAD_REQUEST.getCode(), e));
        return;
    }
    // prepare the map of individual responses to be collected
    final IndividualResponseMap individualResponses = new IndividualResponseMap(individualRequests.size());
    final Map<String, HttpCookie> responseCookies = new HashMap<>();
    // all tasks are Void and side effect based, that will be useful when we add streaming
    Task<?> requestProcessingTask = createParallelRequestsTask(request, requestContext, individualRequests, individualResponses, responseCookies);
    Task<Void> responseAggregationTask = Task.action("send aggregated response", () -> {
        RestResponse aggregatedResponse = aggregateResponses(individualResponses, responseCookies);
        callback.onSuccess(aggregatedResponse);
    });
    _engine.run(requestProcessingTask.andThen(responseAggregationTask), MUX_PLAN_CLASS);
}
/** A multiplexed POST with an empty request set must be rejected with 400. */
@Test(dataProvider = "multiplexerConfigurations")
public void testHandleEmptyRequest(MultiplexerRunMode multiplexerRunMode) throws Exception {
    MultiplexedRequestHandlerImpl multiplexer = createMultiplexer(null, multiplexerRunMode);
    RestRequest request = fakeMuxRestRequest();
    FutureCallback<RestResponse> callback = new FutureCallback<>();
    multiplexer.handleRequest(request, new RequestContext(), callback);
    assertEquals(getErrorStatus(callback), HttpStatus.S_400_BAD_REQUEST);
}
/**
 * Sets the user-agent string carried by this context.
 *
 * @param userAgent the user agent value to store
 */
public void setUserAgent(String userAgent) {
    this.userAgent = userAgent;
}
/** The user agent is null until set, then returns exactly what was stored. */
@Test
void testSetUserAgent() {
    assertNull(basicContext.getUserAgent());
    basicContext.setUserAgent(VersionUtils.getFullClientVersion());
    assertEquals(VersionUtils.getFullClientVersion(), basicContext.getUserAgent());
}
/**
 * Static factory creating a DateTime from epoch milliseconds.
 *
 * @param timeMillis milliseconds since the epoch
 * @return a new DateTime for the given instant
 */
public static DateTime of(long timeMillis) {
    return new DateTime(timeMillis);
}
/**
 * An invalid second field ("…:99") must be rejected in non-lenient mode.
 * NOTE(review): despite its name, this test exercises the String-parsing
 * constructor, not DateTime.of(long).
 */
@Test
public void ofTest(){
    assertThrows(IllegalArgumentException.class, () -> {
        String a = "2021-09-27 00:00:99";
        new DateTime(a, DatePattern.NORM_DATETIME_FORMAT, false);
    });
}
/**
 * Creates the wrapper, preserving {@code cause} as the underlying error.
 *
 * @param cause the original throwable to wrap
 */
public SkipCallbackWrapperException(Throwable cause) {
    super(cause);
}
/** The wrapper must be throwable and surface exactly its own type. */
@Test
void testSkipCallbackWrapperException() {
    assertThrowsExactly(SkipCallbackWrapperException.class, () -> {
        throw new SkipCallbackWrapperException(new Throwable("error"));
    });
}
/**
 * Stores the authentication method in the configuration under
 * {@code HADOOP_SECURITY_AUTHENTICATION}, lower-casing the enum name.
 * A null method defaults to SIMPLE.
 */
public static void setAuthenticationMethod(
    AuthenticationMethod authenticationMethod, Configuration conf) {
    final AuthenticationMethod effectiveMethod =
        (authenticationMethod == null) ? AuthenticationMethod.SIMPLE : authenticationMethod;
    conf.set(HADOOP_SECURITY_AUTHENTICATION,
        StringUtils.toLowerCase(effectiveMethod.toString()));
}
/** null defaults to "simple"; enum names are stored lower-cased. */
@Test
public void testSetAuthenticationMethod() {
    Configuration conf = new Configuration();
    // default
    SecurityUtil.setAuthenticationMethod(null, conf);
    assertEquals("simple", conf.get(HADOOP_SECURITY_AUTHENTICATION));
    // simple
    SecurityUtil.setAuthenticationMethod(SIMPLE, conf);
    assertEquals("simple", conf.get(HADOOP_SECURITY_AUTHENTICATION));
    // kerberos
    SecurityUtil.setAuthenticationMethod(KERBEROS, conf);
    assertEquals("kerberos", conf.get(HADOOP_SECURITY_AUTHENTICATION));
}
/**
 * Creates a capacity-bounded LRU cache, forwarding -1 as the expire-time
 * argument. NOTE(review): -1 presumably disables time-based eviction —
 * confirm against the two-argument constructor.
 *
 * @param capacity maximum number of entries retained
 */
public LRUCache(int capacity) {
    this(capacity, -1);
}
/**
 * Capacity eviction in LRU order plus time-based expiry: expired entries
 * appear to be removed lazily on access rather than eagerly.
 */
@Test
public void testLRUCache() throws InterruptedException {
    LRUCache<String, Integer> lruCache = new LRUCache<>(3, CACHE_EXPIRE_TIME);
    lruCache.put("1", 1);
    lruCache.put("2", 1);
    lruCache.put("3", 3);
    lruCache.put("4", 4);
    // Capacity 3: inserting "4" evicts the least recently used key "1".
    Assert.assertEquals(lruCache.size(), 3);
    Assert.assertNull(lruCache.get("1"));
    Assert.assertNotNull(lruCache.get("2"));
    Assert.assertNotNull(lruCache.get("3"));
    Assert.assertNotNull(lruCache.get("3"));
    lruCache.clear();
    lruCache.put("1", 1);
    // NOTE(review): sleep-based expiry check — assumes 201ms exceeds
    // CACHE_EXPIRE_TIME; size only drops once the expired key is read.
    Thread.sleep(201);
    Assert.assertEquals(lruCache.size(), 1);
    lruCache.get("1");
    Assert.assertEquals(lruCache.size(), 0);
    lruCache.put("2", 2);
    Assert.assertEquals(lruCache.size(), 1);
    lruCache.put("3", 3);
    Assert.assertEquals(lruCache.size(), 2);
}
/**
 * Hard-links every file named in {@code fileBaseNames} from
 * {@code parentDir} into {@code linkDir}, keeping the same base names.
 *
 * @throws IOException if any argument is null
 * @throws FileNotFoundException if {@code linkDir} does not exist
 */
public static void createHardLinkMult(File parentDir, String[] fileBaseNames,
    File linkDir) throws IOException {
    if (parentDir == null) {
        throw new IOException(
            "invalid arguments to createHardLinkMult: parent directory is null");
    }
    if (linkDir == null) {
        throw new IOException(
            "invalid arguments to createHardLinkMult: link directory is null");
    }
    if (fileBaseNames == null) {
        throw new IOException(
            "invalid arguments to createHardLinkMult: "
            + "filename list can be empty but not null");
    }
    if (!linkDir.exists()) {
        throw new FileNotFoundException(linkDir + " not found.");
    }
    // Resolve both directories once instead of per file.
    final Path linkBase = linkDir.toPath();
    final Path sourceBase = parentDir.toPath();
    for (final String baseName : fileBaseNames) {
        createLink(linkBase.resolve(baseName), sourceBase.resolve(baseName));
    }
}
/**
 * Bulk-linking a directory's files must produce true hard links: link count
 * of 2 on both sides, identical contents, and writes visible through either.
 */
@Test
public void testCreateHardLinkMult() throws IOException {
    //hardlink a whole list of three files at once
    String[] fileNames = src.list();
    createHardLinkMult(src, fileNames, tgt_mult);
    //validate by link count - each file has been linked once,
    //so each count is "2"
    assertEquals(2, getLinkCount(x1));
    assertEquals(2, getLinkCount(x2));
    assertEquals(2, getLinkCount(x3));
    assertEquals(2, getLinkCount(x1_mult));
    assertEquals(2, getLinkCount(x2_mult));
    assertEquals(2, getLinkCount(x3_mult));
    //validate by contents
    validateTgtMult();
    //validate that change of content is reflected in the other linked files
    appendToFile(x1_mult, str3);
    assertTrue(fetchFileContents(x1_mult).equals(str1 + str3));
    assertTrue(fetchFileContents(x1).equals(str1 + str3));
}
/**
 * Creates a {@link RetryTransformer} that decorates upstream sources with the given retry.
 *
 * @param retry the retry policy to apply
 * @param <T>   the upstream value type
 * @return a transformer applying {@code retry}
 * @throws NullPointerException if {@code retry} is null
 */
public static <T> RetryTransformer<T> of(Retry retry) {
    // Fail fast on a public factory instead of deferring the NPE to subscription time.
    if (retry == null) {
        throw new NullPointerException("retry must not be null");
    }
    return new RetryTransformer<>(retry);
}
@Test
public void doNotRetryFromPredicateUsingCompletable() {
    // Retry only on IOException, so a HelloWorldException must NOT trigger retries.
    RetryConfig config = RetryConfig.custom()
        .retryOnException(t -> t instanceof IOException)
        .waitDuration(Duration.ofMillis(50))
        .maxAttempts(3)
        .build();
    Retry retry = Retry.of("testName", config);
    doThrow(new HelloWorldException()).when(helloWorldService).sayHelloWorld();

    Completable.fromRunnable(helloWorldService::sayHelloWorld)
        .compose(RetryTransformer.of(retry))
        .test()
        .assertError(HelloWorldException.class)
        .assertNotComplete();

    // Exactly one invocation, and the metrics record a non-retried failure.
    then(helloWorldService).should().sayHelloWorld();
    Retry.Metrics metrics = retry.getMetrics();
    assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isEqualTo(1);
    assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isZero();
}
/**
 * Builds the physical execution step for a foreign-key table-table join.
 *
 * <p>Resolves the left-side join key (either a plain column reference or a full join
 * expression — exactly one must be present), derives the joined schema and serdes, and
 * wires up an INNER or LEFT foreign-key join over the two input tables.
 *
 * @param left         holder of the left input table and its key factory
 * @param right        holder of the right input table
 * @param join         the logical join step carrying join type, formats and key info
 * @param buildContext runtime services (config, function registry, serde factories)
 * @return an unmaterialized holder of the joined table
 * @throws IllegalStateException if neither a left join column nor expression is present,
 *                               or the join type is not INNER/LEFT
 */
public static <KLeftT, KRightT> KTableHolder<KLeftT> build(
    final KTableHolder<KLeftT> left,
    final KTableHolder<KRightT> right,
    final ForeignKeyTableTableJoin<KLeftT, KRightT> join,
    final RuntimeBuildContext buildContext
) {
  final LogicalSchema leftSchema = left.getSchema();
  final LogicalSchema rightSchema = right.getSchema();

  final ProcessingLogger logger = buildContext.getProcessingLogger(
      join.getProperties().getQueryContext()
  );

  // Compile the left-side key extraction against the left schema.
  final ExpressionEvaluator expressionEvaluator;
  final CodeGenRunner codeGenRunner = new CodeGenRunner(
      leftSchema,
      buildContext.getKsqlConfig(),
      buildContext.getFunctionRegistry()
  );

  // The join key comes from exactly one of these two sources.
  final Optional<ColumnName> leftColumnName = join.getLeftJoinColumnName();
  final Optional<Expression> leftJoinExpression = join.getLeftJoinExpression();
  if (leftColumnName.isPresent()) {
    expressionEvaluator = codeGenRunner.buildCodeGenFromParseTree(
        new UnqualifiedColumnReferenceExp(leftColumnName.get()),
        "Left Join Expression"
    );
  } else if (leftJoinExpression.isPresent()) {
    expressionEvaluator = codeGenRunner.buildCodeGenFromParseTree(
        leftJoinExpression.get(),
        "Left Join Expression"
    );
  } else {
    throw new IllegalStateException("Both leftColumnName and leftJoinExpression are empty.");
  }

  final ForeignKeyJoinParams<KRightT> joinParams = ForeignKeyJoinParamsFactory
      .create(expressionEvaluator, leftSchema, rightSchema, logger);

  // Build serdes over the joined schema for the materialized join state.
  final Formats formats = join.getFormats();
  final PhysicalSchema physicalSchema = PhysicalSchema.from(
      joinParams.getSchema(),
      formats.getKeyFeatures(),
      formats.getValueFeatures()
  );

  final Serde<KLeftT> keySerde = left.getExecutionKeyFactory().buildKeySerde(
      formats.getKeyFormat(),
      physicalSchema,
      join.getProperties().getQueryContext()
  );

  final Serde<GenericRow> valSerde = buildContext.buildValueSerde(
      formats.getValueFormat(),
      physicalSchema,
      join.getProperties().getQueryContext()
  );

  // Only INNER and LEFT foreign-key joins are supported here.
  final KTable<KLeftT, GenericRow> result;
  switch (join.getJoinType()) {
    case INNER:
      result = left.getTable().join(
          right.getTable(),
          joinParams.getKeyExtractor(),
          joinParams.getJoiner(),
          buildContext.getMaterializedFactory().create(keySerde, valSerde)
      );
      break;
    case LEFT:
      result = left.getTable().leftJoin(
          right.getTable(),
          joinParams.getKeyExtractor(),
          joinParams.getJoiner(),
          buildContext.getMaterializedFactory().create(keySerde, valSerde)
      );
      break;
    default:
      throw new IllegalStateException("invalid join type: " + join.getJoinType());
  }

  return KTableHolder.unmaterialized(
      result,
      joinParams.getSchema(),
      left.getExecutionKeyFactory()
  );
}
@Test public void shouldReturnCorrectSchemaMultiKey() { // Given: givenInnerJoin(leftMultiKey, L_KEY); // When: final KTableHolder<Struct> result = join.build(planBuilder, planInfo); // Then: assertThat( result.getSchema(), is(LogicalSchema.builder() .keyColumns(LEFT_SCHEMA_MULTI_KEY.key()) .valueColumns(LEFT_SCHEMA_MULTI_KEY.value()) .valueColumns(RIGHT_SCHEMA.value()) .build()) ); }
public static Deserializer getDeserializer(Configuration configuration, Properties schema) { String name = getDeserializerClassName(schema); // for collection delimiter, Hive 1.x, 2.x uses "colelction.delim" but Hive 3.x uses "collection.delim" // see also https://issues.apache.org/jira/browse/HIVE-16922 if (name.equals("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")) { if (schema.containsKey("colelction.delim") && !schema.containsKey(COLLECTION_DELIM)) { schema.put(COLLECTION_DELIM, schema.getProperty("colelction.delim")); } } Deserializer deserializer = createDeserializer(getDeserializerClass(name)); initializeDeserializer(configuration, deserializer, schema); return deserializer; }
@Test
public void testGetThriftDeserializer() {
    // Describe a Thrift-serialized IntString payload via table properties.
    Properties schema = new Properties();
    schema.setProperty(SERIALIZATION_LIB, ThriftDeserializer.class.getName());
    schema.setProperty(SERIALIZATION_CLASS, IntString.class.getName());
    schema.setProperty(SERIALIZATION_FORMAT, TBinaryProtocol.class.getName());

    // The factory must hand back the configured ThriftDeserializer implementation.
    Deserializer deserializer = getDeserializer(new Configuration(false), schema);
    assertInstanceOf(deserializer, ThriftDeserializer.class);
}
/**
 * Reflectively dispatches a Redis command to a public method on this class whose
 * name matches the command (case-insensitive) and whose arity matches {@code args}.
 *
 * <p>String results are returned as raw bytes, mirroring the wire format.
 *
 * @throws UnsupportedOperationException if no matching method exists
 */
@Override
public Object execute(String command, byte[]... args) {
    for (Method candidate : this.getClass().getDeclaredMethods()) {
        boolean matches = candidate.getName().equalsIgnoreCase(command)
                && Modifier.isPublic(candidate.getModifiers())
                && candidate.getParameterTypes().length == args.length;
        if (!matches) {
            continue;
        }
        try {
            Object result = execute(candidate, args);
            return result instanceof String ? ((String) result).getBytes() : result;
        } catch (IllegalArgumentException e) {
            // In pipelined mode failures are reported through the pipeline exception type.
            if (isPipelined()) {
                throw new RedisPipelineException(e);
            }
            throw new InvalidDataAccessApiUsageException(e.getMessage(), e);
        }
    }
    throw new UnsupportedOperationException();
}
@Test
public void testExecute() {
    // "ttl" on a nonexistent key reports -2 (Redis convention for a missing key).
    Long ttl = (Long) connection.execute("ttl", "key".getBytes());
    assertThat(ttl).isEqualTo(-2);
    // A zero-argument command must also dispatch correctly.
    connection.execute("flushDb");
}
/**
 * Returns the set of windows that meet the given minimum monitored-partition coverage.
 *
 * @param cluster                          the cluster whose partitions are considered
 * @param minMonitoredPartitionsPercentage minimum fraction of partitions that must be valid
 * @return the valid window timestamps, sorted
 */
public SortedSet<Long> validWindows(Cluster cluster, double minMonitoredPartitionsPercentage) {
    // Entity-granularity aggregation over every partition of the cluster.
    AggregationOptions<String, PartitionEntity> options = new AggregationOptions<>(
        minMonitoredPartitionsPercentage,
        0.0,
        1,
        _maxAllowedExtrapolationsPerPartition,
        allPartitions(cluster),
        AggregationOptions.Granularity.ENTITY,
        true);
    // (-1, MAX_VALUE) spans all windows; completeness decides which indices are valid.
    MetricSampleCompleteness<String, PartitionEntity> completeness =
        completeness(-1, Long.MAX_VALUE, options);
    return windowIndicesToWindows(completeness.validWindowIndices(), _windowMs);
}
@Test public void testValidWindowsWithInvalidPartitions() { TestContext ctx = setupScenario2(); KafkaPartitionMetricSampleAggregator aggregator = ctx.aggregator(); MetadataClient.ClusterAndGeneration clusterAndGeneration = ctx.clusterAndGeneration(0); SortedSet<Long> validWindows = aggregator.validWindows(clusterAndGeneration.cluster(), 1.0); assertEquals("Should have three invalid windows.", NUM_WINDOWS - 3, validWindows.size()); assertValidWindows(validWindows, NUM_WINDOWS - 1, Arrays.asList(6, 7)); // reduced monitored percentage should include every window. assertEquals(NUM_WINDOWS, aggregator.validWindows(clusterAndGeneration.cluster(), 0.5).size()); }
/**
 * Builds a {@link Session} for the given query from the HTTP session context.
 *
 * <p>When an authorized identity is supplied, the request identity is rewritten to
 * carry the authorized user before the session is assembled. The warning collector is
 * created only after all system properties are set, because it reads the
 * WARNING_HANDLING system property from the builder.
 *
 * @param queryId                 id of the query the session belongs to
 * @param context                 request-scoped session inputs (catalog, schema, properties, ...)
 * @param warningCollectorFactory factory for the session's warning collector
 * @param authorizedIdentity      optional identity selected by authorization
 * @return the fully built session, with a transaction attached if the context carries one
 */
@Override
public Session createSession(QueryId queryId, SessionContext context,
        WarningCollectorFactory warningCollectorFactory, Optional<AuthorizedIdentity> authorizedIdentity) {
    Identity identity = context.getIdentity();
    if (authorizedIdentity.isPresent()) {
        // Replace the request identity with one carrying the authorized user name.
        identity = new Identity(
                identity.getUser(),
                identity.getPrincipal(),
                identity.getRoles(),
                identity.getExtraCredentials(),
                identity.getExtraAuthenticators(),
                Optional.of(authorizedIdentity.get().getUserName()),
                authorizedIdentity.get().getReasonForSelect());
        log.info(String.format(
                "For query %s, given user is %s, authorized user is %s",
                queryId.getId(),
                identity.getUser(),
                authorizedIdentity.get().getUserName()));
    }
    SessionBuilder sessionBuilder = Session.builder(sessionPropertyManager)
            .setQueryId(queryId)
            .setIdentity(identity)
            .setSource(context.getSource())
            .setCatalog(context.getCatalog())
            .setSchema(context.getSchema())
            .setRemoteUserAddress(context.getRemoteUserAddress())
            .setUserAgent(context.getUserAgent())
            .setClientInfo(context.getClientInfo())
            .setClientTags(context.getClientTags())
            .setTraceToken(context.getTraceToken())
            .setResourceEstimates(context.getResourceEstimates())
            .setTracer(context.getTracer())
            .setRuntimeStats(context.getRuntimeStats());

    // A server-forced time zone overrides whatever the client requested.
    if (forcedSessionTimeZone.isPresent()) {
        sessionBuilder.setTimeZoneKey(forcedSessionTimeZone.get());
    }
    else if (context.getTimeZoneId() != null) {
        sessionBuilder.setTimeZoneKey(getTimeZoneKey(context.getTimeZoneId()));
    }

    if (context.getLanguage() != null) {
        sessionBuilder.setLocale(Locale.forLanguageTag(context.getLanguage()));
    }

    // Copy system- and catalog-scoped session properties from the request.
    for (Entry<String, String> entry : context.getSystemProperties().entrySet()) {
        sessionBuilder.setSystemProperty(entry.getKey(), entry.getValue());
    }
    for (Entry<String, Map<String, String>> catalogProperties : context.getCatalogSessionProperties().entrySet()) {
        String catalog = catalogProperties.getKey();
        for (Entry<String, String> entry : catalogProperties.getValue().entrySet()) {
            sessionBuilder.setCatalogSessionProperty(catalog, entry.getKey(), entry.getValue());
        }
    }

    for (Entry<String, String> preparedStatement : context.getPreparedStatements().entrySet()) {
        sessionBuilder.addPreparedStatement(preparedStatement.getKey(), preparedStatement.getValue());
    }

    if (context.supportClientTransaction()) {
        sessionBuilder.setClientTransactionSupport();
    }

    for (Entry<SqlFunctionId, SqlInvokedFunction> entry : context.getSessionFunctions().entrySet()) {
        sessionBuilder.addSessionFunction(entry.getKey(), entry.getValue());
    }

    // Must run after all setSystemProperty calls: the collector's handling level is
    // read from the WARNING_HANDLING system property set above.
    WarningCollector warningCollector =
            warningCollectorFactory.create(sessionBuilder.getSystemProperty(WARNING_HANDLING, WarningHandlingLevel.class));
    sessionBuilder.setWarningCollector(warningCollector);

    Session session = sessionBuilder.build();
    if (context.getTransactionId().isPresent()) {
        session = session.beginTransactionId(context.getTransactionId().get(), transactionManager, accessControl);
    }

    return session;
}
@Test(expectedExceptions = TimeZoneNotSupportedException.class)
public void testInvalidTimeZone() {
    // A request carrying an unrecognized time-zone id must be rejected during
    // session creation with TimeZoneNotSupportedException.
    HttpServletRequest request = new MockHttpServletRequest(
        ImmutableListMultimap.<String, String>builder()
            .put(PRESTO_USER, "testUser")
            .put(PRESTO_TIME_ZONE, "unknown_timezone")
            .build(),
        "testRemote",
        ImmutableMap.of());
    HttpRequestSessionContext context = new HttpRequestSessionContext(request, new SqlParserOptions());

    QuerySessionSupplier sessionSupplier = new QuerySessionSupplier(
        createTestTransactionManager(),
        new AllowAllAccessControl(),
        new SessionPropertyManager(),
        new SqlEnvironmentConfig());
    WarningCollectorFactory warningCollectorFactory = new WarningCollectorFactory() {
        @Override
        public WarningCollector create(WarningHandlingLevel warningHandlingLevel) {
            return WarningCollector.NOOP;
        }
    };

    sessionSupplier.createSession(new QueryId("test_query_id"), context, warningCollectorFactory, Optional.empty());
}
/**
 * Registers a listener for load-balancer state changes.
 *
 * <p>The addition is posted to the property-event executor rather than applied inline,
 * so {@code _listeners} is only mutated on the executor thread; registration is
 * therefore asynchronous with respect to the caller.
 *
 * @param listener the listener to add
 */
public void register(final SimpleLoadBalancerStateListener listener) {
    trace(_log, "register listener: ", listener);
    _executor.execute(new PropertyEvent("add listener for state") {
        @Override
        public void innerRun() {
            _listeners.add(listener);
        }
    });
}
@Test(groups = { "small", "back-end" })
public void testRegister() {
    reset();
    TestListener listener = new TestListener();
    List<String> schemes = new ArrayList<>();
    schemes.add("http");
    _state.register(listener);

    // Nothing has happened yet, so the listener has seen no strategy events.
    assertNull(listener.scheme);
    assertNull(listener.strategy);
    assertNull(listener.serviceName);

    // trigger a strategy add
    // first add a cluster
    _state.listenToCluster("cluster-1", new NullStateListenerCallback());
    _clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));

    // then add a service
    _state.listenToService("service-1", new NullStateListenerCallback());
    _serviceRegistry.put("service-1",
        new ServiceProperties("service-1", "cluster-1", "/test", Arrays.asList("random"),
            Collections.<String, Object>emptyMap(), null, null, schemes, null));

    // this should trigger a refresh
    assertEquals(listener.scheme, "http");
    assertTrue(listener.strategy instanceof RandomLoadBalancerStrategy);
    assertEquals(listener.serviceName, "service-1");

    // then update the cluster
    _clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1"));
    // this triggered a second refresh, but also an onStrategyRemoved. The onStrategyRemoved should
    // be done first, and then the onStrategyAdd, so we should still see a valid strategy.
    assertEquals(listener.scheme, "http");
    assertTrue(listener.strategy instanceof RandomLoadBalancerStrategy);
    assertEquals(listener.serviceName, "service-1");

    // Now exercise a partitioned cluster/service pair with the degrader strategy.
    _state.listenToCluster("partition-cluster-1", new NullStateListenerCallback());
    _clusterRegistry.put("partition-cluster-1",
        new ClusterProperties("partition-cluster-1", null, new HashMap<>(), new HashSet<>(),
            new RangeBasedPartitionProperties("id=(\\d+)", 0, 100, 2)));

    _state.listenToService("partition-service-1", new NullStateListenerCallback());
    _serviceRegistry.put("partition-service-1",
        new ServiceProperties("partition-service-1", "partition-cluster-1", "/partition-test",
            Arrays.asList("degraderV3"), Collections.<String, Object>emptyMap(), null, null, schemes, null));

    assertEquals(listener.scheme, "http");
    assertTrue(listener.strategy instanceof DegraderLoadBalancerStrategyV3);
}
@Deprecated public static String getJwt(JwtClaims claims) throws JoseException { String jwt; RSAPrivateKey privateKey = (RSAPrivateKey) getPrivateKey( jwtConfig.getKey().getFilename(),jwtConfig.getKey().getPassword(), jwtConfig.getKey().getKeyName()); // A JWT is a JWS and/or a JWE with JSON claims as the payload. // In this example it is a JWS nested inside a JWE // So we first create a JsonWebSignature object. JsonWebSignature jws = new JsonWebSignature(); // The payload of the JWS is JSON content of the JWT Claims jws.setPayload(claims.toJson()); // The JWT is signed using the sender's private key jws.setKey(privateKey); // Get provider from security config file, it should be two digit // And the provider id will set as prefix for keyid in the token header, for example: 05100 // if there is no provider id, we use "00" for the default value String provider_id = ""; if (jwtConfig.getProviderId() != null) { provider_id = jwtConfig.getProviderId(); if (provider_id.length() == 1) { provider_id = "0" + provider_id; } else if (provider_id.length() > 2) { logger.error("provider_id defined in the security.yml file is invalid; the length should be 2"); provider_id = provider_id.substring(0, 2); } } jws.setKeyIdHeaderValue(provider_id + jwtConfig.getKey().getKid()); // Set the signature algorithm on the JWT/JWS that will integrity protect the claims jws.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256); // Sign the JWS and produce the compact serialization, which will be the inner JWT/JWS // representation, which is a string consisting of three dot ('.') separated // base64url-encoded parts in the form Header.Payload.Signature jwt = jws.getCompactSerialization(); return jwt; }
@Test
public void longlivedTokenizationJwt73() throws Exception {
    // Build claims for a long-lived token (5256000 minutes ~ 10 years) and sign it
    // with the long-lived key pair.
    JwtClaims claims = ClaimsUtil.getTestClaims("steve", "EMPLOYEE",
        "f7d42348-c647-4efb-a52d-4c5787421e73", Arrays.asList("token.r", "token.w"), "user");
    claims.setExpirationTimeMinutesInTheFuture(5256000);
    String jwt = JwtIssuer.getJwt(claims, long_kid, KeyUtil.deserializePrivateKey(long_key, KeyUtil.RSA));
    System.out.println("***Long lived token for tokenizaiton***: " + jwt);
}
@Override public void resolveArtifacts( ArtifactApi.ResolveArtifactsRequest request, StreamObserver<ArtifactApi.ResolveArtifactsResponse> responseObserver) { // Trying out artifact services in order till one succeeds. // If all services fail, re-raises the last error. // TODO: when all services fail, return an aggregated error with errors from all services. RuntimeException lastError = null; for (Endpoints.ApiServiceDescriptor endpoint : endpoints) { ArtifactResolver artifactResolver = this.artifactResolver != null ? this.artifactResolver : new EndpointBasedArtifactResolver(endpoint.getUrl()); try { responseObserver.onNext(artifactResolver.resolveArtifacts(request)); responseObserver.onCompleted(); return; } catch (RuntimeException exn) { lastError = exn; } finally { if (this.artifactResolver == null) { artifactResolver.shutdown(); } } } if (lastError == null) { lastError = new RuntimeException( "Could not successfully resolve the artifact for the request " + request); } throw lastError; }
@Test
public void testArtifactResolveSecondEndpoint() {
    // A file artifact pointing at a dummy path.
    Path path = Paths.get("dummypath");
    RunnerApi.ArtifactInformation fileArtifact =
        RunnerApi.ArtifactInformation.newBuilder()
            .setTypeUrn(ArtifactRetrievalService.FILE_ARTIFACT_URN)
            .setTypePayload(
                RunnerApi.ArtifactFilePayload.newBuilder()
                    .setPath(path.toString())
                    .build()
                    .toByteString())
            .setRoleUrn("")
            .build();
    ArtifactApi.ResolveArtifactsRequest request =
        ArtifactApi.ResolveArtifactsRequest.newBuilder().addArtifacts(fileArtifact).build();
    StreamObserver<ResolveArtifactsResponse> responseObserver = Mockito.mock(StreamObserver.class);

    // First attempt fails, second succeeds: the service must fall through to the
    // next endpoint and end up invoking the resolver exactly twice.
    Mockito.when(artifactResolver.resolveArtifacts(request))
        .thenThrow(new RuntimeException("Failing artifact resolve"))
        .thenReturn(ArtifactApi.ResolveArtifactsResponse.newBuilder().build());

    artifactService.resolveArtifacts(request, responseObserver);
    Mockito.verify(artifactResolver, Mockito.times(2)).resolveArtifacts(request);
}
@Override public List<TableId> discoverDataCollections(JdbcSourceConfig sourceConfig) { try (JdbcConnection jdbc = openJdbcConnection(sourceConfig)) { return TableDiscoveryUtils.listTables( // there is always a single database provided sourceConfig.getDatabaseList().get(0), jdbc, sourceConfig.getTableFilters()); } catch (SQLException e) { throw new FlinkRuntimeException("Error to discover tables: " + e.getMessage(), e); } }
@Test
public void testDiscoverDataCollectionsInMultiDatabases() {
    // initial two databases in same postgres instance
    customDatabase.createAndInitialize();
    inventoryDatabase.createAndInitialize();

    // Discover 'customer.customers' through a config pointing at customDatabase,
    // where the table actually exists.
    PostgresSourceConfigFactory configFactoryOfCustomDatabase =
        getMockPostgresSourceConfigFactory(customDatabase, "customer", "customers", 10);
    PostgresDialect dialectOfcustomDatabase =
        new PostgresDialect(configFactoryOfCustomDatabase.create(0));
    List<TableId> tableIdsOfcustomDatabase =
        dialectOfcustomDatabase.discoverDataCollections(configFactoryOfCustomDatabase.create(0));
    Assert.assertEquals(tableIdsOfcustomDatabase.get(0).toString(), "customer.customers");

    // Discover 'inventory.products' through a config pointing at inventoryDatabase,
    // where that table exists as well.
    PostgresSourceConfigFactory configFactoryOfInventoryDatabase =
        getMockPostgresSourceConfigFactory(inventoryDatabase, "inventory", "products", 10);
    PostgresDialect dialectOfInventoryDatabase =
        new PostgresDialect(configFactoryOfInventoryDatabase.create(0));
    List<TableId> tableIdsOfInventoryDatabase =
        dialectOfInventoryDatabase.discoverDataCollections(configFactoryOfInventoryDatabase.create(0));
    Assert.assertEquals(tableIdsOfInventoryDatabase.get(0).toString(), "inventory.products");

    // Ask inventoryDatabase for 'customer.customers', which only exists in
    // customDatabase: discovery must come back empty, proving the dialect does not
    // leak tables across databases in the same instance.
    PostgresSourceConfigFactory configFactoryOfInventoryDatabase2 =
        getMockPostgresSourceConfigFactory(inventoryDatabase, "customer", "customers", 10);
    PostgresDialect dialectOfInventoryDatabase2 =
        new PostgresDialect(configFactoryOfInventoryDatabase2.create(0));
    List<TableId> tableIdsOfInventoryDatabase2 =
        dialectOfInventoryDatabase2.discoverDataCollections(configFactoryOfInventoryDatabase2.create(0));
    Assert.assertTrue(tableIdsOfInventoryDatabase2.isEmpty());
}
/**
 * Issues an asynchronous PUT with a form-urlencoded body.
 *
 * <p>The header's content type is forced to {@code application/x-www-form-urlencoded},
 * overriding whatever the caller set.
 *
 * @param url          target URL
 * @param header       request headers (content type is overwritten)
 * @param query        query parameters
 * @param bodyValues   form field name/value pairs
 * @param responseType expected response body type
 * @param callback     completion callback
 */
public <T> void putForm(String url, Header header, Query query, Map<String, String> bodyValues,
        Type responseType, Callback<T> callback) {
    Header formHeader = header.setContentType(MediaType.APPLICATION_FORM_URLENCODED);
    RequestHttpEntity entity = new RequestHttpEntity(formHeader, query, bodyValues);
    execute(url, HttpMethod.PUT, entity, responseType, callback);
}
@Test
void testPutForm() throws Exception {
    // Even though the caller sets XML, putForm must force the form-urlencoded content type.
    Header header = Header.newInstance().setContentType(MediaType.APPLICATION_XML);
    restTemplate.putForm(TEST_URL, header, new HashMap<>(), String.class, mockCallback);

    verify(requestClient).execute(any(), eq("PUT"), any(), any(), eq(mockCallback));
    assertEquals(MediaType.APPLICATION_FORM_URLENCODED, header.getValue(HttpHeaderConsts.CONTENT_TYPE));
}
/**
 * Issues an HTTP POST over the given connection and returns the response body.
 *
 * <p>Delegates to {@code handleInput} (applies headers, body and timeouts to the
 * connection) and {@code handleOutput} (reads the response).
 *
 * @param con              the connection to use
 * @param headers          request headers forwarded to {@code handleInput}
 * @param requestBody      request body forwarded to {@code handleInput}
 * @param connectTimeoutMs connect timeout in milliseconds, forwarded to {@code handleInput}
 * @param readTimeoutMs    read timeout in milliseconds, forwarded to {@code handleInput}
 * @return the response body as a string
 * @throws IOException          on I/O failure
 * @throws UnretryableException when the failure must not be retried
 */
public static String post(HttpURLConnection con, Map<String, String> headers, String requestBody, Integer connectTimeoutMs, Integer readTimeoutMs) throws IOException, UnretryableException {
    handleInput(con, headers, requestBody, connectTimeoutMs, readTimeoutMs);
    return handleOutput(con);
}
@Test
public void testEmptyResponse() throws IOException {
    // An empty response body is not a valid token payload and must surface as an IOException.
    HttpURLConnection mockedCon = createHttpURLConnection("");
    assertThrows(IOException.class,
        () -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null));
}
/**
 * Tags the given span from the input, unless the span is a no-op.
 *
 * @throws NullPointerException if {@code input} or {@code span} is null
 */
public final void tag(I input, ScopedSpan span) {
    // Explicit messages instead of a bare NPE further down.
    if (input == null) {
        throw new NullPointerException("input == null");
    }
    if (span == null) {
        throw new NullPointerException("span == null");
    }
    // Skip all parsing work for no-op spans.
    if (!span.isNoop()) {
        tag(span, input, span.context());
    }
}
@Test void tag_mutableSpan_empty() { when(parseValue.apply(input, context)).thenReturn(""); tag.tag(input, context, mutableSpan); verify(parseValue).apply(input, context); verifyNoMoreInteractions(parseValue); // doesn't parse twice MutableSpan expected = new MutableSpan(); expected.tag("key", ""); assertThat(mutableSpan).isEqualTo(expected); }
/**
 * Opens a direct-S3 multipart upload stream for the given file.
 *
 * <p>Creates an upload channel via the nodes API (propagating modification/creation
 * timestamps from the transfer status when present), then returns an output stream
 * that buffers writes into fixed-size segments and forwards them to the multipart
 * upload. {@code getStatus()} on the returned stream yields the node of the
 * completed upload.
 *
 * @param file     the remote file to write
 * @param status   transfer status carrying timestamps and length
 * @param callback connection prompt callback (unused here)
 * @return the upload output stream
 * @throws BackgroundException if opening the upload channel fails
 */
@Override
public HttpResponseOutputStream<Node> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        final CreateFileUploadRequest createFileUploadRequest = new CreateFileUploadRequest()
            .directS3Upload(true)
            .timestampModification(status.getModified() != null ? new DateTime(status.getModified()) : null)
            .timestampCreation(status.getCreated() != null ? new DateTime(status.getCreated()) : null)
            .parentId(Long.parseLong(nodeid.getVersionId(file.getParent())))
            .name(file.getName());
        final CreateFileUploadResponse createFileUploadResponse = new NodesApi(session.getClient())
            .createFileUploadChannel(createFileUploadRequest, StringUtils.EMPTY);
        if(log.isDebugEnabled()) {
            log.debug(String.format("upload started for %s with response %s", file, createFileUploadResponse));
        }
        // Segment writes into partsize chunks before handing them to the multipart stream.
        final MultipartOutputStream proxy = new MultipartOutputStream(createFileUploadResponse, file, status);
        return new HttpResponseOutputStream<Node>(new MemorySegementingOutputStream(proxy, partsize),
                new SDSAttributesAdapter(session), status) {
            @Override
            public Node getStatus() {
                // Node metadata of the finished upload, as reported by the multipart stream.
                return proxy.getResult();
            }
        };
    }
    catch(ApiException e) {
        throw new SDSExceptionMappingService(nodeid).map("Upload {0} failed", e, file);
    }
}
@Test
public void testWriteZeroLength() throws Exception {
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    // Create a fresh room (top-level directory) to upload into.
    final Path room = new SDSDirectoryFeature(session, nodeid).mkdir(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)),
        new TransferStatus());
    final TransferStatus status = new TransferStatus();
    final Path test = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final SDSDirectS3MultipartWriteFeature writer = new SDSDirectS3MultipartWriteFeature(session, nodeid);

    // Writing a zero-byte stream must still yield a node with size 0 and a version id.
    final StatusOutputStream<Node> out = writer.write(test, status, new DisabledConnectionCallback());
    assertNotNull(out);
    new StreamCopier(status, status).transfer(new NullInputStream(0L), out);
    assertEquals(0L, out.getStatus().getSize(), 0L);
    assertNotNull(test.attributes().getVersionId());
    assertTrue(new DefaultFindFeature(session).find(test));

    // Clean up: deleting the room removes the uploaded file with it.
    new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(room),
        new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Parses a delimiter-separated list of metric type names into an {@link EnumSet}.
 *
 * <p>A lone {@code "*"} selects every metric type.
 *
 * @param typeComponent the raw, delimiter-separated type list
 * @return the parsed set of metric types
 */
@VisibleForTesting
static EnumSet<MetricType> parseMetricTypes(String typeComponent) {
    final String[] tokens = typeComponent.split(LIST_DELIMITER);
    // Wildcard short-circuit: "*" alone means all types.
    if (tokens.length == 1 && "*".equals(tokens[0])) {
        return ALL_METRIC_TYPES;
    }
    return EnumSet.copyOf(
        Arrays.stream(tokens)
            .map(token -> ConfigurationUtils.convertToEnum(token, MetricType.class))
            .collect(Collectors.toSet()));
}
@Test
void testParseMetricTypesCaseIgnored() {
    // Parsing must accept enum names regardless of letter case.
    final EnumSet<MetricType> types = DefaultMetricFilter.parseMetricTypes("meter,CoUnTeR");
    assertThat(types).containsExactlyInAnyOrder(MetricType.METER, MetricType.COUNTER);
}
@Override public void start( final KsqlModuleType moduleType, final Properties ksqlProperties) { final BaseSupportConfig ksqlVersionCheckerConfig = new PhoneHomeConfig(ksqlProperties, "ksql"); if (!ksqlVersionCheckerConfig.isProactiveSupportEnabled()) { log.warn(legalDisclaimerProactiveSupportDisabled()); return; } try { final KsqlVersionChecker ksqlVersionChecker = versionCheckerFactory.create( ksqlVersionCheckerConfig, moduleType, enableSettlingTime, this::isActive ); ksqlVersionChecker.init(); ksqlVersionChecker.setUncaughtExceptionHandler((t, e) -> log.error("Uncaught exception in thread '{}':", t.getName(), e)); ksqlVersionChecker.start(); final long reportIntervalMs = ksqlVersionCheckerConfig.getReportIntervalMs(); final long reportIntervalHours = reportIntervalMs / (60 * 60 * 1000); // We log at WARN level to increase the visibility of this information. log.warn(legalDisclaimerProactiveSupportEnabled(reportIntervalHours)); } catch (final Exception e) { // We catch any exceptions to prevent collateral damage to the more important broker // threads that are running in the same JVM. log.error("Failed to start KsqlVersionCheckerAgent: {}", e.getMessage()); } }
@Test public void shouldCreateKsqlVersionCheckerWithCorrectKsqlModuleType() { // When: ksqlVersionCheckerAgent.start(KsqlModuleType.SERVER, properties); // Then: verify(versionCheckerFactory).create(any(), eq(KsqlModuleType.SERVER), anyBoolean(), any()); }
/** Returns the output collector this spout emits tuples through. */
public SpoutOutputCollector getCollector() {
    return collector;
}
@Test
public void testSimpleSequenceFile() throws Exception {
    //1) create a couple files to consume
    source = new Path("/tmp/hdfsspout/source");
    fs.mkdirs(source);
    archive = new Path("/tmp/hdfsspout/archive");
    fs.mkdirs(archive);

    Path file1 = new Path(source + "/file1.seq");
    createSeqFile(fs, file1, 5);

    Path file2 = new Path(source + "/file2.seq");
    createSeqFile(fs, file2, 5);

    try (AutoCloseableHdfsSpout closeableSpout = makeSpout(Configs.SEQ, SequenceFileReader.defaultFields)) {
        HdfsSpout spout = closeableSpout.spout;
        Map<String, Object> conf = getCommonConfigs();
        openSpout(spout, 0, conf);

        // consume both files: 2 files x 5 records each = 10 emitted tuples, and both
        // files must be moved to the archive directory once fully read.
        List<String> res = runSpout(spout, "r11");
        assertEquals(10, res.size());
        assertEquals(2, listDir(archive).size());

        Path f1 = new Path(archive + "/file1.seq");
        Path f2 = new Path(archive + "/file2.seq");

        checkCollectorOutput_seq((MockCollector) spout.getCollector(), f1, f2);
    }
}
/** Returns the pre-computed erasure of this type. */
@Override
@PublicAPI(usage = ACCESS)
public JavaClass toErasure() {
    return erasure;
}
@Test
public void erased_unbound_wildcard_is_java_lang_Object() {
    // The erasure of an unbound wildcard "?" must resolve to java.lang.Object.
    @SuppressWarnings("unused")
    class ClassWithUnboundWildcard<T extends List<?>> {
    }

    JavaWildcardType type = importWildcardTypeOf(ClassWithUnboundWildcard.class);

    assertThatType(type.toErasure()).matches(Object.class);
}
/**
 * Drains up to {@code maxRecords} acquired records from the fetched data into an
 * in-flight batch.
 *
 * <p>Walks the fetched records and the acquired-offset list in lockstep: offsets that
 * are acquired and present are parsed and added; acquired offsets with no matching
 * record (e.g. control records) are recorded as gaps; unacquired records are skipped.
 *
 * <p>Deserialization and CRC failures are not thrown. If the batch is still empty they
 * are attached to it immediately (releasing the bad record / rejecting the bad batch);
 * otherwise they are cached in {@code cachedRecordException} / {@code cachedBatchException}
 * and surfaced on the NEXT call, so already-collected records are delivered first.
 *
 * @param deserializers key/value deserializers
 * @param maxRecords    maximum number of records to return in this batch
 * @param checkCrcs     whether to verify record CRCs while reading
 * @return the in-flight batch of records, gaps and acknowledgements
 */
<K, V> ShareInFlightBatch<K, V> fetchRecords(final Deserializers<K, V> deserializers,
                                             final int maxRecords,
                                             final boolean checkCrcs) {
    // Creating an empty ShareInFlightBatch
    ShareInFlightBatch<K, V> inFlightBatch = new ShareInFlightBatch<>(partition);

    if (cachedBatchException != null) {
        // A CRC failure was cached on the previous call: reject the entire record batch
        // because it is corrupt, and surface the exception now.
        rejectRecordBatch(inFlightBatch, currentBatch);
        inFlightBatch.setException(cachedBatchException);
        cachedBatchException = null;
        return inFlightBatch;
    }

    if (cachedRecordException != null) {
        // A per-record deserialization failure was cached: release the bad record and
        // surface the exception now.
        inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE);
        inFlightBatch.setException(cachedRecordException);
        cachedRecordException = null;
        return inFlightBatch;
    }

    if (isConsumed)
        return inFlightBatch;

    initializeNextAcquired();

    try {
        int recordsInBatch = 0;
        while (recordsInBatch < maxRecords) {
            lastRecord = nextFetchedRecord(checkCrcs);
            if (lastRecord == null) {
                // Any remaining acquired records are gaps
                while (nextAcquired != null) {
                    inFlightBatch.addGap(nextAcquired.offset);
                    nextAcquired = nextAcquiredRecord();
                }
                break;
            }
            // Advance the acquired-offset cursor until it reaches or passes this record.
            while (nextAcquired != null) {
                if (lastRecord.offset() == nextAcquired.offset) {
                    // It's acquired, so we parse it and add it to the batch
                    Optional<Integer> leaderEpoch = maybeLeaderEpoch(currentBatch.partitionLeaderEpoch());
                    TimestampType timestampType = currentBatch.timestampType();
                    ConsumerRecord<K, V> record = parseRecord(deserializers, partition, leaderEpoch,
                        timestampType, lastRecord, nextAcquired.deliveryCount);
                    inFlightBatch.addRecord(record);

                    recordsRead++;
                    bytesRead += lastRecord.sizeInBytes();
                    recordsInBatch++;
                    nextAcquired = nextAcquiredRecord();
                    break;
                } else if (lastRecord.offset() < nextAcquired.offset) {
                    // It's not acquired, so we skip it
                    break;
                } else {
                    // It's acquired, but there's no non-control record at this offset, so it's a gap
                    inFlightBatch.addGap(nextAcquired.offset);
                }

                nextAcquired = nextAcquiredRecord();
            }
        }
    } catch (SerializationException se) {
        nextAcquired = nextAcquiredRecord();
        if (inFlightBatch.isEmpty()) {
            // Nothing collected yet: release the offending record and fail immediately.
            inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE);
            inFlightBatch.setException(se);
        } else {
            // Deliver what was collected; re-raise this exception on the next call.
            cachedRecordException = se;
            inFlightBatch.setHasCachedException(true);
        }
    } catch (CorruptRecordException e) {
        if (inFlightBatch.isEmpty()) {
            // In the event that a CRC check fails, reject the entire record batch because it is corrupt.
            rejectRecordBatch(inFlightBatch, currentBatch);
            inFlightBatch.setException(e);
        } else {
            // Deliver what was collected; re-raise this exception on the next call.
            cachedBatchException = e;
            inFlightBatch.setHasCachedException(true);
        }
    }

    return inFlightBatch;
}
@Test
public void testAcquiredRecords() {
    long firstMessageId = 5;
    int startingOffset = 0;
    int numRecords = 10;    // Records for 0-9

    // Acquiring records 0-2 and 6-8
    List<ShareFetchResponseData.AcquiredRecords> acquiredRecords = new ArrayList<>(acquiredRecords(0L, 3));
    acquiredRecords.addAll(acquiredRecords(6L, 3));

    ShareFetchResponseData.PartitionData partitionData = new ShareFetchResponseData.PartitionData()
        .setRecords(newRecords(startingOffset, numRecords, firstMessageId))
        .setAcquiredRecords(acquiredRecords);

    Deserializers<String, String> deserializers = newStringDeserializers();

    ShareCompletedFetch completedFetch = newShareCompletedFetch(partitionData);

    // Only the six acquired offsets (0-2 and 6-8) may be returned.
    List<ConsumerRecord<String, String>> records =
        completedFetch.fetchRecords(deserializers, 10, true).getInFlightRecords();
    assertEquals(6, records.size());

    // The first offset should be 0
    ConsumerRecord<String, String> record = records.get(0);
    assertEquals(0L, record.offset());
    assertEquals(Optional.of((short) 1), record.deliveryCount());

    // The third offset should be 6
    record = records.get(3);
    assertEquals(6L, record.offset());
    assertEquals(Optional.of((short) 1), record.deliveryCount());

    // A second fetch finds the batch fully consumed and yields nothing.
    records = completedFetch.fetchRecords(deserializers, 10, true).getInFlightRecords();
    assertEquals(0, records.size());
}
/**
 * Returns a deterministic Moby-style name ("adjective_surname") for the given number.
 *
 * <p>The number is mapped onto the LEFT x RIGHT combination space, so consecutive
 * inputs cycle fairly through all combinations. If the {@code MOBY_NAMING_PREFIX}
 * system property is set, its value is prepended as {@code prefix_name}.
 *
 * @param number index into the combination space; any int is accepted
 * @return the generated name
 */
public static String getRandomName(int number) {
    // Math.floorMod keeps the index non-negative even for negative inputs; the plain
    // % operator would produce a negative index and throw ArrayIndexOutOfBoundsException.
    int combinationIdx = Math.floorMod(number, LEFT.length * RIGHT.length);
    int rightIdx = combinationIdx / LEFT.length;
    int leftIdx = combinationIdx % LEFT.length;
    String name = String.format(NAME_FORMAT, LEFT[leftIdx], RIGHT[rightIdx]);
    String prefix = System.getProperty(MOBY_NAMING_PREFIX);
    if (prefix != null) {
        name = prefix + "_" + name;
    }
    return name;
}
@Test public void allValuesReturnedFair() { int totalCombinations = 98 * 240; // MobyNames.LEFT.length * MobyNames.RIGHT.length Map<String, AtomicInteger> namesCounts = new HashMap<>(); for (int i = 0; i < totalCombinations * 2; i++) { String randomName = MobyNames.getRandomName(i); namesCounts.computeIfAbsent(randomName, key -> new AtomicInteger(0)).incrementAndGet(); } assertEquals(totalCombinations, namesCounts.size()); assertTrue(namesCounts.keySet().stream().noneMatch(StringUtil::isNullOrEmptyAfterTrim)); for (Map.Entry<String, AtomicInteger> entry : namesCounts.entrySet()) { assertEquals(entry.getKey(), 2, entry.getValue().get()); } }
/**
 * Creates the given ConfigMap asynchronously on the dedicated client executor.
 *
 * @param configMap the ConfigMap to create
 * @return a future completing when creation succeeds, or exceptionally with a
 *         {@link KubernetesException} naming the ConfigMap on failure
 */
@Override
public CompletableFuture<Void> createConfigMap(KubernetesConfigMap configMap) {
    final String configMapName = configMap.getName();
    // Run the blocking API call off-thread; wrap failures with the ConfigMap name for context.
    final Runnable createAction =
        () -> this.internalClient.resource(configMap.getInternalResource()).create();
    return CompletableFuture.runAsync(createAction, kubeClientExecutorService)
        .exceptionally(
            throwable -> {
                throw new CompletionException(
                    new KubernetesException("Failed to create ConfigMap " + configMapName, throwable));
            });
}
@Test
void testCreateConfigMap() throws Exception {
    // Create the ConfigMap and block until the operation completes.
    final KubernetesConfigMap testingConfigMap = buildTestingConfigMap();
    this.flinkKubeClient.createConfigMap(testingConfigMap).get();

    // The ConfigMap must now be retrievable and carry the expected entry.
    final Optional<KubernetesConfigMap> retrieved =
            this.flinkKubeClient.getConfigMap(TESTING_CONFIG_MAP_NAME);
    assertThat(retrieved)
            .hasValueSatisfying(
                    configMap ->
                            assertThat(configMap.getData())
                                    .containsEntry(
                                            TESTING_CONFIG_MAP_KEY, TESTING_CONFIG_MAP_VALUE));
}
@Override public List<SmsReceiveRespDTO> parseSmsReceiveStatus(String text) { JSONArray statuses = JSONUtil.parseArray(text); // 字段参考 return convertList(statuses, status -> { JSONObject statusObj = (JSONObject) status; return new SmsReceiveRespDTO() .setSuccess("SUCCESS".equals(statusObj.getStr("report_status"))) // 是否接收成功 .setErrorCode(statusObj.getStr("errmsg")) // 状态报告编码 .setMobile(statusObj.getStr("mobile")) // 手机号 .setReceiveTime(statusObj.getLocalDateTime("user_receive_time", null)) // 状态报告时间 .setSerialNo(statusObj.getStr("sid")); // 发送序列号 }); }
@Test public void testParseSmsReceiveStatus() { // 准备参数 String text = "[\n" + " {\n" + " \"user_receive_time\": \"2015-10-17 08:03:04\",\n" + " \"nationcode\": \"86\",\n" + " \"mobile\": \"13900000001\",\n" + " \"report_status\": \"SUCCESS\",\n" + " \"errmsg\": \"DELIVRD\",\n" + " \"description\": \"用户短信送达成功\",\n" + " \"sid\": \"12345\",\n" + " \"ext\": {\"logId\":\"67890\"}\n" + " }\n" + "]"; // 调用 List<SmsReceiveRespDTO> statuses = smsClient.parseSmsReceiveStatus(text); // 断言 assertEquals(1, statuses.size()); assertTrue(statuses.get(0).getSuccess()); assertEquals("DELIVRD", statuses.get(0).getErrorCode()); assertEquals("13900000001", statuses.get(0).getMobile()); assertEquals(LocalDateTime.of(2015, 10, 17, 8, 3, 4), statuses.get(0).getReceiveTime()); assertEquals("12345", statuses.get(0).getSerialNo()); }
/**
 * Extracts the file name from the given path, e.g. {@code "myPath/myFile.txt"}
 * yields {@code "myFile.txt"}.
 *
 * @param path the file path (may be {@code null})
 * @return the file name portion, the whole path when it contains no folder
 *         separator, or {@code null} when {@code path} is {@code null}
 */
public static String getFilename(String path) {
    if (path == null) {
        return null;
    }
    int separatorIndex = path.lastIndexOf(FOLDER_SEPARATOR);
    if (separatorIndex == -1) {
        // No separator present: the whole path is the file name.
        return path;
    }
    return path.substring(separatorIndex + 1);
}
@Test void testGetFilename() { // Test case 1: null path String path1 = null; String result1 = StringUtils.getFilename(path1); assertNull(result1); // Test case 2: path without separator String path2 = "myFile.txt"; String expectedResult2 = "myFile.txt"; String result2 = StringUtils.getFilename(path2); assertEquals(expectedResult2, result2); // Test case 3: path with separator String path3 = "myPath/myFile.txt"; String expectedResult3 = "myFile.txt"; String result3 = StringUtils.getFilename(path3); assertEquals(expectedResult3, result3); // Test case 4: path with multiple separators String path4 = "myPath/subPath/myFile.txt"; String expectedResult4 = "myFile.txt"; String result4 = StringUtils.getFilename(path4); assertEquals(expectedResult4, result4); }