focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Writes one record as a Parquet message. On failure the offending message is
 * logged (builders are materialized first) and the exception is rethrown.
 *
 * @param record the protobuf message or builder to write
 */
@Override
public void write(T record) {
  recordConsumer.startMessage();
  try {
    messageWriter.writeTopLevelMessage(record);
  } catch (RuntimeException e) {
    // Materialize a concrete Message for diagnostics when a Builder was passed in.
    Message m = (record instanceof Message.Builder) ? ((Message.Builder) record).build() : (Message) record;
    // BUG FIX: arguments were swapped — the template "Cannot write message {}: {}"
    // expects the message first and the error detail second.
    LOG.error("Cannot write message {}: {}", m, e.getMessage());
    throw e;
  }
  recordConsumer.endMessage();
}
@Test public void testProto3WrappedMessageWithNullsRoundTrip() throws Exception { TestProto3.WrappedMessage.Builder msg = TestProto3.WrappedMessage.newBuilder(); msg.setWrappedFloat(FloatValue.of(3.1415f)); msg.setWrappedString(StringValue.of("Good Will Hunting")); msg.setWrappedInt32(Int32Value.of(0)); // Write them out and read them back Path tmpFilePath = TestUtils.someTemporaryFilePath(); ParquetWriter<MessageOrBuilder> writer = ProtoParquetWriter.<MessageOrBuilder>builder(tmpFilePath) .withMessage(TestProto3.WrappedMessage.class) .config(ProtoWriteSupport.PB_UNWRAP_PROTO_WRAPPERS, "true") .build(); writer.write(msg); writer.close(); List<TestProto3.WrappedMessage> gotBack = TestUtils.readMessages(tmpFilePath, TestProto3.WrappedMessage.class); TestProto3.WrappedMessage gotBackFirst = gotBack.get(0); assertFalse(gotBackFirst.hasWrappedDouble()); assertEquals(3.1415f, gotBackFirst.getWrappedFloat().getValue(), 1e-5f); // double-check that nulls are honored assertTrue(gotBackFirst.hasWrappedFloat()); assertFalse(gotBackFirst.hasWrappedInt64()); assertFalse(gotBackFirst.hasWrappedUInt64()); assertTrue(gotBackFirst.hasWrappedInt32()); assertFalse(gotBackFirst.hasWrappedUInt32()); assertEquals(0, gotBackFirst.getWrappedUInt32().getValue()); assertFalse(gotBackFirst.hasWrappedBool()); assertEquals("Good Will Hunting", gotBackFirst.getWrappedString().getValue()); assertFalse(gotBackFirst.hasWrappedBytes()); }
/**
 * Scans the plugins root directory and builds one descriptor per sub-directory.
 * Non-directory entries are skipped; the listing stream is closed deterministically.
 *
 * @return descriptors for every plugin sub-directory
 * @throws IOException if the plugins root does not exist or is not a directory
 */
@Override
public Collection<PluginDescriptor> findPlugins() throws IOException {
  if (!Files.isDirectory(pluginsRootDir)) {
    throw new IOException("Plugins root directory [" + pluginsRootDir + "] does not exist!");
  }
  // Files.list must be closed — hence the try-with-resources.
  try (Stream<Path> entries = Files.list(pluginsRootDir)) {
    return entries
        .filter(Files::isDirectory)
        .map(FunctionUtils.uncheckedFunction(this::createPluginDescriptorForSubDirectory))
        .collect(Collectors.toList());
  }
}
/**
 * End-to-end check of the directory-based plugin finder:
 * an empty root yields no descriptors, jar-less sub-directories make the finder
 * fail, and jar-carrying sub-directories each map to one descriptor.
 */
@Test
void createPluginDescriptorsForDirectory() throws Exception {
    File rootFolder = TempDirUtils.newFolder(tempFolder);
    PluginFinder descriptorsFactory = new DirectoryBasedPluginFinder(rootFolder.toPath());
    Collection<PluginDescriptor> actual = descriptorsFactory.findPlugins();
    // An existing but empty plugins root produces an empty collection.
    assertThat(actual).isEmpty();
    List<File> subDirs = Stream.of("A", "B", "C")
            .map(s -> new File(rootFolder, s))
            .collect(Collectors.toList());
    for (File subDir : subDirs) {
        Preconditions.checkState(subDir.mkdirs());
    }
    // Sub-directories without jars are rejected: IOException wrapped in RuntimeException.
    assertThatThrownBy(descriptorsFactory::findPlugins)
            .isInstanceOf(RuntimeException.class)
            .hasCauseInstanceOf(IOException.class);
    for (File subDir : subDirs) {
        // we create a file and another subfolder to check that they are ignored
        Preconditions.checkState(new File(subDir, "ignore-test.zip").createNewFile());
        Preconditions.checkState(new File(subDir, "ignore-dir").mkdirs());
    }
    // Still no jar files present, so the finder must keep failing.
    assertThatThrownBy(descriptorsFactory::findPlugins)
            .isInstanceOf(RuntimeException.class)
            .hasCauseInstanceOf(IOException.class);
    List<PluginDescriptor> expected = new ArrayList<>(3);
    for (int i = 0; i < subDirs.size(); ++i) {
        File subDir = subDirs.get(i);
        // Sub-directory i receives i + 1 jar files.
        URL[] jarURLs = new URL[i + 1];
        for (int j = 0; j <= i; ++j) {
            File file = new File(subDir, "jar-file-" + j + ".jar");
            Preconditions.checkState(file.createNewFile());
            jarURLs[j] = file.toURI().toURL();
        }
        Arrays.sort(jarURLs, Comparator.comparing(URL::toString));
        expected.add(new PluginDescriptor(subDir.getName(), jarURLs, new String[0]));
    }
    actual = descriptorsFactory.findPlugins();
    // Order of discovery is filesystem-dependent, so compare ignoring order.
    assertThat(equalsIgnoreOrder(expected, new ArrayList<>(actual))).isTrue();
}
@Override public int compare(String s1, String s2) { if (s1 == s2) { // NOSONAR false-positive: Compare Objects With Equals return 0; } int h1 = s1.hashCode(); int h2 = s2.hashCode(); if (h1 < h2) { return -1; } else if (h1 > h2) { return 1; } else { return s1.compareTo(s2); } }
/** Comparing a string against the same reference must always yield zero. */
@Test
public void sameObject() {
    String s1 = "a";
    String s2 = s1;
    assertThat(s1).isSameAs(s2);
    assertThat(compare(s1, s2)).isZero();
    // Deliberately repeated: confirms the comparator is consistent across calls.
    assertThat(compare(s1, s2)).isZero();
}
// Local map statistics are not supported by this adapter; the marker annotation
// and exception signal unavailability to callers and to the test harness.
@Override
@MethodNotAvailable
public LocalMapStats getLocalMapStats() {
    throw new MethodNotAvailableException();
}
/** getLocalMapStats is declared unavailable on this adapter; expect the marker exception. */
@Test(expected = MethodNotAvailableException.class)
public void testGetLocalMapStats() {
    adapter.getLocalMapStats();
}
/**
 * Fetches an application report from the application's home sub-cluster in a
 * federated YARN setup.
 *
 * @param appId the application to look up
 * @return the fetched report
 * @throws YarnException on YARN-side failures
 * @throws IOException on transport failures
 */
@Override
public FetchedAppReport getApplicationReport(ApplicationId appId) throws YarnException, IOException {
    SubClusterId scid = federationFacade.getApplicationHomeSubCluster(appId);
    // Lazily register the sub-cluster connection before use.
    createSubclusterIfAbsent(scid);
    // getRight() appears to hold the client protocol of the pair — TODO confirm pair layout.
    ApplicationClientProtocol applicationsManager = subClusters.get(scid).getRight();
    return super.getApplicationReport(applicationsManager, appId);
}
/**
 * With AHS enabled, each fetch consults the history service once in addition to
 * the application's own sub-cluster manager (one call per manager).
 */
@Test
public void testFetchReportAHSEnabled() throws YarnException, IOException {
    testHelper(true);
    fetcher.getApplicationReport(appId1);
    fetcher.getApplicationReport(appId2);
    // Two fetches => two history lookups, but only one per sub-cluster manager.
    Mockito.verify(history, Mockito.times(2))
            .getApplicationReport(Mockito.any(GetApplicationReportRequest.class));
    Mockito.verify(appManager1, Mockito.times(1))
            .getApplicationReport(Mockito.any(GetApplicationReportRequest.class));
    Mockito.verify(appManager2, Mockito.times(1))
            .getApplicationReport(Mockito.any(GetApplicationReportRequest.class));
}
/**
 * Unblocks a FAILED workflow instance run so dependent runs can proceed.
 * Only instances currently in FAILED status may be unblocked.
 *
 * @param workflowId workflow identifier
 * @param workflowInstanceId instance identifier
 * @param workflowRunId run identifier
 * @param caller user performing the action (recorded in the timeline)
 * @return the action response carrying the timeline event and update outcome
 */
public WorkflowInstanceActionResponse unblock(
    String workflowId, long workflowInstanceId, long workflowRunId, User caller) {
  WorkflowInstance instance =
      loadInstanceForAction(
          workflowId, workflowInstanceId, workflowRunId, Actions.WorkflowInstanceAction.UNBLOCK);
  LOG.debug("Unblocking workflow instance {} by user [{}]", instance.getIdentity(), caller);
  // Guard: unblock is only meaningful for FAILED instances.
  if (instance.getStatus() != WorkflowInstance.Status.FAILED) {
    throw new MaestroInvalidStatusException(
        "Cannot unblock a workflow instance %s from status [%s] as it is not FAILED status",
        instance.getIdentity(), instance.getStatus());
  }
  TimelineEvent event =
      TimelineActionEvent.builder()
          .action(Actions.WorkflowAction.UNBLOCK)
          .author(caller)
          .reason("[API] call to UNBLOCK a failed workflow instance run.")
          .build();
  boolean updated =
      instanceDao.tryUnblockFailedWorkflowInstance(
          workflowId, workflowInstanceId, instance.getWorkflowRunId(), event);
  // Nudge the workflow forward regardless; `updated` conveys whether unblock took effect.
  workflowHelper.publishStartWorkflowEvent(workflowId, updated);
  return WorkflowInstanceActionResponse.from(instance, event, updated);
}
/**
 * Unblocking an instance that is not FAILED must raise
 * MaestroInvalidStatusException mentioning the actual status.
 */
@Test
public void testInvalidUnblock() {
  when(workflowDao.getRunStrategy("test-workflow"))
      .thenReturn(RunStrategy.create("STRICT_SEQUENTIAL"));
  // FIX: removed a dead stubbing of getStatus() to FAILED that was immediately
  // overridden by the STOPPED stubbing below and could never take effect.
  when(instance.getStatus()).thenReturn(WorkflowInstance.Status.STOPPED);
  AssertHelper.assertThrows(
      "can only unblock failed instance",
      MaestroInvalidStatusException.class,
      "from status [STOPPED] as it is not FAILED status",
      () -> actionHandler.unblock("test-workflow", 1, 1, user));
}
/**
 * Returns the shared Connect-to-SQL type converter instance.
 *
 * @return the singleton {@code ConnectToSqlTypeConverter}
 */
public static ConnectToSqlTypeConverter connectToSqlConverter() {
  return CONNECT_TO_SQL_CONVERTER;
}
/** Every logical type registered in SQL_TO_LOGICAL must convert back to its SQL type. */
@Test
public void shouldGetSqlTypeForEveryLogicalType() {
  // Iterate the inverse mapping so each logical schema is checked against its SQL type.
  SQL_TO_LOGICAL.inverse().forEach((logical, sqlType) -> {
    assertThat(SchemaConverters.connectToSqlConverter().toSqlType(logical), is(sqlType));
  });
}
/**
 * Completes a segment upload: registers a brand-new segment or refreshes an
 * existing one, subject to the refresh-only header, parallel-push protection,
 * and the {@code allowRefresh} flag.
 *
 * @throws Exception if ZK metadata manipulation or segment processing fails
 */
public void completeSegmentOperations(String tableNameWithType, SegmentMetadata segmentMetadata,
    FileUploadType uploadType, @Nullable URI finalSegmentLocationURI, File segmentFile,
    @Nullable String sourceDownloadURIStr, String segmentDownloadURIStr, @Nullable String crypterName,
    long segmentSizeInBytes, boolean enableParallelPushProtection, boolean allowRefresh, HttpHeaders headers)
    throws Exception {
  String segmentName = segmentMetadata.getName();
  boolean refreshOnly =
      Boolean.parseBoolean(headers.getHeaderString(FileUploadDownloadClient.CustomHeaders.REFRESH_ONLY));
  ZNRecord existingSegmentMetadataZNRecord =
      _pinotHelixResourceManager.getSegmentMetadataZnRecord(tableNameWithType, segmentName);
  // A ZK record left over from a failed previous upload is cleaned up and the
  // upload is then treated as a brand-new segment.
  if (existingSegmentMetadataZNRecord != null && shouldProcessAsNewSegment(tableNameWithType, segmentName,
      existingSegmentMetadataZNRecord, enableParallelPushProtection)) {
    LOGGER.warn("Removing segment ZK metadata (recovering from previous upload failure) for table: {}, segment: {}",
        tableNameWithType, segmentName);
    Preconditions.checkState(_pinotHelixResourceManager.removeSegmentZKMetadata(tableNameWithType, segmentName),
        "Failed to remove segment ZK metadata for table: %s, segment: %s", tableNameWithType, segmentName);
    existingSegmentMetadataZNRecord = null;
  }
  if (existingSegmentMetadataZNRecord == null) {
    // Add a new segment
    if (refreshOnly) {
      // Refresh-only uploads must not create segments.
      throw new ControllerApplicationException(LOGGER,
          String.format("Cannot refresh non-existing segment: %s for table: %s", segmentName, tableNameWithType),
          Response.Status.GONE);
    }
    LOGGER.info("Adding new segment: {} to table: {}", segmentName, tableNameWithType);
    processNewSegment(tableNameWithType, segmentMetadata, uploadType, finalSegmentLocationURI, segmentFile,
        sourceDownloadURIStr, segmentDownloadURIStr, crypterName, segmentSizeInBytes, enableParallelPushProtection,
        headers);
  } else {
    // Refresh an existing segment
    if (!allowRefresh) {
      // We cannot perform this check up-front in UploadSegment API call. If a segment doesn't exist during the check
      // done up-front but ends up getting created before the check here, we could incorrectly refresh an existing
      // segment.
      throw new ControllerApplicationException(LOGGER,
          String.format("Segment: %s already exists in table: %s. Refresh not permitted.", segmentName,
              tableNameWithType), Response.Status.CONFLICT);
    }
    LOGGER.info("Segment: {} already exists in table: {}, refreshing it", segmentName, tableNameWithType);
    processExistingSegment(tableNameWithType, segmentMetadata, uploadType, existingSegmentMetadataZNRecord,
        finalSegmentLocationURI, segmentFile, sourceDownloadURIStr, segmentDownloadURIStr, crypterName,
        segmentSizeInBytes, enableParallelPushProtection, headers);
  }
}
/**
 * Exercises METADATA-type upload through ZKOperator, both with and without a
 * final segment location: the source tar must remain untouched and the ZK
 * metadata must reflect the mocked CRC and creation time.
 */
@Test
public void testMetadataUploadType()
    throws Exception {
  String segmentName = "metadataTest";
  FileUtils.deleteQuietly(TEMP_DIR);
  ZKOperator zkOperator = new ZKOperator(_resourceManager, mock(ControllerConf.class), mock(ControllerMetrics.class));
  SegmentMetadata segmentMetadata = mock(SegmentMetadata.class);
  when(segmentMetadata.getName()).thenReturn(segmentName);
  when(segmentMetadata.getCrc()).thenReturn("12345");
  when(segmentMetadata.getIndexCreationTime()).thenReturn(123L);
  HttpHeaders httpHeaders = mock(HttpHeaders.class);
  File segmentTar = generateSegment();
  String sourceDownloadURIStr = segmentTar.toURI().toString();
  // For METADATA uploads the "segment file" holds metadata only.
  File segmentFile = new File("metadataOnly");
  // with finalSegmentLocation not null
  File finalSegmentLocation = new File(DATA_DIR, segmentName);
  Assert.assertFalse(finalSegmentLocation.exists());
  zkOperator.completeSegmentOperations(OFFLINE_TABLE_NAME, segmentMetadata, FileUploadType.METADATA,
      finalSegmentLocation.toURI(), segmentFile, sourceDownloadURIStr, "downloadUrl", "crypter", 10, true, true,
      httpHeaders);
  // The segment is copied to the final location; the source tar must survive.
  Assert.assertTrue(finalSegmentLocation.exists());
  Assert.assertTrue(segmentTar.exists());
  checkSegmentZkMetadata(segmentName, 12345L, 123L);
  _resourceManager.deleteSegment(OFFLINE_TABLE_NAME, segmentName);
  // Wait for the segment Zk entry to be deleted.
  TestUtils.waitForCondition(aVoid -> {
    SegmentZKMetadata segmentZKMetadata = _resourceManager.getSegmentZKMetadata(OFFLINE_TABLE_NAME, segmentName);
    return segmentZKMetadata == null;
  }, 30_000L, "Failed to delete segmentZkMetadata.");
  FileUtils.deleteQuietly(DATA_DIR);
  // with finalSegmentLocation null
  zkOperator.completeSegmentOperations(OFFLINE_TABLE_NAME, segmentMetadata, FileUploadType.METADATA, null,
      segmentFile, sourceDownloadURIStr, "downloadUrl", "crypter", 10, true, true, httpHeaders);
  // Without a final location nothing is copied locally, but metadata is still written.
  Assert.assertFalse(finalSegmentLocation.exists());
  Assert.assertTrue(segmentTar.exists());
  checkSegmentZkMetadata(segmentName, 12345L, 123L);
}
/**
 * Delays checkpoint-complete notifications by {@code lsnCommitCheckpointsDelay}
 * checkpoints: ids are buffered in a min-heap and the smallest pending id is
 * committed only once the buffer exceeds the configured delay.
 *
 * @param checkpointId id of the checkpoint that just completed
 */
@Override
public void notifyCheckpointComplete(long checkpointId) throws Exception {
    this.minHeap.add(checkpointId);
    // Not enough checkpoints buffered yet — keep accumulating.
    if (this.minHeap.size() <= this.lsnCommitCheckpointsDelay) {
        LOG.info("Pending checkpoints '{}'.", this.minHeap);
        return;
    }
    // Commit the oldest (smallest) buffered checkpoint id.
    final long checkpointIdToCommit = this.minHeap.poll();
    LOG.info(
            "Pending checkpoints '{}', to be committed checkpoint id '{}'.",
            this.minHeap,
            checkpointIdToCommit);
    // After all snapshot splits are finished, update stream split's metadata and reset start
    // offset, which maybe smaller than before.
    // In case that new start-offset of stream split has been recycled, don't commit offset
    // during new table added phase.
    if (isCommitOffset()) {
        super.notifyCheckpointComplete(checkpointIdToCommit);
    }
}
/**
 * With a delay of 3, nothing is committed until the fourth notification arrives,
 * and then only the smallest buffered checkpoint id (101) is committed.
 */
@Test
void testNotifyCheckpointWindowSizeDefault() throws Exception {
    final PostgresSourceReader reader = createReader(3);
    final List<Long> completedCheckpointIds = new ArrayList<>();
    MockPostgresDialect.setNotifyCheckpointCompleteCallback(
            id -> completedCheckpointIds.add(id));
    // First three notifications only fill the buffer.
    reader.notifyCheckpointComplete(103L);
    assertThat(completedCheckpointIds).isEmpty();
    reader.notifyCheckpointComplete(102L);
    assertThat(completedCheckpointIds).isEmpty();
    reader.notifyCheckpointComplete(101L);
    assertThat(completedCheckpointIds).isEmpty();
    // Fourth notification overflows the window; the minimum id is committed.
    reader.notifyCheckpointComplete(104L);
    assertThat(completedCheckpointIds).containsExactly(101L);
}
/**
 * Parses a JSON string into a {@link JSONObject}.
 *
 * @param jsonStr JSON text to parse
 * @return the parsed JSON object
 */
public static JSONObject parseObj(String jsonStr) {
    return new JSONObject(jsonStr);
}
@Test public void toJsonStrTest2() { final Map<String, Object> model = new HashMap<>(); model.put("mobile", "17610836523"); model.put("type", 1); final Map<String, Object> data = new HashMap<>(); data.put("model", model); data.put("model2", model); final JSONObject jsonObject = JSONUtil.parseObj(data); assertTrue(jsonObject.containsKey("model")); assertEquals(1, jsonObject.getJSONObject("model").getInt("type").intValue()); assertEquals("17610836523", jsonObject.getJSONObject("model").getStr("mobile")); // assertEquals("{\"model\":{\"type\":1,\"mobile\":\"17610836523\"}}", jsonObject.toString()); }
/**
 * Demultiplexes one egress fragment to the matching listener callback.
 * Fragments for other sessions are silently ignored; fragments with a foreign
 * schema id are handed to the listener extension when present, otherwise rejected.
 *
 * @param buffer buffer containing the fragment
 * @param offset start of the fragment within the buffer
 * @param length fragment length in bytes
 * @param header Aeron frame header for the fragment
 */
@SuppressWarnings("MethodLength")
public void onFragment(final DirectBuffer buffer, final int offset, final int length, final Header header)
{
    messageHeaderDecoder.wrap(buffer, offset);

    final int templateId = messageHeaderDecoder.templateId();
    final int schemaId = messageHeaderDecoder.schemaId();
    if (schemaId != MessageHeaderDecoder.SCHEMA_ID)
    {
        // Foreign schema: forward to the extension if configured, otherwise fail fast.
        if (listenerExtension != null)
        {
            listenerExtension.onExtensionMessage(
                messageHeaderDecoder.blockLength(),
                templateId,
                schemaId,
                messageHeaderDecoder.version(),
                buffer,
                offset + MessageHeaderDecoder.ENCODED_LENGTH,
                length - MessageHeaderDecoder.ENCODED_LENGTH);
            return;
        }
        throw new ClusterException("expected schemaId=" + MessageHeaderDecoder.SCHEMA_ID + ", actual=" + schemaId);
    }

    switch (templateId)
    {
        // Application payload destined for this session.
        case SessionMessageHeaderDecoder.TEMPLATE_ID:
        {
            sessionMessageHeaderDecoder.wrap(
                buffer,
                offset + MessageHeaderDecoder.ENCODED_LENGTH,
                messageHeaderDecoder.blockLength(),
                messageHeaderDecoder.version());

            final long sessionId = sessionMessageHeaderDecoder.clusterSessionId();
            if (sessionId == clusterSessionId)
            {
                listener.onMessage(
                    sessionId,
                    sessionMessageHeaderDecoder.timestamp(),
                    buffer,
                    offset + SESSION_HEADER_LENGTH,
                    length - SESSION_HEADER_LENGTH,
                    header);
            }
            break;
        }

        // Session lifecycle / control event.
        case SessionEventDecoder.TEMPLATE_ID:
        {
            sessionEventDecoder.wrap(
                buffer,
                offset + MessageHeaderDecoder.ENCODED_LENGTH,
                messageHeaderDecoder.blockLength(),
                messageHeaderDecoder.version());

            final long sessionId = sessionEventDecoder.clusterSessionId();
            if (sessionId == clusterSessionId)
            {
                listener.onSessionEvent(
                    sessionEventDecoder.correlationId(),
                    sessionId,
                    sessionEventDecoder.leadershipTermId(),
                    sessionEventDecoder.leaderMemberId(),
                    sessionEventDecoder.code(),
                    sessionEventDecoder.detail());
            }
            break;
        }

        // Cluster leadership change notification.
        case NewLeaderEventDecoder.TEMPLATE_ID:
        {
            newLeaderEventDecoder.wrap(
                buffer,
                offset + MessageHeaderDecoder.ENCODED_LENGTH,
                messageHeaderDecoder.blockLength(),
                messageHeaderDecoder.version());

            final long sessionId = newLeaderEventDecoder.clusterSessionId();
            if (sessionId == clusterSessionId)
            {
                listener.onNewLeader(
                    sessionId,
                    newLeaderEventDecoder.leadershipTermId(),
                    newLeaderEventDecoder.leaderMemberId(),
                    newLeaderEventDecoder.ingressEndpoints());
            }
            break;
        }

        // Response to an admin request issued by this client.
        case AdminResponseDecoder.TEMPLATE_ID:
        {
            adminResponseDecoder.wrap(
                buffer,
                offset + MessageHeaderDecoder.ENCODED_LENGTH,
                messageHeaderDecoder.blockLength(),
                messageHeaderDecoder.version());

            final long sessionId = adminResponseDecoder.clusterSessionId();
            if (sessionId == clusterSessionId)
            {
                final long correlationId = adminResponseDecoder.correlationId();
                final AdminRequestType requestType = adminResponseDecoder.requestType();
                final AdminResponseCode responseCode = adminResponseDecoder.responseCode();
                final String message = adminResponseDecoder.message();
                // The payload follows the fixed block, the var-length message field
                // (header + ASCII bytes), and the payload's own length header.
                final int payloadOffset = adminResponseDecoder.offset() +
                    AdminResponseDecoder.BLOCK_LENGTH +
                    AdminResponseDecoder.messageHeaderLength() +
                    message.length() +
                    AdminResponseDecoder.payloadHeaderLength();
                final int payloadLength = adminResponseDecoder.payloadLength();
                listener.onAdminResponse(
                    sessionId,
                    correlationId,
                    requestType,
                    responseCode,
                    message,
                    buffer,
                    payloadOffset,
                    payloadLength);
            }
            break;
        }

        // Unknown template ids within our schema are ignored for forward compatibility.
        default:
            break;
    }
}
/**
 * A SessionEvent fragment whose cluster session id matches the adapter's own id
 * must be dispatched to onSessionEvent with the decoded fields, and nothing else.
 */
@Test
void onFragmentShouldInvokeOnSessionEventCallbackIfSessionIdMatches() {
    final int offset = 8;
    final long clusterSessionId = 42;
    final long correlationId = 777;
    final long leadershipTermId = 6;
    final int leaderMemberId = 3;
    final EventCode eventCode = EventCode.REDIRECT;
    final int version = 18;
    final String eventDetail = "Event details";
    // Encode a SessionEvent at a non-zero offset to verify offset handling.
    sessionEventEncoder
        .wrapAndApplyHeader(buffer, offset, messageHeaderEncoder)
        .clusterSessionId(clusterSessionId)
        .correlationId(correlationId)
        .leadershipTermId(leadershipTermId)
        .leaderMemberId(leaderMemberId)
        .code(eventCode)
        .version(version)
        .detail(eventDetail);
    final EgressListener egressListener = mock(EgressListener.class);
    final Header header = new Header(1, 3);
    final EgressAdapter adapter =
        new EgressAdapter(egressListener, clusterSessionId, mock(Subscription.class), 10);

    adapter.onFragment(buffer, offset, sessionEventEncoder.encodedLength(), header);

    verify(egressListener).onSessionEvent(
        correlationId,
        clusterSessionId,
        leadershipTermId,
        leaderMemberId,
        eventCode,
        eventDetail);
    verifyNoMoreInteractions(egressListener);
}
/**
 * Parses the token list into a node tree.
 *
 * @return the root node, or {@code null} when there are no tokens
 * @throws ScanException on malformed input
 */
public Node parse() throws ScanException {
    // An absent or empty token stream parses to the null tree.
    if (tokenList == null || tokenList.isEmpty()) {
        return null;
    }
    return E();
}
/** "%x{a}" must tokenize into the literal chain "%x" -> "{" -> "a" -> "}". */
@Test
public void literalWithAccolade1() throws ScanException {
    Tokenizer tokenizer = new Tokenizer("%x{a}");
    Parser parser = new Parser(tokenizer.tokenize());
    Node node = parser.parse();

    // Build the expected chain with a moving cursor.
    Node witness = new Node(Node.Type.LITERAL, "%x");
    Node cursor = witness;
    cursor.next = new Node(Node.Type.LITERAL, "{");
    cursor = cursor.next;
    cursor.next = new Node(Node.Type.LITERAL, "a");
    cursor = cursor.next;
    cursor.next = new Node(Node.Type.LITERAL, "}");

    assertEquals(witness, node);
}
/**
 * Records a REPLICATION_ENDED cluster event in the event ring buffer.
 * Space is claimed first; if the claim fails (index <= 0) the event is dropped.
 *
 * @param memberId cluster member emitting the event
 * @param purpose replication purpose label
 * @param channel replication channel URI
 * @param srcRecordingId source recording id
 * @param dstRecordingId destination recording id
 * @param position recording position at the end of replication
 * @param hasSynced whether the replication reached the synced state
 */
public void logReplicationEnded(
    final int memberId,
    final String purpose,
    final String channel,
    final long srcRecordingId,
    final long dstRecordingId,
    final long position,
    final boolean hasSynced)
{
    final int length = ClusterEventEncoder.replicationEndedLength(purpose, channel);
    final int captureLength = captureLength(length);
    final int encodedLength = encodedLength(captureLength);
    final ManyToOneRingBuffer ringBuffer = this.ringBuffer;
    final int index = ringBuffer.tryClaim(REPLICATION_ENDED.toEventCodeId(), encodedLength);

    if (index > 0)
    {
        try
        {
            ClusterEventEncoder.encodeReplicationEnded(
                (UnsafeBuffer)ringBuffer.buffer(),
                index,
                captureLength,
                length,
                memberId,
                purpose,
                channel,
                srcRecordingId,
                dstRecordingId,
                position,
                hasSynced);
        }
        finally
        {
            // Always commit so the claimed slot is released even if encoding throws.
            ringBuffer.commit(index);
        }
    }
}
/**
 * Encodes a REPLICATION_ENDED event into the log buffer and verifies both the
 * raw field layout (longs, ints, strings at computed offsets) and the dissected
 * human-readable rendering.
 */
@Test
void logReplicationEnded() {
    final int memberId = 222;
    final String purpose = "STANDBY_SNAPSHOT";
    final String channel = "aeron:udp?endpoint=localhost:9090";
    final long srcRecordingId = 234;
    final long dstRecordingId = 8435;
    final long position = 982342;
    final boolean hasSynced = true;
    final int offset = 64;
    logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, offset);

    logger.logReplicationEnded(
        memberId, purpose, channel, srcRecordingId, dstRecordingId, position, hasSynced);

    verifyLogHeader(
        logBuffer,
        offset,
        REPLICATION_ENDED.toEventCodeId(),
        replicationEndedLength(purpose, channel),
        replicationEndedLength(purpose, channel));
    // Raw layout: three longs, one int, one boolean byte, then the two length-prefixed strings.
    final int index = encodedMsgOffset(offset) + LOG_HEADER_LENGTH;
    assertEquals(srcRecordingId, logBuffer.getLong(index, LITTLE_ENDIAN));
    assertEquals(dstRecordingId, logBuffer.getLong(index + SIZE_OF_LONG, LITTLE_ENDIAN));
    assertEquals(position, logBuffer.getLong(index + (2 * SIZE_OF_LONG), LITTLE_ENDIAN));
    assertEquals(memberId, logBuffer.getInt(index + (3 * SIZE_OF_LONG), LITTLE_ENDIAN));
    assertEquals(1, logBuffer.getByte(index + (3 * SIZE_OF_LONG) + (SIZE_OF_INT)));
    final int purposeIndex = index + (3 * SIZE_OF_LONG) + (SIZE_OF_INT) + (SIZE_OF_BYTE);
    assertEquals(purpose, logBuffer.getStringAscii(purposeIndex));
    final int channelIndex = purposeIndex + SIZE_OF_INT + purpose.length();
    assertEquals(channel, logBuffer.getStringAscii(channelIndex, LITTLE_ENDIAN));

    // Dissected rendering must match the expected pattern (timestamp is variable).
    final StringBuilder sb = new StringBuilder();
    ClusterEventDissector.dissectReplicationEnded(
        REPLICATION_ENDED, logBuffer, encodedMsgOffset(offset), sb);
    final String expectedMessagePattern =
        "\\[[0-9]+\\.[0-9]+] CLUSTER: REPLICATION_ENDED \\[86/86]: memberId=222 " +
        "purpose=STANDBY_SNAPSHOT channel=aeron:udp\\?endpoint=localhost:9090 srcRecordingId=234 " +
        "dstRecordingId=8435 position=982342 hasSynced=true";
    assertThat(sb.toString(), Matchers.matchesPattern(expectedMessagePattern));
}
/**
 * Handles an activation-code submission: rejects expired authenticators,
 * activates the account on a correct code, and on a wrong code either blocks
 * the session (attempts exhausted) or reports the remaining attempts.
 *
 * @param flow the activation flow (must be an ActivationFlow)
 * @param request the submitted activation code
 * @return the response describing the activation outcome
 * @throws SharedServiceClientException if a downstream shared service fails
 */
@Override
public AppResponse process(Flow flow, ActivateWithCodeRequest request) throws SharedServiceClientException {
    digidClient.remoteLog("1092", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId()));
    // "Geldigheidstermijn" is the validity period in days; authenticators older
    // than that are expired.
    if (appAuthenticator.getCreatedAt().isBefore(ZonedDateTime.now().minusDays(Integer.parseInt(appAuthenticator.getGeldigheidstermijn())))) {
        digidClient.remoteLog("90", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId()));
        return new EnterActivationResponse("expired", Map.of(DAYS_VALID, Integer.valueOf(appAuthenticator.getGeldigheidstermijn())));
    }
    // Correct code AND successful remote activation => activate the app locally.
    if (correctActivationCode(request.getActivationCode()) && digidClient.activateAccount(appSession.getAccountId(), appAuthenticator.getIssuerType()).get(lowerUnderscore(STATUS)).equals("OK")) {
        ((ActivationFlow) flow).activateApp(appAuthenticator, appSession);
        attemptService.removeAttemptsForAppAuthenticator(appAuthenticator, "activation");
        return new OkResponse();
    } else if (attemptService.registerFailedAttempt(appAuthenticator, "activation")) {
        // Attempts exhausted: block. Pending authenticators are destroyed outright.
        digidClient.remoteLog("87", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId()));
        if(appAuthenticator.getStatus().equals("pending"))
            appAuthenticatorService.destroyExistingAppsByInstanceId(appAuthenticator.getInstanceId());
        appSession.setState("CANCELLED");
        appSessionService.save(appSession);
        setValid(false);
        return new StatusResponse(BLOCKED);
    } else {
        // Wrong code but attempts remain: report remaining attempts and letter date.
        digidClient.remoteLog("88", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId()));
        var letterSent = digidClient.letterSendDate((appSession.getRegistrationId()));
        return new EnterActivationResponse(INVALID,
            Map.of(REMAINING_ATTEMPTS, attemptService.remainingAttempts(appAuthenticator, "activation"),
                DATE_LETTER_SENT, letterSent.get("date")));
    }
}
@Test void activationCodeBlockedPendingResponse() throws SharedServiceClientException { //given mockedAppAuthenticator.setCreatedAt(ZonedDateTime.now()); mockedAppAuthenticator.setActivationCode("3"); mockedAppAuthenticator.setStatus("pending"); mockedAppAuthenticator.setInstanceId("test"); when(attemptService.registerFailedAttempt(mockedAppAuthenticator, "activation")).thenReturn(true); mockedAppAuthenticator.setStatus("pending"); //when AppResponse appResponse = enterActivationCode.process(mockedFlow, activateWithCodeRequest); //then verify(appAuthenticatorService, times(1)).destroyExistingAppsByInstanceId(mockedAppAuthenticator.getInstanceId()); assertTrue(appResponse instanceof StatusResponse); assertEquals("BLOCKED", ((StatusResponse) appResponse).getStatus()); }
/**
 * Builds the controller, filling in defaults for missing settings and
 * validating the configuration.
 *
 * @return the assembled {@code DefaultController}
 * @throws IllegalArgumentException if extension or reconciler is null, or
 *     minDelay exceeds maxDelay
 */
public Controller build() {
    // Defaults: wall-clock now, 5 ms minimum delay, 1000 s maximum delay.
    if (nowSupplier == null) {
        nowSupplier = Instant::now;
    }
    if (minDelay == null || minDelay.isNegative() || minDelay.isZero()) {
        minDelay = Duration.ofMillis(5);
    }
    if (maxDelay == null || maxDelay.isNegative() || maxDelay.isZero()) {
        maxDelay = Duration.ofSeconds(1000);
    }
    Assert.isTrue(minDelay.compareTo(maxDelay) <= 0,
        "Min delay must be less than or equal to max delay");
    Assert.notNull(extension, "Extension must not be null");
    Assert.notNull(reconciler, "Reconciler must not be null");
    var queue = new DefaultQueue<Request>(nowSupplier, minDelay);
    // Matchers decide which add/update/delete events reach the watcher.
    var extensionMatchers =
        WatcherExtensionMatchers.builder(client, extension.groupVersionKind())
            .onAddMatcher(onAddMatcher)
            .onUpdateMatcher(onUpdateMatcher)
            .onDeleteMatcher(onDeleteMatcher)
            .build();
    var watcher = new ExtensionWatcher(queue, extensionMatchers);
    var synchronizer = new RequestSynchronizer(syncAllOnStart,
        client,
        extension,
        watcher,
        determineSyncAllListOptions());
    return new DefaultController<>(name, reconciler, queue, synchronizer, minDelay, maxDelay,
        workerCount);
}
/**
 * Builder validation: a missing extension fails fast, while fully- and
 * partially-configured builders (including null matchers) build successfully.
 */
@Test
void buildTest() {
    // Missing extension must be rejected.
    assertThrows(IllegalArgumentException.class,
        () -> new ControllerBuilder(new FakeReconciler(), client)
            .build(),
        "Extension must not be null");

    assertNotNull(fakeBuilder().build());

    assertNotNull(fakeBuilder()
        .syncAllOnStart(true)
        .nowSupplier(Instant::now)
        .minDelay(Duration.ofMillis(5))
        .maxDelay(Duration.ofSeconds(1000))
        .build());

    // Null matchers are tolerated by the builder.
    assertNotNull(fakeBuilder()
        .syncAllOnStart(true)
        .minDelay(Duration.ofMillis(5))
        .maxDelay(Duration.ofSeconds(1000))
        .onAddMatcher(null)
        .onUpdateMatcher(null)
        .onDeleteMatcher(null)
        .build()
    );
}
/**
 * Decodes a single Set-Cookie header value into a {@link Cookie}.
 * The first name=value pair becomes the cookie; subsequent pairs are treated
 * as attributes. Parsing stops at an unquoted comma (multiple cookies per
 * header are deprecated). Returns {@code null} for an empty or invalid header.
 *
 * @param header the Set-Cookie header value; must not be null
 * @return the decoded cookie, or {@code null} if none could be decoded
 */
public Cookie decode(String header) {
    final int headerLen = checkNotNull(header, "header").length();

    if (headerLen == 0) {
        return null;
    }

    CookieBuilder cookieBuilder = null;

    loop: for (int i = 0;;) {

        // Skip spaces and separators.
        for (;;) {
            if (i == headerLen) {
                break loop;
            }
            char c = header.charAt(i);
            if (c == ',') {
                // Having multiple cookies in a single Set-Cookie header is
                // deprecated, modern browsers only parse the first one
                break loop;

            } else if (c == '\t' || c == '\n' || c == 0x0b || c == '\f'
                    || c == '\r' || c == ' ' || c == ';') {
                i++;
                continue;
            }
            break;
        }

        // Scan one name[=value] pair, recording the four boundary indices.
        int nameBegin = i;
        int nameEnd;
        int valueBegin;
        int valueEnd;

        for (;;) {
            char curChar = header.charAt(i);
            if (curChar == ';') {
                // NAME; (no value till ';')
                nameEnd = i;
                valueBegin = valueEnd = -1;
                break;

            } else if (curChar == '=') {
                // NAME=VALUE
                nameEnd = i;
                i++;
                if (i == headerLen) {
                    // NAME= (empty value, i.e. nothing after '=')
                    valueBegin = valueEnd = 0;
                    break;
                }

                valueBegin = i;
                // NAME=VALUE;
                int semiPos = header.indexOf(';', i);
                valueEnd = i = semiPos > 0 ? semiPos : headerLen;
                break;
            } else {
                i++;
            }

            if (i == headerLen) {
                // NAME (no value till the end of string)
                nameEnd = headerLen;
                valueBegin = valueEnd = -1;
                break;
            }
        }

        if (valueEnd > 0 && header.charAt(valueEnd - 1) == ',') {
            // old multiple cookies separator, skipping it
            valueEnd--;
        }

        if (cookieBuilder == null) {
            // cookie name-value pair
            DefaultCookie cookie = initCookie(header, nameBegin, nameEnd, valueBegin, valueEnd);

            if (cookie == null) {
                return null;
            }

            cookieBuilder = new CookieBuilder(cookie, header);
        } else {
            // cookie attribute
            cookieBuilder.appendAttribute(nameBegin, nameEnd, valueBegin, valueEnd);
        }
    }
    return cookieBuilder != null ? cookieBuilder.cookie() : null;
}
/**
 * The STRICT decoder must reject a cookie whose value contains an unquoted
 * comma, returning null rather than a partially parsed cookie.
 */
@Test
public void testDecodingValueWithCommaFails() {
    String source = "UserCookie=timeZoneName=(GMT+04:00) Moscow, St. Petersburg, Volgograd&promocode=&region=BE;"
            + " expires=Sat, 01-Dec-2012 10:53:31 GMT; path=/";

    Cookie cookie = ClientCookieDecoder.STRICT.decode(source);

    assertNull(cookie);
}
/**
 * Moves the given favorite to the front by assigning it a position one less
 * than the user's current minimum position.
 *
 * @param favoriteId id of the favorite to promote
 */
public void adjustFavoriteToFirst(long favoriteId) {
    Favorite favorite = favoriteRepository.findById(favoriteId).orElse(null);
    // Presumably rejects null / not-owned favorites (e.g. BadRequestException) —
    // TODO confirm; otherwise favorite.getUserId() below could NPE.
    checkUserOperatePermission(favorite);

    String userId = favorite.getUserId();
    // Current front of the list: smallest position, ties broken by creation time.
    Favorite firstFavorite = favoriteRepository.findFirstByUserIdOrderByPositionAscDataChangeCreatedTimeAsc(userId);
    long minPosition = firstFavorite.getPosition();

    favorite.setPosition(minPosition - 1);
    favoriteRepository.save(favorite);
}
/** Promoting a favorite owned by another user must be rejected with BadRequestException. */
@Test(expected = BadRequestException.class)
@Sql(scripts = "/sql/favorites/favorites.sql", executionPhase = Sql.ExecutionPhase.BEFORE_TEST_METHOD)
@Sql(scripts = "/sql/cleanup.sql", executionPhase = Sql.ExecutionPhase.AFTER_TEST_METHOD)
public void testAdjustFavoriteError() {
    // Favorite 23 belongs to a different user per the seeded fixture.
    long anotherPersonFavoriteId = 23;
    favoriteService.adjustFavoriteToFirst(anotherPersonFavoriteId);
}
/**
 * Renders the comment widget registered in the template context, or removes
 * the tag entirely when no widget is available.
 */
@Override
protected void doProcess(ITemplateContext context, IProcessableElementTag tag,
    IElementTagStructureHandler structureHandler) {
    Object widget =
        context.getVariable(CommentEnabledVariableProcessor.COMMENT_WIDGET_OBJECT_VARIABLE);
    if (widget == null) {
        // No widget configured: strip this tag from the rendered output.
        structureHandler.replaceWith("", false);
        return;
    }
    ((CommentWidget) widget).render(context, tag, structureHandler);
}
/**
 * With no enabled comment widget the tag renders to nothing; with a widget
 * enabled its output is substituted in place of the tag.
 */
@Test
void doProcess() {
    Context context = getContext();
    when(applicationContext.getBean(eq(SystemConfigurableEnvironmentFetcher.class)))
        .thenReturn(environmentFetcher);
    var commentSetting = mock(SystemSetting.Comment.class);
    when(environmentFetcher.fetchComment())
        .thenReturn(Mono.just(commentSetting));
    when(commentSetting.getEnable()).thenReturn(true);

    // No widget available: the tag must vanish from the output.
    when(extensionGetter.getEnabledExtensions(eq(CommentWidget.class)))
        .thenReturn(Flux.empty());
    String result = templateEngine.process("commentWidget", context);
    assertThat(result).isEqualTo("""
        <!DOCTYPE html>
        <html lang="en">
        <body>
        <p>comment widget:</p>
        \s
        </body>
        </html>
        """);

    // Widget available: its rendered markup replaces the tag.
    when(extensionGetter.getEnabledExtensions(eq(CommentWidget.class)))
        .thenReturn(Flux.just(new DefaultCommentWidget()));
    result = templateEngine.process("commentWidget", context);
    assertThat(result).isEqualTo("""
        <!DOCTYPE html>
        <html lang="en">
        <body>
        <p>comment widget:</p>
        <p>Comment in default widget</p>
        </body>
        </html>
        """);
}

// Minimal widget used by the test: emits a fixed paragraph in place of the tag.
static class DefaultCommentWidget implements CommentWidget {
    @Override
    public void render(ITemplateContext context, IProcessableElementTag tag,
        IElementTagStructureHandler structureHandler) {
        structureHandler.replaceWith("<p>Comment in default widget</p>", false);
    }
/**
 * Builds a {@code HostInfo} from a "host:port" endpoint string.
 *
 * @param endPoint endpoint to parse; blank input yields {@code null}
 * @return the parsed host info, or {@code null} for a blank endpoint
 * @throws ConfigException if the endpoint is not in host:port form
 */
public static HostInfo buildFromEndpoint(final String endPoint) {
    if (Utils.isBlank(endPoint)) {
        return null;
    }

    final String hostName = getHost(endPoint);
    final Integer portNumber = getPort(endPoint);
    if (hostName != null && portNumber != null) {
        return new HostInfo(hostName, portNumber);
    }
    throw new ConfigException(
        String.format("Error parsing host address %s. Expected format host:port.", endPoint)
    );
}
/** A blank (whitespace-only) endpoint must map to a null HostInfo. */
@Test
public void shouldReturnNullHostInfoForEmptyEndPoint() {
    final HostInfo result = HostInfo.buildFromEndpoint(" ");
    assertNull(result);
}
/**
 * Rewrites the expression tree via the comparison-operator plugin.
 * A fresh rewriter is constructed per call, so this method keeps no state.
 *
 * @param expression the expression to rewrite
 * @return the rewritten expression
 */
public Expression rewrite(final Expression expression) {
    return new ExpressionTreeRewriter<>(new OperatorPlugin()::process)
        .rewrite(expression, null);
}
/** A parser failure during rewriting must propagate unchanged to the caller. */
@Test
public void shouldThrowParseError() {
    // Given:
    final Expression predicate = getPredicate("SELECT * FROM orders where ROWTIME = '2017-01-01';");
    when(parser.parse(any())).thenThrow(new IllegalArgumentException("it no good"));

    // When:
    final Exception e = assertThrows(
        IllegalArgumentException.class,
        () -> rewriter.rewrite(predicate)
    );

    // Then: the original exception message is preserved.
    assertThat(e.getMessage(), containsString("it no good"));
}
/**
 * Disabled launcher: never opens anything.
 *
 * @param url ignored
 * @return always {@code false}
 */
@Override
public boolean open(final String url) {
    return false;
}
/** The disabled launcher must refuse every URL, including the empty string. */
@Test
public void testOpen() {
    final DisabledBrowserLauncher launcher = new DisabledBrowserLauncher();
    assertFalse(launcher.open(""));
}
/** Hash over (major, minor, patch) using the conventional 31-based accumulation. */
@Override
public int hashCode() {
    // Equivalent to: r = major; r = 31*r + minor; r = 31*r + patch;
    return (((int) major * 31) + (int) minor) * 31 + (int) patch;
}
/**
 * hashCode must be stable across calls, equal for equal attributes, and —
 * barring collisions — different when any single component differs.
 */
@Test
public void testHashCode() {
    assertEquals(version.hashCode(), version.hashCode());
    assertEquals(version.hashCode(), versionSameAttributes.hashCode());
    // Inequality is only checked when the fixture hashes happen to differ
    // (hash collisions would make these asserts flaky otherwise).
    assumeDifferentHashCodes();
    assertNotEquals(version.hashCode(), versionOtherMajor.hashCode());
    assertNotEquals(version.hashCode(), versionOtherMinor.hashCode());
    assertNotEquals(version.hashCode(), versionOtherPath.hashCode());
}
/**
 * Reacts to varbit/varp changes by creating, updating or removing the
 * corresponding infobox timers and counters. Each block below handles one
 * game variable; most are gated on a user config toggle.
 */
@Subscribe
public void onVarbitChanged(VarbitChanged event) {
    // Entering/leaving a raid: clear raid-scoped overload and prayer enhance.
    if (event.getVarbitId() == Varbits.IN_RAID) {
        removeVarTimer(OVERLOAD_RAID);
        removeGameTimer(PRAYER_ENHANCE);
    }
    // Simple on/off cooldown varbits: value 1 starts the timer, 0 removes it.
    if (event.getVarbitId() == Varbits.VENGEANCE_COOLDOWN && config.showVengeance()) {
        if (event.getValue() == 1) {
            createGameTimer(VENGEANCE);
        } else {
            removeGameTimer(VENGEANCE);
        }
    }
    if (event.getVarbitId() == Varbits.SPELLBOOK_SWAP && config.showSpellbookSwap()) {
        if (event.getValue() == 1) {
            createGameTimer(SPELLBOOK_SWAP);
        } else {
            removeGameTimer(SPELLBOOK_SWAP);
        }
    }
    if (event.getVarbitId() == Varbits.HEAL_GROUP_COOLDOWN && config.showHealGroup()) {
        if (event.getValue() == 1) {
            createGameTimer(HEAL_GROUP);
        } else {
            removeGameTimer(HEAL_GROUP);
        }
    }
    // Arceuus spellbook cooldowns share one config toggle.
    if (event.getVarbitId() == Varbits.DEATH_CHARGE_COOLDOWN && config.showArceuusCooldown()) {
        if (event.getValue() == 1) {
            createGameTimer(DEATH_CHARGE_COOLDOWN);
        } else {
            removeGameTimer(DEATH_CHARGE_COOLDOWN);
        }
    }
    if (event.getVarbitId() == Varbits.CORRUPTION_COOLDOWN && config.showArceuusCooldown()) {
        if (event.getValue() == 1) {
            createGameTimer(CORRUPTION_COOLDOWN);
        } else {
            removeGameTimer(CORRUPTION_COOLDOWN);
        }
    }
    if (event.getVarbitId() == Varbits.RESURRECT_THRALL_COOLDOWN && config.showArceuusCooldown()) {
        if (event.getValue() == 1) {
            createGameTimer(RESURRECT_THRALL_COOLDOWN);
        } else {
            removeGameTimer(RESURRECT_THRALL_COOLDOWN);
        }
    }
    if (event.getVarbitId() == Varbits.SHADOW_VEIL_COOLDOWN && config.showArceuusCooldown()) {
        if (event.getValue() == 1) {
            createGameTimer(SHADOW_VEIL_COOLDOWN);
        } else {
            removeGameTimer(SHADOW_VEIL_COOLDOWN);
        }
    }
    if (event.getVarbitId() == Varbits.WARD_OF_ARCEUUS_COOLDOWN && config.showArceuusCooldown()) {
        if (event.getValue() == 1) {
            createGameTimer(WARD_OF_ARCEUUS_COOLDOWN);
        } else {
            removeGameTimer(WARD_OF_ARCEUUS_COOLDOWN);
        }
    }
    if (event.getVarbitId() == Varbits.VENGEANCE_ACTIVE && config.showVengeanceActive()) {
        updateVarCounter(VENGEANCE_ACTIVE, event.getValue());
    }
    // Death charge duration scales with the player's real Magic level (in game ticks).
    if (event.getVarbitId() == Varbits.DEATH_CHARGE && config.showArceuus()) {
        if (event.getValue() == 1) {
            createGameTimer(DEATH_CHARGE,
                Duration.of(client.getRealSkillLevel(Skill.MAGIC), RSTimeUnit.GAME_TICKS));
        } else {
            removeGameTimer(DEATH_CHARGE);
        }
    }
    if (event.getVarbitId() == Varbits.RESURRECT_THRALL && event.getValue() == 0 && config.showArceuus()) {
        removeGameTimer(RESURRECT_THRALL);
    }
    if (event.getVarbitId() == Varbits.SHADOW_VEIL && event.getValue() == 0 && config.showArceuus()) {
        removeGameTimer(SHADOW_VEIL);
    }
    // Poison/venom share one varp; values >= VENOM_VALUE_CUTOFF encode venom.
    // nextPoisonTick anchors the countdown to the server's poison damage cycle.
    if (event.getVarpId() == VarPlayer.POISON && config.showAntiPoison()) {
        final int poisonVarp = event.getValue();
        final int tickCount = client.getTickCount();
        if (poisonVarp == 0) {
            nextPoisonTick = -1;
        } else if (nextPoisonTick - tickCount <= 0) {
            nextPoisonTick = tickCount + POISON_TICK_LENGTH;
        }
        updateVarTimer(ANTIPOISON, event.getValue(),
            i -> i >= 0 || i < VENOM_VALUE_CUTOFF,
            i -> nextPoisonTick - tickCount + Math.abs((i + 1) * POISON_TICK_LENGTH));
        updateVarTimer(ANTIVENOM, event.getValue(),
            i -> i >= VENOM_VALUE_CUTOFF,
            i -> nextPoisonTick - tickCount + Math.abs((i + 1 - VENOM_VALUE_CUTOFF) * POISON_TICK_LENGTH));
    }
    // Overload refresh counters (NMZ / CoX); pick the raid variant when inside a raid.
    if ((event.getVarbitId() == Varbits.NMZ_OVERLOAD_REFRESHES_REMAINING
        || event.getVarbitId() == Varbits.COX_OVERLOAD_REFRESHES_REMAINING) && config.showOverload()) {
        final int overloadVarb = event.getValue();
        final int tickCount = client.getTickCount();
        if (overloadVarb <= 0) {
            nextOverloadRefreshTick = -1;
        } else if (nextOverloadRefreshTick - tickCount <= 0) {
            nextOverloadRefreshTick = tickCount + OVERLOAD_TICK_LENGTH;
        }
        GameTimer overloadTimer = client.getVarbitValue(Varbits.IN_RAID) == 1 ? OVERLOAD_RAID : OVERLOAD;
        updateVarTimer(overloadTimer, overloadVarb,
            i -> nextOverloadRefreshTick - tickCount + (i - 1) * OVERLOAD_TICK_LENGTH);
    }
    // Teleblock varbit is offset by 100; non-positive adjusted values mean "inactive".
    if (event.getVarbitId() == Varbits.TELEBLOCK && config.showTeleblock()) {
        updateVarTimer(TELEBLOCK, event.getValue() - 100, i -> i <= 0, IntUnaryOperator.identity());
    }
    // The remaining var timers convert the raw value to ticks with a per-effect multiplier.
    if (event.getVarpId() == VarPlayer.CHARGE_GOD_SPELL && config.showCharge()) {
        updateVarTimer(CHARGE, event.getValue(), i -> i * 2);
    }
    if (event.getVarbitId() == Varbits.IMBUED_HEART_COOLDOWN && config.showImbuedHeart()) {
        updateVarTimer(IMBUEDHEART, event.getValue(), i -> i * 10);
    }
    if (event.getVarbitId() == Varbits.DRAGONFIRE_SHIELD_COOLDOWN && config.showDFSSpecial()) {
        updateVarTimer(DRAGON_FIRE_SHIELD, event.getValue(), i -> i * 8);
    }
    if (event.getVarpId() == LAST_HOME_TELEPORT && config.showHomeMinigameTeleports()) {
        checkTeleport(LAST_HOME_TELEPORT);
    }
    if (event.getVarpId() == LAST_MINIGAME_TELEPORT && config.showHomeMinigameTeleports()) {
        checkTeleport(LAST_MINIGAME_TELEPORT);
    }
    if (event.getVarbitId() == Varbits.RUN_SLOWED_DEPLETION_ACTIVE
        || event.getVarbitId() == Varbits.STAMINA_EFFECT
        || event.getVarbitId() == Varbits.RING_OF_ENDURANCE_EFFECT) {
        // staminaEffectActive is checked to match https://github.com/Joshua-F/cs2-scripts/blob/741271f0c3395048c1bad4af7881a13734516adf/scripts/%5Bproc%2Cbuff_bar_get_value%5D.cs2#L25
        int staminaEffectActive = client.getVarbitValue(Varbits.RUN_SLOWED_DEPLETION_ACTIVE);
        int staminaPotionEffectVarb = client.getVarbitValue(Varbits.STAMINA_EFFECT);
        int enduranceRingEffectVarb = client.getVarbitValue(Varbits.RING_OF_ENDURANCE_EFFECT);
        final int totalStaminaEffect = staminaPotionEffectVarb + enduranceRingEffectVarb;
        if (staminaEffectActive == 1 && config.showStamina()) {
            updateVarTimer(STAMINA, totalStaminaEffect, i -> i * 10);
        }
    }
    // Antifire timers anchor to the server's damage-check cycle like poison above.
    if (event.getVarbitId() == Varbits.ANTIFIRE && config.showAntiFire()) {
        final int antifireVarb = event.getValue();
        final int tickCount = client.getTickCount();
        if (antifireVarb == 0) {
            nextAntifireTick = -1;
        } else if (nextAntifireTick - tickCount <= 0) {
            nextAntifireTick = tickCount + ANTIFIRE_TICK_LENGTH;
        }
        updateVarTimer(ANTIFIRE, antifireVarb,
            i -> nextAntifireTick - tickCount + (i - 1) * ANTIFIRE_TICK_LENGTH);
    }
    if (event.getVarbitId() == Varbits.SUPER_ANTIFIRE && config.showAntiFire()) {
        final int superAntifireVarb = event.getValue();
        final int tickCount = client.getTickCount();
        if (superAntifireVarb == 0) {
            nextSuperAntifireTick = -1;
        } else if (nextSuperAntifireTick - tickCount <= 0) {
            nextSuperAntifireTick = tickCount + SUPERANTIFIRE_TICK_LENGTH;
        }
        updateVarTimer(SUPERANTIFIRE, event.getValue(),
            i -> nextSuperAntifireTick - tickCount + (i - 1) * SUPERANTIFIRE_TICK_LENGTH);
    }
    if (event.getVarbitId() == Varbits.MAGIC_IMBUE && config.showMagicImbue()) {
        updateVarTimer(MAGICIMBUE, event.getValue(), i -> i * 10);
    }
    // Divine potions: combined potions (super combat / bastion / battlemage) drive
    // the same varbits as their components, so single-stat timers yield to them.
    if (event.getVarbitId() == Varbits.DIVINE_SUPER_ATTACK && config.showDivine()) {
        if (client.getVarbitValue(Varbits.DIVINE_SUPER_COMBAT) > event.getValue()) {
            return;
        }
        updateVarTimer(DIVINE_SUPER_ATTACK, event.getValue(), IntUnaryOperator.identity());
    }
    if (event.getVarbitId() == Varbits.DIVINE_SUPER_STRENGTH && config.showDivine()) {
        if (client.getVarbitValue(Varbits.DIVINE_SUPER_COMBAT) > event.getValue()) {
            return;
        }
        updateVarTimer(DIVINE_SUPER_STRENGTH, event.getValue(), IntUnaryOperator.identity());
    }
    if (event.getVarbitId() == Varbits.DIVINE_SUPER_DEFENCE && config.showDivine()) {
        if (client.getVarbitValue(Varbits.DIVINE_SUPER_COMBAT) > event.getValue()
            || client.getVarbitValue(Varbits.DIVINE_BASTION) > event.getValue()
            || client.getVarbitValue(Varbits.DIVINE_BATTLEMAGE) > event.getValue()
            // When drinking a dose of moonlight potion while already under its effects, desync between
            // Varbits.MOONLIGHT_POTION and Varbits.DIVINE_SUPER_DEFENCE can occur, with the latter being 1 tick
            // greater
            || client.getVarbitValue(Varbits.MOONLIGHT_POTION) >= event.getValue()) {
            return;
        }
        if (client.getVarbitValue(Varbits.MOONLIGHT_POTION) < event.getValue()) {
            removeVarTimer(MOONLIGHT_POTION);
        }
        updateVarTimer(DIVINE_SUPER_DEFENCE, event.getValue(), IntUnaryOperator.identity());
    }
    if (event.getVarbitId() == Varbits.DIVINE_RANGING && config.showDivine()) {
        if (client.getVarbitValue(Varbits.DIVINE_BASTION) > event.getValue()) {
            return;
        }
        updateVarTimer(DIVINE_RANGING, event.getValue(), IntUnaryOperator.identity());
    }
    if (event.getVarbitId() == Varbits.DIVINE_MAGIC && config.showDivine()) {
        if (client.getVarbitValue(Varbits.DIVINE_BATTLEMAGE) > event.getValue()) {
            return;
        }
        updateVarTimer(DIVINE_MAGIC, event.getValue(), IntUnaryOperator.identity());
    }
    // Combined divine potions replace their component timers with a single timer.
    if (event.getVarbitId() == Varbits.DIVINE_SUPER_COMBAT && config.showDivine()) {
        if (client.getVarbitValue(Varbits.DIVINE_SUPER_ATTACK) == event.getValue()) {
            removeVarTimer(DIVINE_SUPER_ATTACK);
        }
        if (client.getVarbitValue(Varbits.DIVINE_SUPER_STRENGTH) == event.getValue()) {
            removeVarTimer(DIVINE_SUPER_STRENGTH);
        }
        if (client.getVarbitValue(Varbits.DIVINE_SUPER_DEFENCE) == event.getValue()) {
            removeVarTimer(DIVINE_SUPER_DEFENCE);
        }
        updateVarTimer(DIVINE_SUPER_COMBAT, event.getValue(), IntUnaryOperator.identity());
    }
    if (event.getVarbitId() == Varbits.DIVINE_BASTION && config.showDivine()) {
        if (client.getVarbitValue(Varbits.DIVINE_RANGING) == event.getValue()) {
            removeVarTimer(DIVINE_RANGING);
        }
        if (client.getVarbitValue(Varbits.DIVINE_SUPER_DEFENCE) == event.getValue()) {
            removeVarTimer(DIVINE_SUPER_DEFENCE);
        }
        updateVarTimer(DIVINE_BASTION, event.getValue(), IntUnaryOperator.identity());
    }
    if (event.getVarbitId() == Varbits.DIVINE_BATTLEMAGE && config.showDivine()) {
        if (client.getVarbitValue(Varbits.DIVINE_MAGIC) == event.getValue()) {
            removeVarTimer(DIVINE_MAGIC);
        }
        if (client.getVarbitValue(Varbits.DIVINE_SUPER_DEFENCE) == event.getValue()) {
            removeVarTimer(DIVINE_SUPER_DEFENCE);
        }
        updateVarTimer(DIVINE_BATTLEMAGE, event.getValue(), IntUnaryOperator.identity());
    }
    if (event.getVarbitId() == Varbits.BUFF_STAT_BOOST && config.showOverload()) {
        updateVarTimer(SMELLING_SALTS, event.getValue(), i -> i * 25);
    }
    if (event.getVarbitId() == Varbits.MENAPHITE_REMEDY && config.showMenaphiteRemedy()) {
        updateVarTimer(MENAPHITE_REMEDY, event.getValue(), i -> i * 25);
    }
    if (event.getVarbitId() == Varbits.LIQUID_ADERNALINE_ACTIVE && event.getValue() == 0
        && config.showLiquidAdrenaline()) {
        removeGameTimer(LIQUID_ADRENALINE);
    }
    if (event.getVarbitId() == Varbits.FARMERS_AFFINITY && config.showFarmersAffinity()) {
        updateVarTimer(FARMERS_AFFINITY, event.getValue(), i -> i * 20);
    }
    if (event.getVarbitId() == Varbits.GOD_WARS_ALTAR_COOLDOWN && config.showGodWarsAltar()) {
        updateVarTimer(GOD_WARS_ALTAR, event.getValue(), i -> i * 100);
    }
    // Curse of the Moons: pick the counter variant matching the current moon region.
    if (event.getVarbitId() == Varbits.CURSE_OF_THE_MOONS && config.showCurseOfTheMoons()) {
        final int regionID =
            WorldPoint.fromLocal(client, client.getLocalPlayer().getLocalLocation()).getRegionID();
        if (regionID == ECLIPSE_MOON_REGION_ID) {
            updateVarCounter(CURSE_OF_THE_MOONS_ECLIPSE, event.getValue());
        } else {
            updateVarCounter(CURSE_OF_THE_MOONS_BLUE, event.getValue());
        }
    }
    if (event.getVarbitId() == Varbits.COLOSSEUM_DOOM && config.showColosseumDoom()) {
        updateVarCounter(COLOSSEUM_DOOM, event.getValue());
    }
    if (event.getVarbitId() == Varbits.MOONLIGHT_POTION && config.showMoonlightPotion()) {
        int moonlightValue = event.getValue();
        // Increase the timer by 1 tick in case of desync due to drinking a dose of moonlight potion while already
        // under its effects. Otherwise, the timer would be 1 tick shorter than it is meant to be.
        if (client.getVarbitValue(Varbits.DIVINE_SUPER_DEFENCE) == moonlightValue + 1) {
            moonlightValue++;
        }
        updateVarTimer(MOONLIGHT_POTION, moonlightValue, IntUnaryOperator.identity());
    }
}
// Setting the Ward of Arceuus cooldown varbit to 1 (with the config enabled)
// must add exactly one infobox, and it must be the matching cooldown timer.
@Test
public void testArceuusWardCooldown() {
    when(timersAndBuffsConfig.showArceuusCooldown()).thenReturn(true);
    VarbitChanged varbitChanged = new VarbitChanged();
    varbitChanged.setVarbitId(Varbits.WARD_OF_ARCEUUS_COOLDOWN);
    varbitChanged.setValue(1);
    timersAndBuffsPlugin.onVarbitChanged(varbitChanged);
    // Capture the infobox handed to the manager and verify its timer type.
    ArgumentCaptor<InfoBox> captor = ArgumentCaptor.forClass(InfoBox.class);
    verify(infoBoxManager).addInfoBox(captor.capture());
    TimerTimer infoBox = (TimerTimer) captor.getValue();
    assertEquals(GameTimer.WARD_OF_ARCEUUS_COOLDOWN, infoBox.getTimer());
}
/**
 * Builds the next batch of ShareFetch requests.
 *
 * Order matters: pending acknowledgements are flushed first; fetching is then
 * skipped while paused or closing. For each fetchable partition the current
 * leader and topic ID are resolved (requesting a metadata update when either
 * is missing), at most one in-flight request per node is allowed, and any
 * acknowledgements for the partition piggy-back on the fetch.
 *
 * @param currentTimeMs current time used for acknowledgement processing
 * @return requests to send, or {@code PollResult.EMPTY} when there is nothing to do
 */
@Override
public PollResult poll(long currentTimeMs) {
    // No member ID yet means the share group membership is not established.
    if (memberId == null) {
        return PollResult.EMPTY;
    }
    // Send any pending acknowledgements before fetching more records.
    PollResult pollResult = processAcknowledgements(currentTimeMs);
    if (pollResult != null) {
        return pollResult;
    }
    if (!fetchMoreRecords || closing) {
        return PollResult.EMPTY;
    }
    Map<Node, ShareSessionHandler> handlerMap = new HashMap<>();
    Map<String, Uuid> topicIds = metadata.topicIds();
    for (TopicPartition partition : partitionsToFetch()) {
        Optional<Node> leaderOpt = metadata.currentLeader(partition).leader;
        if (!leaderOpt.isPresent()) {
            log.debug("Requesting metadata update for partition {} since current leader node is missing", partition);
            metadata.requestUpdate(false);
            continue;
        }
        Uuid topicId = topicIds.get(partition.topic());
        if (topicId == null) {
            log.debug("Requesting metadata update for partition {} since topic ID is missing", partition);
            metadata.requestUpdate(false);
            continue;
        }
        Node node = leaderOpt.get();
        if (nodesWithPendingRequests.contains(node.id())) {
            log.trace("Skipping fetch for partition {} because previous fetch request to {} has not been processed", partition, node.id());
        } else {
            // if there is a leader and no in-flight requests, issue a new fetch
            ShareSessionHandler handler = handlerMap.computeIfAbsent(node,
                k -> sessionHandlers.computeIfAbsent(node.id(),
                    n -> new ShareSessionHandler(logContext, n, memberId)));
            TopicIdPartition tip = new TopicIdPartition(topicId, partition);
            // Piggy-back any acknowledgements destined for this partition on the fetch.
            Acknowledgements acknowledgementsToSend = fetchAcknowledgementsMap.get(tip);
            if (acknowledgementsToSend != null) {
                metricsManager.recordAcknowledgementSent(acknowledgementsToSend.size());
            }
            handler.addPartitionToFetch(tip, acknowledgementsToSend);
            log.debug("Added fetch request for partition {} to node {}", partition, node.id());
        }
    }
    // LinkedHashMap keeps the request order deterministic per handler insertion.
    Map<Node, ShareFetchRequest.Builder> builderMap = new LinkedHashMap<>();
    for (Map.Entry<Node, ShareSessionHandler> entry : handlerMap.entrySet()) {
        builderMap.put(entry.getKey(), entry.getValue().newShareFetchBuilder(groupId, fetchConfig));
    }
    List<UnsentRequest> requests = builderMap.entrySet().stream().map(entry -> {
        Node target = entry.getKey();
        log.trace("Building ShareFetch request to send to node {}", target.id());
        ShareFetchRequest.Builder requestBuilder = entry.getValue();
        // Mark the node busy until its response (or failure) is handled.
        nodesWithPendingRequests.add(target.id());
        BiConsumer<ClientResponse, Throwable> responseHandler = (clientResponse, error) -> {
            if (error != null) {
                handleShareFetchFailure(target, requestBuilder.data(), error);
            } else {
                handleShareFetchSuccess(target, requestBuilder.data(), clientResponse);
            }
        };
        return new UnsentRequest(requestBuilder, Optional.of(target)).whenComplete(responseHandler);
    }).collect(Collectors.toList());
    return new PollResult(requests);
}
// Record headers must survive the fetch round-trip: a record without headers
// yields no header, and lastHeader() returns the most recently appended value
// when a key appears multiple times.
@Test
public void testHeaders() {
    buildRequestManager();

    // Build a batch of three records: no headers, one header, two headers with the same key.
    MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
        Compression.NONE, TimestampType.CREATE_TIME, 1L);
    builder.append(0L, "key".getBytes(), "value-1".getBytes());

    Header[] headersArray = new Header[1];
    headersArray[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-2".getBytes(), headersArray);

    Header[] headersArray2 = new Header[2];
    headersArray2[0] = new RecordHeader("headerKey", "headerValue".getBytes(StandardCharsets.UTF_8));
    headersArray2[1] = new RecordHeader("headerKey", "headerValue2".getBytes(StandardCharsets.UTF_8));
    builder.append(0L, "key".getBytes(), "value-3".getBytes(), headersArray2);

    MemoryRecords memoryRecords = builder.build();
    List<ConsumerRecord<byte[], byte[]>> records;
    assignFromSubscribed(singleton(tp0));
    client.prepareResponse(fullFetchResponse(tip0, memoryRecords,
        ShareCompletedFetchTest.acquiredRecords(1L, 3), Errors.NONE));

    assertEquals(1, sendFetches());
    networkClientDelegate.poll(time.timer(0));
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> recordsByPartition = fetchRecords();
    records = recordsByPartition.get(tp0);
    assertEquals(3, records.size());

    Iterator<ConsumerRecord<byte[], byte[]>> recordIterator = records.iterator();
    // Record 1: appended without headers.
    ConsumerRecord<byte[], byte[]> record = recordIterator.next();
    assertNull(record.headers().lastHeader("headerKey"));
    // Record 2: single header.
    record = recordIterator.next();
    assertEquals("headerValue", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
    // Record 3: duplicate key — lastHeader() must return the later value.
    record = recordIterator.next();
    assertEquals("headerValue2", new String(record.headers().lastHeader("headerKey").value(), StandardCharsets.UTF_8));
    assertEquals("headerKey", record.headers().lastHeader("headerKey").key());
}
boolean valid(int nodeId, MetadataImage image) { TopicImage topicImage = image.topics().getTopic(topicIdPartition.topicId()); if (topicImage == null) { return false; // The topic has been deleted. } PartitionRegistration partition = topicImage.partitions().get(topicIdPartition.partitionId()); if (partition == null) { return false; // The partition no longer exists. } // Check if this broker is still a replica. return Replicas.contains(partition.replicas, nodeId); }
// An assignment whose topic, partition and replica (broker 0) all exist in the
// test metadata image must be reported as valid.
@Test
public void testValidAssignment() {
    assertTrue(new Assignment(
        new TopicIdPartition(Uuid.fromString("rTudty6ITOCcO_ldVyzZYg"), 0),
        Uuid.fromString("rzRT8XZaSbKsP6j238zogg"),
        0,
        NoOpRunnable.INSTANCE).valid(0, TEST_IMAGE));
}
/**
 * Delegates to the wrapped expression's original source string.
 *
 * @return the raw expression string as parsed
 */
@Override
public String getExpressionString() {
    return expression.getExpressionString();
}
// Verifies that the wrapper exposes the original SpEL source string unchanged.
@Test
void testGetExpressionString() {
    ExpressionParser parser = new SpelExpressionParser();
    Expression defaultExpression = parser.parseExpression("'Hello World'.concat('!')");
    SpringELExpression springELExpression = new SpringELExpression(defaultExpression);
    // Fix: JUnit's assertEquals signature is (expected, actual); the original call
    // had the arguments reversed, which produces a misleading failure message.
    Assertions.assertEquals("'Hello World'.concat('!')", springELExpression.getExpressionString());
}
/**
 * Debug representation, e.g. {@code DataChecksum(type=CRC32, chunkSize=512)}.
 */
@Override
public String toString() {
    // Formatted variant of the original string concatenation; output is identical.
    return String.format("DataChecksum(type=%s, chunkSize=%d)", type, bytesPerChecksum);
}
// Pins the exact debug format "DataChecksum(type=..., chunkSize=...)".
@Test
public void testToString() {
    assertEquals("DataChecksum(type=CRC32, chunkSize=512)",
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512).toString());
}
/**
 * Looks up a request error, preferring the plain "error" attribute over the
 * servlet-spec {@code RequestDispatcher.ERROR_EXCEPTION} attribute.
 *
 * @return the error if either attribute holds a {@link Throwable}, else null
 */
@Nullable Throwable maybeError() {
    // Check the two attribute names in priority order; ignore non-Throwable values.
    for (String attributeName : new String[] {"error", RequestDispatcher.ERROR_EXCEPTION}) {
        Object candidate = delegate.getAttribute(attributeName);
        if (candidate instanceof Throwable) {
            return (Throwable) candidate;
        }
    }
    return null;
}
// A non-Throwable value stored under the "error" attribute must be ignored,
// not cast or returned.
@Test
void maybeError_badRequestAttribute() {
    when(request.getAttribute("error")).thenReturn(new Object());

    assertThat(wrapper.maybeError()).isNull();
}
/**
 * @return the maximum number of consecutive planning failures tolerated
 *     before the source fails (per the builder's configuration)
 */
public int maxAllowedPlanningFailures() {
    return maxAllowedPlanningFailures;
}
// Negative values other than the -1 sentinel must be rejected at build/validate time.
@Test
void testMaxAllowedPlanningFailures() {
    ScanContext context = ScanContext.builder().maxAllowedPlanningFailures(-2).build();
    assertException(
        context, "Cannot set maxAllowedPlanningFailures to a negative number other than -1.");
}
/**
 * Parses an {@link UpdateRequirement} from its JSON representation.
 *
 * @param json JSON string to parse
 * @return the parsed requirement
 */
public static UpdateRequirement fromJson(String json) {
    // Delegates the actual node-level parsing to the JsonNode overload.
    return JsonUtil.parse(json, UpdateRequirementParser::fromJson);
}
// Round-trip check: the assert-current-schema-id requirement parses from JSON
// into an equivalent AssertCurrentSchemaID object.
@Test
public void testAssertCurrentSchemaIdFromJson() {
    String requirementType = UpdateRequirementParser.ASSERT_CURRENT_SCHEMA_ID;
    int schemaId = 4;
    String json = String.format("{\"type\":\"%s\",\"current-schema-id\":%d}", requirementType, schemaId);
    UpdateRequirement expected = new UpdateRequirement.AssertCurrentSchemaID(schemaId);
    assertEquals(requirementType, expected, UpdateRequirementParser.fromJson(json));
}
/**
 * Removes all {@link InternalThreadLocal} values registered on the current
 * thread, then discards the thread's {@link InternalThreadLocalMap} itself.
 * Safe to call when no map was ever created for this thread.
 */
@SuppressWarnings("unchecked")
public static void removeAll() {
    InternalThreadLocalMap map = InternalThreadLocalMap.getIfSet();
    if (map == null) {
        // Nothing was ever stored on this thread.
        return;
    }
    try {
        Object registered = map.indexedVariable(VARIABLES_TO_REMOVE_INDEX);
        if (registered != null && registered != InternalThreadLocalMap.UNSET) {
            // Snapshot to an array first: remove(map) mutates the backing set.
            for (InternalThreadLocal<?> threadLocal
                : ((Set<InternalThreadLocal<?>>) registered).toArray(new InternalThreadLocal[0])) {
                threadLocal.remove(map);
            }
        }
    } finally {
        // Always drop the per-thread map, even if an individual remove throws.
        InternalThreadLocalMap.remove();
    }
}
// After removeAll(), every previously-set InternalThreadLocal on this thread
// must read back as null (its unset state).
@Test
void testRemoveAll() {
    final InternalThreadLocal<Integer> internalThreadLocal = new InternalThreadLocal<Integer>();
    internalThreadLocal.set(1);
    Assertions.assertEquals(1, (int) internalThreadLocal.get(), "set failed");

    final InternalThreadLocal<String> internalThreadLocalString = new InternalThreadLocal<String>();
    internalThreadLocalString.set("value");
    Assertions.assertEquals("value", internalThreadLocalString.get(), "set failed");

    InternalThreadLocal.removeAll();
    Assertions.assertNull(internalThreadLocal.get(), "removeAll failed!");
    Assertions.assertNull(internalThreadLocalString.get(), "removeAll failed!");
}
@VisibleForTesting protected RequestInterceptorChainWrapper getInterceptorChain( final HttpServletRequest hsr) { String user = ""; if (hsr != null) { user = hsr.getRemoteUser(); } try { if (user == null || user.equals("")) { // Yarn Router user user = UserGroupInformation.getCurrentUser().getUserName(); } } catch (IOException e) { LOG.error("Cannot get user: {}", e.getMessage()); } RequestInterceptorChainWrapper chain = userPipelineMap.get(user); if (chain != null && chain.getRootInterceptor() != null) { return chain; } return initializePipeline(user); }
// Two threads racing to build the interceptor chain for the same user must end
// up sharing one chain instance (no duplicate initialization, no errors).
@Test
public void testWebPipelineConcurrent() throws InterruptedException {
    final String user = "test1";

    /*
     * ClientTestThread is a thread to simulate a client request to get a
     * RESTRequestInterceptor for the user.
     */
    class ClientTestThread extends Thread {
        private RESTRequestInterceptor interceptor;
        @Override
        public void run() {
            try {
                interceptor = pipeline();
            } catch (IOException | InterruptedException e) {
                e.printStackTrace();
            }
        }
        private RESTRequestInterceptor pipeline()
            throws IOException, InterruptedException {
            // Run as the target user so getInterceptorChain resolves "test1".
            return UserGroupInformation.createRemoteUser(user).doAs(
                new PrivilegedExceptionAction<RESTRequestInterceptor>() {
                    @Override
                    public RESTRequestInterceptor run() throws Exception {
                        RequestInterceptorChainWrapper wrapper = getInterceptorChain(user);
                        RESTRequestInterceptor interceptor = wrapper.getRootInterceptor();
                        Assert.assertNotNull(interceptor);
                        LOG.info("init web interceptor success for user" + user);
                        return interceptor;
                    }
                });
        }
    }

    /*
     * We start the first thread. It should not finish initing a chainWrapper
     * before the other thread starts. In this way, the second thread can
     * init at the same time of the first one. In the end, we validate that
     * the 2 threads get the same chainWrapper without going into error.
     */
    ClientTestThread client1 = new ClientTestThread();
    ClientTestThread client2 = new ClientTestThread();
    client1.start();
    client2.start();
    client1.join();
    client2.join();

    Assert.assertNotNull(client1.interceptor);
    Assert.assertNotNull(client2.interceptor);
    // Same instance proves the chain was initialized once and cached.
    Assert.assertSame(client1.interceptor, client2.interceptor);
}
/**
 * Sets the codec name used for serialization and returns this builder
 * for fluent chaining.
 *
 * @param codec codec name, e.g. a registered codec extension
 * @return this builder
 */
public ProviderBuilder codec(String codec) {
    this.codec = codec;
    return getThis();
}
// The codec set on the builder must survive into the built ProviderConfig.
@Test
void codec() {
    ProviderBuilder builder = ProviderBuilder.newBuilder();
    builder.codec("mockcodec");
    Assertions.assertEquals("mockcodec", builder.build().getCodec());
}
/**
 * Reports every activity as ignored for AppViewScreen auto-tracking.
 *
 * NOTE(review): this implementation unconditionally returns true regardless of
 * the argument — presumably a disabled/empty API variant; confirm against the
 * enabled implementation before relying on per-activity behavior.
 *
 * @param activity the activity class to check (ignored)
 * @return always {@code true}
 */
@Override
public boolean isActivityAutoTrackAppViewScreenIgnored(Class<?> activity) {
    return true;
}
// After explicitly ignoring an activity, the API must report it as ignored.
@Test
public void isActivityAutoTrackAppViewScreenIgnored() {
    mSensorsAPI.ignoreAutoTrackActivity(EmptyActivity.class);
    Assert.assertTrue(mSensorsAPI.isActivityAutoTrackAppViewScreenIgnored(EmptyActivity.class));
}
/**
 * Static factory for the public-facing REST server configuration.
 *
 * @param rebalanceTimeoutMs rebalance timeout, may be null
 * @param props              raw configuration properties to validate/parse
 * @return a {@code PublicConfig} view over the given properties
 */
public static RestServerConfig forPublic(Integer rebalanceTimeoutMs, Map<?, ?> props) {
    return new PublicConfig(rebalanceTimeoutMs, props);
}
// A trailing comma in admin.listeners produces an empty element, which config
// validation must reject with a message naming the offending property.
@Test
public void testAdminListenersNotAllowingEmptyStrings() {
    Map<String, String> props = new HashMap<>();
    props.put(RestServerConfig.ADMIN_LISTENERS_CONFIG, "http://a.b:9999,");
    ConfigException ce = assertThrows(ConfigException.class, () -> RestServerConfig.forPublic(null, props));
    assertTrue(ce.getMessage().contains(" admin.listeners"));
}
/**
 * @return the per-topology configuration; synchronized because the field may
 *     be read and written from different threads (consistent with the class's
 *     other synchronized accessors)
 */
public synchronized TopologyConfig topologyConfigs() {
    return topologyConfigs;
}
// Named-topology property overrides must take precedence over the global
// StreamsConfig for every overridable task-level setting.
@SuppressWarnings("deprecation")
@Test
public void shouldOverrideGlobalStreamsConfigWhenGivenNamedTopologyProps() {
    final Properties topologyOverrides = new Properties();
    topologyOverrides.put(StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG, 12345L);
    topologyOverrides.put(StreamsConfig.MAX_TASK_IDLE_MS_CONFIG, 500L);
    topologyOverrides.put(StreamsConfig.TASK_TIMEOUT_MS_CONFIG, 1000L);
    topologyOverrides.put(StreamsConfig.BUFFERED_RECORDS_PER_PARTITION_CONFIG, 15);
    topologyOverrides.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, MockTimestampExtractor.class);
    topologyOverrides.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG, LogAndContinueExceptionHandler.class);
    topologyOverrides.put(StreamsConfig.DEFAULT_DSL_STORE_CONFIG, StreamsConfig.IN_MEMORY);

    final StreamsConfig config = new StreamsConfig(StreamsTestUtils.getStreamsConfig());
    final InternalTopologyBuilder topologyBuilder = new InternalTopologyBuilder(
        new TopologyConfig(
            "my-topology",
            config,
            topologyOverrides)
    );

    // Each assertion checks one overridden setting landed in the topology config.
    assertThat(topologyBuilder.topologyConfigs().cacheSize, is(12345L));
    assertThat(topologyBuilder.topologyConfigs().getTaskConfig().maxTaskIdleMs, equalTo(500L));
    assertThat(topologyBuilder.topologyConfigs().getTaskConfig().taskTimeoutMs, equalTo(1000L));
    assertThat(topologyBuilder.topologyConfigs().getTaskConfig().maxBufferedSize, equalTo(15));
    assertThat(topologyBuilder.topologyConfigs().getTaskConfig().timestampExtractor.getClass(), equalTo(MockTimestampExtractor.class));
    assertThat(topologyBuilder.topologyConfigs().getTaskConfig().deserializationExceptionHandler.getClass(), equalTo(LogAndContinueExceptionHandler.class));
    assertThat(topologyBuilder.topologyConfigs().parseStoreType(), equalTo(Materialized.StoreType.IN_MEMORY));
}
/**
 * Static factory for a free identifier with the given name.
 *
 * @param identifier the identifier text
 * @return a new {@code UFreeIdent} wrapping the name
 */
public static UFreeIdent create(CharSequence identifier) {
    return new AutoValue_UFreeIdent(StringName.of(identifier));
}
// Identifiers with the same name are equal; different names are not.
@Test
public void equality() {
    new EqualsTester()
        .addEqualityGroup(UFreeIdent.create("foo"))
        .addEqualityGroup(UFreeIdent.create("bar"))
        .testEquals();
}
/**
 * Ensures the session has an open, authenticated connection.
 *
 * @param session  the session to check
 * @param callback cancel callback passed to the connect step
 * @return {@code false} when the connection was already open, {@code true}
 *     when a new connection was established
 * @throws ConnectionCanceledException when the bookmark has a configurable
 *     hostname but none is set
 * @throws BackgroundException from credential validation or connecting
 */
@Override
public boolean check(final Session<?> session, final CancelCallback callback) throws BackgroundException {
    final Host bookmark = session.getHost();
    // A configurable-but-blank hostname can never be connected to.
    if (bookmark.getProtocol().isHostnameConfigurable() && StringUtils.isBlank(bookmark.getHostname())) {
        throw new ConnectionCanceledException();
    }
    if (session.isConnected()) {
        if (log.isDebugEnabled()) {
            log.debug(String.format("Skip opening connection for session %s", session));
        }
        // Connection already open
        return false;
    }
    // Obtain password from keychain or prompt
    synchronized (login) {
        login.validate(bookmark, prompt, new LoginOptions(bookmark.getProtocol()));
    }
    this.connect(session, callback);
    return true;
}
// Login flow: the stored keychain password ("a") is tried first and fails,
// then the user is prompted and supplies a new password ("b"); the test stub
// then raises LoginCanceledException, which must propagate out of check().
@Test(expected = LoginCanceledException.class)
public void testPasswordChange() throws Exception {
    final AtomicBoolean connected = new AtomicBoolean();
    final AtomicBoolean keychain = new AtomicBoolean();
    final AtomicBoolean prompt = new AtomicBoolean();
    final LoginConnectionService s = new LoginConnectionService(new DisabledLoginCallback() {
        @Override
        public void warn(final Host bookmark, final String title, final String message,
                         final String continueButton, final String disconnectButton, final String preference) {
            //
        }

        @Override
        public Credentials prompt(final Host bookmark, final String username, final String title,
                                  final String reason, final LoginOptions options) {
            prompt.set(true);
            // New password entered
            return new Credentials(username, "b");
        }
    }, new DisabledHostKeyCallback(), new DisabledPasswordStore() {
        @Override
        public String findLoginPassword(final Host bookmark) {
            keychain.set(true);
            // Old password stored
            return "a";
        }
    }, new DisabledProgressListener());
    final Session session = new NullSession(new Host(new TestProtocol(), "localhost", new Credentials("user", ""))) {
        @Override
        public Void connect(final ProxyFinder proxy, final HostKeyCallback key,
                            final LoginCallback prompt, final CancelCallback cancel) {
            connected.set(true);
            return null;
        }

        @Override
        public boolean isConnected() {
            return connected.get();
        }

        @Override
        public void login(final LoginCallback l, final CancelCallback cancel) throws BackgroundException {
            // Second attempt: user-entered password must be in effect.
            if (prompt.get()) {
                assertEquals("b", host.getCredentials().getPassword());
                throw new LoginCanceledException();
            }
            // First attempt: keychain password must be in effect and fail.
            if (keychain.get()) {
                assertFalse(prompt.get());
                assertEquals("a", host.getCredentials().getPassword());
                throw new LoginFailureException("f");
            }
        }
    };
    try {
        s.check(session, new DisabledCancelCallback());
    } finally {
        // Both credential sources must have been consulted, in order.
        assertTrue(keychain.get());
        assertTrue(prompt.get());
    }
}
/**
 * FEEL {@code lower case(string)} built-in: returns the input lowered.
 *
 * @param string the string to lower-case; must not be null
 * @return the lower-cased string, or an invalid-parameters error when null
 */
public FEELFnResult<String> invoke(@ParameterName("string") String string) {
    if ( string == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "string", "cannot be null"));
    } else {
        // Fix: use a locale-independent lowering. Plain toLowerCase() depends on the
        // JVM default locale (e.g. Turkish dotless-i), which would make this FEEL
        // built-in return different results on differently-configured hosts.
        return FEELFnResult.ofResult( string.toLowerCase(java.util.Locale.ROOT) );
    }
}
// Mixed-case input must be fully lower-cased.
@Test
void invokeMixedCaseString() {
    FunctionTestUtil.assertResult(stringLowerCaseFunction.invoke("testSTRing"), "teststring");
}
/**
 * Converts a logical schema's columns into the REST-entity field list.
 *
 * @param schema the source schema; must contain at least one column
 * @return one {@link FieldInfo} per schema column, in schema order
 * @throws IllegalArgumentException if the schema has no columns
 */
public static List<FieldInfo> buildSourceSchemaEntity(final LogicalSchema schema) {
    // Reject column-less schemas up front rather than returning an empty list.
    if (schema.columns().isEmpty()) {
        throw new IllegalArgumentException("Root schema should contain columns: " + schema);
    }
    return schema.columns().stream()
        .map(column -> EntityUtil.toFieldInfo(column))
        .collect(Collectors.toList());
}
// A DECIMAL column must surface its precision and scale as schema parameters
// on the resulting FieldInfo.
@Test
public void shouldBuildCorrectDecimalField() {
    // Given:
    final SqlDecimal decimal = SqlTypes.decimal(10, 9);
    final LogicalSchema schema = LogicalSchema.builder()
        .valueColumn(ColumnName.of("field"), decimal)
        .build();

    // When:
    final List<FieldInfo> fields = EntityUtil.buildSourceSchemaEntity(schema);

    // Then:
    assertThat(fields, hasSize(1));
    assertThat(fields.get(0).getName(), equalTo("field"));
    assertThat(fields.get(0).getSchema().getTypeName(), equalTo("DECIMAL"));
    assertThat(fields.get(0).getSchema().getFields(), equalTo(Optional.empty()));
    assertThat(fields.get(0).getSchema().getParameters().get(SqlDecimal.SCALE), equalTo(decimal.getScale()));
    assertThat(fields.get(0).getSchema().getParameters().get(SqlDecimal.PRECISION), equalTo(decimal.getPrecision()));
}
/**
 * Creates a tracker for partial-result eligibility of a query's tasks.
 *
 * @param partialResultQueryManager  manager that schedules partial-result completion
 * @param minCompletionRatioThreshold minimum fraction of tasks that must finish
 *                                    before partial results may be returned
 * @param timeMultiplier             multiplier applied to elapsed time budgeting
 * @param warningCollector           sink for the partial-result warning
 */
public PartialResultQueryTaskTracker(
        PartialResultQueryManager partialResultQueryManager,
        double minCompletionRatioThreshold,
        double timeMultiplier,
        WarningCollector warningCollector)
{
    this.partialResultQueryManager = requireNonNull(partialResultQueryManager, "partialResultQueryManager is null");
    this.minCompletionRatioThreshold = minCompletionRatioThreshold;
    this.timeMultiplier = timeMultiplier;
    this.warningCollector = requireNonNull(warningCollector, "warningCollector is null");
    // Anchor elapsed-time measurements to construction time.
    this.startTime = System.nanoTime();
}
@Test public void testPartialResultQueryTaskTracker() throws Exception { PartialResultQueryTaskTracker tracker = new PartialResultQueryTaskTracker(partialResultQueryManager, 0.50, 2.0, warningCollector); InternalNode node1 = new InternalNode( UUID.randomUUID().toString(), URI.create("https://192.0.2.8"), new NodeVersion("1"), false, false, false, false); InternalNode node2 = new InternalNode( UUID.randomUUID().toString(), URI.create("https://192.0.2.9"), new NodeVersion("1"), false, false, false, false); TaskId taskId1 = new TaskId("test1", 1, 0, 1, 0); TaskId taskId2 = new TaskId("test2", 2, 0, 1, 0); RemoteTask task1 = taskFactory.createTableScanTask(taskId1, node1, ImmutableList.of(), new NodeTaskMap.NodeStatsTracker(delta -> {}, delta -> {}, (age, delta) -> {})); RemoteTask task2 = taskFactory.createTableScanTask(taskId2, node2, ImmutableList.of(), new NodeTaskMap.NodeStatsTracker(delta -> {}, delta -> {}, (age, delta) -> {})); tracker.trackTask(task1); tracker.trackTask(task2); // Assert that completion ratio is 0.0 since the tasks did not complete yet assertEquals(0.0, tracker.getTaskCompletionRatio()); tracker.completeTaskScheduling(); tracker.recordTaskFinish(task1.getTaskInfo()); // Assert that completion ratio is 0.5 since we have set that task1 finished in above line assertEquals(0.5, tracker.getTaskCompletionRatio()); // Assert that the query is added to query manager, queue size = 1 since the query reached minCompletion ratio of 0.5 and is eligible for partial results assertEquals(1, partialResultQueryManager.getQueueSize()); // Sleep for 6 seconds so that we give enough time for query manager to cancel tasks and complete the query with partial results Thread.sleep(6000); assertEquals(0, partialResultQueryManager.getQueueSize()); // Assert that partial result warning is set correctly assertEquals(1, warningCollector.getWarnings().size()); PrestoWarning prestoWarning = warningCollector.getWarnings().get(0); // Assert that warning code is set to 
PARTIAL_RESULT_WARNING assertEquals(PARTIAL_RESULT_WARNING.toWarningCode(), prestoWarning.getWarningCode()); // Assert that completion percent of 50.00 is specified correctly in the warning message assertEquals("Partial results are returned. Only 50.00 percent of the data is read.", prestoWarning.getMessage()); }
public static <R extends ResourceVersion<R>> Collection<CompletedCheckpoint> retrieveCompletedCheckpoints( StateHandleStore<CompletedCheckpoint, R> checkpointStateHandleStore, CheckpointStoreUtil completedCheckpointStoreUtil) throws Exception { LOG.info("Recovering checkpoints from {}.", checkpointStateHandleStore); // Get all there is first. final List<Tuple2<RetrievableStateHandle<CompletedCheckpoint>, String>> initialCheckpoints = checkpointStateHandleStore.getAllAndLock(); // Sort checkpoints by name. initialCheckpoints.sort(Comparator.comparing(o -> o.f1)); final int numberOfInitialCheckpoints = initialCheckpoints.size(); LOG.info( "Found {} checkpoints in {}.", numberOfInitialCheckpoints, checkpointStateHandleStore); final List<CompletedCheckpoint> retrievedCheckpoints = new ArrayList<>(numberOfInitialCheckpoints); LOG.info("Trying to fetch {} checkpoints from storage.", numberOfInitialCheckpoints); for (Tuple2<RetrievableStateHandle<CompletedCheckpoint>, String> checkpointStateHandle : initialCheckpoints) { retrievedCheckpoints.add( checkNotNull( retrieveCompletedCheckpoint( completedCheckpointStoreUtil, checkpointStateHandle))); } return Collections.unmodifiableList(retrievedCheckpoints); }
@Test void testRetrievedCheckpointsAreOrderedChronologically() throws Exception { final TestingRetrievableStateStorageHelper<CompletedCheckpoint> storageHelper = new TestingRetrievableStateStorageHelper<>(); final List<Tuple2<RetrievableStateHandle<CompletedCheckpoint>, String>> handles = new ArrayList<>(); handles.add(Tuple2.of(storageHelper.store(createCompletedCheckpoint(0L)), "checkpoint-0")); handles.add(Tuple2.of(storageHelper.store(createCompletedCheckpoint(1L)), "checkpoint-1")); handles.add(Tuple2.of(storageHelper.store(createCompletedCheckpoint(2L)), "checkpoint-2")); Collections.shuffle(handles); final TestingStateHandleStore<CompletedCheckpoint> stateHandleStore = TestingStateHandleStore.<CompletedCheckpoint>newBuilder() .setGetAllSupplier(() -> handles) .build(); final Collection<CompletedCheckpoint> completedCheckpoints = DefaultCompletedCheckpointStoreUtils.retrieveCompletedCheckpoints( stateHandleStore, new SimpleCheckpointStoreUtil()); // Make sure checkpoints are ordered from earliest to latest. assertThat(completedCheckpoints) .extracting(CompletedCheckpoint::getCheckpointID) .containsExactly(0L, 1L, 2L); }
// Exposes the legacy Settings object through the Configuration interface via an adapter.
@Bean("Configuration") public Configuration provide(Settings settings) { return new ServerConfigurationAdapter(settings); }
// Documents the known behavioral difference between Configuration and Settings for
// quoted multivalue strings, across declared, non-multivalue and multivalue keys.
@Test @UseDataProvider("quotedStrings1") public void getStringArray_supports_quoted_strings_when_settings_does_not(String str, String[] configurationExpected, String[] settingsExpected) { settings.setProperty(nonDeclaredKey, str); settings.setProperty(nonMultivalueKey, str); settings.setProperty(multivalueKey, str); Configuration configuration = underTest.provide(settings); getStringArrayBehaviorDiffers(configuration, nonDeclaredKey, configurationExpected, settingsExpected); getStringArrayBehaviorDiffers(configuration, nonMultivalueKey, configurationExpected, settingsExpected); getStringArrayBehaviorDiffers(configuration, multivalueKey, configurationExpected, settingsExpected); }
// Convenience overload: declines a checkpoint without an underlying cause.
private void declineCheckpoint(long checkpointID, CheckpointFailureReason failureReason) { declineCheckpoint(checkpointID, failureReason, null); }
// Drives a task through its lifecycle and checks that checkpoints are declined with
// the expected reason at each stage: before start (TASK_NOT_READY), while closing
// (TASK_CLOSING), and on invokable failures (TASK_FAILURE).
@Test public void testDeclineCheckpoint() throws Exception { TestCheckpointResponder testCheckpointResponder = new TestCheckpointResponder(); final Task task = createTaskBuilder() .setInvokable(InvokableDecliningCheckpoints.class) .setCheckpointResponder(testCheckpointResponder) .build(Executors.directExecutor()); assertCheckpointDeclined( task, testCheckpointResponder, 1, CheckpointFailureReason.CHECKPOINT_DECLINED_TASK_NOT_READY); task.startTaskThread(); try { awaitInvokableLatch(task); assertEquals(ExecutionState.RUNNING, task.getExecutionState()); assertCheckpointDeclined( task, testCheckpointResponder, InvokableDecliningCheckpoints.REJECTED_EXECUTION_CHECKPOINT_ID, CheckpointFailureReason.CHECKPOINT_DECLINED_TASK_CLOSING); assertCheckpointDeclined( task, testCheckpointResponder, InvokableDecliningCheckpoints.THROWING_CHECKPOINT_ID, CheckpointFailureReason.TASK_FAILURE); assertCheckpointDeclined( task, testCheckpointResponder, InvokableDecliningCheckpoints.TRIGGERING_FAILED_CHECKPOINT_ID, CheckpointFailureReason.TASK_FAILURE); } finally { triggerInvokableLatch(task); task.getExecutingThread().join(); } assertEquals(ExecutionState.FINISHED, task.getTerminationFuture().getNow(null)); }
// Shuts down the superclass resources first, then this server's branch-result executor.
@Override public void destroy() { super.destroy(); branchResultMessageExecutor.shutdown(); }
// Fixes the typo'd test name ("destory") and uses assertNotNull instead of the
// weaker assertTrue(x != null) form.
@Test
public void destroy() {
    // destroy() must complete without throwing; the server reference itself is untouched.
    nettyRemotingServer.destroy();
    Assertions.assertNotNull(nettyRemotingServer);
}
/**
 * Dispatches an incoming GBA "verstrekking aan afnemer" to the handler matching its
 * gebeurtenissoort (event type), with remote audit logging per type.
 *
 * <p>Change: an unrecognized gebeurtenissoort was previously dropped silently by the
 * switch; it is now logged so operators can detect unexpected message types.
 */
public void processVerstrekkingAanAfnemer(VerstrekkingAanAfnemer verstrekkingAanAfnemer) {
    if (logger.isDebugEnabled())
        logger.debug("Processing verstrekkingAanAfnemer: {}", marshallElement(verstrekkingAanAfnemer));

    // Correlate the incoming message with the afnemersbericht we originally sent.
    Afnemersbericht afnemersbericht = afnemersberichtRepository.findByOnzeReferentie(verstrekkingAanAfnemer.getReferentieId());
    if (mismatch(verstrekkingAanAfnemer, afnemersbericht)) {
        digidXClient.remoteLogBericht(Log.NO_RELATION_TO_SENT_MESSAGE, verstrekkingAanAfnemer, afnemersbericht);
        return;
    }
    switch (verstrekkingAanAfnemer.getGebeurtenissoort().getNaam()) {
        case "Null" -> {
            logger.info("Start processing Null message");
            dglResponseService.processNullMessage(verstrekkingAanAfnemer.getGebeurtenisinhoud().getNull(), afnemersbericht);
            digidXClient.remoteLogWithoutRelatingToAccount(Log.MESSAGE_PROCESSED, "Null");
        }
        case "Ag01" -> {
            logger.info("Start processing Ag01 message");
            dglResponseService.processAg01(verstrekkingAanAfnemer.getGebeurtenisinhoud().getAg01(), afnemersbericht);
            digidXClient.remoteLogBericht(Log.MESSAGE_PROCESSED, verstrekkingAanAfnemer, afnemersbericht);
        }
        case "Ag31" -> {
            logger.info("Start processing Ag31 message");
            dglResponseService.processAg31(verstrekkingAanAfnemer.getGebeurtenisinhoud().getAg31(), afnemersbericht);
            digidXClient.remoteLogBericht(Log.MESSAGE_PROCESSED, verstrekkingAanAfnemer, afnemersbericht);
        }
        case "Af01" -> {
            logger.info("Start processing Af01 message");
            dglResponseService.processAf01(verstrekkingAanAfnemer.getGebeurtenisinhoud().getAf01(), afnemersbericht);
            digidXClient.remoteLogBericht(Log.MESSAGE_PROCESSED, verstrekkingAanAfnemer, afnemersbericht);
        }
        case "Af11" -> {
            logger.info("Start processing Af11 message");
            dglResponseService.processAf11(verstrekkingAanAfnemer.getGebeurtenisinhoud().getAf11(), afnemersbericht);
            digidXClient.remoteLogWithoutRelatingToAccount(Log.MESSAGE_PROCESSED, "Af11");
        }
        case "Gv01" -> {
            logger.info("Start processing Gv01 message");
            Gv01 gv01 = verstrekkingAanAfnemer.getGebeurtenisinhoud().getGv01();
            dglResponseService.processGv01(gv01);
            // Prefer the previous BSN value; fall back to the current one when absent.
            String bsn = CategorieUtil.findBsnOudeWaarde(gv01.getCategorie());
            if (bsn == null) {
                bsn = CategorieUtil.findBsn(gv01.getCategorie());
            }
            digidXClient.remoteLogSpontaneVerstrekking(Log.MESSAGE_PROCESSED, "Gv01", gv01.getANummer(), bsn);
        }
        case "Ng01" -> {
            logger.info("Start processing Ng01 message");
            Ng01 ng01 = verstrekkingAanAfnemer.getGebeurtenisinhoud().getNg01();
            dglResponseService.processNg01(ng01);
            digidXClient.remoteLogSpontaneVerstrekking(Log.MESSAGE_PROCESSED, "Ng01", CategorieUtil.findANummer(ng01.getCategorie()), "");
        }
        case "Wa11" -> {
            logger.info("Start processing Wa11 message");
            // NOTE(review): unlike the other cases, Wa11 performs no remote audit log — confirm intentional.
            dglResponseService.processWa11(verstrekkingAanAfnemer.getGebeurtenisinhoud().getWa11());
        }
        default -> logger.warn("Received verstrekkingAanAfnemer with unsupported gebeurtenissoort: {}",
                verstrekkingAanAfnemer.getGebeurtenissoort().getNaam());
    }
}
// A "Null" gebeurtenis must be routed to processNullMessage and remote-logged
// without relating it to an account.
@Test
public void testProcessNullMessage() {
    Null testNullMessage = TestDglMessagesUtil.createTestNullMessage();
    VerstrekkingInhoudType inhoud = new VerstrekkingInhoudType();
    inhoud.setNull(testNullMessage);
    GeversioneerdType soort = new GeversioneerdType();
    soort.setNaam("Null");

    when(verstrekkingAanAfnemer.getReferentieId()).thenReturn("referentieId");
    when(afnemersberichtRepository.findByOnzeReferentie("referentieId")).thenReturn(afnemersbericht);
    when(verstrekkingAanAfnemer.getGebeurtenissoort()).thenReturn(soort);
    when(verstrekkingAanAfnemer.getGebeurtenisinhoud()).thenReturn(inhoud);

    classUnderTest.processVerstrekkingAanAfnemer(verstrekkingAanAfnemer);

    verify(dglResponseService, times(1)).processNullMessage(testNullMessage, afnemersbericht);
    verify(digidXClient, times(1)).remoteLogWithoutRelatingToAccount(Log.MESSAGE_PROCESSED, "Null");
}
// Metric name for client-side RPC call duration, following OpenTelemetry-style naming.
@Override public String getName() { return "rpc.client.duration"; }
// Pins the metric name so accidental renames are caught.
@Test void testGetName() { Assertions.assertEquals("rpc.client.duration", dubboClientObservationConvention.getName()); }
/**
 * Builds a Matrix from a COS object.
 *
 * <p>Anything other than a COSArray with at least six COSNumber entries yields the
 * default (identity) matrix rather than an error.
 *
 * @param base the COS object to interpret as a transformation matrix
 * @return the matrix from the array's first six numbers, or a default matrix
 */
public static Matrix createMatrix(COSBase base) {
    if (base instanceof COSArray) {
        COSArray array = (COSArray) base;
        if (array.size() >= 6) {
            boolean allNumbers = true;
            for (int i = 0; i < 6; ++i) {
                if (!(array.getObject(i) instanceof COSNumber)) {
                    allNumbers = false;
                    break;
                }
            }
            if (allNumbers) {
                return new Matrix(array);
            }
        }
    }
    return new Matrix();
}
// Every invalid input shape (non-array, short array, non-numeric entries) must fall
// back to the pristine identity matrix.
@Test void testCreateMatrixUsingInvalidInput() { // anything but a COSArray is invalid and leads to an initial matrix Matrix createMatrix = Matrix.createMatrix(COSName.A); assertMatrixIsPristine(createMatrix); // a COSArray with fewer than 6 entries leads to an initial matrix COSArray cosArray = new COSArray(); cosArray.add(COSName.A); createMatrix = Matrix.createMatrix(cosArray); assertMatrixIsPristine(createMatrix); // a COSArray containing other kind of objects than COSNumber leads to an initial matrix cosArray = new COSArray(); for (int i = 0; i < 6; i++) { cosArray.add(COSName.A); } createMatrix = Matrix.createMatrix(cosArray); assertMatrixIsPristine(createMatrix); }
// Reads a one-byte text-encoding marker, then a string in that encoding; the marker
// byte itself consumes one byte of the max budget.
protected String readEncodingAndString(int max) throws IOException { byte encoding = readByte(); return readEncodedString(encoding, max - 1); }
// Two UTF-16 strings with opposite BOMs must both decode correctly, proving the
// reader honors the byte-order mark per string.
@Test public void testReadUtf16RespectsBom() throws IOException { byte[] data = { ID3Reader.ENCODING_UTF16_WITH_BOM, (byte) 0xff, (byte) 0xfe, // BOM: Little-endian 'A', 0, 'B', 0, 'C', 0, 0, 0, // Null-terminated ID3Reader.ENCODING_UTF16_WITH_BOM, (byte) 0xfe, (byte) 0xff, // BOM: Big-endian 0, 'D', 0, 'E', 0, 'F', 0, 0, // Null-terminated }; CountingInputStream inputStream = new CountingInputStream(new ByteArrayInputStream(data)); ID3Reader reader = new ID3Reader(inputStream); assertEquals("ABC", reader.readEncodingAndString(1000)); assertEquals("DEF", reader.readEncodingAndString(1000)); }
/**
 * Computes unload (transfer) decisions that repeatedly move namespace bundles from the
 * currently most-loaded broker towards the least-loaded one, until the cluster load
 * standard deviation meets the configured target or no transferable brokers/bundles remain.
 *
 * <p>Visible flow: refresh broker lookup data (abort on timeout/failure), update load stats
 * (skip with a counter update when stats can't be refreshed), require the shedding condition
 * to hit more times than the configured threshold, then loop: pick max/min brokers, compute
 * the throughput to offload, and mark top bundles for unloading — optionally swapping bundles
 * back from the min broker when transfer mode is enabled and a single unload would overshoot.
 * Decisions accumulate in {@code decisionCache}; skip/failure reasons go to {@code counter}.
 *
 * <p>NOTE(review): the loop is guarded against starvation only by the transferable-broker
 * check and the per-broker skip counters — confirm termination for pathological load data.
 *
 * @param recentlyUnloadedBundles bundles to skip because they were unloaded recently
 * @param recentlyUnloadedBrokers brokers excluded from the stats update
 * @return the set of unload decisions made in this round (possibly empty)
 */
@Override public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context, Map<String, Long> recentlyUnloadedBundles, Map<String, Long> recentlyUnloadedBrokers) { final var conf = context.brokerConfiguration(); decisionCache.clear(); stats.clear(); Map<String, BrokerLookupData> availableBrokers; try { availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync() .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS); } catch (ExecutionException | InterruptedException | TimeoutException e) { counter.update(Failure, Unknown); log.warn("Failed to fetch available brokers. Stop unloading.", e); return decisionCache; } try { final var loadStore = context.brokerLoadDataStore(); stats.setLoadDataStore(loadStore); boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log); var skipReason = stats.update( context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf); if (skipReason.isPresent()) { if (debugMode) { log.warn(CANNOT_CONTINUE_UNLOAD_MSG + " Skipped the load stat update. 
Reason:{}.", skipReason.get()); } counter.update(Skip, skipReason.get()); return decisionCache; } counter.updateLoadData(stats.avg, stats.std); if (debugMode) { log.info("brokers' load stats:{}", stats); } // skip metrics int numOfBrokersWithEmptyLoadData = 0; int numOfBrokersWithFewBundles = 0; final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd(); boolean transfer = conf.isLoadBalancerTransferEnabled(); if (stats.std() > targetStd || isUnderLoaded(context, stats.peekMinBroker(), stats) || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) { unloadConditionHitCount++; } else { unloadConditionHitCount = 0; } if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Shedding condition hit count:{} is less than or equal to the threshold:{}.", unloadConditionHitCount, conf.getLoadBalancerSheddingConditionHitCountThreshold()); } counter.update(Skip, HitCount); return decisionCache; } while (true) { if (!stats.hasTransferableBrokers()) { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Exhausted target transfer brokers."); } break; } UnloadDecision.Reason reason; if (stats.std() > targetStd) { reason = Overloaded; } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) { reason = Underloaded; if (debugMode) { log.info(String.format("broker:%s is underloaded:%s although " + "load std:%.2f <= targetStd:%.2f. " + "Continuing unload for this underloaded broker.", stats.peekMinBroker(), context.brokerLoadDataStore().get(stats.peekMinBroker()).get(), stats.std(), targetStd)); } } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) { reason = Overloaded; if (debugMode) { log.info(String.format("broker:%s is overloaded:%s although " + "load std:%.2f <= targetStd:%.2f. 
" + "Continuing unload for this overloaded broker.", stats.peekMaxBroker(), context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(), stats.std(), targetStd)); } } else { if (debugMode) { log.info(CANNOT_CONTINUE_UNLOAD_MSG + "The overall cluster load meets the target, std:{} <= targetStd:{}." + "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.", stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker()); } break; } String maxBroker = stats.pollMaxBroker(); String minBroker = stats.peekMinBroker(); Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker); Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker); if (maxBrokerLoadData.isEmpty()) { log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " MaxBrokerLoadData is empty.", maxBroker)); numOfBrokersWithEmptyLoadData++; continue; } if (minBrokerLoadData.isEmpty()) { log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker); numOfBrokersWithEmptyLoadData++; continue; } double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA(); double minLoad = minBrokerLoadData.get().getWeightedMaxEMA(); double offload = (maxLoad - minLoad) / 2; BrokerLoadData brokerLoadData = maxBrokerLoadData.get(); double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn() + brokerLoadData.getMsgThroughputOut(); double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn() + minBrokerLoadData.get().getMsgThroughputOut(); double offloadThroughput = maxBrokerThroughput * offload / maxLoad; if (debugMode) { log.info(String.format( "Attempting to shed load from broker:%s%s, which has the max resource " + "usage:%.2f%%, targetStd:%.2f," + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.", maxBroker, transfer ? 
" to broker:" + minBroker : "", maxLoad * 100, targetStd, offload * 100, offloadThroughput / KB )); } double trafficMarkedToOffload = 0; double trafficMarkedToGain = 0; Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker); if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) { log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " TopBundlesLoadData is empty.", maxBroker)); numOfBrokersWithEmptyLoadData++; continue; } var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData(); if (maxBrokerTopBundlesLoadData.size() == 1) { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " Sole namespace bundle:%s is overloading the broker. ", maxBroker, maxBrokerTopBundlesLoadData.iterator().next())); continue; } Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker); var minBrokerTopBundlesLoadDataIter = minBundlesLoadData.isPresent() ? 
minBundlesLoadData.get().getTopBundlesLoadData().iterator() : null; if (maxBrokerTopBundlesLoadData.isEmpty()) { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " Broker overloaded despite having no bundles", maxBroker)); continue; } int remainingTopBundles = maxBrokerTopBundlesLoadData.size(); for (var e : maxBrokerTopBundlesLoadData) { String bundle = e.bundleName(); if (channel != null && !channel.isOwner(bundle, maxBroker)) { if (debugMode) { log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " MaxBroker:%s is not the owner.", bundle, maxBroker)); } continue; } if (recentlyUnloadedBundles.containsKey(bundle)) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " Bundle has been recently unloaded at ts:%d.", bundle, recentlyUnloadedBundles.get(bundle))); } continue; } if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " This unload can't meet " + "affinity(isolation) or anti-affinity group policies.", bundle)); } continue; } if (remainingTopBundles <= 1) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is" + " less than or equal to 1.", bundle, maxBroker)); } break; } var bundleData = e.stats(); double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut; boolean swap = false; List<Unload> minToMaxUnloads = new ArrayList<>(); double minBrokerBundleSwapThroughput = 0.0; if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) { // see if we can swap bundles from min to max broker to balance better. 
if (transfer && minBrokerTopBundlesLoadDataIter != null) { var maxBrokerNewThroughput = maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain - maxBrokerBundleThroughput; var minBrokerNewThroughput = minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput; while (minBrokerTopBundlesLoadDataIter.hasNext()) { var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next(); if (!isTransferable(context, availableBrokers, minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) { continue; } var minBrokerBundleThroughput = minBrokerBundleData.stats().msgThroughputIn + minBrokerBundleData.stats().msgThroughputOut; var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput; var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput; if (maxBrokerNewThroughputTmp < maxBrokerThroughput && minBrokerNewThroughputTmp < maxBrokerThroughput) { minToMaxUnloads.add(new Unload(minBroker, minBrokerBundleData.bundleName(), Optional.of(maxBroker))); maxBrokerNewThroughput = maxBrokerNewThroughputTmp; minBrokerNewThroughput = minBrokerNewThroughputTmp; minBrokerBundleSwapThroughput += minBrokerBundleThroughput; if (minBrokerNewThroughput <= maxBrokerNewThroughput && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) { swap = true; break; } } } } if (!swap) { if (debugMode) { log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is " + "greater than the target :%.2f KByte/s.", bundle, (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB, trafficMarkedToGain / KB, (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB, offloadThroughput / KB)); } break; } } Unload unload; if (transfer) { if (swap) { minToMaxUnloads.forEach(minToMaxUnload -> { if (debugMode) { log.info("Decided to gain bundle:{} from min broker:{}", minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker()); } var decision = 
new UnloadDecision(); decision.setUnload(minToMaxUnload); decision.succeed(reason); decisionCache.add(decision); }); if (debugMode) { log.info(String.format( "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.", minBrokerBundleSwapThroughput / KB, minBroker, maxBroker)); trafficMarkedToGain += minBrokerBundleSwapThroughput; } } unload = new Unload(maxBroker, bundle, Optional.of(minBroker)); } else { unload = new Unload(maxBroker, bundle); } var decision = new UnloadDecision(); decision.setUnload(unload); decision.succeed(reason); decisionCache.add(decision); trafficMarkedToOffload += maxBrokerBundleThroughput; remainingTopBundles--; if (debugMode) { log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s." + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s." + " Target:%.2f KByte/s.", bundle, maxBrokerBundleThroughput / KB, trafficMarkedToOffload / KB, trafficMarkedToGain / KB, (trafficMarkedToOffload - trafficMarkedToGain) / KB, offloadThroughput / KB)); } } if (trafficMarkedToOffload > 0) { var adjustedOffload = (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput; stats.offload(maxLoad, minLoad, adjustedOffload); if (debugMode) { log.info( String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}", stats, maxLoad, minLoad, adjustedOffload)); } } else { numOfBrokersWithFewBundles++; log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG + " There is no bundle that can be unloaded in top bundles load data. 
" + "Consider splitting bundles owned by the broker " + "to make each bundle serve less traffic " + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport" + " to report more bundles in the top bundles load data.", maxBroker)); } } // while end if (debugMode) { log.info("decisionCache:{}", decisionCache); } if (decisionCache.isEmpty()) { UnloadDecision.Reason reason; if (numOfBrokersWithEmptyLoadData > 0) { reason = NoLoadData; } else if (numOfBrokersWithFewBundles > 0) { reason = NoBundles; } else { reason = HitCount; } counter.update(Skip, reason); } else { unloadConditionHitCount = 0; } } catch (Throwable e) { log.error("Failed to process unloading. ", e); this.counter.update(Failure, Unknown); } return decisionCache; }
// A broker filter that fails asynchronously must not crash the shedder: the round
// produces no decisions and is counted as Skip/NoBundles, leaving load stats intact.
@Test public void testFilterHasException() throws MetadataStoreException { var filters = new ArrayList<BrokerFilter>(); BrokerFilter filter = new BrokerFilter() { @Override public String name() { return "Test-Filter"; } @Override public CompletableFuture<Map<String, BrokerLookupData>> filterAsync(Map<String, BrokerLookupData> brokers, ServiceUnitId serviceUnit, LoadManagerContext context) { return FutureUtil.failedFuture(new BrokerFilterException("test")); } }; filters.add(filter); var counter = new UnloadCounter(); TransferShedder transferShedder = new TransferShedder(pulsar, counter, filters, isolationPoliciesHelper, antiAffinityGroupPolicyHelper); var ctx = setupContext(); ctx.brokerConfiguration().setLoadBalancerSheddingBundlesWithPoliciesEnabled(true); var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of()); assertTrue(res.isEmpty()); assertEquals(counter.getBreakdownCounters().get(Skip).get(NoBundles).get(), 1); assertEquals(counter.getLoadAvg(), setupLoadAvg); assertEquals(counter.getLoadStd(), setupLoadStd); }
// Edge weight = travel seconds divided by priority, plus a distance-based cost;
// zero priority or infinite time/cost makes the edge inaccessible. Unfavored edges
// (start/stop/via points) get a fixed heading penalty added to the travel time.
@Override public double calcEdgeWeight(EdgeIteratorState edgeState, boolean reverse) { double priority = edgeToPriorityMapping.get(edgeState, reverse); if (priority == 0) return Double.POSITIVE_INFINITY; final double distance = edgeState.getDistance(); double seconds = calcSeconds(distance, edgeState, reverse); if (Double.isInfinite(seconds)) return Double.POSITIVE_INFINITY; // add penalty at start/stop/via points if (edgeState.get(EdgeIteratorState.UNFAVORED_EDGE)) seconds += headingPenaltySeconds; double distanceCosts = distance * distanceInfluence; if (Double.isInfinite(distanceCosts)) return Double.POSITIVE_INFINITY; return seconds / priority + distanceCosts; }
// A priority multiplier gated on "car_average_speed > 40" must leave the 40 km/h edge
// unchanged and double the effective weight contribution of the 50 km/h edge.
@Test public void testSpeedBiggerThan() { EdgeIteratorState edge40 = graph.edge(0, 1).setDistance(10).set(avSpeedEnc, 40); EdgeIteratorState edge50 = graph.edge(1, 2).setDistance(10).set(avSpeedEnc, 50); CustomModel customModel = createSpeedCustomModel(avSpeedEnc).setDistanceInfluence(70d). addToPriority(If("car_average_speed > 40", MULTIPLY, "0.5")); Weighting weighting = createWeighting(customModel); assertEquals(1.60, weighting.calcEdgeWeight(edge40, false), 0.01); assertEquals(2.14, weighting.calcEdgeWeight(edge50, false), 0.01); }
/**
 * A null-tolerant variant of {@code Collectors.groupingBy}: elements whose classifier
 * result (or the element itself) is null are grouped under the {@code null} key instead
 * of throwing.
 *
 * <p>The unchecked casts exploit type erasure: the map factory produces {@code M}
 * but is used as a {@code Map<K, A>} accumulator, and — when the downstream has the
 * IDENTITY_FINISH characteristic — the intermediate map is returned as-is; otherwise
 * each container is finished in place via {@code replaceAll} before the final cast.
 *
 * @param classifier maps each element to its group key; a null result groups under null
 * @param mapFactory supplies the result map
 * @param downstream collector applied to the elements of each group
 * @return a grouping collector with null-key support
 */
public static <T, K, D, A, M extends Map<K, D>> Collector<T, ?, M> groupingBy(Function<? super T, ? extends K> classifier, Supplier<M> mapFactory, Collector<? super T, A, D> downstream) { final Supplier<A> downstreamSupplier = downstream.supplier(); final BiConsumer<A, ? super T> downstreamAccumulator = downstream.accumulator(); final BiConsumer<Map<K, A>, T> accumulator = (m, t) -> { final K key = Opt.ofNullable(t).map(classifier).orElse(null); final A container = m.computeIfAbsent(key, k -> downstreamSupplier.get()); downstreamAccumulator.accept(container, t); }; final BinaryOperator<Map<K, A>> merger = mapMerger(downstream.combiner()); @SuppressWarnings("unchecked") final Supplier<Map<K, A>> mangledFactory = (Supplier<Map<K, A>>) mapFactory; if (downstream.characteristics().contains(Collector.Characteristics.IDENTITY_FINISH)) { return new SimpleCollector<>(mangledFactory, accumulator, merger, CH_ID); } else { @SuppressWarnings("unchecked") final Function<A, A> downstreamFinisher = (Function<A, A>) downstream.finisher(); final Function<Map<K, A>, M> finisher = intermediate -> { intermediate.replaceAll((k, v) -> downstreamFinisher.apply(v)); @SuppressWarnings("unchecked") final M castResult = (M) intermediate; return castResult; }; return new SimpleCollector<>(mangledFactory, accumulator, merger, finisher, CH_NOID); } }
// Verifies the value-mapping groupingBy overloads: custom map/collection factories are
// honored (LinkedHashMap/LinkedHashSet), and the two-arg overload defaults to HashMap/List.
@Test public void testGroupingByAfterValueMapped() { List<Integer> list = Arrays.asList(1, 1, 2, 2, 3, 4); Map<Boolean, Set<String>> map = list.stream() .collect(CollectorUtil.groupingBy(t -> (t & 1) == 0, String::valueOf, LinkedHashSet::new, LinkedHashMap::new)); assertEquals(LinkedHashMap.class, map.getClass()); assertEquals(new LinkedHashSet<>(Arrays.asList("2", "4")), map.get(Boolean.TRUE)); assertEquals(new LinkedHashSet<>(Arrays.asList("1", "3")), map.get(Boolean.FALSE)); map = list.stream() .collect(CollectorUtil.groupingBy(t -> (t & 1) == 0, String::valueOf, LinkedHashSet::new)); assertEquals(HashMap.class, map.getClass()); assertEquals(new LinkedHashSet<>(Arrays.asList("2", "4")), map.get(Boolean.TRUE)); assertEquals(new LinkedHashSet<>(Arrays.asList("1", "3")), map.get(Boolean.FALSE)); final Map<Boolean, List<String>> map2 = list.stream() .collect(CollectorUtil.groupingBy(t -> (t & 1) == 0, String::valueOf)); assertEquals(Arrays.asList("2", "2", "4"), map2.get(Boolean.TRUE)); assertEquals(Arrays.asList("1", "1", "3"), map2.get(Boolean.FALSE)); }
// Returns the counter registered under the given name, creating and registering it on first use.
public Counter counter(String name) { return getOrAdd(name, MetricBuilder.COUNTERS); }
// Two lookups under the same name must yield the same instance, and registration
// must notify the listener exactly once.
@Test public void accessingACounterRegistersAndReusesTheCounter() { final Counter counter1 = registry.counter("thing"); final Counter counter2 = registry.counter("thing"); assertThat(counter1) .isSameAs(counter2); verify(listener).onCounterAdded("thing", counter1); }
/**
 * Range search in the BK-tree.
 *
 * <p>BK-trees only support discrete metrics, so the radius must be a positive whole
 * number (it is passed down as an int).
 *
 * @param q the query key
 * @param radius the search radius; must be a positive integer value
 * @param neighbors output list collecting all neighbors within the radius
 * @throws IllegalArgumentException if radius is not a positive integer
 */
@Override
public void search(K q, double radius, List<Neighbor<K, V>> neighbors) {
    if (radius <= 0 || radius != (int) radius) {
        // Original message claimed only "has to be an integer", which was wrong for
        // the radius <= 0 case; say "positive integer" to cover both rejections.
        throw new IllegalArgumentException("The parameter radius has to be a positive integer: " + radius);
    }
    root.search(q, (int) radius, neighbors);
}
// Cross-checks BK-tree range search against the naive (exhaustive) implementation
// on 100 query words: same result count and, after sorting, the same values.
// (Removed a block of commented-out debug printing left in the original.)
@Test
public void testRange() {
    System.out.println("range");
    List<Neighbor<String, String>> n1 = new ArrayList<>();
    List<Neighbor<String, String>> n2 = new ArrayList<>();
    for (int i = 1000; i < 1100; i++) {
        bktree.search(words[i], 1, n1);
        naive.search(words[i], 1, n2);
        assertEquals(n1.size(), n2.size());
        String[] s1 = new String[n1.size()];
        String[] s2 = new String[n2.size()];
        for (int j = 0; j < s1.length; j++) {
            s1[j] = n1.get(j).value;
            s2[j] = n2.get(j).value;
        }
        // Result order is unspecified, so compare as sorted multisets.
        Arrays.sort(s1);
        Arrays.sort(s2);
        for (int j = 0; j < s1.length; j++) {
            assertEquals(s1[j], s2[j]);
        }
        n1.clear();
        n2.clear();
    }
}
/**
 * Stores each getter reference under the field name derived from its lambda,
 * with the value the getter currently returns.
 *
 * @param fields bound getter references (e.g. {@code user::getName})
 * @return this Dict, for chaining
 */
public Dict setFields(Func0<?>... fields) {
    for (Func0<?> field : fields) {
        set(LambdaUtil.getFieldName(field), field.callWithRuntimeException());
    }
    return this;
}
// Field names come from the getter references; the set username is retrievable,
// while the never-set nickname yields null.
@Test public void setFieldsTest() { User user = GenericBuilder.of(User::new).with(User::setUsername, "hutool").build(); Dict dict = Dict.create(); dict.setFields(user::getNickname, user::getUsername); assertEquals("hutool", dict.get("username")); assertNull(dict.get("nickname")); }
/**
 * Extracts the raw plugin identifier from the node's "type" field.
 *
 * <p>Fix: {@code JsonNode.textValue()} returns null for non-textual nodes (objects,
 * arrays, numbers), so the original {@code type.textValue().isEmpty()} threw an NPE
 * for those; they now return null like a missing or empty type.
 *
 * @param node the plugin configuration node
 * @return the identifier text, or null when the type field is missing, non-textual or empty
 */
static String extractPluginRawIdentifier(final JsonNode node) {
    final JsonNode type = node.get(TYPE);
    if (type == null) {
        return null;
    }
    final String rawIdentifier = type.textValue();
    if (rawIdentifier == null || rawIdentifier.isEmpty()) {
        return null;
    }
    return rawIdentifier;
}
// An empty "type" text node must yield a null identifier, not an empty string.
@Test void shouldReturnNullPluginIdentifierGivenEmptyType() { Assertions.assertNull(PluginDeserializer.extractPluginRawIdentifier(new TextNode(""))); }
// Factory for the L2 instruction that decrements the MPLS TTL by one.
public static L2ModificationInstruction decMplsTtl() { return new L2ModificationInstruction.ModMplsTtlInstruction(); }
// The factory must produce an L2MODIFICATION instruction with the DEC_MPLS_TTL subtype.
@Test public void testDecMplsTtlMethod() { final Instruction instruction = Instructions.decMplsTtl(); final L2ModificationInstruction.ModMplsTtlInstruction modMplsTtlInstruction = checkAndConvert(instruction, Instruction.Type.L2MODIFICATION, L2ModificationInstruction.ModMplsTtlInstruction.class); assertThat(modMplsTtlInstruction.subtype(), is(L2ModificationInstruction.L2SubType.DEC_MPLS_TTL)); }
/**
 * Applies a task event to the state machine under the write lock, logging any
 * state transition and converting invalid transitions into an internal error.
 *
 * <p>Fix: the lock is now acquired <em>before</em> entering the try block. In the
 * original, {@code writeLock.lock()} was inside the try, so a failure in lock()
 * would reach the finally and unlock a lock that was never held, masking the real
 * error with an IllegalMonitorStateException.
 */
@Override
public void handle(TaskEvent event) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Processing " + event.getTaskID() + " of type " + event.getType());
    }
    writeLock.lock();
    try {
        TaskStateInternal oldState = getInternalState();
        try {
            stateMachine.doTransition(event.getType(), event);
        } catch (InvalidStateTransitionException e) {
            LOG.error("Can't handle this event at current state for " + this.taskId, e);
            internalError(event.getType());
        }
        if (oldState != getInternalState()) {
            LOG.info(taskId + " Task Transitioned from " + oldState + " to " + getInternalState());
        }
    } finally {
        writeLock.unlock();
    }
}
@Test public void testSpeculativeMapMultipleSucceedFetchFailure() { // Setup a scenario where speculative task wins, first attempt succeeds mockTask = createMockTask(TaskType.MAP); runSpeculativeTaskAttemptSucceeds(TaskEventType.T_ATTEMPT_SUCCEEDED); assertEquals(2, taskAttempts.size()); // speculative attempt retroactively fails from fetch failures mockTask.handle(new TaskTAttemptFailedEvent( taskAttempts.get(1).getAttemptId())); assertTaskScheduledState(); assertEquals(3, taskAttempts.size()); }
public final Sensor storeLevelSensor(final String taskId, final String storeName, final String sensorSuffix, final RecordingLevel recordingLevel, final Sensor... parents) { final String sensorPrefix = storeSensorPrefix(Thread.currentThread().getName(), taskId, storeName); // since the keys in the map storeLevelSensors contain the name of the current thread and threads only // access keys in which their name is contained, the value in the maps do not need to be thread safe // and we can use a LinkedList here. // TODO: In future, we could use thread local maps since each thread will exclusively access the set of keys // that contain its name. Similar is true for the other metric levels. Thread-level metrics need some // special attention, since they are created before the thread is constructed. The creation of those // metrics could be moved into the run() method of the thread. return getSensors(storeLevelSensors, sensorSuffix, sensorPrefix, recordingLevel, parents); }
@Test
public void shouldNotUseSameStoreLevelSensorKeyWithDifferentThreadIds() throws InterruptedException {
    final Metrics metrics = mock(Metrics.class);
    final ArgumentCaptor<String> sensorKeys = setUpSensorKeyTests(metrics);
    final StreamsMetricsImpl streamsMetrics =
        new StreamsMetricsImpl(metrics, CLIENT_ID, VERSION, time);

    // Register the same sensor from two different threads: generated keys
    // embed the thread name and must therefore differ.
    streamsMetrics.storeLevelSensor(TASK_ID1, STORE_NAME1, SENSOR_NAME_1, INFO_RECORDING_LEVEL);
    final Thread secondThread = new Thread(() ->
        streamsMetrics.storeLevelSensor(TASK_ID1, STORE_NAME1, SENSOR_NAME_1, INFO_RECORDING_LEVEL));
    secondThread.start();
    secondThread.join();

    assertThat(sensorKeys.getAllValues().get(0), not(sensorKeys.getAllValues().get(1)));
}
/**
 * Creates a new {@link Write} transform with default configuration.
 *
 * @param <T> element type being written
 */
public static <T> Write<T> write() {
    return new Write<T>();
}
@Test
public void testWriteWithoutPsWithNonNullableTableField() throws Exception {
    final int rowsToAdd = 10;

    // Beam schema with two fields; the database table below also declares a
    // NOT NULL column that has no counterpart in this schema.
    final Schema schema =
        Schema.builder()
            .addField(Schema.Field.of("column_boolean", Schema.FieldType.BOOLEAN))
            .addField(Schema.Field.of("column_string", Schema.FieldType.STRING))
            .build();

    final String tableName = DatabaseTestHelper.getTestTableName("UT_WRITE");
    final String createSql =
        "CREATE TABLE " + tableName + " (column_boolean BOOLEAN,column_int INTEGER NOT NULL )";
    DatabaseTestHelper.createTableWithStatement(DATA_SOURCE, createSql);
    try {
        final ArrayList<Row> data = getRowsToWrite(rowsToAdd, schema);
        pipeline
            .apply(Create.of(data))
            .setRowSchema(schema)
            .apply(
                JdbcIO.<Row>write()
                    .withDataSourceConfiguration(DATA_SOURCE_CONFIGURATION)
                    .withBatchSize(10L)
                    .withTable(tableName));
        pipeline.run();
    } finally {
        DatabaseTestHelper.deleteTable(DATA_SOURCE, tableName);
        // Registered here, before the test method returns, so the rule still
        // validates the exception thrown by run() during unwinding.
        thrown.expect(RuntimeException.class);
        thrown.expectMessage("Non nullable fields are not allowed without a matching schema.");
    }
}
/**
 * Registers the Motan service event listener, wired with the Motan-specific
 * client properties and the shenyu register repository.
 */
@Bean
public MotanServiceEventListener motanServiceEventListener(
        final ShenyuClientConfig clientConfig,
        final ShenyuClientRegisterRepository shenyuClientRegisterRepository) {
    return new MotanServiceEventListener(
            clientConfig.getClient().get(RpcTypeEnum.MOTAN.getName()),
            shenyuClientRegisterRepository);
}
@Test
public void testMotanServiceEventListener() {
    // try-with-resources guarantees the static mock is deregistered even if
    // an assertion inside the context runner fails; the previous version
    // only closed it on the success path, leaking the mock into later tests.
    try (MockedStatic<RegisterUtils> registerUtilsMockedStatic = mockStatic(RegisterUtils.class)) {
        registerUtilsMockedStatic.when(() -> RegisterUtils.doLogin(any(), any(), any()))
                .thenReturn(Optional.ofNullable("token"));
        new ApplicationContextRunner()
            .withConfiguration(AutoConfigurations.of(ShenyuMotanClientConfiguration.class))
            .withBean(ShenyuMotanClientConfigurationTest.class)
            .withBean(ProtocolConfigBean.class)
            .withPropertyValues(
                "debug=true",
                "shenyu.register.registerType=http",
                "shenyu.register.serverLists=http://localhost:9095",
                "shenyu.register.props.username=admin",
                "shenyu.register.props.password=123456",
                "shenyu.client.motan.props[contextPath]=/motan",
                "shenyu.client.motan.props[appName]=motan",
                "shenyu.client.motan.props[host]=127.0.0.1",
                "shenyu.client.motan.props[port]=8081",
                "shenyu.client.motan.basicServiceConfig.exportPort=8002"
            )
            .run(context -> {
                MotanServiceEventListener motanServiceEventListener =
                        context.getBean("motanServiceEventListener", MotanServiceEventListener.class);
                assertNotNull(motanServiceEventListener);
            });
    }
}
/** Returns a fresh {@link Builder} for assembling an instance. */
public static Builder builder() {
    return new Builder();
}
@Test
public void testCanDeserializeWithoutDefaultValues() throws JsonProcessingException {
    final CreateNamespaceResponse expectedNoProps =
        CreateNamespaceResponse.builder().withNamespace(NAMESPACE).build();

    // "properties" missing entirely.
    assertEquals(deserialize("{\"namespace\":[\"accounting\",\"tax\"]}"), expectedNoProps);

    // "properties" explicitly null.
    assertEquals(
        deserialize("{\"namespace\":[\"accounting\",\"tax\"],\"properties\":null}"),
        expectedNoProps);

    // Empty namespace with "properties" missing.
    final CreateNamespaceResponse expectedEmptyNamespace =
        CreateNamespaceResponse.builder().withNamespace(Namespace.empty()).build();
    assertEquals(deserialize("{\"namespace\":[]}"), expectedEmptyNamespace);
}
/**
 * Returns all admin users having the given status.
 * Straight pass-through to the mapper; no extra filtering here.
 */
@Override
public List<AdminUserDO> getUserListByStatus(Integer status) {
    return userMapper.selectListByStatus(status);
}
@Test public void testGetUserListByStatus() { // mock 数据 AdminUserDO user = randomAdminUserDO(o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())); userMapper.insert(user); // 测试 status 不匹配 userMapper.insert(randomAdminUserDO(o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()))); // 准备参数 Integer status = CommonStatusEnum.DISABLE.getStatus(); // 调用 List<AdminUserDO> result = userService.getUserListByStatus(status); // 断言 assertEquals(1, result.size()); assertEquals(user, result.get(0)); }
public int boundedControlledPoll(
    final ControlledFragmentHandler handler, final long limitPosition, final int fragmentLimit)
{
    // Polls message fragments up to both a position bound and a fragment
    // count bound, letting the handler steer consumption per fragment via
    // its returned Action (CONTINUE, ABORT, BREAK, COMMIT).
    if (isClosed)
    {
        return 0;
    }

    long initialPosition = subscriberPosition.get();
    if (initialPosition >= limitPosition)
    {
        // Already at or past the bound: nothing to consume.
        return 0;
    }

    int fragmentsRead = 0;
    int initialOffset = (int)initialPosition & termLengthMask;
    int offset = initialOffset;
    final UnsafeBuffer termBuffer = activeTermBuffer(initialPosition);
    // Clamp the scan window to the end of the term or the position bound,
    // whichever comes first.
    final int limitOffset = (int)Math.min(termBuffer.capacity(), (limitPosition - initialPosition) + offset);
    final Header header = this.header;
    header.buffer(termBuffer);

    try
    {
        while (fragmentsRead < fragmentLimit && offset < limitOffset)
        {
            final int length = frameLengthVolatile(termBuffer, offset);
            if (length <= 0)
            {
                // Frame not yet fully committed by the publisher.
                break;
            }

            final int frameOffset = offset;
            final int alignedLength = BitUtil.align(length, FRAME_ALIGNMENT);
            offset += alignedLength;

            if (isPaddingFrame(termBuffer, frameOffset))
            {
                // Padding advances the offset but is never delivered.
                continue;
            }

            ++fragmentsRead;
            header.offset(frameOffset);

            final Action action = handler.onFragment(
                termBuffer, frameOffset + HEADER_LENGTH, length - HEADER_LENGTH, header);

            if (ABORT == action)
            {
                // Undo this fragment so it is redelivered on the next poll.
                --fragmentsRead;
                offset -= alignedLength;
                break;
            }

            if (BREAK == action)
            {
                break;
            }

            if (COMMIT == action)
            {
                // Publish progress up to and including this fragment.
                initialPosition += (offset - initialOffset);
                initialOffset = offset;
                subscriberPosition.setOrdered(initialPosition);
            }
        }
    }
    catch (final Exception ex)
    {
        errorHandler.onError(ex);
    }
    finally
    {
        // Publish any remaining uncommitted progress, even after an error.
        final long resultingPosition = initialPosition + (offset - initialOffset);
        if (resultingPosition > initialPosition)
        {
            subscriberPosition.setOrdered(resultingPosition);
        }
    }

    return fragmentsRead;
}
@Test
void shouldPollFragmentsToBoundedControlledFragmentHandlerWithMaxPositionAboveIntMaxValue() {
    // Start two frames before the end of the term so the poll crosses a
    // padding frame while the bound sits far above Integer.MAX_VALUE.
    final int startOffset = TERM_BUFFER_LENGTH - (ALIGNED_FRAME_LENGTH * 2);
    final long startPosition = computePosition(
        INITIAL_TERM_ID, startOffset, POSITION_BITS_TO_SHIFT, INITIAL_TERM_ID);
    final long maxPosition = (long)Integer.MAX_VALUE + 1000;
    position.setOrdered(startPosition);
    final Image image = createImage();

    insertDataFrame(INITIAL_TERM_ID, startOffset);
    insertPaddingFrame(INITIAL_TERM_ID, startOffset + ALIGNED_FRAME_LENGTH);

    when(mockControlledFragmentHandler.onFragment(any(DirectBuffer.class), anyInt(), anyInt(), any(Header.class)))
        .thenReturn(Action.CONTINUE);

    // Only the data frame is delivered; the padding is consumed silently.
    final int fragmentsRead = image.boundedControlledPoll(
        mockControlledFragmentHandler, maxPosition, Integer.MAX_VALUE);
    assertThat(fragmentsRead, is(1));

    final InOrder inOrder = Mockito.inOrder(position, mockControlledFragmentHandler);
    inOrder.verify(mockControlledFragmentHandler).onFragment(
        any(UnsafeBuffer.class), eq(startOffset + HEADER_LENGTH), eq(DATA.length), any(Header.class));
    inOrder.verify(position).setOrdered(TERM_BUFFER_LENGTH);
}
@Nonnull @Override public Optional<? extends Algorithm> parse( @Nullable final String str, @Nonnull DetectionLocation detectionLocation) { if (str == null) { return Optional.empty(); } String algorithmStr; Optional<Mode> modeOptional = Optional.empty(); Optional<? extends Padding> paddingOptional = Optional.empty(); if (str.contains("/")) { int slashIndex = str.indexOf("/"); algorithmStr = str.substring(0, slashIndex); String rest = str.substring(slashIndex + 1); if (rest.contains("/")) { slashIndex = rest.indexOf("/"); // mode final String modeStr = rest.substring(0, slashIndex); final JcaModeMapper jcaModeMapper = new JcaModeMapper(); modeOptional = jcaModeMapper.parse(modeStr, detectionLocation); // padding String paddingStr = rest.substring(slashIndex + 1); final JcaPaddingMapper jcaPaddingMapper = new JcaPaddingMapper(); paddingOptional = jcaPaddingMapper.parse(paddingStr, detectionLocation); } } else { algorithmStr = str; } // check if it is pbe JcaPasswordBasedEncryptionMapper pbeMapper = new JcaPasswordBasedEncryptionMapper(); Optional<PasswordBasedEncryption> pbeOptional = pbeMapper.parse(algorithmStr, detectionLocation); if (pbeOptional.isPresent()) { // pbe return pbeOptional; } Optional<? extends Algorithm> possibleCipher = map(algorithmStr, detectionLocation); if (possibleCipher.isEmpty()) { return Optional.empty(); } final Algorithm algorithm = possibleCipher.get(); modeOptional.ifPresent(algorithm::put); paddingOptional.ifPresent(algorithm::put); return Optional.of(algorithm); }
@Test
void base() {
    final DetectionLocation location =
        new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "SSL");

    final Optional<? extends Algorithm> parsed =
        new JcaCipherMapper().parse("AES/ECB/PKCS5Padding", location);

    // Expect an AES block cipher with ECB mode and PKCS5 padding.
    assertThat(parsed).isPresent();
    assertThat(parsed.get().is(BlockCipher.class)).isTrue();

    final Cipher cipher = (Cipher) parsed.get();
    assertThat(cipher.getName()).isEqualTo("AES");

    assertThat(cipher.getMode()).isPresent();
    final Mode mode = cipher.getMode().get();
    assertThat(mode).isInstanceOf(ECB.class);
    assertThat(mode.getBlockSize()).isEmpty();

    assertThat(cipher.getPadding()).isPresent();
    assertThat(cipher.getPadding().get().getName()).isEqualTo("PKCS5");
}
/**
 * Normalises text: trims, lower-cases, strips punctuation, and collapses
 * runs of whitespace to single spaces.
 *
 * @throws IllegalArgumentException if {@code text} is null or empty
 */
@Override
public String normalise(String text) {
    if (text == null || text.isEmpty()) {
        throw new IllegalArgumentException("Text cannot be null or empty");
    }
    final String lowered = text.trim().toLowerCase();
    final String withoutPunctuation = lowered.replaceAll("\\p{Punct}", "");
    return withoutPunctuation.replaceAll("\\s+", " ");
}
@Description("Normalise, when text is empty, then throw IllegalArgumentException") @Test void normalise_WhenTextIsEmpty_ThenThrowIllegalArgumentException() { // When & Then assertThrows(IllegalArgumentException.class, () -> textNormaliser.normalise("")); }
/**
 * Computes graph density: E / (N * (N - 1)) for directed graphs; undirected
 * graphs count each edge twice, hence the factor of 2.
 * NOTE(review): a graph with fewer than 2 nodes divides by zero and yields
 * NaN/Infinity — confirm callers never pass such graphs.
 */
public double calculateDensity(Graph graph, boolean isGraphDirected) {
    final double edgeCount = graph.getEdgeCount();
    final double nodeCount = graph.getNodeCount();
    final double multiplier = isGraphDirected ? 1 : 2;
    return (multiplier * edgeCount) / (nodeCount * nodeCount - nodeCount);
}
@Test
public void testDirectedCyclicGraphDensity() {
    // A directed 5-cycle has 5 edges out of 5 * 4 = 20 possible: density 0.25.
    GraphModel graphModel = GraphGenerator.generateCyclicDirectedGraph(5);
    DirectedGraph graph = graphModel.getDirectedGraph();
    double density = new GraphDensity().calculateDensity(graph, true);
    assertEquals(density, 0.25);
}
public static void checkTableFeatureSupported(Protocol protocol, Metadata metadata) { if (protocol == null || metadata == null) { LOG.error("Delta table is missing protocol or metadata information."); ErrorReport.reportValidateException(ErrorCode.ERR_BAD_TABLE_ERROR, ErrorType.UNSUPPORTED, "Delta table is missing protocol or metadata information."); } // check column mapping String columnMappingMode = ColumnMapping.getColumnMappingMode(metadata.getConfiguration()); if (!columnMappingMode.equals(ColumnMapping.COLUMN_MAPPING_MODE_NONE)) { LOG.error("Delta table feature column mapping is not supported"); ErrorReport.reportValidateException(ErrorCode.ERR_BAD_TABLE_ERROR, ErrorType.UNSUPPORTED, "Delta table feature [column mapping] is not supported"); } }
@Test
public void testCheckTableFeatureSupported() {
    // Null protocol AND metadata must be rejected with a ValidateException.
    expectedEx.expect(ValidateException.class);
    expectedEx.expectMessage("Delta table is missing protocol or metadata information.");
    DeltaUtils.checkTableFeatureSupported(null, null);
}
@Override
public ResourceReconcileResult tryReconcileClusterResources(
        TaskManagerResourceInfoProvider taskManagerResourceInfoProvider) {
    // Reconciles the cluster footprint against required resources: idle
    // timed-out task managers and unused pending task managers are released
    // only while the remaining resources still satisfy the requirement;
    // if even keeping everything is insufficient, extra pending task
    // managers are requested.
    ResourceReconcileResult.Builder builder = ResourceReconcileResult.builder();

    // Partition registered task managers into those idle beyond the
    // configured timeout (release candidates) and all others.
    List<TaskManagerInfo> taskManagersIdleTimeout = new ArrayList<>();
    List<TaskManagerInfo> taskManagersNonTimeout = new ArrayList<>();
    long currentTime = System.currentTimeMillis();
    taskManagerResourceInfoProvider
            .getRegisteredTaskManagers()
            .forEach(
                    taskManagerInfo -> {
                        if (taskManagerInfo.isIdle()
                                && currentTime - taskManagerInfo.getIdleSince()
                                        >= taskManagerTimeout.toMilliseconds()) {
                            taskManagersIdleTimeout.add(taskManagerInfo);
                        } else {
                            taskManagersNonTimeout.add(taskManagerInfo);
                        }
                    });

    // Partition pending task managers by whether any slot allocation has
    // been recorded against them.
    List<PendingTaskManager> pendingTaskManagersNonUse = new ArrayList<>();
    List<PendingTaskManager> pendingTaskManagersInuse = new ArrayList<>();
    taskManagerResourceInfoProvider
            .getPendingTaskManagers()
            .forEach(
                    pendingTaskManager -> {
                        if (pendingTaskManager.getPendingSlotAllocationRecords().isEmpty()) {
                            pendingTaskManagersNonUse.add(pendingTaskManager);
                        } else {
                            pendingTaskManagersInuse.add(pendingTaskManager);
                        }
                    });

    // Running accumulators of the resources that will be retained.
    ResourceProfile resourcesToKeep = ResourceProfile.ZERO;
    ResourceProfile resourcesInTotal = ResourceProfile.ZERO;
    boolean resourceFulfilled = false;

    // check whether available resources of used (pending) task manager is enough.
    ResourceProfile resourcesAvailableOfNonIdle =
            getAvailableResourceOfTaskManagers(taskManagersNonTimeout);
    ResourceProfile resourcesInTotalOfNonIdle =
            getTotalResourceOfTaskManagers(taskManagersNonTimeout);
    resourcesToKeep = resourcesToKeep.merge(resourcesAvailableOfNonIdle);
    resourcesInTotal = resourcesInTotal.merge(resourcesInTotalOfNonIdle);
    if (isRequiredResourcesFulfilled(resourcesToKeep, resourcesInTotal)) {
        resourceFulfilled = true;
    } else {
        // Non-idle task managers alone are not enough: also count the
        // in-use pending task managers.
        ResourceProfile resourcesAvailableOfNonIdlePendingTaskManager =
                getAvailableResourceOfPendingTaskManagers(pendingTaskManagersInuse);
        ResourceProfile resourcesInTotalOfNonIdlePendingTaskManager =
                getTotalResourceOfPendingTaskManagers(pendingTaskManagersInuse);
        resourcesToKeep = resourcesToKeep.merge(resourcesAvailableOfNonIdlePendingTaskManager);
        resourcesInTotal = resourcesInTotal.merge(resourcesInTotalOfNonIdlePendingTaskManager);
    }

    // try reserve or release unused (pending) task managers
    for (TaskManagerInfo taskManagerInfo : taskManagersIdleTimeout) {
        if (resourceFulfilled || isRequiredResourcesFulfilled(resourcesToKeep, resourcesInTotal)) {
            // Requirement already met: this idle task manager can be released.
            resourceFulfilled = true;
            builder.addTaskManagerToRelease(taskManagerInfo);
        } else {
            // Still short on resources: retain it despite the idle timeout.
            resourcesToKeep = resourcesToKeep.merge(taskManagerInfo.getAvailableResource());
            resourcesInTotal = resourcesInTotal.merge(taskManagerInfo.getTotalResource());
        }
    }
    for (PendingTaskManager pendingTaskManager : pendingTaskManagersNonUse) {
        if (resourceFulfilled || isRequiredResourcesFulfilled(resourcesToKeep, resourcesInTotal)) {
            resourceFulfilled = true;
            builder.addPendingTaskManagerToRelease(pendingTaskManager);
        } else {
            resourcesToKeep = resourcesToKeep.merge(pendingTaskManager.getUnusedResource());
            resourcesInTotal = resourcesInTotal.merge(pendingTaskManager.getTotalResourceProfile());
        }
    }

    if (!resourceFulfilled) {
        // fulfill required resources
        tryFulFillRequiredResourcesWithAction(
                resourcesToKeep, resourcesInTotal, builder::addPendingTaskManagerToAllocate);
    }

    return builder.build();
}
@Test
void testIdleTaskManagerShouldBeReleased() {
    final TestingTaskManagerInfo taskManager =
        new TestingTaskManagerInfo(
            DEFAULT_SLOT_RESOURCE.multiply(NUM_OF_SLOTS),
            DEFAULT_SLOT_RESOURCE.multiply(NUM_OF_SLOTS),
            DEFAULT_SLOT_RESOURCE);
    final TaskManagerResourceInfoProvider provider =
        TestingTaskManagerResourceInfoProvider.newBuilder()
            .setRegisteredTaskManagersSupplier(() -> Collections.singleton(taskManager))
            .build();

    // A freshly registered (non-idle) task manager must not be released.
    ResourceReconcileResult result = ANY_MATCHING_STRATEGY.tryReconcileClusterResources(provider);
    assertThat(result.getTaskManagersToRelease()).isEmpty();

    // Once idle past the timeout, it becomes eligible for release.
    taskManager.setIdleSince(System.currentTimeMillis() - 10);
    result = ANY_MATCHING_STRATEGY.tryReconcileClusterResources(provider);
    assertThat(result.getTaskManagersToRelease()).containsExactly(taskManager);
}
/**
 * Applies the conversion selected at construction time to {@code obj}.
 * Unknown conversion codes yield {@code null}.
 */
public Object convert(Object obj) {
    switch (conversion) {
        case NO_CONVERSION:
            return obj;
        case DOUBLE_TO_FLOAT:
            return ((Double) obj).floatValue();
        case INT_TO_SHORT:
            return ((Integer) obj).shortValue();
        case INT_TO_BYTE:
            return ((Integer) obj).byteValue();
        case STRING_TO_CHAR:
            // Takes the first character of the sequence.
            return ((CharSequence) obj).charAt(0);
        case NUM_TO_LONG:
            return Long.parseLong(obj.toString());
        default:
            return null;
    }
}
@Test
public void testStringConversion() {
    // STRING_TO_CHAR extracts the first character, boxed as a Character.
    TypeConverter toChar = new TypeConverter(TypeConverter.STRING_TO_CHAR);
    assertEquals('c', toChar.convert("c"));
    assertTrue(toChar.convert("c") instanceof Character);
}
@Override public boolean createTable(String tableName, JDBCSchema schema) { // Check table ID checkValidTableName(tableName); // Check if table already exists if (tableIds.containsKey(tableName)) { throw new IllegalStateException( "Table " + tableName + " already exists for database " + databaseName + "."); } LOG.info("Creating table using tableName '{}'.", tableName); StringBuilder sql = new StringBuilder(); try (Connection con = driver.getConnection(getUri(), username, password)) { Statement stmt = con.createStatement(); sql.append("CREATE TABLE ") .append(tableName) .append(" (") .append(schema.toSqlStatement()) .append(")"); stmt.executeUpdate(sql.toString()); stmt.close(); } catch (Exception e) { throw new JDBCResourceManagerException( "Error creating table with SQL statement: " + sql + " (for connection with URL " + getUri() + ")", e); } tableIds.put(tableName, schema.getIdColumn()); LOG.info("Successfully created table {}.{}", databaseName, tableName); return true; }
@Test
public void testCreateTableShouldThrowErrorWhenJDBCFailsToExecuteSQL() throws SQLException {
    when(container.getHost()).thenReturn(HOST);
    when(container.getMappedPort(JDBC_PORT)).thenReturn(MAPPED_PORT);

    // Make every SQL update blow up at the driver level.
    Statement failingStatement = driver.getConnection(any(), any(), any()).createStatement();
    doThrow(SQLException.class).when(failingStatement).executeUpdate(anyString());

    // The manager must surface the failure as its own exception type.
    assertThrows(
        JDBCResourceManagerException.class,
        () -> testManager.createTable(
            TABLE_NAME,
            new JDBCResourceManager.JDBCSchema(ImmutableMap.of("id", "INTEGER"), "id")));
}
@VisibleForTesting void validateEmailUnique(Long id, String email) { if (StrUtil.isBlank(email)) { return; } AdminUserDO user = userMapper.selectByEmail(email); if (user == null) { return; } // 如果 id 为空,说明不用比较是否为相同 id 的用户 if (id == null) { throw exception(USER_EMAIL_EXISTS); } if (!user.getId().equals(id)) { throw exception(USER_EMAIL_EXISTS); } }
@Test public void testValidateEmailUnique_emailExistsForCreate() { // 准备参数 String email = randomString(); // mock 数据 userMapper.insert(randomAdminUserDO(o -> o.setEmail(email))); // 调用,校验异常 assertServiceException(() -> userService.validateEmailUnique(null, email), USER_EMAIL_EXISTS); }
/**
 * Returns an {@code OpticalConnectivityId} wrapping the given long value.
 *
 * @param value backing identifier value
 */
public static OpticalConnectivityId of(long value) {
    return new OpticalConnectivityId(value);
}
@Test
public void testEquality() {
    // Ids built from equal longs are equal; a different long is not.
    new EqualsTester()
        .addEqualityGroup(OpticalConnectivityId.of(1L), OpticalConnectivityId.of(1L))
        .addEqualityGroup(OpticalConnectivityId.of(2L))
        .testEquals();
}
@Override
protected void doProcess(Exchange exchange, MetricsEndpoint endpoint, MetricRegistry registry, String metricsName) throws Exception {
    final Message in = exchange.getIn();
    final Counter counter = registry.counter(metricsName);

    // Endpoint-level defaults, which message headers may override.
    final Long endpointIncrement = endpoint.getIncrement();
    final Long endpointDecrement = endpoint.getDecrement();
    final Long increment = getLongHeader(in, HEADER_COUNTER_INCREMENT, endpointIncrement);
    final Long decrement = getLongHeader(in, HEADER_COUNTER_DECREMENT, endpointDecrement);

    if (increment != null) {
        counter.inc(increment);
    } else if (decrement != null) {
        counter.dec(decrement);
    } else {
        // Neither configured: bump the counter by one.
        counter.inc();
    }
}
@Test
public void testProcessWithOutIncrementAndDecrement() throws Exception {
    final Object nullHeaderDefault = null;
    // Neither an increment nor a decrement is configured on the endpoint.
    when(endpoint.getIncrement()).thenReturn(null);
    when(endpoint.getDecrement()).thenReturn(null);

    producer.doProcess(exchange, endpoint, registry, METRICS_NAME);

    // The counter must fall back to a plain inc() and nothing else.
    inOrder.verify(exchange, times(1)).getIn();
    inOrder.verify(registry, times(1)).counter(METRICS_NAME);
    inOrder.verify(endpoint, times(1)).getIncrement();
    inOrder.verify(endpoint, times(1)).getDecrement();
    inOrder.verify(in, times(1)).getHeader(HEADER_COUNTER_INCREMENT, nullHeaderDefault, Long.class);
    inOrder.verify(in, times(1)).getHeader(HEADER_COUNTER_DECREMENT, nullHeaderDefault, Long.class);
    inOrder.verify(counter, times(1)).inc();
    inOrder.verifyNoMoreInteractions();
}
/** Renders each registered chain as a JSON field: chain id -> components. */
public static JsonNode renderChains(ComponentRegistry<? extends Chain<?>> chains) {
    final ObjectNode rendered = jsonMapper.createObjectNode();
    for (Chain<?> chain : chains.allComponents()) {
        rendered.set(chain.getId().stringValue(), renderAbstractComponents(chain.components()));
    }
    return rendered;
}
@Test
void chains_are_rendered() {
    final ChainRegistry<Processor> registry = new ChainRegistry<>();
    registry.register(
        new ComponentId("myChain"),
        new Chain<>("myChain", new VoidProcessor(new ComponentId("voidProcessor"))));

    final String json = ApplicationStatusHandler.renderChains(registry).toString();

    // Both the chain id and its component must appear in the rendering.
    assertTrue(json.contains("myChain"));
    assertTrue(json.contains("voidProcessor"));
}
public static PostgreSQLCommandPacket newInstance(final PostgreSQLCommandPacketType commandPacketType, final PostgreSQLPacketPayload payload) {
    // Simple-protocol packets: skip the 1-byte message type field and build
    // the packet directly from the remaining payload.
    if (!PostgreSQLCommandPacketType.isExtendedProtocolPacketType(commandPacketType)) {
        payload.getByteBuf().skipBytes(1);
        return getPostgreSQLCommandPacket(commandPacketType, payload);
    }
    // Extended-protocol packets may arrive batched in one payload: slice each
    // complete packet out of the buffer and aggregate them.
    List<PostgreSQLCommandPacket> result = new ArrayList<>();
    while (payload.hasCompletePacket()) {
        PostgreSQLCommandPacketType type = PostgreSQLCommandPacketType.valueOf(payload.readInt1());
        // Peek the length at the current reader index without consuming it;
        // readSlice below advances the reader past the whole packet.
        int length = payload.getByteBuf().getInt(payload.getByteBuf().readerIndex());
        PostgreSQLPacketPayload slicedPayload = new PostgreSQLPacketPayload(payload.getByteBuf().readSlice(length), payload.getCharset());
        result.add(getPostgreSQLCommandPacket(type, slicedPayload));
    }
    return new PostgreSQLAggregatedCommandPacket(result);
}
@Test
void assertNewInstanceWithTerminationComPacket() {
    when(payload.getByteBuf()).thenReturn(mock(ByteBuf.class));
    // TERMINATE is a simple (non-extended) packet type and must map
    // directly to the termination packet class.
    assertThat(
        PostgreSQLCommandPacketFactory.newInstance(PostgreSQLCommandPacketType.TERMINATE, payload),
        instanceOf(PostgreSQLComTerminationPacket.class));
}
@Override public Optional<Track<T>> clean(Track<T> track) { TreeSet<Point<T>> points = new TreeSet<>(track.points()); Optional<Point<T>> firstNonNull = firstPointWithAltitude(points); if (!firstNonNull.isPresent()) { return Optional.empty(); } SortedSet<Point<T>> pointsMissingAltitude = points.headSet(firstNonNull.get()); TreeSet<Point<T>> fixedPoints = extrapolateAltitudes(pointsMissingAltitude, firstNonNull.get()); pointsMissingAltitude.clear(); points.addAll(fixedPoints); Optional<Point<T>> gapStart; Optional<Point<T>> gapEnd = firstNonNull; while (gapEnd.isPresent()) { gapStart = firstPointWithoutAltitude(points.tailSet(gapEnd.get())); if (!gapStart.isPresent()) { break; } gapEnd = firstPointWithAltitude(points.tailSet(gapStart.get())); if (!gapEnd.isPresent()) { pointsMissingAltitude = points.tailSet(gapStart.get()); fixedPoints = extrapolateAltitudes(pointsMissingAltitude, points.lower(gapStart.get())); pointsMissingAltitude.clear(); points.addAll(fixedPoints); // extrapolateAltitudes(points.tailSet(gapStart.get()), points.lower(gapStart.get())); } else { pointsMissingAltitude = points.subSet(gapStart.get(), gapEnd.get()); fixedPoints = interpolateAltitudes(pointsMissingAltitude, points.lower(gapStart.get()), gapEnd.get()); pointsMissingAltitude.clear(); points.addAll(fixedPoints); // interpolateAltitudes(points.subSet(gapStart.get(), gapEnd.get()), points.lower(gapStart.get()), gapEnd.get()); } } return Optional.of(Track.of(points)); }
@Test
public void testFillingMultipleMissingAltitudes() {
    Track<NoRawData> dirty = trackWithMultipleMissingAltitudes();

    Track<NoRawData> cleaned = (new FillMissingAltitudes<NoRawData>()).clean(dirty).get();

    ArrayList<Point<NoRawData>> cleanedPoints = new ArrayList<>(cleaned.points());
    // Interior gaps are filled by interpolating between neighbors.
    assertTrue(
        (cleanedPoints.get(1).altitude().inFeet() == 0.0)
            && (cleanedPoints.get(2).altitude().inFeet() == 20.0),
        "The middle points' altitudes should be filled based on their neighbors"
    );
}
public void logAndProcessFailure( String computationId, ExecutableWork executableWork, Throwable t, Consumer<Work> onInvalidWork) { if (shouldRetryLocally(computationId, executableWork.work(), t)) { // Try again after some delay and at the end of the queue to avoid a tight loop. executeWithDelay(retryLocallyDelayMs, executableWork); } else { // Consider the item invalid. It will eventually be retried by Windmill if it still needs to // be processed. onInvalidWork.accept(executableWork.work()); } }
@Test
public void logAndProcessFailure_retriesOnUncaughtUnhandledException_streamingAppliance()
    throws InterruptedException {
    // The latch releases once the (retried) work actually runs.
    CountDownLatch workRan = new CountDownLatch(1);
    ExecutableWork work = createWork(ignored -> workRan.countDown());
    WorkFailureProcessor processor =
        createWorkFailureProcessor(streamingApplianceFailureReporter(false));

    Set<Work> invalidWork = new HashSet<>();
    processor.logAndProcessFailure(
        DEFAULT_COMPUTATION_ID, work, new RuntimeException(), invalidWork::add);

    workRan.await();
    // A locally retryable failure must never mark the work as invalid.
    assertThat(invalidWork).isEmpty();
}
/**
 * Parses a CSV setting value into its parts, applying no per-item
 * transformation (identity).
 */
public static String[] parseAsCsv(String key, String value) {
    return parseAsCsv(key, value, identity());
}
@Test @UseDataProvider("testParseAsCsv") public void parseAsCsv_for_coverage(String value, String[] expected) { // parseAsCsv is extensively tested in org.sonar.server.config.ConfigurationProviderTest assertThat(parseAsCsv("key", value)) .isEqualTo(parseAsCsv("key", value, identity())) .isEqualTo(expected); }
// Reports every consume result as handled successfully; the message list,
// status and contexts are currently ignored.
// NOTE(review): appears to be a stub — always returns success regardless of
// status; confirm this is the intended behavior for pop-orderly consumption.
public boolean processConsumeResult(
    final List<MessageExt> msgs,
    final ConsumeOrderlyStatus status,
    final ConsumeOrderlyContext context,
    final ConsumeRequest consumeRequest
) {
    return true;
}
@Test
public void testProcessConsumeResult() {
    // The pop-orderly service reports every consume result as handled.
    ConsumeOrderlyContext context = mock(ConsumeOrderlyContext.class);
    ConsumeMessagePopOrderlyService.ConsumeRequest request =
            mock(ConsumeMessagePopOrderlyService.ConsumeRequest.class);
    assertTrue(popService.processConsumeResult(
            Collections.singletonList(createMessageExt()),
            ConsumeOrderlyStatus.SUCCESS,
            context,
            request));
}
public static <T> List<T> reverseNew(List<T> list) { List<T> list2 = ObjectUtil.clone(list); if (null == list2) { // 不支持clone list2 = new ArrayList<>(list); } try { return reverse(list2); } catch (final UnsupportedOperationException e) { // 提供的列表不可编辑,新建列表 return reverse(list(false, list)); } }
@Test
public void reverseNewTest() {
    final List<Integer> original = ListUtil.of(1, 2, 3);
    // The result is a new, reversed list.
    final List<Integer> reversed = ListUtil.reverseNew(original);
    assertEquals("[3, 2, 1]", reversed.toString());
}
void addFields( String prefix, Set<String> fieldNames, XmlObject field ) { //Salesforce SOAP Api sends IDs always in the response, even if we don't request it in SOQL query and //the id's value is null in this case. So, do not add this Id to the fields list if ( isNullIdField( field ) ) { return; } String fieldname = prefix + field.getName().getLocalPart(); if ( field instanceof SObject ) { SObject sobject = (SObject) field; for ( XmlObject element : SalesforceConnection.getChildren( sobject ) ) { addFields( fieldname + ".", fieldNames, element ); } } else { addField( fieldname, fieldNames, (String) field.getValue() ); } }
@Test
public void testAddFields_IdAdded() throws Exception {
    final Set<String> collected = new LinkedHashSet<>();
    // An Id element carrying a real value must be added to the field list.
    XmlObject idWithValue = createObject( "Id", VALUE, ObjectType.XMLOBJECT );
    dialog.addFields( "", collected, idWithValue );
    assertArrayEquals( "Id field added", new String[]{"Id"}, collected.toArray() );
}
@SuppressWarnings({"unchecked", "UnstableApiUsage"})
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement) {
  // Only DROP ... DELETE TOPIC statements are handled; everything else
  // passes through untouched.
  if (!(statement.getStatement() instanceof DropStatement)) {
    return statement;
  }

  final DropStatement dropStatement = (DropStatement) statement.getStatement();
  if (!dropStatement.isDeleteTopic()) {
    return statement;
  }

  final SourceName sourceName = dropStatement.getName();
  final DataSource source = metastore.getSource(sourceName);
  if (source != null) {
    if (source.isSource()) {
      throw new KsqlException("Cannot delete topic for read-only source: " + sourceName.text());
    }
    // Refuse when other sources still reference the topic, then delete the
    // topic itself before removing the schema-registry subjects.
    checkTopicRefs(source);

    deleteTopic(source);

    // Closer ensures both subject deletions are attempted even if one of
    // them throws; failures surface from close() below.
    final Closer closer = Closer.create();
    closer.register(() -> deleteKeySubject(source));
    closer.register(() -> deleteValueSubject(source));
    try {
      closer.close();
    } catch (final KsqlException e) {
      // Already a KsqlException: rethrow as-is rather than double-wrapping.
      throw e;
    } catch (final Exception e) {
      throw new KsqlException(e);
    }
  } else if (!dropStatement.getIfExists()) {
    throw new KsqlException("Could not find source to delete topic for: " + statement);
  }

  // Rewrite the statement without the DELETE TOPIC clause so downstream
  // execution performs only the metadata drop.
  final T withoutDelete = (T) dropStatement.withoutDeleteClause();
  final String withoutDeleteText = SqlFormatter.formatSql(withoutDelete) + ";";

  return statement.withStatement(withoutDeleteText, withoutDelete);
}
@Test public void shouldThrowExceptionIfOtherSourcesUsingTopic() { // Given: final ConfiguredStatement<DropStream> dropStatement = givenStatement( "DROP SOMETHING DELETE TOPIC;", new DropStream(SOURCE_NAME, true, true) ); final DataSource other1 = givenSource(SourceName.of("OTHER1"), TOPIC_NAME); final DataSource other2 = givenSource(SourceName.of("OTHER2"), TOPIC_NAME); final Map<SourceName, DataSource> sources = new HashMap<>(); sources.put(SOURCE_NAME, source); sources.put(SourceName.of("OTHER1"), other1); sources.put(SourceName.of("OTHER2"), other2); when(metaStore.getAllDataSources()).thenReturn(sources); // When: final Exception e = assertThrows( RuntimeException.class, () -> deleteInjector.inject(dropStatement) ); // Then: assertThat(e.getMessage(), containsString("Refusing to delete topic. " + "Found other data sources (OTHER1, OTHER2) using topic something")); }
/**
 * Strips a single trailing slash from the URL, if present.
 * Blank input is returned unchanged.
 */
protected static String getTrimmedUrl(String rawUrl) {
    if (isBlank(rawUrl)) {
        return rawUrl;
    }
    return rawUrl.endsWith("/") ? substringBeforeLast(rawUrl, "/") : rawUrl;
}
@Test
public void trim_empty_url() {
    // An empty URL passes through unchanged (still empty).
    assertThat(AzureDevOpsHttpClient.getTrimmedUrl("")).isEmpty();
}
@Override public URL getResource(final String name) { try { final Enumeration<URL> resources = getResources(name); if (resources.hasMoreElements()) { return resources.nextElement(); } } catch (IOException ignored) { // mimics the behavior of the JDK } return null; }
@Test
void testOwnerFirstResourceNotFoundFallsBackToComponent() throws Exception {
  // The resource exists only on the component classpath (tempFolder), never in the owner.
  String resourceName = TempDirUtils.newFile(tempFolder).getName();
  TestUrlClassLoader ownerClassLoader = new TestUrlClassLoader();

  final ComponentClassLoader classLoader =
      new ComponentClassLoader(
          new URL[] {tempFolder.toUri().toURL()},
          ownerClassLoader,
          new String[] {resourceName},
          new String[0],
          Collections.emptyMap());

  // Owner-first lookup misses, so the component classpath must serve the resource.
  final URL resource = classLoader.getResource(resourceName);

  assertThat(resource.toString()).contains(resourceName);
}
/**
 * Registers a metric only when it is one of the tracked throughput metrics
 * in the topic-level group; all other metrics are ignored.
 */
@Override
public void metricChange(final KafkaMetric metric) {
  final boolean topicLevelThroughput =
      THROUGHPUT_METRIC_NAMES.contains(metric.metricName().name())
          && StreamsMetricsImpl.TOPIC_LEVEL_GROUP.equals(metric.metricName().group());
  if (!topicLevelThroughput) {
    return;
  }
  addMetric(metric, getQueryId(metric), getTopic(metric));
}
@Test
public void shouldAddNewMeasurableForAllThroughputMetricsTypes() {
  // Given: one metricChange per throughput metric type, each with a distinct value
  // so the assertions below can tell them apart.
  listener.metricChange(mockMetric(
      BYTES_CONSUMED_TOTAL,
      1D,
      STREAMS_TAGS_TASK_1)
  );
  listener.metricChange(mockMetric(
      RECORDS_CONSUMED_TOTAL,
      2D,
      STREAMS_TAGS_TASK_1)
  );
  listener.metricChange(mockMetric(
      BYTES_PRODUCED_TOTAL,
      3D,
      STREAMS_TAGS_TASK_1)
  );
  listener.metricChange(mockMetric(
      RECORDS_PRODUCED_TOTAL,
      4D,
      STREAMS_TAGS_TASK_1)
  );

  // When: each metric was registered under the query-level tags.
  final Measurable bytesConsumed = verifyAndGetMetric(BYTES_CONSUMED_TOTAL, QUERY_ONE_TAGS);
  final Measurable recordsConsumed = verifyAndGetMetric(RECORDS_CONSUMED_TOTAL, QUERY_ONE_TAGS);
  final Measurable bytesProduced = verifyAndGetMetric(BYTES_PRODUCED_TOTAL, QUERY_ONE_TAGS);
  final Measurable recordsProduced = verifyAndGetMetric(RECORDS_PRODUCED_TOTAL, QUERY_ONE_TAGS);

  // Then: measuring each registered Measurable yields the value originally reported.
  assertThat(bytesConsumed.measure(new MetricConfig().tags(QUERY_ONE_TAGS), 0L), equalTo(1D));
  assertThat(recordsConsumed.measure(new MetricConfig().tags(QUERY_ONE_TAGS), 0L), equalTo(2D));
  assertThat(bytesProduced.measure(new MetricConfig().tags(QUERY_ONE_TAGS), 0L), equalTo(3D));
  assertThat(recordsProduced.measure(new MetricConfig().tags(QUERY_ONE_TAGS), 0L), equalTo(4D));
}
/**
 * Program entry point: creates the application and runs it.
 *
 * @param args command-line arguments (unused)
 */
public static void main(String[] args) {
  new App().run();
}
@Test
void shouldExecuteApplicationWithoutException() {
  // Smoke test: the application entry point must complete without throwing.
  assertDoesNotThrow(() -> App.main(new String[]{}));
}
/**
 * Translates a Pegasus {@link DataMap} into an Avro {@code SpecificRecord} of type T.
 *
 * Translation problems collected during the walk are surfaced as a
 * {@link DataTranslationException}; reflective instantiation failures are wrapped
 * the same way so callers see a single exception type.
 *
 * @throws DataTranslationException on any translation or instantiation failure
 */
public static <T extends SpecificRecordBase> T dataMapToSpecificRecord(DataMap map, RecordDataSchema dataSchema, Schema avroSchema)
    throws DataTranslationException {
  DataMapToSpecificRecordTranslator translator = new DataMapToSpecificRecordTranslator();
  try {
    T avroRecord = translator.translate(map, dataSchema, avroSchema);
    // Even a "successful" translate may have accumulated error messages; check them.
    translator.checkMessageListForErrorsAndThrowDataTranslationException();
    return avroRecord;
  } catch (RuntimeException e) {
    throw translator.dataTranslationException(e);
  } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
    // Reflection failures are wrapped in RuntimeException first so the
    // translator produces a uniform DataTranslationException.
    throw translator.dataTranslationException(new RuntimeException(e));
  }
}
@Test(dataProvider = "testDataMapToSpecificRecordTranslatorUnionProvider") public void testDataMapToSpecificRecordTranslatorUnion(String field1, String fieldVal1, String field2, Object fieldVal2, String field3, EnumData enumData, Object testVal) throws IOException { RecordDataSchema recordDataSchema = (RecordDataSchema) TestUtil.dataSchemaFromString(TestEventWithUnionAndEnum.TEST_SCHEMA.toString()); Schema avroSchema = TestEventWithUnionAndEnum.TEST_SCHEMA; DataMap innerMap2 = new DataMap(); innerMap2.put(field1, fieldVal1); innerMap2.put(field2, fieldVal2); innerMap2.put(field3, enumData.toString()); TestEventWithUnionAndEnum event = DataTranslator.dataMapToSpecificRecord(innerMap2, recordDataSchema, avroSchema); Assert.assertEquals(event.get(event.getSchema().getField(field1).pos()), fieldVal1); Assert.assertEquals(event.get(event.getSchema().getField(field2).pos()), testVal); Assert.assertEquals(event.get(event.getSchema().getField(field3).pos()), enumData); }
@Override
public InputStream getInputStream() {
  // Each call returns a fresh, independent stream view over this binary value.
  return new RedissonInputStream();
}
@Test
public void testReadArrayWithOffset() throws IOException {
  // Store six known bytes in the binary stream.
  RBinaryStream stream = redisson.getBinaryStream("test");
  byte[] payload = {1, 2, 3, 4, 5, 6};
  stream.set(payload);

  // Read three bytes into the middle of a four-byte buffer (offset 1).
  InputStream in = stream.getInputStream();
  byte[] buffer = new byte[4];
  assertThat(in.read(buffer, 1, 3)).isEqualTo(3);

  // Index 0 is untouched; indices 1..3 hold the first three stream bytes.
  byte[] expected = {0, 1, 2, 3};
  assertThat(buffer).isEqualTo(expected);
}
@SuppressWarnings("unchecked") public <T extends Expression> T rewrite(final T expression, final C context) { return (T) rewriter.process(expression, context); }
@Test
public void shouldRewriteInListExpression() {
  // Given: the value list of "1 IN (1, 2, 3)", with the plugin mapping each element
  // to a distinct mock expression.
  final InPredicate inPredicate = parseExpression("1 IN (1, 2, 3)");
  final InListExpression parsed = inPredicate.getValueList();
  when(processor.apply(parsed.getValues().get(0), context)).thenReturn(expr1);
  when(processor.apply(parsed.getValues().get(1), context)).thenReturn(expr2);
  when(processor.apply(parsed.getValues().get(2), context)).thenReturn(expr3);

  // When:
  final Expression rewritten = expressionRewriter.rewrite(parsed, context);

  // Then: element order and the original source location are preserved.
  assertThat(
      rewritten,
      equalTo(new InListExpression(parsed.getLocation(), ImmutableList.of(expr1, expr2, expr3)))
  );
}
/**
 * Removes the contents of the given directory, leaving the directory itself in place.
 * A non-existent directory is a no-op; a null argument is rejected.
 *
 * @throws IOException if cleaning fails
 */
public static void cleanDirectory(File directory) throws IOException {
  requireNonNull(directory, DIRECTORY_CAN_NOT_BE_NULL);
  if (directory.exists()) {
    cleanDirectoryImpl(directory.toPath());
  }
}
@Test
public void cleanDirectory_throws_IAE_if_argument_is_a_file() throws IOException {
  // A regular file is not a directory, so cleanDirectory must reject it.
  File notADirectory = temporaryFolder.newFile();

  assertThatThrownBy(() -> FileUtils2.cleanDirectory(notADirectory))
      .isInstanceOf(IllegalArgumentException.class)
      .hasMessage("'" + notADirectory.getAbsolutePath() + "' is not a directory");
}
@Override
public void deleteProject(Long id) {
  // Validate that the project exists (throws if it does not).
  validateProjectExists(id);
  // Delete the project row by primary key.
  goViewProjectMapper.deleteById(id);
}
@Test
public void testDeleteProject_notExists() {
  // Prepare parameters: a random id that does not exist in the store.
  Long id = randomLongId();

  // Call and assert the expected service exception.
  assertServiceException(() -> goViewProjectService.deleteProject(id),
      GO_VIEW_PROJECT_NOT_EXISTS);
}
/**
 * Builds the registry path for an item: {@code <type>/<itemName>}.
 * Equivalent to {@code String.join("/", type, itemName)}.
 */
public String getPath(final String itemName) {
  return type + "/" + itemName;
}
@Test
void assertGetPath() {
  // A table item is addressed under the "tables" node.
  assertThat(converter.getPath("foo_table"), is("tables/foo_table"));
}
/**
 * Fetches a single post by id via GET /posts/{id}, expecting JSON.
 * A 200 response is decoded into a Post; any other status is turned into
 * an error signal carrying the response details.
 */
Mono<Post> getById(UUID id) {
  return client.get()
      .uri(uriBuilder -> uriBuilder.path("/posts/{id}").build(id))
      .accept(MediaType.APPLICATION_JSON)
      .exchangeToMono(response -> {
        if (response.statusCode().equals(HttpStatus.OK)) {
          return response.bodyToMono(Post.class);
        }
        // Non-200: propagate as a WebClientResponseException-style error.
        return response.createError();
      });
}
@SneakyThrows
@Test
public void testGetPostById() {
  // Stub GET /posts/{id} to return a known post serialized as JSON.
  var id = UUID.randomUUID();
  var data = new Post(id, "title1", "content1", Status.DRAFT, LocalDateTime.now());
  stubFor(get("/posts/" + id)
      .willReturn(
          aResponse()
              .withHeader("Content-Type", "application/json")
              .withResponseBody(Body.fromJsonBytes(Json.toByteArray(data)))
      )
  );

  // Every field of the stubbed post must survive the round trip through the client.
  postClient.getById(id)
      .as(StepVerifier::create)
      .consumeNextWith(
          post -> {
            assertThat(post.id()).isEqualTo(id);
            assertThat(post.title()).isEqualTo(data.title());
            assertThat(post.content()).isEqualTo(data.content());
            assertThat(post.status()).isEqualTo(data.status());
            assertThat(post.createdAt()).isEqualTo(data.createdAt());
          }
      )
      .verifyComplete();

  // The client must have sent exactly one request with an Accept: application/json header.
  verify(getRequestedFor(urlEqualTo("/posts/" + id))
      .withHeader("Accept", equalTo("application/json"))
  );
}
/**
 * Registers every handler in the given map. A null map is a no-op,
 * allowing callers to pass through optional provider results unchecked.
 */
public void registerCommands(Map<String, CommandHandler> handlerMap) {
  if (handlerMap == null) {
    return;
  }
  handlerMap.forEach(this::registerCommand);
}
@Test
public void testRegisterCommands() {
  Map<String, CommandHandler> handlerMap = null;

  // If handlerMap is null, no handler added in handlerMap
  httpServer.registerCommands(handlerMap);
  assertEquals(0, HttpServer.handlerMap.size());

  // Add handler from CommandHandlerProvider
  handlerMap = CommandHandlerProvider.getInstance().namedHandlers();
  httpServer.registerCommands(handlerMap);

  // Check same size: every provided handler must have been registered.
  assertEquals(handlerMap.size(), HttpServer.handlerMap.size());
  // Check not same reference: registration copies entries rather than adopting the map.
  assertTrue(handlerMap != HttpServer.handlerMap);
}
/**
 * Returns the local admin (root) user.
 *
 * @throws IllegalStateException if the root user is disabled in the configuration
 * @deprecated retained for callers that still assume a local admin always exists
 */
@Override
@Deprecated
public User getAdminUser() {
  return getRootUser().orElseThrow(() ->
      new IllegalStateException("Local admin user requested but root user is disabled in config."));
}
@Test
public void testGetAdminUser() throws Exception {
  // The admin user mirrors the root settings from the configuration.
  assertThat(userService.getAdminUser().getName()).isEqualTo(configuration.getRootUsername());
  assertThat(userService.getAdminUser().getEmail()).isEqualTo(configuration.getRootEmail());
  assertThat(userService.getAdminUser().getTimeZone()).isEqualTo(configuration.getRootTimeZone());
}
/**
 * Linearly interpolates between two RGB colors in HSV space and returns
 * the resulting color packed as 0xRRGGBB (alpha masked off).
 * fraction = 0 yields lowRgb, fraction = 1 yields highRgb.
 */
private static int interpolate(float fraction, long lowRgb, long highRgb) {
  float[] low = Color.RGBtoHSB(getRed(lowRgb), getGreen(lowRgb), getBlue(lowRgb), null);
  float[] high = Color.RGBtoHSB(getRed(highRgb), getGreen(highRgb), getBlue(highRgb), null);
  // Lerp each HSV component independently.
  float[] hsv = new float[3];
  for (int i = 0; i < 3; i++) {
    hsv[i] = fraction * (high[i] - low[i]) + low[i];
  }
  return Color.HSBtoRGB(hsv[0], hsv[1], hsv[2]) & 0xFF_FF_FF;
}
@Test
public void testInterpolate() {
  // Integer-fraction overload over the full black->white range:
  // endpoints map exactly, the midpoint lands on mid-gray.
  assertEquals(color(0, 0, 255, color(toSlice("#000")), color(toSlice("#fff"))), 0x00_00_00);
  assertEquals(color(0.0f, 0.0f, 255.0f, color(toSlice("#000")), color(toSlice("#fff"))), 0x00_00_00);
  assertEquals(color(128, 0, 255, color(toSlice("#000")), color(toSlice("#fff"))), 0x80_80_80);
  assertEquals(color(255, 0, 255, color(toSlice("#000")), color(toSlice("#fff"))), 0xFF_FF_FF);

  // Values outside [min, max] are clamped to the nearest endpoint; the
  // red->yellow range exercises a non-gray hue interpolation.
  assertEquals(color(-1, 42, 52, rgb(0xFF, 0, 0), rgb(0xFF, 0xFF, 0)), 0xFF_00_00);
  assertEquals(color(47, 42, 52, rgb(0xFF, 0, 0), rgb(0xFF, 0xFF, 0)), 0xFF_80_00);
  assertEquals(color(142, 42, 52, rgb(0xFF, 0, 0), rgb(0xFF, 0xFF, 0)), 0xFF_FF_00);

  // Fraction-only overloads: out-of-range fractions clamp to [0, 1],
  // signed zero behaves like zero.
  assertEquals(color(-42, color(toSlice("#000")), color(toSlice("#fff"))), 0x00_00_00);
  assertEquals(color(0.0, color(toSlice("#000")), color(toSlice("#fff"))), 0x00_00_00);
  assertEquals(color(0.5, color(toSlice("#000")), color(toSlice("#fff"))), 0x80_80_80);
  assertEquals(color(1.0, color(toSlice("#000")), color(toSlice("#fff"))), 0xFF_FF_FF);
  assertEquals(color(42, color(toSlice("#000")), color(toSlice("#fff"))), 0xFF_FF_FF);
  assertEquals(color(1.0f, color(toSlice("#000")), color(toSlice("#fff"))), 0xFF_FF_FF);
  assertEquals(color(-0.0f, color(toSlice("#000")), color(toSlice("#fff"))), 0x00_00_00);
  assertEquals(color(0.0f, color(toSlice("#000")), color(toSlice("#fff"))), 0x00_00_00);
}
/**
 * Entry point: boots the Spring application context for the demo application.
 *
 * @param args command-line arguments forwarded to Spring Boot
 */
public static void main(final String[] args) {
  SpringApplication.run(ComplexDemoApplication.class, args);
}
@Test
void checkPossibilityToSimplyStartAndRestartApplication() {
  // Stop the backing storage first, then restart the application;
  // the restart must succeed even after the storage was shut down.
  this.configuration.getStorageInstance().stop();
  ComplexDemoApplication.main(new String[]{});
}
/**
 * Registers a new user. Rejects the request with a BAD_REQUEST GenericException
 * when a user with the same identification number already exists; otherwise
 * persists the user with an encoded password.
 */
public void register(RegisterRequest request) {
  if (userRepository.findByIdentificationNumber(request.getIdentificationNumber()).isPresent()) {
    throw GenericException.builder()
        .httpStatus(HttpStatus.BAD_REQUEST)
        .logMessage(this.getClass().getName() + ".register user already exists with identification number {0}",
            request.getIdentificationNumber())
        .message(ErrorCode.USER_ALREADY_EXISTS)
        .build();
  }

  userRepository.save(User.builder()
      .identificationNumber(request.getIdentificationNumber())
      .firstname(request.getFirstName())
      .lastname(request.getLastName())
      .password(passwordEncoder.encode(request.getPassword()))
      .build());
}
@Test
void register_userAlreadyExists() {
  // Arrange: the repository already contains a user with this identification number.
  RegisterRequest request = new RegisterRequest("1234567890", "John", "Doe", "password");
  User existingUser = new User("1234567890", "John", "Doe", "encodedPassword");
  when(userRepository.findByIdentificationNumber(request.getIdentificationNumber())).thenReturn(Optional.of(existingUser));

  // Act & Assert: registration fails and nothing is saved.
  assertThrows(GenericException.class, () -> userService.register(request));
  verify(userRepository, never()).save(any(User.class));
}
@Override
public Long del(byte[]... keys) {
  // Inside a MULTI/pipeline the commands are only queued; results arrive when the
  // transaction/pipeline is closed, so null is returned here per the Spring Data contract.
  if (isQueueing() || isPipelined()) {
    for (byte[] key: keys) {
      write(key, LongCodec.INSTANCE, RedisCommands.DEL, key);
    }
    return null;
  }

  // Otherwise issue one DEL per key in a single batch (keys may hash to different
  // cluster slots) and sum the per-key deletion counts into the total.
  // NOTE(review): the queued path uses LongCodec while this path uses StringCodec —
  // looks deliberate, but worth confirming the codecs are interchangeable for DEL.
  CommandBatchService es = new CommandBatchService(executorService);
  for (byte[] key: keys) {
    es.writeAsync(key, StringCodec.INSTANCE, RedisCommands.DEL, key);
  }
  BatchResult<Long> b = (BatchResult<Long>) es.execute();
  return b.getResponses().stream().collect(Collectors.summarizingLong(v -> v)).getSum();
}
@Test
public void testDelPipeline() {
  byte[] key = "key".getBytes();
  byte[] value = "val".getBytes();
  connection.set(key, value);

  // Queue a GET and a DEL inside a single pipeline.
  connection.openPipeline();
  connection.get(key);
  connection.del(key);
  List<Object> results = connection.closePipeline();

  // First response is the stored value; second is the number of keys deleted.
  byte[] fetched = (byte[]) results.get(0);
  assertThat(fetched).isEqualTo(value);
  Long deleted = (Long) results.get(1);
  assertThat(deleted).isEqualTo(1);
}