focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Convenience overload that submits the given connector configuration without an
 * initial target state (passed as {@code null} to the full overload).
 *
 * @param connName     name of the connector to create or update
 * @param config       the connector's configuration map
 * @param allowReplace whether an existing connector with this name may be replaced
 * @param callback     invoked with the created/updated connector info, or with an error
 */
@Override
public void putConnectorConfig(final String connName, final Map<String, String> config,
        final boolean allowReplace, final Callback<Created<ConnectorInfo>> callback) {
    // Delegate with a null target state; the full overload does the actual work.
    putConnectorConfig(connName, config, null, allowReplace, callback);
}
@Test public void testPutConnectorConfig() throws Exception { when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); when(member.memberId()).thenReturn("leader"); expectRebalance(1, singletonList(CONN1), Collections.emptyList(), true); when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectConfigRefreshAndSnapshot(SNAPSHOT); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); ArgumentCaptor<Callback<TargetState>> onStart = ArgumentCaptor.forClass(Callback.class); doAnswer(invocation -> { onStart.getValue().onCompletion(null, TargetState.STARTED); return true; }).when(worker).startConnector(eq(CONN1), eq(CONN1_CONFIG), any(), eq(herder), eq(TargetState.STARTED), onStart.capture()); expectExecuteTaskReconfiguration(true, conn1SinkConfig, invocation -> TASK_CONFIGS); expectMemberPoll(); // Should pick up original config FutureCallback<Map<String, String>> connectorConfigCb = new FutureCallback<>(); herder.connectorConfig(CONN1, connectorConfigCb); herder.tick(); assertTrue(connectorConfigCb.isDone()); assertEquals(CONN1_CONFIG, connectorConfigCb.get()); // Poll loop for second round of calls expectMemberEnsureActive(); ArgumentCaptor<Callback<ConfigInfos>> validateCallback = ArgumentCaptor.forClass(Callback.class); doAnswer(invocation -> { validateCallback.getValue().onCompletion(null, CONN1_CONFIG_INFOS); return null; }).when(herder).validateConnectorConfig(eq(CONN1_CONFIG_UPDATED), validateCallback.capture()); doAnswer(invocation -> { // Simulate response to writing config + waiting until end of log to be read configUpdateListener.onConnectorConfigUpdate(CONN1); return null; }).when(configBackingStore).putConnectorConfig(eq(CONN1), eq(CONN1_CONFIG_UPDATED), isNull()); // As a result of reconfig, should need to update snapshot. 
With only connector updates, we'll just restart // connector without rebalance when(configBackingStore.snapshot()).thenReturn(SNAPSHOT_UPDATED_CONN1_CONFIG); doNothing().when(worker).stopAndAwaitConnector(CONN1); ArgumentCaptor<Callback<TargetState>> onStart2 = ArgumentCaptor.forClass(Callback.class); doAnswer(invocation -> { onStart2.getValue().onCompletion(null, TargetState.STARTED); return true; }).when(worker).startConnector(eq(CONN1), eq(CONN1_CONFIG_UPDATED), any(), eq(herder), eq(TargetState.STARTED), onStart2.capture()); expectExecuteTaskReconfiguration(true, conn1SinkConfigUpdated, invocation -> TASK_CONFIGS); // Apply new config. FutureCallback<Herder.Created<ConnectorInfo>> putConfigCb = new FutureCallback<>(); herder.putConnectorConfig(CONN1, CONN1_CONFIG_UPDATED, true, putConfigCb); herder.tick(); assertTrue(putConfigCb.isDone()); ConnectorInfo updatedInfo = new ConnectorInfo(CONN1, CONN1_CONFIG_UPDATED, Arrays.asList(TASK0, TASK1, TASK2), ConnectorType.SOURCE); assertEquals(new Herder.Created<>(false, updatedInfo), putConfigCb.get()); // Third tick just to read the config - check config again to validate change connectorConfigCb = new FutureCallback<>(); herder.connectorConfig(CONN1, connectorConfigCb); herder.tick(); assertTrue(connectorConfigCb.isDone()); assertEquals(CONN1_CONFIG_UPDATED, connectorConfigCb.get()); // Once after initial rebalance and assignment; another after config update verify(worker, times(2)).startConnector(eq(CONN1), any(), any(), eq(herder), eq(TargetState.STARTED), any()); verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore); }
public static <R extends Request> Http2Headers toHttp2Headers(R request) throws Exception { URI uri = request.getURI(); URL url = new URL(uri.toString()); String method = request.getMethod(); String authority = url.getAuthority(); String path = url.getFile(); String scheme = uri.getScheme(); // RFC 2616, section 5.1.2: // Note that the absolute path cannot be empty; if none is present in the original URI, // it MUST be given as "/" (the server root). path = path.isEmpty() ? "/" : path; final Http2Headers headers = new DefaultHttp2Headers() .method(method) .authority(authority) .path(path) .scheme(scheme); for (Map.Entry<String, String> entry : request.getHeaders().entrySet()) { // Ignores HTTP/2 blacklisted headers if (HEADER_BLACKLIST.contains(entry.getKey().toLowerCase())) { continue; } // RFC 7540, section 8.1.2: // ... header field names MUST be converted to lowercase prior to their // encoding in HTTP/2. A request or response containing uppercase // header field names MUST be treated as malformed (Section 8.1.2.6). String name = entry.getKey().toLowerCase(); String value = entry.getValue(); headers.set(name, value == null ? "" : value); } // Split up cookies to allow for better header compression headers.set(HttpHeaderNames.COOKIE, request.getCookies()); return headers; }
@Test public void testStreamToHttp2HeadersCookies() throws Exception { StreamRequestBuilder streamRequestBuilder = new StreamRequestBuilder(new URI(ANY_URI)); IntStream.range(0, 10).forEach(i -> streamRequestBuilder.addCookie(ANY_COOKIE)); StreamRequest request = streamRequestBuilder.build( EntityStreams.newEntityStream(new ByteStringWriter(ByteString.copy(ANY_ENTITY.getBytes())))); Http2Headers headers = NettyRequestAdapter.toHttp2Headers(request); Assert.assertNotNull(headers); List<CharSequence> cookies = headers.getAll(HttpHeaderNames.COOKIE); Assert.assertNotNull(cookies); Assert.assertEquals(cookies.size(), 10); }
/**
 * No-op: this implementation holds no resources that need releasing.
 */
@Override
public void close() {
}
@Test public void shouldFail_hitRequestLimitRemote() throws ExecutionException, InterruptedException { // Given: when(locator.locate()).thenReturn(ImmutableList.of(ksqlNodeRemote)); transientQueryQueue = new TransientQueryQueue(OptionalInt.empty(), 1, 100); final PushRouting routing = new PushRouting(); // When: final PushConnectionsHandle handle = handlePushRouting(routing); context.runOnContext(v -> { remotePublisher.accept(REMOTE_ROW1); remotePublisher.accept(REMOTE_ROW2); }); // Then: Set<List<?>> rows = waitOnRows(1); handle.close(); assertThat(rows.contains(REMOTE_ROW1.getRow().get().getColumns()), is(true)); assertThat(handle.getError().getMessage(), containsString("Hit limit of request queue")); }
/**
 * Fetches all pending map outputs from a single {@link MapHost} over one shuffle
 * connection. On a transient IO error the connection is re-established and fetching
 * continues with the tasks that are still remaining; per-task copy failures are
 * reported to the scheduler. Any map outputs still not fetched when this method
 * returns are put back on the scheduler's pending list (see the finally block).
 *
 * @param host the host to copy completed map outputs from
 * @throws IOException if the server closed the stream without returning all
 *                     expected map outputs and without reporting failed tasks
 */
@VisibleForTesting
protected void copyFromHost(MapHost host) throws IOException {
    // reset retryStartTime for a new host
    retryStartTime = 0;
    // Get completed maps on 'host'
    List<TaskAttemptID> maps = scheduler.getMapsForHost(host);

    // Sanity check to catch hosts with only 'OBSOLETE' maps,
    // especially at the tail of large jobs
    if (maps.size() == 0) {
        return;
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Fetcher " + id + " going to fetch from " + host + " for: " + maps);
    }

    // List of maps to be fetched yet; shrinks as outputs are copied successfully.
    Set<TaskAttemptID> remaining = new HashSet<TaskAttemptID>(maps);

    // Construct the url and connect
    URL url = getMapOutputURL(host, maps);
    DataInputStream input = null;
    try {
        input = openShuffleUrl(host, remaining, url);
        if (input == null) {
            // openShuffleUrl already handled the failure; leftovers are
            // returned to the scheduler by the finally block.
            return;
        }

        // Loop through available map-outputs and fetch them.
        // On any error, failedTasks is not null and we exit
        // after putting back the remaining maps to the
        // yet_to_be_fetched list and marking the failed tasks.
        TaskAttemptID[] failedTasks = null;
        while (!remaining.isEmpty() && failedTasks == null) {
            try {
                failedTasks = copyMapOutput(host, input, remaining, fetchRetryEnabled);
            } catch (IOException e) {
                IOUtils.cleanupWithLogger(LOG, input);
                //
                // Setup connection again if disconnected by NM
                connection.disconnect();
                // Get map output from remaining tasks only.
                url = getMapOutputURL(host, remaining);
                input = openShuffleUrl(host, remaining, url);
                if (input == null) {
                    return;
                }
            }
        }

        if (failedTasks != null && failedTasks.length > 0) {
            LOG.warn("copyMapOutput failed for tasks " + Arrays.toString(failedTasks));
            // Penalize the host once, then mark each failed copy individually.
            scheduler.hostFailed(host.getHostName());
            for (TaskAttemptID left : failedTasks) {
                scheduler.copyFailed(left, host, true, false);
            }
        }

        // Sanity check: a clean exit with work left over means the server lied.
        if (failedTasks == null && !remaining.isEmpty()) {
            throw new IOException("server didn't return all expected map outputs: "
                + remaining.size() + " left.");
        }
        input.close();
        // Null out so the finally block does not double-close on success.
        input = null;
    } finally {
        if (input != null) {
            IOUtils.cleanupWithLogger(LOG, input);
            input = null;
        }
        // Whatever was not fetched goes back to the pending list.
        for (TaskAttemptID left : remaining) {
            scheduler.putBackKnownMapOutput(host, left);
        }
    }
}
@Test(timeout=30000) public void testCopyFromHostConnectionTimeout() throws Exception { when(connection.getInputStream()).thenThrow( new SocketTimeoutException("This is a fake timeout :)")); Fetcher<Text,Text> underTest = new FakeFetcher<Text,Text>(job, id, ss, mm, r, metrics, except, key, connection); underTest.copyFromHost(host); verify(connection).addRequestProperty( SecureShuffleUtils.HTTP_HEADER_URL_HASH, encHash); verify(allErrs).increment(1); verify(ss).copyFailed(map1ID, host, false, false); verify(ss).copyFailed(map2ID, host, false, false); verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map1ID)); verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map2ID)); }
/**
 * Returns an immutable snapshot of all index sets currently stored in MongoDB.
 */
@Override
public Set<IndexSet> getAll() {
    // Copy into an ImmutableSet so callers cannot observe later mutations.
    return ImmutableSet.copyOf(findAllMongoIndexSets());
}
@Test public void getAllShouldBeCachedForEmptyList() { final List<IndexSetConfig> indexSetConfigs = Collections.emptyList(); when(indexSetService.findAll()).thenReturn(indexSetConfigs); assertThat(this.indexSetRegistry.getAll()) .isNotNull() .isEmpty(); assertThat(this.indexSetRegistry.getAll()) .isNotNull() .isEmpty(); verify(indexSetService, times(1)).findAll(); }
/**
 * Groups plan fragments into scheduling "phases": sets of fragments that must be
 * started together to avoid deadlock (e.g. a join build and its probe). Phases are
 * the strongly connected components of the fragment dependency graph, returned in
 * topological order of the condensed component graph.
 *
 * @param fragments all fragments of the plan
 * @return phases in the order they should be scheduled
 */
@VisibleForTesting
static List<Set<PlanFragmentId>> extractPhases(Collection<PlanFragment> fragments) {
    // Build a graph where the plan fragments are vertexes and the edges represent
    // a before -> after relationship. For example, a join hash build has an edge
    // to the join probe.
    Graph<PlanFragmentId, DefaultEdge> graph = new DefaultDirectedGraph<>(DefaultEdge.class);
    fragments.forEach(fragment -> graph.addVertex(fragment.getId()));

    Visitor visitor = new Visitor(fragments, graph);
    for (PlanFragment fragment : fragments) {
        visitor.processFragment(fragment.getId());
    }

    // Computes all the strongly connected components of the directed graph.
    // These are the "phases" which hold the set of fragments that must be started
    // at the same time to avoid deadlock.
    List<Set<PlanFragmentId>> components =
            new KosarajuStrongConnectivityInspector<>(graph).stronglyConnectedSets();

    // Reverse index: fragment id -> the component (phase) it belongs to.
    Map<PlanFragmentId, Set<PlanFragmentId>> componentMembership = new HashMap<>();
    for (Set<PlanFragmentId> component : components) {
        for (PlanFragmentId planFragmentId : component) {
            componentMembership.put(planFragmentId, component);
        }
    }

    // build graph of components (phases)
    Graph<Set<PlanFragmentId>, DefaultEdge> componentGraph = new DefaultDirectedGraph<>(DefaultEdge.class);
    components.forEach(componentGraph::addVertex);
    for (DefaultEdge edge : graph.edgeSet()) {
        PlanFragmentId source = graph.getEdgeSource(edge);
        PlanFragmentId target = graph.getEdgeTarget(edge);
        Set<PlanFragmentId> from = componentMembership.get(source);
        Set<PlanFragmentId> to = componentMembership.get(target);
        if (!from.equals(to)) {
            // the topological order iterator below doesn't include vertices that have
            // self-edges, so don't add them
            componentGraph.addEdge(from, to);
        }
    }

    List<Set<PlanFragmentId>> schedulePhases =
            ImmutableList.copyOf(new TopologicalOrderIterator<>(componentGraph));
    return schedulePhases;
}
@Test public void testBroadcastJoin() { PlanFragment buildFragment = createTableScanPlanFragment("build"); PlanFragment joinFragment = createBroadcastJoinPlanFragment("join", buildFragment); List<Set<PlanFragmentId>> phases = PhasedExecutionSchedule.extractPhases(ImmutableList.of(joinFragment, buildFragment)); assertEquals(phases, ImmutableList.of(ImmutableSet.of(joinFragment.getId(), buildFragment.getId()))); }
/**
 * Computes the HMAC-SHA512 of {@code valueToDigest} keyed with {@code key} and
 * returns the digest as a hex string.
 *
 * @param key           the secret key material
 * @param valueToDigest the message to authenticate
 * @return the hex-encoded HMAC-SHA512 digest
 */
public static String hmacSha512Hex(final String key, final String valueToDigest) {
    return getHmacHex(HmacAlgorithms.HMAC_SHA_512, key, valueToDigest);
}
@Test public void testHmacSha512Hex() { assertEquals(HmacHexUtils.hmacSha512Hex("testKey", "testValue"), "99997ffdee76da2f016fe4ee9256c3361c7dc9f1588be5cabeca9e541f8224db00b10260f4885eaaf29edab66574237058d43f5644b47e0fc13e66b89dbcde68"); }
/**
 * Reads a size-prefixed byte array from the buffer's current position: a 4-byte
 * int length followed by that many bytes, delegating the payload read (and any
 * null-encoding handling) to {@code getNullableArray}.
 *
 * @param buffer buffer positioned at the size prefix; its position is advanced
 * @return the bytes that followed the size prefix (possibly null, per getNullableArray)
 */
public static byte[] getNullableSizePrefixedArray(final ByteBuffer buffer) {
    final int prefixedSize = buffer.getInt();
    return getNullableArray(buffer, prefixedSize);
}
@Test public void getNullableSizePrefixedArrayExact() { byte[] input = {0, 0, 0, 2, 1, 0}; final ByteBuffer buffer = ByteBuffer.wrap(input); final byte[] array = Utils.getNullableSizePrefixedArray(buffer); assertArrayEquals(new byte[] {1, 0}, array); assertEquals(6, buffer.position()); assertFalse(buffer.hasRemaining()); }
/**
 * Encodes a replay-session state-change event into {@code encodingBuffer} at
 * {@code offset}: the standard log header, then session id, recording id and
 * position (each little-endian longs), the "FROM -> TO" state-change string, and
 * finally the reason string (truncated to fit the capture length).
 *
 * @param encodingBuffer destination buffer
 * @param offset         absolute write offset of the event
 * @param captureLength  number of payload bytes actually captured
 * @param length         original (untruncated) payload length
 * @param from           state before the change
 * @param to             state after the change
 * @param id             replay session id
 * @param recordingId    recording being replayed
 * @param position       replay position at the time of the change
 * @param reason         free-text reason for the change
 * @return total number of bytes written, including the log header
 */
static <E extends Enum<E>> int encodeReplaySessionStateChange(
    final UnsafeBuffer encodingBuffer,
    final int offset,
    final int captureLength,
    final int length,
    final E from,
    final E to,
    final long id,
    final long recordingId,
    final long position,
    final String reason)
{
    int encodedLength = encodeLogHeader(encodingBuffer, offset, captureLength, length);

    encodingBuffer.putLong(offset + encodedLength, id, LITTLE_ENDIAN);
    encodedLength += SIZE_OF_LONG;

    encodingBuffer.putLong(offset + encodedLength, recordingId, LITTLE_ENDIAN);
    encodedLength += SIZE_OF_LONG;

    encodingBuffer.putLong(offset + encodedLength, position, LITTLE_ENDIAN);
    encodedLength += SIZE_OF_LONG;

    encodedLength += encodeStateChange(encodingBuffer, offset + encodedLength, from, to);

    // Remaining budget for the reason string is whatever of the captured payload
    // (plus header) has not been consumed yet.
    encodedLength += encodeTrailingString(
        encodingBuffer, offset + encodedLength, captureLength + LOG_HEADER_LENGTH - encodedLength, reason);

    return encodedLength;
}
@Test void testEncodeReplaySessionStateChange() { int offset = 24; final int length = replaySessionStateChangeLength(State.ALPHA, State.BETA, "reason"); final int captureLength = captureLength(length); encodeReplaySessionStateChange(buffer, offset, captureLength, length, State.ALPHA, State.BETA, 1, 2, 3, "reason"); assertEquals(captureLength, buffer.getInt(offset, LITTLE_ENDIAN)); assertEquals(length, buffer.getInt(offset + SIZE_OF_INT, LITTLE_ENDIAN)); assertNotEquals(0, buffer.getLong(offset + 2 * SIZE_OF_INT, LITTLE_ENDIAN)); offset += LOG_HEADER_LENGTH; assertEquals(1, buffer.getLong(offset, LITTLE_ENDIAN)); offset += SIZE_OF_LONG; assertEquals(2, buffer.getLong(offset, LITTLE_ENDIAN)); offset += SIZE_OF_LONG; assertEquals(3, buffer.getLong(offset, LITTLE_ENDIAN)); offset += SIZE_OF_LONG; assertEquals("ALPHA -> BETA", buffer.getStringAscii(offset)); offset += SIZE_OF_INT + "ALPHA -> BETA".length(); assertEquals("reason", buffer.getStringAscii(offset)); }
/**
 * Parses the output of {@code p4 describe} for a single changelist into a
 * {@link Modification}.
 *
 * @param output the raw describe output
 * @param result console result used for error reporting by the line parser
 * @return the parsed modification
 * @throws P4OutputParseException if the output does not match the expected describe format
 */
Modification modificationFromDescription(String output, ConsoleResult result) throws P4OutputParseException {
    // The describe output is split into sections by SEPARATOR; the first section
    // holds the changelist header and comment, later sections the affected files.
    String[] parts = StringUtils.splitByWholeSeparator(output, SEPARATOR);
    Pattern pattern = Pattern.compile(DESCRIBE_OUTPUT_PATTERN, Pattern.MULTILINE);
    Matcher matcher = pattern.matcher(parts[0]);
    if (matcher.find()) {
        Modification modification = new Modification();
        // (sic) "parseFistline" is the existing helper name; parses the changelist header line.
        parseFistline(modification, matcher.group(1), result);
        parseComment(matcher, modification);
        parseAffectedFiles(parts, modification);
        return modification;
    }
    // Include the full output so the unparseable change can be diagnosed.
    throw new P4OutputParseException("Could not parse P4 description: " + output);
}
@Test void shouldThrowExceptionWhenCannotParseChanges() { String line = "Some line I don't understand"; try { parser.modificationFromDescription(line, new ConsoleResult(0, new ArrayList<>(), new ArrayList<>(), new ArrayList<>(), new ArrayList<>())); fail("Should throw exception if can't parse the description"); } catch (P4OutputParseException expected) { assertThat(expected.getMessage()).contains(line); } }
/**
 * Rewrites the {@code Cache-Control} max-age header on the outgoing response so it
 * reflects how much of the configured time-to-live remains for the cached response.
 */
@Override
public void accept(ServerWebExchange exchange, CachedResponse cachedResponse) {
    ServerHttpResponse response = exchange.getResponse();
    long calculatedMaxAgeInSeconds =
        calculateMaxAgeInSeconds(exchange.getRequest(), cachedResponse, configuredTimeToLive);
    rewriteCacheControlMaxAge(response.getHeaders(), calculatedMaxAgeInSeconds);
}
@Test void maxAgeIsZero_whenRequestIsCachedMoreThanTimeToLive() { Duration timeToLive = Duration.ofSeconds(SECONDS_LATER / 2); // To be staled after // SECONDS_LATER // passed CachedResponse inputCachedResponse = CachedResponse.create(HttpStatus.OK).timestamp(clock.instant()).build(); SetMaxAgeHeaderAfterCacheExchangeMutator toTest = new SetMaxAgeHeaderAfterCacheExchangeMutator(timeToLive, clock, false); toTest.accept(inputExchange, inputCachedResponse); Optional<Long> firstMaxAgeSeconds = parseMaxAge(inputExchange.getResponse()); SetMaxAgeHeaderAfterCacheExchangeMutator toTestSecondsLater = new SetMaxAgeHeaderAfterCacheExchangeMutator( timeToLive, clockSecondsLater, false); toTestSecondsLater.accept(inputExchange, inputCachedResponse); Optional<Long> secondMaxAgeSeconds = parseMaxAge(inputExchange.getResponse()); assertThat(firstMaxAgeSeconds).contains(timeToLive.getSeconds()); assertThat(secondMaxAgeSeconds).contains(0L); }
/**
 * Applies pipeline templates: for every pipeline that declares a template, validates
 * the pipeline against that template and, if validation recorded no errors, expands
 * the template into the pipeline's own configuration.
 */
@Override
public void process(CruiseConfig cruiseConfig) {
    for (PipelineConfig pipelineConfig : cruiseConfig.getAllPipelineConfigs()) {
        if (pipelineConfig.hasTemplate()) {
            CaseInsensitiveString templateName = pipelineConfig.getTemplateName();
            // NOTE(review): may be null when the named template does not exist;
            // validateTemplate is presumably expected to record that as an error
            // on the pipeline -- confirm against its implementation.
            PipelineTemplateConfig pipelineTemplate = cruiseConfig.findTemplate(templateName);
            pipelineConfig.validateTemplate(pipelineTemplate);
            // Only expand when validation passed and the template has not already
            // been applied to this pipeline.
            if (pipelineConfig.errors().isEmpty() && !pipelineConfig.hasTemplateApplied()) {
                pipelineConfig.usingTemplate(pipelineTemplate);
            }
        }
    }
}
@Test public void shouldNotThrowAnExceptionWhenAPipelineHasAtLeastOneStage() throws Exception { PipelineConfig pipelineConfig = pipelineConfigWithGivenStages("foo"); preprocessor.process(new BasicCruiseConfig(new BasicPipelineConfigs(pipelineConfig))); }
/**
 * Validates a batch of parsed statements against a sandboxed execution context,
 * checking after each statement that the persistent-query capacity has not been
 * exceeded.
 *
 * @param serviceContext    sandboxed service context (enforced via requireSandbox)
 * @param statements        the statements to validate, in order
 * @param sessionProperties session-scoped properties and variables
 * @param sql               the original SQL text, used in capacity error messages
 * @return the total number of persistent queries the statements would create
 */
public int validate(
    final ServiceContext serviceContext,
    final List<ParsedStatement> statements,
    final SessionProperties sessionProperties,
    final String sql
) {
    requireSandbox(serviceContext);

    final KsqlExecutionContext ctx = requireSandbox(snapshotSupplier.apply(serviceContext));
    final Injector injector = injectorFactory.apply(ctx, serviceContext);
    final KsqlConfig ksqlConfig = ctx.getKsqlConfig();

    int numPersistentQueries = 0;
    for (final ParsedStatement parsed : statements) {
        // Session variables are only substituted when the feature is enabled;
        // otherwise an empty map disables substitution for this statement.
        final PreparedStatement<?> prepared = ctx.prepare(
            parsed,
            (isVariableSubstitutionEnabled(sessionProperties, ksqlConfig)
                ? sessionProperties.getSessionVariables()
                : Collections.emptyMap())
        );
        final ConfiguredStatement<?> configured = ConfiguredStatement.of(prepared,
            SessionConfig.of(ksqlConfig, sessionProperties.getMutableScopedProperties())
        );

        final int currNumPersistentQueries = validate(
            serviceContext,
            configured,
            sessionProperties,
            ctx,
            injector
        );
        numPersistentQueries += currNumPersistentQueries;

        // Check capacity eagerly after each persistent query so the error points
        // at the first statement that pushed the sandbox over the limit.
        if (currNumPersistentQueries > 0
            && QueryCapacityUtil.exceedsPersistentQueryCapacity(ctx, ksqlConfig)) {
            QueryCapacityUtil.throwTooManyActivePersistentQueriesException(ctx, ksqlConfig, sql);
        }
    }

    return numPersistentQueries;
}
@SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT") @Test public void shouldCallPrepareStatementWithSessionVariables() { // Given givenRequestValidator(ImmutableMap.of(CreateStream.class, statementValidator)); final Map<String, String> sessionVariables = ImmutableMap.of("a", "1"); when(sessionProperties.getSessionVariables()).thenReturn(sessionVariables); when(ksqlConfig.getBoolean(KsqlConfig.KSQL_VARIABLE_SUBSTITUTION_ENABLE)).thenReturn(true); // When final List<ParsedStatement> statements = givenParsed(SOME_STREAM_SQL); validator.validate(serviceContext, statements, sessionProperties, "sql"); // Then verify(sandboxEngine).prepare(statements.get(0), sessionVariables); verify(sessionProperties).getSessionVariables(); }
/**
 * Validates that a dictionary type with the given key exists and is enabled.
 *
 * @param type the dictionary type key to check
 * @throws RuntimeException DICT_TYPE_NOT_EXISTS when no such type exists;
 *                          DICT_TYPE_NOT_ENABLE when the type exists but is disabled
 */
@VisibleForTesting
public void validateDictTypeExists(String type) {
    DictTypeDO dictType = dictTypeService.getDictType(type);
    if (dictType == null) {
        throw exception(DICT_TYPE_NOT_EXISTS);
    }
    // A type that exists but is disabled is rejected as well.
    if (!CommonStatusEnum.ENABLE.getStatus().equals(dictType.getStatus())) {
        throw exception(DICT_TYPE_NOT_ENABLE);
    }
}
@Test public void testValidateDictTypeExists_success() { // mock 方法,数据类型被禁用 String type = randomString(); when(dictTypeService.getDictType(eq(type))).thenReturn(randomDictTypeDO(type)); // 调用, 成功 dictDataService.validateDictTypeExists(type); }
/**
 * Builds the page registry: collects pages declared by plugin page definitions and
 * by core-extension page definitions, validates that each page references an
 * existing plugin/core extension, and publishes the combined list sorted by key.
 */
@Override
public void start() {
    // Pages contributed by plugins.
    Context context = new Context();
    definitions.forEach(definition -> definition.define(context));

    // Pages contributed by core extensions, collected in a separate context so
    // they can be validated against core extensions rather than plugins.
    Context coreContext = new Context();
    corePageDefinitions.stream()
        .map(CorePageDefinition::getPageDefinition)
        .forEach(definition -> definition.define(coreContext));

    // Fail fast on pages that reference an unknown plugin / core extension.
    context.getPages().forEach(this::checkPluginExists);
    coreContext.getPages().forEach(this::checkCoreExtensionExists);

    pages = new ArrayList<>();
    pages.addAll(context.getPages());
    pages.addAll(coreContext.getPages());
    pages.sort(comparing(Page::getKey));
}
@Test public void fail_if_page_with_unknown_plugin() { PageDefinition governance = context -> context.addPage(Page.builder("governance/my_key").setName("N1").build()); PageDefinition plugin42 = context -> context.addPage(Page.builder("plugin_42/my_key").setName("N2").build()); pluginRepository = mock(PluginRepository.class); when(pluginRepository.hasPlugin("governance")).thenReturn(true); underTest = new PageRepository(pluginRepository, coreExtensionRepository, new PageDefinition[]{governance, plugin42}); assertThatThrownBy(() -> underTest.start()) .isInstanceOf(IllegalStateException.class) .hasMessageContaining("Page 'N2' references plugin 'plugin_42' that does not exist"); }
public void handleUpdatedHostInfo(NodeInfo node, HostInfo hostInfo) { if ( ! node.isDistributor()) return; final int hostVersion; if (hostInfo.getClusterStateVersionOrNull() == null) { // TODO: Consider logging a warning in the future (>5.36). // For now, a missing cluster state version probably means the content // node has not been updated yet. return; } else { hostVersion = hostInfo.getClusterStateVersionOrNull(); } int currentStateVersion = clusterState.getVersion(); if (hostVersion != currentStateVersion) { // The distributor may be old (null), or the distributor may not have updated // to the latest state version just yet. We log here with fine, because it may // also be a symptom of something wrong. log.log(Level.FINE, () -> "Current state version is " + currentStateVersion + ", while host info received from distributor " + node.getNodeIndex() + " is " + hostVersion); return; } statsAggregator.updateForDistributor(node.getNodeIndex(), StorageNodeStatsBridge.generate(hostInfo.getDistributor())); }
@Test void testSuccessCase() { when(nodeInfo.isDistributor()).thenReturn(true); HostInfo hostInfo = HostInfo.createHostInfo( "{" + " \"cluster-state-version\": 101," + " \"distributor\": {\n" + " \"storage-nodes\": [\n" + " {\n" + " \"node-index\": 3\n" + " }\n" + " ]}}"); when(nodeInfo.getNodeIndex()).thenReturn(3); when(clusterState.getVersion()).thenReturn(101); clusterStateView.handleUpdatedHostInfo(nodeInfo, hostInfo); verify(statsAggregator).updateForDistributor(3, StorageNodeStatsBridge.generate(hostInfo.getDistributor())); }
/**
 * Deletes the flow identified by namespace and id within the caller's resolved
 * tenant. Responds 204 when the flow existed and was deleted, 404 otherwise.
 */
@Delete(uri = "{namespace}/{id}")
@ExecuteOn(TaskExecutors.IO)
@Operation(tags = {"Flows"}, summary = "Delete a flow")
@ApiResponse(responseCode = "204", description = "On success")
public HttpResponse<Void> delete(
    @Parameter(description = "The flow namespace") @PathVariable String namespace,
    @Parameter(description = "The flow id") @PathVariable String id
) {
    Optional<Flow> flow = flowRepository.findById(tenantService.resolveTenant(), namespace, id);
    if (flow.isPresent()) {
        flowRepository.delete(flow.get());
        return HttpResponse.status(HttpStatus.NO_CONTENT);
    } else {
        return HttpResponse.status(HttpStatus.NOT_FOUND);
    }
}
@Test void updateNamespaceAsString() { // initial création String flows = String.join("---\n", Arrays.asList( generateFlowAsString("flow1","io.kestra.updatenamespace","a"), generateFlowAsString("flow2","io.kestra.updatenamespace","a"), generateFlowAsString("flow3","io.kestra.updatenamespace","a") )); List<FlowWithSource> updated = client.toBlocking() .retrieve( HttpRequest.POST("/api/v1/flows/io.kestra.updatenamespace", flows) .contentType(MediaType.APPLICATION_YAML), Argument.listOf(FlowWithSource.class) ); assertThat(updated.size(), is(3)); client.toBlocking().exchange(DELETE("/api/v1/flows/io.kestra.updatenamespace/flow1")); client.toBlocking().exchange(DELETE("/api/v1/flows/io.kestra.updatenamespace/flow2")); client.toBlocking().exchange(DELETE("/api/v1/flows/io.kestra.updatenamespace/flow3")); }
/**
 * No-op stub: this implementation ignores deferred deep-link callbacks; the
 * supplied callback is never registered or invoked.
 */
@Override
public void setDeepLinkCompletion(SensorsDataDeferredDeepLinkCallback callback) {
}
@Test public void setDeepLinkCompletion() { mSensorsAPI.setDeepLinkCompletion(new SensorsDataDeferredDeepLinkCallback() { @Override public boolean onReceive(SADeepLinkObject saDeepLinkObject) { Assert.fail(); return false; } }); }
/**
 * Completes the pending relay future (looked up and removed by the request nonce)
 * with the result of a message-verification request. If no future is registered for
 * the nonce the report is silently dropped; if building the result fails, the
 * future is completed exceptionally instead.
 *
 * @param ctx     proxy context of the reporting client (currently unused here)
 * @param status  verification status returned by the client
 * @param request the verification result, carrying the correlation nonce
 */
protected void reportVerifyMessageResult(ProxyContext ctx, Status status, VerifyMessageResult request) {
    String nonce = request.getNonce();
    // getAndRemove: each nonce's future may be completed at most once.
    CompletableFuture<ProxyRelayResult<ConsumeMessageDirectlyResult>> responseFuture =
        this.grpcChannelManager.getAndRemoveResponseFuture(nonce);
    if (responseFuture != null) {
        try {
            ConsumeMessageDirectlyResult result = this.buildConsumeMessageDirectlyResult(status, request);
            responseFuture.complete(new ProxyRelayResult<>(ResponseCode.SUCCESS, "", result));
        } catch (Throwable t) {
            // Propagate build failures to whoever is waiting on the relay.
            responseFuture.completeExceptionally(t);
        }
    }
}
@Test public void testReportVerifyMessageResult() { this.clientActivity = new ClientActivity(this.messagingProcessor, this.grpcClientSettingsManager, grpcChannelManagerMock); String nonce = "123"; when(grpcChannelManagerMock.getAndRemoveResponseFuture(anyString())).thenReturn((CompletableFuture) resultFutureMock); ProxyContext context = createContext(); ContextStreamObserver<TelemetryCommand> streamObserver = clientActivity.telemetry(new StreamObserver<TelemetryCommand>() { @Override public void onNext(TelemetryCommand value) { } @Override public void onError(Throwable t) { } @Override public void onCompleted() { } }); streamObserver.onNext(context, TelemetryCommand.newBuilder() .setVerifyMessageResult(VerifyMessageResult.newBuilder() .setNonce(nonce) .build()) .setStatus(ResponseBuilder.getInstance().buildStatus(Code.OK, Code.OK.name())) .build()); verify(resultFutureMock, times(1)).complete(resultArgumentCaptor.capture()); ProxyRelayResult<ConsumeMessageDirectlyResult> result = resultArgumentCaptor.getValue(); assertThat(result.getCode()).isEqualTo(ResponseCode.SUCCESS); assertThat(result.getResult().getConsumeResult()).isEqualTo(CMResult.CR_SUCCESS); }
/**
 * Returns a new {@link QuotaCounts} whose namespace/storage-space and per-storage-type
 * counts are the arithmetic negation of this instance's counts. This instance is not
 * modified.
 */
public QuotaCounts negation() {
    // Start from a copy of this instance, then negate both counter groups.
    QuotaCounts ret = new QuotaCounts.Builder().quotaCount(this).build();
    // NOTE(review): modify(...) appears to return the counters object to store
    // (possibly a shared/interned instance) -- confirm against its contract.
    ret.nsSsCounts = modify(ret.nsSsCounts, ec -> ec.negation());
    ret.tsCounts = modify(ret.tsCounts, ec -> ec.negation());
    return ret;
}
@Test public void testNegation() throws Exception { QuotaCounts qc = new QuotaCounts.Builder() .nameSpace(HdfsConstants.QUOTA_RESET) .storageSpace(HdfsConstants.QUOTA_RESET) .typeSpaces(HdfsConstants.QUOTA_RESET).build(); qc = qc.negation(); assertEquals(1, qc.getNameSpace()); assertEquals(1, qc.getStorageSpace()); for (StorageType t : StorageType.values()) { assertEquals(1, qc.getTypeSpace(t)); } }
/**
 * Removes the beta (gray-release) configuration row identified by dataId/group/tenant,
 * if one exists. The delete is staged in the embedded-storage SQL context together
 * with the corresponding notification event, then applied atomically; the context is
 * always cleaned up afterwards.
 *
 * @param dataId config data id
 * @param group  config group
 * @param tenant tenant id; blank is normalized to the empty string for storage
 */
@Override
public void removeConfigInfo4Beta(final String dataId, final String group, final String tenant) {
    final String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant;
    // No-op when there is no beta record for this key.
    ConfigInfoStateWrapper configInfo = findConfigInfo4BetaState(dataId, group, tenant);
    if (configInfo != null) {
        try {
            ConfigInfoBetaMapper configInfoBetaMapper = mapperManager.findMapper(
                    dataSourceService.getDataSourceType(), TableConstant.CONFIG_INFO_BETA);
            final String sql = configInfoBetaMapper.delete(Arrays.asList("data_id", "group_id", "tenant_id"));
            final Object[] args = new Object[] {dataId, group, tenantTmp};

            EmbeddedStorageContextUtils.onDeleteConfigBetaInfo(tenantTmp, group, dataId, System.currentTimeMillis());
            EmbeddedStorageContextHolder.addSqlContext(sql, args);

            boolean result = databaseOperate.update(EmbeddedStorageContextHolder.getCurrentSqlContext());
            if (!result) {
                // Fixed copy-paste error: this is the beta (not tag) configuration path.
                throw new NacosConfigException("[Beta] Configuration deletion failed");
            }
        } finally {
            EmbeddedStorageContextHolder.cleanAllContext();
        }
    }
}
@Test void testRemoveConfigInfo4Beta() { String dataId = "dataId456789"; String group = "group4567"; String tenant = "tenant56789o0"; //mock exist beta ConfigInfoStateWrapper mockedConfigInfoStateWrapper = new ConfigInfoStateWrapper(); mockedConfigInfoStateWrapper.setDataId(dataId); mockedConfigInfoStateWrapper.setGroup(group); mockedConfigInfoStateWrapper.setTenant(tenant); mockedConfigInfoStateWrapper.setId(123456L); mockedConfigInfoStateWrapper.setLastModified(System.currentTimeMillis()); Mockito.when( databaseOperate.queryOne(anyString(), eq(new Object[] {dataId, group, tenant}), eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER))) .thenReturn(mockedConfigInfoStateWrapper); //mock remove ok Mockito.when(databaseOperate.update(any(List.class))).thenReturn(true); embeddedConfigInfoBetaPersistService.removeConfigInfo4Beta(dataId, group, tenant); //verity embeddedStorageContextHolderMockedStatic.verify( () -> EmbeddedStorageContextHolder.addSqlContext(anyString(), eq(dataId), eq(group), eq(tenant)), times(1)); }
/**
 * Renders this entry as "{sourceFile,extractionPath,permissions,modificationTime,ownership}".
 */
@Override
public String toString() {
    // %s applies String.valueOf to each field, matching the behavior of
    // string concatenation (including "null" for null fields).
    return String.format(
        "{%s,%s,%s,%s,%s}", sourceFile, extractionPath, permissions, modificationTime, ownership);
}
@Test public void testToString() { Assert.assertEquals( "{a" + File.separator + "path,/an/absolute/unix/path,333,1970-01-01T00:00:00Z,0:0}", new FileEntry( Paths.get("a/path"), AbsoluteUnixPath.get("/an/absolute/unix/path"), FilePermissions.fromOctalString("333"), Instant.EPOCH, "0:0") .toString()); }
/**
 * Tells whether the given sequence ends with the given character.
 *
 * @param str the sequence to inspect; null or empty yields {@code false}
 * @param c   the character to test for
 * @return true if the last character of {@code str} equals {@code c}
 */
public static boolean endWith(CharSequence str, char c) {
    // An empty (or null) sequence cannot end with any character.
    if (isEmpty(str)) {
        return false;
    }
    final char lastChar = str.charAt(str.length() - 1);
    return lastChar == c;
}
@Test public void endWithTest() { assertFalse(CharSequenceUtil.endWith("123", "123", false, true)); assertFalse(CharSequenceUtil.endWith(null, null, false, true)); assertFalse(CharSequenceUtil.endWith("abc", "abc", true, true)); assertTrue(CharSequenceUtil.endWithIgnoreCase(null, null)); assertFalse(CharSequenceUtil.endWithIgnoreCase(null, "abc")); assertFalse(CharSequenceUtil.endWithIgnoreCase("abcdef", null)); assertTrue(CharSequenceUtil.endWithIgnoreCase("abcdef", "def")); assertTrue(CharSequenceUtil.endWithIgnoreCase("ABCDEF", "def")); }
/**
 * Returns the flow entries installed on the given device, as recorded in the store.
 *
 * @param deviceId the device to query; must not be null
 */
@Override
public Iterable<FlowEntry> getFlowEntries(DeviceId deviceId) {
    // Caller must hold read permission for flow rules.
    checkPermission(FLOWRULE_READ);
    checkNotNull(deviceId, DEVICE_ID_NULL);
    return store.getFlowEntries(deviceId);
}
@Test public void getFlowEntries() { assertTrue("store should be empty", Sets.newHashSet(service.getFlowEntries(DID)).isEmpty()); FlowRule f1 = addFlowRule(1); FlowRule f2 = addFlowRule(2); FlowEntry fe1 = new DefaultFlowEntry(f1); FlowEntry fe2 = new DefaultFlowEntry(f2); assertEquals("2 rules should exist", 2, flowCount()); providerService.pushFlowMetrics(DID, ImmutableList.of(fe1, fe2)); validateEvents(RULE_ADD_REQUESTED, RULE_ADD_REQUESTED, RULE_ADDED, RULE_ADDED); addFlowRule(1); System.err.println("events :" + listener.events); assertEquals("should still be 2 rules", 2, flowCount()); providerService.pushFlowMetrics(DID, ImmutableList.of(fe1)); validateEvents(RULE_UPDATED, RULE_UPDATED); }
/**
 * Builds the finalized-features view for the given metadata version and feature map.
 *
 * @param metadataVersion        the cluster's metadata version
 * @param finalizedFeatures      finalized feature levels by feature name
 * @param finalizedFeaturesEpoch epoch at which the features were finalized
 * @param kraftMode              true in KRaft mode, false in ZK mode
 */
public FinalizedFeatures(
    MetadataVersion metadataVersion,
    Map<String, Short> finalizedFeatures,
    long finalizedFeaturesEpoch,
    boolean kraftMode
) {
    this.metadataVersion = metadataVersion;
    // Defensive copy: the map is mutated below and must not alias the caller's map.
    this.finalizedFeatures = new HashMap<>(finalizedFeatures);
    this.finalizedFeaturesEpoch = finalizedFeaturesEpoch;
    // In KRaft mode, we always include the metadata version in the features map.
    // In ZK mode, we never include it.
    if (kraftMode) {
        this.finalizedFeatures.put(MetadataVersion.FEATURE_NAME, metadataVersion.featureLevel());
    } else {
        this.finalizedFeatures.remove(MetadataVersion.FEATURE_NAME);
    }
}
@Test public void testZkModeFeatures() { FinalizedFeatures finalizedFeatures = new FinalizedFeatures(MINIMUM_KRAFT_VERSION, Collections.singletonMap("foo", (short) 2), 123, false); assertNull(finalizedFeatures.finalizedFeatures().get(FEATURE_NAME)); assertEquals((short) 2, finalizedFeatures.finalizedFeatures().get("foo")); assertEquals(1, finalizedFeatures.finalizedFeatures().size()); }
/** Converts a via-way restriction's member ways into edge chains. Caller-contract violations (empty lists, multiple from- AND to-ways) raise IllegalArgumentException, whereas malformed OSM data (disconnected members, ambiguous paths) raises OSMRestrictionException — exactly one edge-chain solution is expected per from/to pair. */
public EdgeResult convertForViaWays(LongArrayList fromWays, LongArrayList viaWays, LongArrayList toWays) throws OSMRestrictionException { if (fromWays.isEmpty() || toWays.isEmpty() || viaWays.isEmpty()) throw new IllegalArgumentException("There must be at least one from-, via- and to-way"); if (fromWays.size() > 1 && toWays.size() > 1) throw new IllegalArgumentException("There can only be multiple from- or to-ways, but not both"); List<IntArrayList> solutions = new ArrayList<>(); for (LongCursor fromWay : fromWays) for (LongCursor toWay : toWays) findEdgeChain(fromWay.value, viaWays, toWay.value, solutions); if (solutions.size() < fromWays.size() * toWays.size()) throw new OSMRestrictionException("has disconnected member ways"); else if (solutions.size() > fromWays.size() * toWays.size()) throw new OSMRestrictionException("has member ways that do not form a unique path"); return buildResult(solutions, new EdgeResult(fromWays.size(), viaWays.size(), toWays.size())); }
/** Graph edges 2 and 3 are created out of path order; the converter must reorder the via-edges of way 1 into the connected chain 1,3,2,4 with nodes 1..5. */
@Test void convertForViaWays_reorderEdges() throws OSMRestrictionException { BaseGraph graph = new BaseGraph.Builder(1).create(); graph.edge(0, 1); graph.edge(1, 2); // the next two edges are given in the 'wrong' order graph.edge(3, 4); graph.edge(2, 3); graph.edge(4, 5); graph.edge(5, 6); LongFunction<Iterator<IntCursor>> edgesByWay = way -> { // way 1 is split into the four edges 1-4 if (way == 1) return IntArrayList.from(1, 2, 3, 4).iterator(); else if (way == 0) return IntArrayList.from(0).iterator(); else if (way == 2) return IntArrayList.from(5).iterator(); else throw new IllegalArgumentException(); }; WayToEdgeConverter.EdgeResult edgeResult = new WayToEdgeConverter(graph, edgesByWay).convertForViaWays(ways(0), ways(1), ways(2)); assertEquals(IntArrayList.from(1, 3, 2, 4), edgeResult.getViaEdges()); assertEquals(IntArrayList.from(1, 2, 3, 4, 5), edgeResult.getNodes()); }
static void schedule(CapacityScheduler cs) throws InterruptedException{ // First randomize the start point int current = 0; Collection<FiCaSchedulerNode> nodes = cs.nodeTracker.getAllNodes(); // If nodes size is 0 (when there are no node managers registered, // we can return from here itself. int nodeSize = nodes.size(); if(nodeSize == 0) { return; } if (!cs.multiNodePlacementEnabled) { int start = random.nextInt(nodeSize); boolean printSkippedNodeLogging = isPrintSkippedNodeLogging(cs); // Allocate containers of node [start, end) for (FiCaSchedulerNode node : nodes) { if (current++ >= start) { if (shouldSkipNodeSchedule(node, cs, printSkippedNodeLogging)) { continue; } cs.allocateContainersToNode(node.getNodeID(), false); } } current = 0; // Allocate containers of node [0, start) for (FiCaSchedulerNode node : nodes) { if (current++ > start) { break; } if (shouldSkipNodeSchedule(node, cs, printSkippedNodeLogging)) { continue; } cs.allocateContainersToNode(node.getNodeID(), false); } if (printSkippedNodeLogging) { cs.printedVerboseLoggingForAsyncScheduling = true; } } else { // Get all partitions List<String> partitions = cs.nodeTracker.getPartitions(); int partitionSize = partitions.size(); // First randomize the start point int start = random.nextInt(partitionSize); // Allocate containers of partition [start, end) for (String partition : partitions) { if (current++ >= start) { CandidateNodeSet<FiCaSchedulerNode> candidates = cs.getCandidateNodeSet(partition); if (candidates == null) { continue; } cs.allocateContainersToNode(candidates, false); } } current = 0; // Allocate containers of partition [0, start) for (String partition : partitions) { if (current++ > start) { break; } CandidateNodeSet<FiCaSchedulerNode> candidates = cs.getCandidateNodeSet(partition); if (candidates == null) { continue; } cs.allocateContainersToNode(candidates, false); } } Thread.sleep(cs.getAsyncScheduleInterval()); }
/** End-to-end over-commit scenario: allocates 4 GB on a 4 GB NM, shrinks the node to 2 GB (used stays 4 GB, available goes negative, no preemption yet), completes a container, grows to 5 GB to place a 3 GB container, then shrinks to 3 GB twice — first verifying the preemption message alone, then that growing back cancels the kill, and finally that the over-committed container is actually killed within the 2 s timeout. */
@Test public void testResourceOverCommit() throws Exception { Configuration conf = new Configuration(); conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class); MockRM rm = new MockRM(conf); rm.start(); ResourceScheduler scheduler = rm.getResourceScheduler(); MockNM nm = rm.registerNode("127.0.0.1:1234", 4 * GB); NodeId nmId = nm.getNodeId(); RMApp app = MockRMAppSubmitter.submitWithMemory(2048, rm); // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm nm.nodeHeartbeat(true); RMAppAttempt attempt1 = app.getCurrentAppAttempt(); MockAM am = rm.sendAMLaunched(attempt1.getAppAttemptId()); am.registerAppAttempt(); assertMemory(scheduler, nmId, 2 * GB, 2 * GB); // add request for 1 container of 2 GB am.addRequests(new String[] {"127.0.0.1", "127.0.0.2"}, 2 * GB, 1, 1); AllocateResponse alloc1Response = am.schedule(); // send the request // kick the scheduler, 2 GB given to AM1, resource remaining 0 nm.nodeHeartbeat(true); while (alloc1Response.getAllocatedContainers().isEmpty()) { LOG.info("Waiting for containers to be created for app 1..."); Thread.sleep(100); alloc1Response = am.schedule(); } List<Container> allocated1 = alloc1Response.getAllocatedContainers(); assertEquals(1, allocated1.size()); Container c1 = allocated1.get(0); assertEquals(2 * GB, c1.getResource().getMemorySize()); assertEquals(nmId, c1.getNodeId()); // check node report, 4 GB used and 0 GB available assertMemory(scheduler, nmId, 4 * GB, 0); nm.nodeHeartbeat(true); assertEquals(4 * GB, nm.getCapability().getMemorySize()); // update node resource to 2 GB, so resource is over-consumed updateNodeResource(rm, nmId, 2 * GB, 2, -1); // the used resource should still 4 GB and negative available resource waitMemory(scheduler, nmId, 4 * GB, -2 * GB, 200, 5 * 1000); // check that we did not get a preemption requests assertNoPreemption(am.schedule().getPreemptionMessage()); // check that the NM got the updated resources nm.nodeHeartbeat(true); assertEquals(2 *
GB, nm.getCapability().getMemorySize()); // check container can complete successfully with resource over-commitment ContainerStatus containerStatus = BuilderUtils.newContainerStatus( c1.getId(), ContainerState.COMPLETE, "", 0, c1.getResource()); nm.containerStatus(containerStatus); LOG.info("Waiting for containers to be finished for app 1..."); GenericTestUtils.waitFor( () -> attempt1.getJustFinishedContainers().size() == 1, 100, 2000); assertEquals(1, am.schedule().getCompletedContainersStatuses().size()); assertMemory(scheduler, nmId, 2 * GB, 0); // verify no NPE is trigger in schedule after resource is updated am.addRequests(new String[] {"127.0.0.1", "127.0.0.2"}, 3 * GB, 1, 1); AllocateResponse allocResponse2 = am.schedule(); assertTrue("Shouldn't have enough resource to allocate containers", allocResponse2.getAllocatedContainers().isEmpty()); // try 10 times as scheduling is an async process for (int i = 0; i < 10; i++) { Thread.sleep(100); allocResponse2 = am.schedule(); assertTrue("Shouldn't have enough resource to allocate containers", allocResponse2.getAllocatedContainers().isEmpty()); } // increase the resources again to 5 GB to schedule the 3GB container updateNodeResource(rm, nmId, 5 * GB, 2, -1); waitMemory(scheduler, nmId, 2 * GB, 3 * GB, 100, 5 * 1000); // kick the scheduling and check it took effect nm.nodeHeartbeat(true); while (allocResponse2.getAllocatedContainers().isEmpty()) { LOG.info("Waiting for containers to be created for app 1..."); Thread.sleep(100); allocResponse2 = am.schedule(); } assertEquals(1, allocResponse2.getAllocatedContainers().size()); Container c2 = allocResponse2.getAllocatedContainers().get(0); assertEquals(3 * GB, c2.getResource().getMemorySize()); assertEquals(nmId, c2.getNodeId()); assertMemory(scheduler, nmId, 5 * GB, 0); // reduce the resources and trigger a preempt request to the AM for c2 updateNodeResource(rm, nmId, 3 * GB, 2, 2 * 1000); waitMemory(scheduler, nmId, 5 * GB, -2 * GB, 200, 5 * 1000);
PreemptionMessage preemptMsg = am.schedule().getPreemptionMessage(); assertPreemption(c2.getId(), preemptMsg); // increasing the resources again, should stop killing the containers updateNodeResource(rm, nmId, 5 * GB, 2, -1); waitMemory(scheduler, nmId, 5 * GB, 0, 200, 5 * 1000); Thread.sleep(3 * 1000); assertMemory(scheduler, nmId, 5 * GB, 0); // reduce the resources again to trigger a preempt request to the AM for c2 long t0 = Time.now(); updateNodeResource(rm, nmId, 3 * GB, 2, 2 * 1000); waitMemory(scheduler, nmId, 5 * GB, -2 * GB, 200, 5 * 1000); preemptMsg = am.schedule().getPreemptionMessage(); assertPreemption(c2.getId(), preemptMsg); // wait until the scheduler kills the container GenericTestUtils.waitFor(() -> { try { nm.nodeHeartbeat(true); // trigger preemption in the NM } catch (Exception e) { LOG.error("Cannot heartbeat", e); } SchedulerNodeReport report = scheduler.getNodeReport(nmId); return report.getAvailableResource().getMemorySize() > 0; }, 200, 5 * 1000); assertMemory(scheduler, nmId, 2 * GB, 1 * GB); List<ContainerStatus> completedContainers = am.schedule().getCompletedContainersStatuses(); assertEquals(1, completedContainers.size()); ContainerStatus c2status = completedContainers.get(0); assertContainerKilled(c2.getId(), c2status); assertTime(2000, Time.now() - t0); rm.stop(); }
/** Renders the given plugin documentation through the "task" template after converting it to a map. */
public static <T> String render(ClassPluginDocumentation<T> classPluginDocumentation) throws IOException { return render("task", JacksonMapper.toMap(classPluginDocumentation)); }
/** Scans the test plugins directory, renders the first task's documentation and checks key markers (name, description, enum values, edition notice) appear in the output. */
@Test void tasks() throws URISyntaxException, IOException { Path plugins = Paths.get(Objects.requireNonNull(ClassPluginDocumentationTest.class.getClassLoader().getResource("plugins")).toURI()); PluginScanner pluginScanner = new PluginScanner(ClassPluginDocumentationTest.class.getClassLoader()); List<RegisteredPlugin> scan = pluginScanner.scan(plugins); assertThat(scan.size(), is(1)); ClassPluginDocumentation<? extends Task> doc = ClassPluginDocumentation.of(jsonSchemaGenerator, scan.getFirst(), scan.getFirst().getTasks().getFirst(), Task.class); String render = DocumentationGenerator.render(doc); assertThat(render, containsString("ExampleTask")); assertThat(render, containsString("description: \"Short description for this task\"")); assertThat(render, containsString("`VALUE_1`")); assertThat(render, containsString("`VALUE_2`")); assertThat(render, containsString("This plugin is exclusively available on the Cloud and Enterprise editions of Kestra.")); }
/**
 * Validates every {@link ParamInfo} in the list, stopping at the first
 * failure. A {@code null} list is treated as trivially valid.
 *
 * @param paramInfos the parameters to validate, may be {@code null}
 * @return the first failing response, or a successful one otherwise
 */
@Override
public ParamCheckResponse checkParamInfoList(List<ParamInfo> paramInfos) {
    ParamCheckResponse result = new ParamCheckResponse();
    if (paramInfos == null) {
        result.setSuccess(true);
        return result;
    }
    for (ParamInfo info : paramInfos) {
        result = checkParamInfoFormat(info);
        if (!result.isSuccess()) {
            // propagate the failing response (including its message) as-is
            return result;
        }
    }
    result.setSuccess(true);
    return result;
}
/** Checks namespaceShowName validation: rejects values over 256 chars, rejects illegal characters, accepts non-ASCII (Chinese) names. */
@Test void testCheckParamInfoForNamespaceShowName() { ParamInfo paramInfo = new ParamInfo(); ArrayList<ParamInfo> paramInfos = new ArrayList<>(); paramInfos.add(paramInfo); // Max Length String namespaceShowName = buildStringLength(257); paramInfo.setNamespaceShowName(namespaceShowName); ParamCheckResponse actual = paramChecker.checkParamInfoList(paramInfos); assertFalse(actual.isSuccess()); assertEquals("Param 'namespaceShowName' is illegal, the param length should not exceed 256.", actual.getMessage()); // Pattern paramInfo.setNamespaceShowName("hsbfkj@$!#khdkad"); actual = paramChecker.checkParamInfoList(paramInfos); assertFalse(actual.isSuccess()); assertEquals("Param 'namespaceShowName' is illegal, illegal characters should not appear in the param.", actual.getMessage()); // Success paramInfo.setNamespaceShowName("测试"); actual = paramChecker.checkParamInfoList(paramInfos); assertTrue(actual.isSuccess()); }
/** Strict-class equality (getClass comparison, so subclasses are never equal) over the distribution and originalPartitionSchemas fields. */
@Override public boolean equals( Object o ) { if ( this == o ) { return true; } if ( o == null || getClass() != o.getClass() ) { return false; } SlaveStepCopyPartitionDistribution that = (SlaveStepCopyPartitionDistribution) o; return Objects.equals( distribution, that.distribution ) && Objects.equals( originalPartitionSchemas, that.originalPartitionSchemas ); }
/** equals(null) must return false per the Object.equals contract. */
@Test public void equalsNullTest() { Assert.assertFalse( slaveStep.equals( null ) ); }
/**
 * Returns whether the given dotted host string starts with an IPv4 multicast
 * first octet (224-239). Only the text before the first '.' is examined; the
 * remainder of the address is not validated.
 *
 * @param host dotted host string, e.g. "224.0.0.1"
 * @return {@code true} if the first octet is numeric and in [224, 239]
 */
public static boolean isMulticastAddress(String host) {
    int dot = host.indexOf('.');
    if (dot <= 0) {
        return false;
    }
    String firstOctet = host.substring(0, dot);
    if (!StringUtils.isNumber(firstOctet)) {
        return false;
    }
    int value = Integer.parseInt(firstOctet);
    return value >= 224 && value <= 239;
}
/** 224.x is multicast; loopback 127.x is not. */
@Test void testIsMulticastAddress() { assertTrue(NetUtils.isMulticastAddress("224.0.0.1")); assertFalse(NetUtils.isMulticastAddress("127.0.0.1")); }
/**
 * Browser filter: a file is shown unless its name matches the exclusion
 * pattern, it is a pending upload, a duplicate, or hidden. Evaluation order
 * matches the original guard chain (pattern first, then type, then
 * attributes) via short-circuiting.
 */
@Override
public boolean accept(final Path file) {
    if(pattern.matcher(file.getName()).matches()) {
        return false;
    }
    final boolean excluded = file.getType().contains(Path.Type.upload)
            || file.attributes().isDuplicate()
            || file.attributes().isHidden();
    return !excluded;
}
/** Dot-files and duplicates are filtered out; a regular file is accepted. */
@Test public void testAccept() { final DefaultBrowserFilter f = new DefaultBrowserFilter(); assertFalse(f.accept(new Path(".f", EnumSet.of(Path.Type.file)))); assertTrue(f.accept(new Path("f.f", EnumSet.of(Path.Type.file)))); final Path d = new Path("f.f", EnumSet.of(Path.Type.file)); d.attributes().setDuplicate(true); assertFalse(f.accept(d)); }
/** Accessor for the underlying telemetry sender. */
public ClientTelemetrySender telemetrySender() { return clientTelemetrySender; }
/** After an existing subscription is updated and the state returns to SUBSCRIPTION_NEEDED, createRequest must build a GetTelemetrySubscriptions request carrying the known client instance id, and transition the sender to SUBSCRIPTION_IN_PROGRESS. */
@Test public void testCreateRequestSubscriptionNeededAfterExistingSubscription() { ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); telemetrySender.updateSubscriptionResult(subscription, time.milliseconds()); assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); Optional<AbstractRequest.Builder<?>> requestOptional = telemetrySender.createRequest(); assertNotNull(requestOptional); assertTrue(requestOptional.isPresent()); assertInstanceOf(GetTelemetrySubscriptionsRequest.class, requestOptional.get().build()); GetTelemetrySubscriptionsRequest request = (GetTelemetrySubscriptionsRequest) requestOptional.get().build(); GetTelemetrySubscriptionsRequest expectedResult = new GetTelemetrySubscriptionsRequest.Builder( new GetTelemetrySubscriptionsRequestData().setClientInstanceId(subscription.clientInstanceId()), true).build(); assertEquals(expectedResult.data(), request.data()); assertEquals(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS, telemetrySender.state()); }
/** Single-argument overload: rounds with a scale of zero by delegating to the two-argument variant. */
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) { return invoke(n, BigDecimal.ZERO); }
/** Scales outside the supported range (both positive and negative) must yield an InvalidParametersEvent error. */
@Test void invokeOutRangeScale() { FunctionTestUtil.assertResultError(roundHalfUpFunction.invoke(BigDecimal.valueOf(1.5), BigDecimal.valueOf(6177)), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(roundHalfUpFunction.invoke(BigDecimal.valueOf(1.5), BigDecimal.valueOf(-6122)), InvalidParametersEvent.class); }
/**
 * Tests for existence by probing the attributes feature: the root always
 * exists, any other path exists iff the attribute lookup does not raise
 * {@link NotfoundException}. Other backend failures propagate as
 * {@link BackgroundException}.
 */
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        return true;
    }
    try {
        new SwiftAttributesFinderFeature(session).find(file, listener);
    }
    catch(NotfoundException e) {
        return false;
    }
    return true;
}
/** A common-prefix "directory" exists only while an object under it exists; after deletion, neither the object nor the placeholder is found, and a fresh CachingFindFeature miss must not populate the cache. */
@Test public void testFindCommonPrefix() throws Exception { final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume)); container.attributes().setRegion("IAD"); assertTrue(new SwiftFindFeature(session).find(container)); final String prefix = new AlphanumericRandomStringService().random(); final Path test = new SwiftTouchFeature(session, new SwiftRegionService(session)).touch( new Path(new Path(container, prefix, EnumSet.of(Path.Type.directory)), new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus()); assertTrue(new SwiftFindFeature(session).find(test)); assertTrue(new SwiftFindFeature(session).find(new Path(container, prefix, EnumSet.of(Path.Type.directory, Path.Type.placeholder)))); assertTrue(new SwiftObjectListService(session).list(new Path(container, prefix, EnumSet.of(Path.Type.directory)), new DisabledListProgressListener()).contains(test)); new SwiftDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback()); assertFalse(new SwiftFindFeature(session).find(test)); assertFalse(new SwiftFindFeature(session).find(new Path(container, prefix, EnumSet.of(Path.Type.directory, Path.Type.placeholder)))); final PathCache cache = new PathCache(1); final Path directory = new Path(container, prefix, EnumSet.of(Path.Type.directory, Path.Type.placeholder)); assertFalse(new CachingFindFeature(session, cache, new SwiftFindFeature(session)).find(directory)); assertFalse(cache.isCached(directory)); }
/**
 * Linear lookup of a material by its exact name.
 *
 * @param name the material name to match (comparison is
 *             {@code material.getName().equals(name)}, so a {@code null}
 *             argument simply never matches)
 * @return the first matching material, or {@code null} when the materials
 *         list is absent or contains no match
 */
public CRMaterial getMaterialByName(String name) {
    if (this.materials == null) {
        return null;
    }
    for (CRMaterial material : this.materials) {
        if (material.getName().equals(name)) {
            return material;
        }
    }
    return null;
}
/** Round-trips a pipeline through Gson and checks the material comes back as the concrete CRGitMaterial subtype with its branch intact. */
@Test public void shouldHandlePolymorphismWhenDeserializingJobs() { String json = gson.toJson(pipe1); CRPipeline deserializedValue = gson.fromJson(json, CRPipeline.class); CRMaterial git = deserializedValue.getMaterialByName("gitMaterial1"); assertThat(git).isInstanceOf(CRGitMaterial.class); assertThat(((CRGitMaterial) git).getBranch()).isEqualTo("feature12"); }
/** Enqueues the task for asynchronous upload, failing it immediately if a previous error was recorded. Blocks (under the lock) until the byte throttle has capacity, then seizes capacity, marks the store unavailable when capacity is exhausted, and schedules an upload. InterruptedException restores the interrupt flag and is rethrown as IOException; any other exception fails the task and propagates. */
@Override public void upload(UploadTask uploadTask) throws IOException { Throwable error = getErrorSafe(); if (error != null) { LOG.debug("don't persist {} changesets, already failed", uploadTask.changeSets.size()); uploadTask.fail(error); return; } LOG.debug("persist {} changeSets", uploadTask.changeSets.size()); try { long size = uploadTask.getSize(); synchronized (lock) { while (!uploadThrottle.hasCapacity()) { lock.wait(); } uploadThrottle.seizeCapacity(size); if (!uploadThrottle.hasCapacity()) { availabilityHelper.resetUnavailable(); } scheduledBytesCounter += size; scheduled.add(wrapWithSizeUpdate(uploadTask, size)); scheduleUploadIfNeeded(); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); uploadTask.fail(e); throw new IOException(e); } catch (Exception e) { uploadTask.fail(e); throw e; } }
/** With a configured delay, uploads are scheduled (not executed) with that delay; nothing reaches the probe until the scheduler's delayed tasks are triggered. */
@Test void testDelay() throws Exception { int delayMs = 50; ManuallyTriggeredScheduledExecutorService scheduler = new ManuallyTriggeredScheduledExecutorService(); withStore( delayMs, MAX_BYTES_IN_FLIGHT, MAX_BYTES_IN_FLIGHT, scheduler, (store, probe) -> { scheduler.triggerAll(); List<StateChangeSet> changeSets = getChanges(4); upload(store, changeSets); assertThat(probe.getUploaded()).isEmpty(); assertThat(scheduler.getAllNonPeriodicScheduledTask()) .anyMatch(scheduled -> scheduled.getDelay(MILLISECONDS) == delayMs); scheduler.triggerAllNonPeriodicTasks(); assertThat(probe.getUploaded()).isEqualTo(changeSets); }); }
/** Deprecated factory: encodes the non-negative numeric sequence number as hex for the change-sequence field. */
@Deprecated public static RowMutationInformation of(MutationType mutationType, long sequenceNumber) { checkArgument(sequenceNumber >= 0, "sequenceNumber must be non-negative"); return new AutoValue_RowMutationInformation( mutationType, null, Long.toHexString(sequenceNumber)); }
/** An empty changeSequenceNumber must be rejected with a descriptive IllegalArgumentException. */
@Test public void givenEmptyString_throws() { IllegalArgumentException error = assertThrows( IllegalArgumentException.class, () -> RowMutationInformation.of(RowMutationInformation.MutationType.UPSERT, "")); assertEquals("changeSequenceNumber must not be empty", error.getMessage()); }
/** Subtracting anything from this instance leaves it unchanged (this looks like the empty-resources implementation, where empty minus anything is still empty). */
@Override public DiscreteResources difference(DiscreteResources other) { return this; }
/** Empty difference any set stays the EMPTY singleton. */
@Test public void testDifference() { DiscreteResource res1 = Resources.discrete(DeviceId.deviceId("a")).resource(); DiscreteResource res2 = Resources.discrete(DeviceId.deviceId("b")).resource(); assertThat(sut.difference(DiscreteResources.of(ImmutableSet.of(res1))), is(EmptyDiscreteResources.INSTANCE)); assertThat(sut.difference(DiscreteResources.of(ImmutableSet.of(res2))), is(EmptyDiscreteResources.INSTANCE)); }
/**
 * Adds every entry of the given map as a field via {@link #addField}.
 * A {@code null} map is a no-op.
 *
 * @param fields field name to value mapping, may be {@code null}
 */
public void addFields(final Map<String, Object> fields) {
    if (fields == null) {
        return;
    }
    fields.forEach(this::addField);
}
/** Fields added in bulk are individually retrievable with their original values. */
@Test public void testAddFields() throws Exception { final Map<String, Object> map = Maps.newHashMap(); map.put("field1", "Foo"); map.put("field2", 1); message.addFields(map); assertEquals("Foo", message.getField("field1")); assertEquals(1, message.getField("field2")); }
/**
 * Validates a secret store for the application: injects the tenant's
 * external id into the request payload, then POSTs it to the application's
 * validation endpoint.
 */
public HttpResponse validateSecretStore(Application application, SystemName system, Slime slime) {
    addExternalId(application.getId().tenant(), system, slime);
    return postRequest(getUri(application), slime);
}
/** The validator must enrich the incoming payload with the externalId resolved from the secret store before POSTing, and relay the endpoint's body back to the caller. */
@Test public void createsCorrectRequestData() throws IOException { var app = mockApplication(); var requestBody = SlimeUtils.jsonToSlime("{\"awsId\":\"123\"," + "\"name\":\"store\"," + "\"role\":\"role\"," + "\"region\":\"some-region\"," + "\"parameterName\":\"some-parameter\"" + "}"); var expectedSecretName = SecretStoreExternalIdRetriever.secretName(TenantName.defaultName(), SystemName.PublicCd, "store"); when(secretStore.getSecret(expectedSecretName)).thenReturn("some-secret-value"); stubFor(post(urlEqualTo("/validate-secret-store")) .withRequestBody(equalToJson("{\"awsId\":\"123\"," + "\"name\":\"store\"," + "\"role\":\"role\"," + "\"region\":\"some-region\"," + "\"parameterName\":\"some-parameter\"," + "\"externalId\":\"some-secret-value\"" + "}")) .willReturn(aResponse() .withStatus(200) .withBody("is ok"))); var response = secretStoreValidator.validateSecretStore(app, SystemName.PublicCd, requestBody); var body = new ByteArrayOutputStream(); response.render(body); assertEquals("is ok", body.toString()); }
/** Entry point for a WriteFiles transform: wraps the non-null sink with default sharding, windowing, error handling and side inputs taken from the sink's dynamic destinations. */
public static <UserT, DestinationT, OutputT> WriteFiles<UserT, DestinationT, OutputT> to( FileBasedSink<UserT, DestinationT, OutputT> sink) { checkArgument(sink != null, "sink can not be null"); return new AutoValue_WriteFiles.Builder<UserT, DestinationT, OutputT>() .setSink(sink) .setComputeNumShards(null) .setNumShardsProvider(null) .setWindowedWrites(false) .setWithAutoSharding(false) .setMaxNumWritersPerBundle(DEFAULT_MAX_NUM_WRITERS_PER_BUNDLE) .setSideInputs(sink.getDynamicDestinations().getSideInputs()) .setSkipIfEmpty(false) .setBadRecordErrorHandler(new DefaultErrorHandler<>()) .setBadRecordRouter(BadRecordRouter.THROWING_ROUTER) .build(); }
/** Writing an empty PCollection must succeed without error. */
@Test @Category(NeedsRunner.class) public void testWriteWithEmptyPCollection() throws IOException { List<String> inputs = new ArrayList<>(); runWrite(inputs, IDENTITY_MAP, getBaseOutputFilename(), WriteFiles.to(makeSimpleSink())); }
/** Static factory wrapping the given Retry in a reactive operator. */
public static <T> RetryOperator<T> of(Retry retry) { return new RetryOperator<>(retry); }
/** A result matching the retry predicate is retried up to maxAttempts, then emitted; metrics record one failed-call-with-retry. */
@Test public void retryOnResultFailAfterMaxAttemptsUsingFlux() { RetryConfig config = RetryConfig.<String>custom() .retryOnResult("retry"::equals) .waitDuration(Duration.ofMillis(10)) .maxAttempts(3).build(); Retry retry = Retry.of("testName", config); StepVerifier.create(Flux.just("retry") .transformDeferred(RetryOperator.of(retry))) .expectSubscription() .expectNextCount(1) .expectComplete() .verify(Duration.ofSeconds(1)); Retry.Metrics metrics = retry.getMetrics(); assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero(); assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isEqualTo(1); }
/** Tries each configured endpoint in order until one streams the artifact successfully; per-endpoint resolvers created here are shut down in the finally block (an injected resolver is left alive). If every endpoint fails, the last RuntimeException is rethrown to the gRPC layer rather than reported via responseObserver.onError. */
@Override public void getArtifact( ArtifactApi.GetArtifactRequest request, StreamObserver<ArtifactApi.GetArtifactResponse> responseObserver) { // Trying out artifact services in order till one succeeds. // If all services fail, re-raises the last error. // TODO: when all services fail, return an aggregated error with errors from all services. RuntimeException lastError = null; for (Endpoints.ApiServiceDescriptor endpoint : endpoints) { ArtifactResolver artifactResolver = this.artifactResolver != null ? this.artifactResolver : new EndpointBasedArtifactResolver(endpoint.getUrl()); try { Iterator<ArtifactApi.GetArtifactResponse> responseIterator = artifactResolver.getArtifact(request); while (responseIterator.hasNext()) { responseObserver.onNext(responseIterator.next()); } responseObserver.onCompleted(); return; } catch (RuntimeException exn) { lastError = exn; } finally { if (this.artifactResolver == null) { artifactResolver.shutdown(); } } } if (lastError == null) { lastError = new RuntimeException( "Could not successfully get the artifact for the request " + request); } throw lastError; }
/** When the first endpoint's resolve throws, the service must fall through to the second endpoint — verified by two getArtifact invocations on the resolver. */
@Test public void testArtifactGetSecondEndpoint() { Path path = Paths.get("dummypath"); RunnerApi.ArtifactInformation fileArtifact = RunnerApi.ArtifactInformation.newBuilder() .setTypeUrn(ArtifactRetrievalService.FILE_ARTIFACT_URN) .setTypePayload( RunnerApi.ArtifactFilePayload.newBuilder() .setPath(path.toString()) .build() .toByteString()) .setRoleUrn("") .build(); ArtifactApi.GetArtifactRequest request = ArtifactApi.GetArtifactRequest.newBuilder().setArtifact(fileArtifact).build(); StreamObserver<GetArtifactResponse> responseObserver = Mockito.mock(StreamObserver.class); Mockito.when(artifactResolver.getArtifact(request)) .thenThrow(new RuntimeException("Failing artifact resolve")) .thenReturn( ImmutableList.of(ArtifactApi.GetArtifactResponse.newBuilder().build()).iterator()); artifactService.getArtifact(request, responseObserver); Mockito.verify(artifactResolver, Mockito.times(2)).getArtifact(request); }
/**
 * Parses a FEEL range literal such as {@code "[1..10)"} into a {@link Range}.
 * Validates boundary characters, splits on the {@code ..} separator, parses
 * and evaluates both endpoint literals, and requires both endpoints to be of
 * equivalent, comparable types (at least one must be present).
 *
 * @param from the textual range, e.g. {@code "(1..5]"}; must not be null/blank
 * @return the parsed range, or an {@link InvalidParametersEvent} error result
 */
public FEELFnResult<Range> invoke(@ParameterName("from") String from) {
    // String.isBlank() is true for empty strings as well, so a single check
    // replaces the previous redundant isEmpty() || isBlank() pair.
    if (from == null || from.isBlank()) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "cannot be null"));
    }
    // Leading character determines the start boundary.
    Range.RangeBoundary startBoundary;
    if (from.startsWith("(") || from.startsWith("]")) {
        startBoundary = RangeBoundary.OPEN;
    } else if (from.startsWith("[")) {
        startBoundary = RangeBoundary.CLOSED;
    } else {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "does not start with a valid character"));
    }
    // Trailing character determines the end boundary.
    Range.RangeBoundary endBoundary;
    if (from.endsWith(")") || from.endsWith("[")) {
        endBoundary = RangeBoundary.OPEN;
    } else if (from.endsWith("]")) {
        endBoundary = RangeBoundary.CLOSED;
    } else {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "does not end with a valid character"));
    }
    // NOTE(review): String.split drops trailing empty strings, so a
    // right-open literal such as "[1..]" is rejected here even though the
    // "at least one endpoint" check below suggests it may be intended —
    // confirm against the FEEL specification before changing.
    String[] split = from.split("\\.\\.");
    if (split.length != 2) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "does not include two literals separated by `..` two dots characters"));
    }
    // Strip the boundary characters off each endpoint literal.
    String leftString = split[0].substring(1);
    String rightString = split[1].substring(0, split[1].length() - 1);
    if (leftString.isBlank() && rightString.isBlank()) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "at least one endpoint must not be null"));
    }
    BaseNode leftNode = parse(leftString);
    if (!nodeIsAllowed(leftNode)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "left endpoint is not a recognised valid literal"));
    }
    BaseNode rightNode = parse(rightString);
    if (!nodeIsAllowed(rightNode)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "right endpoint is not a recognised valid literal"));
    }
    // Evaluate each literal and validate the resulting runtime value.
    Object left = leftNode.evaluate(getStubbed());
    if (!nodeValueIsAllowed(left)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "left endpoint is not a valid value " + left.getClass()));
    }
    Object right = rightNode.evaluate(getStubbed());
    if (!nodeValueIsAllowed(right)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "right endpoint is not a valid value " + right.getClass()));
    }
    if (!nodesReturnsSameType(left, right)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "endpoints must be of equivalent types"));
    }
    return FEELFnResult.ofResult(new RangeImpl(startBoundary, (Comparable) left, (Comparable) right, endBoundary));
}
/** Endpoints may be FEEL function invocations (number(), lower case()) which are evaluated to produce the range bounds. */
@Test void invoke_WithOneFunctionNode() { String from = "[number(\"1\", \",\", \".\")\"..2]"; FunctionTestUtil.assertResult(rangeFunction.invoke(from), new RangeImpl(Range.RangeBoundary.CLOSED, BigDecimal.ONE, BigDecimal.valueOf(2) , Range.RangeBoundary.CLOSED), from); from = "[\"a\"..lower case(\"Z\")]"; FunctionTestUtil.assertResult(rangeFunction.invoke(from), new RangeImpl(Range.RangeBoundary.CLOSED, "a", "z", Range.RangeBoundary.CLOSED), from); }
/** One iteration of the sender loop. With a transaction manager: resolve sequences, abort batches and only poll on fatal errors, bail out on handled abortable authorization errors, possibly bump the idempotent epoch, and return early if a transactional request was sent; AuthenticationException is propagated to the manager for cleanup. Finally, send produce data and poll the network client. */
void runOnce() { if (transactionManager != null) { try { transactionManager.maybeResolveSequences(); RuntimeException lastError = transactionManager.lastError(); // do not continue sending if the transaction manager is in a failed state if (transactionManager.hasFatalError()) { if (lastError != null) maybeAbortBatches(lastError); client.poll(retryBackoffMs, time.milliseconds()); return; } if (transactionManager.hasAbortableError() && shouldHandleAuthorizationError(lastError)) { return; } // Check whether we need a new producerId. If so, we will enqueue an InitProducerId // request which will be sent below transactionManager.bumpIdempotentEpochAndResetIdIfNeeded(); if (maybeSendAndPollTransactionalRequest()) { return; } } catch (AuthenticationException e) { // This is already logged as error, but propagated here to perform any clean ups. log.trace("Authentication exception while processing transactional request", e); transactionManager.authenticationFailed(e); } } long currentTimeMs = time.milliseconds(); long pollTimeout = sendProducerData(currentTimeMs); client.poll(pollTimeout, currentTimeMs); }
/** CLUSTER_AUTHORIZATION_FAILED on InitProducerId is retriable: the first attempt records the error, a subsequent successful InitProducerId clears it and sends succeed again. */
@Test public void testClusterAuthorizationExceptionInInitProducerIdRequest() throws Exception { final long producerId = 343434L; TransactionManager transactionManager = createTransactionManager(); setupWithTransactionState(transactionManager); // cluster authorization failed on initProducerId is retriable prepareAndReceiveInitProducerId(producerId, Errors.CLUSTER_AUTHORIZATION_FAILED); assertFalse(transactionManager.hasProducerId()); assertTrue(transactionManager.hasError()); assertInstanceOf(ClusterAuthorizationException.class, transactionManager.lastError()); assertEquals(-1, transactionManager.producerIdAndEpoch().epoch); assertSendFailure(ClusterAuthorizationException.class); prepareAndReceiveInitProducerId(producerId, Errors.NONE); // sender retry initProducerId and succeed sender.runOnce(); assertFalse(transactionManager.hasFatalError()); assertTrue(transactionManager.hasProducerId()); assertEquals(0, transactionManager.producerIdAndEpoch().epoch); // subsequent send should be successful assertSuccessfulSend(); }
/**
 * Resolves the target of a symlink by shelling out to the platform's
 * readlink command.
 *
 * @param f the suspected symlink; {@code null} is logged and tolerated
 * @return the link target, or the empty string when {@code f} is
 *         {@code null} or the command fails
 */
public static String readLink(File f) {
    /* NB: Use readSymbolicLink in java.nio.file.Path once available. Could
     * use getCanonicalPath in File to get the target of the symlink but that
     * does not indicate if the given path refers to a symlink. */
    if (f == null) {
        LOG.warn("Can not read a null symLink");
        return "";
    }
    try {
        final String[] command = Shell.getReadlinkCommand(f.toString());
        return Shell.execCommand(command).trim();
    } catch (IOException x) {
        // treat unresolvable links the same as non-links
        return "";
    }
}
/** A null input must yield the empty string, not an NPE. */
@Test public void testReadSymlinkWithNullInput() { String result = FileUtil.readLink(null); Assert.assertEquals("", result); }
/**
 * Closes the wrapped service discovery, if any. Close failures are logged
 * at WARNING level and never propagated to the caller.
 */
@Override
public void close() {
    if (this.serviceDiscovery != null) {
        try {
            this.serviceDiscovery.close();
        } catch (IOException ex) {
            LOGGER.log(Level.WARNING, "Stop zookeeper discovery client failed", ex);
        }
    }
}
/**
 * Verifies close() delegates to the underlying service discovery and that
 * an IOException from the delegate is swallowed (logged) rather than
 * propagated. Fix: the original installed the doThrow stub AFTER the only
 * invocation, so the stub had no effect and the exception path was never
 * exercised.
 */
@Test
public void close() throws IOException {
    // happy path: delegate closed exactly once
    zkDiscoveryClient.close();
    Mockito.verify(serviceDiscovery, Mockito.times(1)).close();
    // failure path: stub before invoking, then verify close() does not throw
    Mockito.doThrow(new IOException("wrong")).when(serviceDiscovery).close();
    zkDiscoveryClient.close();
    Mockito.verify(serviceDiscovery, Mockito.times(2)).close();
}
/** Sends a search-users request to the authorization plugin, serializing the term and auth configs with the version-appropriate message converter and deserializing the matched users from the response. */
public List<User> searchUsers(String pluginId, final String searchTerm, List<SecurityAuthConfig> authConfigs) { return pluginRequestHelper.submitRequest(pluginId, REQUEST_SEARCH_USERS, new DefaultPluginInteractionCallback<>() { @Override public String requestBody(String resolvedExtensionVersion) { return getMessageConverter(resolvedExtensionVersion).searchUsersRequestBody(searchTerm, authConfigs); } @Override public List<User> onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) { return getMessageConverter(resolvedExtensionVersion).getSearchUsersFromResponseBody(responseBody); } }); }
/** The extension must serialize the search term plus auth configs into the v2 request body and parse the plugin's JSON response into User objects. */
@Test void shouldTalkToPlugin_To_SearchUsers() { String requestBody = """ { "search_term": "bob", "auth_configs": [ { "id": "ldap", "configuration": { "foo": "bar" } } ] }"""; String responseBody = "[{\"username\":\"bob\",\"display_name\":\"Bob\",\"email\":\"bob@example.com\"}]"; when(pluginManager.submitTo(eq(PLUGIN_ID), eq(AUTHORIZATION_EXTENSION), requestArgumentCaptor.capture())).thenReturn(new DefaultGoPluginApiResponse(SUCCESS_RESPONSE_CODE, responseBody)); List<User> users = authorizationExtension.searchUsers(PLUGIN_ID, "bob", List.of(new SecurityAuthConfig("ldap", "cd.go.ldap", create("foo", false, "bar")))); assertRequest(requestArgumentCaptor.getValue(), AUTHORIZATION_EXTENSION, "2.0", REQUEST_SEARCH_USERS, requestBody); assertThat(users).hasSize(1) .contains(new User("bob", "Bob", "bob@example.com")); }
@Override
public InputStream getBinaryStream(final int columnIndex) throws SQLException {
    // Delegate to the merged result set, requesting the binary representation.
    final InputStream stream = mergeResultSet.getInputStream(columnIndex, BINARY);
    return stream;
}
@Test
void assertGetBinaryStreamWithColumnIndex() {
    InputStream inputStream = mock(InputStream.class);
    // Stub the underlying merged result set; "Binary" is presumably the value
    // of the BINARY constant passed by getBinaryStream — TODO confirm.
    when(mergeResultSet.getInputStream(1, "Binary")).thenReturn(inputStream);
    assertThat(shardingSphereResultSet.getBinaryStream(1), instanceOf(InputStream.class));
}
@Override
public void upgrade() {
    // Idempotent: bail out if this migration already completed.
    if (hasBeenRunSuccessfully()) {
        LOG.debug("Migration already completed.");
        return;
    }
    // Maps legacy saved-search id -> id of the view created for it.
    final Map<String, String> savedSearchToViewsMap = new HashMap<>();
    final Map<View, Search> newViews = this.savedSearchService.streamAll()
            .map(savedSearch -> {
                final Map.Entry<View, Search> newView = migrateSavedSearch(savedSearch);
                // NOTE(review): side effect inside the stream populates the id map.
                savedSearchToViewsMap.put(savedSearch.id(), newView.getKey().id());
                return newView;
            })
            .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    // Persist each migrated view together with its backing search.
    newViews.forEach((view, search) -> {
        viewService.save(view);
        searchService.save(search);
    });
    // Record completion so subsequent runs become no-ops.
    final MigrationCompleted migrationCompleted = MigrationCompleted.create(savedSearchToViewsMap);
    writeMigrationCompleted(migrationCompleted);
}
@Test
@MongoDBFixtures("sample_saved_search_relative.json")
public void migrateSavedSearchWithRelativeTimerange() throws Exception {
    this.migration.upgrade();
    final MigrationCompleted migrationCompleted = captureMigrationCompleted();
    // The fixture's single saved search must be mapped to the expected view id.
    assertThat(migrationCompleted.savedSearchIds())
            .containsExactly(new AbstractMap.SimpleEntry<>("5c7e5499f38ed7e1d8d6a613", "000000020000000000000000"));
    // Exactly one view and one search are created, matching the expected JSON fixtures.
    assertViewServiceCreatedViews(1, resourceFile("sample_saved_search_relative-expected_views.json"));
    assertSearchServiceCreated(1, resourceFile("sample_saved_search_relative-expected_searches.json"));
}
/**
 * Returns the key of this feature set.
 */
public String getKey() {
    return this.key;
}
@Test
void requireThatConstructorsWork() {
    // Varargs constructor.
    final FeatureSet varargsNode = new FeatureSet("key", "valueA", "valueB");
    assertEquals("key", varargsNode.getKey());
    assertValues(List.of("valueA", "valueB"), varargsNode);
    // Collection constructor.
    final FeatureSet listNode = new FeatureSet("key", List.of("valueA", "valueB"));
    assertEquals("key", listNode.getKey());
    assertValues(List.of("valueA", "valueB"), listNode);
}
public boolean isUserMemberOfRole(final CaseInsensitiveString userName, final CaseInsensitiveString roleName) {
    // Resolve the role up-front; referencing a missing role is a hard error.
    final Role resolved = findByName(roleName);
    bombIfNull(resolved, () -> String.format("Role \"%s\" does not exist!", roleName));
    return resolved.hasMember(userName);
}
@Test
public void shouldThrowExceptionIfRoleDoesNotExist() {
    RolesConfig rolesConfig = new RolesConfig(new RoleConfig(new CaseInsensitiveString("role1"), new RoleUser(new CaseInsensitiveString("user1"))));
    try {
        rolesConfig.isUserMemberOfRole(new CaseInsensitiveString("anyone"), new CaseInsensitiveString("invalid-role-name"));
        // Previously this test passed silently when no exception was thrown;
        // AssertionError is not an Exception, so it escapes the catch below.
        throw new AssertionError("Expected isUserMemberOfRole to throw for a non-existent role");
    } catch (AssertionError e) {
        throw e;
    } catch (Exception e) {
        assertThat(e.getMessage(), is("Role \"invalid-role-name\" does not exist!"));
    }
}
public Optional<RouteContext> loadRouteContext(final OriginSQLRouter originSQLRouter, final QueryContext queryContext, final RuleMetaData globalRuleMetaData,
                                               final ShardingSphereDatabase database, final ShardingCache shardingCache, final ConfigurationProperties props,
                                               final ConnectionContext connectionContext) {
    // Skip caching for SQL longer than the configured threshold.
    if (queryContext.getSql().length() > shardingCache.getConfiguration().getAllowedMaxSqlLength()) {
        return Optional.empty();
    }
    // Only probably-cacheable statements participate in route caching.
    ShardingRouteCacheableCheckResult cacheableCheckResult = shardingCache.getRouteCacheableChecker().check(database, queryContext);
    if (!cacheableCheckResult.isProbablyCacheable()) {
        return Optional.empty();
    }
    // Collect the parameter values that determine sharding; bail out if any
    // referenced marker index is out of range for the actual parameter list.
    List<Object> shardingConditionParams = new ArrayList<>(cacheableCheckResult.getShardingConditionParameterMarkerIndexes().size());
    for (int each : cacheableCheckResult.getShardingConditionParameterMarkerIndexes()) {
        if (each >= queryContext.getParameters().size()) {
            return Optional.empty();
        }
        shardingConditionParams.add(queryContext.getParameters().get(each));
    }
    // Cache key is (SQL text, sharding parameter values).
    Optional<RouteContext> cachedResult = shardingCache.getRouteCache().get(new ShardingRouteCacheKey(queryContext.getSql(), shardingConditionParams))
            .flatMap(ShardingRouteCacheValue::getCachedRouteContext);
    // Cache miss: compute the route via the original router.
    RouteContext result = cachedResult.orElseGet(
            () -> originSQLRouter.createRouteContext(queryContext, globalRuleMetaData, database, shardingCache.getShardingRule(), props, connectionContext));
    // Only freshly computed single-shard routes are cached.
    if (!cachedResult.isPresent() && hitOneShardOnly(result)) {
        shardingCache.getRouteCache().put(new ShardingRouteCacheKey(queryContext.getSql(), shardingConditionParams), new ShardingRouteCacheValue(result));
    }
    return Optional.of(result);
}
@Test
void assertCreateRouteContextWithUnmatchedActualParameterSize() {
    // SQL declares two markers but only one actual parameter is supplied.
    QueryContext queryContext = new QueryContext(sqlStatementContext, "insert into t values (?, ?)", Collections.singletonList(0), new HintValueContext(),
            mockConnectionContext(), mock(ShardingSphereMetaData.class));
    when(shardingCache.getConfiguration()).thenReturn(new ShardingCacheConfiguration(100, null));
    when(shardingCache.getRouteCacheableChecker()).thenReturn(mock(ShardingRouteCacheableChecker.class));
    // The checker reports a sharding parameter at index 1, which is outside the
    // one-element parameter list, so the router must return empty.
    when(shardingCache.getRouteCacheableChecker().check(null, queryContext)).thenReturn(new ShardingRouteCacheableCheckResult(true, Collections.singletonList(1)));
    Optional<RouteContext> actual = new CachedShardingSQLRouter().loadRouteContext(null, queryContext, mock(RuleMetaData.class), null, shardingCache, null, null);
    assertFalse(actual.isPresent());
}
@Benchmark
@Threads(1)
public void testBundleProcessingThreadCounterReset(
        BundleProcessingThreadCounterState counterState) throws Exception {
    // Measures the cost of an inc/reset/inc cycle on the bundle counter.
    counterState.bundleCounter.inc();
    counterState.bundleCounter.reset();
    counterState.bundleCounter.inc();
}
@Test
public void testBundleProcessingThreadCounterReset() throws Exception {
    // Drive the benchmark body once and let the state verify its invariants.
    final BundleProcessingThreadCounterState counterState = new BundleProcessingThreadCounterState();
    final MetricsBenchmark benchmark = new MetricsBenchmark();
    benchmark.testBundleProcessingThreadCounterReset(counterState);
    counterState.check();
}
@Override
public Batch read(@Nullable ShufflePosition startPosition, @Nullable ShufflePosition endPosition) throws IOException {
    // Batches are keyed by their position range in the cache.
    final BatchRange requestedRange = new BatchRange(startPosition, endPosition);
    try {
        return cache.get(requestedRange);
    } catch (RuntimeException | ExecutionException e) {
        // Re-throw IOException as-is; anything else is unexpected.
        Throwables.propagateIfPossible(e, IOException.class);
        throw new RuntimeException("unexpected", e);
    }
}
@Test
public void readerShouldRereadEvictedBatches() throws IOException, ExecutionException {
    ShuffleBatchReader base = mock(ShuffleBatchReader.class);
    CachingShuffleBatchReader reader = new CachingShuffleBatchReader(base);
    when(base.read(null, null)).thenReturn(testBatch);
    // First read populates the cache from the underlying reader.
    ShuffleBatchReader.Batch read = reader.read(null, null);
    assertThat(read, equalTo(testBatch));
    verify(base, times(1)).read(null, null);
    // The batch is now present in the cache under its range key.
    CachingShuffleBatchReader.BatchRange range = new CachingShuffleBatchReader.BatchRange(null, null);
    CachingShuffleBatchReader.Batch batch = reader.cache.get(range);
    assertThat(batch, notNullValue());
    // After eviction the reader must go back to the underlying source.
    reader.cache.invalidateAll();
    read = reader.read(null, null);
    assertThat(read, equalTo(testBatch));
    verify(base, times(2)).read(null, null);
}
public static byte[] readBytes(ByteBuffer buffer) {
    // Delegate to the ranged overload, covering all remaining bytes.
    final int remaining = buffer.remaining();
    return readBytes(buffer, 0, remaining);
}
@Test
public void readBytesFromNonArrayBackedByteBuffer() {
    final byte[] source = "FOOBAR".getBytes(StandardCharsets.US_ASCII);
    // Direct buffers have no backing array, exercising the copy path.
    final ByteBuffer completeBuffer = ByteBuffer.allocateDirect(1024);
    completeBuffer.put(source).flip();
    final ByteBuffer partialBuffer = ByteBuffer.allocateDirect(1024);
    partialBuffer.put(source).flip();
    final byte[] readComplete = ByteBufferUtils.readBytes(completeBuffer);
    final byte[] readPartial = ByteBufferUtils.readBytes(partialBuffer, 0, 3);
    assertThat(readComplete).isEqualTo(source);
    assertThat(readPartial).isEqualTo(Arrays.copyOf(source, 3));
}
@Override
public void setSortKeys(List<? extends SortKey> keys) {
    // RowSorter contract: null is shorthand for an empty list of sort keys.
    // The previous implementation threw NullPointerException on null.
    if (keys == null || keys.isEmpty()) {
        setSortKey(null);
        return;
    }
    // This sorter supports at most a single sort column.
    if (keys.size() > 1) {
        throw new IllegalArgumentException("Only one column can be sorted");
    }
    setSortKey(keys.get(0));
}
@Test
public void setSortKeys_many() {
    // More than one sort key is rejected by this single-column sorter.
    final List<SortKey> twoKeys = asList(new SortKey(0, SortOrder.ASCENDING), new SortKey(1, SortOrder.ASCENDING));
    assertThrows(IllegalArgumentException.class, () -> sorter.setSortKeys(twoKeys));
}
/**
 * Returns the eviction configuration of this config.
 */
public EvictionConfig getEvictionConfig() {
    return this.evictionConfig;
}
@Test
public void testSetMaxSize() {
    // setSize must round-trip through getSize on a fresh config.
    final EvictionConfig evictionConfig = new MapConfig().getEvictionConfig().setSize(1234);
    assertEquals(1234, evictionConfig.getSize());
}
/**
 * Returns the registered name of this transform function.
 */
@Override
public String getName() {
    return FUNCTION_NAME;
}
@Test
public void testModuloNullLiteral() {
    // mod(null, column) must still resolve to the modulo transform function.
    ExpressionContext expression = RequestContextUtils.getExpression(String.format("mod(null, %s)", INT_SV_NULL_COLUMN));
    TransformFunction transformFunction = TransformFunctionFactory.get(expression, _dataSourceMap);
    Assert.assertTrue(transformFunction instanceof ModuloTransformFunction);
    Assert.assertEquals(transformFunction.getName(), "mod");
    double[] expectedValues = new double[NUM_ROWS];
    // Every row is null because the first operand is the null literal.
    RoaringBitmap roaringBitmap = new RoaringBitmap();
    roaringBitmap.add(0L, NUM_ROWS);
    testTransformFunctionWithNull(transformFunction, expectedValues, roaringBitmap);
}
@Override
public void login(final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException {
    final Credentials credentials = host.getCredentials();
    if(host.getProtocol().isPasswordConfigurable()) {
        // Split DOMAIN\user credentials for NTLM; otherwise fall back to the
        // configured default NTLM domain.
        final String domain, username;
        if(credentials.getUsername().contains("\\")) {
            domain = StringUtils.substringBefore(credentials.getUsername(), "\\");
            username = StringUtils.substringAfter(credentials.getUsername(), "\\");
        }
        else {
            username = credentials.getUsername();
            domain = new HostPreferences(host).getProperty("webdav.ntlm.domain");
        }
        // NT credentials for NTLM/SPNEGO authentication schemes.
        for(String scheme : Arrays.asList(AuthSchemes.NTLM, AuthSchemes.SPNEGO)) {
            client.setCredentials(
                    new AuthScope(AuthScope.ANY_HOST, AuthScope.ANY_PORT, AuthScope.ANY_REALM, scheme),
                    new NTCredentials(username, credentials.getPassword(),
                            preferences.getProperty("webdav.ntlm.workstation"), domain)
            );
        }
        // Plain username/password for Basic/Digest/Kerberos.
        for(String scheme : Arrays.asList(AuthSchemes.BASIC, AuthSchemes.DIGEST, AuthSchemes.KERBEROS)) {
            client.setCredentials(
                    new AuthScope(AuthScope.ANY_HOST, AuthScope.ANY_PORT, AuthScope.ANY_REALM, scheme),
                    new UsernamePasswordCredentials(username, credentials.getPassword()));
        }
        if(preferences.getBoolean("webdav.basic.preemptive")) {
            // NOTE(review): host.getPort() is passed for both port arguments — confirm intended.
            client.enablePreemptiveAuthentication(host.getHostname(),
                    host.getPort(),
                    host.getPort(),
                    Charset.forName(preferences.getProperty("http.credentials.charset"))
            );
        }
        else {
            client.disablePreemptiveAuthentication();
        }
    }
    if(credentials.isPassed()) {
        // Credentials already validated by a previous authentication event.
        if(log.isWarnEnabled()) {
            log.warn(String.format("Skip verifying credentials with previous successful authentication event for %s", this));
        }
        return;
    }
    try {
        // Probe the home path with a HEAD request to verify credentials and
        // detect Microsoft IIS specific capabilities.
        final Path home = new DelegatingHomeFeature(new WorkdirHomeFeature(host), new DefaultPathHomeFeature(host)).find();
        final HttpHead head = new HttpHead(new DAVPathEncoder().encode(home));
        try {
            client.execute(head, new MicrosoftIISFeaturesResponseHandler(capabilities));
        }
        catch(SardineException e) {
            switch(e.getStatusCode()) {
                case HttpStatus.SC_NOT_FOUND:
                    // Home path missing is not an authentication failure.
                    if(log.isWarnEnabled()) {
                        log.warn(String.format("Ignore failure %s", e));
                    }
                    break;
                case HttpStatus.SC_NOT_IMPLEMENTED:
                case HttpStatus.SC_FORBIDDEN:
                case HttpStatus.SC_UNSUPPORTED_MEDIA_TYPE:
                case HttpStatus.SC_METHOD_NOT_ALLOWED:
                    if(log.isWarnEnabled()) {
                        log.warn(String.format("Failed HEAD request to %s with %s. Retry with PROPFIND.", host, e.getResponsePhrase()));
                    }
                    cancel.verify();
                    // Possibly only HEAD requests are not allowed
                    final ListService list = this.getFeature(ListService.class);
                    list.list(home, new DisabledListProgressListener() {
                        @Override
                        public void chunk(final Path parent, final AttributedList<Path> list) throws ListCanceledException {
                            try {
                                cancel.verify();
                            }
                            catch(ConnectionCanceledException e) {
                                throw new ListCanceledException(list, e);
                            }
                        }
                    });
                    break;
                case HttpStatus.SC_BAD_REQUEST:
                    if(preferences.getBoolean("webdav.basic.preemptive")) {
                        if(log.isWarnEnabled()) {
                            log.warn(String.format("Disable preemptive authentication for %s due to failure %s", host, e.getResponsePhrase()));
                        }
                        cancel.verify();
                        // Retry once without preemptive Basic authentication.
                        client.disablePreemptiveAuthentication();
                        client.execute(head, new MicrosoftIISFeaturesResponseHandler(capabilities));
                    }
                    else {
                        throw new DAVExceptionMappingService().map(e);
                    }
                    break;
                default:
                    throw new DAVExceptionMappingService().map(e);
            }
        }
    }
    catch(SardineException e) {
        throw new DAVExceptionMappingService().map(e);
    }
    catch(IOException e) {
        throw new HttpExceptionMappingService().map(e);
    }
}
@Test
public void testLoginNTLM() throws Exception {
    // Integration test: authenticates against a live IIS WebDAV endpoint
    // using NTLM credentials taken from the test properties.
    final Host host = new Host(new DAVProtocol(), "winbuild.iterate.ch", new Credentials(
            PROPERTIES.get("webdav.iis.user"), PROPERTIES.get("webdav.iis.password")
    ));
    host.setDefaultPath("/WebDAV");
    final DAVSession session = new DAVSession(host, new DisabledX509TrustManager(), new DefaultX509KeyManager());
    session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
    session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
    session.close();
}
@Override
public int run(InputStream in, PrintStream out, PrintStream err, List<String> args) throws Exception {
    // Optional first argument selects the deprecated JavaCC-based parser.
    boolean useJavaCC = "--useJavaCC".equals(getArg(args, 0, null));

    if (args.size() > (useJavaCC ? 3 : 2)
            || (args.size() == 1 && (args.get(0).equals("--help") || args.get(0).equals("-help")))) {
        err.println("Usage: idl [--useJavaCC] [in [out]]");
        err.println();
        err.println("If an output path is not specified, outputs to stdout.");
        err.println("If no input or output is specified, takes input from");
        err.println("stdin and outputs to stdout.");
        err.println("The special path \"-\" may also be specified to refer to");
        err.println("stdin and stdout.");
        return -1;
    }

    // "-" (or absence) selects stdin/stdout.
    String inputName = getArg(args, useJavaCC ? 1 : 0, "-");
    File inputFile = "-".equals(inputName) ? null : new File(inputName);
    String outputName = getArg(args, useJavaCC ? 2 : 1, "-");
    File outputFile = "-".equals(outputName) ? null : new File(outputName);

    Schema m = null;
    Protocol p;
    if (useJavaCC) {
        // noinspection deprecation
        try (Idl parser = new Idl(inputFile)) {
            p = parser.CompilationUnit();
            for (String warning : parser.getWarningsAfterParsing()) {
                err.println("Warning: " + warning);
            }
        }
    } else {
        IdlReader parser = new IdlReader();
        IdlFile idlFile = inputFile == null ? parser.parse(in) : parser.parse(inputFile.toPath());
        for (String warning : idlFile.getWarnings()) {
            err.println("Warning: " + warning);
        }
        p = idlFile.getProtocol();
        m = idlFile.getMainSchema();
    }

    PrintStream parseOut = out;
    if (outputFile != null) {
        parseOut = new PrintStream(Files.newOutputStream(outputFile.toPath()));
    }

    if (m == null && p == null) {
        err.println("Error: the IDL file does not contain a schema nor a protocol.");
        return 1;
    }

    try {
        // Prefer the main schema when present, otherwise emit the protocol.
        parseOut.print(m == null ? p.toString(true) : m.toString(true));
    } finally {
        if (parseOut != out) // Close only the newly created FileOutputStream
            parseOut.close();
    }
    return 0;
}
@Test
void writeIdlAsProtocol() throws Exception {
    String idl = "src/test/idl/protocol.avdl";
    String protocol = "src/test/idl/protocol.avpr";
    String outfile = "target/test-protocol.avpr";

    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    List<String> arglist = Arrays.asList(idl, outfile);
    new IdlTool().run(null, null, new PrintStream(buffer), arglist);

    // Output file must match the expected protocol JSON.
    assertEquals(readFileAsString(protocol), readFileAsString(outfile));
    // The misplaced doc comment in the IDL input must surface as a warning on err.
    String warnings = readPrintStreamBuffer(buffer);
    assertEquals("Warning: Line 1, char 1: Ignoring out-of-place documentation comment."
            + "\nDid you mean to use a multiline comment ( /* ... */ ) instead?", warnings);
}
@ApiOperation(value = "Delete asset (deleteAsset)",
        notes = "Deletes the asset and all the relations (from and to the asset). Referencing non-existing asset Id will cause an error." + TENANT_OR_CUSTOMER_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAuthority('TENANT_ADMIN')")
@RequestMapping(value = "/asset/{assetId}", method = RequestMethod.DELETE)
@ResponseStatus(value = HttpStatus.OK)
public void deleteAsset(@Parameter(description = ASSET_ID_PARAM_DESCRIPTION) @PathVariable(ASSET_ID) String strAssetId) throws Exception {
    // Validate the raw path variable before parsing it into a typed id.
    checkParameter(ASSET_ID, strAssetId);
    final AssetId assetId = new AssetId(toUUID(strAssetId));
    // Enforces existence and DELETE permission for the current user.
    final Asset asset = checkAssetId(assetId, Operation.DELETE);
    tbAssetService.delete(asset, getCurrentUser());
}
@Test
public void testDeleteAsset() throws Exception {
    Asset asset = new Asset();
    asset.setName("My asset");
    asset.setType("default");
    Asset savedAsset = doPost("/api/asset", asset, Asset.class);

    // Forget notifications caused by the create above.
    Mockito.reset(tbClusterService, auditLogService);

    doDelete("/api/asset/" + savedAsset.getId().getId().toString())
            .andExpect(status().isOk());

    // Deletion must raise exactly one DELETED notification for all listeners.
    testNotifyEntityAllOneTime(savedAsset, savedAsset.getId(), savedAsset.getId(),
            savedTenant.getId(), tenantAdmin.getCustomerId(), tenantAdmin.getId(), tenantAdmin.getEmail(),
            ActionType.DELETED, savedAsset.getId().getId().toString());

    // Subsequent lookup must report the asset as gone.
    String assetIdStr = savedAsset.getId().getId().toString();
    doGet("/api/asset/" + assetIdStr)
            .andExpect(status().isNotFound())
            .andExpect(statusReason(containsString(msgErrorNoFound("Asset", assetIdStr))));
}
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
        final ConfiguredStatement<T> statement
) {
    try {
        // Register schemas for CREATE ... AS SELECT and plain CREATE sources.
        if (statement.getStatement() instanceof CreateAsSelect) {
            registerForCreateAs((ConfiguredStatement<? extends CreateAsSelect>) statement);
        } else if (statement.getStatement() instanceof CreateSource) {
            registerForCreateSource((ConfiguredStatement<? extends CreateSource>) statement);
        }
    } catch (final KsqlStatementException e) {
        // Already carries statement context; rethrow untouched.
        throw e;
    } catch (final KsqlException e) {
        // Wrap generic errors with the masked statement text for diagnostics.
        throw new KsqlStatementException(
                ErrorMessageUtil.buildErrorMessage(e),
                statement.getMaskedStatementText(),
                e.getCause());
    }
    // Remove schema id from SessionConfig
    return stripSchemaIdConfig(statement);
}
@Test
public void shouldRegisterValueSchemaForSchemaRegistryEnabledFormatCreateSourceIfSubjectDoesntExist() throws Exception {
    // Given: an AVRO-valued CREATE STREAM whose subject is not yet registered.
    givenStatement("CREATE STREAM sink (f1 VARCHAR) WITH (kafka_topic='expectedName', key_format='KAFKA', value_format='AVRO', partitions=1);");

    // When:
    injector.inject(statement);

    // Then: the value schema is registered under the "<topic>-value" subject.
    verify(schemaRegistryClient).register("expectedName-value", AVRO_SCHEMA);
}
/**
 * Returns the counter used to number threads created by this factory.
 */
public AtomicInteger getThreadNum() {
    return this.mThreadNum;
}
@Test
public void testGetThreadNum() {
    final NamedThreadFactory factory = new NamedThreadFactory();
    final AtomicInteger counter = factory.getThreadNum();
    assertNotNull(counter);
    // A freshly created factory starts numbering at the initial value.
    assertEquals(INITIAL_THREAD_NUM, counter.get());
}
/**
 * Convenience overload that delegates to the full variant with the trailing
 * boolean flag set to {@code false} (flag semantics are defined by that overload).
 */
public static void getSemanticPropsSingleFromString(
        SingleInputSemanticProperties result,
        String[] forwarded,
        String[] nonForwarded,
        String[] readSet,
        TypeInformation<?> inType,
        TypeInformation<?> outType) {
    getSemanticPropsSingleFromString(
            result, forwarded, nonForwarded, readSet, inType, outType, false);
}
@Test
void testForwardedWildCardInvalidTypes1() {
    // "*" forwards every field, which is invalid when input/output types differ.
    final SingleInputSemanticProperties properties = new SingleInputSemanticProperties();
    assertThatThrownBy(
            () -> SemanticPropUtil.getSemanticPropsSingleFromString(
                    properties, new String[] {"*"}, null, null, fiveIntTupleType, threeIntTupleType))
            .isInstanceOf(InvalidSemanticAnnotationException.class);
}
@Override
public <PS extends Serializer<P>, P> KeyValueIterator<K, V> prefixScan(final P prefix, final PS prefixKeySerializer) {
    Objects.requireNonNull(prefix);
    Objects.requireNonNull(prefixKeySerializer);
    // Adapter that re-maps store-migration failures to a caller-friendly error.
    final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> iteratorSupplier =
            new NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>>() {
                @Override
                public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) {
                    try {
                        return store.prefixScan(prefix, prefixKeySerializer);
                    } catch (final InvalidStateStoreException e) {
                        throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
                    }
                }
            };
    // Compose a single iterator over every underlying store instance.
    final List<ReadOnlyKeyValueStore<K, V>> underlyingStores = storeProvider.stores(storeName, storeType);
    return new DelegatingPeekingKeyValueIterator<>(
            storeName,
            new CompositeKeyValueIterator<>(underlyingStores.iterator(), iteratorSupplier));
}
@Test
public void shouldReturnKeysWithGivenPrefixExcludingNextKeyLargestKey() {
    stubOneUnderlying.put("abc", "a");
    stubOneUnderlying.put("abcd", "b");
    stubOneUnderlying.put("abce", "c");
    // Only "abcd" itself matches the "abcd" prefix; "abc" and "abce" do not.
    final List<KeyValue<String, String>> matches =
            toList(theStore.prefixScan("abcd", new StringSerializer()));
    assertEquals(1, matches.size());
    assertTrue(matches.contains(new KeyValue<>("abcd", "b")));
}
@Override
public boolean publishConfigCas(String dataId, String group, String content, String casMd5) throws NacosException {
    // CAS publish: casMd5 is forwarded so the server only applies the write
    // when the stored content's MD5 matches.
    final String configType = ConfigType.getDefaultType().getType();
    return publishConfigInner(namespace, dataId, group, null, null, null, content, configType, casMd5);
}
@Test
void testPublishConfigCas() throws NacosException {
    String dataId = "1";
    String group = "2";
    String content = "123";
    String namespace = "";
    // MD5 the server is expected to compare against for the CAS write.
    String casMd5 = "96147704e3cb8be8597d55d75d244a02";
    String type = ConfigType.getDefaultType().getType();
    Mockito.when(mockWoker.publishConfig(dataId, group, namespace, null, null, null, content, "", casMd5, type)).thenReturn(true);
    final boolean b = nacosConfigService.publishConfigCas(dataId, group, content, casMd5);
    assertTrue(b);
    // The worker must be invoked exactly once with casMd5 and type propagated.
    Mockito.verify(mockWoker, Mockito.times(1)).publishConfig(dataId, group, namespace, null, null, null, content, "", casMd5, type);
}
@Override
protected String copy(final Path source, final S3Object destination, final TransferStatus status, final StreamListener listener) throws BackgroundException {
    try {
        final List<MultipartPart> completed = new ArrayList<>();
        // ID for the initiated multipart upload.
        final MultipartUpload multipart = session.getClient().multipartStartUpload(
                destination.getBucketName(), destination);
        if(log.isDebugEnabled()) {
            log.debug(String.format("Multipart upload started for %s with ID %s", multipart.getObjectKey(), multipart.getUploadId()));
        }
        final long size = status.getLength();
        long remaining = size;
        long offset = 0;
        // Submit each part copy to the pool and collect the futures in order.
        final List<Future<MultipartPart>> parts = new ArrayList<>();
        for(int partNumber = 1; remaining > 0; partNumber++) {
            // Last part can be less than 5 MB. Adjust part size.
            final long length = Math.min(Math.max((size / S3DefaultMultipartService.MAXIMUM_UPLOAD_PARTS), partsize), remaining);
            // Submit to queue
            parts.add(this.submit(source, multipart, partNumber, offset, length));
            remaining -= length;
            offset += length;
        }
        // Await the part copies in submission order, reporting progress.
        for(Future<MultipartPart> f : parts) {
            final MultipartPart part = Interruptibles.await(f);
            completed.add(part);
            listener.sent(part.getSize());
        }
        // Combining all the given parts into the final object. Processing of a Complete Multipart Upload request
        // could take several minutes to complete. Because a request could fail after the initial 200 OK response
        // has been sent, it is important that you check the response body to determine whether the request succeeded.
        final MultipartCompleted complete = session.getClient().multipartCompleteUpload(multipart, completed);
        if(log.isDebugEnabled()) {
            log.debug(String.format("Completed multipart upload for %s with checksum %s", complete.getObjectKey(), complete.getEtag()));
        }
        return complete.getVersionId();
    }
    catch(ServiceException e) {
        throw new S3ExceptionMappingService().map("Cannot copy {0}", e, source);
    }
    finally {
        // Always release the worker pool, even on failure.
        pool.shutdown(false);
    }
}
@Test
public void testCopy() throws Exception {
    // Integration test against a live S3 bucket.
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path test = new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final byte[] content = RandomUtils.nextBytes(1023);
    final TransferStatus status = new TransferStatus().withLength(content.length);
    status.setChecksum(new SHA256ChecksumCompute().compute(new ByteArrayInputStream(content), status));
    // Upload the source object.
    final OutputStream out = new S3WriteFeature(session, new S3AccessControlListFeature(session)).write(test, status, new DisabledConnectionCallback());
    assertNotNull(out);
    new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(content), out);
    out.close();
    test.attributes().setSize(content.length);
    // Server-side multipart copy to a new key.
    final Path copy = new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final S3MultipartCopyFeature feature = new S3MultipartCopyFeature(session, new S3AccessControlListFeature(session));
    feature.copy(test, copy, status, new DisabledConnectionCallback(), new DisabledStreamListener());
    // Source must remain untouched and the copy must match in size.
    assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(test));
    assertEquals(content.length, new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(test).getSize());
    new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertTrue(new S3FindFeature(session, new S3AccessControlListFeature(session)).find(copy));
    assertEquals(content.length, new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)).find(copy).getSize());
    new S3DefaultDeleteFeature(session).delete(Collections.singletonList(copy), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
@Override
public WebhookDelivery call(Webhook webhook, WebhookPayload payload) {
    final long startedAt = system.now();
    // Accumulate the delivery outcome; errors are captured, never thrown.
    final WebhookDelivery.Builder delivery = new WebhookDelivery.Builder()
            .setAt(startedAt)
            .setPayload(payload)
            .setWebhook(webhook);
    try {
        final HttpUrl url = HttpUrl.parse(webhook.getUrl());
        if (url == null) {
            throw new IllegalArgumentException("Webhook URL is not valid: " + webhook.getUrl());
        }
        delivery.setEffectiveUrl(HttpUrlHelper.obfuscateCredentials(webhook.getUrl(), url));
        final Request request = buildHttpRequest(url, webhook, payload);
        try (Response response = execute(request)) {
            delivery.setHttpStatus(response.code());
        }
    } catch (Exception e) {
        delivery.setError(e);
    }
    return delivery
            .setDurationInMs((int) (system.now() - startedAt))
            .build();
}
@Test
public void redirects_throws_ISE_if_header_Location_is_missing() {
    HttpUrl url = server.url("/redirect");
    Webhook webhook = new Webhook(WEBHOOK_UUID, PROJECT_UUID, CE_TASK_UUID, randomAlphanumeric(40), "my-webhook", url.toString(), null);
    // A 307 redirect without a Location header is malformed and must surface
    // as an IllegalStateException captured in the delivery's error.
    server.enqueue(new MockResponse().setResponseCode(307));

    WebhookDelivery delivery = newSender(false).call(webhook, PAYLOAD);

    Throwable error = delivery.getError().get();
    assertThat(error)
            .isInstanceOf(IllegalStateException.class)
            .hasMessage("Missing HTTP header 'Location' in redirect of " + url);
}
@Override
public UsersSearchRestResponse toUsersForResponse(List<UserInformation> userInformations, PaginationInformation paginationInformation) {
    // Map domain users first, then wrap with pagination metadata.
    final List<UserRestResponse> users = toUsersForResponse(userInformations);
    final PageRestResponse page = new PageRestResponse(
            paginationInformation.pageIndex(), paginationInformation.pageSize(), paginationInformation.total());
    return new UsersSearchRestResponse(users, page);
}
@Test
public void toUsersForResponse_whenNonAdmin_mapsNonAdminFields() {
    when(userSession.isLoggedIn()).thenReturn(true);
    PaginationInformation paging = forPageIndex(1).withPageSize(2).andTotal(3);
    UserInformation userInformation1 = mockSearchResult(1, true);
    UserInformation userInformation2 = mockSearchResult(2, false);

    UsersSearchRestResponse usersForResponse = usersSearchRestResponseGenerator.toUsersForResponse(List.of(userInformation1, userInformation2), paging);

    // Logged-in (non-admin) callers see the restricted field set, in input order.
    UserRestResponseForLoggedInUsers expectUser1 = buildExpectedResponseForUser(userInformation1);
    UserRestResponseForLoggedInUsers expectUser2 = buildExpectedResponseForUser(userInformation2);
    assertThat(usersForResponse.users()).containsExactly(expectUser1, expectUser2);
    assertPaginationInformationAreCorrect(paging, usersForResponse.page());
}
static String trimVersion(String version) { // TODO seems like there should be some trick with VersionNumber to do this return version.replaceFirst(" .+$", ""); }
@Test
public void trimVersion() {
    // Versions without build metadata pass through unchanged.
    assertEquals("3.2", XStream2.trimVersion("3.2"));
    assertEquals("3.2.1", XStream2.trimVersion("3.2.1"));
    // Everything after the first space (private-build suffix) is dropped.
    assertEquals("3.2-SNAPSHOT", XStream2.trimVersion("3.2-SNAPSHOT (private-09/23/2012 12:26-jhacker)"));
}
@Override public ConfigAllInfo findConfigAllInfo(final String dataId, final String group, final String tenant) { final String tenantTmp = StringUtils.isBlank(tenant) ? StringUtils.EMPTY : tenant; try { List<String> configTagList = this.selectTagByConfig(dataId, group, tenant); ConfigInfoMapper configInfoMapper = mapperManager.findMapper(dataSourceService.getDataSourceType(), TableConstant.CONFIG_INFO); ConfigAllInfo configAdvance = this.jt.queryForObject(configInfoMapper.select( Arrays.asList("id", "data_id", "group_id", "tenant_id", "app_name", "content", "md5", "gmt_create", "gmt_modified", "src_user", "src_ip", "c_desc", "c_use", "effect", "type", "c_schema", "encrypted_data_key"), Arrays.asList("data_id", "group_id", "tenant_id")), new Object[] {dataId, group, tenantTmp}, CONFIG_ALL_INFO_ROW_MAPPER); if (configTagList != null && !configTagList.isEmpty()) { StringBuilder configTagsTmp = new StringBuilder(); for (String configTag : configTagList) { if (configTagsTmp.length() == 0) { configTagsTmp.append(configTag); } else { configTagsTmp.append(',').append(configTag); } } configAdvance.setConfigTags(configTagsTmp.toString()); } return configAdvance; } catch (EmptyResultDataAccessException e) { // Indicates that the data does not exist, returns null return null; } catch (CannotGetJdbcConnectionException e) { LogUtil.FATAL_LOG.error("[db-error] " + e, e); throw e; } }
@Test
void testFindConfigAllInfo() {
    String dataId = "dataId1324";
    String group = "group23546";
    String tenant = "tenant13245";
    // Mock the tag lookup for this config.
    List<String> mockTags = Arrays.asList("tag1", "tag2", "tag3");
    when(jdbcTemplate.queryForList(anyString(), eq(new Object[] {dataId, group, tenant}), eq(String.class))).thenReturn(mockTags);
    String schema = "schema12345654";
    // Mock the config row itself.
    ConfigAllInfo mockedConfig = new ConfigAllInfo();
    mockedConfig.setSchema(schema);
    when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}), eq(CONFIG_ALL_INFO_ROW_MAPPER))).thenReturn(
            mockedConfig);
    // Execute: returns the mocked object.
    ConfigAllInfo configAllInfo = externalConfigInfoPersistService.findConfigAllInfo(dataId, group, tenant);
    // Expect schema to pass through and tags joined with commas.
    assertEquals(mockedConfig.getSchema(), configAllInfo.getSchema());
    assertEquals(String.join(",", mockTags), configAllInfo.getConfigTags());
    // Empty result must map to null.
    when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}), eq(CONFIG_ALL_INFO_ROW_MAPPER))).thenThrow(
            new EmptyResultDataAccessException(1));
    assertNull(externalConfigInfoPersistService.findConfigAllInfo(dataId, group, tenant));
    // Connection failures must be rethrown.
    when(jdbcTemplate.queryForObject(anyString(), eq(new Object[] {dataId, group, tenant}), eq(CONFIG_ALL_INFO_ROW_MAPPER))).thenThrow(
            new CannotGetJdbcConnectionException("mock exp"));
    try {
        externalConfigInfoPersistService.findConfigAllInfo(dataId, group, tenant);
        // Fails the test if no exception was thrown: assertFalse(true) raises
        // AssertionError, which is NOT caught by the catch (Exception) below.
        assertFalse(true);
    } catch (Exception e) {
        assertTrue(e instanceof CannotGetJdbcConnectionException);
        assertTrue(e.getMessage().endsWith("mock exp"));
    }
}
@Override
public boolean next() throws SQLException {
    // Exhausted by a prior all-direction execution: nothing further to emit.
    if (isExecutedAllDirection) {
        return false;
    }
    if (orderByValuesQueue.isEmpty()) {
        return false;
    }
    // First call: head of the queue is already positioned on its first row,
    // so just consume one fetch credit.
    if (isFirstNext) {
        isFirstNext = false;
        fetchCount--;
        return true;
    }
    // Advance the head result and re-queue it if it still has rows, keeping
    // the priority queue ordered by the ORDER BY values.
    OrderByValue firstOrderByValue = orderByValuesQueue.poll();
    if (firstOrderByValue.next()) {
        orderByValuesQueue.offer(firstOrderByValue);
    }
    if (orderByValuesQueue.isEmpty()) {
        return false;
    }
    setCurrentQueryResult(orderByValuesQueue.peek().getQueryResult());
    // ALL-direction ignores the fetch count; otherwise consume one credit.
    return DirectionType.isAllDirectionType(directionType) || fetchCount-- > 0L;
}
@Test
void assertNextForResultSetsAllEmptyWhenConfigAllDirectionType() throws SQLException {
    // Three underlying results, all empty: the merged result must be empty too.
    final List<QueryResult> emptyResults = Arrays.asList(
            mock(QueryResult.class, RETURNS_DEEP_STUBS),
            mock(QueryResult.class, RETURNS_DEEP_STUBS),
            mock(QueryResult.class, RETURNS_DEEP_STUBS));
    final MergedResult mergedResult = resultMerger.merge(emptyResults, fetchAllStatementContext, database, connectionContext);
    assertFalse(mergedResult.next());
}
/**
 * Validates an update request against an existing function config and returns
 * the merged result. Identity fields (tenant/namespace/name) and a number of
 * immutable settings must match; updatable fields from {@code newConfig}
 * overwrite the copy of {@code existingConfig}.
 *
 * NOTE(review): {@code newConfig} is mutated in place here (its input specs
 * are normalized from inputs/topicsPattern/custom serde/schema maps).
 * NOTE(review): toBuilder().build() likely makes a shallow copy, so
 * mergedConfig.getInputSpecs() may alias existingConfig's map — confirm the
 * builder's copy semantics before relying on existingConfig staying unchanged.
 *
 * @param existingConfig the currently deployed configuration
 * @param newConfig the requested update
 * @return the merged configuration to apply
 * @throws IllegalArgumentException if any immutable field would be altered
 */
public static FunctionConfig validateUpdate(FunctionConfig existingConfig, FunctionConfig newConfig) {
    FunctionConfig mergedConfig = existingConfig.toBuilder().build();
    // Identity fields can never change across an update.
    if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
        throw new IllegalArgumentException("Tenants differ");
    }
    if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
        throw new IllegalArgumentException("Namespaces differ");
    }
    if (!existingConfig.getName().equals(newConfig.getName())) {
        throw new IllegalArgumentException("Function Names differ");
    }
    if (!StringUtils.isEmpty(newConfig.getClassName())) {
        mergedConfig.setClassName(newConfig.getClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getJar())) {
        mergedConfig.setJar(newConfig.getJar());
    }
    // Ensure both input-spec maps exist before normalization below.
    if (newConfig.getInputSpecs() == null) {
        newConfig.setInputSpecs(new HashMap<>());
    }
    if (mergedConfig.getInputSpecs() == null) {
        mergedConfig.setInputSpecs(new HashMap<>());
    }
    // Normalize every input-declaration style into newConfig's input specs.
    if (newConfig.getInputs() != null) {
        newConfig.getInputs().forEach((topicName -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder().isRegexPattern(false).build());
        }));
    }
    if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
        newConfig.getInputSpecs().put(newConfig.getTopicsPattern(), ConsumerConfig.builder()
                .isRegexPattern(true)
                .build());
    }
    if (newConfig.getCustomSerdeInputs() != null) {
        newConfig.getCustomSerdeInputs().forEach((topicName, serdeClassName) -> {
            newConfig.getInputSpecs().put(topicName, ConsumerConfig.builder()
                    .serdeClassName(serdeClassName)
                    .isRegexPattern(false)
                    .build());
        });
    }
    if (newConfig.getCustomSchemaInputs() != null) {
        newConfig.getCustomSchemaInputs().forEach((topicName, schemaClassname) -> {
            newConfig.getInputSpecs().put(topicName, ConsumerConfig.builder()
                    .schemaType(schemaClassname)
                    .isRegexPattern(false)
                    .build());
        });
    }
    // Input topics may only be re-specified, never added or switched between
    // regex and literal form.
    if (!newConfig.getInputSpecs().isEmpty()) {
        newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
            if (!existingConfig.getInputSpecs().containsKey(topicName)) {
                throw new IllegalArgumentException("Input Topics cannot be altered");
            }
            if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
                throw new IllegalArgumentException(
                        "isRegexPattern for input topic " + topicName + " cannot be altered");
            }
            mergedConfig.getInputSpecs().put(topicName, consumerConfig);
        });
    }
    if (!StringUtils.isEmpty(newConfig.getOutputSerdeClassName()) && !newConfig.getOutputSerdeClassName()
            .equals(existingConfig.getOutputSerdeClassName())) {
        throw new IllegalArgumentException("Output Serde mismatch");
    }
    if (!StringUtils.isEmpty(newConfig.getOutputSchemaType()) && !newConfig.getOutputSchemaType()
            .equals(existingConfig.getOutputSchemaType())) {
        throw new IllegalArgumentException("Output Schema mismatch");
    }
    if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
        mergedConfig.setLogTopic(newConfig.getLogTopic());
    }
    if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
            .equals(existingConfig.getProcessingGuarantees())) {
        throw new IllegalArgumentException("Processing Guarantees cannot be altered");
    }
    if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
            .equals(existingConfig.getRetainOrdering())) {
        throw new IllegalArgumentException("Retain Ordering cannot be altered");
    }
    if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
            .equals(existingConfig.getRetainKeyOrdering())) {
        throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
    }
    if (!StringUtils.isEmpty(newConfig.getOutput())) {
        mergedConfig.setOutput(newConfig.getOutput());
    }
    if (newConfig.getUserConfig() != null) {
        mergedConfig.setUserConfig(newConfig.getUserConfig());
    }
    if (newConfig.getSecrets() != null) {
        mergedConfig.setSecrets(newConfig.getSecrets());
    }
    if (newConfig.getRuntime() != null && !newConfig.getRuntime().equals(existingConfig.getRuntime())) {
        throw new IllegalArgumentException("Runtime cannot be altered");
    }
    if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
        throw new IllegalArgumentException("AutoAck cannot be altered");
    }
    if (newConfig.getMaxMessageRetries() != null) {
        mergedConfig.setMaxMessageRetries(newConfig.getMaxMessageRetries());
    }
    if (!StringUtils.isEmpty(newConfig.getDeadLetterTopic())) {
        mergedConfig.setDeadLetterTopic(newConfig.getDeadLetterTopic());
    }
    if (!StringUtils.isEmpty(newConfig.getSubName()) && !newConfig.getSubName()
            .equals(existingConfig.getSubName())) {
        throw new IllegalArgumentException("Subscription Name cannot be altered");
    }
    if (newConfig.getParallelism() != null) {
        mergedConfig.setParallelism(newConfig.getParallelism());
    }
    if (newConfig.getResources() != null) {
        // Resources are merged field-by-field rather than replaced wholesale.
        mergedConfig
                .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
    }
    if (newConfig.getWindowConfig() != null) {
        mergedConfig.setWindowConfig(newConfig.getWindowConfig());
    }
    if (newConfig.getTimeoutMs() != null) {
        mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
    }
    if (newConfig.getCleanupSubscription() != null) {
        mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
    }
    if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
        mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
    }
    if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
        mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
    }
    if (newConfig.getProducerConfig() != null) {
        mergedConfig.setProducerConfig(newConfig.getProducerConfig());
    }
    return mergedConfig;
}
// Changing the subscription name in an update must be rejected.
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Subscription Name cannot be altered")
public void testMergeDifferentSubname() {
    FunctionConfig functionConfig = createFunctionConfig();
    FunctionConfig newFunctionConfig = createUpdatedFunctionConfig("subName", "Different");
    FunctionConfig mergedConfig = FunctionConfigUtils.validateUpdate(functionConfig, newFunctionConfig);
}
@ProcessElement public ProcessContinuation processElement( @Element PulsarSourceDescriptor pulsarSourceDescriptor, RestrictionTracker<OffsetRange, Long> tracker, WatermarkEstimator watermarkEstimator, OutputReceiver<PulsarMessage> output) throws IOException { long startTimestamp = tracker.currentRestriction().getFrom(); String topicDescriptor = pulsarSourceDescriptor.getTopic(); try (Reader<byte[]> reader = newReader(this.client, topicDescriptor)) { if (startTimestamp > 0) { reader.seek(startTimestamp); } while (true) { if (reader.hasReachedEndOfTopic()) { reader.close(); return ProcessContinuation.stop(); } Message<byte[]> message = reader.readNext(); if (message == null) { return ProcessContinuation.resume(); } Long currentTimestamp = message.getPublishTime(); // if tracker.tryclaim() return true, sdf must execute work otherwise // doFn must exit processElement() without doing any work associated // or claiming more work if (!tracker.tryClaim(currentTimestamp)) { reader.close(); return ProcessContinuation.stop(); } if (pulsarSourceDescriptor.getEndMessageId() != null) { MessageId currentMsgId = message.getMessageId(); boolean hasReachedEndMessageId = currentMsgId.compareTo(pulsarSourceDescriptor.getEndMessageId()) == 0; if (hasReachedEndMessageId) { return ProcessContinuation.stop(); } } PulsarMessage pulsarMessage = new PulsarMessage(message.getTopicName(), message.getPublishTime(), message); Instant outputTimestamp = extractOutputTimestampFn.apply(message); output.outputWithTimestamp(pulsarMessage, outputTimestamp); } } }
// With an end message id at ledger/entry 50, reading must stop and the
// receiver must have collected exactly the 50 messages before the boundary.
@Test
public void testProcessElementWhenEndMessageIdIsDefined() throws Exception {
    MockOutputReceiver receiver = new MockOutputReceiver();
    OffsetRangeTracker tracker = new OffsetRangeTracker(new OffsetRange(0L, Long.MAX_VALUE));
    MessageId endMessageId = DefaultImplementation.newMessageId(50L, 50L, 50);
    DoFn.ProcessContinuation result = dofnInstance.processElement(
            PulsarSourceDescriptor.of(TOPIC, null, null, endMessageId, SERVICE_URL, ADMIN_URL),
            tracker,
            null,
            (DoFn.OutputReceiver) receiver);
    assertEquals(DoFn.ProcessContinuation.stop(), result);
    assertEquals(50, receiver.getOutputs().size());
}
/**
 * Builds a {@code Statement} AST node from the given parse tree, first
 * resolving the data sources referenced by the tree and passing them to the
 * underlying builder.
 *
 * @param parseTree the ANTLR parse tree of a single statement
 * @return the built statement
 */
public Statement buildStatement(final ParserRuleContext parseTree) {
    return build(Optional.of(getSources(parseTree)), parseTree);
}
// ASSERT NOT EXISTS SCHEMA with an id and timeout must parse into an
// AssertSchema with no subject, the given id/timeout, and checkExists=false.
@Test
public void shouldBuildAssertNotExistsSchemaWithIdAndTimeout() {
    // Given:
    final SingleStatementContext stmt = givenQuery("ASSERT NOT EXISTS SCHEMA ID 24 TIMEOUT 10 SECONDS;");
    // When:
    final AssertSchema assertSchema = (AssertSchema) builder.buildStatement(stmt);
    // Then:
    assertThat(assertSchema.getSubject(), is(Optional.empty()));
    assertThat(assertSchema.getId(), is(Optional.of(24)));
    assertThat(assertSchema.getTimeout().get().getTimeUnit(), is(TimeUnit.SECONDS));
    assertThat(assertSchema.checkExists(), is(false));
}
/**
 * Converts a gRPC upstream into an {@code EquivalentAddressGroup} built from
 * its {@code host:port} upstream url and the upstream's attributes.
 *
 * Fix: the original used {@code split(":")} and indexed blindly, failing with
 * an uninformative ArrayIndexOutOfBoundsException when no port is present and
 * picking the wrong segment when the host itself contains colons. The port is
 * now taken after the last ':' and a missing separator is rejected explicitly.
 *
 * @param instance the upstream whose url is parsed
 * @return the address group for the upstream
 * @throws IllegalArgumentException if the url has no ':' separator
 * @throws NumberFormatException if the port segment is not an integer
 */
public static EquivalentAddressGroup convertToEquivalentAddressGroup(final GrpcUpstream instance) {
    final String upstreamUrl = instance.getUpstreamUrl();
    final int separator = upstreamUrl.lastIndexOf(':');
    if (separator < 0) {
        throw new IllegalArgumentException("upstream url must be in 'host:port' form: " + upstreamUrl);
    }
    final String host = upstreamUrl.substring(0, separator);
    final int port = Integer.parseInt(upstreamUrl.substring(separator + 1));
    return new EquivalentAddressGroup(new InetSocketAddress(host, port), createAttributes(instance));
}
// Smoke test: a well-formed upstream converts to a non-null address group.
@Test
public void convertToEquivalentAddressGroup() {
    assertNotNull(ShenyuResolverHelper.convertToEquivalentAddressGroup(grpcUpstream));
}
/**
 * Folds {@code value} into the running maximum.
 * A null argument fails with a NullPointerException on unboxing, exactly as
 * the previous Math.max-based implementation did.
 */
@Override
public void add(Integer value) {
    final int candidate = value;
    if (candidate > this.max) {
        this.max = candidate;
    }
}
// The accumulator must keep the largest value seen, negatives included.
@Test
void testAdd() {
    IntMaximum max = new IntMaximum();
    max.add(1234);
    max.add(9876);
    max.add(-987);
    max.add(-123);
    assertThat(max.getLocalValue().intValue()).isEqualTo(9876);
}
/**
 * Returns the standard deviation, computed as sqrt(1 - p) / p.
 */
@Override
public double sd() {
    final double q = 1.0 - p;
    return Math.sqrt(q) / p;
}
// sqrt(1 - 0.3) / 0.3 ≈ 2.788867; rand() is only called to exercise the sampler.
@Test
public void testSd() {
    System.out.println("sd");
    ShiftedGeometricDistribution instance = new ShiftedGeometricDistribution(0.3);
    instance.rand();
    assertEquals(2.788867, instance.sd(), 1E-6);
}
/**
 * Generates a unique processor name by appending a zero-padded, monotonically
 * increasing ten-digit counter to the given prefix.
 */
@Override
public String newProcessorName(final String prefix) {
    return String.format("%s%010d", prefix, index.getAndIncrement());
}
// Each builder numbers processors independently, starting from zero.
@Test
public void testNewName() {
    assertEquals("X-0000000000", builder.newProcessorName("X-"));
    assertEquals("Y-0000000001", builder.newProcessorName("Y-"));
    assertEquals("Z-0000000002", builder.newProcessorName("Z-"));
    final InternalStreamsBuilder newBuilder = new InternalStreamsBuilder(new InternalTopologyBuilder());
    assertEquals("X-0000000000", newBuilder.newProcessorName("X-"));
    assertEquals("Y-0000000001", newBuilder.newProcessorName("Y-"));
    assertEquals("Z-0000000002", newBuilder.newProcessorName("Z-"));
}
/**
 * Finishes the stream and closes the underlying output.
 *
 * Fix: the original left {@code os} open if {@code finish()} threw; the
 * finally block now guarantees the underlying stream is always released.
 *
 * @throws IOException if finishing or closing fails
 */
@Override
public void close() throws IOException {
    try {
        finish();
    } finally {
        os.close();
    }
}
// close() must flush buffered values (finish) so readers see every element.
@Test
public void testClosingFinishesTheStream() throws Exception {
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    BufferedElementCountingOutputStream os = createAndWriteValues(toBytes("abcdefghij"), baos);
    os.close();
    verifyValues(toBytes("abcdefghij"), new ByteArrayInputStream(baos.toByteArray()));
}
/**
 * Looks up an account by service identifier, dispatching on the identifier's
 * identity type: ACI identifiers resolve via the account identifier path,
 * PNI identifiers via the phone-number identifier path.
 */
public CompletableFuture<Optional<Account>> getByServiceIdentifierAsync(final ServiceIdentifier serviceIdentifier) {
    final UUID uuid = serviceIdentifier.uuid();
    return switch (serviceIdentifier.identityType()) {
        case ACI -> getByAccountIdentifierAsync(uuid);
        case PNI -> getByPhoneNumberIdentifierAsync(uuid);
    };
}
@Test
void testGetByServiceIdentifierAsync() {
    final UUID aci = UUID.randomUUID();
    final UUID pni = UUID.randomUUID();
    // Redis holds a PNI->ACI mapping and a cached account document for the ACI.
    when(asyncCommands.get(eq("AccountMap::" + pni))).thenReturn(MockRedisFuture.completedFuture(aci.toString()));
    when(asyncCommands.get(eq("Account3::" + aci))).thenReturn(MockRedisFuture.completedFuture(
        "{\"number\": \"+14152222222\", \"pni\": \"" + pni + "\"}"));
    when(asyncCommands.setex(any(), anyLong(), any())).thenReturn(MockRedisFuture.completedFuture("OK"));
    // The backing store always misses, so only cache hits can resolve a lookup.
    when(accounts.getByAccountIdentifierAsync(any()))
        .thenReturn(CompletableFuture.completedFuture(Optional.empty()));
    when(accounts.getByPhoneNumberIdentifierAsync(any()))
        .thenReturn(CompletableFuture.completedFuture(Optional.empty()));
    // Lookups succeed only when the identifier type matches how the UUID was stored.
    assertTrue(accountsManager.getByServiceIdentifierAsync(new AciServiceIdentifier(aci)).join().isPresent());
    assertTrue(accountsManager.getByServiceIdentifierAsync(new PniServiceIdentifier(pni)).join().isPresent());
    assertFalse(accountsManager.getByServiceIdentifierAsync(new AciServiceIdentifier(pni)).join().isPresent());
    assertFalse(accountsManager.getByServiceIdentifierAsync(new PniServiceIdentifier(aci)).join().isPresent());
}
/**
 * Publishes a config entry using the platform's default config type, by
 * delegating to the typed overload.
 */
@Override
public boolean publishConfig(String dataId, String group, String content) throws NacosException {
    final String defaultType = ConfigType.getDefaultType().getType();
    return publishConfig(dataId, group, content, defaultType);
}
// The typed publishConfig overload must forward the explicit PROPERTIES type
// (and the empty namespace) to the worker exactly once.
@Test
void testPublishConfig2() throws NacosException {
    String dataId = "1";
    String group = "2";
    String content = "123";
    String namespace = "";
    String type = ConfigType.PROPERTIES.getType();
    Mockito.when(mockWoker.publishConfig(dataId, group, namespace, null, null, null, content, "", null, type)).thenReturn(true);
    final boolean b = nacosConfigService.publishConfig(dataId, group, content, type);
    assertTrue(b);
    Mockito.verify(mockWoker, Mockito.times(1)).publishConfig(dataId, group, namespace, null, null, null, content, "", null, type);
}
/**
 * Resolves the list of files to stage (defaulting to the classpath when none
 * were configured), prepares them under a temporary jar location, and writes
 * the result back into the options.
 */
public static void prepareFilesForStaging(FileStagingOptions options) {
    List<String> filesToStage = options.getFilesToStage();
    if (filesToStage == null || filesToStage.isEmpty()) {
        // Nothing configured explicitly: fall back to classpath detection.
        filesToStage = detectClassPathResourcesToStage(ReflectHelpers.findClassLoader(), options);
        LOG.info(
            "PipelineOptions.filesToStage was not specified. "
                + "Defaulting to files from the classpath: will stage {} files. "
                + "Enable logging at DEBUG level to see which files will be staged.",
            filesToStage.size());
        LOG.debug("Classpath elements: {}", filesToStage);
    }
    // Prefer the configured temp location; otherwise use the JVM temp dir.
    final String tmpJarLocation =
        MoreObjects.firstNonNull(options.getTempLocation(), System.getProperty("java.io.tmpdir"));
    options.setFilesToStage(prepareFilesForStaging(filesToStage, tmpJarLocation));
}
// Staging must fail fast when any configured file does not exist, even if
// other entries in the list are valid.
@Test
public void testFailOnNonExistingPaths() throws IOException {
    String nonexistentFilePath = tmpFolder.getRoot().getPath() + "/nonexistent/file";
    String existingFilePath = tmpFolder.newFile("existingFile").getAbsolutePath();
    List<String> filesToStage = Arrays.asList(nonexistentFilePath, existingFilePath);
    String temporaryLocation = tmpFolder.newFolder().getAbsolutePath();
    assertThrows(
        "To-be-staged file does not exist: ",
        IllegalStateException.class,
        () -> PipelineResources.prepareFilesForStaging(filesToStage, temporaryLocation));
}
/**
 * Asserts that the subject has exactly {@code expectedSize} elements.
 *
 * @param expectedSize the expected element count; must be non-negative
 * @throws IllegalArgumentException if {@code expectedSize} is negative
 */
public final void hasSize(int expectedSize) {
    // Reject impossible expectations before evaluating the subject.
    checkArgument(expectedSize >= 0, "expectedSize(%s) must be >= 0", expectedSize);
    // A null subject is a usage error, surfaced before the size comparison.
    int actualSize = size(checkNotNull(actual));
    check("size()").that(actualSize).isEqualTo(expectedSize);
}
// A negative expected size is a caller error and must be rejected up front.
@Test
public void hasSizeNegative() {
    try {
        assertThat(ImmutableList.of(1, 2, 3)).hasSize(-1);
        fail();
    } catch (IllegalArgumentException expected) {
    }
}
/**
 * FEEL sort(): sorts {@code list} using the user-supplied "precedes" FEEL
 * function, or natural ordering when no function is given.
 */
public FEELFnResult<List<Object>> invoke(@ParameterName( "ctx" ) EvaluationContext ctx, @ParameterName("list") List list, @ParameterName("precedes") FEELFunction function) {
    if ( function == null ) {
        // No ordering function supplied: fall back to the natural-order overload.
        return invoke( list );
    } else {
        // Adapt the FEEL "precedes" function into a Java Comparator: a true
        // result means `a` sorts before `b`.
        return invoke(list, (a, b) -> {
            final Object result = function.invokeReflectively(ctx, new Object[]{a, b});
            // NOTE(review): any non-Boolean result is treated the same as true
            // (a precedes b) — confirm this lenient handling is intended.
            if (!(result instanceof Boolean) || ((Boolean) result)) {
                return -1;
            } else {
                return 1;
            }
        } );
    }
}
// Sorting a single-element list is the identity.
@Test
void invokeListSingleItem() {
    FunctionTestUtil.assertResultList(sortFunction.invoke(Collections.singletonList(10)),
            Collections.singletonList(10));
}
/**
 * Analyzes a predicate tree, producing its size, per-subtree sizes, and the
 * minimum feature estimate (rounded up, plus one if any negation is present).
 */
public static PredicateTreeAnalyzerResult analyzePredicateTree(Predicate predicate) {
    final AnalyzerContext context = new AnalyzerContext();
    // Statistics aggregation must run first: it populates the context that
    // the min-feature computation reads.
    final int treeSize = aggregatePredicateStatistics(predicate, false, context);
    final double rawMinFeature = findMinFeature(predicate, false, context);
    final int negationAdjustment = context.hasNegationPredicate ? 1 : 0;
    final int minFeature = (int) Math.ceil(rawMinFeature) + negationAdjustment;
    return new PredicateTreeAnalyzerResult(minFeature, treeSize, context.subTreeSizes);
}
// Negated features must not inflate the minFeature estimate the way their
// positive counterparts do; the expected values pin that behavior.
@Test
void require_that_not_features_dont_count_towards_minfeature_calculation() {
    Predicate p = and(
            feature("foo").inSet("A"),
            not(feature("foo").inSet("A")),
            not(feature("foo").inSet("B")),
            feature("foo").inSet("B"));
    PredicateTreeAnalyzerResult r = PredicateTreeAnalyzer.analyzePredicateTree(p);
    assertEquals(3, r.minFeature);
    assertEquals(6, r.treeSize);
}
/**
 * Parses raw PlantUML lines into a diagram: comments are stripped, components
 * are parsed first, then the dependencies between them.
 */
private PlantUmlDiagram createDiagram(List<String> rawDiagramLines) {
    final List<String> lines = filterOutComments(rawDiagramLines);
    final PlantUmlComponents plantUmlComponents = new PlantUmlComponents(parseComponents(lines));
    return new PlantUmlDiagram.Builder(plantUmlComponents)
            .withDependencies(parseDependencies(plantUmlComponents, lines))
            .build();
}
// A component declared with a stereotype but no alias must parse into a
// component carrying that stereotype and an absent alias.
@Test
public void parses_a_simple_component() {
    PlantUmlDiagram diagram = createDiagram(TestDiagram.in(temporaryFolder)
            .component("SomeOrigin").withStereoTypes("..origin..")
            .write());
    PlantUmlComponent origin = getComponentWithName("SomeOrigin", diagram);
    assertThat(getOnlyElement(origin.getStereotypes())).as("Stereotype")
            .isEqualTo(new Stereotype("..origin.."));
    assertThat(origin.getAlias().isPresent()).as("alias is present").isFalse();
}
/**
 * Adds the element, delegating to the hash-aware overload. A null element
 * fails fast with a NullPointerException from the hashCode() dereference,
 * matching the previous behavior.
 */
@Override
public boolean add(E element) {
    final int hash = element.hashCode();
    return add(element, hash);
}
// containsAll must be null-hostile: a null element in the query collection
// triggers a NullPointerException even when other elements are present.
@Test(expected = NullPointerException.class)
public void testContainsAllThrowsOnNullElement() {
    final OAHashSet<Integer> set = new OAHashSet<>(8);
    set.add(1);
    final Collection<Integer> elementsToCheck = new ArrayList<>(2);
    elementsToCheck.add(1);
    elementsToCheck.add(null);
    set.containsAll(elementsToCheck);
}
/**
 * Checks that a transformation carries a description of at least
 * {@code minLength} characters, returning one APPROVAL or ERROR feedback.
 * Disabled rules and non-TransMeta subjects yield no feedback.
 */
@Override
public List<ImportValidationFeedback> verifyRule( Object subject ) {
    List<ImportValidationFeedback> feedback = new ArrayList<>();
    if ( !isEnabled() || !( subject instanceof TransMeta ) ) {
        return feedback;
    }
    String description = ( (TransMeta) subject ).getDescription();
    boolean longEnough = description != null && description.length() >= minLength;
    if ( longEnough ) {
        feedback.add( new ImportValidationFeedback(
            this, ImportValidationResultType.APPROVAL, "A description is present" ) );
    } else {
        feedback.add( new ImportValidationFeedback(
            this, ImportValidationResultType.ERROR, "A description is not present or is too short." ) );
    }
    return feedback;
}
// A null description on an enabled rule must be reported as an ERROR.
@Test
public void testVerifyRule_NullDescription_EnabledRule() {
    TransformationHasDescriptionImportRule importRule = getImportRule( 10, true );
    TransMeta transMeta = new TransMeta();
    transMeta.setDescription( null );
    List<ImportValidationFeedback> feedbackList = importRule.verifyRule( transMeta );
    assertNotNull( feedbackList );
    assertFalse( feedbackList.isEmpty() );
    ImportValidationFeedback feedback = feedbackList.get( 0 );
    assertNotNull( feedback );
    assertEquals( ImportValidationResultType.ERROR, feedback.getResultType() );
    assertTrue( feedback.isError() );
}
/**
 * Inspects the intercepted method's parameter annotations and, for every
 * argument marked with {@code @ApolloAuditLogDataInfluence}, records the
 * data-influence entry using the table/field names found on the same
 * parameter (either may be absent and stay null).
 */
void auditDataInfluenceArg(ProceedingJoinPoint pjp) {
    Method method = findMethod(pjp);
    if (Objects.isNull(method)) {
        return;
    }
    Object[] args = pjp.getArgs();
    Annotation[][] parameterAnnotations = method.getParameterAnnotations();
    for (int i = 0; i < args.length; i++) {
        boolean needAudit = false;
        String entityName = null;
        String fieldName = null;
        // The three annotation types are distinct, so at most one branch
        // matches per annotation instance.
        for (Annotation annotation : parameterAnnotations[i]) {
            if (annotation instanceof ApolloAuditLogDataInfluence) {
                needAudit = true;
            } else if (annotation instanceof ApolloAuditLogDataInfluenceTable) {
                entityName = ((ApolloAuditLogDataInfluenceTable) annotation).tableName();
            } else if (annotation instanceof ApolloAuditLogDataInfluenceTableField) {
                fieldName = ((ApolloAuditLogDataInfluenceTableField) annotation).fieldName();
            }
        }
        if (needAudit) {
            parseArgAndAppend(entityName, fieldName, args[i]);
        }
    }
}
// Expects exactly one parse-and-append call, for the first argument, carrying
// the "App" table and "Name" field taken from the parameter annotations.
@Test
public void testAuditDataInfluenceArg() throws NoSuchMethodException {
    ProceedingJoinPoint mockPJP = mock(ProceedingJoinPoint.class);
    Object[] args = new Object[]{new Object(), new Object()};
    Method method = MockAuditClass.class.getMethod("mockAuditMethod", Object.class, Object.class);
    {
        doReturn(method).when(aspect).findMethod(any());
        when(mockPJP.getArgs()).thenReturn(args);
    }
    aspect.auditDataInfluenceArg(mockPJP);
    verify(aspect, times(1))
        .parseArgAndAppend(eq("App"), eq("Name"), eq(args[0]));
}
/**
 * Parses and validates a REST payload of sink connector offsets into a
 * {@code TopicPartition -> offset} map. A null offset map for a partition
 * denotes an offset reset and maps to a null value.
 *
 * @param partitionOffsets raw partition-map to offset-map entries from the request
 * @return the validated topic-partition to offset mapping
 * @throws BadRequestException on any structurally invalid partition or offset entry
 */
public static Map<TopicPartition, Long> parseSinkConnectorOffsets(Map<Map<String, ?>, Map<String, ?>> partitionOffsets) {
    Map<TopicPartition, Long> parsedOffsetMap = new HashMap<>();
    for (Map.Entry<Map<String, ?>, Map<String, ?>> partitionOffset : partitionOffsets.entrySet()) {
        Map<String, ?> partitionMap = partitionOffset.getKey();
        if (partitionMap == null) {
            throw new BadRequestException("The partition for a sink connector offset cannot be null or missing");
        }
        if (!partitionMap.containsKey(KAFKA_TOPIC_KEY) || !partitionMap.containsKey(KAFKA_PARTITION_KEY)) {
            throw new BadRequestException(String.format("The partition for a sink connector offset must contain the keys '%s' and '%s'",
                    KAFKA_TOPIC_KEY, KAFKA_PARTITION_KEY));
        }
        if (partitionMap.get(KAFKA_TOPIC_KEY) == null) {
            throw new BadRequestException("Kafka topic names must be valid strings and may not be null");
        }
        if (partitionMap.get(KAFKA_PARTITION_KEY) == null) {
            throw new BadRequestException("Kafka partitions must be valid numbers and may not be null");
        }
        String topic = String.valueOf(partitionMap.get(KAFKA_TOPIC_KEY));
        int partition;
        try {
            // We parse it this way because both "10" and 10 should be accepted as valid partition values in the REST API's
            // JSON request payload. If it throws an exception, we should propagate it since it's indicative of a badly formatted value.
            partition = Integer.parseInt(String.valueOf(partitionMap.get(KAFKA_PARTITION_KEY)));
        } catch (Exception e) {
            throw new BadRequestException("Failed to parse the following Kafka partition value in the provided offsets: '"
                    + partitionMap.get(KAFKA_PARTITION_KEY) + "'. Partition values for sink connectors need "
                    + "to be integers.", e);
        }
        TopicPartition tp = new TopicPartition(topic, partition);
        Map<String, ?> offsetMap = partitionOffset.getValue();
        if (offsetMap == null) {
            // represents an offset reset
            parsedOffsetMap.put(tp, null);
        } else {
            if (!offsetMap.containsKey(KAFKA_OFFSET_KEY)) {
                throw new BadRequestException(String.format("The offset for a sink connector should either be null or contain "
                        + "the key '%s'", KAFKA_OFFSET_KEY));
            }
            long offset;
            try {
                // We parse it this way because both "1000" and 1000 should be accepted as valid offset values in the REST API's
                // JSON request payload. If it throws an exception, we should propagate it since it's indicative of a badly formatted value.
                offset = Long.parseLong(String.valueOf(offsetMap.get(KAFKA_OFFSET_KEY)));
            } catch (Exception e) {
                throw new BadRequestException("Failed to parse the following Kafka offset value in the provided offsets: '"
                        + offsetMap.get(KAFKA_OFFSET_KEY) + "'. Offset values for sink connectors need "
                        + "to be integers.", e);
            }
            parsedOffsetMap.put(tp, offset);
        }
    }
    return parsedOffsetMap;
}
// Partition values may arrive as strings ("10") and offsets as numbers (100);
// both must parse into a single TopicPartition -> Long entry.
@Test
public void testValidateAndParseIntegerOffsetValue() {
    Map<Map<String, ?>, Map<String, ?>> partitionOffsets = createPartitionOffsetMap("topic", "10", 100);
    Map<TopicPartition, Long> parsedOffsets = SinkUtils.parseSinkConnectorOffsets(partitionOffsets);
    assertEquals(1, parsedOffsets.size());
    Long offsetValue = parsedOffsets.values().iterator().next();
    assertEquals(100L, offsetValue.longValue());
}
/**
 * Renders a throwable's full stack trace into a string.
 * The trace is printed into an in-memory buffer; auto-flush is enabled so the
 * writer needs no explicit flush before the buffer is read back.
 *
 * @param throwable the throwable to render; must not be null
 * @return the stack trace text, exactly as printStackTrace would emit it
 */
public static String getStackTrace(final Throwable throwable) {
    final StringWriter buffer = new StringWriter();
    throwable.printStackTrace(new PrintWriter(buffer, true));
    return buffer.toString();
}
// The rendered stack trace must mention the original exception.
@Test
void getStackTrace() {
    Assertions.assertTrue(ExceptionUtils.getStackTrace(exception).contains("Exception0"));
}
/**
 * Returns the original file for the given component, served from (and
 * populating) the cache of original files.
 */
@Override
public Optional<OriginalFile> getOriginalFile(Component file) {
    return retrieveOriginalFileFromCache(this.originalFiles, file);
}
// Passing null must fail fast with a descriptive NullPointerException.
@Test
public void getOriginalFile_throws_NPE_when_file_is_null() {
    assertThatThrownBy(() -> underTest.getOriginalFile(null))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("file can't be null");
}
/**
 * Logs a deprecation message with the given format parameters.
 * Deprecations are emitted at WARN level through the wrapped logger.
 */
@Override
public void deprecated(String message, Object... params) {
    logger.warn(message, params);
}
// Deprecation messages from a nested logger must be routed to the dedicated
// deprecation log file under the org.logstash.deprecation.* logger name.
@Test
public void testDeprecationLoggerWriteOut_nested() throws IOException {
    final DefaultDeprecationLogger deprecationLogger =
        new DefaultDeprecationLogger(LogManager.getLogger("org.logstash.my_nested_logger"));
    // Exercise
    deprecationLogger.deprecated("Simple deprecation message");
    String logs = LogTestUtils.loadLogFileContent("logstash-deprecation.log");
    assertTrue("Deprecation logs MUST contains the out line",
        logs.matches(".*\\[org\\.logstash\\.deprecation\\.my_nested_logger.*\\].*Simple deprecation message"));
}
/**
 * Renders the throwable proxy via the superclass and then applies the two
 * configured substitutions in order: the line-prefix replacement first,
 * followed by the cause-marker replacement.
 */
@Override
protected String throwableProxyToString(IThrowableProxy tp) {
    final String rendered = super.throwableProxyToString(tp);
    final String withPrefix = PATTERN.matcher(rendered).replaceAll(PREFIX);
    return CAUSING_PATTERN.matcher(withPrefix).replaceAll(CAUSING);
}
// Every non-empty rendered line of the proxy output must carry the "!" prefix.
@Test
void prefixesExceptionsWithExclamationMarks() {
    assertThat(converter.throwableProxyToString(proxy).split("\\R"))
        .filteredOn(Objects::nonNull)
        .filteredOn(s -> !s.isEmpty())
        .isNotEmpty()
        .allSatisfy(line -> assertThat(line).startsWith("!"));
}
/**
 * Registers a server node by mapping the given attributes into a
 * {@code ServerNodeDto} and delegating to the underlying service.
 * Note: the {@code clusterUri} parameter is currently not propagated
 * into the DTO.
 */
@Override
public boolean registerServer(String nodeId, boolean isLeader, URI httpPublishUri, String clusterUri, String hostname) {
    final ServerNodeDto serverNode = ServerNodeDto.Builder.builder()
            .setId(nodeId)
            .setLeader(isLeader)
            .setTransportAddress(httpPublishUri.toString())
            .setHostname(hostname)
            .build();
    return delegate.registerServer(serverNode);
}
// Registering a server on an empty collection must persist id, hostname,
// transport address, and the leader flag.
@Test
@MongoDBFixtures("NodeServiceImplTest-empty.json")
public void testRegisterServer() throws Exception {
    assertThat(nodeService.allActive())
        .describedAs("The collection should be empty")
        .isEmpty();
    nodeService.registerServer(nodeId.getNodeId(), true, TRANSPORT_URI, LOCAL_CANONICAL_HOSTNAME);
    final Node node = nodeService.byNodeId(nodeId);
    assertThat(node).isNotNull();
    assertThat(node.getHostname()).isEqualTo(LOCAL_CANONICAL_HOSTNAME);
    assertThat(node.getTransportAddress()).isEqualTo(TRANSPORT_URI.toString());
    assertThat(node.isLeader()).isTrue();
}
/**
 * Rewrites the given expression by walking its tree with
 * {@code OperatorPlugin}'s processing rules. The {@code null} second argument
 * is the rewriter context, which this rewrite does not use.
 */
public Expression rewrite(final Expression expression) {
    return new ExpressionTreeRewriter<>(new OperatorPlugin()::process)
        .rewrite(expression, null);
}
// A string date literal used as the lower BETWEEN bound on WINDOWSTART must
// be rewritten to its epoch-millis value; the numeric bound stays untouched.
@Test
public void shouldReplaceBetweenOnMinString() {
    // Given:
    final Expression predicate = getPredicate(
        "SELECT * FROM orders where WINDOWSTART BETWEEN '2017-01-01' AND 1236987;");
    // When:
    final Expression rewritten = rewriter.rewrite(predicate);
    // Then:
    assertThat(
        rewritten.toString(),
        is(String.format("(WINDOWSTART BETWEEN %d AND 1236987)", A_TIMESTAMP)));
}
/**
 * Registers a windowing strategy and returns its component id, reusing the
 * existing id when the same strategy was registered before.
 *
 * @param windowingStrategy the strategy to register
 * @return the (possibly pre-existing) unique id of the strategy
 * @throws IOException if serializing the strategy to proto fails
 */
public String registerWindowingStrategy(WindowingStrategy<?, ?> windowingStrategy) throws IOException {
    // Memoization: hand back the id assigned on a previous registration.
    String existing = windowingStrategyIds.get(windowingStrategy);
    if (existing != null) {
        return existing;
    }
    // Derive a readable base name like "Strategy(WindowFn)" and make it
    // unique among the ids already handed out.
    String baseName = String.format(
        "%s(%s)",
        NameUtils.approximateSimpleName(windowingStrategy),
        NameUtils.approximateSimpleName(windowingStrategy.getWindowFn()));
    String name = uniqify(baseName, windowingStrategyIds.values());
    windowingStrategyIds.put(windowingStrategy, name);
    // Serialize the strategy to its proto form and record it in the components.
    RunnerApi.WindowingStrategy windowingStrategyProto =
        WindowingStrategyTranslation.toProto(windowingStrategy, this);
    componentsBuilder.putWindowingStrategies(name, windowingStrategyProto);
    return name;
}
// Registering a strategy must return a non-empty id under which the proto
// form is retrievable from the components.
@Test
public void registerWindowingStrategy() throws IOException {
    WindowingStrategy<?, ?> strategy =
        WindowingStrategy.globalDefault().withMode(AccumulationMode.ACCUMULATING_FIRED_PANES);
    String name = components.registerWindowingStrategy(strategy);
    assertThat(name, not(isEmptyOrNullString()));
    components.toComponents().getWindowingStrategiesOrThrow(name);
}