focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override
public R next() {
    // Drain the current child traverser; whenever it is exhausted (returns null),
    // swap in the next child until the NULL_TRAVERSER sentinel signals that the
    // flat-mapped sequence as a whole is finished.
    while (true) {
        final R item = currentTraverser.next();
        if (item != null) {
            return item;
        }
        currentTraverser = nextTraverser();
        if (currentTraverser == NULL_TRAVERSER) {
            // Identity comparison against the internal sentinel is deliberate.
            return null;
        }
    }
}
/**
 * Verifies that a flat-mapped empty traverser is skipped over and iteration
 * continues with the next item, rather than terminating the whole traversal.
 */
@Test
public void when_flatMapToNullTraverser_then_skipOverToNext() {
    // This test would fail, if the internal FlatMappingTraverser.NULL_TRAVERSER instance
    // would be the same (as per == operator) as the instance returned by Traversers.empty()
    FlatMappingTraverser<Integer, String> trav = new FlatMappingTraverser<>(traverseItems(1, 2, 3),
            item -> item != 3 ? empty() : singleton("a"));
    // Items 1 and 2 map to empty traversers; only item 3 yields a value.
    assertEquals("a", trav.next());
    // After exhaustion, next() keeps returning null.
    assertNull(trav.next());
    assertNull(trav.next());
}
/**
 * Splits a SQL text into individual statements on ';', while tracking
 * single-line comments, multi-line comments, and single/double-quoted strings
 * so that semicolons inside those regions do not terminate a statement.
 * Newlines inside comments are preserved so downstream line numbers line up.
 *
 * @param text the raw SQL text (may contain multiple statements and comments)
 * @return the list of refined statements; comment-only segments are folded
 *         into their neighbors as blank lines rather than returned
 */
public List<String> splitSql(String text) {
    List<String> queries = new ArrayList<>();
    StringBuilder query = new StringBuilder();
    char character;
    // State flags of the character-level scanner.
    boolean multiLineComment = false;
    boolean singleLineComment = false;
    boolean singleQuoteString = false;
    boolean doubleQuoteString = false;
    for (int index = 0; index < text.length(); index++) {
        character = text.charAt(index);
        // end of single line comment
        if (singleLineComment && (character == '\n')) {
            singleLineComment = false;
            query.append(character);
            if (index == (text.length() - 1) && !query.toString().trim().isEmpty()) {
                // add query when it is the end of sql.
                queries.add(query.toString());
            }
            continue;
        }
        // end of multiple line comment (detected two characters after the closing "*/")
        if (multiLineComment && (index - 1) >= 0 && text.charAt(index - 1) == '/'
                && (index - 2) >= 0 && text.charAt(index - 2) == '*') {
            multiLineComment = false;
        }
        // toggle single-quote string state (only outside comments)
        if (character == '\'' && !(singleLineComment || multiLineComment)) {
            if (singleQuoteString) {
                singleQuoteString = false;
            } else if (!doubleQuoteString) {
                singleQuoteString = true;
            }
        }
        // toggle double-quote string state (only outside comments)
        if (character == '"' && !(singleLineComment || multiLineComment)) {
            if (doubleQuoteString && index > 0) {
                doubleQuoteString = false;
            } else if (!singleQuoteString) {
                doubleQuoteString = true;
            }
        }
        // detect comment starts when not already inside a string or comment
        if (!singleQuoteString && !doubleQuoteString && !multiLineComment && !singleLineComment
                && text.length() > (index + 1)) {
            if (isSingleLineComment(text.charAt(index), text.charAt(index + 1))) {
                singleLineComment = true;
            } else if (text.charAt(index) == '/' && text.length() > (index + 2)
                    && text.charAt(index + 1) == '*' && text.charAt(index + 2) != '+') {
                // "/*+" is excluded: presumably an optimizer hint, not a comment — TODO confirm
                multiLineComment = true;
            }
        }
        if (character == ';' && !singleQuoteString && !doubleQuoteString && !multiLineComment
                && !singleLineComment) {
            // meet the end of semicolon
            if (!query.toString().trim().isEmpty()) {
                queries.add(query.toString());
                query = new StringBuilder();
            }
        } else if (index == (text.length() - 1)) {
            // meet the last character
            if ((!singleLineComment && !multiLineComment)) {
                query.append(character);
            }
            if (!query.toString().trim().isEmpty()) {
                queries.add(query.toString());
                query = new StringBuilder();
            }
        } else if (!singleLineComment && !multiLineComment) {
            // normal case, not in single line comment and not in multiple line comment
            query.append(character);
        } else if (character == '\n') {
            // keep newlines from commented-out regions so statement positions are stable
            query.append(character);
        }
    }
    // Second pass: drop comment-only queries, replacing them with blank lines
    // appended to the previous refined query so line counts stay consistent.
    List<String> refinedQueries = new ArrayList<>();
    for (int i = 0; i < queries.size(); ++i) {
        String emptyLine = "";
        if (i > 0) {
            // NOTE(review): indexes refinedQueries with i-1 while iterating queries;
            // this assumes refinedQueries has kept pace with queries up to here — if an
            // early query were filtered out this could throw IndexOutOfBoundsException. Verify.
            emptyLine = createEmptyLine(refinedQueries.get(i-1));
        }
        if (isSingleLineComment(queries.get(i)) || isMultipleLineComment(queries.get(i))) {
            // refine the last refinedQuery
            if (refinedQueries.size() > 0) {
                String lastRefinedQuery = refinedQueries.get(refinedQueries.size() - 1);
                refinedQueries.set(refinedQueries.size() - 1,
                        lastRefinedQuery + createEmptyLine(queries.get(i)));
            }
        } else {
            String refinedQuery = emptyLine + queries.get(i);
            refinedQueries.add(refinedQuery);
        }
    }
    return refinedQueries;
}
/**
 * Exercises SqlSplitter configured with a custom single-line comment prefix ("#"):
 * comment-only segments are dropped, comments are replaced with blank lines so the
 * remaining statements keep their original line positions, and a comment in the
 * middle of a statement is removed while the following newline is preserved.
 */
@Test
void testCustomSplitter_2() {
    SqlSplitter sqlSplitter = new SqlSplitter("#");
    // trailing comment is discarded
    List<String> sqls = sqlSplitter.splitSql("show tables;\n#comment_1");
    assertEquals(1, sqls.size());
    assertEquals("show tables", sqls.get(0));
    sqls = sqlSplitter.splitSql("show tables;\n#comment_1");
    assertEquals(1, sqls.size());
    assertEquals("show tables", sqls.get(0));
    // a ';' inside a comment does not create a statement
    sqls = sqlSplitter.splitSql("show tables;\n#comment_1;");
    assertEquals(1, sqls.size());
    assertEquals("show tables", sqls.get(0));
    sqls = sqlSplitter.splitSql("show tables;\n#comment_1;\n");
    assertEquals(1, sqls.size());
    assertEquals("show tables", sqls.get(0));
    // leading comment becomes a blank line prefix on the following statement
    sqls = sqlSplitter.splitSql("#comment_1;\nshow tables");
    assertEquals(1, sqls.size());
    assertEquals("\nshow tables", sqls.get(0));
    sqls = sqlSplitter.splitSql("#comment_1;\nshow tables;\n#comment_2");
    assertEquals(1, sqls.size());
    assertEquals("\nshow tables", sqls.get(0));
    // two statements keep their separating newline on the second statement
    sqls = sqlSplitter.splitSql("show tables;\ndescribe table_1");
    assertEquals(2, sqls.size());
    assertEquals("show tables", sqls.get(0));
    assertEquals("\ndescribe table_1", sqls.get(1));
    // a comment between statements contributes an extra blank line
    sqls = sqlSplitter.splitSql("show tables;\n#comment_1;\ndescribe table_1");
    assertEquals(2, sqls.size());
    assertEquals("show tables", sqls.get(0));
    assertEquals("\n\ndescribe table_1", sqls.get(1));
    sqls = sqlSplitter.splitSql("select a\nfrom table_1;\ndescribe table_1;#comment_1");
    assertEquals(2, sqls.size());
    assertEquals("select a\nfrom table_1", sqls.get(0));
    assertEquals("\n\ndescribe table_1", sqls.get(1));
    // comment-only input yields no statements
    sqls = sqlSplitter.splitSql("#comment_1;\n#comment_2\n");
    assertEquals(0, sqls.size());
    // inline comment mid-statement is stripped, newline retained
    sqls = sqlSplitter.splitSql("select a # comment\n from table_1");
    assertEquals(1, sqls.size());
    assertEquals("select a \n from table_1", sqls.get(0));
}
/**
 * Returns the job filters held by this instance.
 *
 * @return the list of configured {@code JobFilter}s
 */
List<JobFilter> getFilters() {
    return this.filters;
}
/**
 * Verifies that a freshly constructed JobDefaultFilters contains at least the
 * two built-in filters: DefaultJobFilter and RetryFilter.
 */
@Test
void jobDefaultFiltersHasDefaultJobFilterAndRetryFilter() {
    JobDefaultFilters jobDefaultFilters = new JobDefaultFilters();
    assertThat(jobDefaultFilters.getFilters())
            .hasAtLeastOneElementOfType(DefaultJobFilter.class)
            .hasAtLeastOneElementOfType(RetryFilter.class);
}
/**
 * Builds the SubflowExecutionResult for a terminated subflow execution,
 * collecting and rendering the subflow's outputs into the parent task run.
 *
 * @param runContext the run context used for configuration lookup and rendering
 * @param taskRun    the parent task run to decorate with state and outputs
 * @param flow       the subflow definition (source of declared outputs)
 * @param execution  the subflow execution whose result is being reported
 * @return empty while the execution is still running; otherwise the result
 */
@Override
public Optional<SubflowExecutionResult> createSubflowExecutionResult(
    RunContext runContext,
    TaskRun taskRun,
    io.kestra.core.models.flows.Flow flow,
    Execution execution
) {
    // we only create a worker task result when the execution is terminated
    if (!taskRun.getState().isTerminated()) {
        return Optional.empty();
    }
    // Plugin configuration may disable forwarding of legacy task-level outputs;
    // defaults to enabled when unset.
    boolean isOutputsAllowed = runContext
        .<Boolean>pluginConfiguration(PLUGIN_FLOW_OUTPUTS_ENABLED)
        .orElse(true);
    final Output.OutputBuilder builder = Output.builder()
        .executionId(execution.getId())
        .state(execution.getState().getCurrent());
    // Prefer the flow's declared outputs; fall back to this task's own outputs
    // only when allowed, otherwise null (meaning: nothing to render below).
    final Map<String, Object> subflowOutputs = Optional
        .ofNullable(flow.getOutputs())
        .map(outputs -> outputs
            .stream()
            .collect(Collectors.toMap(
                io.kestra.core.models.flows.Output::getId,
                io.kestra.core.models.flows.Output::getValue)
            )
        )
        .orElseGet(() -> isOutputsAllowed ? this.getOutputs() : null);
    if (subflowOutputs != null) {
        try {
            Map<String, Object> outputs = runContext.render(subflowOutputs);
            FlowInputOutput flowInputOutput = ((DefaultRunContext)runContext).getApplicationContext().getBean(FlowInputOutput.class);
            // this is hacking: reach into the application context to type the
            // rendered outputs against the flow's output declarations
            if (flow.getOutputs() != null && flowInputOutput != null) {
                outputs = flowInputOutput.typedOutputs(flow, execution, outputs);
            }
            builder.outputs(outputs);
        } catch (Exception e) {
            runContext.logger().warn("Failed to extract outputs with the error: '{}'", e.getLocalizedMessage(), e);
            var state = this.isAllowFailure() ?
                State.Type.WARNING :
                State.Type.FAILED;
            taskRun = taskRun
                .withState(state)
                .withAttempts(Collections.singletonList(TaskRunAttempt.builder().state(new State().withState(state)).build()))
                .withOutputs(builder.build().toMap());
            // NOTE(review): the result state here is hard-coded FAILED even when the
            // task run above was given WARNING (allowFailure) — confirm this is intended.
            return Optional.of(SubflowExecutionResult.builder()
                .executionId(execution.getId())
                .state(State.Type.FAILED)
                .parentTaskRun(taskRun)
                .build());
        }
    }
    taskRun = taskRun.withOutputs(builder.build().toMap());
    // Derive the final state from the subflow execution, honoring transmitFailed
    // and allowFailure settings.
    State.Type finalState = ExecutableUtils.guessState(execution, this.transmitFailed, this.isAllowFailure());
    if (taskRun.getState().getCurrent() != finalState) {
        taskRun = taskRun.withState(finalState);
    }
    return Optional.of(ExecutableUtils.subflowExecutionResult(taskRun, execution));
}
/**
 * A non-terminated task run (state CREATED) must produce no subflow execution
 * result — the method returns Optional.empty() without touching outputs.
 */
@Test
void shouldNotReturnResultForExecutionNotTerminated() {
    TaskRun taskRun = TaskRun
        .builder()
        .state(State.of(State.Type.CREATED, Collections.emptyList()))
        .build();
    Optional<SubflowExecutionResult> result = new Subflow().createSubflowExecutionResult(
        runContext,
        taskRun,
        Flow.builder().build(),
        Execution.builder().build()
    );
    assertThat(result, is(Optional.empty()));
}
/**
 * Dissects a logged network frame into a human-readable form: writes the log
 * header and socket address, then dispatches on the frame type to the matching
 * flyweight-specific dissector. Unknown types are rendered as UNKNOWN(type).
 *
 * NOTE: the shared static flyweights (DATA_HEADER, SM_HEADER, ...) are re-wrapped
 * here, so this method is not safe for concurrent use — presumably single-threaded
 * by design of the log reader; confirm before calling from multiple threads.
 *
 * @param eventCode driver event code identifying the log record
 * @param buffer    buffer containing the encoded log record
 * @param offset    offset of the record within the buffer
 * @param builder   destination for the dissected text
 */
static void dissectFrame(
    final DriverEventCode eventCode,
    final MutableDirectBuffer buffer,
    final int offset,
    final StringBuilder builder)
{
    int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);
    builder.append(": address=");
    encodedLength += dissectSocketAddress(buffer, offset + encodedLength, builder);
    builder.append(" ");
    final int frameOffset = offset + encodedLength;
    final int frameType = frameType(buffer, frameOffset);
    switch (frameType)
    {
        // PAD and DATA share the data-header layout.
        case HeaderFlyweight.HDR_TYPE_PAD:
        case HeaderFlyweight.HDR_TYPE_DATA:
            DATA_HEADER.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectDataFrame(builder);
            break;
        case HeaderFlyweight.HDR_TYPE_SM:
            SM_HEADER.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectStatusFrame(builder);
            break;
        case HeaderFlyweight.HDR_TYPE_NAK:
            NAK_HEADER.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectNakFrame(builder);
            break;
        case HeaderFlyweight.HDR_TYPE_SETUP:
            SETUP_HEADER.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectSetupFrame(builder);
            break;
        case HeaderFlyweight.HDR_TYPE_RTTM:
            RTT_MEASUREMENT.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectRttFrame(builder);
            break;
        case HeaderFlyweight.HDR_TYPE_RES:
            // Resolution frames carry their own offset handling.
            dissectResFrame(buffer, frameOffset, builder);
            break;
        case HeaderFlyweight.HDR_TYPE_RSP_SETUP:
            RSP_SETUP.wrap(buffer, frameOffset, buffer.capacity() - frameOffset);
            dissectRspSetupFrame(builder);
            break;
        default:
            builder.append("type=UNKNOWN(").append(frameType).append(")");
            break;
    }
}
/**
 * Encodes a NAK frame after a log header and socket address, then verifies that
 * dissectFrame renders every NAK field (flags, frameLength, sessionId, streamId,
 * termId, termOffset, length) in the expected textual format.
 */
@Test
void dissectFrameTypeNak()
{
    internalEncodeLogHeader(buffer, 0, 3, 3, () -> 3_000_000_000L);
    final int socketAddressOffset = encodeSocketAddress(
        buffer, LOG_HEADER_LENGTH, new InetSocketAddress("localhost", 8888));
    final NakFlyweight flyweight = new NakFlyweight();
    flyweight.wrap(buffer, LOG_HEADER_LENGTH + socketAddressOffset, 300);
    flyweight.headerType(HDR_TYPE_NAK);
    flyweight.flags((short)2);
    flyweight.frameLength(54);
    flyweight.sessionId(5);
    flyweight.streamId(8);
    flyweight.termId(20);
    flyweight.termOffset(0);
    flyweight.length(999999);
    dissectFrame(FRAME_OUT, buffer, 0, builder);
    assertEquals("[3.000000000] " + CONTEXT + ": " + FRAME_OUT.name() +
        " [3/3]: address=127.0.0.1:8888 type=NAK flags=00000010 frameLength=54 sessionId=5 streamId=8 " +
        "termId=20 termOffset=0 length=999999",
        builder.toString());
}
/**
 * Runs the given task within its privileged lifecycle: start, then the
 * privileged body, with stop guaranteed by the finally block.
 *
 * Note the try encloses start() as well, so stop() is invoked even when
 * start() itself throws — presumably so session state set up (fully or
 * partially) by start() is always torn down; confirm that stop() tolerates
 * being called after a failed start().
 *
 * @param task the task to execute
 */
public static void execute(Task task) {
    try {
        task.start();
        task.doPrivileged();
    } finally {
        task.stop();
    }
}
/**
 * While inside DoPrivileged.execute, the thread-local session grants every
 * permission check (without being "logged in"); after the task completes, the
 * original session is restored on the thread local.
 */
@Test
public void allow_everything_in_privileged_block_only() {
    UserSessionCatcherTask catcher = new UserSessionCatcherTask();
    DoPrivileged.execute(catcher);
    // verify the session used inside Privileged task
    assertThat(catcher.userSession.isLoggedIn()).isFalse();
    assertThat(catcher.userSession.hasComponentPermission("any permission", new ComponentDto())).isTrue();
    assertThat(catcher.userSession.isSystemAdministrator()).isTrue();
    assertThat(catcher.userSession.shouldResetPassword()).isFalse();
    assertThat(catcher.userSession.isActive()).isTrue();
    assertThat(catcher.userSession.hasChildProjectsPermission(USER, new ComponentDto().setUuid("uuid"))).isTrue();
    assertThat(catcher.userSession.hasPortfolioChildProjectsPermission(USER, new ComponentDto())).isTrue();
    assertThat(catcher.userSession.isAuthenticatedBrowserSession()).isFalse();
    // verify session in place after task is done
    assertThat(threadLocalUserSession.get()).isSameAs(session);
}
/**
 * Resolves a human-readable title for the given GRN.
 *
 * Resolution order:
 * <ol>
 *   <li>For special view GRNs with a resolver-style id: the resolved view's
 *       title, provided the searching user may read that view.</li>
 *   <li>The catalog entry's title.</li>
 *   <li>Falling back to the catalog entry's id when no title is set.</li>
 * </ol>
 *
 * @param itemGrn    the GRN to resolve a title for
 * @param searchUser the user performing the lookup (used for view read checks)
 * @return the title, the entry id as fallback, or empty when nothing matches
 */
public Optional<String> retrieveTitle(final GRN itemGrn, final SearchUser searchUser) {
    if (isSpecialView(itemGrn)) {
        final ViewResolverDecoder decoder = new ViewResolverDecoder(itemGrn.entity());
        if (decoder.isResolverViewId()) {
            final ViewResolver viewResolver = viewResolvers.get(decoder.getResolverName());
            if (viewResolver != null) {
                final Optional<ViewDTO> view = viewResolver.get(decoder.getViewId());
                if (view.isPresent() && searchUser.canReadView(view.get())) {
                    // title() may be null; wrap it rather than falling through to the catalog.
                    return Optional.ofNullable(view.get().title());
                }
            }
        }
    }
    final Optional<Catalog.Entry> entry = catalog.getEntry(itemGrn);
    // Idiomatic replacement for the manual isPresent()/else chain: prefer the
    // entry's title, otherwise fall back to its id (Optional.or, Java 9+).
    return entry.map(Catalog.Entry::title)
            .or(() -> entry.map(Catalog.Entry::id));
}
/**
 * When the catalog has no entry for the GRN (and it is not a special view),
 * retrieveTitle must return an empty Optional.
 */
@Test
void testReturnsEmptyOptionalOnEmptyEntryInCatalog() throws Exception {
    doReturn(Optional.empty()).when(catalog).getEntry(any());
    assertTrue(toTest.retrieveTitle(grn, searchUser).isEmpty());
}
/**
 * Submits the pipeline to the Dataflow service and returns a handle to the
 * created job. The method is a long, strictly ordered sequence: experiment
 * normalization (Runner v2 / unified worker / streaming engine), optional
 * projection pushdown, portable proto construction and artifact staging,
 * v1 transform replacement (non-unified only), v1 proto + job translation,
 * environment configuration, optional graph upload / template file output,
 * and finally job creation with idempotence checking via client_request_id.
 * Statement order matters throughout (e.g. staging before artifact resolution);
 * do not reorder casually.
 *
 * @param pipeline the pipeline to run
 * @return a DataflowPipelineJob (or DataflowTemplateJob when only a template
 *         file was written)
 */
@Override
public DataflowPipelineJob run(Pipeline pipeline) {
    // Multi-language pipelines and pipelines that include upgrades should automatically be upgraded
    // to Runner v2.
    if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) {
        List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
        if (!experiments.contains("use_runner_v2")) {
            LOG.info(
                "Automatically enabling Dataflow Runner v2 since the pipeline used cross-language"
                    + " transforms or pipeline needed a transform upgrade.");
            options.setExperiments(
                ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build());
        }
    }
    if (useUnifiedWorker(options)) {
        // Contradictory enable/disable flags are a hard error rather than a silent pick.
        if (hasExperiment(options, "disable_runner_v2")
            || hasExperiment(options, "disable_runner_v2_until_2023")
            || hasExperiment(options, "disable_prime_runner_v2")) {
            throw new IllegalArgumentException(
                "Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set.");
        }
        List<String> experiments =
            new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
        if (!experiments.contains("use_runner_v2")) {
            experiments.add("use_runner_v2");
        }
        if (!experiments.contains("use_unified_worker")) {
            experiments.add("use_unified_worker");
        }
        if (!experiments.contains("beam_fn_api")) {
            experiments.add("beam_fn_api");
        }
        if (!experiments.contains("use_portable_job_submission")) {
            experiments.add("use_portable_job_submission");
        }
        options.setExperiments(ImmutableList.copyOf(experiments));
    }
    logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline);
    logWarningIfBigqueryDLQUnused(pipeline);
    if (shouldActAsStreaming(pipeline)) {
        options.setStreaming(true);
        if (useUnifiedWorker(options)) {
            options.setEnableStreamingEngine(true);
            List<String> experiments =
                new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true
            if (!experiments.contains("enable_streaming_engine")) {
                experiments.add("enable_streaming_engine");
            }
            if (!experiments.contains("enable_windmill_service")) {
                experiments.add("enable_windmill_service");
            }
        }
    }
    if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) {
        ProjectionPushdownOptimizer.optimize(pipeline);
    }
    LOG.info(
        "Executing pipeline on the Dataflow Service, which will have billing implications "
            + "related to Google Compute Engine usage and other Google Cloud Services.");
    DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class);
    String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions);
    // This incorrectly puns the worker harness container image (which implements v1beta3 API)
    // with the SDK harness image (which implements Fn API).
    //
    // The same Environment is used in different and contradictory ways, depending on whether
    // it is a v1 or v2 job submission.
    RunnerApi.Environment defaultEnvironmentForDataflow =
        Environments.createDockerEnvironment(workerHarnessContainerImageURL);
    // The SdkComponents for portable an non-portable job submission must be kept distinct. Both
    // need the default environment.
    SdkComponents portableComponents = SdkComponents.create();
    portableComponents.registerEnvironment(
        defaultEnvironmentForDataflow
            .toBuilder()
            .addAllDependencies(getDefaultArtifacts())
            .addAllCapabilities(Environments.getJavaCapabilities())
            .build());
    RunnerApi.Pipeline portablePipelineProto =
        PipelineTranslation.toProto(pipeline, portableComponents, false);
    // Note that `stageArtifacts` has to be called before `resolveArtifact` because
    // `resolveArtifact` updates local paths to staged paths in pipeline proto.
    portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto);
    List<DataflowPackage> packages = stageArtifacts(portablePipelineProto);
    portablePipelineProto = resolveArtifacts(portablePipelineProto);
    portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options);
    if (LOG.isDebugEnabled()) {
        LOG.debug(
            "Portable pipeline proto:\n{}",
            TextFormat.printer().printToString(portablePipelineProto));
    }
    // Stage the portable pipeline proto, retrieving the staged pipeline path, then update
    // the options on the new job
    // TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options
    LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation());
    byte[] serializedProtoPipeline = portablePipelineProto.toByteArray();
    DataflowPackage stagedPipeline =
        options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME);
    dataflowOptions.setPipelineUrl(stagedPipeline.getLocation());
    if (useUnifiedWorker(options)) {
        LOG.info("Skipping v1 transform replacements since job will run on v2.");
    } else {
        // Now rewrite things to be as needed for v1 (mutates the pipeline)
        // This way the job submitted is valid for v1 and v2, simultaneously
        replaceV1Transforms(pipeline);
    }
    // Capture the SdkComponents for look up during step translations
    SdkComponents dataflowV1Components = SdkComponents.create();
    dataflowV1Components.registerEnvironment(
        defaultEnvironmentForDataflow
            .toBuilder()
            .addAllDependencies(getDefaultArtifacts())
            .addAllCapabilities(Environments.getJavaCapabilities())
            .build());
    // No need to perform transform upgrading for the Runner v1 proto.
    RunnerApi.Pipeline dataflowV1PipelineProto =
        PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false);
    if (LOG.isDebugEnabled()) {
        LOG.debug(
            "Dataflow v1 pipeline proto:\n{}",
            TextFormat.printer().printToString(dataflowV1PipelineProto));
    }
    // Set a unique client_request_id in the CreateJob request.
    // This is used to ensure idempotence of job creation across retried
    // attempts to create a job. Specifically, if the service returns a job with
    // a different client_request_id, it means the returned one is a different
    // job previously created with the same job name, and that the job creation
    // has been effectively rejected. The SDK should return
    // Error::Already_Exists to user in that case.
    int randomNum = new Random().nextInt(9000) + 1000;
    String requestId =
        DateTimeFormat.forPattern("YYYYMMddHHmmssmmm")
                .withZone(DateTimeZone.UTC)
                .print(DateTimeUtils.currentTimeMillis())
            + "_"
            + randomNum;
    JobSpecification jobSpecification =
        translator.translate(
            pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages);
    if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) {
        List<String> experiments =
            firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
        if (!experiments.contains("use_staged_dataflow_worker_jar")) {
            dataflowOptions.setExperiments(
                ImmutableList.<String>builder()
                    .addAll(experiments)
                    .add("use_staged_dataflow_worker_jar")
                    .build());
        }
    }
    Job newJob = jobSpecification.getJob();
    try {
        newJob
            .getEnvironment()
            .setSdkPipelineOptions(
                MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class));
    } catch (IOException e) {
        throw new IllegalArgumentException(
            "PipelineOptions specified failed to serialize to JSON.", e);
    }
    newJob.setClientRequestId(requestId);
    DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
    String version = dataflowRunnerInfo.getVersion();
    checkState(
        !"${pom.version}".equals(version),
        "Unable to submit a job to the Dataflow service with unset version ${pom.version}");
    LOG.info("Dataflow SDK version: {}", version);
    newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties());
    // The Dataflow Service may write to the temporary directory directly, so
    // must be verified.
    if (!isNullOrEmpty(options.getGcpTempLocation())) {
        newJob
            .getEnvironment()
            .setTempStoragePrefix(
                dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation()));
    }
    newJob.getEnvironment().setDataset(options.getTempDatasetId());
    if (options.getWorkerRegion() != null) {
        newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion());
    }
    if (options.getWorkerZone() != null) {
        newJob.getEnvironment().setWorkerZone(options.getWorkerZone());
    }
    if (options.getFlexRSGoal()
        == DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) {
        newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED");
    } else if (options.getFlexRSGoal()
        == DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) {
        newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED");
    }
    // Represent the minCpuPlatform pipeline option as an experiment, if not already present.
    if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) {
        List<String> experiments =
            firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList());
        List<String> minCpuFlags =
            experiments.stream()
                .filter(p -> p.startsWith("min_cpu_platform"))
                .collect(Collectors.toList());
        if (minCpuFlags.isEmpty()) {
            dataflowOptions.setExperiments(
                ImmutableList.<String>builder()
                    .addAll(experiments)
                    .add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform())
                    .build());
        } else {
            LOG.warn(
                "Flag min_cpu_platform is defined in both top level PipelineOption, "
                    + "as well as under experiments. Proceed using {}.",
                minCpuFlags.get(0));
        }
    }
    newJob
        .getEnvironment()
        .setExperiments(
            ImmutableList.copyOf(
                firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList())));
    // Set the Docker container image that executes Dataflow worker harness, residing in Google
    // Container Registry. Translator is guaranteed to create a worker pool prior to this point.
    // For runner_v1, only worker_harness_container is set.
    // For runner_v2, both worker_harness_container and sdk_harness_container are set to the same
    // value.
    String containerImage = getContainerImageForJob(options);
    for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) {
        workerPool.setWorkerHarnessContainerImage(containerImage);
    }
    configureSdkHarnessContainerImages(options, portablePipelineProto, newJob);
    newJob.getEnvironment().setVersion(getEnvironmentVersion(options));
    if (hooks != null) {
        hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment());
    }
    // enable upload_graph when the graph is too large
    byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8);
    int jobGraphByteSize = jobGraphBytes.length;
    if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES
        && !hasExperiment(options, "upload_graph")
        && !useUnifiedWorker(options)) {
        List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList());
        options.setExperiments(
            ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build());
        LOG.info(
            "The job graph size ({} in bytes) is larger than {}. Automatically add "
                + "the upload_graph option to experiments.",
            jobGraphByteSize,
            CREATE_JOB_REQUEST_LIMIT_BYTES);
    }
    if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) {
        ArrayList<String> experiments = new ArrayList<>(options.getExperiments());
        while (experiments.remove("upload_graph")) {}
        options.setExperiments(experiments);
        LOG.warn(
            "The upload_graph experiment was specified, but it does not apply "
                + "to runner v2 jobs. Option has been automatically removed.");
    }
    // Upload the job to GCS and remove the graph object from the API call. The graph
    // will be downloaded from GCS by the service.
    if (hasExperiment(options, "upload_graph")) {
        DataflowPackage stagedGraph =
            options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME);
        newJob.getSteps().clear();
        newJob.setStepsLocation(stagedGraph.getLocation());
    }
    if (!isNullOrEmpty(options.getDataflowJobFile())
        || !isNullOrEmpty(options.getTemplateLocation())) {
        boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation());
        if (isTemplate) {
            checkArgument(
                isNullOrEmpty(options.getDataflowJobFile()),
                "--dataflowJobFile and --templateLocation are mutually exclusive.");
        }
        String fileLocation =
            firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile());
        checkArgument(
            fileLocation.startsWith("/") || fileLocation.startsWith("gs://"),
            "Location must be local or on Cloud Storage, got %s.",
            fileLocation);
        ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */);
        String workSpecJson = DataflowPipelineTranslator.jobToString(newJob);
        try (PrintWriter printWriter =
            new PrintWriter(
                new BufferedWriter(
                    new OutputStreamWriter(
                        Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)),
                        UTF_8)))) {
            printWriter.print(workSpecJson);
            LOG.info("Printed job specification to {}", fileLocation);
        } catch (IOException ex) {
            String error = String.format("Cannot create output file at %s", fileLocation);
            if (isTemplate) {
                // Template creation must fail loudly when the spec cannot be written.
                throw new RuntimeException(error, ex);
            } else {
                LOG.warn(error, ex);
            }
        }
        if (isTemplate) {
            LOG.info("Template successfully created.");
            return new DataflowTemplateJob();
        }
    }
    String jobIdToUpdate = null;
    if (options.isUpdate()) {
        jobIdToUpdate = getJobIdFromName(options.getJobName());
        newJob.setTransformNameMapping(options.getTransformNameMapping());
        newJob.setReplaceJobId(jobIdToUpdate);
    }
    if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) {
        newJob.setTransformNameMapping(options.getTransformNameMapping());
        newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot());
    }
    Job jobResult;
    try {
        jobResult = dataflowClient.createJob(newJob);
    } catch (GoogleJsonResponseException e) {
        String errorMessages = "Unexpected errors";
        if (e.getDetails() != null) {
            if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) {
                errorMessages =
                    "The size of the serialized JSON representation of the pipeline "
                        + "exceeds the allowable limit. "
                        + "For more information, please see the documentation on job submission:\n"
                        + "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs";
            } else {
                errorMessages = e.getDetails().getMessage();
            }
        }
        throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e);
    } catch (IOException e) {
        throw new RuntimeException("Failed to create a workflow job", e);
    }
    // Use a raw client for post-launch monitoring, as status calls may fail
    // regularly and need not be retried automatically.
    DataflowPipelineJob dataflowPipelineJob =
        new DataflowPipelineJob(
            DataflowClient.create(options),
            jobResult.getId(),
            options,
            jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(),
            portablePipelineProto);
    // If the service returned client request id, the SDK needs to compare it
    // with the original id generated in the request, if they are not the same
    // (i.e., the returned job is not created by this request), throw
    // DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException
    // depending on whether this is a reload or not.
    if (jobResult.getClientRequestId() != null
        && !jobResult.getClientRequestId().isEmpty()
        && !jobResult.getClientRequestId().equals(requestId)) {
        // If updating a job.
        if (options.isUpdate()) {
            throw new DataflowJobAlreadyUpdatedException(
                dataflowPipelineJob,
                String.format(
                    "The job named %s with id: %s has already been updated into job id: %s "
                        + "and cannot be updated again.",
                    newJob.getName(), jobIdToUpdate, jobResult.getId()));
        } else {
            throw new DataflowJobAlreadyExistsException(
                dataflowPipelineJob,
                String.format(
                    "There is already an active job named %s with id: %s. If you want to submit a"
                        + " second job, try again by setting a different name using --jobName.",
                    newJob.getName(), jobResult.getId()));
        }
    }
    LOG.info(
        "To access the Dataflow monitoring console, please navigate to {}",
        MonitoringUtil.getJobMonitoringPageURL(
            options.getProject(), options.getRegion(), jobResult.getId()));
    LOG.info("Submitted job: {}", jobResult.getId());
    LOG.info(
        "To cancel the job using the 'gcloud' tool, run:\n> {}",
        MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId()));
    return dataflowPipelineJob;
}
/**
 * When updating a job and the service echoes back a different client_request_id
 * (meaning the job was already updated by a prior request), run() must throw
 * DataflowJobAlreadyUpdatedException carrying the returned job's id.
 */
@Test
public void testUpdateAlreadyUpdatedPipeline() throws IOException {
    DataflowPipelineOptions options = buildPipelineOptions();
    options.setUpdate(true);
    options.setJobName("oldJobName");
    Dataflow mockDataflowClient = options.getDataflowClient();
    Dataflow.Projects.Locations.Jobs.Create mockRequest =
        mock(Dataflow.Projects.Locations.Jobs.Create.class);
    when(mockDataflowClient
            .projects()
            .locations()
            .jobs()
            .create(eq(PROJECT_ID), eq(REGION_ID), any(Job.class)))
        .thenReturn(mockRequest);
    final Job resultJob = new Job();
    resultJob.setId("newid");
    // Return a different request id.
    resultJob.setClientRequestId("different_request_id");
    when(mockRequest.execute()).thenReturn(resultJob);
    Pipeline p = buildDataflowPipeline(options);
    thrown.expect(DataflowJobAlreadyUpdatedException.class);
    thrown.expect(
        new TypeSafeMatcher<DataflowJobAlreadyUpdatedException>() {
            @Override
            public void describeTo(Description description) {
                description.appendText("Expected job ID: " + resultJob.getId());
            }

            @Override
            protected boolean matchesSafely(DataflowJobAlreadyUpdatedException item) {
                return resultJob.getId().equals(item.getJob().getJobId());
            }
        });
    thrown.expectMessage(
        "The job named oldjobname with id: oldJobId has already been updated "
            + "into job id: newid and cannot be updated again.");
    p.run();
}
/**
 * Converts a separator-delimited string into a sorted set of paths.
 * Blank segments are ignored; each remaining segment is trimmed and
 * converted to a Path.
 *
 * @param value the raw parameter string; must not be null
 * @return a sorted set of the parsed paths
 * @throws ParameterException when value is null
 */
@Override
public SortedSet<Path> convertFrom(String value) {
    if (value == null) {
        throw new ParameterException("Path list must not be null.");
    }
    // Same supplier the stream collector would use, so ordering semantics match.
    final SortedSet<Path> paths = sortedPathSupplier().get();
    for (final String rawSegment : value.split(SEPARATOR)) {
        final String segment = StringUtils.trimToNull(rawSegment);
        if (segment != null) {
            paths.add(Paths.get(segment));
        }
    }
    return paths;
}
/**
 * A null input must be rejected with a ParameterException.
 */
@Test(expected = ParameterException.class)
public void testConvertFromNull() {
    converter.convertFrom(null);
}
/**
 * Processes a single ProcessBundleRequest: obtains (or creates) a cached
 * BundleProcessor, runs the start functions, feeds the bundle's input elements
 * (either embedded in the request or streamed via the data client), runs the
 * finish functions, then assembles the response (outbound elements, residual
 * roots, monitoring data, finalization flag). The processor is released back
 * to the cache on success and discarded on any exception.
 *
 * @param request the instruction request containing the ProcessBundleRequest
 * @return the response builder with the ProcessBundleResponse populated
 * @throws Exception if any start/finish function or input handling fails
 */
public BeamFnApi.InstructionResponse.Builder processBundle(BeamFnApi.InstructionRequest request)
    throws Exception {
    BeamFnApi.ProcessBundleResponse.Builder response = BeamFnApi.ProcessBundleResponse.newBuilder();
    BundleProcessor bundleProcessor =
        bundleProcessorCache.get(
            request,
            () -> {
                try {
                    return createBundleProcessor(
                        request.getProcessBundle().getProcessBundleDescriptorId(),
                        request.getProcessBundle());
                } catch (IOException e) {
                    throw new RuntimeException(e);
                }
            });
    try {
        PTransformFunctionRegistry startFunctionRegistry = bundleProcessor.getStartFunctionRegistry();
        PTransformFunctionRegistry finishFunctionRegistry = bundleProcessor.getFinishFunctionRegistry();
        ExecutionStateTracker stateTracker = bundleProcessor.getStateTracker();
        try (HandleStateCallsForBundle beamFnStateClient = bundleProcessor.getBeamFnStateClient()) {
            stateTracker.start(request.getInstructionId());
            try {
                // Already in reverse topological order so we don't need to do anything.
                for (ThrowingRunnable startFunction : startFunctionRegistry.getFunctions()) {
                    LOG.debug("Starting function {}", startFunction);
                    startFunction.run();
                }
                if (request.getProcessBundle().hasElements()) {
                    // Input elements are embedded directly in the request.
                    boolean inputFinished =
                        bundleProcessor
                            .getInboundObserver()
                            .multiplexElements(request.getProcessBundle().getElements());
                    if (!inputFinished) {
                        throw new RuntimeException(
                            "Elements embedded in ProcessBundleRequest do not contain stream terminators for "
                                + "all data and timer inputs. Unterminated endpoints: "
                                + bundleProcessor.getInboundObserver().getUnfinishedEndpoints());
                    }
                } else if (!bundleProcessor.getInboundEndpointApiServiceDescriptors().isEmpty()) {
                    // Input elements are streamed; register and await, then unregister.
                    BeamFnDataInboundObserver observer = bundleProcessor.getInboundObserver();
                    beamFnDataClient.registerReceiver(
                        request.getInstructionId(),
                        bundleProcessor.getInboundEndpointApiServiceDescriptors(),
                        observer);
                    observer.awaitCompletion();
                    beamFnDataClient.unregisterReceiver(
                        request.getInstructionId(),
                        bundleProcessor.getInboundEndpointApiServiceDescriptors());
                }
                // Need to reverse this since we want to call finish in topological order.
                for (ThrowingRunnable finishFunction :
                    Lists.reverse(finishFunctionRegistry.getFunctions())) {
                    LOG.debug("Finishing function {}", finishFunction);
                    finishFunction.run();
                }
                // If bundleProcessor has not flushed any elements, embed them in response.
                embedOutboundElementsIfApplicable(response, bundleProcessor);
                // Add all checkpointed residuals to the response.
                response.addAllResidualRoots(bundleProcessor.getSplitListener().getResidualRoots());
                // Add all metrics to the response.
                bundleProcessor.getProgressRequestLock().lock();
                Map<String, ByteString> monitoringData = finalMonitoringData(bundleProcessor);
                if (runnerAcceptsShortIds) {
                    response.putAllMonitoringData(monitoringData);
                } else {
                    // Expand short ids into full MonitoringInfo protos for older runners.
                    for (Map.Entry<String, ByteString> metric : monitoringData.entrySet()) {
                        response.addMonitoringInfos(
                            shortIds.get(metric.getKey()).toBuilder().setPayload(metric.getValue()));
                    }
                }
                if (!bundleProcessor.getBundleFinalizationCallbackRegistrations().isEmpty()) {
                    finalizeBundleHandler.registerCallbacks(
                        bundleProcessor.getInstructionId(),
                        ImmutableList.copyOf(bundleProcessor.getBundleFinalizationCallbackRegistrations()));
                    response.setRequiresFinalization(true);
                }
            } finally {
                // We specifically deactivate state tracking while we are holding the progress request and
                // sampling locks.
                stateTracker.reset();
            }
        }
        // Mark the bundle processor as re-usable.
        bundleProcessorCache.release(
            request.getProcessBundle().getProcessBundleDescriptorId(), bundleProcessor);
        return BeamFnApi.InstructionResponse.newBuilder().setProcessBundle(response);
    } catch (Exception e) {
        // Make sure we clean-up from the active set of bundle processors.
        bundleProcessorCache.discard(bundleProcessor);
        throw e;
    }
}
@Test
public void testPTransformFinishExceptionsArePropagated() throws Exception {
  // Descriptor "1L" containing a single data-input transform.
  BeamFnApi.ProcessBundleDescriptor processBundleDescriptor =
      BeamFnApi.ProcessBundleDescriptor.newBuilder()
          .putTransforms(
              "2L",
              RunnerApi.PTransform.newBuilder()
                  .setSpec(RunnerApi.FunctionSpec.newBuilder().setUrn(DATA_INPUT_URN).build())
                  .build())
          .build();
  Map<String, BeamFnApi.ProcessBundleDescriptor> fnApiRegistry =
      ImmutableMap.of("1L", processBundleDescriptor);
  // The factory registered for DATA_INPUT_URN installs a finish-bundle function
  // that always throws (ProcessBundleHandlerTest::throwException).
  ProcessBundleHandler handler =
      new ProcessBundleHandler(
          PipelineOptionsFactory.create(),
          Collections.emptySet(),
          fnApiRegistry::get,
          beamFnDataClient,
          null /* beamFnStateGrpcClientCache */,
          null /* finalizeBundleHandler */,
          new ShortIdMap(),
          executionStateSampler,
          ImmutableMap.of(
              DATA_INPUT_URN,
              (PTransformRunnerFactory<Object>)
                  (context) -> {
                    context.addFinishBundleFunction(ProcessBundleHandlerTest::throwException);
                    return null;
                  }),
          Caches.noop(),
          new BundleProcessorCache(),
          null /* dataSampler */);
  // The exception thrown from the finish-bundle function must propagate out of
  // processBundle to the caller.
  assertThrows(
      "TestException",
      IllegalStateException.class,
      () ->
          handler.processBundle(
              BeamFnApi.InstructionRequest.newBuilder()
                  .setProcessBundle(
                      BeamFnApi.ProcessBundleRequest.newBuilder()
                          .setProcessBundleDescriptorId("1L"))
                  .build()));
  // BundleProcessor is not re-added back to the BundleProcessorCache in case of an exception
  // during bundle processing
  assertThat(handler.bundleProcessorCache.getCachedBundleProcessors().get("1L"), empty());
}
@Override
public void upgrade() {
    // Backfill content pack documents that predate the "id"/"rev" metadata fields:
    // find every document missing either field and rewrite it with defaults.
    final FindIterable<Document> documentsWithMissingFields = collection.find(
            or(not(exists(ContentPack.FIELD_META_ID)), not(exists(ContentPack.FIELD_META_REVISION))));

    for (Document document : documentsWithMissingFields) {
        final ObjectId objectId = document.getObjectId("_id");
        LOG.debug("Found document with missing \"id\" or \"rev\" field with ID <{}>", objectId);

        // Default the pack id to the hex form of the Mongo _id and the revision
        // to 0 when the fields are absent (Document.get with default value).
        final String id = document.get("id", objectId.toHexString());
        final int rev = document.get("rev", 0);

        document.put("id", id);
        document.put("rev", rev);

        // Replace the whole document in place; log the outcome either way.
        final UpdateResult updateResult = collection.replaceOne(eq("_id", objectId), document);
        if (updateResult.wasAcknowledged()) {
            LOG.debug("Successfully updated document with ID <{}>", objectId);
        } else {
            LOG.error("Failed to update document with ID <{}>", objectId);
        }
    }
}
@Test
@MongoDBFixtures("V20180718155800_AddContentPackIdAndRevTest.json")
public void upgrade() {
    final MongoCollection<Document> collection = mongodb.mongoConnection()
            .getMongoDatabase()
            .getCollection(ContentPackPersistenceService.COLLECTION_NAME);
    final Bson filter = and(exists(ContentPack.FIELD_META_ID), exists(ContentPack.FIELD_META_REVISION));

    // Before migration only one fixture document already has both metadata fields.
    assertThat(collection.countDocuments(filter)).isEqualTo(1L);

    migration.upgrade();

    // After migration every document carries both "id" and "rev".
    assertThat(collection.countDocuments(filter)).isEqualTo(2L);
}
/**
 * Dispatches an admin request to the matching handler based on its
 * {@link RequestCode}; unknown codes fall through to an "unknown command"
 * response.
 *
 * @param ctx the channel context of the calling client
 * @param request the admin command to process
 * @return the response produced by the selected handler
 * @throws RemotingCommandException if a handler fails to decode the request
 */
@Override
public RemotingCommand processRequest(ChannelHandlerContext ctx,
    RemotingCommand request) throws RemotingCommandException {
    switch (request.getCode()) {
        // Topic management
        case RequestCode.UPDATE_AND_CREATE_TOPIC:
            return this.updateAndCreateTopic(ctx, request);
        case RequestCode.UPDATE_AND_CREATE_TOPIC_LIST:
            return this.updateAndCreateTopicList(ctx, request);
        case RequestCode.DELETE_TOPIC_IN_BROKER:
            return this.deleteTopic(ctx, request);
        case RequestCode.GET_ALL_TOPIC_CONFIG:
            return this.getAllTopicConfig(ctx, request);
        // Timer message facilities
        case RequestCode.GET_TIMER_CHECK_POINT:
            return this.getTimerCheckPoint(ctx, request);
        case RequestCode.GET_TIMER_METRICS:
            return this.getTimerMetrics(ctx, request);
        // Broker configuration
        case RequestCode.UPDATE_BROKER_CONFIG:
            return this.updateBrokerConfig(ctx, request);
        case RequestCode.GET_BROKER_CONFIG:
            return this.getBrokerConfig(ctx, request);
        // Cold-data flow control
        case RequestCode.UPDATE_COLD_DATA_FLOW_CTR_CONFIG:
            return this.updateColdDataFlowCtrGroupConfig(ctx, request);
        case RequestCode.REMOVE_COLD_DATA_FLOW_CTR_CONFIG:
            return this.removeColdDataFlowCtrGroupConfig(ctx, request);
        case RequestCode.GET_COLD_DATA_FLOW_CTR_INFO:
            return this.getColdDataFlowCtrInfo(ctx);
        case RequestCode.SET_COMMITLOG_READ_MODE:
            return this.setCommitLogReadaheadMode(ctx, request);
        // Offset queries
        case RequestCode.SEARCH_OFFSET_BY_TIMESTAMP:
            return this.searchOffsetByTimestamp(ctx, request);
        case RequestCode.GET_MAX_OFFSET:
            return this.getMaxOffset(ctx, request);
        case RequestCode.GET_MIN_OFFSET:
            return this.getMinOffset(ctx, request);
        case RequestCode.GET_EARLIEST_MSG_STORETIME:
            return this.getEarliestMsgStoretime(ctx, request);
        case RequestCode.GET_BROKER_RUNTIME_INFO:
            return this.getBrokerRuntimeInfo(ctx, request);
        // Message queue locking (ordered consumption)
        case RequestCode.LOCK_BATCH_MQ:
            return this.lockBatchMQ(ctx, request);
        case RequestCode.UNLOCK_BATCH_MQ:
            return this.unlockBatchMQ(ctx, request);
        // Subscription group management
        case RequestCode.UPDATE_AND_CREATE_SUBSCRIPTIONGROUP:
            return this.updateAndCreateSubscriptionGroup(ctx, request);
        case RequestCode.GET_ALL_SUBSCRIPTIONGROUP_CONFIG:
            return this.getAllSubscriptionGroup(ctx, request);
        case RequestCode.DELETE_SUBSCRIPTIONGROUP:
            return this.deleteSubscriptionGroup(ctx, request);
        // Statistics and connections
        case RequestCode.GET_TOPIC_STATS_INFO:
            return this.getTopicStatsInfo(ctx, request);
        case RequestCode.GET_CONSUMER_CONNECTION_LIST:
            return this.getConsumerConnectionList(ctx, request);
        case RequestCode.GET_PRODUCER_CONNECTION_LIST:
            return this.getProducerConnectionList(ctx, request);
        case RequestCode.GET_ALL_PRODUCER_INFO:
            return this.getAllProducerInfo(ctx, request);
        case RequestCode.GET_CONSUME_STATS:
            return this.getConsumeStats(ctx, request);
        case RequestCode.GET_ALL_CONSUMER_OFFSET:
            return this.getAllConsumerOffset(ctx, request);
        case RequestCode.GET_ALL_DELAY_OFFSET:
            return this.getAllDelayOffset(ctx, request);
        case RequestCode.GET_ALL_MESSAGE_REQUEST_MODE:
            return this.getAllMessageRequestMode(ctx, request);
        // Consumer offset/status manipulation
        case RequestCode.INVOKE_BROKER_TO_RESET_OFFSET:
            return this.resetOffset(ctx, request);
        case RequestCode.INVOKE_BROKER_TO_GET_CONSUMER_STATUS:
            return this.getConsumerStatus(ctx, request);
        case RequestCode.QUERY_TOPIC_CONSUME_BY_WHO:
            return this.queryTopicConsumeByWho(ctx, request);
        case RequestCode.QUERY_TOPICS_BY_CONSUMER:
            return this.queryTopicsByConsumer(ctx, request);
        case RequestCode.QUERY_SUBSCRIPTION_BY_CONSUMER:
            return this.querySubscriptionByConsumer(ctx, request);
        case RequestCode.QUERY_CONSUME_TIME_SPAN:
            return this.queryConsumeTimeSpan(ctx, request);
        case RequestCode.GET_SYSTEM_TOPIC_LIST_FROM_BROKER:
            return this.getSystemTopicListFromBroker(ctx, request);
        // Housekeeping (note: these handlers take no arguments)
        case RequestCode.CLEAN_EXPIRED_CONSUMEQUEUE:
            return this.cleanExpiredConsumeQueue();
        case RequestCode.DELETE_EXPIRED_COMMITLOG:
            return this.deleteExpiredCommitLog();
        case RequestCode.CLEAN_UNUSED_TOPIC:
            return this.cleanUnusedTopic();
        case RequestCode.GET_CONSUMER_RUNNING_INFO:
            return this.getConsumerRunningInfo(ctx, request);
        case RequestCode.QUERY_CORRECTION_OFFSET:
            return this.queryCorrectionOffset(ctx, request);
        case RequestCode.CONSUME_MESSAGE_DIRECTLY:
            return this.consumeMessageDirectly(ctx, request);
        case RequestCode.CLONE_GROUP_OFFSET:
            return this.cloneGroupOffset(ctx, request);
        case RequestCode.VIEW_BROKER_STATS_DATA:
            return ViewBrokerStatsData(ctx, request);
        case RequestCode.GET_BROKER_CONSUME_STATS:
            return fetchAllConsumeStatsInBroker(ctx, request);
        case RequestCode.QUERY_CONSUME_QUEUE:
            return queryConsumeQueue(ctx, request);
        case RequestCode.UPDATE_AND_GET_GROUP_FORBIDDEN:
            return this.updateAndGetGroupForbidden(ctx, request);
        case RequestCode.GET_SUBSCRIPTIONGROUP_CONFIG:
            return this.getSubscriptionGroup(ctx, request);
        // ACL management
        case RequestCode.UPDATE_AND_CREATE_ACL_CONFIG:
            return updateAndCreateAccessConfig(ctx, request);
        case RequestCode.DELETE_ACL_CONFIG:
            return deleteAccessConfig(ctx, request);
        case RequestCode.GET_BROKER_CLUSTER_ACL_INFO:
            return getBrokerAclConfigVersion(ctx, request);
        case RequestCode.UPDATE_GLOBAL_WHITE_ADDRS_CONFIG:
            return updateGlobalWhiteAddrsConfig(ctx, request);
        case RequestCode.RESUME_CHECK_HALF_MESSAGE:
            return resumeCheckHalfMessage(ctx, request);
        case RequestCode.GET_TOPIC_CONFIG:
            return getTopicConfig(ctx, request);
        case RequestCode.UPDATE_AND_CREATE_STATIC_TOPIC:
            return this.updateAndCreateStaticTopic(ctx, request);
        // HA / broker role management
        case RequestCode.NOTIFY_MIN_BROKER_ID_CHANGE:
            return this.notifyMinBrokerIdChange(ctx, request);
        case RequestCode.EXCHANGE_BROKER_HA_INFO:
            return this.updateBrokerHaInfo(ctx, request);
        case RequestCode.GET_BROKER_HA_STATUS:
            return this.getBrokerHaStatus(ctx, request);
        case RequestCode.RESET_MASTER_FLUSH_OFFSET:
            return this.resetMasterFlushOffset(ctx, request);
        case RequestCode.GET_BROKER_EPOCH_CACHE:
            return this.getBrokerEpochCache(ctx, request);
        case RequestCode.NOTIFY_BROKER_ROLE_CHANGED:
            return this.notifyBrokerRoleChanged(ctx, request);
        // Authentication/authorization entities
        case RequestCode.AUTH_CREATE_USER:
            return this.createUser(ctx, request);
        case RequestCode.AUTH_UPDATE_USER:
            return this.updateUser(ctx, request);
        case RequestCode.AUTH_DELETE_USER:
            return this.deleteUser(ctx, request);
        case RequestCode.AUTH_GET_USER:
            return this.getUser(ctx, request);
        case RequestCode.AUTH_LIST_USER:
            return this.listUser(ctx, request);
        case RequestCode.AUTH_CREATE_ACL:
            return this.createAcl(ctx, request);
        case RequestCode.AUTH_UPDATE_ACL:
            return this.updateAcl(ctx, request);
        case RequestCode.AUTH_DELETE_ACL:
            return this.deleteAcl(ctx, request);
        case RequestCode.AUTH_GET_ACL:
            return this.getAcl(ctx, request);
        case RequestCode.AUTH_LIST_ACL:
            return this.listAcl(ctx, request);
        default:
            return getUnknownCmdResponse(ctx, request);
    }
}
@Test
public void testProcessRequest_fail() throws RemotingCommandException, UnknownHostException {
    // RESUME_CHECK_HALF_MESSAGE request for which the stubbed store result cannot
    // be resumed; the processor is expected to answer with SYSTEM_ERROR.
    RemotingCommand request = createResumeCheckHalfMessageCommand();
    when(messageStore.selectOneMessageByOffset(any(Long.class))).thenReturn(createSelectMappedBufferResult());
    RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SYSTEM_ERROR);
}
/**
 * Asserts exact equality with the given value by delegating to the parent
 * implementation. NOTE(review): the override adds no behavior — presumably it
 * exists to surface the method (and its nullability annotation) on this
 * subject type's API; confirm against the class Javadoc.
 */
@Override
public void isEqualTo(@Nullable Object expected) {
    super.isEqualTo(expected);
}
@Test
public void isEqualTo_WithoutToleranceParameter_Success() {
    // Exact array equality must hold for special float values (infinities, NaN)
    // and for signed zeros when no tolerance parameter is supplied.
    assertThat(array(2.2f, 5.4f, POSITIVE_INFINITY, NEGATIVE_INFINITY, NaN, 0.0f, -0.0f))
        .isEqualTo(array(2.2f, 5.4f, POSITIVE_INFINITY, NEGATIVE_INFINITY, NaN, 0.0f, -0.0f));
}
/**
 * Derives the host interface name for a (smart-NIC) port from the PCI address
 * stored in the port's binding profile.
 *
 * The pci_slot profile value is expected in the form "domain:bus:device.function"
 * with hexadecimal components; the name is built as prefix + bus + "s" + device
 * (+ "f" + function when the function number is non-zero).
 *
 * @param port the neutron port carrying the binding profile
 * @return the interface name, {@code UNSUPPORTED_VENDOR} for unknown vendor
 *         info, or {@code null} when the profile/pci_slot is missing or malformed
 */
public static String getIntfNameFromPciAddress(Port port) {
    if (port.getProfile() == null || port.getProfile().isEmpty()) {
        log.error("Port profile is not found");
        return null;
    }

    if (!port.getProfile().containsKey(PCISLOT)
            || Strings.isNullOrEmpty(port.getProfile().get(PCISLOT).toString())) {
        log.error("Failed to retrieve the interface name because of no "
                + "pci_slot information from the port");
        return null;
    }

    String vendorInfoForPort = String.valueOf(port.getProfile().get(PCI_VENDOR_INFO));
    if (!portNamePrefixMap().containsKey(vendorInfoForPort)) {
        log.debug("{} is an non-smart NIC prefix.", vendorInfoForPort);
        return UNSUPPORTED_VENDOR;
    }
    String portNamePrefix = portNamePrefixMap().get(vendorInfoForPort);

    // Fetch and split the pci_slot value once instead of re-parsing it for every
    // component, and fail soft (log + null) on a malformed address instead of
    // throwing ArrayIndexOutOfBoundsException.
    String pciSlot = port.getProfile().get(PCISLOT).toString();
    String[] slotParts = pciSlot.split(":");
    if (slotParts.length < 3) {
        log.error("Failed to retrieve the interface name because of malformed "
                + "pci_slot {}", pciSlot);
        return null;
    }
    String[] deviceAndFunction = slotParts[2].split("\\.");
    if (deviceAndFunction.length < 2) {
        log.error("Failed to retrieve the interface name because of malformed "
                + "pci_slot {}", pciSlot);
        return null;
    }

    // Components are hexadecimal in the PCI address but decimal in the name.
    String busNumDecimal = String.valueOf(Integer.parseInt(slotParts[1], HEX_RADIX));
    String deviceNumDecimal = String.valueOf(Integer.parseInt(deviceAndFunction[0], HEX_RADIX));
    String functionNumDecimal = String.valueOf(Integer.parseInt(deviceAndFunction[1], HEX_RADIX));

    // Function number zero is omitted from the interface name.
    String intfName = portNamePrefix + busNumDecimal + PREFIX_DEVICE_NUMBER + deviceNumDecimal;
    if (!functionNumDecimal.equals(ZERO_FUNCTION_NUMBER)) {
        intfName += PREFIX_FUNCTION_NUMBER + functionNumDecimal;
    }
    return intfName;
}
@Test
public void testGetIntfNameFromPciAddress() {
    String expectedIntfName1 = "enp5s8";
    String expectedIntfName2 = "enp5s8f3";

    // A port without pci_slot information yields null.
    assertNull(getIntfNameFromPciAddress(openstackPort));
    // Function number 0 is omitted from the interface name...
    assertEquals(expectedIntfName1, getIntfNameFromPciAddress(openstackSriovPort1));
    // ...while a non-zero function number is appended with an "f" prefix.
    assertEquals(expectedIntfName2, getIntfNameFromPciAddress(openstackSriovPort2));
    // Unknown vendor info maps to the UNSUPPORTED_VENDOR marker.
    assertEquals(UNSUPPORTED_VENDOR, getIntfNameFromPciAddress(openstackSriovPort3));
}
/**
 * Prepares fetch requests for the current fetchable partitions and hands them
 * to the network layer.
 *
 * @return the number of fetch requests that were prepared
 */
public synchronized int sendFetches() {
    final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
    // Both completion callbacks re-acquire the Fetcher lock before mutating
    // state — presumably they may be invoked from a different thread than the
    // one calling sendFetches; confirm against the network client's contract.
    sendFetchesInternal(
            fetchRequests,
            (fetchTarget, data, clientResponse) -> {
                synchronized (Fetcher.this) {
                    handleFetchSuccess(fetchTarget, data, clientResponse);
                }
            },
            (fetchTarget, data, error) -> {
                synchronized (Fetcher.this) {
                    handleFetchFailure(fetchTarget, data, error);
                }
            });

    return fetchRequests.size();
}
@Test
public void testParseCorruptedRecord() throws Exception {
    // Hand-builds a legacy (V1) record buffer interleaving valid records with
    // corrupted ones (bad CRC at offset 1, bad size field at offset 3) and
    // verifies that fetching surfaces the corruption on the affected offsets
    // while allowing seeks over the CRC-corrupt record within the batch.
    buildFetcher();
    assignFromUser(singleton(tp0));

    ByteBuffer buffer = ByteBuffer.allocate(1024);
    DataOutputStream out = new DataOutputStream(new ByteBufferOutputStream(buffer));

    byte magic = RecordBatch.MAGIC_VALUE_V1;
    byte[] key = "foo".getBytes();
    byte[] value = "baz".getBytes();
    long offset = 0;
    long timestamp = 500L;

    int size = LegacyRecord.recordSize(magic, key.length, value.length);
    byte attributes = LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME);
    long crc = LegacyRecord.computeChecksum(magic, attributes, timestamp, key, value);

    // write one valid record
    out.writeLong(offset);
    out.writeInt(size);
    LegacyRecord.write(out, magic, crc, LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME), timestamp, key, value);

    // and one invalid record (note the crc)
    out.writeLong(offset + 1);
    out.writeInt(size);
    LegacyRecord.write(out, magic, crc + 1, LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME), timestamp, key, value);

    // write one valid record
    out.writeLong(offset + 2);
    out.writeInt(size);
    LegacyRecord.write(out, magic, crc, LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME), timestamp, key, value);

    // Write a record whose size field is invalid.
    out.writeLong(offset + 3);
    out.writeInt(1);

    // write one valid record
    out.writeLong(offset + 4);
    out.writeInt(size);
    LegacyRecord.write(out, magic, crc, LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME), timestamp, key, value);

    buffer.flip();

    subscriptions.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(0, Optional.empty(), metadata.currentLeader(tp0)));

    // normal fetch
    assertEquals(1, sendFetches());
    client.prepareResponse(fullFetchResponse(tidp0, MemoryRecords.readableRecords(buffer), Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));

    // the first fetchRecords() should return the first valid message
    assertEquals(1, fetchRecords().get(tp0).size());
    assertEquals(1, subscriptions.position(tp0).offset);

    // Offset 1 (bad CRC) blocks consumption until we seek past it.
    ensureBlockOnRecord(1L);
    seekAndConsumeRecord(buffer, 2L);
    // Offset 3 (bad size) blocks again.
    ensureBlockOnRecord(3L);
    try {
        // For a record that cannot be retrieved from the iterator, we cannot seek over it within the batch.
        seekAndConsumeRecord(buffer, 4L);
        fail("Should have thrown exception when fail to retrieve a record from iterator.");
    } catch (KafkaException ke) {
        // let it go
    }
    ensureBlockOnRecord(4L);
}
/**
 * Creates a new {@link ObjectMapper} with the shared default configuration
 * applied.
 *
 * @return a freshly configured mapper instance
 */
public static ObjectMapper newObjectMapper() {
    // Build and configure in one step; configure(...) returns the mapper.
    return configure(new ObjectMapper());
}
@Test
void objectMapperSerializesNullValues() throws IOException {
    // The default mapper must serialize null properties explicitly rather than
    // omitting the fields from the JSON output.
    final ObjectMapper mapper = Jackson.newObjectMapper();
    final Issue1627 pojo = new Issue1627(null, null);
    final String json = "{\"string\":null,\"uuid\":null}";

    assertThat(mapper.writeValueAsString(pojo)).isEqualTo(json);
}
/**
 * Authenticates an incoming REST request via JAAS using HTTP Basic credentials.
 * Internal requests bypass authentication; failed logins abort the request
 * with 401 Unauthorized.
 *
 * @param requestContext the request being filtered (aborted on auth failure)
 */
@Override
public void filter(ContainerRequestContext requestContext) {
    // Internal (cross-worker) requests are not authenticated here.
    if (isInternalRequest(requestContext)) {
        log.trace("Skipping authentication for internal request");
        return;
    }

    try {
        log.debug("Authenticating request");
        // Parse the Authorization header and run it through the configured
        // JAAS login module; a failed login throws LoginException.
        BasicAuthCredentials credentials = new BasicAuthCredentials(requestContext.getHeaderString(AUTHORIZATION));
        LoginContext loginContext = new LoginContext(
            CONNECT_LOGIN_MODULE,
            null,
            new BasicAuthCallBackHandler(credentials),
            configuration);
        loginContext.login();
        // Only reached on success: attach the authenticated principal.
        setSecurityContextForRequest(requestContext, credentials);
    } catch (LoginException | ConfigException e) {
        // Log at debug here in order to avoid polluting log files whenever someone mistypes their credentials
        log.debug("Request failed authentication", e);
        requestContext.abortWith(
            Response.status(Response.Status.UNAUTHORIZED)
                .entity("User cannot access the resource.")
                .build());
    }
}
@Test
public void testSecurityContextSet() throws IOException, URISyntaxException {
    File credentialFile = setupPropertyLoginFile(true);
    JaasBasicAuthFilter jaasBasicAuthFilter = setupJaasFilter("KafkaConnect", credentialFile.getPath());
    ContainerRequestContext requestContext = setMock("Basic", "user1", "password1");
    // An https request URI is needed so the security context reports secure.
    when(requestContext.getUriInfo()).thenReturn(mock(UriInfo.class));
    when(requestContext.getUriInfo().getRequestUri()).thenReturn(new URI("https://foo.bar"));

    jaasBasicAuthFilter.filter(requestContext);

    // A successful login must install a security context carrying the
    // authenticated principal.
    ArgumentCaptor<SecurityContext> capturedContext = ArgumentCaptor.forClass(SecurityContext.class);
    verify(requestContext).setSecurityContext(capturedContext.capture());
    assertEquals("user1", capturedContext.getValue().getUserPrincipal().getName());
    assertTrue(capturedContext.getValue().isSecure());
}
/**
 * Processes an incoming chat message: fires type-specific notifications
 * (trade/duel requests, broadcasts, PMs), highlights the local player's name,
 * applies the configured highlight patterns, and writes the reformatted
 * message back to the message node when anything changed.
 */
@Subscribe
public void onChatMessage(ChatMessage chatMessage)
{
	MessageNode messageNode = chatMessage.getMessageNode();
	boolean update = false;

	switch (chatMessage.getType())
	{
		case TRADEREQ:
			if (chatMessage.getMessage().contains("wishes to trade with you."))
			{
				notifier.notify(config.notifyOnTrade(), chatMessage.getMessage());
			}
			break;
		case CHALREQ_TRADE:
			if (chatMessage.getMessage().contains("wishes to duel with you."))
			{
				notifier.notify(config.notifyOnDuel(), chatMessage.getMessage());
			}
			break;
		case BROADCAST:
			// Some broadcasts have links attached, notated by `|` followed by a number, while others contain color tags.
			// We don't want to see either in the printed notification.
			String broadcast = chatMessage.getMessage();
			int urlTokenIndex = broadcast.lastIndexOf('|');
			if (urlTokenIndex != -1)
			{
				broadcast = broadcast.substring(0, urlTokenIndex);
			}

			notifier.notify(config.notifyOnBroadcast(), Text.removeFormattingTags(broadcast));
			break;
		case PRIVATECHAT:
		case MODPRIVATECHAT:
			notifier.notify(config.notifyOnPM(), Text.removeTags(chatMessage.getName()) + ": " + chatMessage.getMessage());
			break;
		case PRIVATECHATOUT:
		case DIALOG:
		case MESBOX:
			// Never highlight or notify for these message types.
			return;
		case MODCHAT:
		case PUBLICCHAT:
		case FRIENDSCHAT:
		case CLAN_CHAT:
		case CLAN_GUEST_CHAT:
		case CLAN_GIM_CHAT:
		case AUTOTYPER:
		case MODAUTOTYPER:
			// Skip messages sent by the local player themselves.
			if (client.getLocalPlayer() != null && Text.toJagexName(Text.removeTags(chatMessage.getName())).equals(client.getLocalPlayer().getName()))
			{
				return;
			}
			break;
		case CONSOLE:
			// Don't notify for notification messages
			if (chatMessage.getName().equals(runeliteTitle))
			{
				return;
			}
			break;
	}

	// Lazily compile the local player's name matcher; spaces in the name may
	// appear in chat as a regular space or a non-breaking space.
	if (usernameMatcher == null && client.getLocalPlayer() != null && client.getLocalPlayer().getName() != null)
	{
		String username = client.getLocalPlayer().getName();
		String pattern = Arrays.stream(username.split(" "))
			.map(s -> s.isEmpty() ? "" : Pattern.quote(s))
			.collect(Collectors.joining("[\u00a0\u0020]")); // space or nbsp
		usernameMatcher = Pattern.compile("\\b" + pattern + "\\b", Pattern.CASE_INSENSITIVE);
	}

	if (config.highlightOwnName() && usernameMatcher != null)
	{
		final String message = messageNode.getValue();
		Matcher matcher = usernameMatcher.matcher(message);
		if (matcher.find())
		{
			final String username = client.getLocalPlayer().getName();
			StringBuffer stringBuffer = new StringBuffer();

			do
			{
				final int start = matcher.start();
				// start not end, since username won't contain a col tag
				final String closeColor = MoreObjects.firstNonNull(
					getLastColor(message.substring(0, start)),
					"<col" + ChatColorType.NORMAL + '>');
				final String replacement = "<col" + ChatColorType.HIGHLIGHT.name() + "><u>" + username + "</u>" + closeColor;
				matcher.appendReplacement(stringBuffer, replacement);
			} while (matcher.find());

			matcher.appendTail(stringBuffer);
			messageNode.setValue(stringBuffer.toString());
			update = true;

			// Only notify for name mentions in player-to-player message types.
			if (chatMessage.getType() == ChatMessageType.PUBLICCHAT
				|| chatMessage.getType() == ChatMessageType.PRIVATECHAT
				|| chatMessage.getType() == ChatMessageType.FRIENDSCHAT
				|| chatMessage.getType() == ChatMessageType.MODCHAT
				|| chatMessage.getType() == ChatMessageType.MODPRIVATECHAT
				|| chatMessage.getType() == ChatMessageType.CLAN_CHAT
				|| chatMessage.getType() == ChatMessageType.CLAN_GUEST_CHAT)
			{
				sendNotification(config.notifyOnOwnName(), chatMessage);
			}
		}
	}

	boolean matchesHighlight = false;

	// Get nodeValue to store and update in between the different pattern passes
	// The messageNode value is only set after all patterns have been processed
	String nodeValue = messageNode.getValue();

	for (Pattern pattern : highlightPatterns)
	{
		Matcher matcher = pattern.matcher(nodeValue);
		if (!matcher.find())
		{
			continue;
		}

		StringBuffer stringBuffer = new StringBuffer();

		do
		{
			final int end = matcher.end();
			// Determine the ending color by finding the last color tag up to and
			// including the match.
			final String closeColor = MoreObjects.firstNonNull(
				getLastColor(nodeValue.substring(0, end)),
				"<col" + ChatColorType.NORMAL + '>');

			// Strip color tags from the highlighted region so that it remains highlighted correctly
			final String value = stripColor(matcher.group());

			matcher.appendReplacement(stringBuffer, "<col" + ChatColorType.HIGHLIGHT + '>' + value + closeColor);

			update = true;
			matchesHighlight = true;
		} while (matcher.find());

		// Append stringBuffer with remainder of message and update nodeValue
		matcher.appendTail(stringBuffer);
		nodeValue = stringBuffer.toString();
	}

	if (matchesHighlight)
	{
		messageNode.setValue(nodeValue);
		sendNotification(config.notifyOnHighlight(), chatMessage);
	}

	if (update)
	{
		messageNode.setRuneLiteFormatMessage(messageNode.getValue());
	}
}
@Test
public void testHighlightOwnName() {
    Player player = mock(Player.class);
    when(player.getName()).thenReturn("Logic Knot");
    when(client.getLocalPlayer()).thenReturn(player);
    when(config.highlightOwnName()).thenReturn(true);

    MessageNode messageNode = mock(MessageNode.class);
    when(messageNode.getValue()).thenReturn("Logic Knot received a drop: Adamant longsword");

    ChatMessage chatMessage = new ChatMessage(messageNode, ChatMessageType.GAMEMESSAGE, "", "", "", 0);
    chatNotificationsPlugin.onChatMessage(chatMessage);

    // The player's own name is wrapped in highlight color tags and underlined,
    // followed by a reset back to the normal color.
    verify(messageNode).setValue("<colHIGHLIGHT><u>Logic Knot</u><colNORMAL> received a drop: Adamant longsword");
}
/**
 * Removes the given element from the queue.
 *
 * @param toRemove the element to remove (must currently be in the queue; its
 *                 internal index is trusted)
 * @return true if the removed element occupied the head position
 */
@Override
public boolean remove(@Nonnull T toRemove) {
    // Capture the element's position before removal so we can report whether
    // the head was affected.
    final int elementIndex = toRemove.getInternalIndex();
    removeInternal(elementIndex);
    // NOTE(review): this compares the pre-removal index against the head index
    // read *after* removal — presumably getHeadElementIndex() is a constant;
    // confirm before reordering anything here.
    return elementIndex == getHeadElementIndex();
}
@Test
void testRemove() {
    HeapPriorityQueue<TestElement> priorityQueue = newPriorityQueue(1);

    final long key = 4711L;
    final long priorityValue = 42L;
    final TestElement testElement = new TestElement(key, priorityValue);
    // Adding then removing the sole element must succeed and leave the queue
    // empty; remove() returns true because the head element was removed.
    assertThat(priorityQueue.add(testElement)).isTrue();
    assertThat(priorityQueue.remove(testElement)).isTrue();
    assertThat(priorityQueue.isEmpty()).isTrue();
}
/**
 * Sets the fallback invoked when a guarded HTTP request is blocked or fails.
 *
 * @param fallback the fallback handler; must not be null
 * @throws IllegalArgumentException if {@code fallback} is null
 */
public void setFallback(ApacheHttpClientFallback fallback) {
    AssertUtil.notNull(fallback, "fallback cannot be null");
    this.fallback = fallback;
}
@Test(expected = IllegalArgumentException.class)
public void testConfigSetFallback() {
    // A null fallback must be rejected with IllegalArgumentException.
    SentinelApacheHttpClientConfig config = new SentinelApacheHttpClientConfig();
    config.setFallback(null);
}
/**
 * Renders the configuration to the writer in the requested representation.
 *
 * @param conf the configuration to render
 * @param out destination writer
 * @param format either FORMAT_JSON or FORMAT_XML (null-safe; anything else fails)
 * @param propertyName optional single property to restrict the output to
 * @throws BadFormatException if {@code format} names neither supported format
 */
static void writeResponse(Configuration conf, Writer out, String format,
    String propertyName)
    throws IOException, IllegalArgumentException, BadFormatException {
  // Constant-first equality keeps both checks safe for a null format.
  if (FORMAT_XML.equals(format)) {
    conf.writeXml(propertyName, out, conf);
  } else if (FORMAT_JSON.equals(format)) {
    Configuration.dumpConfiguration(conf, propertyName, out);
  } else {
    throw new BadFormatException("Bad format: " + format);
  }
}
@Test
public void testWriteXml() throws Exception {
    StringWriter sw = new StringWriter();
    // NOTE(review): this invokes a three-argument overload (no property name) —
    // presumably it dumps the full configuration; confirm against ConfServlet.
    ConfServlet.writeResponse(getTestConf(), sw, "xml");
    String xml = sw.toString();

    // Parse the produced XML with a hardened parser and look for the test key.
    DocumentBuilderFactory docBuilderFactory = XMLUtils.newSecureDocumentBuilderFactory();
    DocumentBuilder builder = docBuilderFactory.newDocumentBuilder();
    Document doc = builder.parse(new InputSource(new StringReader(xml)));

    NodeList nameNodes = doc.getElementsByTagName("name");
    boolean foundSetting = false;
    for (int i = 0; i < nameNodes.getLength(); i++) {
        Node nameNode = nameNodes.item(i);
        String key = nameNode.getTextContent();
        if (TEST_KEY.equals(key)) {
            // Verify the value accompanying the test key round-tripped intact.
            foundSetting = true;
            Element propertyElem = (Element)nameNode.getParentNode();
            String val = propertyElem.getElementsByTagName("value").item(0).getTextContent();
            assertEquals(TEST_VAL, val);
        }
    }
    assertTrue(foundSetting);
}
/**
 * Adds a unique, stable stanza-id element (XEP-0359, namespace
 * {@code urn:xmpp:sid:0}) to the stanza on behalf of this entity, subject to
 * the {@code xmpp.sid.*} feature toggles. Any pre-existing stanza-id whose
 * 'by' attribute matches our own JID is removed first, as required by the XEP.
 *
 * @param packet the stanza to decorate (modified in place)
 * @param self the JID used as the 'by' attribute value
 * @return the (possibly modified) packet
 */
public static Packet ensureUniqueAndStableStanzaID( final Packet packet, final JID self )
{
    // Global and per-stanza-type feature toggles; note the differing defaults
    // (messages: on by default, IQ and presence: off by default).
    if ( !JiveGlobals.getBooleanProperty( "xmpp.sid.enabled", true ) )
    {
        return packet;
    }

    if ( packet instanceof IQ && !JiveGlobals.getBooleanProperty( "xmpp.sid.iq.enabled", false ) )
    {
        return packet;
    }

    if ( packet instanceof Message && !JiveGlobals.getBooleanProperty( "xmpp.sid.message.enabled", true ) )
    {
        return packet;
    }

    if ( packet instanceof Presence && !JiveGlobals.getBooleanProperty( "xmpp.sid.presence.enabled", false ) )
    {
        return packet;
    }

    // For IQ stanzas the stanza-id element goes on the IQ's child element.
    final Element parentElement;
    if ( packet instanceof IQ )
    {
        parentElement = ((IQ) packet).getChildElement();
    }
    else
    {
        parentElement = packet.getElement();
    }

    // The packet likely is an IQ result or error, which can, but are not required to have a child element.
    // To have a consistent behavior for these, we'll not add a stanza-ID here.
    if ( parentElement == null )
    {
        Log.debug( "Unable to find appropriate element. Not adding stanza-id to packet: {}", packet );
        return packet;
    }

    // Stanza ID generating entities, which encounter a <stanza-id/> element where the 'by' attribute matches the 'by'
    // attribute they would otherwise set, MUST delete that element even if they are not adding their own stanza ID.
    final Iterator<Element> existingElementIterator = parentElement.elementIterator( QName.get( "stanza-id", "urn:xmpp:sid:0" ) );
    while (existingElementIterator.hasNext()) {
        final Element element = existingElementIterator.next();

        if (self.toString().equals( element.attributeValue( "by" ) ) ) {
            Log.warn( "Removing a 'stanza-id' element from an inbound stanza, as its 'by' attribute value matches the value that we would set. Offending stanza: {}", packet );
            existingElementIterator.remove();
        }
    }

    // Attach a freshly generated, random id attributed to ourselves.
    final String id = UUID.randomUUID().toString();
    Log.debug( "Using newly generated value '{}' for stanza that has id '{}'.", id, packet.getID() );
    final Element stanzaIdElement = parentElement.addElement( QName.get( "stanza-id", "urn:xmpp:sid:0" ) );
    stanzaIdElement.addAttribute( "id", id );
    stanzaIdElement.addAttribute( "by", self.toString() );

    return packet;
}
@Test
public void testDontOverwriteStanzaIDElement() throws Exception {
    // Setup fixture: a pre-existing stanza-id whose 'by' attribute differs from
    // our own JID must be preserved when we add our own element.
    final Packet input = new Message();
    final JID self = new JID( "foobar" );
    final String notExpected = "de305d54-75b4-431b-adb2-eb6b9e546013";
    final Element toOverwrite = input.getElement().addElement( "stanza-id", "urn:xmpp:sid:0" );
    toOverwrite.addAttribute( "by", new JID( "someoneelse" ).toString() );
    toOverwrite.addAttribute( "id", notExpected );

    // Execute system under test.
    final Packet result = StanzaIDUtil.ensureUniqueAndStableStanzaID( input, self );

    // Verify results: the foreign element is kept and ours is added alongside.
    assertNotNull( result );
    final List<Element> elements = result.getElement().elements( QName.get( "stanza-id", "urn:xmpp:sid:0" ) );
    assertEquals( 2, elements.size() );
}
/**
 * Decodes a percent-encoded string using the given charset.
 *
 * Delegates to the three-argument overload with the boolean flag set to
 * {@code true} — presumably enabling '+'-to-space decoding; confirm against
 * that overload's documentation.
 *
 * @param str the encoded input
 * @param charset charset used to interpret the decoded bytes
 * @return the decoded string
 */
public static String decode(String str, Charset charset) {
    return decode(str, charset, true);
}
@Test
public void issue3063Test() throws UnsupportedEncodingException {
    // https://github.com/dromara/hutool/issues/3063
    final String s = "测试";
    // UTF-16 output starts with the byte order mark (%FE%FF) before the code units.
    final String expectedDecode = "%FE%FF%6D%4B%8B%D5";

    // Encoding must match the JDK encoder byte-for-byte.
    final String s1 = URLUtil.encode(s, StandardCharsets.UTF_16);
    assertEquals(expectedDecode, s1);

    final String s2 = java.net.URLEncoder.encode(s, "UTF-16");
    assertEquals(expectedDecode, s2);

    // Round-trip back to the original string.
    final String decode = URLDecoder.decode(s1, StandardCharsets.UTF_16);
    assertEquals(s, decode);

    // Test a mix of percent-encoded and plain (non-encoded) text in one input.
    final String mixDecoded = expectedDecode + "你好";
    final String decode2 = URLDecoder.decode(mixDecoded, StandardCharsets.UTF_16);
    assertEquals("测试你好", decode2);

    // Behavior on the mixed input must match the JDK decoder.
    assertEquals(
        java.net.URLDecoder.decode(mixDecoded, "UTF-16"),
        URLDecoder.decode(mixDecoded, StandardCharsets.UTF_16)
    );
}
/**
 * Returns an indicator whose value at each bar is this indicator's value
 * {@code barCount} bars earlier.
 *
 * @param barCount how many bars to look back
 * @return a {@code NumericIndicator} wrapping a {@code PreviousValueIndicator}
 *         over this indicator
 */
public NumericIndicator previous(int barCount) {
    return NumericIndicator.of(new PreviousValueIndicator(this, barCount));
}
@Test
public void previous() {
    final NumericIndicator numericIndicator = NumericIndicator.of(cp1);

    // Default look-back: the value at bar 1 is the source's value at bar 0.
    final Indicator<Num> previous = numericIndicator.previous();
    assertNumEquals(cp1.getValue(0), previous.getValue(1));

    // Three-bar look-back: the value at bar 6 is the source's value at bar 3.
    final Indicator<Num> previous3 = numericIndicator.previous(3);
    assertNumEquals(cp1.getValue(3), previous3.getValue(6));
}
/**
 * Polls the next entry from the dead letter queue, waiting up to
 * {@code timeout} for data to become available.
 *
 * @param timeout maximum time to wait (unit as defined by pollEntryBytes)
 * @return the deserialized entry, or {@code null} if nothing arrived in time
 * @throws IOException if reading or deserialization fails
 * @throws InterruptedException if interrupted while waiting
 */
public DLQEntry pollEntry(long timeout) throws IOException, InterruptedException {
    final byte[] serialized = pollEntryBytes(timeout);
    return serialized == null ? null : DLQEntry.deserialize(serialized);
}
@Test
public void testRereadFinalBlock() throws Exception {
    // Regression test for https://github.com/elastic/logstash/issues/7868:
    // two entries that together cross a block boundary must both be readable,
    // and reading past the end must return null rather than looping.
    Event event = createEventWithConstantSerializationOverhead(Collections.emptyMap());

    // Fill event with not quite enough characters to fill block. Fill event with valid RecordType characters - this
    // was the cause of https://github.com/elastic/logstash/issues/7868
    event.setField("message", generateMessageContent(32495));
    long startTime = System.currentTimeMillis();
    int messageSize = 0;
    try(DeadLetterQueueWriter writeManager = DeadLetterQueueWriter
            .newBuilder(dir, 10 * 1024 * 1024, defaultDlqSize, Duration.ofSeconds(1))
            .build()) {
        for (int i = 0; i < 2; i++) {
            DLQEntry entry = new DLQEntry(event, "", "", String.valueOf(i), constantSerializationLengthTimestamp(startTime++));
            final int serializationLength = entry.serialize().length;
            assertThat("setup: serialized entry size...", serializationLength, is(lessThan(BLOCK_SIZE)));
            messageSize += serializationLength;
            writeManager.writeEntry(entry);
        }
        // The two entries combined must span more than one block.
        assertThat(messageSize, is(greaterThan(BLOCK_SIZE)));
    }
    try (DeadLetterQueueReader readManager = new DeadLetterQueueReader(dir)) {
        for (int i = 0; i < 2;i++) {
            final DLQEntry dlqEntry = readManager.pollEntry(100);
            assertThat(String.format("read index `%s`", i), dlqEntry, is(notNullValue()));
            assertThat("", dlqEntry.getReason(), is(String.valueOf(i)));
        }
        // No third entry exists; polling must time out with null.
        final DLQEntry entryBeyondEnd = readManager.pollEntry(100);
        assertThat("read beyond end", entryBeyondEnd, is(nullValue()));
    }
}
/**
 * Decides whether a sampled URL should be kept.
 *
 * A sampler is kept only when it has a non-empty domain, matches the include
 * patterns (when any are configured), and does not match the exclude patterns
 * (when any are configured).
 *
 * @param sampler the recorded HTTP sampler to evaluate
 * @return true to keep the sampler, false to filter it out
 */
boolean filterUrl(HTTPSamplerBase sampler) {
    final String host = sampler.getDomain();
    if (host == null || host.isEmpty()) {
        // Samplers without a domain are never kept.
        return false;
    }

    final String matchUrl = generateMatchUrl(sampler);

    final CollectionProperty includes = getIncludePatterns();
    if (!includes.isEmpty() && !matchesPatterns(matchUrl, includes)) {
        // Include patterns are configured but none matched.
        return false;
    }

    final CollectionProperty excludes = getExcludePatterns();
    // Keep unless an exclude pattern matches.
    return excludes.isEmpty() || !matchesPatterns(matchUrl, excludes);
}
@Test
public void testFilter3() throws Exception {
    // "header.gif" on jakarta.org must be filtered out — presumably it matches
    // the exclude patterns configured in the test fixture; confirm in setup.
    sampler.setPath("header.gif");
    sampler.setDomain("jakarta.org");
    assertFalse(control.filterUrl(sampler), "Should not match header.gif");
}
/**
 * Collects the scripts to run from the command-line arguments: the
 * colon-separated "script" argument plus every file found in the optional
 * "scriptdir" directory.
 *
 * @param argsMap parsed command-line arguments
 * @return absolute/literal script paths, possibly empty
 */
static List<String> locateScripts(ArgsMap argsMap) {
    final List<String> result = new ArrayList<>();

    final String scriptArg = argsMap.get(SCRIPT);
    if (scriptArg != null) {
        // StringTokenizer skips empty tokens, e.g. "a::b" yields two entries.
        final StringTokenizer tokens = new StringTokenizer(scriptArg, ":");
        if (log.isDebugEnabled()) {
            log.debug(
                tokens.countTokens() == 1 ? "initial script is {}" : "initial scripts are {}",
                scriptArg);
        }
        while (tokens.hasMoreTokens()) {
            result.add(tokens.nextToken());
        }
    }

    final String dirArg = argsMap.get(SCRIPT_DIR);
    if (dirArg != null) {
        final File dir = new File(dirArg);
        if (dir.isDirectory()) {
            if (log.isDebugEnabled()) {
                log.debug("found scriptdir: {}", dir.getAbsolutePath());
            }
            // listFiles() returns null on I/O error; treat that as "no scripts".
            final File[] entries = dir.listFiles();
            if (entries != null) {
                for (final File entry : entries) {
                    result.add(entry.getAbsolutePath());
                }
            }
        }
    }
    return result;
}
@Test
void locateScriptsSingle() {
    // A single "script=..." argument yields exactly one script entry.
    ArgsMap argsMap = new ArgsMap(new String[] {"script=script1"});
    List<String> scripts = Main.locateScripts(argsMap);
    assertEquals(1, scripts.size());
}
/**
 * Computes the set of unload (transfer) decisions for this shedding cycle.
 *
 * <p>Flow: fetch the available brokers; refresh the load stats; require the imbalance condition
 * to hold for more than the configured number of consecutive cycles; then repeatedly pair the
 * most loaded broker with the least loaded one and mark top bundles of the max broker for
 * unloading until the offload target (half the weighted max-min load gap, expressed as
 * throughput) is met. When a single candidate bundle would overshoot the target and transfer
 * mode is enabled, smaller bundles are first "swapped" back from the min broker to make room.
 * Every skip/failure path is recorded in {@code counter}.
 *
 * @param recentlyUnloadedBundles bundles skipped because they were unloaded recently
 * @param recentlyUnloadedBrokers recently unloaded brokers, fed into the stats update
 * @return the decision cache filled for this cycle (empty when shedding is skipped)
 */
@Override
public Set<UnloadDecision> findBundlesForUnloading(LoadManagerContext context,
                                                   Map<String, Long> recentlyUnloadedBundles,
                                                   Map<String, Long> recentlyUnloadedBrokers) {
    final var conf = context.brokerConfiguration();
    decisionCache.clear();
    stats.clear();
    Map<String, BrokerLookupData> availableBrokers;
    try {
        availableBrokers = context.brokerRegistry().getAvailableBrokerLookupDataAsync()
                .get(context.brokerConfiguration().getMetadataStoreOperationTimeoutSeconds(), TimeUnit.SECONDS);
    } catch (ExecutionException | InterruptedException | TimeoutException e) {
        counter.update(Failure, Unknown);
        log.warn("Failed to fetch available brokers. Stop unloading.", e);
        return decisionCache;
    }
    try {
        final var loadStore = context.brokerLoadDataStore();
        stats.setLoadDataStore(loadStore);
        boolean debugMode = ExtensibleLoadManagerImpl.debug(conf, log);
        var skipReason = stats.update(
                context.brokerLoadDataStore(), availableBrokers, recentlyUnloadedBrokers, conf);
        if (skipReason.isPresent()) {
            if (debugMode) {
                log.warn(CANNOT_CONTINUE_UNLOAD_MSG
                        + " Skipped the load stat update. Reason:{}.",
                        skipReason.get());
            }
            counter.update(Skip, skipReason.get());
            return decisionCache;
        }
        counter.updateLoadData(stats.avg, stats.std);
        if (debugMode) {
            log.info("brokers' load stats:{}", stats);
        }

        // skip metrics
        int numOfBrokersWithEmptyLoadData = 0;
        int numOfBrokersWithFewBundles = 0;

        final double targetStd = conf.getLoadBalancerBrokerLoadTargetStd();
        boolean transfer = conf.isLoadBalancerTransferEnabled();
        // Require the imbalance condition to persist across consecutive cycles before acting.
        if (stats.std() > targetStd
                || isUnderLoaded(context, stats.peekMinBroker(), stats)
                || isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
            unloadConditionHitCount++;
        } else {
            unloadConditionHitCount = 0;
        }

        if (unloadConditionHitCount <= conf.getLoadBalancerSheddingConditionHitCountThreshold()) {
            if (debugMode) {
                log.info(CANNOT_CONTINUE_UNLOAD_MSG
                        + " Shedding condition hit count:{} is less than or equal to the threshold:{}.",
                        unloadConditionHitCount,
                        conf.getLoadBalancerSheddingConditionHitCountThreshold());
            }
            counter.update(Skip, HitCount);
            return decisionCache;
        }

        while (true) {
            if (!stats.hasTransferableBrokers()) {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG + " Exhausted target transfer brokers.");
                }
                break;
            }
            UnloadDecision.Reason reason;
            if (stats.std() > targetStd) {
                reason = Overloaded;
            } else if (isUnderLoaded(context, stats.peekMinBroker(), stats)) {
                reason = Underloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is underloaded:%s although "
                            + "load std:%.2f <= targetStd:%.2f. "
                            + "Continuing unload for this underloaded broker.",
                            stats.peekMinBroker(),
                            context.brokerLoadDataStore().get(stats.peekMinBroker()).get(),
                            stats.std(), targetStd));
                }
            } else if (isOverLoaded(context, stats.peekMaxBroker(), stats.avg)) {
                reason = Overloaded;
                if (debugMode) {
                    log.info(String.format("broker:%s is overloaded:%s although "
                            + "load std:%.2f <= targetStd:%.2f. "
                            + "Continuing unload for this overloaded broker.",
                            stats.peekMaxBroker(),
                            context.brokerLoadDataStore().get(stats.peekMaxBroker()).get(),
                            stats.std(), targetStd));
                }
            } else {
                if (debugMode) {
                    log.info(CANNOT_CONTINUE_UNLOAD_MSG
                            + "The overall cluster load meets the target, std:{} <= targetStd:{}."
                            + "minBroker:{} is not underloaded. maxBroker:{} is not overloaded.",
                            stats.std(), targetStd, stats.peekMinBroker(), stats.peekMaxBroker());
                }
                break;
            }

            String maxBroker = stats.pollMaxBroker();
            String minBroker = stats.peekMinBroker();
            Optional<BrokerLoadData> maxBrokerLoadData = context.brokerLoadDataStore().get(maxBroker);
            Optional<BrokerLoadData> minBrokerLoadData = context.brokerLoadDataStore().get(minBroker);
            if (maxBrokerLoadData.isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " MaxBrokerLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }
            if (minBrokerLoadData.isEmpty()) {
                log.error("Can't transfer load to broker:{}. MinBrokerLoadData is empty.", minBroker);
                numOfBrokersWithEmptyLoadData++;
                continue;
            }

            // Offload target: close half of the weighted-load gap, converted into throughput units.
            double maxLoad = maxBrokerLoadData.get().getWeightedMaxEMA();
            double minLoad = minBrokerLoadData.get().getWeightedMaxEMA();
            double offload = (maxLoad - minLoad) / 2;
            BrokerLoadData brokerLoadData = maxBrokerLoadData.get();
            double maxBrokerThroughput = brokerLoadData.getMsgThroughputIn()
                    + brokerLoadData.getMsgThroughputOut();
            double minBrokerThroughput = minBrokerLoadData.get().getMsgThroughputIn()
                    + minBrokerLoadData.get().getMsgThroughputOut();
            double offloadThroughput = maxBrokerThroughput * offload / maxLoad;

            if (debugMode) {
                log.info(String.format(
                        "Attempting to shed load from broker:%s%s, which has the max resource "
                                + "usage:%.2f%%, targetStd:%.2f,"
                                + " -- Trying to offload %.2f%%, %.2f KByte/s of traffic.",
                        maxBroker, transfer ? " to broker:" + minBroker : "",
                        maxLoad * 100, targetStd,
                        offload * 100,
                        offloadThroughput / KB
                ));
            }

            double trafficMarkedToOffload = 0;
            double trafficMarkedToGain = 0;

            Optional<TopBundlesLoadData> bundlesLoadData = context.topBundleLoadDataStore().get(maxBroker);
            if (bundlesLoadData.isEmpty() || bundlesLoadData.get().getTopBundlesLoadData().isEmpty()) {
                log.error(String.format(CANNOT_UNLOAD_BROKER_MSG + " TopBundlesLoadData is empty.", maxBroker));
                numOfBrokersWithEmptyLoadData++;
                continue;
            }

            var maxBrokerTopBundlesLoadData = bundlesLoadData.get().getTopBundlesLoadData();
            if (maxBrokerTopBundlesLoadData.size() == 1) {
                // A single bundle cannot be unloaded without emptying the broker entirely.
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " Sole namespace bundle:%s is overloading the broker. ",
                        maxBroker, maxBrokerTopBundlesLoadData.iterator().next()));
                continue;
            }

            Optional<TopBundlesLoadData> minBundlesLoadData = context.topBundleLoadDataStore().get(minBroker);
            var minBrokerTopBundlesLoadDataIter = minBundlesLoadData.isPresent()
                    ? minBundlesLoadData.get().getTopBundlesLoadData().iterator() : null;

            if (maxBrokerTopBundlesLoadData.isEmpty()) {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " Broker overloaded despite having no bundles", maxBroker));
                continue;
            }

            int remainingTopBundles = maxBrokerTopBundlesLoadData.size();
            for (var e : maxBrokerTopBundlesLoadData) {
                String bundle = e.bundleName();
                if (channel != null && !channel.isOwner(bundle, maxBroker)) {
                    if (debugMode) {
                        log.warn(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " MaxBroker:%s is not the owner.", bundle, maxBroker));
                    }
                    continue;
                }
                if (recentlyUnloadedBundles.containsKey(bundle)) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " Bundle has been recently unloaded at ts:%d.",
                                bundle, recentlyUnloadedBundles.get(bundle)));
                    }
                    continue;
                }
                if (!isTransferable(context, availableBrokers, bundle, maxBroker, Optional.of(minBroker))) {
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " This unload can't meet "
                                + "affinity(isolation) or anti-affinity group policies.", bundle));
                    }
                    continue;
                }
                if (remainingTopBundles <= 1) {
                    // Always leave at least one top bundle on the max broker.
                    if (debugMode) {
                        log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                + " The remaining bundles in TopBundlesLoadData from the maxBroker:%s is"
                                + " less than or equal to 1.", bundle, maxBroker));
                    }
                    break;
                }

                var bundleData = e.stats();
                double maxBrokerBundleThroughput = bundleData.msgThroughputIn + bundleData.msgThroughputOut;
                boolean swap = false;
                List<Unload> minToMaxUnloads = new ArrayList<>();
                double minBrokerBundleSwapThroughput = 0.0;
                if (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput > offloadThroughput) {
                    // see if we can swap bundles from min to max broker to balance better.
                    if (transfer && minBrokerTopBundlesLoadDataIter != null) {
                        var maxBrokerNewThroughput =
                                maxBrokerThroughput - trafficMarkedToOffload + trafficMarkedToGain
                                        - maxBrokerBundleThroughput;
                        var minBrokerNewThroughput =
                                minBrokerThroughput + trafficMarkedToOffload - trafficMarkedToGain
                                        + maxBrokerBundleThroughput;
                        while (minBrokerTopBundlesLoadDataIter.hasNext()) {
                            var minBrokerBundleData = minBrokerTopBundlesLoadDataIter.next();
                            if (!isTransferable(context, availableBrokers,
                                    minBrokerBundleData.bundleName(), minBroker, Optional.of(maxBroker))) {
                                continue;
                            }
                            var minBrokerBundleThroughput =
                                    minBrokerBundleData.stats().msgThroughputIn
                                            + minBrokerBundleData.stats().msgThroughputOut;
                            var maxBrokerNewThroughputTmp = maxBrokerNewThroughput + minBrokerBundleThroughput;
                            var minBrokerNewThroughputTmp = minBrokerNewThroughput - minBrokerBundleThroughput;
                            if (maxBrokerNewThroughputTmp < maxBrokerThroughput
                                    && minBrokerNewThroughputTmp < maxBrokerThroughput) {
                                minToMaxUnloads.add(new Unload(minBroker,
                                        minBrokerBundleData.bundleName(), Optional.of(maxBroker)));
                                maxBrokerNewThroughput = maxBrokerNewThroughputTmp;
                                minBrokerNewThroughput = minBrokerNewThroughputTmp;
                                minBrokerBundleSwapThroughput += minBrokerBundleThroughput;
                                if (minBrokerNewThroughput <= maxBrokerNewThroughput
                                        && maxBrokerNewThroughput < maxBrokerThroughput * 0.75) {
                                    swap = true;
                                    break;
                                }
                            }
                        }
                    }
                    if (!swap) {
                        if (debugMode) {
                            log.info(String.format(CANNOT_UNLOAD_BUNDLE_MSG
                                    + " The traffic to unload:%.2f - gain:%.2f = %.2f KByte/s is "
                                    + "greater than the target :%.2f KByte/s.",
                                    bundle,
                                    (trafficMarkedToOffload + maxBrokerBundleThroughput) / KB,
                                    trafficMarkedToGain / KB,
                                    (trafficMarkedToOffload - trafficMarkedToGain + maxBrokerBundleThroughput) / KB,
                                    offloadThroughput / KB));
                        }
                        break;
                    }
                }

                Unload unload;
                if (transfer) {
                    if (swap) {
                        minToMaxUnloads.forEach(minToMaxUnload -> {
                            if (debugMode) {
                                log.info("Decided to gain bundle:{} from min broker:{}",
                                        minToMaxUnload.serviceUnit(), minToMaxUnload.sourceBroker());
                            }
                            var decision = new UnloadDecision();
                            decision.setUnload(minToMaxUnload);
                            decision.succeed(reason);
                            decisionCache.add(decision);
                        });
                        if (debugMode) {
                            log.info(String.format(
                                    "Total traffic %.2f KByte/s to transfer from min broker:%s to max broker:%s.",
                                    minBrokerBundleSwapThroughput / KB, minBroker, maxBroker));
                            // NOTE(review): trafficMarkedToGain is only accumulated in debug mode here —
                            // looks suspicious; confirm against upstream whether this belongs outside the if.
                            trafficMarkedToGain += minBrokerBundleSwapThroughput;
                        }
                    }
                    unload = new Unload(maxBroker, bundle, Optional.of(minBroker));
                } else {
                    unload = new Unload(maxBroker, bundle);
                }
                var decision = new UnloadDecision();
                decision.setUnload(unload);
                decision.succeed(reason);
                decisionCache.add(decision);
                trafficMarkedToOffload += maxBrokerBundleThroughput;
                remainingTopBundles--;
                if (debugMode) {
                    log.info(String.format("Decided to unload bundle:%s, throughput:%.2f KByte/s."
                            + " The traffic marked to unload:%.2f - gain:%.2f = %.2f KByte/s."
                            + " Target:%.2f KByte/s.",
                            bundle, maxBrokerBundleThroughput / KB,
                            trafficMarkedToOffload / KB, trafficMarkedToGain / KB,
                            (trafficMarkedToOffload - trafficMarkedToGain) / KB, offloadThroughput / KB));
                }
            }
            if (trafficMarkedToOffload > 0) {
                // Feed the projected post-offload loads back into the stats for the next iteration.
                var adjustedOffload =
                        (trafficMarkedToOffload - trafficMarkedToGain) * maxLoad / maxBrokerThroughput;
                stats.offload(maxLoad, minLoad, adjustedOffload);
                if (debugMode) {
                    log.info(
                            String.format("brokers' load stats:%s, after offload{max:%.2f, min:%.2f, offload:%.2f}",
                                    stats, maxLoad, minLoad, adjustedOffload));
                }
            } else {
                numOfBrokersWithFewBundles++;
                log.warn(String.format(CANNOT_UNLOAD_BROKER_MSG
                        + " There is no bundle that can be unloaded in top bundles load data. "
                        + "Consider splitting bundles owned by the broker "
                        + "to make each bundle serve less traffic "
                        + "or increasing loadBalancerMaxNumberOfBundlesInBundleLoadReport"
                        + " to report more bundles in the top bundles load data.", maxBroker));
            }
        } // while end

        if (debugMode) {
            log.info("decisionCache:{}", decisionCache);
        }

        if (decisionCache.isEmpty()) {
            // No decision made: classify the skip reason for the metrics counter.
            UnloadDecision.Reason reason;
            if (numOfBrokersWithEmptyLoadData > 0) {
                reason = NoLoadData;
            } else if (numOfBrokersWithFewBundles > 0) {
                reason = NoBundles;
            } else {
                reason = HitCount;
            }
            counter.update(Skip, reason);
        } else {
            unloadConditionHitCount = 0;
        }
    } catch (Throwable e) {
        log.error("Failed to process unloading. ", e);
        this.counter.update(Failure, Unknown);
    }
    return decisionCache;
}
/**
 * When the channel reports that the max broker owns none of its bundles, no unload decision
 * can be produced; the cycle must be skipped and accounted as NoBundles.
 */
@Test
public void testNoOwnerLoadData() throws IllegalAccessException {
    UnloadCounter counter = new UnloadCounter();
    TransferShedder transferShedder = new TransferShedder(counter);
    FieldUtils.writeDeclaredField(transferShedder, "channel", channel, true);
    var ctx = setupContext();
    doReturn(false).when(channel).isOwner(any(), any());
    var res = transferShedder.findBundlesForUnloading(ctx, Map.of(), Map.of());
    assertTrue(res.isEmpty());
    assertEquals(counter.getBreakdownCounters().get(Skip).get(NoBundles).get(), 1);
    assertEquals(counter.getLoadAvg(), setupLoadAvg);
    assertEquals(counter.getLoadStd(), setupLoadStd);
}
/**
 * Maps a SQL combine type to the corresponding Calcite {@code SqlOperator}.
 *
 * @param combineType combine type to convert
 * @return the registered Calcite operator
 * @throws IllegalStateException if the combine type has no registered operator
 */
public static SqlOperator convert(final CombineType combineType) {
    final boolean registered = REGISTRY.containsKey(combineType);
    Preconditions.checkState(registered, "Unsupported combine type: `%s`", combineType);
    return REGISTRY.get(combineType);
}
/** Every CombineType must convert to its Calcite operator; MINUS variants map onto EXCEPT. */
@Test
void assertConvertSuccess() {
    assertThat(CombineOperatorConverter.convert(CombineType.UNION_ALL), is(SqlStdOperatorTable.UNION_ALL));
    assertThat(CombineOperatorConverter.convert(CombineType.UNION), is(SqlStdOperatorTable.UNION));
    assertThat(CombineOperatorConverter.convert(CombineType.INTERSECT_ALL), is(SqlStdOperatorTable.INTERSECT_ALL));
    assertThat(CombineOperatorConverter.convert(CombineType.INTERSECT), is(SqlStdOperatorTable.INTERSECT));
    assertThat(CombineOperatorConverter.convert(CombineType.EXCEPT_ALL), is(SqlStdOperatorTable.EXCEPT_ALL));
    assertThat(CombineOperatorConverter.convert(CombineType.EXCEPT), is(SqlStdOperatorTable.EXCEPT));
    // Calcite has no MINUS operator; MINUS is the Oracle-style alias for EXCEPT.
    assertThat(CombineOperatorConverter.convert(CombineType.MINUS_ALL), is(SqlStdOperatorTable.EXCEPT_ALL));
    assertThat(CombineOperatorConverter.convert(CombineType.MINUS), is(SqlStdOperatorTable.EXCEPT));
}
@Override public Column convert(BasicTypeDefine typeDefine) { try { return super.convert(typeDefine); } catch (SeaTunnelRuntimeException e) { PhysicalColumn.PhysicalColumnBuilder builder = PhysicalColumn.builder() .name(typeDefine.getName()) .sourceType(typeDefine.getColumnType()) .nullable(typeDefine.isNullable()) .defaultValue(typeDefine.getDefaultValue()) .comment(typeDefine.getComment()); String kingbaseDataType = typeDefine.getDataType().toUpperCase(); switch (kingbaseDataType) { case KB_TINYINT: builder.dataType(BasicType.BYTE_TYPE); break; case KB_MONEY: builder.dataType(new DecimalType(38, 18)); builder.columnLength(38L); builder.scale(18); break; case KB_BLOB: builder.dataType(PrimitiveByteArrayType.INSTANCE); builder.columnLength((long) (1024 * 1024 * 1024)); break; case KB_CLOB: builder.dataType(BasicType.STRING_TYPE); builder.columnLength(typeDefine.getLength()); builder.columnLength((long) (1024 * 1024 * 1024)); break; case KB_BIT: builder.dataType(PrimitiveByteArrayType.INSTANCE); // BIT(M) -> BYTE(M/8) long byteLength = typeDefine.getLength() / 8; byteLength += typeDefine.getLength() % 8 > 0 ? 1 : 0; builder.columnLength(byteLength); break; default: throw CommonError.convertToSeaTunnelTypeError( DatabaseIdentifier.KINGBASE, typeDefine.getDataType(), typeDefine.getName()); } return builder.build(); } }
/**
 * Textual types text/json/jsonb/xml must convert to STRING columns with a null length,
 * preserving the column's name and source type.
 */
@Test
public void testConvertOtherString() {
    // text
    BasicTypeDefine<Object> typeDefine =
            BasicTypeDefine.builder().name("test").columnType("text").dataType("text").build();
    Column column = KingbaseTypeConverter.INSTANCE.convert(typeDefine);
    Assertions.assertEquals(typeDefine.getName(), column.getName());
    Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType());
    Assertions.assertEquals(null, column.getColumnLength());
    Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
    // json
    typeDefine = BasicTypeDefine.builder().name("test").columnType("json").dataType("json").build();
    column = KingbaseTypeConverter.INSTANCE.convert(typeDefine);
    Assertions.assertEquals(typeDefine.getName(), column.getName());
    Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType());
    Assertions.assertEquals(null, column.getColumnLength());
    Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
    // jsonb
    typeDefine = BasicTypeDefine.builder()
            .name("test")
            .columnType("jsonb")
            .dataType("jsonb")
            .build();
    column = KingbaseTypeConverter.INSTANCE.convert(typeDefine);
    Assertions.assertEquals(typeDefine.getName(), column.getName());
    Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType());
    Assertions.assertEquals(null, column.getColumnLength());
    Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
    // xml
    typeDefine = BasicTypeDefine.builder().name("test").columnType("xml").dataType("xml").build();
    column = KingbaseTypeConverter.INSTANCE.convert(typeDefine);
    Assertions.assertEquals(typeDefine.getName(), column.getName());
    Assertions.assertEquals(BasicType.STRING_TYPE, column.getDataType());
    Assertions.assertEquals(null, column.getColumnLength());
    Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
}
@VisibleForTesting void validateEmailUnique(Long id, String email) { if (StrUtil.isBlank(email)) { return; } AdminUserDO user = userMapper.selectByEmail(email); if (user == null) { return; } // 如果 id 为空,说明不用比较是否为相同 id 的用户 if (id == null) { throw exception(USER_EMAIL_EXISTS); } if (!user.getId().equals(id)) { throw exception(USER_EMAIL_EXISTS); } }
/**
 * Updating a user to an email that is already owned by another user must fail with
 * USER_EMAIL_EXISTS.
 */
@Test
public void testValidateEmailUnique_emailExistsForUpdate() {
    // Prepare parameters.
    Long id = randomLongId();
    String email = randomString();
    // Mock data: another user already owns this email.
    userMapper.insert(randomAdminUserDO(o -> o.setEmail(email)));
    // Call and expect the service exception.
    assertServiceException(() -> userService.validateEmailUnique(id, email), USER_EMAIL_EXISTS);
}
/**
 * Imports the given photo albums and photos into Backblaze B2.
 *
 * <p>Album names are cached first through the idempotent executor so later photo imports can
 * resolve them; each photo is then imported idempotently, with I/O errors swallowed by the
 * executor. The sizes of successfully imported files are summed and reported on the result.
 *
 * @return {@code ImportResult.OK} (carrying the total imported bytes), also when data is null
 */
@Override
public ImportResult importItem(
        UUID jobId,
        IdempotentImportExecutor idempotentExecutor,
        TokenSecretAuthData authData,
        PhotosContainerResource data)
        throws Exception {
    if (data == null) {
        // Nothing to do
        return ImportResult.OK;
    }
    BackblazeDataTransferClient b2Client = b2ClientFactory.getOrCreateB2Client(jobId, authData);

    // Cache album names so photo imports can reference them idempotently.
    if (data.getAlbums() != null && data.getAlbums().size() > 0) {
        for (PhotoAlbum album : data.getAlbums()) {
            idempotentExecutor.executeAndSwallowIOExceptions(
                    album.getId(),
                    String.format("Caching album name for album '%s'", album.getId()),
                    () -> album.getName());
        }
    }

    // Thread-safe accumulator for the total bytes imported across photos.
    final LongAdder totalImportedFilesSizes = new LongAdder();
    if (data.getPhotos() != null && data.getPhotos().size() > 0) {
        for (PhotoModel photo : data.getPhotos()) {
            idempotentExecutor.importAndSwallowIOExceptions(
                    photo,
                    p -> {
                        ItemImportResult<String> fileImportResult =
                                importSinglePhoto(idempotentExecutor, b2Client, jobId, p);
                        if (fileImportResult.hasBytes()) {
                            totalImportedFilesSizes.add(fileImportResult.getBytes());
                        }
                        return fileImportResult;
                    });
        }
    }
    return ImportResult.OK.copyWithBytes(totalImportedFilesSizes.longValue());
}
/** importItem must return OK when both the album and photo collections are null. */
@Test
public void testNullPhotosAndAlbums() throws Exception {
    PhotosContainerResource data = mock(PhotosContainerResource.class);
    when(data.getAlbums()).thenReturn(null);
    when(data.getPhotos()).thenReturn(null);
    BackblazePhotosImporter sut =
            new BackblazePhotosImporter(monitor, dataStore, streamProvider, clientFactory);
    ImportResult result = sut.importItem(UUID.randomUUID(), executor, authData, data);
    assertEquals(ImportResult.ResultType.OK, result.getType());
}
/**
 * Merges deep-link properties into the given JSON object via the configured processor.
 * A missing processor makes this a no-op; any failure is logged and swallowed.
 *
 * @param properties target JSON object to merge deep-link properties into
 */
public static void mergeDeepLinkProperty(JSONObject properties) {
    try {
        if (mDeepLinkProcessor == null) {
            return;
        }
        mDeepLinkProcessor.mergeDeepLinkProperty(properties);
    } catch (Exception ex) {
        SALog.printStackTrace(ex);
    }
}
/** Smoke test: merging into a fresh JSON object must not throw. */
@Test
public void mergeDeepLinkProperty() {
    JSONObject jsonObject = new JSONObject();
    DeepLinkManager.mergeDeepLinkProperty(jsonObject);
}
/**
 * Decorates the completion-stage supplier with the given observation and invokes it
 * immediately. Convenience shorthand for {@code decorateCompletionStageSupplier(...).get()}.
 *
 * @param observation observation to record the supplier's execution under
 * @param supplier    supplier producing the asynchronous result
 * @return the completion stage produced by the decorated supplier
 */
static <T> CompletionStage<T> executeCompletionStageSupplier(Observation observation,
        Supplier<CompletionStage<T>> supplier) {
    return decorateCompletionStageSupplier(observation, supplier).get();
}
/**
 * executeCompletionStageSupplier must run the async supplier, propagate its value, invoke the
 * underlying service exactly once, and complete the observation without errors.
 */
@Test
public void shouldExecuteCompletionStageSupplier() throws Throwable {
    given(helloWorldService.returnHelloWorld()).willReturn("Hello world");
    Supplier<CompletionStage<String>> completionStageSupplier =
            () -> CompletableFuture.supplyAsync(helloWorldService::returnHelloWorld);
    CompletionStage<String> stringCompletionStage = Observations
            .executeCompletionStageSupplier(observation, completionStageSupplier);
    String value = stringCompletionStage.toCompletableFuture().get();
    assertThat(value).isEqualTo("Hello world");
    // The observation is finished asynchronously; poll until it is complete.
    await().atMost(1, SECONDS)
            .until(() -> {
                assertThatObservationWasStartedAndFinishedWithoutErrors();
            });
    then(helloWorldService).should(times(1)).returnHelloWorld();
}
/**
 * Returns the match depth of the given exception against this rule, starting the search at
 * the exception's own class (depth 0). Delegates to the private {@code getDepth(Class, int)}
 * overload, which defines the exact matching semantics.
 *
 * @param ex exception to match against this rule
 * @return the depth at which the rule matched
 */
public int getDepth(Throwable ex) {
    return getDepth(ex.getClass(), 0);
}
/** A rule built from the exact exception class name must match at depth 0. */
@Test
public void foundImmediatelyWithString() {
    RollbackRule rr = new RollbackRule(Exception.class.getName());
    assertThat(rr.getDepth(new Exception())).isEqualTo(0);
}
/**
 * Parses the bundled "test.doc" resource with Tika's auto-detecting parser and returns only
 * the document body, rendered as XML markup (the {@code BodyContentHandler} strips
 * everything outside the body).
 *
 * @return the body content of the test document as XML
 * @throws IOException   if the resource cannot be read
 * @throws SAXException  if content handling fails
 * @throws TikaException if parsing fails
 */
public String parseBodyToHTML() throws IOException, SAXException, TikaException {
    final ContentHandler handler = new BodyContentHandler(new ToXMLContentHandler());
    final AutoDetectParser parser = new AutoDetectParser();
    final Metadata metadata = new Metadata();
    try (InputStream stream = ContentHandlerExample.class.getResourceAsStream("test.doc")) {
        parser.parse(stream, handler, metadata);
    }
    return handler.toString();
}
/**
 * The body handler must strip document-level markup (html/head/meta/title/body tags) while
 * keeping the body text itself.
 */
@Test
public void testParseBodyToHTML() throws IOException, SAXException, TikaException {
    String result = example
            .parseBodyToHTML()
            .trim();
    assertNotContained("<html", result);
    assertNotContained("<head>", result);
    assertNotContained("<meta name=\"dc:creator\"", result);
    assertNotContained("<title>", result);
    assertNotContained("<body>", result);
    assertContains(">test", result);
}
/**
 * Returns a filtered copy of this tree, leaving the original untouched: the tree is cloned
 * first and the filter is then applied to the clone.
 *
 * @param filter filter deciding which nodes are kept
 * @return a new, filtered tree
 */
public Tree<T> filterNew(Filter<Tree<T>> filter) {
    return cloneTree().filter(filter);
}
/**
 * filterNew must return a filtered copy (4 nodes survive the name filter here) while the
 * source tree keeps all of its 7 nodes.
 */
@Test
public void filterNewTest() {
    final Tree<String> tree = TreeUtil.buildSingle(nodeList, "0");
    // Filtering produces a new tree; the original is not modified.
    Tree<String> newTree = tree.filterNew((t) -> {
        final CharSequence name = t.getName();
        return null != name && name.toString().contains("店铺");
    });
    List<String> ids = new ArrayList<>();
    newTree.walk((tr) -> ids.add(tr.getId()));
    assertEquals(4, ids.size());
    List<String> ids2 = new ArrayList<>();
    tree.walk((tr) -> ids2.add(tr.getId()));
    assertEquals(7, ids2.size());
}
/**
 * Sets the value of the named field and returns this struct so calls can be chained.
 * The field is resolved through {@code lookupField}, which defines the behavior for
 * unknown field names.
 *
 * @param fieldName name of the field to set
 * @param value     value to assign
 * @return this struct, for chaining
 */
public Struct put(String fieldName, Object value) {
    return put(lookupField(fieldName), value);
}
/** Putting a String into an int8 field must be rejected with a DataException. */
@Test
public void testInvalidFieldType() {
    assertThrows(DataException.class,
            () -> new Struct(FLAT_STRUCT_SCHEMA).put("int8", "should fail because this is a string, not int8"));
}
/**
 * Analyzes the statement with the default flag value, delegating to
 * {@code analyze(Statement, boolean)} with {@code false}; the flag's meaning is defined by
 * that two-argument overload.
 *
 * @param statement statement to analyze
 * @return the analysis result
 */
public Analysis analyze(Statement statement) {
    return analyze(statement, false);
}
/** A query mixing DISTINCT and non-DISTINCT aggregations must analyze without errors. */
@Test
public void testDistinctAggregations() {
    analyze("SELECT COUNT(DISTINCT a), SUM(a) FROM t1");
}
/**
 * Merges the date column statistics from {@code newColStats} into {@code aggregateColStats}
 * in place: low/high bounds, null count, distinct-value count (via the NDV estimators) and
 * the KLL histogram estimator.
 */
@Override
public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
    LOG.debug("Merging statistics: [aggregateColStats:{}, newColStats: {}]", aggregateColStats, newColStats);
    DateColumnStatsDataInspector aggregateData = dateInspectorFromStats(aggregateColStats);
    DateColumnStatsDataInspector newData = dateInspectorFromStats(newColStats);
    // Widen the bounds only when the merge yields a value; a null result keeps the old bound.
    Date lowValue = mergeLowValue(getLowValue(aggregateData), getLowValue(newData));
    if (lowValue != null) {
        aggregateData.setLowValue(lowValue);
    }
    Date highValue = mergeHighValue(getHighValue(aggregateData), getHighValue(newData));
    if (highValue != null) {
        aggregateData.setHighValue(highValue);
    }
    aggregateData.setNumNulls(mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
    // Merge the NDV estimators; slot 0 of the list is written back as the merged estimator.
    NumDistinctValueEstimator oldNDVEst = aggregateData.getNdvEstimator();
    NumDistinctValueEstimator newNDVEst = newData.getNdvEstimator();
    List<NumDistinctValueEstimator> ndvEstimatorsList = Arrays.asList(oldNDVEst, newNDVEst);
    aggregateData.setNumDVs(mergeNumDistinctValueEstimator(aggregateColStats.getColName(),
            ndvEstimatorsList, aggregateData.getNumDVs(), newData.getNumDVs()));
    aggregateData.setNdvEstimator(ndvEstimatorsList.get(0));
    // Merge the KLL histogram sketches.
    KllHistogramEstimator oldKllEst = aggregateData.getHistogramEstimator();
    KllHistogramEstimator newKllEst = newData.getHistogramEstimator();
    aggregateData.setHistogramEstimator(
            mergeHistogramEstimator(aggregateColStats.getColName(), oldKllEst, newKllEst));
    aggregateColStats.getStatsData().setDateStats(aggregateData);
}
/**
 * Merging an empty stats object (null bounds, zero counts) with a populated one must yield
 * exactly the populated side's statistics.
 */
@Test
public void testMergeNullWithNonNullValues() {
    ColumnStatisticsObj aggrObj = createColumnStatisticsObj(new ColStatsBuilder<>(Date.class)
            .low(null)
            .high(null)
            .numNulls(0)
            .numDVs(0)
            .build());
    ColumnStatisticsObj newObj = createColumnStatisticsObj(new ColStatsBuilder<>(Date.class)
            .low(DATE_1)
            .high(DATE_3)
            .numNulls(4)
            .numDVs(2)
            .hll(DATE_1.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch())
            .kll(DATE_1.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch())
            .build());
    merger.merge(aggrObj, newObj);
    // Expected: the merge result equals the non-empty input's statistics.
    ColumnStatisticsData expectedColumnStatisticsData = new ColStatsBuilder<>(Date.class)
            .low(DATE_1)
            .high(DATE_3)
            .numNulls(4)
            .numDVs(2)
            .hll(DATE_1.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch())
            .kll(DATE_1.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch(), DATE_3.getDaysSinceEpoch())
            .build();
    assertEquals(expectedColumnStatisticsData, aggrObj.getStatsData());
}
/**
 * Returns the largest stored value, scanning the value slots (odd indices) of the backing
 * array and ignoring slots still holding the initial (missing) sentinel. An empty map
 * reports the initial value itself.
 *
 * @return the maximum stored value, or the initial value when the map is empty
 */
public int maxValue() {
    final int missingValue = this.initialValue;
    int result = Integer.MIN_VALUE;
    if (size == 0) {
        result = missingValue;
    }
    final int[] table = this.entries;
    @DoNotSub final int capacity = table.length;
    for (@DoNotSub int i = 1; i < capacity; i += 2) {
        final int candidate = table[i];
        if (candidate != missingValue && candidate > result) {
            result = candidate;
        }
    }
    return result;
}
/** An empty map must report the initial (missing) value as its maximum. */
@Test
void shouldHaveNoMaxValueForEmptyCollection() {
    assertEquals(INITIAL_VALUE, map.maxValue());
}
/**
 * Deletes the tenant with the given id. Existence and update-permission checks (including
 * any system-tenant protection) are delegated to {@code validateUpdateTenant}.
 *
 * @param id id of the tenant to delete
 */
@Override
public void deleteTenant(Long id) {
    // Validate the tenant exists and may be modified.
    validateUpdateTenant(id);
    // Delete the row.
    tenantMapper.deleteById(id);
}
/** Deleting a tenant on the system package must be rejected. */
@Test
public void testDeleteTenant_system() {
    // Mock data: a tenant bound to the system package.
    TenantDO dbTenant = randomPojo(TenantDO.class, o -> o.setPackageId(PACKAGE_ID_SYSTEM));
    tenantMapper.insert(dbTenant); // @Sql: insert an existing row first
    // Prepare parameters.
    Long id = dbTenant.getId();
    // Call and assert the expected exception.
    assertServiceException(() -> tenantService.deleteTenant(id), TENANT_CAN_NOT_UPDATE_SYSTEM);
}
/**
 * Updates a code-generation table definition together with its column definitions, inside a
 * single transaction.
 *
 * <p>Validations: the table must exist; for SUB templates the master table must exist and the
 * request's columns must contain the configured sub-join column.
 *
 * @param updateReqVO table and column definitions to persist
 */
@Override
@Transactional(rollbackFor = Exception.class)
public void updateCodegen(CodegenUpdateReqVO updateReqVO) {
    // Validate the table exists.
    if (codegenTableMapper.selectById(updateReqVO.getTable().getId()) == null) {
        throw exception(CODEGEN_TABLE_NOT_EXISTS);
    }
    // For SUB templates, validate the master table and the join column.
    if (Objects.equals(updateReqVO.getTable().getTemplateType(), CodegenTemplateTypeEnum.SUB.getType())) {
        if (codegenTableMapper.selectById(updateReqVO.getTable().getMasterTableId()) == null) {
            throw exception(CODEGEN_MASTER_TABLE_NOT_EXISTS, updateReqVO.getTable().getMasterTableId());
        }
        if (CollUtil.findOne(updateReqVO.getColumns(), // the column joining to the master table must exist
                column -> column.getId().equals(updateReqVO.getTable().getSubJoinColumnId())) == null) {
            throw exception(CODEGEN_SUB_COLUMN_NOT_EXISTS, updateReqVO.getTable().getSubJoinColumnId());
        }
    }
    // Update the table definition.
    CodegenTableDO updateTableObj = BeanUtils.toBean(updateReqVO.getTable(), CodegenTableDO.class);
    codegenTableMapper.updateById(updateTableObj);
    // Update the column definitions one by one.
    List<CodegenColumnDO> updateColumnObjs = BeanUtils.toBean(updateReqVO.getColumns(), CodegenColumnDO.class);
    updateColumnObjs.forEach(updateColumnObj -> codegenColumnMapper.updateById(updateColumnObj));
}
/**
 * Updating a SUB-template table whose request columns do not contain the configured sub-join
 * column must fail with CODEGEN_SUB_COLUMN_NOT_EXISTS.
 */
@Test
public void testUpdateCodegen_sub_columnNotExists() {
    // Mock data: the sub table.
    CodegenTableDO subTable = randomPojo(CodegenTableDO.class,
            o -> o.setTemplateType(CodegenTemplateTypeEnum.SUB.getType())
                    .setScene(CodegenSceneEnum.ADMIN.getScene()));
    codegenTableMapper.insert(subTable);
    // Mock data: the master table.
    CodegenTableDO masterTable = randomPojo(CodegenTableDO.class,
            o -> o.setTemplateType(CodegenTemplateTypeEnum.MASTER_ERP.getType())
                    .setScene(CodegenSceneEnum.ADMIN.getScene()));
    codegenTableMapper.insert(masterTable);
    // Prepare parameters (random columns, so the sub-join column is absent).
    CodegenUpdateReqVO updateReqVO = randomPojo(CodegenUpdateReqVO.class,
            o -> o.getTable().setId(subTable.getId())
                    .setTemplateType(CodegenTemplateTypeEnum.SUB.getType())
                    .setMasterTableId(masterTable.getId()));
    // Call and assert the expected exception.
    assertServiceException(() -> codegenService.updateCodegen(updateReqVO),
            CODEGEN_SUB_COLUMN_NOT_EXISTS, updateReqVO.getTable().getSubJoinColumnId());
}
/**
 * Registers a dependency edge from {@code from} to {@code to} under the graph's write lock.
 * The reverse (incoming) edge is only recorded when the outgoing edge was newly added, keeping
 * the two edge maps consistent.
 *
 * @param from dependent node; must be non-null and different from {@code to}
 * @param to   dependency node; must be non-null
 * @throws IllegalArgumentException when either node is null or both are equal
 */
public void addDependency(T from, T to) {
    final boolean invalid = from == null || to == null || from.equals(to);
    if (invalid) {
        throw new IllegalArgumentException("Invalid parameters");
    }
    final long stamp = lock.writeLock();
    try {
        final boolean added = addOutgoingEdge(from, to);
        if (added) {
            addIncomingEdge(to, from);
        }
    } finally {
        lock.unlockWrite(stamp);
    }
}
/** A null "to" node is an invalid dependency and must be rejected. */
@Test(expectedExceptions = IllegalArgumentException.class)
public void testAdNull() {
    new DependencyGraph<>().addDependency("N", null);
}
/**
 * Returns a fresh iterator over the result set, backed by the inner {@code It} class which
 * implements the iteration logic.
 */
@Override
public Iterator<QueryableEntry> iterator() {
    return new It();
}
/** The AND result set is read-only: remove() must throw UnsupportedOperationException. */
@Test(expected = UnsupportedOperationException.class)
public void removeUnsupported() {
    Set<QueryableEntry> entries = generateEntries(100000);
    AndResultSet resultSet = new AndResultSet(entries, null, asList(Predicates.alwaysTrue()));
    resultSet.remove(resultSet.iterator().next());
}
/**
 * Runs one redo cycle: replays pending instance registrations and then pending subscriptions.
 * The whole cycle is skipped (with a warning) while the gRPC connection is down; any
 * unexpected failure is logged and swallowed so the scheduled task keeps running.
 */
@Override
public void run() {
    if (redoService.isConnected()) {
        try {
            redoForInstances();
            redoForSubscribes();
        } catch (Exception e) {
            LogUtils.NAMING_LOGGER.warn("Redo task run with unexpected exception: ", e);
        }
    } else {
        LogUtils.NAMING_LOGGER.warn("Grpc Connection is disconnect, skip current redo task");
    }
}
/**
 * When the client proxy is disabled, a subscriber pending registration must not actually be
 * re-subscribed by the redo task.
 */
@Test
void testRunRedoRegisterSubscriberWithClientDisabled() throws NacosException {
    when(clientProxy.isEnable()).thenReturn(false);
    Set<SubscriberRedoData> mockData = generateMockSubscriberData(false, false, true);
    when(redoService.findSubscriberRedoData()).thenReturn(mockData);
    redoTask.run();
    verify(clientProxy, never()).doSubscribe(SERVICE, GROUP, CLUSTER);
}
/**
 * Inserts the user's credentials into the USERS table using a parameterized statement.
 * Both the connection and the prepared statement are closed by try-with-resources.
 *
 * @param user user whose username and password are stored
 * @return the number of rows inserted, as reported by executeUpdate
 * @throws SQLException on any database error (e.g. a unique-constraint violation)
 */
public int registerUser(final User user) throws SQLException {
    var sql = "insert into USERS (username, password) values (?,?)";
    try (var connection = dataSource.getConnection();
         var statement = connection.prepareStatement(sql)) {
        statement.setString(1, user.getUsername());
        statement.setString(2, user.getPassword());
        final var rowsInserted = statement.executeUpdate();
        LOGGER.info("Register successfully!");
        return rowsInserted;
    }
}
/** Registering the same user twice must hit the unique constraint and throw SQLException. */
@Test
void registerShouldFail() throws SQLException {
    var dataSource = createDataSource();
    var userTableModule = new UserTableModule(dataSource);
    var user = new User(1, "123456", "123456");
    userTableModule.registerUser(user);
    assertThrows(SQLException.class, () -> userTableModule.registerUser(user));
}
/**
 * Sends the message to every recipient, wrapping it in a dedicated envelope per recipient
 * before dispatching.
 *
 * @param msg        message to deliver
 * @param recipients routing nodes to deliver the message to
 */
@Override
public void send(Message msg, List<RoutingNode> recipients) {
    recipients.forEach(recipient -> new MessageEnvelope(this, msg, recipient).send());
}
/**
 * Sending to a route naming an unknown service must still be accepted for delivery and then
 * answered with an EmptyReply (per the test name, carrying the no-address-for-service error)
 * rather than hanging or dropping the message.
 */
@Test
void requireThatUnknownServiceRepliesWithNoAddressForService() throws InterruptedException {
    final Server server = new Server(new LocalWire());
    final SourceSession source = server.newSourceSession();
    final Message msg = new SimpleMessage("foo").setRoute(Route.parse("bar"));
    assertTrue(source.send(msg).isAccepted());
    final Reply reply = server.replies.poll(60, TimeUnit.SECONDS);
    assertTrue(reply instanceof EmptyReply);
    server.mbus.destroy();
}
/**
 * Writes a 4-byte integer in little-endian byte order, as the MySQL wire protocol requires.
 *
 * @param value value to write into the underlying buffer
 */
public void writeInt4(final int value) {
    byteBuf.writeIntLE(value);
}
/** writeInt4 must delegate to ByteBuf#writeIntLE (little-endian 4-byte write). */
@Test
void assertWriteInt4() {
    new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8).writeInt4(1);
    verify(byteBuf).writeIntLE(1);
}
/**
 * Reads an XML document from the stream by delegating to the asynchronous variant and
 * blocking on its result. Parse failures therefore surface as an {@code ExecutionException}
 * wrapping the underlying cause.
 *
 * @param stream stream to read the document from; must not be null
 * @return the parsed document
 * @throws ExecutionException   when parsing fails (cause holds the parser error)
 * @throws InterruptedException when the calling thread is interrupted while waiting
 */
public static Document readDocument(@Nonnull final InputStream stream) throws ExecutionException, InterruptedException {
    return readDocumentAsync(stream).get();
}
/** Invalid XML must surface as an ExecutionException whose cause is a DocumentException. */
@Test
public void testDocumentException() throws Exception {
    // Setup test fixture.
    final InputStream input = new ByteArrayInputStream("this is not valid XML".getBytes(StandardCharsets.UTF_8));
    final ExecutionException result;
    try {
        // Execute system under test.
        SAXReaderUtil.readDocument(input);
        fail("An ExecutionException should have been thrown.");
        return;
    } catch (ExecutionException e) {
        result = e;
    }
    // Verify result.
    assertNotNull(result);
    assertNotNull(result.getCause());
    assertEquals(DocumentException.class, result.getCause().getClass());
}
/**
 * Determines whether the line ends inside an unterminated quote, taking doubled quotes
 * (quote-escapes), backslash escapes and trailing comments into account.
 *
 * @param line line of input to inspect
 * @return {@code true} when a quote opened on this line is never closed
 */
public static boolean isUnclosedQuote(final String line) {
    // CHECKSTYLE_RULES.ON: CyclomaticComplexity
    // Index of the currently open quote's opening character, or -1 while outside any quote.
    int quoteStart = -1;
    for (int i = 0; i < line.length(); ++i) {
        if (quoteStart < 0 && isQuoteChar(line, i)) {
            // Entering a quoted region.
            quoteStart = i;
        } else if (quoteStart >= 0 && isTwoQuoteStart(line, i) && !isEscaped(line, i)) {
            // Together, two quotes are effectively an escaped quote and don't act as a quote character.
            // Skip the next quote char, since it's coupled with the first.
            i++;
        } else if (quoteStart >= 0 && isQuoteChar(line, i) && !isEscaped(line, i)) {
            // Unescaped closing quote: back outside the quoted region.
            quoteStart = -1;
        }
    }
    final int commentInd = line.indexOf(COMMENT);
    if (commentInd < 0) {
        // No comment marker: unclosed iff we ended inside a quote.
        return quoteStart >= 0;
    } else if (quoteStart < 0) {
        // A comment exists but every quote was closed.
        return false;
    } else {
        // Unclosed only if the open quote started before the comment marker;
        // a quote opened after the marker lives inside the comment.
        return commentInd > quoteStart;
    }
}
@Test public void shouldFindUnclosedQuote_manyQuote() { // Given: final String line = "some line 'this is in a quote''''"; // Then: assertThat(UnclosedQuoteChecker.isUnclosedQuote(line), is(true)); }
protected void handshakeFailure(ChannelHandlerContext ctx, Throwable cause) throws Exception { logger.warn("{} TLS handshake failed:", ctx.channel(), cause); ctx.close(); }
@Test public void testHandshakeFailure() { ChannelHandler alpnHandler = new ApplicationProtocolNegotiationHandler(ApplicationProtocolNames.HTTP_1_1) { @Override protected void configurePipeline(ChannelHandlerContext ctx, String protocol) { fail(); } }; EmbeddedChannel channel = new EmbeddedChannel(alpnHandler); SSLHandshakeException exception = new SSLHandshakeException("error"); SslHandshakeCompletionEvent completionEvent = new SslHandshakeCompletionEvent(exception); channel.pipeline().fireUserEventTriggered(completionEvent); channel.pipeline().fireExceptionCaught(new DecoderException(exception)); assertNull(channel.pipeline().context(alpnHandler)); assertFalse(channel.finishAndReleaseAll()); }
@Override public Stream<FileSlice> getLatestUnCompactedFileSlices(String partitionPath) { return execute(partitionPath, preferredView::getLatestUnCompactedFileSlices, (path) -> getSecondaryView().getLatestUnCompactedFileSlices(path)); }
@Test public void testGetLatestUnCompactedFileSlices() { Stream<FileSlice> actual; Stream<FileSlice> expected = testFileSliceStream; String partitionPath = "/table2"; when(primary.getLatestUnCompactedFileSlices(partitionPath)).thenReturn(testFileSliceStream); actual = fsView.getLatestUnCompactedFileSlices(partitionPath); assertEquals(expected, actual); verify(secondaryViewSupplier, never()).get(); resetMocks(); when(secondaryViewSupplier.get()).thenReturn(secondary); when(primary.getLatestUnCompactedFileSlices(partitionPath)).thenThrow(new RuntimeException()); when(secondary.getLatestUnCompactedFileSlices(partitionPath)).thenReturn(testFileSliceStream); actual = fsView.getLatestUnCompactedFileSlices(partitionPath); assertEquals(expected, actual); resetMocks(); when(secondary.getLatestUnCompactedFileSlices(partitionPath)).thenReturn(testFileSliceStream); actual = fsView.getLatestUnCompactedFileSlices(partitionPath); assertEquals(expected, actual); resetMocks(); when(secondary.getLatestUnCompactedFileSlices(partitionPath)).thenThrow(new RuntimeException()); assertThrows(RuntimeException.class, () -> { fsView.getLatestUnCompactedFileSlices(partitionPath); }); }
public boolean shouldShow(@Nullable Keyboard.Key pressedKey) { return pressedKey != null && shouldShow(pressedKey.getPrimaryCode()); }
@Test public void testHandlesNullKey() { final OnKeyWordHelper helper = new OnKeyWordHelper("test".toCharArray()); Assert.assertFalse(helper.shouldShow(null)); }
public EndpointResponse streamQuery( final KsqlSecurityContext securityContext, final KsqlRequest request, final CompletableFuture<Void> connectionClosedFuture, final Optional<Boolean> isInternalRequest, final MetricsCallbackHolder metricsCallbackHolder, final Context context ) { throwIfNotConfigured(); activenessRegistrar.updateLastRequestTime(); final PreparedStatement<?> statement = parseStatement(request); CommandStoreUtil.httpWaitForCommandSequenceNumber( commandQueue, request, commandQueueCatchupTimeout); return handleStatement(securityContext, request, statement, connectionClosedFuture, isInternalRequest, metricsCallbackHolder, context); }
@Test public void shouldThrowOnDenyListedStreamProperty() { // Given: when(mockStatementParser.<Query>parseSingleStatement(PULL_QUERY_STRING)).thenReturn(query); testResource = new StreamedQueryResource( mockKsqlEngine, ksqlRestConfig, mockStatementParser, commandQueue, DISCONNECT_CHECK_INTERVAL, COMMAND_QUEUE_CATCHUP_TIMOEUT, activenessRegistrar, Optional.of(authorizationValidator), errorsHandler, denyListPropertyValidator, queryExecutor ); final Map<String, Object> props = new HashMap<>(ImmutableMap.of( StreamsConfig.APPLICATION_SERVER_CONFIG, "something:1" )); props.put(KsqlConfig.KSQL_PROPERTIES_OVERRIDES_DENYLIST, StreamsConfig.NUM_STREAM_THREADS_CONFIG); when(mockKsqlEngine.getKsqlConfig()).thenReturn(new KsqlConfig(props)); final Map<String, Object> overrides = ImmutableMap.of(StreamsConfig.NUM_STREAM_THREADS_CONFIG, 1); doThrow(new KsqlException("deny override")).when(denyListPropertyValidator) .validateAll(overrides); when(errorsHandler.generateResponse(any(), any())) .thenReturn(badRequest("A property override was set locally for a property that the " + "server prohibits overrides for: 'num.stream.threads'")); // When: final EndpointResponse response = testResource.streamQuery( securityContext, new KsqlRequest( PULL_QUERY_STRING, overrides, // stream properties Collections.emptyMap(), null ), new CompletableFuture<>(), Optional.empty(), new MetricsCallbackHolder(), context ); // Then: verify(denyListPropertyValidator).validateAll(overrides); assertThat(response.getStatus(), CoreMatchers.is(BAD_REQUEST.code())); assertThat(((KsqlErrorMessage) response.getEntity()).getMessage(), is("A property override was set locally for a property that the server prohibits " + "overrides for: '" + StreamsConfig.NUM_STREAM_THREADS_CONFIG + "'")); }
@Override public void shutdown(final Callback<None> callback) { info(_log, "shutting down dynamic client"); _balancer.shutdown(() -> { info(_log, "dynamic client shutdown complete"); callback.onSuccess(None.none()); }); TimingKey.unregisterKey(TIMING_KEY); }
@Test(groups = { "small", "back-end" }) public void testShutdown() throws URISyntaxException, InterruptedException { LoadBalancerMock balancer = new LoadBalancerMock(true); DynamicClient client = new DynamicClient(balancer, null, true); final CountDownLatch latch = new CountDownLatch(1); assertFalse(balancer.shutdown); client.shutdown(new Callback<None>() { @Override public void onError(Throwable e) { } @Override public void onSuccess(None t) { latch.countDown(); } }); if (!latch.await(5, TimeUnit.SECONDS)) { fail("unable to shut down dynamic client"); } assertTrue(balancer.shutdown); }
@Override public float readFloat(@Nonnull String fieldName) throws IOException { FieldDefinition fd = cd.getField(fieldName); if (fd == null) { return 0f; } switch (fd.getType()) { case FLOAT: return super.readFloat(fieldName); case INT: return super.readInt(fieldName); case BYTE: return super.readByte(fieldName); case CHAR: return super.readChar(fieldName); case SHORT: return super.readShort(fieldName); default: throw createIncompatibleClassChangeError(fd, FLOAT); } }
@Test(expected = IncompatibleClassChangeError.class) public void testReadFloat_IncompatibleClass() throws Exception { reader.readFloat("string"); }
@Override public List<String> splitAndEvaluate() { return Strings.isNullOrEmpty(inlineExpression) ? Collections.emptyList() : flatten(evaluate(GroovyUtils.split(handlePlaceHolder(inlineExpression)))); }
@Test void assertEvaluateForLiteral() { List<String> expected = TypedSPILoader.getService(InlineExpressionParser.class, "GROOVY", PropertiesBuilder.build( new PropertiesBuilder.Property(InlineExpressionParser.INLINE_EXPRESSION_KEY, "t_order_${'xx'}"))).splitAndEvaluate(); assertThat(expected.size(), is(1)); assertThat(expected, hasItems("t_order_xx")); }
public static Object convertAvroFormat( FieldType beamFieldType, Object avroValue, BigQueryUtils.ConversionOptions options) { TypeName beamFieldTypeName = beamFieldType.getTypeName(); if (avroValue == null) { if (beamFieldType.getNullable()) { return null; } else { throw new IllegalArgumentException(String.format("Field %s not nullable", beamFieldType)); } } switch (beamFieldTypeName) { case BYTE: case INT16: case INT32: case INT64: case FLOAT: case DOUBLE: case STRING: case BYTES: case BOOLEAN: return convertAvroPrimitiveTypes(beamFieldTypeName, avroValue); case DATETIME: // Expecting value in microseconds. switch (options.getTruncateTimestamps()) { case TRUNCATE: return truncateToMillis(avroValue); case REJECT: return safeToMillis(avroValue); default: throw new IllegalArgumentException( String.format( "Unknown timestamp truncation option: %s", options.getTruncateTimestamps())); } case DECIMAL: return convertAvroNumeric(avroValue); case ARRAY: return convertAvroArray(beamFieldType, avroValue, options); case LOGICAL_TYPE: LogicalType<?, ?> logicalType = beamFieldType.getLogicalType(); assert logicalType != null; String identifier = logicalType.getIdentifier(); if (SqlTypes.DATE.getIdentifier().equals(identifier)) { return convertAvroDate(avroValue); } else if (SqlTypes.TIME.getIdentifier().equals(identifier)) { return convertAvroTime(avroValue); } else if (SqlTypes.DATETIME.getIdentifier().equals(identifier)) { return convertAvroDateTime(avroValue); } else if (SQL_DATE_TIME_TYPES.contains(identifier)) { switch (options.getTruncateTimestamps()) { case TRUNCATE: return truncateToMillis(avroValue); case REJECT: return safeToMillis(avroValue); default: throw new IllegalArgumentException( String.format( "Unknown timestamp truncation option: %s", options.getTruncateTimestamps())); } } else if (logicalType instanceof PassThroughLogicalType) { return convertAvroFormat(logicalType.getBaseType(), avroValue, options); } else { throw new RuntimeException("Unknown logical type 
" + identifier); } case ROW: Schema rowSchema = beamFieldType.getRowSchema(); if (rowSchema == null) { throw new IllegalArgumentException("Nested ROW missing row schema"); } GenericData.Record record = (GenericData.Record) avroValue; return toBeamRow(record, rowSchema, options); case MAP: return convertAvroRecordToMap(beamFieldType, avroValue, options); default: throw new RuntimeException( "Does not support converting unknown type value: " + beamFieldTypeName); } }
@Test public void testSubMilliPrecisionTruncated() { long millis = 123456789L; assertThat( BigQueryUtils.convertAvroFormat(FieldType.DATETIME, millis * 1000 + 123, TRUNCATE_OPTIONS), equalTo(new Instant(millis))); }
@Override public FileObject[] findJarFiles() throws KettleFileException { return findJarFiles( searchLibDir ); }
@Test public void testFindJarFiles_ExceptionThrows() { String nullFolder = null; String expectedMessage = "Unable to list jar files in plugin folder '" + nullFolder + "'"; plFolder = new PluginFolder( nullFolder, false, true ); try { plFolder.findJarFiles(); fail( "KettleFileException was not occured but expected." ); } catch ( KettleFileException e ) { assertTrue( e instanceof KettleFileException ); assertTrue( e.getLocalizedMessage().trim().startsWith( expectedMessage ) ); } }
@Override public MetricsCollector create(final MetricConfiguration metricConfig) { switch (metricConfig.getType()) { case COUNTER: return new PrometheusMetricsCounterCollector(metricConfig); case GAUGE: return new PrometheusMetricsGaugeCollector(metricConfig); case HISTOGRAM: return new PrometheusMetricsHistogramCollector(metricConfig); case SUMMARY: return new PrometheusMetricsSummaryCollector(metricConfig); case GAUGE_METRIC_FAMILY: return new PrometheusMetricsGaugeMetricFamilyCollector(metricConfig); default: throw new UnsupportedOperationException(String.format("Can not support type `%s`.", metricConfig.getType())); } }
@Test void assertCreateGaugeMetricFamilyCollector() { MetricConfiguration config = new MetricConfiguration("test_summary", MetricCollectorType.GAUGE_METRIC_FAMILY, null, Collections.emptyList(), Collections.emptyMap()); assertThat(new PrometheusMetricsCollectorFactory().create(config), instanceOf(PrometheusMetricsGaugeMetricFamilyCollector.class)); }
@Override public Connection getConnection() throws SQLException { return dataSourceProxyXA.getConnection(); }
@Test public void testGetConnection() throws SQLException { // Mock Driver driver = Mockito.mock(Driver.class); JDBC4MySQLConnection connection = Mockito.mock(JDBC4MySQLConnection.class); Mockito.when(connection.getAutoCommit()).thenReturn(true); DatabaseMetaData metaData = Mockito.mock(DatabaseMetaData.class); Mockito.when(metaData.getURL()).thenReturn("jdbc:mysql:xxx"); Mockito.when(connection.getMetaData()).thenReturn(metaData); Mockito.when(driver.connect(any(), any())).thenReturn(connection); DruidDataSource druidDataSource = new DruidDataSource(); druidDataSource.setDriver(driver); DataSourceProxyXA dataSourceProxyXA = new DataSourceProxyXA(druidDataSource); RootContext.unbind(); Connection connFromDataSourceProxyXA = dataSourceProxyXA.getConnection(); Assertions.assertFalse(connFromDataSourceProxyXA instanceof ConnectionProxyXA); RootContext.bind("test"); connFromDataSourceProxyXA = dataSourceProxyXA.getConnection(); Assertions.assertTrue(connFromDataSourceProxyXA instanceof ConnectionProxyXA); ConnectionProxyXA connectionProxyXA = (ConnectionProxyXA)dataSourceProxyXA.getConnection(); Connection wrappedConnection = connectionProxyXA.getWrappedConnection(); Assertions.assertTrue(wrappedConnection instanceof PooledConnection); Connection wrappedPhysicalConn = ((PooledConnection)wrappedConnection).getConnection(); Assertions.assertSame(wrappedPhysicalConn, connection); XAConnection xaConnection = connectionProxyXA.getWrappedXAConnection(); Connection connectionInXA = xaConnection.getConnection(); Assertions.assertTrue(connectionInXA instanceof JDBC4ConnectionWrapper); tearDown(); }
public static int getCpuCores() { // 找不到文件或者异常,则去物理机的核心数 int cpu = RpcConfigs.getIntValue(RpcOptions.SYSTEM_CPU_CORES); return cpu > 0 ? cpu : Runtime.getRuntime().availableProcessors(); }
@Test public void getCpuCores() { Assert.assertTrue(SystemInfo.getCpuCores() > 0); }
@Override public void clear() { ipv4Tree = new ConcurrentRadixTree<>(new DefaultCharArrayNodeFactory()); ipv6Tree = new ConcurrentRadixTree<>(new DefaultCharArrayNodeFactory()); }
@Test public void testClear() { radixTree.clear(); assertThat("Incorrect size of radix tree for IPv4 maps", radixTree.size(IpAddress.Version.INET), is(0)); assertThat("Incorrect size of radix tree for IPv6 maps", radixTree.size(IpAddress.Version.INET6), is(0)); }
@Override public CompletableFuture<T> toCompletableFuture() { return _task.toCompletionStage().toCompletableFuture(); }
@Test public void testToCompletableFuture_success() throws Exception { CompletionStage completableFuture = createTestStage(TESTVALUE1).toCompletableFuture(); assertEquals(completableFuture.toCompletableFuture().get(), TESTVALUE1); }
@Override public void removeListener(int listenerId) { super.removeListener(listenerId); String topicName = getNameByListenerId(listenerId); if (topicName != null) { RTopic topic = getTopic(topicName); removeListenerId(topicName, listenerId); topic.removeListener(listenerId); } }
@Test public void testRemoveListener() { RMapCache<Long, String> rMapCache = redisson.getMapCache("test"); rMapCache.trySetMaxSize(5); AtomicBoolean removed = new AtomicBoolean(); rMapCache.addListener(new EntryRemovedListener() { @Override public void onRemoved(EntryEvent event) { removed.set(true); } }); rMapCache.put(1L, "1"); rMapCache.put(2L, "2"); rMapCache.put(3L, "3"); rMapCache.put(4L, "4"); rMapCache.put(5L, "5"); rMapCache.put(6L, "6"); Awaitility.await().atMost(5, TimeUnit.SECONDS).untilTrue(removed); }
public String getEcosystem(DefCveItem cve) { final int[] ecosystemMap = new int[ECOSYSTEMS.length]; cve.getCve().getDescriptions().stream() .filter((langString) -> (langString.getLang().equals("en"))) .forEachOrdered((langString) -> search(langString.getValue(), ecosystemMap)); return getResult(ecosystemMap); }
@Test public void testSubsetKeywordsDoNotMatch() throws IOException { DescriptionEcosystemMapper mapper = new DescriptionEcosystemMapper(); String value = "Wonder if java senses the gc."; // i.e. does not match 'java se' assertNull(mapper.getEcosystem(asCve(value))); }
@Override public Executor getExecutor(final URL url) { try { return SpringBeanUtils.getInstance().getBean(ShenyuThreadPoolExecutor.class); } catch (NoSuchBeanDefinitionException t) { throw new ShenyuException("shared thread pool is not enable, config ${shenyu.sharedPool.enable} in your xml/yml !", t); } }
@Test public void testGetExecutor() { ShenyuThreadPoolExecutor shenyuThreadPoolExecutor = new ShenyuThreadPoolExecutor(1, 2, 30, TimeUnit.SECONDS, new MemorySafeTaskQueue<>(100), Executors.defaultThreadFactory(), new ThreadPoolExecutor.AbortPolicy()); SpringBeanUtils.getInstance().setApplicationContext(mock(ConfigurableApplicationContext.class)); when(SpringBeanUtils.getInstance().getBean(ShenyuThreadPoolExecutor.class)).thenReturn(shenyuThreadPoolExecutor); assertEquals(sharedThreadPool.getExecutor(URL.valueOf("localhost")), shenyuThreadPoolExecutor); when(SpringBeanUtils.getInstance().getBean(ShenyuThreadPoolExecutor.class)).thenThrow(new NoSuchBeanDefinitionException("not bean")); assertThrows(ShenyuException.class, () -> sharedThreadPool.getExecutor(URL.valueOf("localhost"))); }
public Options getPaimonOptions() { return this.paimonOptions; }
@Test public void testCreatePaimonConnectorWithS3() { Map<String, String> properties = new HashMap<>(); properties.put("paimon.catalog.warehouse", "s3://bucket/warehouse"); properties.put("paimon.catalog.type", "filesystem"); String accessKeyValue = "s3_access_key"; String secretKeyValue = "s3_secret_key"; String endpointValue = "s3_endpoint"; properties.put("aws.s3.access_key", accessKeyValue); properties.put("aws.s3.secret_key", secretKeyValue); properties.put("aws.s3.endpoint", endpointValue); PaimonConnector connector = new PaimonConnector(new ConnectorContext("paimon_catalog", "paimon", properties)); Options paimonOptions = connector.getPaimonOptions(); String accessKeyOption = paimonOptions.get("s3.access-key"); String secretKeyOption = paimonOptions.get("s3.secret-key"); String endpointOption = paimonOptions.get("s3.endpoint"); Assert.assertEquals(accessKeyOption, accessKeyValue); Assert.assertEquals(secretKeyOption, secretKeyValue); Assert.assertEquals(endpointOption, endpointValue); }
public void append(long offset, int position) { lock.lock(); try { if (isFull()) throw new IllegalArgumentException("Attempt to append to a full index (size = " + entries() + ")."); if (entries() == 0 || offset > lastOffset) { log.trace("Adding index entry {} => {} to {}", offset, position, file().getAbsolutePath()); mmap().putInt(relativeOffset(offset)); mmap().putInt(position); incrementEntries(); lastOffset = offset; if (entries() * ENTRY_SIZE != mmap().position()) throw new IllegalStateException(entries() + " entries but file position in index is " + mmap().position()); } else throw new InvalidOffsetException("Attempt to append an offset " + offset + " to position " + entries() + " no larger than the last offset appended (" + lastOffset + ") to " + file().getAbsolutePath()); } finally { lock.unlock(); } }
@Test public void appendTooMany() { for (int i = 0; i < index.maxEntries(); ++i) { long offset = index.baseOffset() + i + 1; index.append(offset, i); } assertWriteFails("Append should fail on a full index", index, index.maxEntries() + 1); }
public static <InputT> UsingBuilder<InputT> of(PCollection<InputT> input) { return new UsingBuilder<>(DEFAULT_NAME, input); }
@SuppressWarnings("unchecked") @Test public void testBuild_NegatedPredicate() { final PCollection<Integer> dataset = TestUtils.createMockDataset(TypeDescriptors.integers()); final Split.Output<Integer> split = Split.of(dataset).using((UnaryPredicate<Integer>) what -> what % 2 == 0).output(); final Filter<Integer> oddNumbers = (Filter) TestUtils.getProducer(split.negative()); assertFalse(oddNumbers.getPredicate().apply(0)); assertFalse(oddNumbers.getPredicate().apply(2)); assertFalse(oddNumbers.getPredicate().apply(4)); assertTrue(oddNumbers.getPredicate().apply(1)); assertTrue(oddNumbers.getPredicate().apply(3)); assertTrue(oddNumbers.getPredicate().apply(5)); }
public Collection<SQLToken> generateSQLTokens(final TablesContext tablesContext, final SetAssignmentSegment setAssignmentSegment) { String tableName = tablesContext.getSimpleTables().iterator().next().getTableName().getIdentifier().getValue(); EncryptTable encryptTable = encryptRule.getEncryptTable(tableName); Collection<SQLToken> result = new LinkedList<>(); String schemaName = tablesContext.getSchemaName().orElseGet(() -> new DatabaseTypeRegistry(databaseType).getDefaultSchemaName(databaseName)); for (ColumnAssignmentSegment each : setAssignmentSegment.getAssignments()) { String columnName = each.getColumns().get(0).getIdentifier().getValue(); if (encryptTable.isEncryptColumn(columnName)) { generateSQLToken(schemaName, encryptTable.getTable(), encryptTable.getEncryptColumn(columnName), each).ifPresent(result::add); } } return result; }
@Test void assertGenerateSQLTokenWithUpdateParameterMarkerExpressionSegment() { when(assignmentSegment.getValue()).thenReturn(mock(ParameterMarkerExpressionSegment.class)); assertThat(tokenGenerator.generateSQLTokens(tablesContext, setAssignmentSegment).size(), is(1)); }
String getLockName(String namespace, String name) { return "lock::" + namespace + "::" + kind() + "::" + name; }
@Test /* * Verifies that the lock is released by a call to `releaseLockAndTimer`. * The call is made through a chain of futures ending with `eventually` after a failed execution via a handled exception in the `Callable`. */ void testWithLockCallableHandledExceptionReleasesLock(VertxTestContext context) { var resourceOperator = new DefaultWatchableStatusedResourceOperator<>(vertx, null, "TestResource"); @SuppressWarnings({ "unchecked", "rawtypes" }) var target = new DefaultOperator(vertx, "Test", resourceOperator, new MicrometerMetricsProvider(BackendRegistries.getDefaultNow()), null); Reconciliation reconciliation = new Reconciliation("test", "TestResource", "my-namespace", "my-resource"); String lockName = target.getLockName(reconciliation); Checkpoint callableFailed = context.checkpoint(); Checkpoint lockObtained = context.checkpoint(); @SuppressWarnings("unchecked") Future<String> result = target.withLockTest(reconciliation, () -> Future.failedFuture(new UnsupportedOperationException(EXPECTED_MESSAGE))); Promise<Void> failHandlerCalled = Promise.promise(); result.onComplete(context.failing(e -> context.verify(() -> { assertThat(e.getMessage(), is(EXPECTED_MESSAGE)); failHandlerCalled.complete(); callableFailed.flag(); }))); failHandlerCalled.future() .compose(nothing -> vertx.sharedData().getLockWithTimeout(lockName, 10000L)) .onComplete(context.succeeding(lock -> context.verify(() -> { assertThat(lock, instanceOf(Lock.class)); lock.release(); lockObtained.flag(); }))); }
@Override public Flux<BooleanResponse<RenameCommand>> rename(Publisher<RenameCommand> commands) { return execute(commands, command -> { Assert.notNull(command.getKey(), "Key must not be null!"); Assert.notNull(command.getNewName(), "New name must not be null!"); byte[] keyBuf = toByteArray(command.getKey()); byte[] newKeyBuf = toByteArray(command.getNewName()); if (executorService.getConnectionManager().calcSlot(keyBuf) == executorService.getConnectionManager().calcSlot(newKeyBuf)) { return super.rename(commands); } return read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, keyBuf) .filter(Objects::nonNull) .zipWith( Mono.defer(() -> pTtl(command.getKey()) .filter(Objects::nonNull) .map(ttl -> Math.max(0, ttl)) .switchIfEmpty(Mono.just(0L)) ) ) .flatMap(valueAndTtl -> { return write(newKeyBuf, StringCodec.INSTANCE, RedisCommands.RESTORE, newKeyBuf, valueAndTtl.getT2(), valueAndTtl.getT1()); }) .thenReturn(new BooleanResponse<>(command, true)) .doOnSuccess((ignored) -> del(command.getKey())); }); }
@Test public void testRename() { connection.stringCommands().set(originalKey, value).block(); if (hasTtl) { connection.keyCommands().expire(originalKey, Duration.ofSeconds(1000)).block(); } Integer originalSlot = getSlotForKey(originalKey); newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot)); Boolean response = connection.keyCommands().rename(originalKey, newKey).block(); assertThat(response).isTrue(); final ByteBuffer newKeyValue = connection.stringCommands().get(newKey).block(); assertThat(newKeyValue).isEqualTo(value); if (hasTtl) { assertThat(connection.keyCommands().ttl(newKey).block()).isGreaterThan(0); } else { assertThat(connection.keyCommands().ttl(newKey).block()).isEqualTo(-1); } }
public void expectErrorsForTag(String tag) { if (UNPREVENTABLE_TAGS.contains(tag)) { throw new AssertionError("Tag `" + tag + "` is already suppressed."); } expectedTags.add(tag); }
@Test public void testNoExpectedTagFailsTest() { expectedException.expect(AssertionError.class); rule.expectErrorsForTag("Mytag"); }
@Override public boolean contains(Object o) { if (!(o instanceof Integer)) { throw new ClassCastException("PartitionIdSet can be only used with Integers"); } return bitSet.get((Integer) o); }
@Test(expected = ClassCastException.class) public void test_contains_whenNotInteger() { partitionIdSet.contains(new Object()); }
public static String extractAttributeNameNameWithoutArguments(String attributeNameWithArguments) { int start = StringUtil.lastIndexOf(attributeNameWithArguments, '['); int end = StringUtil.lastIndexOf(attributeNameWithArguments, ']'); if (start > 0 && end > 0 && end > start) { return attributeNameWithArguments.substring(0, start); } if (start < 0 && end < 0) { return attributeNameWithArguments; } throw new IllegalArgumentException("Wrong argument input passed " + attributeNameWithArguments); }
@Test(expected = IllegalArgumentException.class) public void extractAttributeName_wrongArguments_noOpening() { extractAttributeNameNameWithoutArguments("car.wheelleft]"); }
@Override public void validateSmsCode(SmsCodeValidateReqDTO reqDTO) { validateSmsCode0(reqDTO.getMobile(), reqDTO.getCode(), reqDTO.getScene()); }
@Test public void validateSmsCode_expired() { // 准备参数 SmsCodeValidateReqDTO reqDTO = randomPojo(SmsCodeValidateReqDTO.class, o -> { o.setMobile("15601691300"); o.setScene(randomEle(SmsSceneEnum.values()).getScene()); }); // mock 数据 SqlConstants.init(DbType.MYSQL); smsCodeMapper.insert(randomPojo(SmsCodeDO.class, o -> o.setMobile(reqDTO.getMobile()) .setScene(reqDTO.getScene()).setCode(reqDTO.getCode()).setUsed(false) .setCreateTime(LocalDateTime.now().minusMinutes(6)))); // 调用,并断言异常 assertServiceException(() -> smsCodeService.validateSmsCode(reqDTO), SMS_CODE_EXPIRED); }
public CompiledPipeline.CompiledExecution buildExecution() { return buildExecution(false); }
@Test @SuppressWarnings({"unchecked"}) public void testReuseCompiledClasses() throws IOException, InvalidIRException { final FixedPluginFactory pluginFactory = new FixedPluginFactory( () -> null, () -> IDENTITY_FILTER, mockOutputSupplier() ); final ConfigVariableExpander cve = ConfigVariableExpander.withoutSecret(EnvironmentVariableProvider.defaultProvider()); // this pipeline generates 10 classes // - 7 for the filters for the nested and leaf Datasets // - 3 for the sequence of outputs with a conditional final PipelineIR baselinePipeline = ConfigCompiler.configToPipelineIR( IRHelpers.toSourceWithMetadataFromPath("org/logstash/config/ir/cache/pipeline_reuse_baseline.conf"), false, cve); final CompiledPipeline cBaselinePipeline = new CompiledPipeline(baselinePipeline, pluginFactory); // this pipeline is much bigger than the baseline // but is carefully crafted to reuse the same classes as the baseline pipeline final PipelineIR pipelineTwiceAsBig = ConfigCompiler.configToPipelineIR( IRHelpers.toSourceWithMetadataFromPath("org/logstash/config/ir/cache/pipeline_reuse_test.conf"), false, cve); final CompiledPipeline cPipelineTwiceAsBig = new CompiledPipeline(pipelineTwiceAsBig, pluginFactory); // test: compiling a much bigger pipeline and asserting no additional classes are generated ComputeStepSyntaxElement.cleanClassCache(); cBaselinePipeline.buildExecution(); final int cachedBefore = ComputeStepSyntaxElement.classCacheSize(); cPipelineTwiceAsBig.buildExecution(); final int cachedAfter = ComputeStepSyntaxElement.classCacheSize(); final String message = String.format("unexpected cache size, cachedAfter: %d, cachedBefore: %d", cachedAfter, cachedBefore); assertEquals(message, 0, cachedAfter - cachedBefore); }
@Override public ProtobufSystemInfo.Section toProtobuf() { ProtobufSystemInfo.Section.Builder protobuf = ProtobufSystemInfo.Section.newBuilder(); protobuf.setName("Compute Engine Tasks"); try (DbSession dbSession = dbClient.openSession(false)) { setAttribute(protobuf, "Total Pending", dbClient.ceQueueDao().countByStatus(dbSession, CeQueueDto.Status.PENDING)); setAttribute(protobuf, "Total In Progress", dbClient.ceQueueDao().countByStatus(dbSession, CeQueueDto.Status.IN_PROGRESS)); setAttribute(protobuf, "Max Workers per Node", workerCountProvider == null ? DEFAULT_NB_OF_WORKERS : workerCountProvider.get()); setAttribute(protobuf, "Workers Paused", "true".equals(dbClient.internalPropertiesDao().selectByKey(dbSession, InternalProperties.COMPUTE_ENGINE_PAUSE).orElse(null))); } return protobuf.build(); }
@Test public void test_queue_state_with_default_settings() { when(dbClient.ceQueueDao().countByStatus(any(), eq(CeQueueDto.Status.PENDING))).thenReturn(10); when(dbClient.ceQueueDao().countByStatus(any(), eq(CeQueueDto.Status.IN_PROGRESS))).thenReturn(1); CeQueueGlobalSection underTest = new CeQueueGlobalSection(dbClient, null); ProtobufSystemInfo.Section section = underTest.toProtobuf(); assertThatAttributeIs(section, "Total Pending", 10); assertThatAttributeIs(section, "Total In Progress", 1); assertThatAttributeIs(section, "Max Workers per Node", 1); }
@Override public <R> R run(Action<R, C, E> action) throws E, InterruptedException { return run(action, retryByDefault); }
@Test public void testRetriesExhaustedAndSurfacesFailure() { int maxRetries = 3; int succeedAfterAttempts = 5; try (MockClientPoolImpl mockClientPool = new MockClientPoolImpl(2, RetryableException.class, true, maxRetries)) { assertThatThrownBy( () -> mockClientPool.run(client -> client.succeedAfter(succeedAfterAttempts))) .isInstanceOf(RetryableException.class); assertThat(mockClientPool.reconnectionAttempts()).isEqualTo(maxRetries); } }
@Override public void execute(Map<String, List<String>> parameters, PrintWriter output) throws Exception { final List<String> loggerNames = getLoggerNames(parameters); final Level loggerLevel = getLoggerLevel(parameters); final Duration duration = getDuration(parameters); for (String loggerName : loggerNames) { Logger logger = ((LoggerContext) loggerContext).getLogger(loggerName); String message = String.format("Configured logging level for %s to %s", loggerName, loggerLevel); if (loggerLevel != null && duration != null) { final long millis = duration.toMillis(); getTimer().schedule(new TimerTask() { @Override public void run() { logger.setLevel(null); } }, millis); message += String.format(" for %s milliseconds", millis); } logger.setLevel(loggerLevel); output.println(message); output.flush(); } }
@Test void configuresSpecificLevelForALoggerForADuration() throws Exception { // given Level oneEffectiveBefore = logger1.getEffectiveLevel(); Map<String, List<String>> parameters = Map.of( "logger", List.of("logger.one"), "level", List.of("debug"), "duration", List.of(Duration.ofMillis(2_000).toString())); Timer timer = mock(Timer.class); ArgumentCaptor<TimerTask> timerAction = ArgumentCaptor.forClass(TimerTask.class); ArgumentCaptor<Long> timerDuration = ArgumentCaptor.forClass(Long.class); // when new LogConfigurationTask(loggerContext, () -> timer).execute(parameters, output); // then assertThat(logger1.getLevel()).isEqualTo(Level.DEBUG); assertThat(stringWriter).hasToString(String.format("Configured logging level for logger.one to DEBUG for 2000 milliseconds%n")); verify(timer).schedule(timerAction.capture(), timerDuration.capture()); assertThat(timerDuration.getValue()).isEqualTo(2_000); // after timerAction.getValue().run(); assertThat(logger1.getEffectiveLevel()).isEqualTo(oneEffectiveBefore); }
public DoubleArrayAsIterable usingTolerance(double tolerance) { return new DoubleArrayAsIterable(tolerance(tolerance), iterableSubject()); }
@Test public void usingTolerance_containsAnyOf_primitiveDoubleArray_failure() { expectFailureWhenTestingThat(array(1.1, TOLERABLE_2POINT2, 3.3)) .usingTolerance(DEFAULT_TOLERANCE) .containsAnyOf(array(99.99, 999.999)); assertFailureKeys("value of", "expected to contain any of", "testing whether", "but was"); assertFailureValue("expected to contain any of", "[99.99, 999.999]"); }
/**
 * Generates a random password of {@code length} characters: the first
 * character is drawn from {@code firstCharacterAlphabet}, the remaining
 * characters from {@code alphabet}.
 *
 * @return the newly generated password
 */
public String generate() {
    // Presize the builder to the known final length to avoid intermediate
    // array growth (default capacity is 16).
    StringBuilder sb = new StringBuilder(length);
    sb.append(firstCharacterAlphabet.charAt(rng.nextInt(firstCharacterAlphabet.length())));
    for (int i = 1; i < length; i++) {
        sb.append(alphabet.charAt(rng.nextInt(alphabet.length())));
    }
    return sb.toString();
}
// With a single-character alphabet the output is deterministic, so both the
// length and the exact content can be asserted.
@Test
public void length() {
    PasswordGenerator generator = new PasswordGenerator(10, "a", "a");
    assertThat(generator.generate(), is("aaaaaaaaaa"));
}
/**
 * Builds a signed SAML ArtifactResponse for the given resolve request: a
 * SUCCESS status, the given entity as issuer, the wrapped Response message
 * and a signature of the requested type.
 */
public ArtifactResponse buildArtifactResponse(ArtifactResolveRequest artifactResolveRequest, String entityId, SignType signType) throws InstantiationException, ValidationException, ArtifactBuildException, BvdException {
    final var artifactResponse = OpenSAMLUtils.buildSAMLObject(ArtifactResponse.class);
    final var status = OpenSAMLUtils.buildSAMLObject(Status.class);
    final var statusCode = OpenSAMLUtils.buildSAMLObject(StatusCode.class);
    final var issuer = OpenSAMLUtils.buildSAMLObject(Issuer.class);

    return ArtifactResponseBuilder
        .newInstance(artifactResponse)
        .addID()
        .addIssueInstant()
        // correlate this response with the originating ArtifactResolve
        .addInResponseTo(artifactResolveRequest.getArtifactResolve().getID())
        .addStatus(StatusBuilder
            .newInstance(status)
            .addStatusCode(statusCode, StatusCode.SUCCESS)
            .build())
        .addIssuer(issuer, entityId)
        .addMessage(buildResponse(artifactResolveRequest, entityId, signType))
        .addSignature(signatureService, signType)
        .build();
}
// A cancelled authentication must yield a Responder status with an
// AuthnFailed sub-status on the inner Response message.
@Test
void parseArtifactResolveCancelled() throws ValidationException, SamlParseException, ArtifactBuildException, BvdException, InstantiationException {
    ArtifactResponse artifactResponse = artifactResponseService.buildArtifactResponse(getArtifactResolveRequest("canceled", true, false, SAML_COMBICONNECT, EncryptionType.BSN, ENTRANCE_ENTITY_ID), ENTRANCE_ENTITY_ID, TD);
    assertEquals("urn:oasis:names:tc:SAML:2.0:status:Responder", ((Response) artifactResponse.getMessage()).getStatus().getStatusCode().getValue());
    assertEquals("urn:oasis:names:tc:SAML:2.0:status:AuthnFailed", ((Response) artifactResponse.getMessage()).getStatus().getStatusCode().getStatusCode().getValue());
}
/**
 * Builds either a single image manifest (exactly one built image) or a
 * manifest list (several built images).
 *
 * @throws IOException if serializing the container configuration fails
 */
@Override
public ManifestTemplate call() throws IOException {
    Preconditions.checkState(!builtImages.isEmpty(), "no images given");
    EventHandlers eventHandlers = buildContext.getEventHandlers();
    try (TimerEventDispatcher ignored = new TimerEventDispatcher(eventHandlers, DESCRIPTION);
        ProgressEventDispatcher ignored2 =
            progressEventDispatcherFactory.create(
                "building a manifest list or a single manifest", 1)) {

        if (builtImages.size() == 1) {
            // Single image: emit a plain manifest referencing its config blob digest.
            eventHandlers.dispatch(LogEvent.info("Building a single manifest"));
            ImageToJsonTranslator imageTranslator = new ImageToJsonTranslator(builtImages.get(0));
            BlobDescriptor configDescriptor =
                Digests.computeDigest(imageTranslator.getContainerConfiguration());
            return imageTranslator.getManifestTemplate(
                buildContext.getTargetFormat(), configDescriptor);
        }

        eventHandlers.dispatch(LogEvent.info("Building a manifest list"));
        return new ManifestListGenerator(builtImages)
            .getManifestListTemplate(buildContext.getTargetFormat());
    }
}
// An empty image list must be rejected with IllegalStateException("no images
// given") before any manifest work happens.
@Test
public void testCall_emptyImagesList() throws IOException {
    try {
        new BuildManifestListOrSingleManifestStep(
                buildContext, progressDispatcherFactory, Collections.emptyList())
            .call();
        Assert.fail();
    } catch (IllegalStateException ex) {
        Assert.assertEquals("no images given", ex.getMessage());
    }
}
/**
 * Resolves cluster endpoints by asking a Eureka server for instances of the
 * configured VIP address. Returns an empty list on any error or invalid
 * response; the transient HTTP client is always shut down.
 */
@Override
public List<AwsEndpoint> getClusterEndpoints() {
    List<AwsEndpoint> result = new ArrayList<>();
    EurekaHttpClient client = null;
    try {
        client = clientFactory.newClient();
        EurekaHttpResponse<Applications> response = client.getVip(vipAddress);
        if (validResponse(response)) {
            Applications applications = response.getEntity();
            if (applications != null) {
                // NOTE(review): shuffleInstances(true) presumably also filters
                // out non-UP instances — confirm against Applications' javadoc.
                applications.shuffleInstances(true); // filter out non-UP instances
                List<InstanceInfo> validInstanceInfos = applications.getInstancesByVirtualHostName(vipAddress);
                for (InstanceInfo instanceInfo : validInstanceInfos) {
                    AwsEndpoint endpoint = ResolverUtils.instanceInfoToEndpoint(clientConfig, transportConfig, instanceInfo);
                    if (endpoint != null) {
                        result.add(endpoint);
                    }
                }
                logger.debug("Retrieved endpoint list {}", result);
                return result;
            }
        }
    } catch (Exception e) {
        logger.error("Error contacting server for endpoints with vipAddress:{}", vipAddress, e);
    } finally {
        if (client != null) {
            client.shutdown();
        }
    }
    logger.info("Returning empty endpoint list");
    return Collections.emptyList();
}
// Happy path: one endpoint per registered instance under the VIP, and the
// transient HTTP client is shut down exactly once.
@Test
public void testHappyCase() {
    List<AwsEndpoint> endpoints = resolver.getClusterEndpoints();
    assertThat(endpoints.size(), equalTo(applications.getInstancesByVirtualHostName(vipAddress).size()));
    verify(httpClient, times(1)).shutdown();
}
/**
 * Multiplies this matrix by {@code other}, producing a dim1 x other.dim2
 * dense result. Dense and dense-sparse right-hand sides are supported.
 *
 * @throws IllegalArgumentException on dimension mismatch or an unsupported
 *     matrix type
 */
@Override
public DenseMatrix matrixMultiply(Matrix other) {
    if (dim2 == other.getDimension1Size()) {
        if (other instanceof DenseMatrix) {
            DenseMatrix otherDense = (DenseMatrix) other;
            double[][] output = new double[dim1][otherDense.dim2];
            for (int i = 0; i < dim1; i++) {
                for (int j = 0; j < otherDense.dim2; j++) {
                    // dot product of this row i with other's column j
                    output[i][j] = columnRowDot(i,j,otherDense);
                }
            }
            return new DenseMatrix(output);
        } else if (other instanceof DenseSparseMatrix) {
            DenseSparseMatrix otherSparse = (DenseSparseMatrix) other;
            int otherDim2 = otherSparse.getDimension2Size();
            double[][] output = new double[dim1][otherDim2];
            for (int i = 0; i < dim1; i++) {
                for (int j = 0; j < otherDim2; j++) {
                    output[i][j] = columnRowDot(i,j,otherSparse);
                }
            }
            return new DenseMatrix(output);
        } else {
            throw new IllegalArgumentException("Unknown matrix type " + other.getClass().getName());
        }
    } else {
        throw new IllegalArgumentException("Invalid matrix dimensions, this.shape=" + Arrays.toString(shape) + ", other.shape = " + Arrays.toString(other.getShape()));
    }
}
// Multiplying by an identity matrix of matching size must return an equal
// matrix, from either side, for square and rectangular operands.
@Test
public void identityTest() {
    DenseMatrix a = generateA();
    DenseMatrix b = generateB();
    DenseMatrix c = generateC();
    DenseMatrix d = generateD();
    DenseMatrix e = generateE();
    DenseMatrix f = generateF();
    DenseMatrix eye = identity(10);
    DenseMatrix eye4 = identity(4);
    DenseMatrix eye3 = identity(3);
    DenseMatrix eye7 = identity(7);

    // Identity matrix tests
    assertEquals(eye,eye.matrixMultiply(eye));
    assertEquals(eye4,eye4.matrixMultiply(eye4));
    assertEquals(eye3,eye3.matrixMultiply(eye3));
    assertEquals(eye7,eye7.matrixMultiply(eye7));

    // 4x4 tests
    assertEquals(a,a.matrixMultiply(eye4));
    assertEquals(b,b.matrixMultiply(eye4));
    assertEquals(c,c.matrixMultiply(eye4));
    assertEquals(a,eye4.matrixMultiply(a));
    assertEquals(b,eye4.matrixMultiply(b));
    assertEquals(c,eye4.matrixMultiply(c));

    // 4x7 tests
    assertEquals(d,d.matrixMultiply(eye7));
    assertEquals(d,eye4.matrixMultiply(d));

    // 7x3 tests
    assertEquals(e,e.matrixMultiply(eye3));
    assertEquals(e,eye7.matrixMultiply(e));

    // 3x4 tests
    assertEquals(f,f.matrixMultiply(eye4));
    assertEquals(f,eye3.matrixMultiply(f));
}
/**
 * Matches raw (current analysis) issues against still-open base (previous
 * analysis) issues, using progressively weaker keys. Each pass only considers
 * issues left unmatched by the earlier passes, so the pass order is the
 * contract.
 */
public NonClosedTracking<RAW, BASE> trackNonClosed(Input<RAW> rawInput, Input<BASE> baseInput) {
    NonClosedTracking<RAW, BASE> tracking = NonClosedTracking.of(rawInput, baseInput);

    // 1. match by rule, line, line hash and message
    match(tracking, LineAndLineHashAndMessage::new);

    // 2. match issues with same rule, same line and same line hash, but not necessarily with same message
    match(tracking, LineAndLineHashKey::new);

    // 3. detect code moves by comparing blocks of codes
    detectCodeMoves(rawInput, baseInput, tracking);

    // 4. match issues with same rule, same message and same line hash
    match(tracking, LineHashAndMessageKey::new);

    // 5. match issues with same rule, same line and same message
    match(tracking, LineAndMessageKey::new);

    // 6. match issues with same rule and same line hash but different line and different message.
    // See SONAR-2812
    match(tracking, LineHashKey::new);
    return tracking;
}
// Issues whose messages differ only by surrounding whitespace must still be
// matched to the same base issue.
@Test
public void similar_issues_if_trimmed_messages_match() {
    FakeInput baseInput = new FakeInput("H1");
    Issue base = baseInput.createIssueOnLine(1, RULE_SYSTEM_PRINT, " message ");
    FakeInput rawInput = new FakeInput("H2");
    Issue raw = rawInput.createIssueOnLine(1, RULE_SYSTEM_PRINT, "message");

    Tracking<Issue, Issue> tracking = tracker.trackNonClosed(rawInput, baseInput);

    assertThat(tracking.baseFor(raw)).isSameAs(base);
}
/**
 * Escapes {@code value} for CSV output, delegating to the two-argument
 * overload with the second flag set to false (presumably "trim whitespace"
 * disabled — see the overload's documentation).
 */
public static CharSequence escapeCsv(CharSequence value) {
    return escapeCsv(value, false);
}
// Inputs that need no escaping must be returned as the same object (no
// allocation), regardless of the trim flag.
@Test
public void escapeCsvGarbageFree() {
    // 'StringUtil#escapeCsv()' should return the same string object if the string didn't change.
    assertSame("1", StringUtil.escapeCsv("1", true));
    assertSame(" 123 ", StringUtil.escapeCsv(" 123 ", false));
    assertSame("\" 123 \"", StringUtil.escapeCsv("\" 123 \"", true));
    assertSame("\"\"", StringUtil.escapeCsv("\"\"", true));
    assertSame("123 \"\"", StringUtil.escapeCsv("123 \"\"", true));
    assertSame("123\"\"321", StringUtil.escapeCsv("123\"\"321", true));
    assertSame("\"123\"\"321\"", StringUtil.escapeCsv("\"123\"\"321\"", true));
}
/**
 * Tries to acquire a permit from the delegate limiter. The returned listener
 * wraps the delegate's listener so that completing the permit in any way
 * (success, ignore, dropped) also calls {@code unblock()} to wake threads
 * waiting on this blocking limiter.
 */
@Override
public Optional<Listener> acquire(ContextT context) {
    return tryAcquire(context).map(delegate -> new Listener() {
        @Override
        public void onSuccess() {
            delegate.onSuccess();
            unblock();
        }

        @Override
        public void onIgnore() {
            delegate.onIgnore();
            unblock();
        }

        @Override
        public void onDropped() {
            delegate.onDropped();
            unblock();
        }
    });
}
@Test public void adaptWhenLimitDecreases() { List<Optional<Limiter.Listener>> listeners = acquireN(blockingLimiter, 4); limit.setLimit(3); listeners.get(0).get().onSuccess(); // Next acquire will reject and block long start = System.nanoTime(); Optional<Limiter.Listener> listener = blockingLimiter.acquire(null); long duration = TimeUnit.SECONDS.toMillis(System.nanoTime() - start); Assert.assertTrue("Duration = " + duration, duration >= 1); Assert.assertFalse(listener.isPresent()); }
/**
 * Formats the expression with default options: the reserved-word predicate
 * always returns false, so no identifier is quoted as reserved.
 */
public static String formatExpression(final Expression expression) {
    return formatExpression(expression, FormatOptions.of(s -> false));
}
// Double literals with many significant digits are rendered in scientific
// notation (Java's Double.toString form).
@Test
public void shouldFormatDoubleLiteralWithLargeScale() {
    assertThat(ExpressionFormatter.formatExpression(
        new DoubleLiteral(1234.56789876d)), equalTo("1.23456789876E3"));
}
// Registers a master with Sentinel via SENTINEL MONITOR, passing its name,
// host, port and quorum (the numeric arguments are unboxed to int).
@Override
public void monitor(RedisServer master) {
    connection.sync(RedisCommands.SENTINEL_MONITOR, master.getName(), master.getHost(),
            master.getPort().intValue(), master.getQuorum().intValue());
}
// Monitoring an existing master under a modified name (suffix ":") must be
// accepted by SENTINEL MONITOR without error.
@Test
public void testMonitor() {
    Collection<RedisServer> masters = connection.masters();
    RedisServer master = masters.iterator().next();
    master.setName(master.getName() + ":");
    connection.monitor(master);
}
/**
 * Persists the given hash together with a TTL attribute (epoch seconds,
 * now + configured ttl) so DynamoDB can expire the item automatically.
 */
public void store(byte[] hash) {
    db.putItem(PutItemRequest.builder()
        .tableName(tableName)
        .item(Map.of(
            KEY_HASH, AttributeValues.fromByteArray(hash),
            ATTR_TTL, AttributeValues.fromLong(Instant.now().plus(ttl).getEpochSecond())
        ))
        .build());
}
// store() must persist each hash exactly once: remove() returns true on the
// first call for a stored hash and false afterwards.
@Test
void testStore() {
    final byte[] hash1 = UUIDUtil.toBytes(UUID.randomUUID());
    final byte[] hash2 = UUIDUtil.toBytes(UUID.randomUUID());

    assertAll("database should be empty",
        () -> assertFalse(reportMessageDynamoDb.remove(hash1)),
        () -> assertFalse(reportMessageDynamoDb.remove(hash2))
    );
    reportMessageDynamoDb.store(hash1);
    reportMessageDynamoDb.store(hash2);
    assertAll("both hashes should be found",
        () -> assertTrue(reportMessageDynamoDb.remove(hash1)),
        () -> assertTrue(reportMessageDynamoDb.remove(hash2))
    );
    assertAll(
        "database should be empty",
        () -> assertFalse(reportMessageDynamoDb.remove(hash1)),
        () -> assertFalse(reportMessageDynamoDb.remove(hash2))
    );
}
/**
 * Returns a future over all entries of a multimap tag. The underlying fetch
 * may be paginated; the returned iterable transparently follows continuation
 * positions.
 *
 * @param omitValues when true, only entry names are fetched
 */
public <T> Future<Iterable<Map.Entry<ByteString, Iterable<T>>>> multimapFetchAllFuture(
    boolean omitValues, ByteString encodedTag, String stateFamily, Coder<T> elemCoder) {
    StateTag<ByteString> stateTag =
        StateTag.<ByteString>of(Kind.MULTIMAP_ALL, encodedTag, stateFamily)
            .toBuilder()
            .setOmitValues(omitValues)
            .build();
    return valuesToPagingIterableFuture(stateTag, elemCoder, this.stateFuture(stateTag, elemCoder));
}
// Paginated multimap fetch: each response carries a continuation position,
// the iterable must issue follow-up requests transparently, and values for a
// key spanning several pages (KEY_2 here) must be merged in order.
@Test
public void testReadMultimapEntriesPaginated() throws Exception {
    Future<Iterable<Map.Entry<ByteString, Iterable<Integer>>>> future =
        underTest.multimapFetchAllFuture(false, STATE_KEY_1, STATE_FAMILY, INT_CODER);
    Mockito.verifyNoMoreInteractions(mockWindmill);

    // Page 1: initial request (no continuation position).
    Windmill.KeyedGetDataRequest.Builder expectedRequest1 =
        Windmill.KeyedGetDataRequest.newBuilder()
            .setKey(DATA_KEY)
            .setShardingKey(SHARDING_KEY)
            .setWorkToken(WORK_TOKEN)
            .setMaxBytes(WindmillStateReader.MAX_KEY_BYTES)
            .addMultimapsToFetch(
                Windmill.TagMultimapFetchRequest.newBuilder()
                    .setTag(STATE_KEY_1)
                    .setStateFamily(STATE_FAMILY)
                    .setFetchEntryNamesOnly(false)
                    .setFetchMaxBytes(WindmillStateReader.INITIAL_MAX_MULTIMAP_BYTES));

    Windmill.KeyedGetDataResponse.Builder response1 =
        Windmill.KeyedGetDataResponse.newBuilder()
            .setKey(DATA_KEY)
            .addTagMultimaps(
                Windmill.TagMultimapFetchResponse.newBuilder()
                    .setTag(STATE_KEY_1)
                    .setStateFamily(STATE_FAMILY)
                    .addEntries(
                        Windmill.TagMultimapEntry.newBuilder()
                            .setEntryName(STATE_MULTIMAP_KEY_1)
                            .addValues(intData(1))
                            .addValues(intData(2)))
                    .addEntries(
                        Windmill.TagMultimapEntry.newBuilder()
                            .setEntryName(STATE_MULTIMAP_KEY_2)
                            .addValues(intData(3))
                            .addValues(intData(3)))
                    .setContinuationPosition(STATE_MULTIMAP_CONT_1));

    // Page 2: follow-up request carrying CONT_1.
    Windmill.KeyedGetDataRequest.Builder expectedRequest2 =
        Windmill.KeyedGetDataRequest.newBuilder()
            .setKey(DATA_KEY)
            .setShardingKey(SHARDING_KEY)
            .setWorkToken(WORK_TOKEN)
            .setMaxBytes(WindmillStateReader.MAX_CONTINUATION_KEY_BYTES)
            .addMultimapsToFetch(
                Windmill.TagMultimapFetchRequest.newBuilder()
                    .setTag(STATE_KEY_1)
                    .setStateFamily(STATE_FAMILY)
                    .setFetchEntryNamesOnly(false)
                    .setFetchMaxBytes(WindmillStateReader.CONTINUATION_MAX_MULTIMAP_BYTES)
                    .setRequestPosition(STATE_MULTIMAP_CONT_1));

    Windmill.KeyedGetDataResponse.Builder response2 =
        Windmill.KeyedGetDataResponse.newBuilder()
            .setKey(DATA_KEY)
            .addTagMultimaps(
                Windmill.TagMultimapFetchResponse.newBuilder()
                    .setTag(STATE_KEY_1)
                    .setStateFamily(STATE_FAMILY)
                    .addEntries(
                        Windmill.TagMultimapEntry.newBuilder()
                            .setEntryName(STATE_MULTIMAP_KEY_2)
                            .addValues(intData(2)))
                    .setRequestPosition(STATE_MULTIMAP_CONT_1)
                    .setContinuationPosition(STATE_MULTIMAP_CONT_2));

    // Page 3: final follow-up carrying CONT_2 (no further continuation).
    Windmill.KeyedGetDataRequest.Builder expectedRequest3 =
        Windmill.KeyedGetDataRequest.newBuilder()
            .setKey(DATA_KEY)
            .setShardingKey(SHARDING_KEY)
            .setWorkToken(WORK_TOKEN)
            .setMaxBytes(WindmillStateReader.MAX_CONTINUATION_KEY_BYTES)
            .addMultimapsToFetch(
                Windmill.TagMultimapFetchRequest.newBuilder()
                    .setTag(STATE_KEY_1)
                    .setStateFamily(STATE_FAMILY)
                    .setFetchEntryNamesOnly(false)
                    .setFetchMaxBytes(WindmillStateReader.CONTINUATION_MAX_MULTIMAP_BYTES)
                    .setRequestPosition(STATE_MULTIMAP_CONT_2));

    Windmill.KeyedGetDataResponse.Builder response3 =
        Windmill.KeyedGetDataResponse.newBuilder()
            .setKey(DATA_KEY)
            .addTagMultimaps(
                Windmill.TagMultimapFetchResponse.newBuilder()
                    .setTag(STATE_KEY_1)
                    .setStateFamily(STATE_FAMILY)
                    .addEntries(
                        Windmill.TagMultimapEntry.newBuilder()
                            .setEntryName(STATE_MULTIMAP_KEY_2)
                            .addValues(intData(4)))
                    .setRequestPosition(STATE_MULTIMAP_CONT_2));

    Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest1.build()))
        .thenReturn(response1.build());
    Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest2.build()))
        .thenReturn(response2.build());
    Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest3.build()))
        .thenReturn(response3.build());

    Iterable<Map.Entry<ByteString, Iterable<Integer>>> results = future.get();
    Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest1.build());
    // KEY_2's values accumulate across all three pages: 3, 3 (page 1),
    // 2 (page 2), 4 (page 3).
    assertMultimapEntries(
        results,
        Arrays.asList(
            new AbstractMap.SimpleEntry<>(STATE_MULTIMAP_KEY_1, Arrays.asList(1, 2)),
            new AbstractMap.SimpleEntry<>(STATE_MULTIMAP_KEY_2, Arrays.asList(3, 3, 2, 4))));
    Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest2.build());
    Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest3.build());
    Mockito.verifyNoMoreInteractions(mockWindmill);

    // NOTE: The future will still contain a reference to the underlying reader,
    // thus not calling assertNoReader(future).
}
/**
 * Handles a service-properties update event: picks the active (stable or
 * canary) properties, stores the new versioned state item, notifies
 * listeners, refreshes strategies and clients, and keeps the
 * cluster -> services map consistent when the service moves clusters or its
 * properties become null.
 */
@Override
protected void handlePut(final String listenTo, final ServiceProperties discoveryProperties) {
    LoadBalancerStateItem<ServiceProperties> oldServicePropertiesItem =
        _simpleLoadBalancerState.getServiceProperties().get(listenTo);
    ActivePropertiesResult pickedPropertiesResult = pickActiveProperties(discoveryProperties);
    ServiceProperties pickedProperties = pickedPropertiesResult.serviceProperties;
    LoadBalancerStateItem<ServiceProperties> newServiceProperties = new LoadBalancerStateItem<>(
        pickedProperties,
        _simpleLoadBalancerState.getVersionAccess().incrementAndGet(),
        System.currentTimeMillis(),
        pickedPropertiesResult.distribution);
    _simpleLoadBalancerState.getServiceProperties().put(listenTo, newServiceProperties);

    // always refresh strategies when we receive service event
    if (pickedProperties != null) {
        //if this service changes its cluster, we should update the cluster -> service map saying that
        //this service is no longer hosted in the old cluster.
        if (oldServicePropertiesItem != null) {
            ServiceProperties oldServiceProperties = oldServicePropertiesItem.getProperty();
            if (oldServiceProperties != null && oldServiceProperties.getClusterName() != null
                && !oldServiceProperties.getClusterName().equals(pickedProperties.getClusterName())) {
                Set<String> serviceNames =
                    _simpleLoadBalancerState.getServicesPerCluster().get(oldServiceProperties.getClusterName());
                if (serviceNames != null) {
                    serviceNames.remove(oldServiceProperties.getServiceName());
                }
            }
        }
        _simpleLoadBalancerState.notifyListenersOnServicePropertiesUpdates(newServiceProperties);
        _simpleLoadBalancerState.refreshServiceStrategies(pickedProperties);
        _simpleLoadBalancerState.refreshClients(pickedProperties);

        // refresh state for which services are on which clusters
        Set<String> serviceNames =
            _simpleLoadBalancerState.getServicesPerCluster().get(pickedProperties.getClusterName());
        if (serviceNames == null) {
            serviceNames = Collections.newSetFromMap(new ConcurrentHashMap<>());
            _simpleLoadBalancerState.getServicesPerCluster().put(pickedProperties.getClusterName(), serviceNames);
        }
        serviceNames.add(pickedProperties.getServiceName());
    } else if (oldServicePropertiesItem != null) {
        // if we've replaced a service properties with null, update the cluster ->
        // service state that the service is no longer on its cluster.
        ServiceProperties oldServiceProperties = oldServicePropertiesItem.getProperty();
        if (oldServiceProperties != null) {
            Set<String> serviceNames =
                _simpleLoadBalancerState.getServicesPerCluster().get(oldServiceProperties.getClusterName());
            if (serviceNames != null) {
                serviceNames.remove(oldServiceProperties.getServiceName());
            }
        }
    }

    if (discoveryProperties == null) {
        // we'll just ignore the event and move on.
        // we could receive a null if the file store properties cannot read/write a file.
        // in this case it's better to leave the state intact and not do anything
        _log.warn("Received a null service properties for {}", listenTo);
    }
}
// Verifies that the canary distribution decision picks the right config set
// (canary vs stable) and that listeners/clients are refreshed with the
// picked properties.
@Test(dataProvider = "getConfigsAndDistributions")
public void testWithCanaryConfigs(ServiceProperties stableConfigs, ServiceProperties canaryConfigs,
    CanaryDistributionStrategy distributionStrategy, CanaryDistributionProvider.Distribution distribution) {
    ServiceLoadBalancerSubscriberFixture fixture = new ServiceLoadBalancerSubscriberFixture();
    when(fixture._canaryDistributionProvider.distribute(any())).thenReturn(distribution);
    fixture.getMockSubscriber(distribution != null).handlePut(SERVICE_NAME,
        new ServiceStoreProperties(stableConfigs, canaryConfigs, distributionStrategy));

    ServiceProperties expectedPickedProperties =
        distribution == CanaryDistributionProvider.Distribution.CANARY ? canaryConfigs : stableConfigs;
    Assert.assertEquals(fixture._servicePropertiesUpdateArgsCaptor.getValue().getProperty(), expectedPickedProperties);
    // A null distribution (no canary decision) defaults to STABLE.
    Assert.assertEquals(
        fixture._servicePropertiesUpdateArgsCaptor.getValue().getDistribution(),
        distribution == null ? CanaryDistributionProvider.Distribution.STABLE : distribution);
    // Fix: this assertion was duplicated verbatim; one copy removed.
    // TODO(review): the duplicate may have been intended for a different
    // captor (e.g. the refreshServiceStrategies argument) — confirm.
    Assert.assertEquals(fixture._refreshClientsPropertiesArgCaptor.getValue(), expectedPickedProperties);
    Assert.assertEquals(fixture._serviceProperties.get(SERVICE_NAME).getProperty(), expectedPickedProperties);
}
/**
 * Renders the throwable by applying two regex substitutions in sequence to
 * the superclass rendering: frame lines get the prefix, then "caused by"
 * lines are rewritten with the causing marker.
 */
@Override
protected String throwableProxyToString(IThrowableProxy tp) {
    final String rendered = super.throwableProxyToString(tp);
    final String withPrefix = PATTERN.matcher(rendered).replaceAll(PREFIX);
    return CAUSING_PATTERN.matcher(withPrefix).replaceAll(CAUSING);
}
// With root-cause-first ordering, the innermost exception must appear before
// each of its wrappers in the rendered stack trace.
@Test
void placesRootCauseIsFirst() {
    assertThat(converter.throwableProxyToString(proxy)).matches(Pattern.compile(".+" +
        "java\\.net\\.SocketTimeoutException: Timed-out reading from socket.+" +
        "java\\.io\\.IOException: Fairly general error doing some IO.+" +
        "java\\.lang\\.RuntimeException: Very general error doing something" +
        ".+", Pattern.DOTALL));
}
/**
 * Returns, for each given profile id, the set of index-set ids using that
 * profile. Ids that are not valid ObjectIds (excluded from the query) or are
 * unused map to an empty set.
 */
public Map<String, Set<String>> usagesOfProfiles(final Set<String> profilesIds) {
    Map<String, Set<String>> usagesInIndexSet = new HashMap<>();
    // Seed every requested id so unused/invalid profiles yield empty sets.
    profilesIds.forEach(profId -> usagesInIndexSet.put(profId, new HashSet<>()));
    indexSetsCollection
        .find(Filters.in(FIELD_PROFILE_ID, profilesIds.stream()
            .filter(ObjectId::isValid)
            .toList()))
        .projection(Projections.include(INDEX_SET_ID, FIELD_PROFILE_ID))
        .forEach(document -> {
            final String indexSetId = document.getObjectId(INDEX_SET_ID).toString();
            final String profileId = document.getString(FIELD_PROFILE_ID);
            usagesInIndexSet.get(profileId).add(indexSetId);
        });
    return usagesInIndexSet;
}
// Used, unused and syntactically invalid profile ids must all appear in the
// result; the latter two map to empty sets.
@Test
public void testReturnsProperUsagesForMultipleProfiles() {
    Map<String, Set<String>> expectedResult = Map.of(
        PROFILE1_ID, Set.of("000000000000000000000001", "000000000000000000000011"),
        PROFILE2_ID, Set.of("000000000000000000000002"),
        UNUSED_PROFILE_ID, Set.of(),
        WRONG_PROFILE_ID, Set.of()
    );
    assertEquals(expectedResult, toTest.usagesOfProfiles(Set.of(PROFILE1_ID, PROFILE2_ID, UNUSED_PROFILE_ID, WRONG_PROFILE_ID)));
}
// Lower bound on weight per distance unit: reciprocal of (max speed converted
// via SPEED_CONV, times max priority), plus the configured distance influence.
@Override
public double calcMinWeightPerDistance() {
    return 1d / (maxSpeedCalc.calcMax() / SPEED_CONV) / maxPrioCalc.calcMax() + distanceInfluence;
}
// calcMinWeightPerDistance must honor speed LIMIT statements, but a limit
// above the encoded value's max storable speed is ignored so custom models
// stay compatible if the encoded max speed later decreases.
@Test
public void testMaxSpeed() {
    assertEquals(155, avSpeedEnc.getMaxOrMaxStorableDecimal(), 0.1);
    assertEquals(1d / 72 * 3.6, createWeighting(createSpeedCustomModel(avSpeedEnc).
        addToSpeed(If("true", LIMIT, "72"))).calcMinWeightPerDistance(), .001);

    // ignore too big limit to let custom model compatibility not break when max speed of encoded value later decreases
    assertEquals(1d / 155 * 3.6, createWeighting(createSpeedCustomModel(avSpeedEnc).
        addToSpeed(If("true", LIMIT, "180"))).calcMinWeightPerDistance(), .001);

    // reduce speed only a bit
    assertEquals(1d / 150 * 3.6, createWeighting(createSpeedCustomModel(avSpeedEnc).
        addToSpeed(If("road_class == SERVICE", MULTIPLY, "1.5")).
        addToSpeed(If("true", LIMIT, "150"))).calcMinWeightPerDistance(), .001);
}
/**
 * Attempts to fill a gap in the term buffer with a single padding frame.
 * The gap is scanned backwards one frame slot at a time; if any slot already
 * holds a non-zero value the gap is considered claimed and nothing is written.
 *
 * @return true if the padding frame was written, false if the gap was not empty
 */
public static boolean tryFillGap(
    final UnsafeBuffer logMetaDataBuffer,
    final UnsafeBuffer termBuffer,
    final int termId,
    final int gapOffset,
    final int gapLength)
{
    // Walk from the last frame slot in the gap back to gapOffset, aborting
    // on the first non-zero word.
    for (int slot = (gapOffset + gapLength) - FRAME_ALIGNMENT; slot >= gapOffset; slot -= FRAME_ALIGNMENT)
    {
        if (0 != termBuffer.getInt(slot))
        {
            return false;
        }
    }

    // Gap is empty: write a padding frame header covering the whole gap.
    applyDefaultHeader(logMetaDataBuffer, termBuffer, gapOffset);
    frameType(termBuffer, gapOffset, HDR_TYPE_PAD);
    frameTermOffset(termBuffer, gapOffset);
    frameTermId(termBuffer, gapOffset, termId);
    frameLengthOrdered(termBuffer, gapOffset, gapLength);

    return true;
}
// A gap whose first slot already contains a frame length must not be filled.
@Test
void shouldNotOverwriteExistingFrame() {
    final int gapOffset = 0;
    final int gapLength = 64;
    dataFlyweight.frameLength(32);

    assertFalse(TermGapFiller.tryFillGap(metaDataBuffer, termBuffer, TERM_ID, gapOffset, gapLength));
}