focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// UDF: subtracts `interval` units of `unit` from `date`, truncating the result
// to a whole day so the returned value stays a midnight-aligned DATE.
@Udf(description = "Subtracts a duration from a date")
public Date dateSub(
    @UdfParameter(description = "A unit of time, for example DAY") final TimeUnit unit,
    @UdfParameter(description = "An integer number of intervals to subtract") final Integer interval,
    @UdfParameter(description = "A DATE value.") final Date date
) {
  // SQL semantics: any null argument yields a null result.
  if (unit == null || interval == null || date == null) {
    return null;
  }
  // NOTE(review): TimeUnit.toDays truncates toward zero, so a non-midnight
  // pre-epoch result would land on the wrong day — presumably inputs are
  // always midnight-aligned DATE values; confirm.
  final long epochDayResult = TimeUnit.MILLISECONDS.toDays(date.getTime() - unit.toMillis(interval));
  return new Date(TimeUnit.DAYS.toMillis(epochDayResult));
}
// Verifies SQL null-propagation: a null in any argument position
// (date, unit, or interval) must make dateSub return null.
@Test
public void handleNulls() {
  assertNull(udf.dateSub(TimeUnit.DAYS, -300, null));
  assertNull(udf.dateSub(null, 54, new Date(864000000)));
  assertNull(udf.dateSub(TimeUnit.DAYS, null, new Date(864000000)));
}
// Fans out reindexing-status requests to every cluster controller cluster in
// parallel and merges the per-cluster answers into one immutable map.
@Override
public Map<String, ClusterReindexing> getReindexingStatus(ModelResult application) throws IOException {
    Map<ClusterId, List<ServiceInfo>> clusters = clusterControllerClusters(application);
    Map<ClusterId, CompletableFuture<Map<String, ClusterReindexing>>> futureStatusPerCluster = new HashMap<>();
    clusters.forEach((clusterId, clusterNodes) -> {
        // Ask every node of the cluster concurrently; the first request to
        // complete successfully wins.
        var parallelRequests = clusterNodes.stream()
                .map(this::getReindexingStatus)
                .toList();
        CompletableFuture<Map<String, ClusterReindexing>> combinedRequest = CompletableFutures.firstOf(parallelRequests);
        futureStatusPerCluster.put(clusterId, combinedRequest);
    });
    try {
        Map<String, ClusterReindexing> statusPerCluster = new HashMap<>();
        futureStatusPerCluster.forEach((clusterId, futureStatus) -> {
            // join() rethrows failures as CompletionException, caught below
            // and rewrapped as IOException.
            // NOTE(review): putAll would silently overwrite duplicate content
            // cluster names appearing under multiple controller clusters — confirm
            // names are globally unique.
            statusPerCluster.putAll(futureStatus.join());
        });
        return Map.copyOf(statusPerCluster);
    } catch (Exception e) {
        throw new IOException("Failed to get reindexing status from cluster controllers: " + e.getMessage(), e);
    }
}
// One controller cluster errors (server1), the other two each report a
// different content cluster; the client must still combine both answers.
@Test
public void combines_result_from_multiple_cluster_controller_clusters() throws IOException {
    var client = new DefaultClusterReindexingStatusClient();
    MockApplication app = new MockApplication();

    String uriPath = "/reindexing/v1/status";
    // server1 fails: firstOf should fall through to a healthy node.
    server1.stubFor(get(urlEqualTo(uriPath)).willReturn(serverError()));
    server2.stubFor(get(urlEqualTo(uriPath)).willReturn(okJson(
            "{" +
            " \"clusters\": {" +
            "  \"cluster1\": {" +
            "   \"documentTypes\": {" +
            "     \"music\": {" +
            "       \"startedMillis\":0," +
            "       \"state\": \"" + ClusterReindexing.State.RUNNING.asString() + "\"" +
            "     }" +
            "   }" +
            "  }" +
            " }" +
            "}")));
    server3.stubFor(get(urlEqualTo(uriPath)).willReturn(okJson(
            "{" +
            " \"clusters\": {" +
            "  \"cluster2\": {" +
            "   \"documentTypes\": {" +
            "     \"artist\": {" +
            "       \"startedMillis\":50," +
            "       \"endedMillis\":150," +
            "       \"progress\": 0.5," +
            "       \"state\": \"" + ClusterReindexing.State.SUCCESSFUL.asString() + "\"," +
            "       \"message\":\"success\"" +
            "     }" +
            "   }" +
            "  }" +
            " }" +
            "}")));

    Map<String, ClusterReindexing> expected = Map.of(
            "cluster1", new ClusterReindexing(Map.of(
                    "music", new ClusterReindexing.Status(Instant.ofEpochMilli(0), null, ClusterReindexing.State.RUNNING, null, null))),
            "cluster2", new ClusterReindexing(Map.of(
                    "artist", new ClusterReindexing.Status(Instant.ofEpochMilli(50), Instant.ofEpochMilli(150), ClusterReindexing.State.SUCCESSFUL, "success", 0.5))));
    Map<String, ClusterReindexing> result = client.getReindexingStatus(app);
    assertEquals(expected, result);
}
/**
 * Returns (creating on first use) the sensor scoped to the given thread,
 * task and processor node, identified by {@code sensorSuffix}.
 *
 * @param recordingLevel level at which the sensor records metrics
 * @param parents        optional parent sensors the new sensor reports into
 * @return the existing or newly registered sensor
 */
public Sensor nodeLevelSensor(final String threadId,
                              final String taskId,
                              final String processorNodeName,
                              final String sensorSuffix,
                              final Sensor.RecordingLevel recordingLevel,
                              final Sensor... parents) {
    final String fullPrefix = nodeSensorPrefix(threadId, taskId, processorNodeName);
    // Guard the registry so concurrent callers get a single sensor per name.
    synchronized (nodeLevelSensors) {
        return getSensors(nodeLevelSensors, sensorSuffix, fullPrefix, recordingLevel, parents);
    }
}
// A second request for the same node-level sensor name must return the
// already-registered sensor instance, not create a new one.
@Test
public void shouldGetExistingNodeLevelSensor() {
    final Metrics metrics = mock(Metrics.class);
    final RecordingLevel recordingLevel = RecordingLevel.INFO;
    setupGetExistingSensorTest(metrics);
    final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, VERSION, time);

    final Sensor actualSensor = streamsMetrics.nodeLevelSensor(
        THREAD_ID1, TASK_ID1, NODE_ID1, SENSOR_NAME_1, recordingLevel
    );

    assertThat(actualSensor, is(equalToObject(sensor)));
}
/**
 * Jaccard distance between two sets given as arrays:
 * 1 - |A ∩ B| / |A ∪ B|.
 *
 * @param a elements of the first set (duplicates collapse)
 * @param b elements of the second set (duplicates collapse)
 * @return distance in [0, 1]; NaN when both arrays are empty (0/0),
 *         matching the previous behavior.
 */
@Override
public double d(T[] a, T[] b) {
    Set<T> setA = new HashSet<>(Arrays.asList(a));
    Set<T> setB = new HashSet<>(Arrays.asList(b));

    Set<T> union = new HashSet<>(setA);
    union.addAll(setB);

    // BUG FIX: the intersection must be computed against set(b). The old code
    // retained against the union — a superset of set(a) — so the "intersection"
    // was always equal to set(a), giving a wrong distance whenever `a`
    // contained elements absent from `b`.
    Set<T> intersection = new HashSet<>(setA);
    intersection.retainAll(setB);

    return 1.0 - (double) intersection.size() / union.size();
}
// Exercises the static Set-based overload of JaccardDistance.d:
// |{3,4}| / |{1..6}| = 2/6, so the distance is 1 - 1/3 ≈ 0.6666667.
@Test
public void testDistance() {
    System.out.println("distance");
    Set<Integer> a = new HashSet<>();
    a.add(1);
    a.add(2);
    a.add(3);
    a.add(4);
    Set<Integer> b = new HashSet<>();
    b.add(3);
    b.add(4);
    b.add(5);
    b.add(6);
    assertEquals(0.6666667, JaccardDistance.d(a, b), 1E-7);
}
/**
 * Greedily fuses the transforms of the given portable pipeline into
 * executable stages and returns the resulting fused pipeline.
 */
public static FusedPipeline fuse(Pipeline p) {
    final GreedyPipelineFuser fuser = new GreedyPipelineFuser(p);
    return fuser.fusedPipeline;
}
// A composite transform containing primitives in different environments must
// be ignored by fusion: only its leaf primitives are fused, producing one
// stage per (environment, input) pair, with impulse left to the runner.
@Test
public void compositesIgnored() {
    Components components =
        partialComponents
            .toBuilder()
            .putTransforms(
                "read",
                PTransform.newBuilder()
                    .setUniqueName("Read")
                    .putInputs("input", "impulse.out")
                    .putOutputs("output", "read.out")
                    .setSpec(
                        FunctionSpec.newBuilder()
                            .setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
                            .setPayload(
                                ParDoPayload.newBuilder()
                                    .setDoFn(FunctionSpec.newBuilder())
                                    .build()
                                    .toByteString()))
                    .setEnvironmentId("py")
                    .build())
            .putPcollections("read.out", pc("read.out"))
            .putTransforms(
                "goTransform",
                PTransform.newBuilder()
                    .setUniqueName("GoTransform")
                    .putInputs("input", "read.out")
                    .putOutputs("output", "go.out")
                    .setSpec(
                        FunctionSpec.newBuilder()
                            .setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
                            .setPayload(
                                ParDoPayload.newBuilder()
                                    .setDoFn(FunctionSpec.newBuilder())
                                    .build()
                                    .toByteString()))
                    .setEnvironmentId("go")
                    .build())
            .putPcollections("go.out", pc("go.out"))
            .putTransforms(
                "pyTransform",
                PTransform.newBuilder()
                    .setUniqueName("PyTransform")
                    .putInputs("input", "read.out")
                    .putOutputs("output", "py.out")
                    .setSpec(
                        FunctionSpec.newBuilder()
                            .setUrn(PTransformTranslation.ASSIGN_WINDOWS_TRANSFORM_URN)
                            .setPayload(
                                WindowIntoPayload.newBuilder()
                                    .setWindowFn(FunctionSpec.newBuilder())
                                    .build()
                                    .toByteString()))
                    .setEnvironmentId("py")
                    .build())
            .putPcollections("py.out", pc("py.out"))
            .putTransforms(
                "compositeMultiLang",
                PTransform.newBuilder()
                    .setUniqueName("CompositeMultiLang")
                    .putInputs("input", "impulse.out")
                    .putOutputs("pyOut", "py.out")
                    .putOutputs("goOut", "go.out")
                    .addSubtransforms("read")
                    .addSubtransforms("goTransform")
                    .addSubtransforms("pyTransform")
                    .build())
            .build();
    FusedPipeline fused =
        GreedyPipelineFuser.fuse(
            Pipeline.newBuilder()
                .addRootTransformIds("impulse")
                .addRootTransformIds("compositeMultiLang")
                .setComponents(components)
                .build());

    // Impulse is the runner transform
    assertThat(fused.getRunnerExecutedTransforms(), hasSize(1));
    assertThat(fused.getFusedStages(), hasSize(3));
    assertThat(
        fused.getFusedStages(),
        containsInAnyOrder(
            ExecutableStageMatcher.withInput("impulse.out")
                .withOutputs("read.out")
                .withTransforms("read"),
            ExecutableStageMatcher.withInput("read.out")
                .withNoOutputs()
                .withTransforms("pyTransform"),
            ExecutableStageMatcher.withInput("read.out")
                .withNoOutputs()
                .withTransforms("goTransform")));
}
@Nullable String getCollectionName(BsonDocument command, String commandName) { if (COMMANDS_WITH_COLLECTION_NAME.contains(commandName)) { String collectionName = getNonEmptyBsonString(command.get(commandName)); if (collectionName != null) { return collectionName; } } // Some other commands, like getMore, have a field like {"collection": collectionName}. return getNonEmptyBsonString(command.get("collection")); }
// An allow-listed command ("find") yields its own value as the collection
// name, trimmed of surrounding whitespace.
@Test
void getCollectionName_allowListedCommand() {
    assertThat(listener.getCollectionName(new BsonDocument("find", new BsonString(" bar ")), "find")).isEqualTo("bar");
}
/**
 * Restarts a connector and/or its tasks per the given {@link RestartRequest}:
 * builds a restart plan, stops the locally-assigned connector/tasks (marking
 * each as restarting), then starts them again. Only entities currently
 * assigned to this worker are touched; restart failures are logged, never
 * rethrown.
 */
protected synchronized void doRestartConnectorAndTasks(RestartRequest request) {
    String connectorName = request.connectorName();
    Optional<RestartPlan> maybePlan = buildRestartPlan(request);
    if (!maybePlan.isPresent()) {
        log.debug("Skipping restart of connector '{}' since no status is available: {}", connectorName, request);
        return;
    }
    RestartPlan plan = maybePlan.get();
    log.info("Executing {}", plan);

    // If requested, stop the connector and any tasks, marking each as restarting
    final ExtendedAssignment currentAssignments = assignment;
    // Restrict the plan to the tasks actually assigned to this worker.
    final Collection<ConnectorTaskId> assignedIdsToRestart = plan.taskIdsToRestart()
            .stream()
            .filter(taskId -> currentAssignments.tasks().contains(taskId))
            .collect(Collectors.toList());
    final boolean restartConnector = plan.shouldRestartConnector() && currentAssignments.connectors().contains(connectorName);
    final boolean restartTasks = !assignedIdsToRestart.isEmpty();
    if (restartConnector) {
        String stageDescription = "stopping to-be-restarted connector " + connectorName;
        try (TickThreadStage stage = new TickThreadStage(stageDescription)) {
            worker.stopAndAwaitConnector(connectorName);
        }
        onRestart(connectorName);
    }
    if (restartTasks) {
        String stageDescription = "stopping " + assignedIdsToRestart.size() + " to-be-restarted tasks for connector " + connectorName;
        // Stop the tasks and mark as restarting
        try (TickThreadStage stage = new TickThreadStage(stageDescription)) {
            worker.stopAndAwaitTasks(assignedIdsToRestart);
        }
        assignedIdsToRestart.forEach(this::onRestart);
    }

    // Now restart the connector and tasks
    if (restartConnector) {
        try {
            startConnector(connectorName, (error, targetState) -> {
                if (error == null) {
                    log.info("Connector '{}' restart successful", connectorName);
                } else {
                    log.error("Connector '{}' restart failed", connectorName, error);
                }
            });
        } catch (Throwable t) {
            log.error("Connector '{}' restart failed", connectorName, t);
        }
    }
    if (restartTasks) {
        log.debug("Restarting {} of {} tasks for {}", assignedIdsToRestart.size(), plan.totalTaskCount(), request);
        assignedIdsToRestart.forEach(taskId -> {
            try {
                if (startTask(taskId)) {
                    log.info("Task '{}' restart successful", taskId);
                } else {
                    log.error("Task '{}' restart failed", taskId);
                }
            } catch (Throwable t) {
                log.error("Task '{}' restart failed", taskId, t);
            }
        });
        log.debug("Restarted {} of {} tasks for {} as requested", assignedIdsToRestart.size(), plan.totalTaskCount(), request);
    }
    log.info("Completed {}", plan);
}
// With an empty local assignment, a restart plan that targets this connector
// and task must be a no-op: nothing stopped, nothing started.
@Test
public void testDoRestartConnectorAndTasksNoAssignments() {
    ConnectorTaskId taskId = new ConnectorTaskId(CONN1, 0);
    RestartRequest restartRequest = new RestartRequest(CONN1, false, true);
    RestartPlan restartPlan = mock(RestartPlan.class);
    when(restartPlan.shouldRestartConnector()).thenReturn(true);
    when(restartPlan.taskIdsToRestart()).thenReturn(Collections.singletonList(taskId));
    doReturn(Optional.of(restartPlan)).when(herder).buildRestartPlan(restartRequest);

    herder.assignment = ExtendedAssignment.empty();
    herder.doRestartConnectorAndTasks(restartRequest);
    verifyNoMoreInteractions(restartPlan, worker, member, configBackingStore, statusBackingStore);
}
/**
 * Maps each user UUID to whether it is managed by an external identity
 * provider. Delegates to the managed instance service when one is present;
 * otherwise every user is reported as not managed.
 */
@Override
public Map<String, Boolean> getUserUuidToManaged(DbSession dbSession, Set<String> userUuids) {
    return findManagedInstanceService()
        .map(managedInstanceService -> managedInstanceService.getUserUuidToManaged(dbSession, userUuids))
        // orElseGet: Optional.orElse evaluates its argument eagerly, so the
        // all-false fallback map was being built on every call even when a
        // managed instance service answered. Build it lazily instead.
        .orElseGet(() -> returnNonManagedForAll(userUuids));
}
// The delegating service must pick the managed service that answers and
// return its map unchanged, ignoring the never-managed sibling.
@Test
public void getUserUuidToManaged_delegatesToRightService_andPropagateAnswer() {
    Set<String> userUuids = Set.of("a", "b");
    Map<String, Boolean> serviceResponse = Map.of("a", false, "b", true);

    ManagedInstanceService anotherManagedInstanceService = getManagedInstanceService(userUuids, serviceResponse);
    DelegatingManagedServices managedInstanceService = new DelegatingManagedServices(Set.of(new NeverManagedInstanceService(), anotherManagedInstanceService));

    Map<String, Boolean> userUuidToManaged = managedInstanceService.getUserUuidToManaged(dbSession, userUuids);

    assertThat(userUuidToManaged).containsExactlyInAnyOrderEntriesOf(serviceResponse);
}
// Snapshot of the set's elements; ordering follows the backing map's
// value-iteration order (unspecified for a plain concurrent map).
@Override
public Object[] toArray() {
    return map.values().toArray();
}
// toArray must surface every added element, and the typed overload must
// produce elements of the set's value type.
@Test
public void testToArray() {
    ExtendedSet<TestValue> set = new ExtendedSet<>(Maps.newConcurrentMap());
    TestValue val = new TestValue("foo", 1);
    assertTrue(set.add(val));
    TestValue nextval = new TestValue("goo", 2);
    assertTrue(set.add(nextval));
    Object[] array = set.toArray();
    TestValue[] valarray = {val, nextval};
    assertArrayEquals(valarray, array);
    assertTrue(set.toArray(new TestValue[0])[0] instanceof TestValue);
}
// Intentionally a no-op: this implementation discards login requests.
// NOTE(review): presumably a disabled/stub variant of the API — confirm
// callers expect login to have no effect here.
@Override
public void login(String loginId) {
}
// With the no-op login implementation, no track event may fire and the
// login id must remain unset.
@Test
public void testLogin() {
    mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
        @Override
        public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
            Assert.fail();
            return false;
        }
    });
    mSensorsAPI.login("abcde", new JSONObject());
    Assert.assertNull(mSensorsAPI.getLoginId());
}
/**
 * Applies a binary operator to this boolean and {@code rhs}.
 * EQUAL/NOT_EQUAL against NULL is delegated to the NULL operand's own
 * implementation; every other case requires a boolean rhs (enforced by
 * checkTypeMatch, which throws for mismatched types).
 */
@Override
public SelBoolean binaryOps(SelOp op, SelType rhs) {
    if (rhs.type() == SelTypes.NULL && (op == SelOp.EQUAL || op == SelOp.NOT_EQUAL)) {
        return (SelBoolean) rhs.binaryOps(op, this);
    }
    SelTypeUtil.checkTypeMatch(this.type(), rhs.type());
    boolean another = ((SelBoolean) rhs).booleanVal();
    switch (op) {
        case AND:
            return SelBoolean.of(val && another);
        case OR:
            return SelBoolean.of(val || another);
        case EQUAL:
            return SelBoolean.of(val == another);
        case NOT_EQUAL:
            return SelBoolean.of(val != another);
        case NOT:
            // NOTE(review): NOT is unary but handled here; rhs is type-checked
            // and then ignored — confirm this is intended.
            return SelBoolean.of(!val);
        default:
            throw new UnsupportedOperationException(
                this.type() + " DO NOT support expression operation " + op);
    }
}
// ADD against NULL is not special-cased, so the type check must reject
// the mismatched operand types with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void testInvalidBinaryOpRhs() {
    one.binaryOps(SelOp.ADD, SelType.NULL);
}
// Asks the authorization plugin for the view template used to render role
// configuration, decoding the response with the message converter matching
// the plugin's resolved extension version.
public String getRoleConfigurationView(String pluginId) {
    return pluginRequestHelper.submitRequest(pluginId, REQUEST_GET_ROLE_CONFIG_VIEW, new DefaultPluginInteractionCallback<>() {
        @Override
        public String onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
            return getMessageConverter(resolvedExtensionVersion).getRoleConfigurationViewFromResponseBody(responseBody);
        }
    });
}
// The extension must issue the go.cd.authorization.get-role-config-view
// request and unwrap the "template" field from the plugin's JSON response.
@Test
void shouldTalkToPlugin_To_GetRoleConfigurationView() {
    String responseBody = "{ \"template\": \"<div>This is view snippet</div>\" }";
    when(pluginManager.submitTo(eq(PLUGIN_ID), eq(AUTHORIZATION_EXTENSION), requestArgumentCaptor.capture())).thenReturn(new DefaultGoPluginApiResponse(SUCCESS_RESPONSE_CODE, responseBody));

    String pluginConfigurationView = authorizationExtension.getRoleConfigurationView(PLUGIN_ID);

    assertRequest(requestArgumentCaptor.getValue(), AUTHORIZATION_EXTENSION, "2.0", REQUEST_GET_ROLE_CONFIG_VIEW, null);
    assertThat(pluginConfigurationView).isEqualTo("<div>This is view snippet</div>");
}
// Syncs the Hudi table to BigQuery. COPY_ON_WRITE and MERGE_ON_READ share
// the same sync path; any other table type is rejected.
@Override
public void syncHoodieTable() {
    switch (bqSyncClient.getTableType()) {
        case COPY_ON_WRITE:
        case MERGE_ON_READ:
            syncTable(bqSyncClient);
            break;
        default:
            throw new UnsupportedOperationException(bqSyncClient.getTableType() + " table type is not supported yet.");
    }
}
// When the table already exists but its specification differs, the sync tool
// must rewrite the manifest file and push an updated partitioned schema.
@Test
void useBQManifestFile_existingPartitionedTable() {
    properties.setProperty(BigQuerySyncConfig.BIGQUERY_SYNC_USE_BQ_MANIFEST_FILE.key(), "true");
    String prefix = "file:///local/prefix";
    properties.setProperty(BigQuerySyncConfig.BIGQUERY_SYNC_SOURCE_URI_PREFIX.key(), prefix);
    properties.setProperty(BigQuerySyncConfig.BIGQUERY_SYNC_PARTITION_FIELDS.key(), "datestr,type");
    when(mockBqSyncClient.getTableType()).thenReturn(HoodieTableType.COPY_ON_WRITE);
    when(mockBqSyncClient.getBasePath()).thenReturn(TEST_TABLE_BASE_PATH);
    when(mockBqSyncClient.datasetExists()).thenReturn(true);
    when(mockBqSyncClient.tableNotExistsOrDoesNotMatchSpecification(TEST_TABLE)).thenReturn(false);
    Path manifestPath = new Path("file:///local/path");
    when(mockManifestFileWriter.getManifestSourceUri(true)).thenReturn(manifestPath.toUri().getPath());
    List<String> partitionFields = Arrays.asList("datestr", "type");
    when(mockBqSchemaResolver.getTableSchema(any(), eq(partitionFields))).thenReturn(schema);
    BigQuerySyncTool tool = new BigQuerySyncTool(properties, mockManifestFileWriter, mockBqSyncClient, mockMetaClient, mockBqSchemaResolver);
    tool.syncHoodieTable();
    verify(mockBqSyncClient).updateTableSchema(TEST_TABLE, schema, partitionFields);
    verify(mockManifestFileWriter).writeManifestFile(true);
}
/**
 * Reads from {@code in} until {@code buf} is completely filled or EOF is
 * reached. Unlike {@code DataInputStream.readFully}, hitting EOF early is
 * not an error: the number of bytes actually read is returned (possibly 0).
 *
 * @return the number of bytes stored into {@code buf}
 * @throws IOException if the underlying read fails
 */
public static int tryReadFully(final InputStream in, final byte[] buf) throws IOException {
    int filled = 0;
    for (;;) {
        final int remaining = buf.length - filled;
        if (remaining == 0) {
            return filled;
        }
        final int count = in.read(buf, filled, remaining);
        if (count < 0) {
            // EOF before the buffer was full — report the partial count.
            return filled;
        }
        filled += count;
    }
}
// A stream shorter than the buffer must return the partial count, with only
// the read prefix of the buffer populated.
@Test
void testTryReadFullyFromShorterStream() throws IOException {
    ByteArrayInputStream inputStream = new ByteArrayInputStream("t".getBytes(StandardCharsets.UTF_8));
    byte[] out = new byte[4];
    int read = IOUtils.tryReadFully(inputStream, out);
    assertThat(Arrays.copyOfRange(out, 0, read))
        .containsExactly("t".getBytes(StandardCharsets.UTF_8));
}
// Final accumulator result: merges any sketches still buffered via unionAll().
@Nonnull
@Override
public Sketch<IntegerSummary> getResult() {
    return unionAll();
}
// A fresh accumulator is empty and its merged result must estimate zero.
@Test
public void testEmptyAccumulator() {
    TupleIntSketchAccumulator accumulator = new TupleIntSketchAccumulator(_setOps, _nominalEntries, 2);
    Assert.assertTrue(accumulator.isEmpty());
    Assert.assertEquals(accumulator.getResult().getEstimate(), 0.0);
}
// Factory for the 400 response raised when a client supplies a malformed
// env identifier; `format` is interpolated into the message template.
public static BadRequestException invalidEnvFormat(String format) {
    return new BadRequestException("invalid env format:%s", format);
}
// The factory must interpolate the offending value into the message template.
@Test
public void testInvalidEnvFormat() {
    BadRequestException invalidEnvFormat = BadRequestException.invalidEnvFormat("format");
    assertEquals("invalid env format:format", invalidEnvFormat.getMessage());
}
/**
 * Detects Travis CI: Travis sets both CI=true and TRAVIS=true in the build
 * environment (other CI vendors set CI without TRAVIS).
 */
@Override
public boolean isDetected() {
    // Short-circuit: TRAVIS is only consulted when the generic CI flag is set,
    // matching the original evaluation order.
    if (!"true".equals(system.envVariable("CI"))) {
        return false;
    }
    return "true".equals(system.envVariable("TRAVIS"));
}
// Travis requires both CI=true and TRAVIS=true; another vendor's CI=true
// (here DRONE) without TRAVIS must not be detected as Travis.
@Test
public void isDetected() {
    setEnvVariable("CI", "true");
    setEnvVariable("TRAVIS", "true");
    assertThat(underTest.isDetected()).isTrue();

    setEnvVariable("CI", "true");
    setEnvVariable("DRONE", "true");
    setEnvVariable("TRAVIS", null);
    assertThat(underTest.isDetected()).isFalse();
}
/**
 * Points INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY at the most specific
 * location known for this transformation: the repository directory when
 * attached to a repository, else the filename directory when loaded from a
 * file, else whatever value the variable already holds.
 */
protected void setInternalEntryCurrentDirectory() {
    final String sourceVariable;
    if (repository != null) {
        sourceVariable = Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY;
    } else if (filename != null) {
        sourceVariable = Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY;
    } else {
        // Neither source is known: keep the current value unchanged.
        sourceVariable = Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY;
    }
    variables.setVariable(
        Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY,
        variables.getVariable(sourceVariable));
}
// With a repository attached, the repository directory variable must win
// over both the filename directory and the previously set value.
@Test
public void testSetInternalEntryCurrentDirectoryWithRepository( ) {
    TransMeta transMetaTest = new TransMeta( );
    RepositoryDirectoryInterface path = mock( RepositoryDirectoryInterface.class );
    when( path.getPath() ).thenReturn( "aPath" );
    transMetaTest.setRepository( mock( Repository.class ) );
    transMetaTest.setRepositoryDirectory( path );
    transMetaTest.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, "Original value defined at run execution" );
    transMetaTest.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, "file:///C:/SomeFilenameDirectory" );
    transMetaTest.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_REPOSITORY_DIRECTORY, "/SomeRepDirectory" );
    transMetaTest.setInternalEntryCurrentDirectory();

    assertEquals( "/SomeRepDirectory", transMetaTest.getVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY ) );
}
/**
 * Convenience overload: wraps a fully materialized result list with no job
 * execution result and the SUCCESS_WITH_CONTENT result kind.
 */
public static ResultFetcher fromResults(
        OperationHandle operationHandle,
        ResolvedSchema resultSchema,
        List<RowData> results) {
    return fromResults(operationHandle, resultSchema, results, null, ResultKind.SUCCESS_WITH_CONTENT);
}
// Concurrent fetches against a fetcher built from a pre-materialized result
// list must stay consistent.
@Test
void testFetchResultFromDummyStoreInParallel() throws Exception {
    checkFetchResultInParallel(
        ResultFetcher.fromResults(OperationHandle.create(), schema, data));
}
public FEELFnResult<List<Object>> invoke(@ParameterName("list") List list) { if ( list == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null")); } // spec requires us to return a new list final List<Object> result = new ArrayList<>( list ); Collections.reverse( result ); return FEELFnResult.ofResult( result ); }
// Reversing an empty list yields an empty (but distinct, new) list.
@Test
void invokeEmptyList() {
    FunctionTestUtil.assertResultList(reverseFunction.invoke(Collections.emptyList()), Collections.emptyList());
}
// Distro consistency checksum for a client: MD5 over the client's canonical
// string form; "0" is the sentinel when no unique string can be built.
public static String checksum(Client client) {
    String s = buildUniqueString(client);
    if (s == null) {
        return "0";
    }
    return MD5Utils.md5Hex(s, Constants.ENCODE);
}
// Micro-benchmark: averages checksum cost over N invocations.
// NOTE(review): the printed value is nanoseconds per invocation, but the
// label reads "ivk/ns" — looks inverted; confirm intent before relabeling.
@Test
void performanceTestOfChecksum() {
    long start = System.nanoTime();
    for (int i = 0; i < N; i++) {
        DistroUtils.checksum(client1);
    }
    System.out.printf("Distro Verify Checksum Performance: %.2f ivk/ns\n",
        ((double) System.nanoTime() - start) / N);
}
/**
 * Reverses the byte order of each 4-byte int in b[off .. off+len) in place.
 * len must be a positive multiple of 4 (enforced by checkLength).
 *
 * @return the same array, for call chaining
 */
public static byte[] swapInts(byte b[], int off, int len) {
    checkLength(len, 4);
    final int end = off + len;
    for (int base = off; base < end; base += 4) {
        // Mirror the four bytes of the word around its midpoint.
        swap(b, base, base + 3);
        swap(b, base + 1, base + 2);
    }
    return b;
}
// Byte-swapping a big-endian float bit pattern must yield its little-endian
// counterpart.
@Test
public void testSwapInts() {
    assertArrayEquals(FLOAT_PI_LE , ByteUtils.swapInts(FLOAT_PI_BE.clone(), 0, FLOAT_PI_BE.length));
}
/**
 * Consumes one JSON-encoded {@code BugPatternInstance} line and writes the
 * corresponding Markdown wiki page to the output directory, merging in an
 * optional side-car explanation file and, when enabled, a YAML front-matter
 * header. Always returns true so the caller keeps feeding lines.
 */
@Override
public boolean processLine(String line) throws IOException {
    BugPatternInstance pattern = new Gson().fromJson(line, BugPatternInstance.class);
    pattern.severity = severityRemapper.apply(pattern);
    result.add(pattern);

    // replace spaces in filename with underscores
    Path checkPath = Paths.get(pattern.name.replace(' ', '_') + ".md");

    try (Writer writer = Files.newBufferedWriter(outputDir.resolve(checkPath), UTF_8)) {
        // load side-car explanation file, if it exists
        Path sidecarExplanation = explanationDir.resolve(checkPath);
        if (Files.exists(sidecarExplanation)) {
            if (!pattern.explanation.isEmpty()) {
                // A pattern may document its explanation in exactly one place.
                throw new AssertionError(
                    String.format(
                        "%s specifies an explanation via @BugPattern and side-car", pattern.name));
            }
            pattern.explanation = new String(Files.readAllBytes(sidecarExplanation), UTF_8).trim();
        }

        // Construct an appropriate page for this {@code BugPattern}. Include altNames if
        // there are any, and explain the correct way to suppress.
        ImmutableMap.Builder<String, Object> templateData =
            ImmutableMap.<String, Object>builder()
                .put("tags", Joiner.on(", ").join(pattern.tags))
                .put("severity", pattern.severity)
                .put("name", pattern.name)
                .put("className", pattern.className)
                .put("summary", pattern.summary.trim())
                .put("altNames", Joiner.on(", ").join(pattern.altNames))
                .put("explanation", pattern.explanation.trim());
        if (baseUrl != null) {
            templateData.put("baseUrl", baseUrl);
        }
        if (generateFrontMatter) {
            // Front-matter map is rendered as YAML between "---" fences.
            ImmutableMap<String, String> frontmatterData =
                ImmutableMap.<String, String>builder()
                    .put("title", pattern.name)
                    .put("summary", pattern.summary)
                    .put("layout", "bugpattern")
                    .put("tags", Joiner.on(", ").join(pattern.tags))
                    .put("severity", pattern.severity.toString())
                    .buildOrThrow();
            DumperOptions options = new DumperOptions();
            options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK);
            Yaml yaml =
                new Yaml(
                    new SafeConstructor(new LoaderOptions()),
                    new Representer(new DumperOptions()),
                    options);
            Writer yamlWriter = new StringWriter();
            yamlWriter.write("---\n");
            yaml.dump(frontmatterData, yamlWriter);
            yamlWriter.write("---\n");
            templateData.put("frontmatter", yamlWriter.toString());
        }
        if (pattern.documentSuppression) {
            String suppressionString;
            if (pattern.suppressionAnnotations.length == 0) {
                suppressionString = "This check may not be suppressed.";
            } else {
                suppressionString =
                    pattern.suppressionAnnotations.length == 1
                        ? "Suppress false positives by adding the suppression annotation %s to the "
                            + "enclosing element."
                        : "Suppress false positives by adding one of these suppression annotations to "
                            + "the enclosing element: %s";
                suppressionString =
                    String.format(
                        suppressionString,
                        Arrays.stream(pattern.suppressionAnnotations)
                            .map((String anno) -> standardizeAnnotation(anno, pattern.name))
                            .collect(Collectors.joining(", ")));
            }
            templateData.put("suppression", suppressionString);
        }
        MustacheFactory mf = new DefaultMustacheFactory();
        Mustache mustache = mf.compile("com/google/errorprone/resources/bugpattern.mustache");
        mustache.execute(writer, templateData.buildOrThrow());
    }
    return true;
}
// Angle brackets in the summary/explanation must survive generation — the
// produced Markdown is compared against a golden file.
@Test
public void escapeAngleBracketsInSummary() throws Exception {
    // Create a BugPattern with angle brackets in the summary
    BugPatternInstance instance = new BugPatternInstance();
    instance.className = "com.google.errorprone.bugpatterns.DontDoThis";
    instance.name = "DontDoThis";
    instance.summary = "Don't do this; do List<Foo> instead";
    instance.explanation = "This is a bad idea, you want `List<Foo>` instead";
    instance.altNames = new String[0];
    instance.tags = new String[] {"LikelyError"};
    instance.severity = SeverityLevel.ERROR;
    instance.suppressionAnnotations = new String[] {"java.lang.SuppressWarnings.class"};

    // Write markdown file
    BugPatternFileGenerator generator =
        new BugPatternFileGenerator(
            wikiDir, explanationDirBase, false, null, input -> input.severity);
    generator.processLine(new Gson().toJson(instance));
    String expected =
        CharStreams.toString(
            new InputStreamReader(
                getClass().getResourceAsStream("testdata/DontDoThis_nofrontmatter_gfm.md"), UTF_8));
    String actual = new String(Files.readAllBytes(wikiDir.resolve("DontDoThis.md")), UTF_8);
    assertThat(actual.trim()).isEqualTo(expected.trim());
}
/**
 * Executes an EXPLAIN statement synchronously by delegating to
 * {@code ExplainExecutor} and wrapping the entity as a handled response.
 */
public static StatementExecutorResponse execute(
    final ConfiguredStatement<Explain> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
    return StatementExecutorResponse.handled(Optional
        .of(ExplainExecutor.explain(
            serviceContext,
            statement,
            executionContext,
            sessionProperties)));
}
// EXPLAIN on a non-query statement (SHOW TOPICS) must be rejected with a
// KsqlException explaining that only queries can be explained.
@Test
public void shouldFailOnNonQueryExplain() {
    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> CustomExecutors.EXPLAIN.execute(
            engine.configure("Explain SHOW TOPICS;"),
            sessionProperties,
            engine.getEngine(),
            engine.getServiceContext()
        )
    );

    // Then:
    assertThat(e.getMessage(), containsString(
        "The provided statement does not run a ksql query"));
}
/**
 * Validates a username/password pair against the configured credentials,
 * memoizing both positive and negative results so the expensive key
 * derivation runs at most once per distinct pair.
 * NOTE(review): the negative cache appears unbounded here — confirm it is
 * size-limited elsewhere, otherwise repeated bad guesses grow memory.
 */
@Override
public boolean checkCredentials(String username, String password) {
    // Nulls can never match; fail fast without touching the caches.
    if (username == null || password == null) {
        return false;
    }

    Credentials credentials = new Credentials(username, password);
    if (validCredentialsCache.contains(credentials)) {
        return true;
    } else if (invalidCredentialsCache.contains(credentials)) {
        return false;
    }

    // Cache miss: derive the hash with the configured parameters and compare.
    boolean isValid =
        this.username.equals(username) &&
        this.passwordHash.equals(
            generatePasswordHash(
                algorithm, salt, iterations, keyLength, password));

    if (isValid) {
        validCredentialsCache.add(credentials);
    } else {
        invalidCredentialsCache.add(credentials);
    }

    return isValid;
}
// Exhaustive username/password matrix against a fixed PBKDF2WithHmacSHA512
// hash: only the exact valid pair may authenticate.
@Test
public void testPBKDF2WithHmacSHA512() throws Exception {
    String algorithm = "PBKDF2WithHmacSHA512";
    int iterations = 1000;
    int keyLength = 128;
    // Pre-computed hash of VALID_PASSWORD for the parameters above.
    String hash = "07:6F:E2:27:9B:CA:48:66:9B:13:9E:02:9C:AE:FC:E4:1A:2F:0F:E6:48:A3:FF:8E:D2:30:59:68:12:A6:29:34:FC:99:29:8A:98:65:AE:4B:05:7C:B6:83:A4:83:C0:32:E4:90:61:1D:DD:2E:53:17:01:FF:6A:64:48:B2:AA:22:DE:B3:BC:56:08:C6:66:EC:98:F8:96:8C:1B:DA:B2:F2:2A:6C:22:8E:19:CC:B2:62:55:3E:BE:DC:C7:58:36:9D:92:CF:D7:D2:A1:6D:8F:DC:DE:8E:E9:36:D4:E7:2D:0A:6D:A1:B8:56:0A:53:BB:17:E2:D5:DE:A0:48:51:FC:33";

    PBKDF2Authenticator PBKDF2Authenticator = new PBKDF2Authenticator(
        "/", VALID_USERNAME, hash, algorithm, SALT, iterations, keyLength);

    for (String username : TEST_USERNAMES) {
        for (String password : TEST_PASSWORDS) {
            boolean expectedIsAuthenticated = VALID_USERNAME.equals(username) && VALID_PASSWORD.equals(password);
            boolean actualIsAuthenticated = PBKDF2Authenticator.checkCredentials(username, password);
            assertEquals(expectedIsAuthenticated, actualIsAuthenticated);
        }
    }
}
/**
 * Translates an ONOS {@code Group} into a P4Runtime action profile group,
 * validating the group type, the action profile referenced by the group key,
 * one-shot table restrictions and the maximum group size, then translating
 * each bucket into an action profile member.
 *
 * @throws PiTranslationException if the group cannot be mapped onto the pipeconf
 */
static PiActionProfileGroup translate(Group group, PiPipeconf pipeconf, Device device)
        throws PiTranslationException {

    if (!SUPPORTED_GROUP_TYPES.contains(group.type())) {
        throw new PiTranslationException(format(
                "group type %s not supported", group.type()));
    }

    // Get action profile from group key.
    // TODO: define proper field in group class.
    if (!(group.appCookie() instanceof PiGroupKey)) {
        throw new PiTranslationException(
                "group app cookie is not PI (class should be PiGroupKey)");
    }
    final PiGroupKey groupKey = (PiGroupKey) group.appCookie();
    final PiActionProfileId actionProfileId = groupKey.actionProfileId();

    // Check validity of action profile against pipeconf.
    final PiActionProfileModel actionProfileModel = pipeconf.pipelineModel()
            .actionProfiles(actionProfileId)
            .orElseThrow(() -> new PiTranslationException(format(
                    "no such action profile '%s'", actionProfileId)));
    if (!actionProfileModel.hasSelector()) {
        throw new PiTranslationException(format(
                "action profile '%s' does not support dynamic selection",
                actionProfileId));
    }

    // Check if the table associated with the action profile supports only
    // one-shot action profile programming.
    boolean isTableOneShot = actionProfileModel.tables().stream()
            .map(tableId -> pipeconf.pipelineModel().table(tableId))
            .allMatch(piTableModel -> piTableModel.isPresent()
                    && piTableModel.get().oneShotOnly());
    if (isTableOneShot) {
        throw new PiTranslationException(format(
                "Table associated to action profile '%s' supports only one-shot action profile programming",
                actionProfileId));
    }

    // Check group validity.
    if (actionProfileModel.maxGroupSize() > 0
            && group.buckets().buckets().size() > actionProfileModel.maxGroupSize()) {
        throw new PiTranslationException(format(
                "too many buckets, max group size for action profile '%s' is %d",
                actionProfileId, actionProfileModel.maxGroupSize()));
    }

    // If not INDIRECT, we set the maximum group size as specified in the
    // model, however this might be highly inefficient for some HW targets
    // which pre-allocate resources for the whole group.
    final int maxGroupSize = group.type() == GroupDescription.Type.INDIRECT
            ? 1 : actionProfileModel.maxGroupSize();

    final PiActionProfileGroup.Builder piActionGroupBuilder = PiActionProfileGroup.builder()
            .withId(PiActionProfileGroupId.of(group.id().id()))
            .withActionProfileId(groupKey.actionProfileId())
            .withMaxSize(maxGroupSize);

    // Translate group buckets to PI group members
    final PiPipelineInterpreter interpreter = getInterpreterOrNull(device, pipeconf);
    short bucketIdx = 0;
    for (GroupBucket bucket : group.buckets().buckets()) {
        /*
        FIXME: the way member IDs are computed can cause collisions!
        Problem: In P4Runtime action profile members, i.e. action buckets,
        are associated to a numeric ID chosen at member insertion time. This
        ID must be unique for the whole action profile (i.e. the group table
        in OpenFlow). In ONOS, GroupBucket doesn't specify any ID.

        Solutions:
        - Change GroupBucket API to force application wanting to perform
        group operations to specify a member id.
        - Maintain state to dynamically allocate/deallocate member IDs, e.g.
        in a dedicated service, or in a P4Runtime Group Provider.

        Hack: Statically derive member ID by combining groupId and position
        of the bucket in the list.
        */
        final int memberId = Objects.hash(group.id(), bucketIdx);
        if (memberId == 0) {
            throw new PiTranslationException(
                    "GroupBucket produces PiActionProfileMember " +
                            "with invalid ID 0");
        }
        bucketIdx++;
        final PiTableAction tableAction = translateTreatment(
                bucket.treatment(), interpreter, groupKey.tableId(),
                pipeconf.pipelineModel());
        if (tableAction == null) {
            throw new PiTranslationException(
                    "bucket treatment translator returned null");
        }
        if (tableAction.type() != ACTION) {
            throw new PiTranslationException(format(
                    "action of type '%s' cannot be used in action profile members",
                    tableAction.type()));
        }
        final PiActionProfileMember member = PiActionProfileMember.builder()
                .forActionProfile(groupKey.actionProfileId())
                .withId(PiActionProfileMemberId.of(memberId))
                .withAction((PiAction) tableAction)
                .build();
        // NOTE Indirect groups have weight set to -1 which is not supported
        // by P4RT - setting to 1 to avoid problems with the p4rt server.
        final int weight = group.type() == GroupDescription.Type.INDIRECT
                ? 1 : bucket.weight();
        piActionGroupBuilder.addMember(member, weight);
    }
    return piActionGroupBuilder.build();
}
// Translating the same SELECT group twice must be deterministic (equal
// results), and id / action profile / members must match the expectations.
@Test
public void testTranslateGroups() throws Exception {
    PiActionProfileGroup piGroup1 = PiGroupTranslatorImpl.translate(SELECT_GROUP, pipeconf, null);
    PiActionProfileGroup piGroup2 = PiGroupTranslatorImpl.translate(SELECT_GROUP, pipeconf, null);

    new EqualsTester()
        .addEqualityGroup(piGroup1, piGroup2)
        .testEquals();

    assertThat("Group ID must be equal", piGroup1.id().id(), is(equalTo(GROUP_ID.id())));
    assertThat("Action profile ID must be equal", piGroup1.actionProfile(), is(equalTo(INGRESS_WCMP_CONTROL_WCMP_SELECTOR)));

    // members installed
    Collection<PiActionProfileGroup.WeightedMember> weightedMembers = piGroup1.members();
    Collection<PiActionProfileMember> memberInstances = weightedMembers.stream()
        .map(PiActionProfileGroup.WeightedMember::instance)
        .filter(Objects::nonNull)
        .collect(Collectors.toSet());
    assertThat("The number of group members must be equal",
        piGroup1.members().size(), is(expectedWeightedMembers.size()));
    assertThat("Group weighted members must be equal",
        weightedMembers.containsAll(expectedWeightedMembers)
            && expectedWeightedMembers.containsAll(weightedMembers));
    assertThat("Group member instances must be equal",
        memberInstances.containsAll(expectedMemberInstances)
            && expectedMemberInstances.containsAll(memberInstances));
}
/**
 * Applies any pending report coverage matching the given line builder.
 * Always succeeds: returns an empty Optional (no read error possible here).
 */
@Override
public Optional<ReadError> read(DbFileSources.Line.Builder lineBuilder) {
    final ScannerReport.LineCoverage lineCoverage = getNextLineCoverageIfMatchLine(lineBuilder.getLine());
    if (lineCoverage == null) {
        return Optional.empty();
    }
    processCoverage(lineBuilder, lineCoverage);
    // Clear the cached entry so it is not reapplied to later lines.
    coverage = null;
    return Optional.empty();
}
// An uncovered line (hits=false) must still record line-hit data, with a hit
// count of zero, rather than leaving the builder untouched.
@Test
public void set_coverage_on_uncovered_lines() {
    CoverageLineReader computeCoverageLine = new CoverageLineReader(newArrayList(ScannerReport.LineCoverage.newBuilder()
            .setLine(1)
            .setHits(false)
            .build()).iterator());
    DbFileSources.Line.Builder lineBuilder = DbFileSources.Data.newBuilder().addLinesBuilder().setLine(1);
    assertThat(computeCoverageLine.read(lineBuilder)).isEmpty();
    assertThat(lineBuilder.hasLineHits()).isTrue();
    assertThat(lineBuilder.getLineHits()).isZero();
}
/**
 * Returns the matrix entry at row {@code i}, column {@code j}, reading from the
 * flat backing array {@code A} via the {@code index(i, j)} layout mapping.
 */
@Override
public double get(int i, int j) {
    return A[index(i, j)];
}
// Spot-checks diagonal, off-diagonal and zero entries of the fixture matrix.
@Test
public void testGet() {
    System.out.println("get");
    assertEquals(0.9, matrix.get(0, 0), 1E-7);
    assertEquals(0.8, matrix.get(2, 2), 1E-7);
    assertEquals(0.5, matrix.get(1, 1), 1E-7);
    assertEquals(0.0, matrix.get(2, 0), 1E-7);
    assertEquals(0.0, matrix.get(0, 2), 1E-7);
    assertEquals(0.4, matrix.get(0, 1), 1E-7);
}
/**
 * Decorates a reactive return value (Mono or Flux) with the given TimeLimiter.
 *
 * @param proceedingJoinPoint the intercepted invocation; proceeded exactly once
 * @param timeLimiter         limiter applied to the reactive pipeline
 * @param methodName          used only for error reporting
 * @return the decorated Mono/Flux
 * @throws IllegalReturnTypeException if the method returns a non-reactive type
 * @throws Throwable                  anything thrown by the intercepted method
 */
@Override
public Object handle(ProceedingJoinPoint proceedingJoinPoint, TimeLimiter timeLimiter, String methodName)
        throws Throwable {
    Object returnValue = proceedingJoinPoint.proceed();
    if (returnValue == null) {
        // Fail fast with context instead of an opaque NPE from returnValue.getClass().
        throw new NullPointerException("Method " + methodName + " returned null instead of a Mono/Flux");
    }
    // instanceof is the idiomatic (and equivalent) form of Class.isAssignableFrom(getClass()).
    if (returnValue instanceof Flux) {
        return ((Flux<?>) returnValue).transformDeferred(TimeLimiterOperator.of(timeLimiter));
    }
    if (returnValue instanceof Mono) {
        return ((Mono<?>) returnValue).transformDeferred(TimeLimiterOperator.of(timeLimiter));
    }
    throw new IllegalReturnTypeException(returnValue.getClass(), methodName, "Reactor expects Mono/Flux.");
}
// Both Mono and Flux return values must be accepted and decorated (non-null result).
@Test
public void testReactorTypes() throws Throwable {
    TimeLimiter timeLimiter = TimeLimiter.ofDefaults("test");
    when(proceedingJoinPoint.proceed()).thenReturn(Mono.just("Test"));
    assertThat(reactorTimeLimiterAspectExt.handle(proceedingJoinPoint, timeLimiter, "testMethod")).isNotNull();
    when(proceedingJoinPoint.proceed()).thenReturn(Flux.just("Test"));
    assertThat(reactorTimeLimiterAspectExt.handle(proceedingJoinPoint, timeLimiter, "testMethod")).isNotNull();
}
/**
 * Renders a parsed config value back to its canonical string form.
 * Null values map to null; an unknown {@code type} (or a null type) falls back
 * to {@code toString()}, except that unrecognised enum constants are rejected.
 */
public static String convertToString(Object parsedValue, Type type) {
    if (parsedValue == null) {
        return null;
    }
    if (type == null) {
        return parsedValue.toString();
    }
    switch (type) {
        case LIST:
            // Comma-join the elements' string representations.
            List<?> items = (List<?>) parsedValue;
            return items.stream().map(Object::toString).collect(Collectors.joining(","));
        case CLASS:
            return ((Class<?>) parsedValue).getName();
        case BOOLEAN:
        case SHORT:
        case INT:
        case LONG:
        case DOUBLE:
        case STRING:
        case PASSWORD:
            // All scalar types serialise via toString().
            return parsedValue.toString();
        default:
            throw new IllegalStateException("Unknown type.");
    }
}
// INT values render via toString(); null stays null.
@Test
public void testConvertValueToStringInt() {
    assertEquals("2147483647", ConfigDef.convertToString(Integer.MAX_VALUE, Type.INT));
    assertNull(ConfigDef.convertToString(null, Type.INT));
}
/**
 * Transitions the buffer to FAILED unless it is already terminal, releasing
 * memory pressure. Idempotent: a second call on a terminal buffer is a no-op.
 */
@Override
public void fail() {
    // ignore fail if the buffer already in a terminal state.
    if (state.setIf(FAILED, oldState -> !oldState.isTerminal())) {
        memoryManager.setNoBlockOnFull();
        forceFreeMemory();
        // DO NOT destroy buffers or set no more pages.  The coordinator manages the teardown of failed queries.
    }
}
// A zero-sized buffer must be rejected regardless of declared buffer ids.
// Fixed: the fail() messages previously said "Expected IllegalStateException"
// while the catch blocks (correctly) expect IllegalArgumentException.
@Test
public void testInvalidConstructorArg() {
    try {
        createPartitionedBuffer(createInitialEmptyOutputBuffers(PARTITIONED).withBuffer(FIRST, 0).withNoMoreBufferIds(), new DataSize(0, BYTE));
        fail("Expected IllegalArgumentException");
    }
    catch (IllegalArgumentException ignored) {
    }
    try {
        createPartitionedBuffer(createInitialEmptyOutputBuffers(PARTITIONED), new DataSize(0, BYTE));
        fail("Expected IllegalArgumentException");
    }
    catch (IllegalArgumentException ignored) {
    }
}
/**
 * Returns true when the argument array is null/empty or when at least one of
 * its elements is empty; false only if every element is non-empty.
 */
public static boolean hasEmpty(CharSequence... strList) {
    if (ArrayUtil.isEmpty(strList)) {
        return true;
    }
    for (int i = 0; i < strList.length; i++) {
        if (isEmpty(strList[i])) {
            return true;
        }
    }
    return false;
}
// A single empty string must be reported as "has empty".
@Test
public void hasEmpty() {
    String strings = "";
    Assert.assertTrue(StringUtil.hasEmpty(strings));
}
/**
 * Requests analytics data of the given type/metric from the plugin and stamps
 * the response with the plugin's static-assets root so views can be resolved.
 *
 * @param pluginId plugin to query
 * @param type     analytics category (e.g. "pipeline")
 * @param metricId metric identifier within the category
 * @param params   free-form parameters forwarded to the plugin
 */
public AnalyticsData getAnalytics(String pluginId, String type, String metricId, Map params) {
    return pluginRequestHelper.submitRequest(pluginId, REQUEST_GET_ANALYTICS, new DefaultPluginInteractionCallback<>() {
        @Override
        public String requestBody(String resolvedExtensionVersion) {
            return getMessageConverter(resolvedExtensionVersion).getAnalyticsRequestBody(type, metricId, params);
        }

        @Override
        public AnalyticsData onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
            AnalyticsData analyticsData = getMessageConverter(resolvedExtensionVersion).getAnalyticsFromResponseBody(responseBody);
            // Asset root is resolved per-plugin so view paths become absolute.
            analyticsData.setAssetRoot(getCurrentStaticAssetsPath(pluginId));
            return analyticsData;
        }
    });
}
// End-to-end through the extension: the request body sent to the plugin and
// the parsed response (data, view path, full view path under the asset root).
@Test
public void shouldGetAnalytics() throws Exception {
    String responseBody = "{ \"view_path\": \"path/to/view\", \"data\": \"{}\" }";
    AnalyticsPluginInfo pluginInfo = new AnalyticsPluginInfo(GoPluginDescriptor.builder().id(PLUGIN_ID).build(), null, null, null);
    pluginInfo.setStaticAssetsPath("/assets/root");
    metadataStore.setPluginInfo(pluginInfo);
    when(pluginManager.submitTo(eq(PLUGIN_ID), eq(ANALYTICS_EXTENSION), requestArgumentCaptor.capture())).thenReturn(new DefaultGoPluginApiResponse(SUCCESS_RESPONSE_CODE, responseBody));
    AnalyticsData pipelineAnalytics = analyticsExtension.getAnalytics(PLUGIN_ID, "pipeline", "pipeline_with_highest_wait_time", Map.of("pipeline_name", "test_pipeline"));
    String expectedRequestBody = "{" +
            "\"type\": \"pipeline\"," +
            "\"id\": \"pipeline_with_highest_wait_time\"," +
            " \"params\": {\"pipeline_name\": \"test_pipeline\"}}";
    assertRequest(requestArgumentCaptor.getValue(), PluginConstants.ANALYTICS_EXTENSION, "1.0", REQUEST_GET_ANALYTICS, expectedRequestBody);
    assertThat(pipelineAnalytics.getData(), is("{}"));
    assertThat(pipelineAnalytics.getViewPath(), is("path/to/view"));
    assertThat(pipelineAnalytics.getFullViewPath(), is("/assets/root/path/to/view"));
}
/**
 * Hybrid search over the sorted slice {@code a[fromIndex, toIndex)}: linear scan
 * for keys very close to the lower bound, binary search for small ranges, and
 * an interpolation step (with an epsilon window) otherwise. Returns the index
 * of the first element strictly greater than {@code key} (an "upper bound").
 * NOTE(review): correctness relies on the helpers linearSearch/binarySearch
 * sharing this upper-bound contract — confirm against their definitions.
 */
public static int interpolationSearch(int[] a, int fromIndex, int toIndex, int key) {
    int low = fromIndex;
    int lowVal = a[low];
    // Key near the low end: a short linear scan beats any bisection.
    if (key - lowVal < LINEAR_SEARCH_THRESHOLD_2) {
        return linearSearch(a, low, toIndex, key);
    }
    int high = toIndex - 1;
    int diff = high - low;
    // Small range: plain binary search.
    if (diff <= BINARY_SEARCH_THRESHOLD) {
        return binarySearch(a, low, toIndex, key);
    }
    int highVal = a[high];
    do {
        if (key == lowVal) {
            return low + 1;
        }
        if (key >= highVal) {
            return high + 1;
        }
        // Linear interpolation of the key position, clamped to an epsilon
        // window (diff/16) around the estimate to bound worst-case behavior.
        int mean = (int) (diff * (long) (key - lowVal) / (highVal - lowVal));
        int eps = diff >>> 4;
        int lowMid = low + Math.max(0, mean - eps);
        int highMid = low + Math.min(diff, mean + eps);
        assert lowMid <= highMid;
        assert lowMid >= low;
        assert highMid <= high;
        if (a[lowMid] > key) {
            high = lowMid;
            highVal = a[lowMid];
        } else if (a[highMid] <= key) {
            low = highMid;
            lowVal = a[highMid];
        } else {
            low = lowMid;
            lowVal = a[lowMid];
            high = highMid;
            highVal = a[highMid];
        }
        assert low <= high;
        diff = high - low;
    } while (diff >= BINARY_SEARCH_THRESHOLD);
    // Range shrank below the threshold: finish with binary search.
    return binarySearch(a, low, high + 1, key);
}
// Upper-bound semantics: result is the index of the first element > key,
// and values beyond the last element map to the array length.
@Test
void require_that_search_find_index_of_first_element_higher() {
    int[] values = {2, 8, 4000, 4001, 4100, 10000, 10000000};
    int length = values.length;
    assertEquals(0, PostingListSearch.interpolationSearch(values, 0, length, 1));
    for (int value = 3; value < 8; value++) {
        assertEquals(1, PostingListSearch.interpolationSearch(values, 0, length, value));
    }
    assertEquals(2, PostingListSearch.interpolationSearch(values, 0, length, 8));
    assertEquals(values.length, PostingListSearch.interpolationSearch(values, 0, length, 10000000));
    assertEquals(values.length, PostingListSearch.interpolationSearch(values, 0, length, 10000001));
}
/**
 * Folds a batch of change records by primary key: consecutive INSERT/UPDATE/
 * DELETE operations on the same key are merged into a single record.
 * Records with any other operation type are ignored.
 */
public List<DataRecord> merge(final List<DataRecord> dataRecords) {
    Map<DataRecord.Key, DataRecord> mergedRecords = new HashMap<>();
    for (DataRecord dataRecord : dataRecords) {
        switch (dataRecord.getType()) {
            case INSERT:
                mergeInsert(dataRecord, mergedRecords);
                break;
            case UPDATE:
                mergeUpdate(dataRecord, mergedRecords);
                break;
            case DELETE:
                mergeDelete(dataRecord, mergedRecords);
                break;
            default:
                break;
        }
    }
    return new ArrayList<>(mergedRecords.values());
}
// DELETE followed by INSERT on the same key collapses to just the INSERT record.
@Test
void assertDeleteBeforeInsert() {
    DataRecord beforeDataRecord = mockDeleteDataRecord(1, 10, 50);
    DataRecord afterDataRecord = mockInsertDataRecord(1, 10, 100);
    Collection<DataRecord> actual = groupEngine.merge(Arrays.asList(beforeDataRecord, afterDataRecord));
    assertThat(actual.size(), is(1));
    assertThat(actual.iterator().next(), sameInstance(afterDataRecord));
}
/**
 * Runs the pipeline by delegating to the wrapped PortableRunner, logging the
 * environment type and job endpoint first for debuggability.
 */
@Override
public PipelineResult run(Pipeline pipeline) {
    LOG.info(
        "running Pipeline using {}: defaultEnvironmentType: {}, jobEndpoint: {}",
        PortableRunner.class.getName(),
        prismPipelineOptions.getDefaultEnvironmentType(),
        prismPipelineOptions.getJobEndpoint());
    return internal.run(pipeline);
}
// A pipeline over a bounded source must terminate in the DONE state.
@Test
public void givenBoundedSource_runsUntilDone() {
    Pipeline pipeline = Pipeline.create(options());
    pipeline.apply(Create.of(1, 2, 3));
    PipelineResult.State state = pipeline.run().waitUntilFinish();
    assertThat(state).isEqualTo(PipelineResult.State.DONE);
}
/**
 * Returns the process-wide singleton, created lazily and thread-safely via
 * the initialization-on-demand holder idiom.
 */
public static ApplicationConfigCache getInstance() {
    return ApplicationConfigCacheInstance.INSTANCE;
}
// The singleton accessor must never return null.
@Test
public void getInstance() {
    assertNotNull(this.applicationConfigCache);
}
/**
 * Applies a LogLevelConfig to the logback root context: property-driven levels
 * first, then hardcoded levels, then loggers that are OFF unless global TRACE
 * is enabled. Rejects configs whose root logger name is not the expected one.
 *
 * @throws IllegalArgumentException if the config targets a different root logger
 */
public LoggerContext apply(LogLevelConfig logLevelConfig, Props props) {
    if (!ROOT_LOGGER_NAME.equals(logLevelConfig.getRootLoggerName())) {
        throw new IllegalArgumentException("Value of LogLevelConfig#rootLoggerName must be \"" + ROOT_LOGGER_NAME + "\"");
    }
    LoggerContext rootContext = getRootContext();
    logLevelConfig.getConfiguredByProperties().forEach((key, value) -> applyLevelByProperty(props, rootContext.getLogger(key), value));
    logLevelConfig.getConfiguredByHardcodedLevel().forEach((key, value) -> applyHardcodedLevel(rootContext, key, value));
    Level propertyValueAsLevel = getPropertyValueAsLevel(props, LOG_LEVEL.getKey());
    // Certain noisy loggers are forced OFF unless the global level is TRACE.
    boolean traceGloballyEnabled = propertyValueAsLevel == Level.TRACE;
    logLevelConfig.getOffUnlessTrace().forEach(logger -> applyHardUnlessTrace(rootContext, logger, traceGloballyEnabled));
    return rootContext;
}
// A process-specific property (sonar.log.level.web) must drive the root level.
@Test
public void apply_sets_logger_to_process_property_if_set() {
    LogLevelConfig config = newLogLevelConfig().rootLevelFor(WEB_SERVER).build();
    props.set("sonar.log.level.web", "DEBUG");
    LoggerContext context = underTest.apply(config, props);
    assertThat(context.getLogger(ROOT_LOGGER_NAME).getLevel()).isEqualTo(Level.DEBUG);
}
/**
 * Intentional no-op: this implementation ignores timer-removal requests.
 * The companion test relies on this meaning no event is ever emitted.
 */
@Override
public void removeTimer(String eventName) {
}
// After removeTimer, trackTimerEnd must not emit any event; the callback fails
// the test if one arrives.
@Test
public void removeTimer() {
    mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
        @Override
        public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
            Assert.fail();
            return false;
        }
    });
    mSensorsAPI.trackTimer("TestTimerEvent", TimeUnit.SECONDS);
    mSensorsAPI.removeTimer("TestTimerEvent");
    mSensorsAPI.trackTimerEnd("TestTimerEvent");
}
/**
 * Parses the query part of a URI into a name/value map, with RAW(...) value
 * handling disabled (delegates to the two-argument overload with false).
 */
public static Map<String, Object> parseQuery(String uri) throws URISyntaxException {
    return parseQuery(uri, false);
}
// RAW(...) values must be kept verbatim — including '?', ')' followed by more
// text, '&' inside the token, and percent sequences — without decoding.
@Test
public void testParseQuery() throws Exception {
    Map<String, Object> map = URISupport.parseQuery("password=secret&serviceName=somechat");
    assertEquals(2, map.size());
    assertEquals("secret", map.get("password"));
    assertEquals("somechat", map.get("serviceName"));

    map = URISupport.parseQuery("password=RAW(++?w0rd)&serviceName=somechat");
    assertEquals(2, map.size());
    assertEquals("RAW(++?w0rd)", map.get("password"));
    assertEquals("somechat", map.get("serviceName"));

    map = URISupport.parseQuery("password=RAW(++?)w&rd)&serviceName=somechat");
    assertEquals(2, map.size());
    assertEquals("RAW(++?)w&rd)", map.get("password"));
    assertEquals("somechat", map.get("serviceName"));

    map = URISupport.parseQuery("password=RAW(%2520w&rd)&serviceName=somechat");
    assertEquals(2, map.size());
    assertEquals("RAW(%2520w&rd)", map.get("password"));
    assertEquals("somechat", map.get("serviceName"));
}
/**
 * Writes the byte array to the ASN.1 output stream verbatim — no tag or
 * length prefix is added here.
 */
@Override
public void serialize(Asn1OutputStream out, byte[] obj) {
    out.write(obj);
}
// A byte array must round-trip through the converter unchanged.
@Test
public void shouldSerialize() {
    assertArrayEquals(
        new byte[] { 0x31 },
        serialize(new ByteArrayConverter(), byte[].class, new byte[] { 0x31 })
    );
}
/**
 * Reports whether a trigger was resolved during construction; a null trigger
 * means no suitable input method is installed.
 */
public boolean isInstalled() {
    return mTrigger != null;
}
// With only keyboard/handwriting IMEs present (no "voice" mode), the trigger
// must report not installed.
@Test
public void testImeNotInstalledWhenNoVoice() {
    addInputMethodInfo(List.of("keyboard", "keyboard", "handwriting"));
    addInputMethodInfo(List.of("handwriting"));
    addInputMethodInfo(List.of("handwriting", "keyboard", "keyboard", "keyboard"));
    Assert.assertFalse(ImeTrigger.isInstalled(mMockInputMethodService));
}
/**
 * Extracts the output TypeInformation of a MapFunction given its input type.
 * Delegates to the four-argument overload with no function name and without
 * allowing a missing-type result.
 */
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(
        MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
    return getMapReturnTypes(mapInterface, inType, null, false);
}
// A FieldDuplicator over Tuple2<Float, Float> must be extracted as
// Tuple2<Tuple2<Float, Float>, Tuple2<Float, Float>>, with FLOAT leaves.
@Test
void testFunctionInputInOutputMultipleTimes2() {
    RichMapFunction<Tuple2<Float, Float>, ?> function =
            new FieldDuplicator<Tuple2<Float, Float>>();

    TypeInformation<?> ti =
            TypeExtractor.getMapReturnTypes(
                    function,
                    new TupleTypeInfo<Tuple2<Float, Float>>(
                            BasicTypeInfo.FLOAT_TYPE_INFO, BasicTypeInfo.FLOAT_TYPE_INFO));

    // should be
    // Tuple2<Tuple2<Float, Float>, Tuple2<Float, Float>>

    assertThat(ti.isTupleType()).isTrue();
    assertThat(ti.getArity()).isEqualTo(2);
    TupleTypeInfo<?> tti = (TupleTypeInfo<?>) ti;

    // 2nd nested level
    assertThat(tti.getTypeAt(0).isTupleType()).isTrue();
    TupleTypeInfo<?> tti2 = (TupleTypeInfo<?>) tti.getTypeAt(0);
    assertThat(tti2.getTypeAt(0)).isEqualTo(BasicTypeInfo.FLOAT_TYPE_INFO);
    assertThat(tti2.getTypeAt(1)).isEqualTo(BasicTypeInfo.FLOAT_TYPE_INFO);
    assertThat(tti.getTypeAt(0).isTupleType()).isTrue();
    TupleTypeInfo<?> tti3 = (TupleTypeInfo<?>) tti.getTypeAt(1);
    assertThat(tti3.getTypeAt(0)).isEqualTo(BasicTypeInfo.FLOAT_TYPE_INFO);
    assertThat(tti3.getTypeAt(1)).isEqualTo(BasicTypeInfo.FLOAT_TYPE_INFO);
}
/**
 * One-time setup of outbound-bandwidth control: initializes the net_cls cgroup
 * controller, reads device/bandwidth/strict-mode settings from configuration,
 * derives a per-container soft limit, and bootstraps the traffic controller.
 * Returns null: no privileged operations need to be run by the caller.
 */
@Override
public List<PrivilegedOperation> bootstrap(Configuration configuration)
    throws ResourceHandlerException {
  conf = configuration;
  //We'll do this inline for the time being - since this is a one time
  //operation. At some point, LCE code can be refactored to batch mount
  //operations across multiple controllers - cpu, net_cls, blkio etc
  cGroupsHandler
      .initializeCGroupController(CGroupsHandler.CGroupController.NET_CLS);
  device = conf.get(YarnConfiguration.NM_NETWORK_RESOURCE_INTERFACE,
      YarnConfiguration.DEFAULT_NM_NETWORK_RESOURCE_INTERFACE);
  strictMode = configuration.getBoolean(YarnConfiguration
      .NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE, YarnConfiguration
      .DEFAULT_NM_LINUX_CONTAINER_CGROUPS_STRICT_RESOURCE_USAGE);
  rootBandwidthMbit = conf.getInt(YarnConfiguration
      .NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT, YarnConfiguration
      .DEFAULT_NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_MBIT);
  // YARN's share defaults to the root (device) bandwidth when unset.
  yarnBandwidthMbit = conf.getInt(YarnConfiguration
      .NM_NETWORK_RESOURCE_OUTBOUND_BANDWIDTH_YARN_MBIT, rootBandwidthMbit);
  // Even split of the YARN share across the maximum container count.
  containerBandwidthMbit = (int) Math.ceil((double) yarnBandwidthMbit /
      MAX_CONTAINER_COUNT);
  StringBuilder logLine = new StringBuilder("strict mode is set to :")
      .append(strictMode).append(System.lineSeparator());
  if (strictMode) {
    logLine.append("container bandwidth will be capped to soft limit.")
        .append(System.lineSeparator());
  } else {
    logLine.append(
        "containers will be allowed to use spare YARN bandwidth.")
        .append(System.lineSeparator());
  }
  logLine
      .append("containerBandwidthMbit soft limit (in mbit/sec) is set to : ")
      .append(containerBandwidthMbit);
  LOG.info(logLine.toString());
  trafficController.bootstrap(device, rootBandwidthMbit, yarnBandwidthMbit);
  return null;
}
// Full handler life cycle (bootstrap -> preStart -> postComplete) must not
// raise a ResourceHandlerException.
@Test
public void testLifeCycle() {
    TrafficController trafficControllerSpy = spy(new TrafficController(conf,
        privilegedOperationExecutorMock));
    TrafficControlBandwidthHandlerImpl handlerImpl = new
        TrafficControlBandwidthHandlerImpl(privilegedOperationExecutorMock,
        cGroupsHandlerMock, trafficControllerSpy);

    try {
      handlerImpl.bootstrap(conf);
      testPreStart(trafficControllerSpy, handlerImpl);
      testPostComplete(trafficControllerSpy, handlerImpl);
    } catch (ResourceHandlerException e) {
      LOG.error("Unexpected exception: " + e);
      Assert.fail("Caught unexpected ResourceHandlerException!");
    }
}
/**
 * Builds the BigQuery export schema transform from its typed configuration.
 */
@Override
protected SchemaTransform from(BigQueryExportReadSchemaTransformConfiguration configuration) {
    return new BigQueryExportSchemaTransform(configuration);
}
// Query-based configurations are checked via DisplayData of the derived
// TypedRead (FakeBigQueryServices could not exercise the query path directly).
@Test
public void testQuery() {
    // Previous attempts using FakeBigQueryServices with a Read configuration using a query failed.
    // For now, we test using DisplayData and the toTypedRead method.
    List<Pair<BigQueryExportReadSchemaTransformConfiguration.Builder, TypedRead<TableRow>>> cases =
        Arrays.asList(
            Pair.of(
                BigQueryExportReadSchemaTransformConfiguration.builder().setQuery(QUERY),
                BigQueryIO.readTableRowsWithSchema().fromQuery(QUERY)),
            Pair.of(
                BigQueryExportReadSchemaTransformConfiguration.builder()
                    .setQuery(QUERY)
                    .setQueryLocation(LOCATION),
                BigQueryIO.readTableRowsWithSchema().fromQuery(QUERY).withQueryLocation(LOCATION)),
            Pair.of(
                BigQueryExportReadSchemaTransformConfiguration.builder()
                    .setQuery(QUERY)
                    .setUseStandardSql(true),
                BigQueryIO.readTableRowsWithSchema().fromQuery(QUERY).usingStandardSql()),
            Pair.of(
                BigQueryExportReadSchemaTransformConfiguration.builder()
                    .setQuery(QUERY)
                    .setUseStandardSql(false),
                BigQueryIO.readTableRowsWithSchema().fromQuery(QUERY)));

    for (Pair<BigQueryExportReadSchemaTransformConfiguration.Builder, TypedRead<TableRow>> caze :
        cases) {
      // Compare the expected TypedRead's display data with the transform's.
      Map<Identifier, Item> want = DisplayData.from(caze.getRight()).asMap();

      BigQueryExportReadSchemaTransformProvider provider =
          new BigQueryExportReadSchemaTransformProvider();
      BigQueryExportReadSchemaTransformConfiguration configuration = caze.getLeft().build();
      BigQueryExportSchemaTransform schemaTransform =
          (BigQueryExportSchemaTransform) provider.from(configuration);

      Map<Identifier, Item> got = DisplayData.from(schemaTransform.toTypedRead()).asMap();

      assertEquals(want, got);
    }
}
/** Returns the per-asset price recorded for this trade. */
public Num getPricePerAsset() {
    return pricePerAsset;
}
// A trade priced at NaN must fall back to the bar series' close price at the
// trade's index (bar 1 closes at 95).
@Test
public void testReturnBarSeriesCloseOnNaN() {
    MockBarSeries series = new MockBarSeries(DoubleNum::valueOf, 100, 95, 100, 80, 85, 130);
    Trade trade = new Trade(1, TradeType.BUY, NaN);
    assertNumEquals(DoubleNum.valueOf(95), trade.getPricePerAsset(series));
}
/**
 * Returns true when the given canonical class name denotes one of the map
 * types this module recognises (Map, HashMap, LinkedHashMap, TreeMap).
 * A null or unrecognised name yields false.
 */
public static boolean isMap(String className) {
    final Class<?>[] mapClasses = {Map.class, HashMap.class, LinkedHashMap.class, TreeMap.class};
    for (Class<?> mapClass : mapClasses) {
        if (mapClass.getCanonicalName().equals(className)) {
            return true;
        }
    }
    return false;
}
// Every entry in the mapValues fixture must be classified as a map type.
@Test
public void isMap() {
    assertThat(mapValues).allMatch(ScenarioSimulationSharedUtils::isMap);
}
/**
 * Splits a JSON array string into the serialized form of each element.
 * SQL NULL input yields SQL NULL output.
 */
@Udf
public List<String> items(@UdfParameter final String jsonItems) {
    if (jsonItems == null) {
        return null;
    }
    final List<JsonNode> nodes = UdfJsonMapper.readAsJsonArray(jsonItems);
    final List<String> serialized = new ArrayList<>(nodes.size());
    for (final JsonNode node : nodes) {
        serialized.add(node.toString());
    }
    return serialized;
}
// SQL NULL in must produce SQL NULL out.
@Test
public void shouldReturnNullForNull() {
    assertNull(udf.items(null));
}
/**
 * Appends the result body: pretty-printed JSON when parsing succeeded,
 * otherwise the superclass' raw-content rendering.
 */
@Override
public void printContent(StringBuilder sb) {
    if (!failedParsing && json != null) {
        sb.append("JSON: ");
        sb.append(json.toPrettyString());
    } else {
        super.printContent(sb);
    }
}
// Non-JSON content must be printed verbatim (no "JSON: " prefix).
@Test
void testNonJsonOutput() {
    JsonHttpResult result = new JsonHttpResult();
    result.setContent("Foo");
    StringBuilder sb = new StringBuilder();
    result.printContent(sb);
    assertEquals("Foo", sb.toString());
}
/** Returns the remote path string as stored, without normalization. */
public String getRemote() {
    return remote;
}
// Relative child paths with mixed '/' and '\' separators must be normalized
// to the parent's native separator on both Windows and Unix bases.
@Issue("JENKINS-13649")
@Test
public void multiSegmentRelativePaths() {
    VirtualChannel d = Mockito.mock(VirtualChannel.class);
    FilePath winPath = new FilePath(d, "c:\\app\\jenkins\\workspace");
    FilePath nixPath = new FilePath(d, "/opt/jenkins/workspace");

    assertEquals("c:\\app\\jenkins\\workspace\\foo\\bar\\manchu", new FilePath(winPath, "foo/bar/manchu").getRemote());
    assertEquals("c:\\app\\jenkins\\workspace\\foo\\bar\\manchu", new FilePath(winPath, "foo\\bar/manchu").getRemote());
    assertEquals("c:\\app\\jenkins\\workspace\\foo\\bar\\manchu", new FilePath(winPath, "foo\\bar\\manchu").getRemote());
    assertEquals("/opt/jenkins/workspace/foo/bar/manchu", new FilePath(nixPath, "foo\\bar\\manchu").getRemote());
    assertEquals("/opt/jenkins/workspace/foo/bar/manchu", new FilePath(nixPath, "foo/bar\\manchu").getRemote());
    assertEquals("/opt/jenkins/workspace/foo/bar/manchu", new FilePath(nixPath, "foo/bar/manchu").getRemote());
}
/**
 * Returns a description of this topology's structure. Synchronized because the
 * underlying builder may be mutated concurrently by other topology operations.
 */
public synchronized TopologyDescription describe() {
    return internalTopologyBuilder.describe();
}
// Overriding the default store type (in-memory) via topology config must not
// change the described topology structure, and the built topology must report
// no persistent local store.
@Test
public void timeWindowZeroArgCountWithTopologyConfigShouldPreserveTopologyStructure() {
    // override the default store into in-memory
    final StreamsBuilder builder = new StreamsBuilder(overrideDefaultStore(StreamsConfig.IN_MEMORY));
    builder.stream("input-topic")
        .groupByKey()
        .windowedBy(TimeWindows.of(ofMillis(1)))
        .count();

    final Topology topology = builder.build();
    final TopologyDescription describe = topology.describe();
    assertEquals(
        "Topology: my-topology:\n" +
            "   Sub-topology: 0\n" +
            "    Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
            "      --> KSTREAM-AGGREGATE-0000000002\n" +
            "    Processor: KSTREAM-AGGREGATE-0000000002 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
            "      --> none\n" +
            "      <-- KSTREAM-SOURCE-0000000000\n\n",
        describe.toString()
    );

    topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(false));
}
/**
 * Validates a DROP SCHEMA statement: every named schema must exist, and a
 * non-empty schema may only be dropped when CASCADE is specified.
 *
 * @throws SchemaNotFoundException     if a named schema does not exist
 * @throws DropNotEmptySchemaException if a non-empty schema lacks CASCADE
 */
@Override
public void validate(final SingleRule rule, final SQLStatementContext sqlStatementContext, final ShardingSphereDatabase database) {
    DropSchemaStatement dropSchemaStatement = (DropSchemaStatement) sqlStatementContext.getSqlStatement();
    boolean containsCascade = dropSchemaStatement.isContainsCascade();
    for (IdentifierValue each : dropSchemaStatement.getSchemaNames()) {
        String schemaName = each.getValue();
        ShardingSphereSchema schema = database.getSchema(schemaName);
        ShardingSpherePreconditions.checkNotNull(schema, () -> new SchemaNotFoundException(schemaName));
        ShardingSpherePreconditions.checkState(containsCascade || schema.getAllTableNames().isEmpty(), () -> new DropNotEmptySchemaException(schemaName));
    }
}
// Dropping a non-empty schema without CASCADE must raise DropNotEmptySchemaException.
@Test
void assertValidateWithoutCascadeSchema() {
    assertThrows(DropNotEmptySchemaException.class,
            () -> new SingleDropSchemaMetaDataValidator().validate(mock(SingleRule.class, RETURNS_DEEP_STUBS), createSQLStatementContext("foo_schema", false), mockDatabase()));
}
/**
 * Serves a worker dump file for download after authorization checks.
 * Security: the resolved absolute path must stay under logRoot and the raw
 * path must equal its normalized form, which rejects "../" traversal in any
 * of topologyId/port/fileName. Unauthorized users get 403, missing files 404.
 */
public Response downloadDumpFile(String topologyId, String hostPort, String fileName, String user) throws IOException {
    String[] hostPortSplit = hostPort.split(":");
    String host = hostPortSplit[0];
    String portStr = hostPortSplit[1];
    Path rawFile = logRoot.resolve(topologyId).resolve(portStr).resolve(fileName);
    Path absFile = rawFile.toAbsolutePath().normalize();
    if (!absFile.startsWith(logRoot) || !rawFile.normalize().toString().equals(rawFile.toString())) {
        //Ensure filename doesn't contain ../ parts
        return LogviewerResponseBuilder.buildResponsePageNotFound();
    }

    if (absFile.toFile().exists()) {
        String workerFileRelativePath = String.join(File.separator, topologyId, portStr, WORKER_LOG_FILENAME);
        if (resourceAuthorizer.isUserAllowedToAccessFile(user, workerFileRelativePath)) {
            // Prefix with host/topology/port so downloads from different workers don't collide.
            String downloadedFileName = host + "-" + topologyId + "-" + portStr + "-" + absFile.getFileName();
            return LogviewerResponseBuilder.buildDownloadFile(downloadedFileName, absFile.toFile(), numFileDownloadExceptions);
        } else {
            return LogviewerResponseBuilder.buildResponseUnauthorizedUser(user);
        }
    } else {
        return LogviewerResponseBuilder.buildResponsePageNotFound();
    }
}
// A traversal attempt ("../../") in the topology id must yield 404, not a file.
@Test
public void testDownloadDumpFileTraversalInTopoId() throws IOException {
    try (TmpPath rootPath = new TmpPath()) {
        LogviewerProfileHandler handler = createHandlerTraversalTests(rootPath.getFile().toPath());
        Response topoAResponse = handler.downloadDumpFile("../../", "localhost:logs", "daemon-dump.bin", "user");

        Utils.forceDelete(rootPath.toString());
        assertThat(topoAResponse.getStatus(), is(Response.Status.NOT_FOUND.getStatusCode()));
    }
}
/**
 * Returns a live view of all published instance info for this client
 * (backed by the publishers map's value collection, not a copy).
 */
public Collection<InstancePublishInfo> getAllInstancePublishInfo() {
    return publishers.values();
}
// After adding one service instance, the published collection contains exactly it.
@Test
void testGetAllInstancePublishInfo() {
    ipPortBasedClient.addServiceInstance(service, instancePublishInfo);
    Collection<InstancePublishInfo> allInstancePublishInfo = ipPortBasedClient.getAllInstancePublishInfo();
    assertEquals(1, allInstancePublishInfo.size());
    assertEquals(allInstancePublishInfo.iterator().next(), instancePublishInfo);
}
/**
 * Updates the remote file's created/modified timestamps from the transfer
 * status (skipping any that are unset) and stores the server's resulting
 * attributes back into the status response.
 *
 * @throws BackgroundException wrapping any Storegate API failure
 */
@Override
public void setTimestamp(final Path file, final TransferStatus status) throws BackgroundException {
    try {
        final FilesApi files = new FilesApi(session.getClient());
        status.setResponse(new StoregateAttributesFinderFeature(session, fileid).toAttributes(files.filesUpdateFile(fileid.getFileId(file),
                new UpdateFilePropertiesRequest()
                        .created(null != status.getCreated() ? new DateTime(status.getCreated()) : null)
                        .modified(null != status.getModified() ? new DateTime(status.getModified()) : null)
                )
            )
        );
    }
    catch(ApiException e) {
        throw new StoregateExceptionMappingService(fileid).map("Failure to write attributes of {0}", e, file);
    }
}
// Setting both created and modified timestamps must be reflected by the
// Storegate attributes finder and the protocol-agnostic default finder.
@Test
public void testSetTimestamp() throws Exception {
    final StoregateIdProvider nodeid = new StoregateIdProvider(session);
    final Path room = new StoregateDirectoryFeature(session, nodeid).mkdir(new Path(
        String.format("/My files/%s", new AlphanumericRandomStringService().random()),
        EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final Path file = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new StoregateTouchFeature(session, nodeid).touch(file, new TransferStatus().withMime("x-application/cyberduck"));
    assertNotNull(new StoregateAttributesFinderFeature(session, nodeid).find(file));
    final long created = 1695161463630L;
    // Five days in the past, truncated to whole seconds.
    final long modified = Instant.now().minusSeconds(5 * 24 * 60 * 60).getEpochSecond() * 1000;
    final TransferStatus status = new TransferStatus()
        .withCreated(created)
        .withModified(modified);
    new StoregateTimestampFeature(session, nodeid).setTimestamp(file, status);
    final PathAttributes attr = new StoregateAttributesFinderFeature(session, nodeid).find(file);
    assertEquals(created, attr.getCreationDate());
    assertEquals(modified, attr.getModificationDate());
    assertEquals(created, new DefaultAttributesFinderFeature(session).find(file).getCreationDate());
    assertEquals(modified, new DefaultAttributesFinderFeature(session).find(file).getModificationDate());
    assertEquals(attr, status.getResponse());
    new StoregateDeleteFeature(session, nodeid).delete(Arrays.asList(file, room), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Replaces all headers with those of the argument. When the argument is also
 * a DefaultHttpHeaders, copies directly between backing stores; otherwise
 * falls back to the generic superclass implementation.
 */
@Override
public HttpHeaders set(HttpHeaders headers) {
    if (!(headers instanceof DefaultHttpHeaders)) {
        return super.set(headers);
    }
    this.headers.set(((DefaultHttpHeaders) headers).headers);
    return this;
}
// Setting a multi-value CharSequence list must store exactly those values.
@Test
public void setCharSequences() {
    final DefaultHttpHeaders headers = newDefaultDefaultHttpHeaders();
    headers.set(HEADER_NAME, HeaderValue.THREE.asList());
    assertDefaultValues(headers, HeaderValue.THREE);
}
/**
 * Prepares and sends one fetch request per eligible node, returning how many
 * were sent. The success/failure callbacks re-acquire this Fetcher's monitor
 * because they run on the network thread, outside this synchronized call.
 */
public synchronized int sendFetches() {
    final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests();
    sendFetchesInternal(
            fetchRequests,
            (fetchTarget, data, clientResponse) -> {
                synchronized (Fetcher.this) {
                    handleFetchSuccess(fetchTarget, data, clientResponse);
                }
            },
            (fetchTarget, data, error) -> {
                synchronized (Fetcher.this) {
                    handleFetchFailure(fetchTarget, data, error);
                }
            });
    return fetchRequests.size();
}
// Records written with magic v0 (no partition leader epoch) must surface
// Optional.empty() as the leader epoch on each consumed record.
@Test
public void testMissingLeaderEpochInRecords() {
    buildFetcher();

    assignFromUser(singleton(tp0));
    subscriptions.seek(tp0, 0);
    ByteBuffer buffer = ByteBuffer.allocate(1024);
    MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V0, Compression.NONE,
            TimestampType.CREATE_TIME, 0L, System.currentTimeMillis(),
            RecordBatch.NO_PARTITION_LEADER_EPOCH);
    builder.append(0L, "key".getBytes(), "1".getBytes());
    builder.append(0L, "key".getBytes(), "2".getBytes());
    MemoryRecords records = builder.build();

    assertEquals(1, sendFetches());
    assertFalse(fetcher.hasCompletedFetches());

    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
    consumerClient.poll(time.timer(0));
    assertTrue(fetcher.hasCompletedFetches());

    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> partitionRecords = fetchRecords();
    assertTrue(partitionRecords.containsKey(tp0));
    assertEquals(2, partitionRecords.get(tp0).size());

    for (ConsumerRecord<byte[], byte[]> record : partitionRecords.get(tp0)) {
        assertEquals(Optional.empty(), record.leaderEpoch());
    }
}
/**
 * Queries the consume time spans for a consumer group from the given broker.
 * The returned future completes with the spans on SUCCESS, or exceptionally
 * with an MQClientException on a non-success response code — and, fixed here,
 * with the transport failure when the underlying invoke itself fails (the
 * previous code left the returned future incomplete in that case).
 */
@Override
public CompletableFuture<List<QueueTimeSpan>> queryConsumeTimeSpan(String address,
    QueryConsumeTimeSpanRequestHeader requestHeader, long timeoutMillis) {
    CompletableFuture<List<QueueTimeSpan>> future = new CompletableFuture<>();
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.QUERY_CONSUME_TIME_SPAN, requestHeader);
    remotingClient.invoke(address, request, timeoutMillis).thenAccept(response -> {
        if (response.getCode() == ResponseCode.SUCCESS) {
            QueryConsumeTimeSpanBody consumeTimeSpanBody = GroupList.decode(response.getBody(), QueryConsumeTimeSpanBody.class);
            future.complete(consumeTimeSpanBody.getConsumeTimeSpanSet());
        } else {
            log.warn("queryConsumerTimeSpan getResponseCommand failed, {} {}, header={}", response.getCode(), response.getRemark(), requestHeader);
            future.completeExceptionally(new MQClientException(response.getCode(), response.getRemark()));
        }
    }).exceptionally(t -> {
        // Propagate transport-level failures so callers never block forever.
        future.completeExceptionally(t);
        return null;
    });
    return future;
}
// A SYSTEM_ERROR response must surface as an MQClientException carrying the
// broker's response code and remark.
@Test
public void assertQueryConsumeTimeSpanWithError() {
    setResponseError();
    QueryConsumeTimeSpanRequestHeader requestHeader = mock(QueryConsumeTimeSpanRequestHeader.class);
    CompletableFuture<List<QueueTimeSpan>> actual = mqClientAdminImpl.queryConsumeTimeSpan(defaultBrokerAddr, requestHeader, defaultTimeout);
    Throwable thrown = assertThrows(ExecutionException.class, actual::get);
    assertTrue(thrown.getCause() instanceof MQClientException);
    MQClientException mqException = (MQClientException) thrown.getCause();
    assertEquals(ResponseCode.SYSTEM_ERROR, mqException.getResponseCode());
    assertTrue(mqException.getMessage().contains("CODE: 1  DESC: null"));
}
/**
 * Creates a Window transform assigning elements with the given WindowFn.
 * Rejects window functions whose window coder is non-deterministic, since
 * grouping by window requires deterministic encoding.
 *
 * @throws IllegalArgumentException if the window coder is non-deterministic
 */
public static <T> Window<T> into(WindowFn<? super T, ?> fn) {
    try {
        fn.windowCoder().verifyDeterministic();
    } catch (NonDeterministicException e) {
        throw new IllegalArgumentException("Window coders must be deterministic.", e);
    }
    return Window.<T>configure().withWindowFn(fn);
}
// The transform's default name must be "Window.Into()".
@Test
public void testWindowGetName() {
    assertEquals(
        "Window.Into()",
        Window.<String>into(FixedWindows.of(Duration.standardMinutes(10))).getName());
}
/**
 * Intentional no-op: this implementation does not record view IDs.
 * The companion test asserts that no view tag is set as a result.
 */
@Override
public void setViewID(View view, String viewID) {
}
// The no-op implementation must leave the view's id tag unset (null).
@Test
public void setViewID() {
    View view = new View(mApplication);
    mSensorsAPI.setViewID(view, "R.id.login");
    Object tag = view.getTag(R.id.sensors_analytics_tag_view_id);
    Assert.assertNull(tag);
}
/**
 * Endpoint hit when the user returns from AD authentication; redirects the
 * browser to the client return URL registered for the given session.
 * NOTE(review): "returnFomAd" looks like a typo for "returnFromAd", but the
 * method name may be referenced by tooling/tests — confirm before renaming.
 */
@Operation(description = "Return from AD after authentication")
@GetMapping(value = "/return")
public RedirectView returnFomAd(@RequestParam(name = "sessionId") String sessionId) {
    return new RedirectView(service.getClientReturnId(sessionId));
}
// Smoke test: the return endpoint must handle a session id without throwing.
@Test
void returnFomAd() {
    controller.returnFomAd("sessionId");
}
/** Plugin ordering: uses the METRICS plugin's enum code. */
@Override
public int getOrder() {
    return PluginEnum.METRICS.getCode();
}
// The plugin's order must equal the METRICS enum code.
// Fixed: JUnit's assertEquals takes the expected value first; the arguments
// were swapped, which would produce a misleading failure message.
@Test
public void testGetOrder() {
    Assertions.assertEquals(PluginEnum.METRICS.getCode(), metricsPlugin.getOrder());
}
/**
 * Factory selecting a sharding route engine by statement family:
 * TCL -> database broadcast; DDL -> cursor or DDL engine; DAL/DCL -> their
 * dedicated engines; everything else (DML/queries) -> the DQL engine.
 */
public static ShardingRouteEngine newInstance(final ShardingRule shardingRule, final ShardingSphereDatabase database, final QueryContext queryContext,
                                              final ShardingConditions shardingConditions, final ConfigurationProperties props, final ConnectionContext connectionContext) {
    SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext();
    SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
    if (sqlStatement instanceof TCLStatement) {
        return new ShardingDatabaseBroadcastRoutingEngine();
    }
    if (sqlStatement instanceof DDLStatement) {
        // Cursor-capable DDL (e.g. OPEN/FETCH) routes via the cursor engine.
        if (sqlStatementContext instanceof CursorAvailable) {
            return getCursorRouteEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props);
        }
        return getDDLRoutingEngine(shardingRule, database, sqlStatementContext);
    }
    if (sqlStatement instanceof DALStatement) {
        return getDALRoutingEngine(shardingRule, database, sqlStatementContext, connectionContext);
    }
    if (sqlStatement instanceof DCLStatement) {
        return getDCLRoutingEngine(shardingRule, database, sqlStatementContext);
    }
    return getDQLRoutingEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props, connectionContext);
}
// A DAL statement referencing sharding tables should be routed with the unicast engine.
@Test void assertNewInstanceForDALWithTables() { tableNames.add("tbl"); when(shardingRule.getShardingRuleTableNames(tableNames)).thenReturn(tableNames); DALStatement dalStatement = mock(DALStatement.class); when(sqlStatementContext.getSqlStatement()).thenReturn(dalStatement); QueryContext queryContext = new QueryContext(sqlStatementContext, "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class)); ShardingRouteEngine actual = ShardingRouteEngineFactory.newInstance(shardingRule, database, queryContext, shardingConditions, props, new ConnectionContext(Collections::emptySet)); assertThat(actual, instanceOf(ShardingUnicastRoutingEngine.class)); }
/**
 * Converts per-worker counter and histogram updates into {@code PerStepNamespaceMetrics},
 * grouped by metric namespace. Updates whose names cannot be converted to a
 * {@code MetricValue} are silently skipped.
 *
 * @param stepName the original step name stamped on every produced group
 * @param counters counter updates keyed by metric name
 * @param histograms histogram snapshots keyed by metric name
 * @param parsedPerWorkerMetricsCache cache of already-parsed metric names, shared with the converters
 * @return one {@code PerStepNamespaceMetrics} per distinct namespace seen
 */
public static Collection<PerStepNamespaceMetrics> convert(
    String stepName,
    Map<MetricName, Long> counters,
    Map<MetricName, LockFreeHistogram.Snapshot> histograms,
    Map<MetricName, LabeledMetricNameUtils.ParsedMetricName> parsedPerWorkerMetricsCache) {
  Map<String, PerStepNamespaceMetrics> metricsByNamespace = new HashMap<>();
  for (Entry<MetricName, Long> entry : counters.entrySet()) {
    MetricName metricName = entry.getKey();
    Optional<MetricValue> metricValue =
        convertCounterToMetricValue(metricName, entry.getValue(), parsedPerWorkerMetricsCache);
    // Unconvertible names are skipped (empty Optional).
    metricValue.ifPresent(value -> addToNamespace(metricsByNamespace, stepName, metricName, value));
  }
  for (Entry<MetricName, LockFreeHistogram.Snapshot> entry : histograms.entrySet()) {
    MetricName metricName = entry.getKey();
    Optional<MetricValue> metricValue =
        convertHistogramToMetricValue(metricName, entry.getValue(), parsedPerWorkerMetricsCache);
    metricValue.ifPresent(value -> addToNamespace(metricsByNamespace, stepName, metricName, value));
  }
  return metricsByNamespace.values();
}

/** Appends {@code value} to the bucket for the metric's namespace, creating the bucket on first use. */
private static void addToNamespace(
    Map<String, PerStepNamespaceMetrics> metricsByNamespace,
    String stepName,
    MetricName metricName,
    MetricValue value) {
  PerStepNamespaceMetrics stepNamespaceMetrics =
      metricsByNamespace.computeIfAbsent(
          metricName.getNamespace(),
          namespace ->
              new PerStepNamespaceMetrics()
                  .setMetricValues(new ArrayList<>())
                  .setOriginalStep(stepName)
                  .setMetricsNamespace(namespace));
  stepNamespaceMetrics.getMetricValues().add(value);
}
// A histogram with an unsupported bucket type must be dropped entirely: no output groups, no cache entries.
@Test public void testConvert_skipUnknownHistogramBucketType() { Map<MetricName, LabeledMetricNameUtils.ParsedMetricName> parsedMetricNames = new HashMap<>(); String step = "testStep"; Map<MetricName, Long> emptyCounters = new HashMap<>(); Map<MetricName, LockFreeHistogram.Snapshot> histograms = new HashMap<>(); MetricName bigQueryMetric1 = MetricName.named("BigQuerySink", "baseLabel"); LockFreeHistogram histogram = new LockFreeHistogram(bigQueryMetric1, new TestBucketType()); histogram.update(1.0, 2.0); histograms.put(bigQueryMetric1, histogram.getSnapshotAndReset().get()); Collection<PerStepNamespaceMetrics> conversionResult = MetricsToPerStepNamespaceMetricsConverter.convert( step, emptyCounters, histograms, parsedMetricNames); assertThat(conversionResult.size(), equalTo(0)); assertThat(parsedMetricNames.size(), equalTo(0)); }
/**
 * Returns true when the user is either an admin or a view user, given the member roles.
 * Admin status is checked first, mirroring the original short-circuit order.
 */
public boolean hasAdminOrViewPermissions(final CaseInsensitiveString userName, List<Role> memberRoles) {
    if (isUserAnAdmin(userName, memberRoles)) {
        return true;
    }
    return isViewUser(userName, memberRoles);
}
// A configured admin user must report admin-or-view permission even with no member roles supplied.
@Test public void shouldSayThatAnAdmin_HasAdminOrViewPermissions() { CaseInsensitiveString adminUser = new CaseInsensitiveString("admin"); Authorization authorization = new Authorization(new AdminsConfig(new AdminUser(adminUser))); assertThat(authorization.hasAdminOrViewPermissions(adminUser, null), is(true)); }
// Maps the request VO to an entity, inserts it, and returns the database-generated id.
@Override public Long createTenantPackage(TenantPackageSaveReqVO createReqVO) { // insert TenantPackageDO tenantPackage = BeanUtils.toBean(createReqVO, TenantPackageDO.class); tenantPackageMapper.insert(tenantPackage); // return return tenantPackage.getId(); }
// Creates a package from a random VO and verifies the persisted row matches the request (id excluded).
@Test public void testCreateTenantPackage_success() { // prepare parameters TenantPackageSaveReqVO reqVO = randomPojo(TenantPackageSaveReqVO.class, o -> o.setStatus(randomCommonStatus())) .setId(null); // prevent id from being pre-assigned // invoke Long tenantPackageId = tenantPackageService.createTenantPackage(reqVO); // assert assertNotNull(tenantPackageId); // verify the stored record's fields are correct TenantPackageDO tenantPackage = tenantPackageMapper.selectById(tenantPackageId); assertPojoEquals(reqVO, tenantPackage, "id"); }
// Builds a pooled LDAP context source from the configured URL/bind credentials; objectGUID is marked binary
// and connect/read timeouts are passed through the JNDI base environment properties.
@Bean @ConditionalOnMissingBean public LdapContextSource contextSource(final LdapProperties ldapProp) { LdapContextSource contextSource = new LdapContextSource(); contextSource.setUrl(ldapProp.getUrl()); contextSource.setUserDn(ldapProp.getBindDn()); contextSource.setPassword(ldapProp.getPassword()); contextSource.setPooled(true); Map<String, Object> config = new HashMap<>(); config.put("java.naming.ldap.attributes.binary", "objectGUID"); config.put("com.sun.jndi.ldap.connect.timeout", String.valueOf(ldapProp.getConnectTimeout())); config.put("com.sun.jndi.ldap.read.timeout", String.valueOf(ldapProp.getReadTimeout())); contextSource.setBaseEnvironmentProperties(config); return contextSource; }
// Verifies the context source is wired with the mocked URL, bind DN, and password.
@Test public void testContextSource() { LdapProperties ldapProp = mock(LdapProperties.class); String ldapUrl = "ldap://192.168.0.10:389"; String user = "test"; String pass = "123"; when(ldapProp.getUrl()).thenReturn(ldapUrl); when(ldapProp.getBindDn()).thenReturn(user); when(ldapProp.getPassword()).thenReturn(pass); when(ldapProp.getConnectTimeout()).thenReturn(5000); when(ldapProp.getReadTimeout()).thenReturn(10000); LdapContextSource ldapContextSource = ldapConfiguration.contextSource(ldapProp); assertNotNull(ldapContextSource); assertThat(ldapContextSource.getUrls().length, is(1)); assertEquals(ldapContextSource.getUrls()[0], ldapUrl); assertEquals(ldapContextSource.getUserDn(), user); assertEquals(ldapContextSource.getPassword(), pass); }
// Current count of group members that use the classic protocol (backed by an atomic counter).
public int numClassicProtocolMembers() { return numClassicProtocolMembers.get(); }
// Exercises the classic-protocol member counter across member add/remove/replace transitions.
@Test public void testNumClassicProtocolMembers() { ConsumerGroup consumerGroup = createConsumerGroup("foo"); List<ConsumerGroupMemberMetadataValue.ClassicProtocol> protocols = new ArrayList<>(); protocols.add(new ConsumerGroupMemberMetadataValue.ClassicProtocol() .setName("range") .setMetadata(new byte[0])); // The group has member 1 (using the classic protocol). ConsumerGroupMember member1 = new ConsumerGroupMember.Builder("member-1") .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() .setSupportedProtocols(protocols)) .build(); consumerGroup.updateMember(member1); assertEquals(1, consumerGroup.numClassicProtocolMembers()); // The group has member 1 (using the classic protocol) and member 2 (using the consumer protocol). ConsumerGroupMember member2 = new ConsumerGroupMember.Builder("member-2") .build(); consumerGroup.updateMember(member2); assertEquals(1, consumerGroup.numClassicProtocolMembers()); assertFalse(consumerGroup.allMembersUseClassicProtocolExcept("member-1")); assertTrue(consumerGroup.allMembersUseClassicProtocolExcept("member-2")); // The group has member 2 (using the consumer protocol) and member 3 (using the consumer protocol). consumerGroup.removeMember(member1.memberId()); ConsumerGroupMember member3 = new ConsumerGroupMember.Builder("member-3") .build(); consumerGroup.updateMember(member3); assertEquals(0, consumerGroup.numClassicProtocolMembers()); assertFalse(consumerGroup.allMembersUseClassicProtocolExcept("member-2")); // The group has member 2 (using the classic protocol). consumerGroup.removeMember(member2.memberId()); member2 = new ConsumerGroupMember.Builder("member-2") .setClassicMemberMetadata(new ConsumerGroupMemberMetadataValue.ClassicMemberMetadata() .setSupportedProtocols(protocols)) .build(); consumerGroup.updateMember(member2); assertEquals(1, consumerGroup.numClassicProtocolMembers()); }
// Validates that window size and inner deserializer are each supplied exactly once (constructor XOR config),
// resolving window.size.ms (String or Long) and windowed.inner.class.serde, and rejecting conflicting settings.
@SuppressWarnings("unchecked") @Override public void configure(final Map<String, ?> configs, final boolean isKey) { //check to see if the window size config is set and the window size is already set from the constructor final Long configWindowSize; if (configs.get(StreamsConfig.WINDOW_SIZE_MS_CONFIG) instanceof String) { configWindowSize = Long.parseLong((String) configs.get(StreamsConfig.WINDOW_SIZE_MS_CONFIG)); } else { configWindowSize = (Long) configs.get(StreamsConfig.WINDOW_SIZE_MS_CONFIG); } if (windowSize != null && configWindowSize != null) { throw new IllegalArgumentException("Window size should not be set in both the time windowed deserializer constructor and the window.size.ms config"); } else if (windowSize == null && configWindowSize == null) { throw new IllegalArgumentException("Window size needs to be set either through the time windowed deserializer " + "constructor or the window.size.ms config but not both"); } else { windowSize = windowSize == null ? configWindowSize : windowSize; } final String windowedInnerClassSerdeConfig = (String) configs.get(StreamsConfig.WINDOWED_INNER_CLASS_SERDE); Serde<T> windowInnerClassSerde = null; if (windowedInnerClassSerdeConfig != null) { try { windowInnerClassSerde = Utils.newInstance(windowedInnerClassSerdeConfig, Serde.class); } catch (final ClassNotFoundException e) { throw new ConfigException(StreamsConfig.WINDOWED_INNER_CLASS_SERDE, windowedInnerClassSerdeConfig, "Serde class " + windowedInnerClassSerdeConfig + " could not be found."); } } if (inner != null && windowedInnerClassSerdeConfig != null) { if (!inner.getClass().getName().equals(windowInnerClassSerde.deserializer().getClass().getName())) { throw new IllegalArgumentException("Inner class deserializer set using constructor " + "(" + inner.getClass().getName() + ")" + " is different from the one set in windowed.inner.class.serde config " + "(" + windowInnerClassSerde.deserializer().getClass().getName() + ")."); } } else if (inner == null && 
windowedInnerClassSerdeConfig == null) { throw new IllegalArgumentException("Inner class deserializer should be set either via constructor " + "or via the windowed.inner.class.serde config"); } else if (inner == null) inner = windowInnerClassSerde.deserializer(); }
// Setting window.size.ms while the constructor also set a window size must fail fast.
@Test public void shouldThrowErrorIfWindowSizeSetInConfigsAndConstructor() { props.put(StreamsConfig.WINDOW_SIZE_MS_CONFIG, "500"); assertThrows(IllegalArgumentException.class, () -> timeWindowedDeserializer.configure(props, false)); }
// Parses a monetary string into a Coin at the smallest-unit precision; propagates NumberFormatException for invalid input.
public Coin parse(String str) throws NumberFormatException { return Coin.valueOf(parseValue(str, Coin.SMALLEST_UNIT_EXPONENT)); }
// Leading whitespace is not tolerated by the parser.
@Test(expected = NumberFormatException.class) public void parseInvalidWhitespaceBefore() { NO_CODE.parse(" 1"); }
/**
 * Returns the external storage directory path when the storage is mounted
 * (read-write or read-only); otherwise returns null.
 */
public String getMountedExternalStorageDirectoryPath() {
    final String state = Environment.getExternalStorageState();
    final boolean mounted = Environment.MEDIA_MOUNTED.equals(state)
            || Environment.MEDIA_MOUNTED_READ_ONLY.equals(state);
    return mounted ? getExternalStorageDirectoryPath() : null;
}
// A BAD_REMOVAL storage state is not mounted, so the path must be null.
@Test public void getMountedExternalStorageDirectoryPathReturnsNullWhenBadRemoval() { ShadowEnvironment.setExternalStorageState(Environment.MEDIA_BAD_REMOVAL); assertThat(contextUtil.getMountedExternalStorageDirectoryPath(), is(nullValue())); }
/**
 * Looks up a virtual device in this network by id.
 * Rejects a null id, and returns null when no device matches.
 */
@Override public Device getDevice(DeviceId deviceId) {
    checkNotNull(deviceId, DEVICE_NULL);
    return manager.getVirtualDevices(this.networkId)
            .stream()
            .filter(device -> deviceId.equals(device.id()))
            .findFirst()
            .orElse(null);
}
// A null device id must trigger the checkNotNull guard.
@Test(expected = NullPointerException.class) public void testGetDeviceByNullId() { manager.registerTenantId(TenantId.tenantId(tenantIdValue1)); VirtualNetwork virtualNetwork = manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1)); DeviceService deviceService = manager.get(virtualNetwork.id(), DeviceService.class); // test the getDevice() method with null device id value. deviceService.getDevice(null); }
/**
 * Merges several sub-cluster node-label responses into one, de-duplicating labels.
 * Null responses and null label lists are tolerated and skipped.
 */
public static GetClusterNodeLabelsResponse mergeClusterNodeLabelsResponse(
    Collection<GetClusterNodeLabelsResponse> responses) {
  Set<NodeLabel> mergedLabels = new HashSet<>();
  for (GetClusterNodeLabelsResponse response : responses) {
    if (response == null) {
      continue;
    }
    List<NodeLabel> labels = response.getNodeLabelList();
    if (labels != null) {
      mergedLabels.addAll(labels);
    }
  }
  GetClusterNodeLabelsResponse nodeLabelsResponse =
      Records.newRecord(GetClusterNodeLabelsResponse.class);
  nodeLabelsResponse.setNodeLabelList(new ArrayList<>(mergedLabels));
  return nodeLabelsResponse;
}
// Merging a populated response, an empty response, and a null response yields exactly the populated labels.
@Test public void testMergeClusterNodeLabelsResponse() { NodeLabel nodeLabel1 = NodeLabel.newInstance("nodeLabel1"); NodeLabel nodeLabel2 = NodeLabel.newInstance("nodeLabel2"); NodeLabel nodeLabel3 = NodeLabel.newInstance("nodeLabel3"); // normal response List<NodeLabel> nodeLabelListSC1 = new ArrayList<>(); nodeLabelListSC1.add(nodeLabel1); nodeLabelListSC1.add(nodeLabel2); nodeLabelListSC1.add(nodeLabel3); GetClusterNodeLabelsResponse response1 = Records.newRecord( GetClusterNodeLabelsResponse.class); response1.setNodeLabelList(nodeLabelListSC1); // empty response List<NodeLabel> nodeLabelListSC2 = new ArrayList<>(); GetClusterNodeLabelsResponse response2 = Records.newRecord( GetClusterNodeLabelsResponse.class); response2.setNodeLabelList(nodeLabelListSC2); // null response GetClusterNodeLabelsResponse response3 = null; List<GetClusterNodeLabelsResponse> responses = new ArrayList<>(); responses.add(response1); responses.add(response2); responses.add(response3); List<NodeLabel> expectedResponse = new ArrayList<>(); expectedResponse.add(nodeLabel1); expectedResponse.add(nodeLabel2); expectedResponse.add(nodeLabel3); GetClusterNodeLabelsResponse response = RouterYarnClientUtils. mergeClusterNodeLabelsResponse(responses); Assert.assertTrue(CollectionUtils.isEqualCollection(expectedResponse, response.getNodeLabelList())); }
// Persists an X.509 certificate after verification. The first CSCA certificate for a document type is
// only self-signature-checked (trust must be flagged manually); later certificates go through chain
// verification, optionally allowing expired ones. Verification failures become BadRequestException.
public Certificate add(X509Certificate cert) { final Certificate db; try { db = Certificate.from(cert); } catch (CertificateEncodingException e) { logger.error("Encoding error in certificate", e); throw new RuntimeException("Encoding error in certificate", e); } try { // Special case for first CSCA certificate for this document type if (repository.countByDocumentType(db.getDocumentType()) == 0) { cert.verify(cert.getPublicKey()); logger.warn("Added first CSCA certificate for {}, set trusted flag manually", db.getDocumentType()); } else { verify(cert, allowAddingExpired ? cert.getNotAfter() : null); } } catch (GeneralSecurityException | VerificationException e) { logger.error( String.format("Could not verify certificate of %s issued by %s", cert.getSubjectX500Principal(), cert.getIssuerX500Principal() ), e ); throw new BadRequestException("Could not verify certificate", e); } return repository.saveAndFlush(db); }
// With an existing certificate of the same document type, an unverifiable new certificate must be rejected.
@Test public void shouldDisallowToAddCertificateIfNotFirstOfDocumentTypeAndNotTrusted() throws Exception { certificateRepo.saveAndFlush(loadCertificate("rvig/05.cer", true)); assertThrows(BadRequestException.class, () -> { service.add(readCertificate("npkd/01.cer")); }); }
// Convenience overload: analyzes a statement with the second flag defaulted to false.
public Analysis analyze(Statement statement) { return analyze(statement, false); }
// All supported START TRANSACTION variants (isolation levels, access modes, combinations) must analyze cleanly.
@Test public void testStartTransaction() { analyze("START TRANSACTION"); analyze("START TRANSACTION ISOLATION LEVEL READ UNCOMMITTED"); analyze("START TRANSACTION ISOLATION LEVEL READ COMMITTED"); analyze("START TRANSACTION ISOLATION LEVEL REPEATABLE READ"); analyze("START TRANSACTION ISOLATION LEVEL SERIALIZABLE"); analyze("START TRANSACTION READ ONLY"); analyze("START TRANSACTION READ WRITE"); analyze("START TRANSACTION ISOLATION LEVEL READ COMMITTED, READ ONLY"); analyze("START TRANSACTION READ ONLY, ISOLATION LEVEL READ COMMITTED"); analyze("START TRANSACTION READ WRITE, ISOLATION LEVEL SERIALIZABLE"); }
// Delegates to the underlying HTTP/1.1 connection provider's limit.
@Override public int maxConnections() { return http1ConnectionProvider().maxConnections(); }
// An HttpClient built on a custom provider must expose that same provider's settings.
@Test void returnOriginalConnectionProviderUsingBuilder() { ConnectionProvider provider = ConnectionProvider.builder("provider") .maxConnections(1) .disposeTimeout(Duration.ofSeconds(1L)) .pendingAcquireTimeout(Duration.ofSeconds(1L)) .maxIdleTime(Duration.ofSeconds(1L)) .maxLifeTime(Duration.ofSeconds(10L)) .lifo() .build(); testReturnOriginalConnectionProvider(HttpClient.create(provider), provider); }
// Parses a job-history file name (fields joined by DELIMITER) into a JobIndexInfo.
// Numeric fields tolerate parse failures (logged, field left default); a missing start-time field
// (older format) falls back to the submit time; a short name only logs a warning (partial info returned).
public static JobIndexInfo getIndexInfo(String jhFileName) throws IOException { String fileName = jhFileName.substring(0, jhFileName.indexOf(JobHistoryUtils.JOB_HISTORY_FILE_EXTENSION)); JobIndexInfo indexInfo = new JobIndexInfo(); String[] jobDetails = fileName.split(DELIMITER); JobID oldJobId = JobID.forName(decodeJobHistoryFileName(jobDetails[JOB_ID_INDEX])); JobId jobId = TypeConverter.toYarn(oldJobId); indexInfo.setJobId(jobId); // Do not fail if there are some minor parse errors try { try { indexInfo.setSubmitTime(Long.parseLong( decodeJobHistoryFileName(jobDetails[SUBMIT_TIME_INDEX]))); } catch (NumberFormatException e) { LOG.warn("Unable to parse submit time from job history file " + jhFileName + " : " + e); } indexInfo.setUser( decodeJobHistoryFileName(jobDetails[USER_INDEX])); indexInfo.setJobName( decodeJobHistoryFileName(jobDetails[JOB_NAME_INDEX])); try { indexInfo.setFinishTime(Long.parseLong( decodeJobHistoryFileName(jobDetails[FINISH_TIME_INDEX]))); } catch (NumberFormatException e) { LOG.warn("Unable to parse finish time from job history file " + jhFileName + " : " + e); } try { indexInfo.setNumMaps(Integer.parseInt( decodeJobHistoryFileName(jobDetails[NUM_MAPS_INDEX]))); } catch (NumberFormatException e) { LOG.warn("Unable to parse num maps from job history file " + jhFileName + " : " + e); } try { indexInfo.setNumReduces(Integer.parseInt( decodeJobHistoryFileName(jobDetails[NUM_REDUCES_INDEX]))); } catch (NumberFormatException e) { LOG.warn("Unable to parse num reduces from job history file " + jhFileName + " : " + e); } indexInfo.setJobStatus( decodeJobHistoryFileName(jobDetails[JOB_STATUS_INDEX])); indexInfo.setQueueName( decodeJobHistoryFileName(jobDetails[QUEUE_NAME_INDEX])); try{ if (jobDetails.length <= JOB_START_TIME_INDEX) { indexInfo.setJobStartTime(indexInfo.getSubmitTime()); } else { indexInfo.setJobStartTime(Long.parseLong( decodeJobHistoryFileName(jobDetails[JOB_START_TIME_INDEX]))); } } catch (NumberFormatException e){ 
LOG.warn("Unable to parse start time from job history file " + jhFileName + " : " + e); } } catch (IndexOutOfBoundsException e) { LOG.warn("Parsing job history file with partial data encoded into name: " + jhFileName); } return indexInfo; }
// Old-format names without a start-time field must fall back to the submit time.
@Test public void testJobStartTimeBackwardsCompatible() throws IOException { String jobHistoryFile = String.format(OLD_FORMAT_BEFORE_ADD_START_TIME, JOB_ID, SUBMIT_TIME, USER_NAME, JOB_NAME_WITH_DELIMITER_ESCAPE, FINISH_TIME, NUM_MAPS, NUM_REDUCES, JOB_STATUS, QUEUE_NAME); JobIndexInfo info = FileNameIndexUtils.getIndexInfo(jobHistoryFile); assertEquals(info.getJobStartTime(), info.getSubmitTime()); }
// Resolves the regex option: exchange header takes precedence, falling back to the endpoint configuration.
public String getRegex(final Exchange exchange) { return getOption(BlobExchangeHeaders::getRegexFromHeaders, configuration::getRegex, exchange); }
// Configuration regex is returned by default; a header value must override it.
@Test void testIfCorrectOptionsReturnedCorrectlyWithRegexSet() { final BlobConfiguration configuration = new BlobConfiguration(); // first case: when exchange is set final Exchange exchange = new DefaultExchange(context); final BlobConfigurationOptionsProxy configurationOptionsProxy = new BlobConfigurationOptionsProxy(configuration); configuration.setRegex(".*\\.exe"); assertEquals(".*\\.exe", configurationOptionsProxy.getRegex(exchange)); //test header override exchange.getIn().setHeader(BlobConstants.REGEX, ".*\\.pdf"); assertEquals(".*\\.pdf", configurationOptionsProxy.getRegex(exchange)); }
// Copies the selected index regions from srcFile into destFile back-to-back, returning new entries
// whose offsets reflect positions in the destination file (iteration order follows the sorted key map).
@VisibleForTesting static List<IndexEntry> copyIndices(File srcFile, File destFile, TreeMap<IndexKey, IndexEntry> indicesToCopy) throws IOException { // Copy index from original index file and append to temp file. // Keep track of the index entry pointing to the temp index file. List<IndexEntry> retained = new ArrayList<>(); long nextOffset = 0; // With FileChannel, we can seek to the data flexibly. try (FileChannel srcCh = new RandomAccessFile(srcFile, "r").getChannel(); FileChannel dstCh = new RandomAccessFile(destFile, "rw").getChannel()) { for (IndexEntry index : indicesToCopy.values()) { org.apache.pinot.common.utils.FileUtils.transferBytes(srcCh, index._startOffset, index._size, dstCh); retained.add(new IndexEntry(index._key, nextOffset, index._size)); nextOffset += index._size; } } return retained; }
// Copies zero-size entries and verifies the returned entries come back in sorted key order.
@Test public void testCopyIndices() throws IOException { File srcTmp = new File(TEMP_DIR, UUID.randomUUID().toString()); if (!srcTmp.exists()) { FileUtils.touch(srcTmp); } File dstTmp = new File(TEMP_DIR, UUID.randomUUID().toString()); TreeMap<IndexKey, IndexEntry> indicesToCopy = new TreeMap<>(ImmutableMap .of(new IndexKey("foo", StandardIndexes.inverted()), new IndexEntry(new IndexKey("foo", StandardIndexes.inverted()), 0, 0), new IndexKey("foo", StandardIndexes.forward()), new IndexEntry(new IndexKey("foo", StandardIndexes.forward()), 0, 0), new IndexKey("bar", StandardIndexes.forward()), new IndexEntry(new IndexKey("bar", StandardIndexes.forward()), 0, 0), new IndexKey("bar", StandardIndexes.dictionary()), new IndexEntry(new IndexKey("bar", StandardIndexes.dictionary()), 0, 0), new IndexKey("bar", StandardIndexes.json()), new IndexEntry(new IndexKey("bar", StandardIndexes.json()), 0, 0))); List<IndexEntry> retained = SingleFileIndexDirectory.copyIndices(srcTmp, dstTmp, indicesToCopy); List<IndexKey> retainedKeys = retained.stream().map(e -> e._key).collect(Collectors.toList()); // The returned entries are sorted. assertEquals(retainedKeys, Arrays .asList(new IndexKey("bar", StandardIndexes.dictionary()), new IndexKey("bar", StandardIndexes.forward()), new IndexKey("bar", StandardIndexes.json()), new IndexKey("foo", StandardIndexes.forward()), new IndexKey("foo", StandardIndexes.inverted()))); }
@SuppressWarnings({"unchecked", "rawtypes"}) public static int compareTo(final Comparable thisValue, final Comparable otherValue, final OrderDirection orderDirection, final NullsOrderType nullsOrderType, final boolean caseSensitive) { if (null == thisValue && null == otherValue) { return 0; } if (null == thisValue) { return NullsOrderType.FIRST == nullsOrderType ? -1 : 1; } if (null == otherValue) { return NullsOrderType.FIRST == nullsOrderType ? 1 : -1; } if (!caseSensitive && thisValue instanceof String && otherValue instanceof String) { return compareToCaseInsensitiveString((String) thisValue, (String) otherValue, orderDirection); } return OrderDirection.ASC == orderDirection ? thisValue.compareTo(otherValue) : -thisValue.compareTo(otherValue); }
// NULLS FIRST + ASC: a null left operand sorts before a non-null value.
@Test void assertCompareToWhenFirstValueIsNullForOrderByAscAndNullsFirst() { assertThat(CompareUtils.compareTo(null, 1, OrderDirection.ASC, NullsOrderType.FIRST, caseSensitive), is(-1)); }
@VisibleForTesting public static int getStartSize(int numArrays, int avgArrayLen) { // For each array, we store the array and its startoffset (4 bytes) long estimatedSize = numArrays * ((long) avgArrayLen + 4); if (estimatedSize > 0 && estimatedSize <= Integer.MAX_VALUE) { return (int) estimatedSize; } return Integer.MAX_VALUE; }
// Small counts fit exactly; larger counts overflow int and clamp to Integer.MAX_VALUE.
@Test public void startSizeTest() { Assert.assertEquals(MutableOffHeapByteArrayStore.getStartSize(1, ONE_GB), ONE_GB + 4); Assert.assertEquals(MutableOffHeapByteArrayStore.getStartSize(3, ONE_GB), Integer.MAX_VALUE); Assert.assertEquals(MutableOffHeapByteArrayStore.getStartSize(5, ONE_GB), Integer.MAX_VALUE); }
/**
 * Sets both latitude and longitude from a combined LatLong value.
 * Returns this builder for chaining.
 */
public PointBuilder<T> latLong(LatLong latitudeAndLongitude) {
    final LatLong position = latitudeAndLongitude;
    this.latitude = position.latitude();
    this.longitude = position.longitude();
    return this;
}
// A point built from a LatLong must report that same LatLong back.
@Test public void testLatLong() { LatLong latLong = LatLong.of(-5.0, -2.34); Point<String> p = Point.<String>builder().latLong(latLong).time(EPOCH).build(); assertEquals(p.latLong(), latLong); }
// Classifies the user agent into one of three known browser constants, or "" when unknown/absent.
// NOTE(review): substring(0, 3) assumes WINDOWS_BROWSER begins with the "Edg" token — fragile if that constant changes.
public synchronized String getBrowserName() { final String userAgent = getUserAgent(); if ( userAgent == null ) { return ""; } String edgeUserAgent = WINDOWS_BROWSER.substring( 0, 3 ); // Edg if ( userAgent.contains( edgeUserAgent ) ) { return WINDOWS_BROWSER; } else if ( userAgent.contains( UBUNTU_BROWSER ) ) { return UBUNTU_BROWSER; } else if ( userAgent.contains( MAC_BROWSER ) ) { return MAC_BROWSER; } return ""; }
// macOS user agents (including the "wrong" variant) must both resolve to the Mac browser constant.
@Test public void testGetBrowserName_mac() { EnvironmentUtilsMock mock = new EnvironmentUtilsMock( Case.MAC_OS_X ); assertEquals( MAC_BROWSER, mock.getMockedInstance().getBrowserName() ); mock = new EnvironmentUtilsMock( Case.MAC_OS_X_WRONG ); assertEquals( MAC_BROWSER, mock.getMockedInstance().getBrowserName() ); }
// Lazily computed, cached hash over the byte array (8 bytes per unrolled step, tail handled byte-wise).
// The constants are powers of 31 collapsed into one multiply, so the result matches Arrays.hashCode.
// Do NOT change the constants: the hash may have been used for partitioning and must stay stable.
@Override public int hashCode() { int hash = _hash; if (hash == 1 && _bytes.length > 0) { int i = 0; for (; i + 7 < _bytes.length; i += 8) { hash = -1807454463 * hash + 1742810335 * _bytes[i] + 887503681 * _bytes[i + 1] + 28629151 * _bytes[i + 2] + 923521 * _bytes[i + 3] + 29791 * _bytes[i + 4] + 961 * _bytes[i + 5] + 31 * _bytes[i + 6] + _bytes[i + 7]; } for (; i < _bytes.length; i++) { hash = 31 * hash + _bytes[i]; } _hash = hash; } return hash; }
// Randomized check that ByteArray.hashCode matches Arrays.hashCode for short (<8) and long inputs.
@Test(description = "hash code may have been used for partitioning so must be stable") public void testHashCode() { // ensure to test below 8 byte[] bytes = new byte[ThreadLocalRandom.current().nextInt(8)]; ThreadLocalRandom.current().nextBytes(bytes); assertEquals(Arrays.hashCode(bytes), new ByteArray(bytes).hashCode()); for (int i = 0; i < 10_000; i++) { bytes = new byte[ThreadLocalRandom.current().nextInt(2048)]; ThreadLocalRandom.current().nextBytes(bytes); assertEquals(Arrays.hashCode(bytes), new ByteArray(bytes).hashCode()); } }
// Prometheus HTTP SD endpoint: returns all services in the namespace as a JSON array (no service filter).
@GetMapping(value = ApiConstants.PROMETHEUS_CONTROLLER_NAMESPACE_PATH, produces = "application/json; charset=UTF-8") public ResponseEntity<String> metricNamespace(@PathVariable("namespaceId") String namespaceId) throws NacosException { ArrayNode arrayNode = getServiceArrayNode(namespaceId, s -> true); return ResponseEntity.ok().body(arrayNode.toString()); }
// The namespace endpoint must return HTTP 200 with one JSON element per mocked instance.
@Test public void testMetricNamespace() throws Exception { when(instanceServiceV2.listAllInstances(nameSpace, NamingUtils.getGroupedName(name, group))).thenReturn(testInstanceList); String prometheusNamespacePath = ApiConstants.PROMETHEUS_CONTROLLER_NAMESPACE_PATH.replace("{namespaceId}", nameSpace); MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.get(prometheusNamespacePath); MockHttpServletResponse response = mockMvc.perform(builder).andReturn().getResponse(); assertEquals(200, response.getStatus()); assertEquals(testInstanceList.size(), JacksonUtils.toObj(response.getContentAsString()).size()); }
// Creates a mutable copy of this tree and marks the current root loggable as expired in the copy.
@Override @NotNull public BTreeMutable getMutableCopy() { final BTreeMutable result = new BTreeMutable(this); result.addExpiredLoggable(rootLoggable); return result; }
// Inserts 1000 entries, saves, reopens the log, and verifies the tree content survives the round trip.
@Test public void testSplitRight2() { int s = 1000; tm = new BTreeEmpty(log, createTestSplittingPolicy(), true, 1).getMutableCopy(); for (int i = 0; i < s; i++) { getTreeMutable().put(kv(i, "v" + i)); } checkTree(getTreeMutable(), s).run(); long rootAddress = saveTree(); checkTree(getTreeMutable(), s).run(); reopen(); t = new BTree(log, rootAddress, true, 1); checkTree(getTree(), s).run(); }
/**
 * Formats a Duration as "H:MM:SS", prefixed with "D days, " when the duration
 * spans at least one full day (e.g. "2 days, 3:00:00").
 */
public static String asString(Duration duration) {
    final long days = duration.toDays();
    final long hours = duration.toHours() % 24;
    final long minutes = duration.toMinutes() % 60;
    final long seconds = duration.getSeconds() % 60;
    final String clock = String.format("%d:%02d:%02d", hours, minutes, seconds);
    return (days > 0L) ? days + " days, " + clock : clock;
}
// 13h22m15s must format as "13:22:15" (no day prefix).
@Test public void testDurationFormatting_1() { //13 hours, 22 minutes, and 15 seconds long numSeconds = 13 * 3600 + 22 * 60 + 15; Duration dur = Duration.ofSeconds(numSeconds); assertEquals( "13:22:15", TimeUtils.asString(dur) ); }
// Compares two Merkle tree views at the leaf level of the SHALLOWER tree, returning the node orders
// whose hashes differ. NOTE(review): when depths differ, the deeper tree's hashes at those same orders
// are its internal-node hashes — presumably that is the intended comparison; confirm against callers.
public static Collection<Integer> compareTrees(MerkleTreeView local, MerkleTreeView remote) { Collection<Integer> deltaOrders = new LinkedList<>(); MerkleTreeView baseTree = local.depth() <= remote.depth() ? local : remote; MerkleTreeView otherTree = local.depth() <= remote.depth() ? remote : local; int leafLevel = baseTree.depth() - 1; int numberOfLeaves = getNodesOnLevel(leafLevel); int leftMostLeaf = getLeftMostNodeOrderOnLevel(leafLevel); for (int leafOrder = leftMostLeaf; leafOrder < leftMostLeaf + numberOfLeaves; leafOrder++) { if (baseTree.getNodeHash(leafOrder) != otherTree.getNodeHash(leafOrder)) { deltaOrders.add(leafOrder); } } return deltaOrders; }
// Even when parent hashes collide, leaf-level comparison must still flag the two differing leaves.
@Test public void testCompareTreesCatchesCollision() { int numberOfLocalTreeLeaves = 4; int[] localTreeLeaves = new int[numberOfLocalTreeLeaves]; for (int i = 0; i < numberOfLocalTreeLeaves; i++) { localTreeLeaves[i] = i; } int numberOfRemoteTreeNodes = 4; int[] remoteTreeLeaves = new int[numberOfRemoteTreeNodes]; for (int i = 0; i < numberOfRemoteTreeNodes; i++) { remoteTreeLeaves[i] = i; } // we cause a collision here that compareTrees() will notice // hash(node5,node6) will produce the same hash for node2 in both trees // localTreeLeaves: hash(2,3)=5 // remoteTreeLeaves: hash(1,4)=5 localTreeLeaves[2] = 1; localTreeLeaves[3] = 4; MerkleTreeView localTreeView = new RemoteMerkleTreeView(localTreeLeaves, 3); MerkleTreeView remoteTreeView = new RemoteMerkleTreeView(remoteTreeLeaves, 3); Collection<Integer> deltaOrders = MerkleTreeUtil.compareTrees(localTreeView, remoteTreeView); assertEquals(localTreeView.getNodeHash(0), remoteTreeView.getNodeHash(0)); assertEquals(localTreeView.getNodeHash(2), remoteTreeView.getNodeHash(2)); assertEquals(2, deltaOrders.size()); assertTrue(deltaOrders.containsAll(asList(5, 6))); }
// Constructor: enables scala REPL colored output unless disabled, reads the version-check flag,
// and registers the inner interpreter class per supported Scala version (2.12, 2.13).
public SparkInterpreter(Properties properties) { super(properties); // set scala.color if (Boolean.parseBoolean(properties.getProperty("zeppelin.spark.scala.color", "true"))) { System.setProperty("scala.color", "true"); } this.enableSupportedVersionCheck = java.lang.Boolean.parseBoolean( properties.getProperty("zeppelin.spark.enableSupportedVersionCheck", "true")); innerInterpreterClassMap.put("2.12", "org.apache.zeppelin.spark.SparkScala212Interpreter"); innerInterpreterClassMap.put("2.13", "org.apache.zeppelin.spark.SparkScala213Interpreter"); }
// End-to-end smoke test of SparkInterpreter in local mode: scala REPL basics,
// comments, case classes, RDD/DataFrame operations, ZeppelinContext forms,
// code completion, progress reporting and job cancellation.
@Test
void testSparkInterpreter() throws IOException, InterruptedException, InterpreterException {
  Properties properties = new Properties();
  properties.setProperty(SparkStringConstants.MASTER_PROP_NAME, "local");
  properties.setProperty(SparkStringConstants.APP_NAME_PROP_NAME, "test");
  properties.setProperty("zeppelin.spark.maxResult", "100");
  properties.setProperty("zeppelin.spark.uiWebUrl", "fake_spark_weburl/{{applicationId}}");
  // disable color output for easy testing
  properties.setProperty("zeppelin.spark.scala.color", "false");
  properties.setProperty("zeppelin.spark.deprecatedMsg.show", "false");

  InterpreterContext context = InterpreterContext.builder()
      .setInterpreterOut(new InterpreterOutput())
      .setIntpEventClient(mockRemoteEventClient)
      .setAngularObjectRegistry(new AngularObjectRegistry("spark", null))
      .build();
  InterpreterContext.set(context);

  interpreter = new SparkInterpreter(properties);
  interpreter.setInterpreterGroup(mock(InterpreterGroup.class));
  interpreter.open();

  InterpreterResult result =
      interpreter.interpret("val a=\"hello world\"", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  // Use contains instead of equals, because there's behavior difference between different scala versions
  assertTrue(output.contains("a: String = hello world\n"), output);

  result = interpreter.interpret("print(a)", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  assertEquals("hello world", output);

  // java stdout
  result = interpreter.interpret("System.out.print(a)", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  assertEquals("hello world", output);

  // incomplete
  result = interpreter.interpret("println(a", getInterpreterContext());
  assertEquals(InterpreterResult.Code.INCOMPLETE, result.code());

  // syntax error
  result = interpreter.interpret("println(b)", getInterpreterContext());
  assertEquals(InterpreterResult.Code.ERROR, result.code());
  assertTrue(output.contains("not found: value b"));

  // multiple line
  result = interpreter.interpret("\"123\".\ntoInt", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());

  // single line comment
  result = interpreter.interpret("print(\"hello world\")/*comment here*/", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  assertEquals("hello world", output);

  result = interpreter.interpret("/*comment here*/\nprint(\"hello world\")", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());

  if (!interpreter.isScala213()) {
    // multiple line comment, not supported by scala-2.13
    context = getInterpreterContext();
    result = interpreter.interpret("/*line 1 \n line 2*/", context);
    assertEquals(InterpreterResult.Code.SUCCESS, result.code(), context.out.toString());
  }

  // test function
  result = interpreter.interpret("def add(x:Int, y:Int)\n{ return x+y }", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());

  result = interpreter.interpret("print(add(1,2))", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());

  result = interpreter.interpret("/*line 1 \n line 2*/print(\"hello world\")", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());

  // Companion object with case class
  result = interpreter.interpret("import scala.math._\n" +
      "object Circle {\n" +
      " private def calculateArea(radius: Double): Double = Pi * pow(radius, 2.0)\n" +
      "}\n" +
      "case class Circle(radius: Double) {\n" +
      " import Circle._\n" +
      " def area: Double = calculateArea(radius)\n" +
      "}\n" +
      "\n" +
      "val circle1 = new Circle(5.0)", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());

  // use case class in spark
  // context = getInterpreterContext();
  // result = interpreter.interpret("sc\n.range(1, 10)\n.map(e=>Circle(e))\n.collect()", context);
  // assertEquals(context.out.toString(),
  //     InterpreterResult.Code.SUCCESS, result.code());

  // class extend
  result = interpreter.interpret("import java.util.ArrayList", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());

  result = interpreter.interpret("class MyArrayList extends ArrayList{}", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());

  // spark rdd operation
  context = getInterpreterContext();
  context.setParagraphId("pid_1");
  result = interpreter.interpret("sc\n.range(1, 10)\n.map(e=>e)\n.sum", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  assertTrue(output.contains("45"));
  ArgumentCaptor<Map> captorEvent = ArgumentCaptor.forClass(Map.class);
  verify(mockRemoteEventClient).onParaInfosReceived(captorEvent.capture());
  assertEquals("pid_1", captorEvent.getValue().get("paraId"));

  reset(mockRemoteEventClient);
  context = getInterpreterContext();
  context.setParagraphId("pid_2");
  result = interpreter.interpret("sc\n.range(1, 10)\n.sum", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  assertTrue(output.contains("45"));
  captorEvent = ArgumentCaptor.forClass(Map.class);
  verify(mockRemoteEventClient).onParaInfosReceived(captorEvent.capture());
  assertEquals("pid_2", captorEvent.getValue().get("paraId"));

  // spark job url is sent
  ArgumentCaptor<Map> onParaInfosReceivedArg = ArgumentCaptor.forClass(Map.class);
  verify(mockRemoteEventClient).onParaInfosReceived(onParaInfosReceivedArg.capture());
  assertTrue(((String) onParaInfosReceivedArg.getValue().get("jobUrl"))
      .startsWith("fake_spark_weburl/" + interpreter.getJavaSparkContext().sc().applicationId()));

  // RDD of case class objects
  result = interpreter.interpret(
      "case class A(a: Integer, b: Integer)\n" +
      "sc.parallelize(Seq(A(10, 20), A(30, 40))).collect()", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());

  // Dataset of case class objects
  result = interpreter.interpret("val bankText = sc.textFile(\"bank.csv\")", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());

  context = getInterpreterContext();
  result = interpreter.interpret(
      "case class Bank(age:Integer, job:String, marital : String, education : String, balance : Integer)\n" +
      "val bank = bankText.map(s=>s.split(\";\")).filter(s => s(0)!=\"\\\"age\\\"\").map(\n" +
      " s => Bank(s(0).toInt, \n" +
      " s(1).replaceAll(\"\\\"\", \"\"),\n" +
      " s(2).replaceAll(\"\\\"\", \"\"),\n" +
      " s(3).replaceAll(\"\\\"\", \"\"),\n" +
      " s(5).replaceAll(\"\\\"\", \"\").toInt\n" +
      " )\n" +
      ").toDF()", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code(), context.out.toString());

  // spark version
  result = interpreter.interpret("sc.version", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());

  // spark sql test
  String version = output.trim();

  // create dataset from case class
  context = getInterpreterContext();
  result = interpreter.interpret("case class Person(id:Int, name:String, age:Int, country:String)\n" +
      "val df2 = spark.createDataFrame(Seq(Person(1, \"andy\", 20, \"USA\"), " +
      "Person(2, \"jeff\", 23, \"China\"), Person(3, \"james\", 18, \"USA\")))\n" +
      "df2.printSchema\n" +
      "df2.show() ", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());

  result = interpreter.interpret("spark", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());

  result = interpreter.interpret(
      "val df = spark.createDataFrame(Seq((1,\"a\"),(2, null)))\n" +
      "df.show()", getInterpreterContext());
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  // SPARK-43063 changed the output of null to NULL
  assertTrue(StringUtils.containsIgnoreCase(output,
      "+---+----+\n" +
      "| _1| _2|\n" +
      "+---+----+\n" +
      "| 1| a|\n" +
      "| 2|null|\n" +
      "+---+----+"));

  // ZeppelinContext
  context = getInterpreterContext();
  result = interpreter.interpret("z.show(df)", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code(), context.out.toString());
  assertEquals(InterpreterResult.Type.TABLE, messageOutput.getType());
  messageOutput.flush();
  assertEquals("_1\t_2\n1\ta\n2\tnull\n", messageOutput.toInterpreterResultMessage().getData());

  context = getInterpreterContext();
  result = interpreter.interpret("z.input(\"name\", \"default_name\")", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  assertEquals(1, context.getGui().getForms().size());
  assertTrue(context.getGui().getForms().get("name") instanceof TextBox);
  TextBox textBox = (TextBox) context.getGui().getForms().get("name");
  assertEquals("name", textBox.getName());
  assertEquals("default_name", textBox.getDefaultValue());

  context = getInterpreterContext();
  result = interpreter.interpret("z.password(\"pwd\")", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  assertEquals(1, context.getGui().getForms().size());
  assertTrue(context.getGui().getForms().get("pwd") instanceof Password);
  Password pwd = (Password) context.getGui().getForms().get("pwd");
  assertEquals("pwd", pwd.getName());

  context = getInterpreterContext();
  result = interpreter.interpret("z.checkbox(\"checkbox_1\", Seq((\"value_1\", \"name_1\"), (\"value_2\", \"name_2\")), Seq(\"value_2\"))", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  assertEquals(1, context.getGui().getForms().size());
  assertTrue(context.getGui().getForms().get("checkbox_1") instanceof CheckBox);
  CheckBox checkBox = (CheckBox) context.getGui().getForms().get("checkbox_1");
  assertEquals("checkbox_1", checkBox.getName());
  assertEquals(1, checkBox.getDefaultValue().length);
  assertEquals("value_2", checkBox.getDefaultValue()[0]);
  assertEquals(2, checkBox.getOptions().length);
  assertEquals("value_1", checkBox.getOptions()[0].getValue());
  assertEquals("name_1", checkBox.getOptions()[0].getDisplayName());
  assertEquals("value_2", checkBox.getOptions()[1].getValue());
  assertEquals("name_2", checkBox.getOptions()[1].getDisplayName());

  context = getInterpreterContext();
  result = interpreter.interpret("z.select(\"select_1\", Seq((\"value_1\", \"name_1\"), (\"value_2\", \"name_2\")), Seq(\"value_2\"))", context);
  assertEquals(InterpreterResult.Code.SUCCESS, result.code());
  assertEquals(1, context.getGui().getForms().size());
  assertTrue(context.getGui().getForms().get("select_1") instanceof Select);
  Select select = (Select) context.getGui().getForms().get("select_1");
  assertEquals("select_1", select.getName());
  // TODO(zjffdu) it seems a bug of GUI, the default value should be 'value_2', but it is List(value_2)
  // assertEquals("value_2", select.getDefaultValue());
  assertEquals(2, select.getOptions().length);
  assertEquals("value_1", select.getOptions()[0].getValue());
  assertEquals("name_1", select.getOptions()[0].getDisplayName());
  assertEquals("value_2", select.getOptions()[1].getValue());
  assertEquals("name_2", select.getOptions()[1].getDisplayName());

  // completions
  List<InterpreterCompletion> completions = interpreter.completion("a.", 2, getInterpreterContext());
  assertTrue(completions.size() > 0);

  completions = interpreter.completion("a.isEm", 6, getInterpreterContext());
  assertEquals(1, completions.size());
  assertEquals("isEmpty", completions.get(0).name);

  completions = interpreter.completion("sc.ra", 5, getInterpreterContext());
  assertEquals(1, completions.size());
  assertEquals("range", completions.get(0).name);

  // cursor in middle of code
  completions = interpreter.completion("sc.ra\n1+1", 5, getInterpreterContext());
  assertEquals(1, completions.size());
  assertEquals("range", completions.get(0).name);

  if (!interpreter.isScala213()) {
    // Zeppelin-Display
    result = interpreter.interpret("import org.apache.zeppelin.display.angular.notebookscope._\n" +
        "import AngularElem._", getInterpreterContext());
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());

    context = getInterpreterContext();
    result = interpreter.interpret("<div style=\"color:blue\">\n" +
        "<h4>Hello Angular Display System</h4>\n" +
        "</div>.display", context);
    assertEquals(InterpreterResult.Code.SUCCESS, result.code(), context.out.toString());
    assertEquals(InterpreterResult.Type.ANGULAR, messageOutput.getType());
    assertTrue(messageOutput.toInterpreterResultMessage().getData().contains("Hello Angular Display System"));

    result = interpreter.interpret("<div class=\"btn btn-success\">\n" +
        " Click me\n" +
        "</div>.onClick{() =>\n" +
        " println(\"hello world\")\n" +
        "}.display", getInterpreterContext());
    assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    assertEquals(InterpreterResult.Type.ANGULAR, messageOutput.getType());
    assertTrue(messageOutput.toInterpreterResultMessage().getData().contains("Click me"));
  }

  // getProgress: run a slow job on another thread and expect a non-trivial progress value
  final InterpreterContext context2 = getInterpreterContext();
  Thread interpretThread = new Thread() {
    @Override
    public void run() {
      InterpreterResult result = null;
      try {
        result = interpreter.interpret(
            "val df = sc.parallelize(1 to 10, 5).foreach(e=>Thread.sleep(1000))", context2);
      } catch (InterpreterException e) {
        e.printStackTrace();
      }
      assertEquals(InterpreterResult.Code.SUCCESS, result.code());
    }
  };
  interpretThread.start();
  boolean nonZeroProgress = false;
  int progress = 0;
  while (interpretThread.isAlive()) {
    progress = interpreter.getProgress(context2);
    assertTrue(progress >= 0);
    if (progress != 0 && progress != 100) {
      nonZeroProgress = true;
    }
    Thread.sleep(100);
  }
  assertTrue(nonZeroProgress);

  // cancel: interrupt a slow job and expect an ERROR result mentioning "cancelled"
  final InterpreterContext context3 = getInterpreterContext();
  interpretThread = new Thread() {
    @Override
    public void run() {
      InterpreterResult result = null;
      try {
        result = interpreter.interpret(
            "val df = sc.parallelize(1 to 10, 2).foreach(e=>Thread.sleep(1000))", context3);
      } catch (InterpreterException e) {
        e.printStackTrace();
      }
      assertEquals(InterpreterResult.Code.ERROR, result.code());
      assertTrue(output.contains("cancelled"));
    }
  };
  interpretThread.start();
  // sleep 1 second to wait for the spark job start
  Thread.sleep(1000);
  interpreter.cancel(context3);
  interpretThread.join();
}
/**
 * Publishes (or updates) a configuration by delegating directly to the underlying agent.
 *
 * @param dataId           config data id
 * @param group            config group
 * @param tenant           tenant (namespace) id
 * @param appName          owning application name
 * @param tag              config tag
 * @param betaIps          target ips for a beta publish — semantics defined by the agent
 * @param content          the config content to publish
 * @param encryptedDataKey data key accompanying encrypted content
 * @param casMd5           md5 used for compare-and-swap publishing
 * @param type             config type, e.g. "properties"
 * @return whatever the agent reports: true on success
 * @throws NacosException if the underlying agent fails
 */
public boolean publishConfig(String dataId, String group, String tenant, String appName, String tag,
        String betaIps, String content, String encryptedDataKey, String casMd5, String type)
        throws NacosException {
    return agent.publishConfig(dataId, group, tenant, appName, tag, betaIps, content, encryptedDataKey,
            casMd5, type);
}
// publishConfig must not propagate a transport-level NacosException; the failure
// is reported by returning false instead.
@Test
void testPublishConfigException() throws NacosException {
    Properties prop = new Properties();
    ConfigFilterChainManager filter = new ConfigFilterChainManager(new Properties());
    ServerListManager agent = Mockito.mock(ServerListManager.class);
    final NacosClientProperties nacosClientProperties = NacosClientProperties.PROTOTYPE.derive(prop);
    ClientWorker clientWorker = new ClientWorker(filter, agent, nacosClientProperties);
    String dataId = "a";
    String group = "b";
    String tenant = "c";
    String content = "d";
    String appName = "app";
    String tag = "tag";
    String betaIps = "1.1.1.1";
    String casMd5 = "1111";
    String type = "properties";
    // Make every RPC publish request fail.
    Mockito.when(rpcClient.request(any(ConfigPublishRequest.class), anyLong())).thenThrow(new NacosException());
    boolean b = clientWorker.publishConfig(dataId, group, tenant, appName, tag, betaIps, content, null, casMd5, type);
    assertFalse(b);
}
/**
 * Returns a retained slice of this buffer, i.e. {@code slice().retain()}.
 */
@Override
public ByteBuf retainedSlice() {
    final ByteBuf sliced = slice();
    return sliced.retain();
}
@Test
public void testRetainedDuplicateAfterReleaseRetainedSlice() {
    // A retained slice taken from a buffer must reject retainedDuplicate() once released.
    ByteBuf buffer = newBuffer(1);
    ByteBuf retainedSlice = buffer.retainedSlice(0, 1);
    assertRetainedDuplicateFailAfterRelease(buffer, retainedSlice);
}
/**
 * Computes the next (intermediate) assignment for a single segment while rebalancing it
 * from the current instance state map towards the target one, keeping at least
 * {@code minAvailableReplicas} replicas of the current assignment available.
 *
 * <p>Results are memoized in {@code assignmentMap} keyed by the (current, target) instance
 * sets so that segments with identical current/target instances move in lockstep, and
 * {@code numSegmentsToOffloadMap} is used to prefer instances with the least offload work.
 *
 * @param currentInstanceStateMap current instance -&gt; state for the segment
 * @param targetInstanceStateMap  target instance -&gt; state for the segment
 * @param minAvailableReplicas    minimum number of currently-serving replicas to keep
 * @param lowDiskMode             when true, drop extra instances before adding new ones
 * @param numSegmentsToOffloadMap per-instance count of segments pending offload
 * @param assignmentMap           memoized (current, target) -&gt; chosen next instances
 * @return the next instance state map plus the instances that remain available during the move
 */
@VisibleForTesting
static SingleSegmentAssignment getNextSingleSegmentAssignment(Map<String, String> currentInstanceStateMap,
    Map<String, String> targetInstanceStateMap, int minAvailableReplicas, boolean lowDiskMode,
    Map<String, Integer> numSegmentsToOffloadMap, Map<Pair<Set<String>, Set<String>>, Set<String>> assignmentMap) {
  Map<String, String> nextInstanceStateMap = new TreeMap<>();

  // Assign the segment the same way as other segments if the current and target instances are the same. We need this
  // to guarantee the mirror servers for replica-group based routing strategies.
  Set<String> currentInstances = currentInstanceStateMap.keySet();
  Set<String> targetInstances = targetInstanceStateMap.keySet();
  Pair<Set<String>, Set<String>> assignmentKey = Pair.of(currentInstances, targetInstances);
  Set<String> instancesToAssign = assignmentMap.get(assignmentKey);
  if (instancesToAssign != null) {
    Set<String> availableInstances = new TreeSet<>();
    for (String instanceName : instancesToAssign) {
      String currentInstanceState = currentInstanceStateMap.get(instanceName);
      String targetInstanceState = targetInstanceStateMap.get(instanceName);
      if (currentInstanceState != null) {
        // Instance already serves the segment, so it stays available during the move
        availableInstances.add(instanceName);
        // Use target instance state if available in case the state changes
        nextInstanceStateMap.put(instanceName,
            targetInstanceState != null ? targetInstanceState : currentInstanceState);
      } else {
        nextInstanceStateMap.put(instanceName, targetInstanceState);
      }
    }
    return new SingleSegmentAssignment(nextInstanceStateMap, availableInstances);
  }

  // Add all the common instances
  // Use target instance state in case the state changes
  for (Map.Entry<String, String> entry : targetInstanceStateMap.entrySet()) {
    String instanceName = entry.getKey();
    if (currentInstanceStateMap.containsKey(instanceName)) {
      nextInstanceStateMap.put(instanceName, entry.getValue());
    }
  }

  // Add current instances until the min available replicas achieved
  int numInstancesToKeep = minAvailableReplicas - nextInstanceStateMap.size();
  if (numInstancesToKeep > 0) {
    // Sort instances by number of segments to offload, and keep the ones with the least segments to offload
    List<Triple<String, String, Integer>> instancesInfo =
        getSortedInstancesOnNumSegmentsToOffload(currentInstanceStateMap, nextInstanceStateMap,
            numSegmentsToOffloadMap);
    numInstancesToKeep = Integer.min(numInstancesToKeep, instancesInfo.size());
    for (int i = 0; i < numInstancesToKeep; i++) {
      Triple<String, String, Integer> instanceInfo = instancesInfo.get(i);
      nextInstanceStateMap.put(instanceInfo.getLeft(), instanceInfo.getMiddle());
    }
  }
  // Everything added so far exists in the current assignment, hence is available
  Set<String> availableInstances = new TreeSet<>(nextInstanceStateMap.keySet());

  // After achieving the min available replicas, when low disk mode is enabled, only add new instances when all
  // current instances exist in the next assignment.
  // We want to first drop the extra instances as one step, then add the target instances as another step to avoid the
  // case where segments are first added to the instance before other segments are dropped from the instance, which
  // might cause server running out of disk. Note that even if segment addition and drop happen in the same step,
  // there is no guarantee that server process the segment drop before the segment addition.
  if (!lowDiskMode || currentInstanceStateMap.size() == nextInstanceStateMap.size()) {
    int numInstancesToAdd = targetInstanceStateMap.size() - nextInstanceStateMap.size();
    if (numInstancesToAdd > 0) {
      // Sort instances by number of segments to offload, and add the ones with the least segments to offload
      List<Triple<String, String, Integer>> instancesInfo =
          getSortedInstancesOnNumSegmentsToOffload(targetInstanceStateMap, nextInstanceStateMap,
              numSegmentsToOffloadMap);
      for (int i = 0; i < numInstancesToAdd; i++) {
        Triple<String, String, Integer> instanceInfo = instancesInfo.get(i);
        nextInstanceStateMap.put(instanceInfo.getLeft(), instanceInfo.getMiddle());
      }
    }
  }
  // Memoize so later segments with the same (current, target) sets get the same instances
  assignmentMap.put(assignmentKey, nextInstanceStateMap.keySet());
  return new SingleSegmentAssignment(nextInstanceStateMap, availableInstances);
}
// Walks getNextSingleSegmentAssignment with minAvailableReplicas=1 through various
// current/target overlaps (2 common, 1 common, none, growing and shrinking replica
// counts), checking each intermediate assignment and its available instances.
@Test
public void testOneMinAvailableReplicas() {
  // With 2 common instances, first assignment should be the same as target assignment
  Map<String, String> currentInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host3"), ONLINE);
  Map<String, String> targetInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host4"), ONLINE);
  TableRebalancer.SingleSegmentAssignment assignment =
      getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 1, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host1", "host2")));

  // With 1 common instance, first assignment should be the same as target assignment
  targetInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host4", "host5"), ONLINE);
  assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 1, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  assertEquals(assignment._availableInstances, Collections.singleton("host1"));

  // Without common instance, first assignment should have 1 common instances with current assignment
  targetInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host4", "host5", "host6"), ONLINE);
  assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 1, false);
  assertEquals(assignment._instanceStateMap,
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host4", "host5"), ONLINE));
  assertEquals(assignment._availableInstances, Collections.singleton("host1"));
  // Second assignment should be the same as target assignment
  assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 1, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host4", "host5")));

  // With increasing number of replicas, first assignment should have 1 common instances with current assignment
  targetInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host4", "host5", "host6", "host7"), ONLINE);
  assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 1, false);
  assertEquals(assignment._instanceStateMap,
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host4", "host5", "host6"), ONLINE));
  assertEquals(assignment._availableInstances, Collections.singleton("host1"));
  // Second assignment should be the same as target assignment
  assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 1, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host4", "host5", "host6")));

  // With decreasing number of replicas, first assignment should have 1 common instances with current assignment
  currentInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host3", "host4"), ONLINE);
  targetInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host5", "host6", "host7"), ONLINE);
  assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 1, false);
  assertEquals(assignment._instanceStateMap,
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host5", "host6"), ONLINE));
  assertEquals(assignment._availableInstances, Collections.singleton("host1"));
  // Second assignment should be the same as target assignment
  assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 1, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host5", "host6")));

  // With increasing from 1 replica, first assignment should have 1 common instances with current assignment
  currentInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Collections.singletonList("host1"), ONLINE);
  targetInstanceStateMap =
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host2", "host3", "host4"), ONLINE);
  assignment = getNextSingleSegmentAssignment(currentInstanceStateMap, targetInstanceStateMap, 1, false);
  assertEquals(assignment._instanceStateMap,
      SegmentAssignmentUtils.getInstanceStateMap(Arrays.asList("host1", "host2", "host3"), ONLINE));
  assertEquals(assignment._availableInstances, Collections.singleton("host1"));
  // Second assignment should be the same as target assignment
  assignment = getNextSingleSegmentAssignment(assignment._instanceStateMap, targetInstanceStateMap, 1, false);
  assertEquals(assignment._instanceStateMap, targetInstanceStateMap);
  assertEquals(assignment._availableInstances, new TreeSet<>(Arrays.asList("host2", "host3")));
}
/**
 * Parses {@code query} into a {@link Statement} and delegates to the statement-based
 * {@code prepareQuery} overload. If parsing produced warnings and the session's warning
 * handling level is AS_ERROR, fails with a WARNING_AS_ERROR exception listing all warnings.
 *
 * @param analyzerOptions    options controlling parsing and warning handling
 * @param query              the raw SQL text
 * @param preparedStatements named prepared statements available to the query
 * @param warningCollector   collector that may already hold parser warnings
 * @return the prepared query
 */
@Override
public BuiltInPreparedQuery prepareQuery(AnalyzerOptions analyzerOptions, String query,
    Map<String, String> preparedStatements, WarningCollector warningCollector) {
  Statement wrappedStatement = sqlParser.createStatement(query, createParsingOptions(analyzerOptions));
  if (warningCollector.hasWarnings() && analyzerOptions.getWarningHandlingLevel() == AS_ERROR) {
    throw new PrestoException(WARNING_AS_ERROR, format(
        "Warning handling level set to AS_ERROR. Warnings: %n %s",
        warningCollector.getWarnings().stream()
            .map(PrestoWarning::getMessage)
            .collect(joining(System.lineSeparator()))));
  }
  return prepareQuery(analyzerOptions, wrappedStatement, preparedStatements);
}
// Preparing a plain SELECT yields the parsed Statement itself as the prepared statement.
@Test
public void testSelectStatement() {
    BuiltInPreparedQuery preparedQuery = QUERY_PREPARER.prepareQuery(testAnalyzerOptions,
        "SELECT * FROM foo", emptyPreparedStatements, WarningCollector.NOOP);
    assertEquals(preparedQuery.getStatement(),
        simpleQuery(selectList(new AllColumns()), table(QualifiedName.of("foo"))));
}
/**
 * Routes a property name to the matching resolver: request-scoped config names first,
 * then ksql config names (excluding names under KSQL_STREAMS_PREFIX, which pass
 * through), and finally Kafka Streams config names for everything else.
 */
@Override
public Optional<ConfigItem> resolve(final String propertyName, final boolean strict) {
  if (propertyName.startsWith(KSQL_REQUEST_CONFIG_PROPERTY_PREFIX)) {
    return resolveRequestConfig(propertyName);
  }

  final boolean isKsqlProperty = propertyName.startsWith(KSQL_CONFIG_PROPERTY_PREFIX);
  final boolean isStreamsPassThrough = propertyName.startsWith(KSQL_STREAMS_PREFIX);
  if (isKsqlProperty && !isStreamsPassThrough) {
    return resolveKsqlConfig(propertyName);
  }

  return resolveStreamsConfig(propertyName, strict);
}
// A UDF-scoped property (ksql.functions.<udf>.<prop>) resolves to an unresolved item
// even in strict mode, rather than failing.
@Test
public void shouldReturnUnresolvedForOtherKsqlFunctionProperty() {
    assertThat(
        resolver.resolve(KsqlConfig.KSQL_FUNCTIONS_PROPERTY_PREFIX + "some_udf.some.prop", true),
        is(unresolvedItem(KsqlConfig.KSQL_FUNCTIONS_PROPERTY_PREFIX + "some_udf.some.prop")));
}
/**
 * Resolves the path of the WAR produced by the build: the build's final name, unless
 * the maven-war-plugin's "default-war" execution overrides it via a {@code warName}
 * configuration element.
 *
 * @return path of the WAR file under the build directory
 */
@VisibleForTesting
Path getWarArtifact() {
  Build build = project.getBuild();
  String warName = build.getFinalName();

  Plugin warPlugin = project.getPlugin("org.apache.maven.plugins:maven-war-plugin");
  if (warPlugin != null) {
    for (PluginExecution execution : warPlugin.getExecutions()) {
      if (!"default-war".equals(execution.getId())) {
        continue;
      }
      // The default-war execution may override the artifact name.
      Xpp3Dom configuration = (Xpp3Dom) execution.getConfiguration();
      warName = getChildValue(configuration, "warName").orElse(warName);
    }
  }
  return Paths.get(build.getDirectory(), warName + ".war");
}
// When the maven-war-plugin's default-war execution configures <warName>, that name
// is used for the WAR artifact instead of the build's final name.
@Test
public void testGetWarArtifact_warNameProperty() {
  when(mockBuild.getDirectory()).thenReturn(Paths.get("/foo/bar").toString());
  when(mockBuild.getFinalName()).thenReturn("helloworld-1");
  when(mockMavenProject.getPlugin("org.apache.maven.plugins:maven-war-plugin")).thenReturn(mockPlugin);
  when(mockPlugin.getExecutions()).thenReturn(Arrays.asList(mockPluginExecution));
  when(mockPluginExecution.getId()).thenReturn("default-war");
  when(mockPluginExecution.getConfiguration()).thenReturn(pluginConfiguration);
  addXpp3DomChild(pluginConfiguration, "warName", "baz");

  assertThat(mavenProjectProperties.getWarArtifact()).isEqualTo(Paths.get("/foo/bar/baz.war"));
}
/**
 * Handles a TxnOffsetCommit request: validates it, then for every requested partition
 * either rejects oversized metadata with OFFSET_METADATA_TOO_LARGE or stages an
 * offset-commit record and reports NONE in the response.
 *
 * @param context the request context
 * @param request the transactional offset commit request
 * @return the coordinator result holding the staged records and the response
 * @throws ApiException if the request fails validation
 */
public CoordinatorResult<TxnOffsetCommitResponseData, CoordinatorRecord> commitTransactionalOffset(
    RequestContext context,
    TxnOffsetCommitRequestData request
) throws ApiException {
  validateTransactionalOffsetCommit(context, request);

  final TxnOffsetCommitResponseData response = new TxnOffsetCommitResponseData();
  final List<CoordinatorRecord> records = new ArrayList<>();
  final long currentTimeMs = time.milliseconds();

  request.topics().forEach(topic -> {
    final TxnOffsetCommitResponseTopic topicResponse = new TxnOffsetCommitResponseTopic().setName(topic.name());
    response.topics().add(topicResponse);

    topic.partitions().forEach(partition -> {
      if (isMetadataInvalid(partition.committedMetadata())) {
        // Oversized metadata: reject this partition, no record staged
        topicResponse.partitions().add(new TxnOffsetCommitResponsePartition()
            .setPartitionIndex(partition.partitionIndex())
            .setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
      } else {
        log.debug("[GroupId {}] Committing transactional offsets {} for partition {}-{} from member {} with leader epoch {}.",
            request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
            request.memberId(), partition.committedLeaderEpoch());

        topicResponse.partitions().add(new TxnOffsetCommitResponsePartition()
            .setPartitionIndex(partition.partitionIndex())
            .setErrorCode(Errors.NONE.code()));

        final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
            partition,
            currentTimeMs
        );

        records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
            request.groupId(),
            topic.name(),
            partition.partitionIndex(),
            offsetAndMetadata,
            metadataImage.features().metadataVersion()
        ));
      }
    });
  });

  if (!records.isEmpty()) {
    // Only count partitions that actually produced a commit record
    metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
  }

  return new CoordinatorResult<>(records, response);
}
// A transactional offset commit from a member of a stable classic (generic) group
// succeeds with NONE and stages exactly one offset-commit record.
@Test
public void testGenericGroupTransactionalOffsetCommit() {
  OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();

  // Create a group.
  ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(
      "foo",
      true
  );

  // Add member.
  ClassicGroupMember member = mkGenericMember("member", Optional.empty());
  group.add(member);

  // Transition to next generation.
  group.transitionTo(ClassicGroupState.PREPARING_REBALANCE);
  group.initNextGeneration();
  assertEquals(1, group.generationId());
  group.transitionTo(ClassicGroupState.STABLE);

  CoordinatorResult<TxnOffsetCommitResponseData, CoordinatorRecord> result = context.commitTransactionalOffset(
      new TxnOffsetCommitRequestData()
          .setGroupId("foo")
          .setMemberId("member")
          .setGenerationId(1)
          .setTopics(Collections.singletonList(
              new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic()
                  .setName("bar")
                  .setPartitions(Collections.singletonList(
                      new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition()
                          .setPartitionIndex(0)
                          .setCommittedOffset(100L)
                          .setCommittedLeaderEpoch(10)
                          .setCommittedMetadata("metadata")
                  ))
          ))
  );

  assertEquals(
      new TxnOffsetCommitResponseData()
          .setTopics(Collections.singletonList(
              new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic()
                  .setName("bar")
                  .setPartitions(Collections.singletonList(
                      new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition()
                          .setPartitionIndex(0)
                          .setErrorCode(Errors.NONE.code())
                  ))
          )),
      result.response()
  );

  assertEquals(
      Collections.singletonList(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
          "foo",
          "bar",
          0,
          new OffsetAndMetadata(
              100L,
              OptionalInt.of(10),
              "metadata",
              context.time.milliseconds(),
              OptionalLong.empty()
          ),
          MetadataImage.EMPTY.features().metadataVersion()
      )),
      result.records()
  );
}
/**
 * Moves the skeleton one step in its current patrol direction and reverses the
 * direction when a patrol boundary is reached exactly.
 */
@Override
public void update() {
  position += patrollingLeft ? -1 : 1;
  if (patrollingLeft && position == PATROLLING_LEFT_BOUNDING) {
    patrollingLeft = false;
  } else if (!patrollingLeft && position == PATROLLING_RIGHT_BOUNDING) {
    patrollingLeft = true;
  }
  logger.info("Skeleton {} is on position {}.", id, position);
}
// One update() while patrolling left moves the skeleton exactly one position to the left.
@Test
void testUpdateForPatrollingLeft() {
    skeleton.patrollingLeft = true;
    skeleton.setPosition(50);
    skeleton.update();
    assertEquals(49, skeleton.getPosition());
}
public FEELFnResult<Boolean> invoke(@ParameterName( "point1" ) Comparable point1, @ParameterName( "point2" ) Comparable point2) { if ( point1 == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point1", "cannot be null")); } if ( point2 == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point2", "cannot be null")); } try { boolean result = point1.compareTo( point2 ) == 0; return FEELFnResult.ofResult( result ); } catch( Exception e ) { // points are not comparable return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "point1", "cannot be compared to point2")); } }
// A null point on either side must produce an InvalidParametersEvent error result.
@Test
void invokeParamIsNull() {
    FunctionTestUtil.assertResultError(coincidesFunction.invoke(null, "b"), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(coincidesFunction.invoke("a", null), InvalidParametersEvent.class);
}
/**
 * Creates an ExitStatus configured by the given options.
 *
 * @param options the options this instance consults; not validated for null here
 */
public ExitStatus(Options options) {
    this.options = options;
}
// Finishing a test case with PENDING status results in exit status 0x1.
@Test
void with_pending_scenarios() {
    createRuntime();
    bus.send(testCaseFinishedWithStatus(Status.PENDING));
    assertThat(exitStatus.exitStatus(), is(equalTo((byte) 0x1)));
}
/**
 * Handles a stream error by logging it; no recovery or rethrow is attempted here.
 *
 * @param t the error raised while writing messages
 */
@Override
public void onError(final Throwable t) {
    LOG.error("Messages write occur errors", t);
}
// onError only logs the failure, so invoking it must complete without throwing.
@Test
public void onError() {
    Throwable throwable = new Throwable();
    messageWriter.onError(throwable);
}