focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Checks whether the given string is wrapped in exactly one pair of quote
 * characters with at least one non-quote character between them.
 *
 * @param input the string to inspect; may be {@code null}
 * @return {@code true} only for a non-blank string of the form
 *         QUOTE_CHAR + body + QUOTE_CHAR where the body contains no QUOTE_CHAR
 */
protected static boolean isSingleQuoted(String input) {
    if (input == null || input.isBlank()) {
        return false;
    }
    // String.matches() anchors to the whole input, so the pattern must consume everything.
    String quotedPattern = "(^" + QUOTE_CHAR + "{1}([^" + QUOTE_CHAR + "]+)" + QUOTE_CHAR + "{1})";
    return input.matches(quotedPattern);
}
// A plain, unquoted string (containing a space) must not be treated as single-quoted.
@Test public void testSingleQuotedNegative() { assertFalse(isSingleQuoted("arg 0")); }
/**
 * Reads the global configuration from the default configuration directory.
 *
 * @return the parsed global configuration
 * @throws IOException if the configuration file cannot be read
 * @throws InvalidGlobalConfigException if the configuration content is invalid
 */
public static GlobalConfig readConfig() throws IOException, InvalidGlobalConfigException { return readConfig(getConfigDir()); }
// A registryMirrors entry without the mandatory "registry" property must make
// readConfig throw, with a message pointing at the FAQ and the config location.
@Test
public void testReadConfig_missingRegistry() throws IOException {
    String json = "{\"registryMirrors\":[{\"mirrors\":[\"mirror.gcr.io\"]}]}";
    Files.write(configDir.resolve("config.json"), json.getBytes(StandardCharsets.UTF_8));

    InvalidGlobalConfigException exception =
        assertThrows(InvalidGlobalConfigException.class, () -> GlobalConfig.readConfig(configDir));
    assertThat(exception)
        .hasMessageThat()
        .startsWith(
            "'registryMirrors.registry' property is missing; see "
                + "https://github.com/GoogleContainerTools/jib/blob/master/docs/faq.md#where-is-the-global-jib-configuration-file-and-how-i-can-configure-it "
                + "to fix or you may need to delete ");
}
/**
 * Extracts table references from an INSERT statement: the target table, the
 * insert column list, any ON DUPLICATE KEY UPDATE assignments, and an optional
 * INSERT ... SELECT sub-query.
 *
 * @param insertStatement insert statement to scan
 */
public void extractTablesFromInsert(final InsertStatement insertStatement) {
    if (null != insertStatement.getTable()) {
        extractTablesFromTableSegment(insertStatement.getTable());
    }
    // Iterating an empty collection is a no-op, so no emptiness guard is needed.
    for (ColumnSegment each : insertStatement.getColumns()) {
        extractTablesFromExpression(each);
    }
    insertStatement.getOnDuplicateKeyColumns().ifPresent(optional -> extractTablesFromAssignmentItems(optional.getColumns()));
    insertStatement.getInsertSelect().ifPresent(optional -> extractTablesFromSelect(optional.getSelect()));
}
// The extractor must report both the INSERT target table (t_order at 122-128)
// and the owner of the ON DUPLICATE KEY column (t_order at 130-132), in order.
@Test
void assertExtractTablesFromInsert() {
    InsertStatement insertStatement = mock(InsertStatement.class);
    when(insertStatement.getTable()).thenReturn(new SimpleTableSegment(new TableNameSegment(122, 128, new IdentifierValue("t_order"))));
    // Build an ON DUPLICATE KEY assignment whose column is owner-qualified (t_order.id).
    Collection<ColumnAssignmentSegment> assignmentSegments = new LinkedList<>();
    ColumnSegment columnSegment = new ColumnSegment(133, 136, new IdentifierValue("id"));
    columnSegment.setOwner(new OwnerSegment(130, 132, new IdentifierValue("t_order")));
    assignmentSegments.add(new ColumnAssignmentSegment(130, 140, Collections.singletonList(columnSegment), new LiteralExpressionSegment(141, 142, 1)));
    when(insertStatement.getOnDuplicateKeyColumns()).thenReturn(Optional.of(new OnDuplicateKeyColumnsSegment(130, 140, assignmentSegments)));
    tableExtractor.extractTablesFromInsert(insertStatement);
    assertThat(tableExtractor.getRewriteTables().size(), is(2));
    Iterator<SimpleTableSegment> tableSegmentIterator = tableExtractor.getRewriteTables().iterator();
    assertTableSegment(tableSegmentIterator.next(), 122, 128, "t_order");
    assertTableSegment(tableSegmentIterator.next(), 130, 132, "t_order");
}
/**
 * Migrates a Kafka Connect cluster from a Deployment to StrimziPodSets by
 * moving one pod at a time.
 *
 * @param deployment the existing Deployment, or {@code null} if it is already gone
 * @param podSet the StrimziPodSet created so far, or {@code null} if none exists yet
 * @return future completed when the migration (or its next step) is done
 */
public Future<Void> migrateFromDeploymentToStrimziPodSets(Deployment deployment, StrimziPodSet podSet) {
    if (deployment == null) {
        // Deployment does not exist anymore => no migration needed
        return Future.succeededFuture();
    } else {
        int depReplicas = deployment.getSpec().getReplicas();
        int podSetReplicas = podSet != null ? podSet.getSpec().getPods().size() : 0;
        // Scale the Deployment down by one and the PodSet up by one,
        // capping the PodSet size at the desired Connect replica count.
        return moveOnePodFromDeploymentToStrimziPodSet(depReplicas - 1, Math.min(podSetReplicas + 1, connect.getReplicas()));
    }
}
// Verifies the full Deployment -> StrimziPodSet migration: pods are moved one at
// a time (PodSet scaled up, pod readiness awaited, Deployment scaled down,
// Deployment readiness awaited) until the Deployment is finally deleted.
@Test
public void testMigrationToPodSets(VertxTestContext context) {
    DeploymentOperator mockDepOps = mock(DeploymentOperator.class);
    StrimziPodSetOperator mockPodSetOps = mock(StrimziPodSetOperator.class);
    PodOperator mockPodOps = mock(PodOperator.class);
    // Records the Kubernetes operations performed, in order.
    LinkedList<String> events = mockKubernetes(mockDepOps, mockPodSetOps, mockPodOps);

    KafkaConnectMigration migration = new KafkaConnectMigration(
            RECONCILIATION,
            CLUSTER,
            null,
            null,
            1_000L,
            false,
            null,
            null,
            null,
            mockDepOps,
            mockPodSetOps,
            mockPodOps
    );

    Checkpoint async = context.checkpoint();
    migration.migrateFromDeploymentToStrimziPodSets(
            DEPLOYMENT,
            null
    ).onComplete(context.succeeding(v -> context.verify(() -> {
        // 11 operations: three move cycles (the last without a Deployment scale-down)
        // followed by the Deployment deletion.
        assertThat(events.size(), is(11));
        assertThat(events.poll(), is("POD-SET-RECONCILE-TO-1"));
        assertThat(events.poll(), is("POD-READINESS-my-connect-connect-0"));
        assertThat(events.poll(), is("DEP-SCALE-DOWN-TO-2"));
        assertThat(events.poll(), is("DEP-READINESS-" + COMPONENT_NAME));
        assertThat(events.poll(), is("POD-SET-RECONCILE-TO-2"));
        assertThat(events.poll(), is("POD-READINESS-my-connect-connect-1"));
        assertThat(events.poll(), is("DEP-SCALE-DOWN-TO-1"));
        assertThat(events.poll(), is("DEP-READINESS-" + COMPONENT_NAME));
        assertThat(events.poll(), is("POD-SET-RECONCILE-TO-3"));
        assertThat(events.poll(), is("POD-READINESS-my-connect-connect-2"));
        assertThat(events.poll(), is("DEP-DELETE-" + COMPONENT_NAME));
        async.flag();
    })));
}
/**
 * Notifies every registered listener that the given projects were rekeyed.
 * Each call goes through safelyCallListener, so one failing listener does not
 * prevent the others from being notified.
 *
 * @param rekeyedProjects the rekeyed projects; must not be null, no-op when empty
 */
@Override
public void onProjectsRekeyed(Set<RekeyedProject> rekeyedProjects) {
    checkNotNull(rekeyedProjects, "rekeyedProjects can't be null");
    if (rekeyedProjects.isEmpty()) {
        return;
    }
    Arrays.stream(listeners)
        .forEach(safelyCallListener(listener -> listener.onProjectsRekeyed(rekeyedProjects)));
}
// Listeners must be invoked in the order they were passed to the constructor,
// each receiving the very same set instance (verified with same()).
@Test
@UseDataProvider("oneOrManyRekeyedProjects")
public void onProjectsRekeyed_calls_all_listeners_in_order_of_addition_to_constructor(Set<RekeyedProject> projects) {
    InOrder inOrder = Mockito.inOrder(listener1, listener2, listener3);

    underTestWithListeners.onProjectsRekeyed(projects);

    inOrder.verify(listener1).onProjectsRekeyed(same(projects));
    inOrder.verify(listener2).onProjectsRekeyed(same(projects));
    inOrder.verify(listener3).onProjectsRekeyed(same(projects));
    inOrder.verifyNoMoreInteractions();
}
/**
 * Publishes SCM changed-lines information to the scanner report, but only when
 * a target branch is configured; otherwise this is a no-op.
 *
 * @param writer report writer the changed lines are written to
 */
@Override
public void publish(ScannerReportWriter writer) {
    getTargetBranch().ifPresent(branch -> {
        Profiler profiler = Profiler.create(LOG).startInfo(LOG_MSG);
        int count = writeChangedLines(scmConfiguration.provider(), writer, branch);
        LOG.debug("SCM reported changed lines for {} {} in the branch", count, ScannerUtils.pluralize("file", count));
        profiler.stopInfo();
    });
}
// Publishing with a Git provider must query changed lines for the target branch.
// NOTE(review): the stub configures branchChangedLines(...) but the verification
// targets branchChangedLinesWithFileMovementDetection(...) — confirm which method
// the publisher actually calls; the stub may be vestigial.
@Test
public void write_changed_file_with_GitScmProvider() {
    GitScmProvider provider = mock(GitScmProvider.class);
    when(scmConfiguration.provider()).thenReturn(provider);
    Set<Integer> lines = new HashSet<>(Arrays.asList(1, 10));
    when(provider.branchChangedLines(eq(TARGET_BRANCH), eq(BASE_DIR), anySet()))
        .thenReturn(ImmutableMap.of(BASE_DIR.resolve("path1"), lines, BASE_DIR.resolve("path3"), Collections.emptySet()));

    publisher.publish(writer);

    verify(provider).branchChangedLinesWithFileMovementDetection(eq(TARGET_BRANCH), eq(BASE_DIR), anyMap());
}
/**
 * Drains up to {@code maxRecords} fetched records into a new ShareInFlightBatch,
 * matching fetched records against the acquired-offsets list: matching offsets are
 * parsed and added, acquired offsets with no record at them become gaps, and
 * unacquired records are skipped. A deserialization or CRC failure occurring when
 * the batch is non-empty is cached and surfaced on the next call, so already-parsed
 * records are delivered first.
 *
 * @param deserializers key/value deserializers used to parse records
 * @param maxRecords maximum number of records to put into the batch
 * @param checkCrcs whether to verify record CRCs while fetching
 * @return the batch of records, gaps and/or a pending exception
 */
<K, V> ShareInFlightBatch<K, V> fetchRecords(final Deserializers<K, V> deserializers, final int maxRecords, final boolean checkCrcs) {
    // Start with an empty batch for this partition.
    ShareInFlightBatch<K, V> inFlightBatch = new ShareInFlightBatch<>(partition);

    if (cachedBatchException != null) {
        // A CRC check failed on a previous call: reject the entire record batch because it is corrupt.
        rejectRecordBatch(inFlightBatch, currentBatch);
        inFlightBatch.setException(cachedBatchException);
        cachedBatchException = null;
        return inFlightBatch;
    }
    if (cachedRecordException != null) {
        // A single record failed to deserialize on a previous call: release it and surface the exception.
        inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE);
        inFlightBatch.setException(cachedRecordException);
        cachedRecordException = null;
        return inFlightBatch;
    }
    if (isConsumed)
        return inFlightBatch;

    initializeNextAcquired();
    try {
        int recordsInBatch = 0;
        while (recordsInBatch < maxRecords) {
            lastRecord = nextFetchedRecord(checkCrcs);
            if (lastRecord == null) {
                // Any remaining acquired records are gaps
                while (nextAcquired != null) {
                    inFlightBatch.addGap(nextAcquired.offset);
                    nextAcquired = nextAcquiredRecord();
                }
                break;
            }
            while (nextAcquired != null) {
                if (lastRecord.offset() == nextAcquired.offset) {
                    // It's acquired, so we parse it and add it to the batch
                    Optional<Integer> leaderEpoch = maybeLeaderEpoch(currentBatch.partitionLeaderEpoch());
                    TimestampType timestampType = currentBatch.timestampType();
                    ConsumerRecord<K, V> record = parseRecord(deserializers, partition, leaderEpoch, timestampType, lastRecord, nextAcquired.deliveryCount);
                    inFlightBatch.addRecord(record);
                    recordsRead++;
                    bytesRead += lastRecord.sizeInBytes();
                    recordsInBatch++;
                    nextAcquired = nextAcquiredRecord();
                    break;
                } else if (lastRecord.offset() < nextAcquired.offset) {
                    // It's not acquired, so we skip it
                    break;
                } else {
                    // It's acquired, but there's no non-control record at this offset, so it's a gap
                    inFlightBatch.addGap(nextAcquired.offset);
                }
                nextAcquired = nextAcquiredRecord();
            }
        }
    } catch (SerializationException se) {
        nextAcquired = nextAcquiredRecord();
        if (inFlightBatch.isEmpty()) {
            // Nothing parsed yet: release the bad record and surface the exception now.
            inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE);
            inFlightBatch.setException(se);
        } else {
            // Defer the exception so records parsed so far are delivered first.
            cachedRecordException = se;
            inFlightBatch.setHasCachedException(true);
        }
    } catch (CorruptRecordException e) {
        if (inFlightBatch.isEmpty()) {
            // A CRC check failed: reject the entire record batch because it is corrupt.
            rejectRecordBatch(inFlightBatch, currentBatch);
            inFlightBatch.setException(e);
        } else {
            // Defer the batch rejection until the next call.
            cachedBatchException = e;
            inFlightBatch.setHasCachedException(true);
        }
    }
    return inFlightBatch;
}
// Acquired offsets start 500 records into the fetched data: fetchRecords must skip
// the unacquired prefix, return exactly the 10 acquired records (starting at
// offset 510, delivery count 1), and a second call must return nothing.
@Test
public void testUnaligned() {
    long firstMessageId = 5;
    long startingOffset = 10L;
    int numRecords = 10;
    ShareFetchResponseData.PartitionData partitionData = new ShareFetchResponseData.PartitionData()
            .setRecords(newRecords(startingOffset, numRecords + 500, firstMessageId))
            .setAcquiredRecords(acquiredRecords(startingOffset + 500, numRecords));
    Deserializers<String, String> deserializers = newStringDeserializers();

    ShareCompletedFetch completedFetch = newShareCompletedFetch(partitionData);

    ShareInFlightBatch<String, String> batch = completedFetch.fetchRecords(deserializers, 10, true);
    List<ConsumerRecord<String, String>> records = batch.getInFlightRecords();
    assertEquals(10, records.size());
    ConsumerRecord<String, String> record = records.get(0);
    assertEquals(510L, record.offset());
    assertEquals(Optional.of((short) 1), record.deliveryCount());
    Acknowledgements acknowledgements = batch.getAcknowledgements();
    assertEquals(0, acknowledgements.size());

    // Everything acquired was consumed by the first call.
    batch = completedFetch.fetchRecords(deserializers, 10, true);
    records = batch.getInFlightRecords();
    assertEquals(0, records.size());
    acknowledgements = batch.getAcknowledgements();
    assertEquals(0, acknowledgements.size());
}
/**
 * Computes the initial (forward-azimuth) great-circle bearing from this point
 * to {@code other}, in degrees clockwise from north, normalized to [0, 360).
 *
 * @param other the destination point
 * @return the initial bearing in degrees, in the range [0, 360)
 */
public double bearingTo(final IGeoPoint other) {
    final double fromLat = Math.toRadians(this.mLatitude);
    final double fromLon = Math.toRadians(this.mLongitude);
    final double toLat = Math.toRadians(other.getLatitude());
    final double toLon = Math.toRadians(other.getLongitude());
    final double deltaLon = toLon - fromLon;

    // Forward azimuth: atan2(y, x) with
    //   y = sin(dLon) * cos(lat2)
    //   x = cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(dLon)
    final double y = Math.sin(deltaLon) * Math.cos(toLat);
    final double x = Math.cos(fromLat) * Math.sin(toLat)
            - Math.sin(fromLat) * Math.cos(toLat) * Math.cos(deltaLon);

    // atan2 yields (-180, 180]; shift into [0, 360).
    return (Math.toDegrees(Math.atan2(y, x)) + 360) % 360;
}
// A point due west (same latitude, smaller longitude) must yield a 270-degree bearing.
@Test
public void test_bearingTo_west() {
    final GeoPoint target = new GeoPoint(0.0, 0.0);
    final GeoPoint other = new GeoPoint(0.0, -10.0);
    assertEquals("directly west", 270, Math.round(target.bearingTo(other)));
}
/**
 * Updates only the state field of an existing Datastream stream and blocks
 * until the update completes.
 *
 * @param streamId identifier of the stream to update
 * @param state the new state to apply
 * @return the updated stream returned by the Datastream API
 * @throws DatastreamResourceManagerException if the update is interrupted or fails
 */
public synchronized Stream updateStreamState(String streamId, Stream.State state) {
    LOG.info("Updating {}'s state to {} in project {}.", streamId, state.name(), projectId);
    try {
        Stream.Builder streamBuilder = Stream.newBuilder()
            .setName(StreamName.format(projectId, location, streamId))
            .setState(state);
        // Restrict the update to the state field only.
        FieldMask.Builder fieldMaskBuilder = FieldMask.newBuilder().addPaths(FIELD_STATE);
        UpdateStreamRequest request = UpdateStreamRequest.newBuilder()
            .setStream(streamBuilder)
            .setUpdateMask(fieldMaskBuilder)
            .build();
        // Block on the async call until the update finishes.
        Stream reference = datastreamClient.updateStreamAsync(request).get();
        LOG.info("Successfully updated {}'s state to {} in project {}.", streamId, state.name(), projectId);
        return reference;
    } catch (InterruptedException | ExecutionException e) {
        throw new DatastreamResourceManagerException("Failed to update stream. ", e);
    }
}
// An InterruptedException from the Datastream client must be wrapped in
// DatastreamResourceManagerException by the manager.
// NOTE(review): stubbing `updateStreamAsync(...).get()` in one expression relies
// on a deep-stubbed mock of datastreamClient — confirm deep stubs are enabled.
@Test
public void testUpdateStreamStateInterruptedExceptionShouldFail()
        throws ExecutionException, InterruptedException {
    when(datastreamClient.updateStreamAsync(any(UpdateStreamRequest.class)).get())
        .thenThrow(InterruptedException.class);
    DatastreamResourceManagerException exception =
        assertThrows(
            DatastreamResourceManagerException.class,
            () -> testManager.updateStreamState(STREAM_ID, State.RUNNING));
    assertThat(exception).hasMessageThat().contains("Failed to update stream.");
}
// Metadata flag: DDL statements are not ignored inside transactions, so always false.
@Override public boolean dataDefinitionIgnoredInTransactions() { return false; }
// The metadata must report that DDL is NOT ignored in transactions.
@Test void assertDataDefinitionIgnoredInTransactions() { assertFalse(metaData.dataDefinitionIgnoredInTransactions()); }
/**
 * Parses an address string into an InetSocketAddress, using {@code defaultPort}
 * when the input carries no port. Delegates to the three-argument overload with
 * {@code false} as the final flag (see that overload for the flag's meaning).
 *
 * @param address the address string to parse
 * @param defaultPort the port to use when none is present in {@code address}
 */
public static InetSocketAddress parseAddress(String address, int defaultPort) { return parseAddress(address, defaultPort, false); }
// A bracketed IPv6 literal followed by ":" with no digits must fall back to the
// default port (80) and still resolve the address.
@Test
void shouldParseAddressForIPv6WithoutPort() {
    InetSocketAddress socketAddress = AddressUtils.parseAddress("[1abc:2abc:3abc::5ABC:6abc]:", 80);
    assertThat(socketAddress.isUnresolved()).isFalse();
    assertThat(socketAddress.getAddress().getHostAddress()).isEqualTo("1abc:2abc:3abc:0:0:0:5abc:6abc");
    assertThat(socketAddress.getPort()).isEqualTo(80);
    assertThat(socketAddress.getHostString()).isEqualTo("1abc:2abc:3abc:0:0:0:5abc:6abc");
}
protected boolean isVfsPath( String filePath ) { boolean ret = false; try { VFSFileProvider vfsFileProvider = (VFSFileProvider) providerService.get( VFSFileProvider.TYPE ); if ( vfsFileProvider == null ) { return false; } return vfsFileProvider.isSupported( filePath ); } catch ( InvalidFileProviderException | ClassCastException e ) { // DO NOTHING } return ret; }
// A pvfs:// path must be recognized as a VFS path when the provider service
// supplies a VFS provider that reports it as supported.
@Test
public void testIsVfsPath() throws Exception {
    // SETUP
    String vfsPath = "pvfs://someConnection/someFilePath";
    ProviderService mockProviderService = mock( ProviderService.class );
    VFSFileProvider mockVFSFileProvider = mock( VFSFileProvider.class );
    when( mockProviderService.get( VFSFileProvider.TYPE ) ).thenReturn( mockVFSFileProvider );
    when( mockVFSFileProvider.isSupported( any() ) ).thenReturn( true );

    FileOpenSaveExtensionPoint testInstance = new FileOpenSaveExtensionPoint( mockProviderService, null );

    assertTrue( testInstance.isVfsPath( vfsPath ) );
}
/**
 * Creates a new writer for this dynamic Pub/Sub sink.
 *
 * @return a fresh PubsubWriter instance
 */
@Override public Sink.SinkWriter<WindowedValue<PubsubMessage>> writer() { return new PubsubDynamicSink.PubsubWriter(); }
// Messages written with per-message topics must be grouped into one
// PubSubMessageBundle per topic on the Windmill commit request, each bundle
// carrying the configured timestamp ("ts") and id ("id") attribute labels.
@Test
public void testWriteDynamicDestinations() throws Exception {
    Windmill.WorkItemCommitRequest.Builder outputBuilder =
        Windmill.WorkItemCommitRequest.newBuilder()
            .setKey(ByteString.copyFromUtf8("key"))
            .setWorkToken(0);
    when(mockContext.getOutputBuilder()).thenReturn(outputBuilder);

    // Build the sink from a cloud spec, as the service would.
    Map<String, Object> spec = new HashMap<>();
    spec.put(PropertyNames.OBJECT_TYPE_NAME, "PubsubDynamicSink");
    spec.put(PropertyNames.PUBSUB_TIMESTAMP_ATTRIBUTE, "ts");
    spec.put(PropertyNames.PUBSUB_ID_ATTRIBUTE, "id");
    CloudObject cloudSinkSpec = CloudObject.fromSpec(spec);
    PubsubDynamicSink sink =
        (PubsubDynamicSink)
            SinkRegistry.defaultRegistry()
                .create(
                    cloudSinkSpec,
                    WindowedValue.getFullCoder(VoidCoder.of(), IntervalWindow.getCoder()),
                    null,
                    mockContext,
                    null)
                .getUnderlyingSink();
    Sink.SinkWriter<WindowedValue<PubsubMessage>> writer = sink.writer();
    List<Windmill.Message> expectedMessages1 = Lists.newArrayList();
    List<Windmill.Message> expectedMessages2 = Lists.newArrayList();
    List<Windmill.Message> expectedMessages3 = Lists.newArrayList();
    for (int i = 0; i < 10; ++i) {
        int baseTimestamp = i * 10;
        byte[] payload1 = String.format("value_%d_%d", i, 1).getBytes(StandardCharsets.UTF_8);
        byte[] payload2 = String.format("value_%d_%d", i, 2).getBytes(StandardCharsets.UTF_8);
        byte[] payload3 = String.format("value_%d_%d", i, 3).getBytes(StandardCharsets.UTF_8);
        // Expected timestamps are the Instant values scaled by 1000.
        expectedMessages1.add(
            Windmill.Message.newBuilder()
                .setTimestamp(baseTimestamp * 1000)
                .setData(
                    Pubsub.PubsubMessage.newBuilder()
                        .setData(ByteString.copyFrom(payload1))
                        .build()
                        .toByteString())
                .build());
        expectedMessages2.add(
            Windmill.Message.newBuilder()
                .setTimestamp((baseTimestamp + 1) * 1000)
                .setData(
                    Pubsub.PubsubMessage.newBuilder()
                        .setData(ByteString.copyFrom(payload2))
                        .build()
                        .toByteString())
                .build());
        expectedMessages3.add(
            Windmill.Message.newBuilder()
                .setTimestamp((baseTimestamp + 2) * 1000)
                .setData(
                    Pubsub.PubsubMessage.newBuilder()
                        .setData(ByteString.copyFrom(payload3))
                        .build()
                        .toByteString())
                .build());
        // Interleave writes across the three topics.
        writer.add(
            WindowedValue.timestampedValueInGlobalWindow(
                new PubsubMessage(payload1, null).withTopic("topic1"),
                new Instant(baseTimestamp)));
        writer.add(
            WindowedValue.timestampedValueInGlobalWindow(
                new PubsubMessage(payload2, null).withTopic("topic2"),
                new Instant(baseTimestamp + 1)));
        writer.add(
            WindowedValue.timestampedValueInGlobalWindow(
                new PubsubMessage(payload3, null).withTopic("topic3"),
                new Instant(baseTimestamp + 2)));
    }
    writer.close();

    // One bundle per topic, each with the configured attribute labels.
    Windmill.WorkItemCommitRequest expectedCommit =
        Windmill.WorkItemCommitRequest.newBuilder()
            .setKey(ByteString.copyFromUtf8("key"))
            .setWorkToken(0)
            .addPubsubMessages(
                Windmill.PubSubMessageBundle.newBuilder()
                    .setTopic("topic1")
                    .setTimestampLabel("ts")
                    .setIdLabel("id")
                    .setWithAttributes(true)
                    .addAllMessages(expectedMessages1))
            .addPubsubMessages(
                Windmill.PubSubMessageBundle.newBuilder()
                    .setTopic("topic2")
                    .setTimestampLabel("ts")
                    .setIdLabel("id")
                    .setWithAttributes(true)
                    .addAllMessages(expectedMessages2))
            .addPubsubMessages(
                Windmill.PubSubMessageBundle.newBuilder()
                    .setTopic("topic3")
                    .setTimestampLabel("ts")
                    .setIdLabel("id")
                    .setWithAttributes(true)
                    .addAllMessages(expectedMessages3))
            .build();
    assertEquals(expectedCommit, outputBuilder.build());
}
/**
 * Auth callback endpoint: completes the flow for the session identified by the
 * cookie and redirects (303 See Other) to the URI chosen by the auth service.
 * NOTE(review): @Consumes on a GET has no request body to consume — confirm
 * whether the annotation is intentional or vestigial.
 *
 * @param sessionId session cookie identifying the in-progress flow; may be null
 * @param code authorization code passed back by the provider; may be null
 */
@GET
@Path("/callback")
@Consumes(MediaType.APPLICATION_FORM_URLENCODED)
public Response callback(
        @CookieParam("session_id") String sessionId, @QueryParam("code") String code) {
    var redirect = authService.callback(new CallbackRequest(sessionId, code));
    return Response.seeOther(redirect).build();
}
// When the service returns a redirect URI, the endpoint must answer
// 303 See Other with that URI in the Location header.
@Test
void callback_success() {
    var callbackRedirect = URI.create("https://app.example.com/success");
    var authService = mock(AuthService.class);
    when(authService.callback(any())).thenReturn(callbackRedirect);
    var sut = new AuthEndpoint(authService);

    // when
    try (var res = sut.callback(null, null)) {
        // then
        assertEquals(Status.SEE_OTHER.getStatusCode(), res.getStatus());
        var redirect = res.getLocation().toString();
        assertEquals(callbackRedirect.toString(), redirect);
    }
}
/**
 * Returns a snapshot of all currently registered push connections.
 * The returned list is an independent copy; mutating it does not affect the registry.
 *
 * @return a new list containing every registered connection
 */
public List<PushConnection> getAll() {
    List<PushConnection> snapshot = new ArrayList<>(clientPushConnectionMap.values());
    return snapshot;
}
// getAll must return one entry per registered client id.
@Test
void testGetAll() {
    pushConnectionRegistry.put("clientId1", pushConnection);
    pushConnectionRegistry.put("clientId2", pushConnection);
    List<PushConnection> connections = pushConnectionRegistry.getAll();
    assertEquals(2, connections.size());
}
/**
 * Applies the given log-level configuration to the root logger context:
 * property-driven levels first, then hardcoded levels, then loggers that are
 * silenced (OFF) unless global TRACE is enabled.
 *
 * @param logLevelConfig the configuration to apply; its root logger name must match ROOT_LOGGER_NAME
 * @param props properties the level values are read from
 * @return the configured root logger context
 * @throws IllegalArgumentException if the config targets a different root logger name
 */
public LoggerContext apply(LogLevelConfig logLevelConfig, Props props) {
    if (!ROOT_LOGGER_NAME.equals(logLevelConfig.getRootLoggerName())) {
        throw new IllegalArgumentException("Value of LogLevelConfig#rootLoggerName must be \"" + ROOT_LOGGER_NAME + "\"");
    }
    LoggerContext rootContext = getRootContext();
    // 1) levels resolved from properties, per configured logger
    logLevelConfig.getConfiguredByProperties().forEach((key, value) -> applyLevelByProperty(props, rootContext.getLogger(key), value));
    // 2) levels fixed in code
    logLevelConfig.getConfiguredByHardcodedLevel().forEach((key, value) -> applyHardcodedLevel(rootContext, key, value));
    // 3) loggers that stay off unless the global level is TRACE
    Level propertyValueAsLevel = getPropertyValueAsLevel(props, LOG_LEVEL.getKey());
    boolean traceGloballyEnabled = propertyValueAsLevel == Level.TRACE;
    logLevelConfig.getOffUnlessTrace().forEach(logger -> applyHardUnlessTrace(rootContext, logger, traceGloballyEnabled));
    return rootContext;
}
// The more specific per-domain property (sonar.log.level.web.es=TRACE) must win
// over the global property (sonar.log.level=DEBUG) when both are set.
@Test
public void apply_sets_domain_property_over_global_property_if_both_set() {
    LogLevelConfig config = newLogLevelConfig().levelByDomain("foo", WEB_SERVER, LogDomain.ES).build();
    props.set("sonar.log.level", "DEBUG");
    props.set("sonar.log.level.web.es", "TRACE");

    LoggerContext context = underTest.apply(config, props);

    assertThat(context.getLogger("foo").getLevel()).isEqualTo(Level.TRACE);
}
/**
 * Parses a JSON document into events using the default {@code BasicEventFactory}.
 *
 * @param json the JSON text to parse
 * @return the parsed events
 * @throws IOException if the JSON cannot be read or parsed
 */
public static Event[] fromJson(final String json) throws IOException { return fromJson(json, BasicEventFactory.INSTANCE); }
// Parsing a JSON array whose element is a plain string (not an event object)
// is expected to surface a ClassCastException.
@Test(expected=ClassCastException.class)
public void testFromJsonWithInvalidJsonArray2() throws Exception {
    Event.fromJson("[\"gabeutch\"]");
}
/**
 * Opens the file on the wrapped file system, wrapping the resulting stream via
 * createInputStream (which applies this file system's stream handling).
 *
 * @param f the file to open
 * @param bufferSize read buffer size passed to the underlying file system
 * @throws IOException if the underlying open fails
 */
@Override public FSDataInputStream open(Path f, int bufferSize) throws IOException { return createInputStream(() -> originalFs.open(f, bufferSize)); }
// Opens a stream on a connection-limited file system (limit 1) and reads it
// slowly while ten competing writer threads contend for the connection; the
// slow-but-active reader must survive until it reaches end of stream.
@Test
void testSlowInputStreamNotClosed() throws Exception {
    final File file = File.createTempFile("junit", null, tempFolder);
    createRandomContents(file, new Random(), 50);

    final LimitedConnectionsFileSystem fs =
            new LimitedConnectionsFileSystem(LocalFileSystem.getSharedInstance(), 1, 0L, 1000L);

    // some competing threads
    final WriterThread[] threads = new WriterThread[10];
    for (int i = 0; i < threads.length; i++) {
        Path path = new Path(File.createTempFile("junit", null, tempFolder).toURI());
        threads[i] = new WriterThread(fs, path, 1, Integer.MAX_VALUE);
    }

    // open the stream we test
    try (FSDataInputStream in = fs.open(new Path(file.toURI()))) {
        // start the other threads that will try to shoot this stream down
        for (WriterThread t : threads) {
            t.start();
        }

        // read the stream slowly.
        Thread.sleep(5);
        while (in.read() != -1) {
            Thread.sleep(5);
        }
    }

    // wait for clean shutdown
    for (WriterThread t : threads) {
        t.sync();
    }
}
/**
 * Updates an existing GoView project.
 *
 * @param updateReqVO the update request; its id must reference an existing project
 */
@Override
public void updateProject(GoViewProjectUpdateReqVO updateReqVO) {
    // Validate that the project exists
    validateProjectExists(updateReqVO.getId());
    // Convert and persist the update
    GoViewProjectDO updateObj = GoViewProjectConvert.INSTANCE.convert(updateReqVO);
    goViewProjectMapper.updateById(updateObj);
}
// Updating an existing project must persist every field of the request.
@Test
public void testUpdateProject_success() {
    // Mock data
    GoViewProjectDO dbGoViewProject = randomPojo(GoViewProjectDO.class);
    goViewProjectMapper.insert(dbGoViewProject);// @Sql: insert an existing record first
    // Prepare the request parameters
    GoViewProjectUpdateReqVO reqVO = randomPojo(GoViewProjectUpdateReqVO.class, o -> {
        o.setId(dbGoViewProject.getId()); // use the ID of the record to update
        o.setStatus(randomCommonStatus());
    });
    // Invoke the service
    goViewProjectService.updateProject(reqVO);
    // Verify the update was applied correctly
    GoViewProjectDO goViewProject = goViewProjectMapper.selectById(reqVO.getId()); // fetch the latest state
    assertPojoEquals(reqVO, goViewProject);
}
/**
 * Applies the configured HTTP client (urlconnection or apache) to the given
 * AWS sync client builder.
 *
 * NOTE: when no client type is configured, this method mutates the
 * {@code httpClientType} field to the default before dispatching.
 *
 * @param builder the AWS client builder to configure
 * @throws IllegalArgumentException for an unrecognized client type
 */
public <T extends AwsSyncClientBuilder> void applyHttpClientConfigurations(T builder) {
    if (Strings.isNullOrEmpty(httpClientType)) {
        httpClientType = CLIENT_TYPE_DEFAULT;
    }
    switch (httpClientType) {
        case CLIENT_TYPE_URLCONNECTION:
            // Configurations classes are loaded reflectively by name.
            UrlConnectionHttpClientConfigurations urlConnectionHttpClientConfigurations =
                loadHttpClientConfigurations(UrlConnectionHttpClientConfigurations.class.getName());
            urlConnectionHttpClientConfigurations.configureHttpClientBuilder(builder);
            break;
        case CLIENT_TYPE_APACHE:
            ApacheHttpClientConfigurations apacheHttpClientConfigurations =
                loadHttpClientConfigurations(ApacheHttpClientConfigurations.class.getName());
            apacheHttpClientConfigurations.configureHttpClientBuilder(builder);
            break;
        default:
            throw new IllegalArgumentException("Unrecognized HTTP client type " + httpClientType);
    }
}
// Configuring client type "apache" must install an Apache HTTP client builder
// on the S3 client builder.
@Test
public void testApacheHttpClientConfiguration() {
    Map<String, String> properties = Maps.newHashMap();
    properties.put(HttpClientProperties.CLIENT_TYPE, "apache");
    HttpClientProperties httpClientProperties = new HttpClientProperties(properties);
    S3ClientBuilder mockS3ClientBuilder = Mockito.mock(S3ClientBuilder.class);
    ArgumentCaptor<SdkHttpClient.Builder> httpClientBuilderCaptor =
        ArgumentCaptor.forClass(SdkHttpClient.Builder.class);

    httpClientProperties.applyHttpClientConfigurations(mockS3ClientBuilder);
    Mockito.verify(mockS3ClientBuilder).httpClientBuilder(httpClientBuilderCaptor.capture());

    SdkHttpClient.Builder capturedHttpClientBuilder = httpClientBuilderCaptor.getValue();
    assertThat(capturedHttpClientBuilder)
        .as("Should use apache http client")
        .isInstanceOf(ApacheHttpClient.Builder.class);
}
/**
 * Removes a single occurrence of the given value, located via binary search.
 * The whole operation runs under the instance lock.
 *
 * @param value the value to remove
 * @return true if a matching element was found and removed
 */
@Override
public boolean remove(Object value) {
    lock.lock();
    try {
        checkComparator();
        // Unchecked cast to the element type; a mismatched type surfaces as ClassCastException.
        BinarySearchResult<V> res = binarySearch((V) value);
        if (res.getIndex() < 0) {
            // Negative index: value not present.
            return false;
        }
        remove((int) res.getIndex());
        return true;
    } finally {
        lock.unlock();
    }
}
// remove() must delete one occurrence per call and report whether anything was
// removed; removing an absent value returns false.
@Test
public void testRemove() {
    RPriorityQueue<Integer> set = redisson.getPriorityQueue("set");
    set.add(5);
    set.add(3);
    set.add(1);
    set.add(2);
    set.add(4);
    set.add(1);

    Assertions.assertFalse(set.remove(0));
    Assertions.assertTrue(set.remove(3));
    Assertions.assertTrue(set.remove(1));

    // One "1" was removed; the other remains.
    assertThat(set).containsExactly(1, 2, 4, 5);
}
/**
 * Static factory for a builder of a {@code text-single} form field.
 *
 * @param fieldName the form field name
 * @return a new builder with type {@code text_single}
 */
public static TextSingleFormField.Builder textSingleBuilder(String fieldName) { return new TextSingleFormField.Builder(fieldName, Type.text_single); }
// Builder.setLabel(null) must be rejected with IllegalArgumentException.
@Test
public void testThrowExceptionWhenNullLabel() {
    TextSingleFormField.Builder builder = FormField.textSingleBuilder("type");
    assertThrows(IllegalArgumentException.class, () -> builder.setLabel(null));
}
@Nullable static String errorCode(Throwable error) { if (error instanceof RpcException) { return ERROR_CODE_NUMBER_TO_NAME.get(((RpcException) error).getCode()); } return null; }
// Exhaustively checks the numeric-code-to-name mapping: null and non-RpcException
// inputs yield null, codes 0..13 map to their names, and 14 is unmapped.
@Test
void errorCodes() {
    assertThat(DubboParser.errorCode(null))
        .isEqualTo(DubboParser.errorCode(new IOException("timeout")))
        .isNull();
    assertThat(DubboParser.errorCode(new RpcException(0)))
        .isEqualTo("UNKNOWN_EXCEPTION");
    assertThat(DubboParser.errorCode(new RpcException(1)))
        .isEqualTo("NETWORK_EXCEPTION");
    assertThat(DubboParser.errorCode(new RpcException(2)))
        .isEqualTo("TIMEOUT_EXCEPTION");
    assertThat(DubboParser.errorCode(new RpcException(3)))
        .isEqualTo("BIZ_EXCEPTION");
    assertThat(DubboParser.errorCode(new RpcException(4)))
        .isEqualTo("FORBIDDEN_EXCEPTION");
    assertThat(DubboParser.errorCode(new RpcException(5)))
        .isEqualTo("SERIALIZATION_EXCEPTION");
    assertThat(DubboParser.errorCode(new RpcException(6)))
        .isEqualTo("NO_INVOKER_AVAILABLE_AFTER_FILTER");
    assertThat(DubboParser.errorCode(new RpcException(7)))
        .isEqualTo("LIMIT_EXCEEDED_EXCEPTION");
    assertThat(DubboParser.errorCode(new RpcException(8)))
        .isEqualTo("TIMEOUT_TERMINATE");
    assertThat(DubboParser.errorCode(new RpcException(9)))
        .isEqualTo("REGISTRY_EXCEPTION");
    assertThat(DubboParser.errorCode(new RpcException(10)))
        .isEqualTo("ROUTER_CACHE_NOT_BUILD");
    assertThat(DubboParser.errorCode(new RpcException(11)))
        .isEqualTo("METHOD_NOT_FOUND");
    assertThat(DubboParser.errorCode(new RpcException(12)))
        .isEqualTo("VALIDATION_EXCEPTION");
    assertThat(DubboParser.errorCode(new RpcException(13)))
        .isEqualTo("AUTHORIZATION_EXCEPTION");
    assertThat(DubboParser.errorCode(new RpcException(14)))
        .isNull();// This test will drift with a new error code name if Dubbo adds one
}
// Protected no-arg constructor: Diff is not meant to be instantiated directly
// from outside its subclass/package hierarchy.
protected Diff() {}
// Exercises diff computation across a grid of start sizes and modification counts
// (0..10000, stepped by nextStep), bounded by a 60-second timeout.
@Test(timeout=60000)
public void testDiff() throws Exception {
    for(int startSize = 0; startSize <= 10000; startSize = nextStep(startSize)) {
        for(int m = 0; m <= 10000; m = nextStep(m)) {
            runDiffTest(startSize, m);
        }
    }
}
/**
 * Returns the number of listReservations calls that failed to retrieve results.
 * Exposed for tests only.
 */
@VisibleForTesting public int getListReservationsFailedRetrieved() { return numListReservationsFailedRetrieved.value(); }
// A failing listReservations call against the bad subcluster must increment
// the failed-retrieved counter by exactly one.
@Test
public void testListReservationsFailed() {
    long totalBadBefore = metrics.getListReservationsFailedRetrieved();
    badSubCluster.getListReservations();
    Assert.assertEquals(totalBadBefore + 1, metrics.getListReservationsFailedRetrieved());
}
/**
 * Decodes the most likely hidden-state sequence for the observation sequence
 * {@code o} using Viterbi dynamic programming in log space (sums of logs avoid
 * floating-point underflow from products of small probabilities).
 *
 * @param o observation symbol indices
 * @return the most probable state sequence, one state per observation
 */
public int[] predict(int[] o) {
    int N = a.nrow();

    // trellis[t][i]: log-probability of the best path ending in state i at time t.
    double[][] trellis = new double[o.length][N];

    // psy[t][j]: predecessor state of j on the best path at time t (backtrace table).
    int[][] psy = new int[o.length][N];

    // The most likely state sequence.
    int[] s = new int[o.length];

    // forward: initialize with prior * emission for the first observation.
    for (int i = 0; i < N; i++) {
        trellis[0][i] = MathEx.log(pi[i]) + MathEx.log(b.get(i, o[0]));
        psy[0][i] = 0;
    }

    // Recurrence: best predecessor i for each state j at each time step.
    for (int t = 1; t < o.length; t++) {
        for (int j = 0; j < N; j++) {
            double maxDelta = Double.NEGATIVE_INFINITY;
            int maxPsy = 0;
            for (int i = 0; i < N; i++) {
                double delta = trellis[t - 1][i] + MathEx.log(a.get(i, j));
                if (maxDelta < delta) {
                    maxDelta = delta;
                    maxPsy = i;
                }
            }
            trellis[t][j] = maxDelta + MathEx.log(b.get(j, o[t]));
            psy[t][j] = maxPsy;
        }
    }

    // trace back: pick the best final state, then follow stored predecessors.
    int n = o.length - 1;
    double maxDelta = Double.NEGATIVE_INFINITY;
    for (int i = 0; i < N; i++) {
        if (maxDelta < trellis[n][i]) {
            maxDelta = trellis[n][i];
            s[n] = i;
        }
    }
    for (int t = n; t-- > 0;) {
        s[t] = psy[t + 1][s[t + 1]];
    }

    return s;
}
// For this model and observation sequence, Viterbi decoding must yield the
// all-zero state sequence.
@Test
public void testPredict() {
    System.out.println("predict");
    HMM hmm = new HMM(pi, Matrix.of(a), Matrix.of(b));
    int[] o = {0, 0, 1, 1, 0, 1, 1, 0};
    int[] s = {0, 0, 0, 0, 0, 0, 0, 0};
    int[] result = hmm.predict(o);
    assertEquals(o.length, result.length);
    for (int i = 0; i < s.length; i++) {
        assertEquals(s[i], result[i]);
    }
}
/**
 * Formats an "alerts" notification into a quality-gate email.
 *
 * @param notification the notification to format
 * @return the email, or null when the notification type is not "alerts"
 *         (so another template can handle it)
 */
@Override
@CheckForNull
public EmailMessage format(Notification notification) {
    if (!"alerts".equals(notification.getType())) {
        return null;
    }
    // Retrieve useful values
    String projectId = notification.getFieldValue("projectId");
    String projectKey = notification.getFieldValue("projectKey");
    String projectName = notification.getFieldValue("projectName");
    String projectVersion = notification.getFieldValue("projectVersion");
    String branchName = notification.getFieldValue("branch");
    String alertName = notification.getFieldValue("alertName");
    String alertText = notification.getFieldValue("alertText");
    String alertLevel = notification.getFieldValue("alertLevel");
    String ratingMetricsInOneString = notification.getFieldValue("ratingMetrics");
    boolean isNewAlert = Boolean.parseBoolean(notification.getFieldValue("isNewAlert"));
    // Branch name, when present, is appended to the displayed project name.
    String fullProjectName = computeFullProjectName(projectName, branchName);
    // Generate text
    String subject = generateSubject(fullProjectName, alertLevel, isNewAlert);
    String messageBody = generateMessageBody(projectName, projectKey, projectVersion, branchName, alertName, alertText, isNewAlert, ratingMetricsInOneString);
    // And finally return the email that will be sent
    return new EmailMessage()
        .setMessageId("alerts/" + projectId)
        .setSubject(subject)
        .setPlainTextMessage(messageBody);
}
// A new alert on a branch must produce a subject naming "Project (branch)" and a
// body that includes the branch line and a branch-qualified dashboard link.
@Test
public void shouldFormatNewAlertWithOneMessageOnBranch() {
    Notification notification = createNotification("Failed", "violations > 4", "ERROR", "true")
        .setFieldValue("branch", "feature");

    EmailMessage message = template.format(notification);

    assertThat(message.getMessageId(), is("alerts/45"));
    assertThat(message.getSubject(), is("New quality gate threshold reached on \"Foo (feature)\""));
    assertThat(message.getMessage(), is(""
        + "Project: Foo\n"
        + "Branch: feature\n"
        + "Version: V1-SNAP\n"
        + "Quality gate status: Failed\n"
        + "\n"
        + "New quality gate threshold: violations > 4\n"
        + "\n"
        + "More details at: http://nemo.sonarsource.org/dashboard?id=org.sonar.foo:foo&branch=feature"));
}
static ArgumentParser argParser() { ArgumentParser parser = ArgumentParsers .newArgumentParser("producer-performance") .defaultHelp(true) .description("This tool is used to verify the producer performance. To enable transactions, " + "you can specify a transaction id or set a transaction duration using --transaction-duration-ms. " + "There are three ways to specify the transaction id: set transaction.id=<id> via --producer-props, " + "set transaction.id=<id> in the config file via --producer.config, or use --transaction-id <id>."); MutuallyExclusiveGroup payloadOptions = parser .addMutuallyExclusiveGroup() .required(true) .description("either --record-size or --payload-file must be specified but not both."); parser.addArgument("--topic") .action(store()) .required(true) .type(String.class) .metavar("TOPIC") .help("produce messages to this topic"); parser.addArgument("--num-records") .action(store()) .required(true) .type(Long.class) .metavar("NUM-RECORDS") .dest("numRecords") .help("number of messages to produce"); payloadOptions.addArgument("--record-size") .action(store()) .required(false) .type(Integer.class) .metavar("RECORD-SIZE") .dest("recordSize") .help("message size in bytes. Note that you must provide exactly one of --record-size or --payload-file " + "or --payload-monotonic."); payloadOptions.addArgument("--payload-file") .action(store()) .required(false) .type(String.class) .metavar("PAYLOAD-FILE") .dest("payloadFile") .help("file to read the message payloads from. This works only for UTF-8 encoded text files. " + "Payloads will be read from this file and a payload will be randomly selected when sending messages. " + "Note that you must provide exactly one of --record-size or --payload-file or --payload-monotonic."); payloadOptions.addArgument("--payload-monotonic") .action(storeTrue()) .type(Boolean.class) .metavar("PAYLOAD-MONOTONIC") .dest("payloadMonotonic") .help("payload is monotonically increasing integer. 
Note that you must provide exactly one of --record-size " + "or --payload-file or --payload-monotonic."); parser.addArgument("--payload-delimiter") .action(store()) .required(false) .type(String.class) .metavar("PAYLOAD-DELIMITER") .dest("payloadDelimiter") .setDefault("\\n") .help("provides delimiter to be used when --payload-file is provided. " + "Defaults to new line. " + "Note that this parameter will be ignored if --payload-file is not provided."); parser.addArgument("--throughput") .action(store()) .required(true) .type(Double.class) .metavar("THROUGHPUT") .help("throttle maximum message throughput to *approximately* THROUGHPUT messages/sec. Set this to -1 to disable throttling."); parser.addArgument("--producer-props") .nargs("+") .required(false) .metavar("PROP-NAME=PROP-VALUE") .type(String.class) .dest("producerConfig") .help("kafka producer related configuration properties like bootstrap.servers,client.id etc. " + "These configs take precedence over those passed via --producer.config."); parser.addArgument("--producer.config") .action(store()) .required(false) .type(String.class) .metavar("CONFIG-FILE") .dest("producerConfigFile") .help("producer config properties file."); parser.addArgument("--print-metrics") .action(storeTrue()) .type(Boolean.class) .metavar("PRINT-METRICS") .dest("printMetrics") .help("print out metrics at the end of the test."); parser.addArgument("--transactional-id") .action(store()) .required(false) .type(String.class) .metavar("TRANSACTIONAL-ID") .dest("transactionalId") .help("The transactional id to use. This config takes precedence over the transactional.id " + "specified via --producer.config or --producer-props. 
Note that if the transactional id " + "is not specified while --transaction-duration-ms is provided, the default value for the " + "transactional id will be performance-producer- followed by a random uuid."); parser.addArgument("--transaction-duration-ms") .action(store()) .required(false) .type(Long.class) .metavar("TRANSACTION-DURATION") .dest("transactionDurationMs") .help("The max age of each transaction. The commitTransaction will be called after this time has elapsed. " + "The value should be greater than 0. If the transactional id is specified via --producer-props, " + "--producer.config, or --transactional-id but --transaction-duration-ms is not specified, " + "the default value will be 3000."); return parser; }
@Test
public void testEnableTransactionByProducerProps() throws IOException, ArgumentParserException {
    // Setting transactional.id via --producer-props must enable transactions and
    // fall back to the default transaction duration.
    ArgumentParser parser = ProducerPerformance.argParser();
    String[] args = new String[]{
        "--topic", "Hello-Kafka",
        "--num-records", "5",
        "--throughput", "100",
        "--record-size", "100",
        "--producer-props", "bootstrap.servers=localhost:9000", "transactional.id=foobar"};
    ProducerPerformance.ConfigPostProcessor configs = new ProducerPerformance.ConfigPostProcessor(parser, args);
    assertTrue(configs.transactionsEnabled);
    assertEquals(ProducerPerformance.DEFAULT_TRANSACTION_DURATION_MS, configs.transactionDurationMs);
    // The id supplied via producer props must be propagated into the producer config.
    assertEquals("foobar", configs.producerProps.get(ProducerConfig.TRANSACTIONAL_ID_CONFIG));
}
/**
 * Parses the OpenAPI Operation for a single resource method using default settings:
 * delegates to the full parseMethod overload with empty/absent values for all other
 * parsing context (consumes/produces, security, servers, callbacks, ...).
 *
 * NOTE(review): the delegate receives {@code classType.getClass()} — i.e. the Class of the
 * Jackson JavaType wrapper, not the method's declaring class. Confirm the overload only
 * uses this argument as an opaque anchor; otherwise this looks like a bug.
 */
public Operation parseMethod(
    Method method, List<Parameter> globalParameters, JsonView jsonViewAnnotation) {
  JavaType classType = TypeFactory.defaultInstance().constructType(method.getDeclaringClass());
  return parseMethod(
      classType.getClass(),
      method,
      globalParameters,
      null,
      null,
      null,
      null,
      new ArrayList<>(),
      Optional.empty(),
      new HashSet<>(),
      new ArrayList<>(),
      false,
      null,
      null,
      jsonViewAnnotation,
      null,
      null);
}
@Test(description = "scan methods")
public void testScanMethods() {
    Reader reader = new Reader(new OpenAPI());
    Method[] methods = SimpleMethods.class.getMethods();
    for (final Method method : methods) {
        // Only methods with a valid REST path are expected to produce an Operation.
        if (isValidRestPath(method)) {
            Operation operation = reader.parseMethod(method, null, null);
            assertNotNull(operation);
        }
    }
}
public String getTopicName(AsyncMockDefinition definition, EventMessage eventMessage) { // Produce service name part of topic name. String serviceName = definition.getOwnerService().getName().replace(" ", ""); serviceName = serviceName.replace("-", ""); // Produce version name part of topic name. String versionName = definition.getOwnerService().getVersion().replace(" ", ""); // Produce operation name part of topic name. String operationName = ProducerManager.getDestinationOperationPart(definition.getOperation(), eventMessage); operationName = operationName.replace('/', '-'); // Aggregate the 3 parts using '_' as delimiter. return serviceName + "-" + versionName + "-" + operationName; }
@Test
void testGetTopicName() {
    KafkaProducerManager producerManager = new KafkaProducerManager();

    // Service name contains a space that must be stripped from the topic name.
    Service service = new Service();
    service.setName("Streetlights API");
    service.setVersion("0.1.0");
    Operation operation = new Operation();
    operation.setName("RECEIVE receiveLightMeasurement");
    operation.setMethod("RECEIVE");
    operation.setResourcePaths(Set.of("smartylighting.streetlights.1.0.event.lighting.measured"));
    service.addOperation(operation);

    EventMessage eventMessage = new EventMessage();
    eventMessage.setName("Sample");
    List<EventMessage> eventsMessages = List.of(eventMessage);
    AsyncMockDefinition definition = new AsyncMockDefinition(service, operation, eventsMessages);

    // Expected shape: <serviceNoSpaces>-<version>-<operationPart>.
    String topicName = producerManager.getTopicName(definition, eventMessage);
    assertEquals("StreetlightsAPI-0.1.0-receiveLightMeasurement", topicName);
}
public static <K, V> V computeIfAbsent(ConcurrentMap<K, V> map, K key, Function<? super K, ? extends V> func) { Objects.requireNonNull(func); if (isJdk8) { V v = map.get(key); if (null == v) { // this bug fix methods maybe cause `func.apply` multiple calls. v = func.apply(key); if (null == v) { return null; } final V res = map.putIfAbsent(key, v); if (null != res) { // if pre value present, means other thread put value already, and putIfAbsent not effect // return exist value return res; } } return v; } else { return map.computeIfAbsent(key, func); } }
@Test
public void computeIfAbsent() {
    ConcurrentHashMap<String, String> map = new ConcurrentHashMap<>();
    map.put("123", "1111");
    // Existing key: the mapping function must NOT replace the present value.
    String value = ConcurrentHashMapUtils.computeIfAbsent(map, "123", k -> "234");
    assertEquals("1111", value);
    // Absent key: the mapping function supplies the value.
    String value1 = ConcurrentHashMapUtils.computeIfAbsent(map, "1232", k -> "2342");
    assertEquals("2342", value1);
    // Existing key again: still returns the originally stored value.
    String value2 = ConcurrentHashMapUtils.computeIfAbsent(map, "123", k -> "2342");
    assertEquals("1111", value2);
}
/**
 * Static factory: partitions elements into {@code numPartitions} output collections using a
 * partition function that may read side inputs declared via {@code requirements}.
 */
public static <T> Partition<T> of(
    int numPartitions,
    PartitionWithSideInputsFn<? super T> partitionFn,
    Requirements requirements) {
  // Capture numPartitions in a Contextful fn so the DoFn can evaluate the user
  // function per element with access to its declared side inputs.
  Contextful ctfFn =
      Contextful.fn(
          (T element, Contextful.Fn.Context c) -> partitionFn.partitionFor(element, numPartitions, c),
          requirements);
  return new Partition<>(new PartitionDoFn<T>(numPartitions, ctfFn, partitionFn));
}
@Test
@Category(NeedsRunner.class)
public void testDroppedPartition() {
    // Compute the set of integers either 1 or 2 mod 3, the hard way:
    // partition by mod 3, drop partition 0, flatten the rest.
    PCollectionList<Integer> outputs =
        pipeline
            .apply(Create.of(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12))
            .apply(Partition.of(3, new ModFn()));

    // Remove the "0 mod 3" partition from the list before flattening.
    List<PCollection<Integer>> outputsList = new ArrayList<>(outputs.getAll());
    outputsList.remove(0);
    outputs = PCollectionList.of(outputsList);
    assertTrue(outputs.size() == 2);

    PCollection<Integer> output = outputs.apply(Flatten.pCollections());
    PAssert.that(output).containsInAnyOrder(2, 4, 5, 7, 8, 10, 11);
    pipeline.run();
}
/** Returns a new, empty {@link Builder}. */
public static Builder builder() {
    return new Builder();
}
@Test
public void testBuilder() {
    PiTableId piTableId = PiTableId.of("table10");
    long cookie = 0xfff0323;
    int priority = 100;
    double timeout = 1000;
    PiMatchFieldId piMatchFieldId = PiMatchFieldId.of(IPV4_HEADER_NAME + DOT + DST_ADDR);
    PiFieldMatch piFieldMatch = new PiExactFieldMatch(piMatchFieldId, ImmutableByteSequence.copyFrom(0x0a010101));
    PiAction piAction = PiAction.builder().withId(PiActionId.of(DROP)).build();
    final Map<PiMatchFieldId, PiFieldMatch> fieldMatches = Maps.newHashMap();
    fieldMatches.put(piMatchFieldId, piFieldMatch);

    // Build an entry with every optional attribute set.
    final PiTableEntry piTableEntry = PiTableEntry.builder()
        .forTable(piTableId)
        .withMatchKey(PiMatchKey.builder()
            .addFieldMatches(fieldMatches.values())
            .build())
        .withAction(piAction)
        .withCookie(cookie)
        .withPriority(priority)
        .withTimeout(timeout)
        .build();

    // Every builder input must round-trip through the built entry.
    assertThat(piTableEntry.table(), is(piTableId));
    assertThat(piTableEntry.cookie(), is(cookie));
    assertThat("Priority must be set", piTableEntry.priority().isPresent());
    assertThat("Timeout must be set", piTableEntry.timeout().isPresent());
    assertThat(piTableEntry.priority().getAsInt(), is(priority));
    assertThat(piTableEntry.timeout().get(), is(timeout));
    assertThat("Incorrect match param value",
        CollectionUtils.isEqualCollection(piTableEntry.matchKey().fieldMatches(), fieldMatches.values()));
    assertThat(piTableEntry.action(), is(piAction));
}
/**
 * Creates a pipe from a CREATE PIPE statement, honoring IF NOT EXISTS and OR REPLACE.
 * All pipe DDL is serialized under the manager's write lock.
 */
public void createPipe(CreatePipeStmt stmt) throws DdlException {
    try {
        lock.writeLock().lock();
        Pair<Long, String> dbIdAndName = resolvePipeNameUnlock(stmt.getPipeName());
        boolean existed = nameToId.containsKey(dbIdAndName);
        if (existed) {
            // Neither IF NOT EXISTS nor OR REPLACE: duplicate creation is an error.
            if (!stmt.isIfNotExists() && !stmt.isReplace()) {
                ErrorReport.reportSemanticException(ErrorCode.ERR_PIPE_EXISTS);
            }
            if (stmt.isIfNotExists()) {
                // IF NOT EXISTS: silently keep the existing pipe.
                return;
            } else if (stmt.isReplace()) {
                // OR REPLACE: drop the old pipe before creating the new one.
                LOG.info("Pipe {} already exist, replace it with a new one", stmt.getPipeName());
                Pipe pipe = pipeMap.get(nameToId.get(dbIdAndName));
                dropPipeImpl(pipe);
            }
        }

        // Add pipe: register in memory and persist through the repo.
        long id = GlobalStateMgr.getCurrentState().getNextId();
        Pipe pipe = Pipe.fromStatement(id, stmt);
        putPipe(pipe);
        repo.addPipe(pipe);
    } finally {
        lock.writeLock().unlock();
    }
}
@Test
@Ignore("flaky test")
public void testExecuteTaskSubmitFailed() throws Exception {
    mockRepoExecutor();
    final String pipeName = "p3";
    String sql = "create pipe p3 as insert into tbl1 select * from files('path'='fake://pipe', 'format'='parquet')";
    createPipe(sql);

    // poll error: a polling failure must move the pipe into ERROR.
    mockPollError(1);
    Pipe p3 = getPipe(pipeName);
    p3.poll();
    Assert.assertEquals(Pipe.State.ERROR, p3.getState());

    // clear the error and resume the pipe
    resumePipe(pipeName);
    p3.setLastPolledTime(0);
    Assert.assertEquals(Pipe.State.RUNNING, p3.getState());
    p3.poll();
    p3.schedule();
    Assert.assertEquals(Pipe.State.RUNNING, p3.getState());
    Assert.assertEquals(1, p3.getRunningTasks().size());

    // Make every subsequent task submission fail.
    TaskManager taskManager = GlobalStateMgr.getCurrentState().getTaskManager();
    new mockit.Expectations(taskManager) {
        {
            // submit error
            taskManager.executeTaskAsync((Task) any, (ExecuteOption) any);
            result = new SubmitResult("queryid", SubmitResult.SubmitStatus.FAILED);
        }
    };
    Thread.sleep(1000);
    Assert.assertEquals(1, p3.getRunningTasks().size());

    // retry several times, until failed: each schedule pair flips the task
    // between error and runnable while the pipe stays RUNNING.
    for (int i = 0; i < Pipe.FAILED_TASK_THRESHOLD; i++) {
        p3.schedule();
        Assert.assertEquals(Pipe.State.RUNNING, p3.getState());
        Assert.assertEquals(1, p3.getRunningTasks().size());
        Assert.assertTrue(String.format("iteration %d: %s", i, p3.getRunningTasks()),
                p3.getRunningTasks().stream().allMatch(PipeTaskDesc::isError));

        p3.schedule();
        Assert.assertEquals(Pipe.State.RUNNING, p3.getState());
        Assert.assertTrue(String.format("iteration %d: %s", i, p3.getRunningTasks()),
                p3.getRunningTasks().stream().allMatch(PipeTaskDesc::isRunnable));
    }
    // Exceeding the failure threshold pushes the pipe into ERROR.
    p3.schedule();
    Assert.assertEquals(Pipe.FAILED_TASK_THRESHOLD + 1, p3.getFailedTaskExecutionCount());
    Assert.assertEquals(Pipe.State.ERROR, p3.getState());

    // retry all: moves the failed file back to the UNLOADED state.
    {
        AlterPipeStmt alter = (AlterPipeStmt) UtFrameUtils.parseStmtWithNewParser("alter pipe p3 retry all", ctx);
        p3.retry((AlterPipeClauseRetry) alter.getAlterPipeClause());
        List<PipeFileRecord> unloadedFiles =
                p3.getPipeSource().getFileListRepo().listFilesByState(FileListRepo.PipeFileState.UNLOADED, 0);
        Assert.assertEquals(1, unloadedFiles.size());
    }
}
/**
 * Returns the Cucumber options declared on {@code clazz} via the JUnit annotation,
 * or null when absent. When absent, also warns if the class mistakenly carries the
 * TestNG variant of the annotation.
 */
@Override
public CucumberOptionsAnnotationParser.CucumberOptions getOptions(Class<?> clazz) {
    CucumberOptions junitOptions = clazz.getAnnotation(CucumberOptions.class);
    if (junitOptions == null) {
        // No JUnit options present: flag a likely misuse of the TestNG annotation.
        warnWhenTestNGCucumberOptionsAreUsed(clazz);
        return null;
    }
    return new JunitCucumberOptions(junitOptions);
}
@Test
void testUuidGeneratorWhenNotSpecified() {
    // Without an explicit uuidGenerator in @CucumberOptions, the parsed options expose null.
    io.cucumber.core.options.CucumberOptionsAnnotationParser.CucumberOptions options = this.optionsProvider
            .getOptions(ClassWithDefault.class);
    assertNotNull(options);
    assertNull(options.uuidGenerator());
}
/**
 * Builds the Arrow schema equivalent of the given Parquet message type, together with
 * the per-field mapping between the two schemas.
 */
public SchemaMapping fromParquet(MessageType parquetSchema) {
    // Convert each top-level Parquet field, then derive the Arrow fields from the mappings.
    List<TypeMapping> typeMappings = fromParquet(parquetSchema.getFields());
    Schema arrowSchema = new Schema(fields(typeMappings));
    return new SchemaMapping(arrowSchema, parquetSchema, typeMappings);
}
@Test
public void testParquetMapToArrow() {
    // A required Parquet map with int32 keys and optional int64 values must map to an
    // Arrow Map(false) with a non-nullable 32-bit key and a nullable 64-bit value.
    GroupType mapType = Types.requiredMap().key(INT32).optionalValue(INT64).named("myMap");
    MessageType parquet = Types.buildMessage().addField(mapType).named("root");
    Schema expected = new Schema(asList(field(
        "myMap",
        new ArrowType.Map(false),
        field(null, false, new ArrowType.Int(32, true)),
        field(null, true, new ArrowType.Int(64, true)))));
    SchemaMapping mapping = converter.fromParquet(parquet);
    Schema actual = mapping.getArrowSchema();
    Assert.assertEquals(expected, actual);
}
/** Queries the route info (broker/queue layout) of the given topic; pure delegation. */
@Override
public TopicRouteData examineTopicRouteInfo(
    String topic) throws RemotingException, MQClientException, InterruptedException {
    return defaultMQAdminExtImpl.examineTopicRouteInfo(topic);
}
@Test
public void testExamineTopicRouteInfo() throws RemotingException, MQClientException, InterruptedException {
    // The mocked name server must report the default broker/cluster for any topic.
    TopicRouteData topicRouteData = defaultMQAdminExt.examineTopicRouteInfo("UnitTest");
    assertThat(topicRouteData.getBrokerDatas().get(0).getBrokerName()).isEqualTo("default-broker");
    assertThat(topicRouteData.getBrokerDatas().get(0).getCluster()).isEqualTo("default-cluster");
}
/** Hashes {@code value} with the shared hash function and returns the result as a long. */
public static long hashToLong(final String value) {
    return getHashString(value).asLong();
}
@Test
void shouldReturnConsistentHashLong() {
    // JUnit 5's assertEquals takes (expected, actual): the pinned constant is the
    // expected value, the computed hash is the actual. The original call had the
    // arguments swapped, which produces misleading failure messages.
    assertEquals(8668895776616456786L, Hashing.hashToLong("random"));
}
@Override public Failure parse(XmlPullParser parser, int initialDepth, XmlEnvironment xmlEnvironment) throws XmlPullParserException, IOException, SmackParsingException { Failure.CompressFailureError compressFailureError = null; StanzaError stanzaError = null; XmlEnvironment failureXmlEnvironment = XmlEnvironment.from(parser, xmlEnvironment); outerloop: while (true) { XmlPullParser.Event eventType = parser.next(); switch (eventType) { case START_ELEMENT: String name = parser.getName(); String namespace = parser.getNamespace(); switch (namespace) { case Failure.NAMESPACE: compressFailureError = Failure.CompressFailureError.valueOf(name.replace("-", "_")); if (compressFailureError == null) { LOGGER.warning("Unknown element in " + Failure.NAMESPACE + ": " + name); } break; case StreamOpen.CLIENT_NAMESPACE: case StreamOpen.SERVER_NAMESPACE: switch (name) { case StanzaError.ERROR: stanzaError = PacketParserUtils.parseError(parser, failureXmlEnvironment); break; default: LOGGER.warning("Unknown element in " + namespace + ": " + name); break; } break; } break; case END_ELEMENT: if (parser.getDepth() == initialDepth) { break outerloop; } break; default: // fall out } } return new Failure(compressFailureError, stanzaError); }
@Test
public void simpleFailureTest() throws Exception {
    // A bare <processing-failed/> child must map to the matching enum constant
    // (wire '-' translated to enum '_').
    final String xml = "<failure xmlns='http://jabber.org/protocol/compress'><processing-failed/></failure>";
    final XmlPullParser parser = PacketParserUtils.getParserFor(xml);
    final Failure failure = FailureProvider.INSTANCE.parse(parser);
    assertEquals(Failure.CompressFailureError.processing_failed, failure.getCompressFailureError());
}
/**
 * Writes a java.sql.Time value in the MySQL binary protocol TIME encoding:
 * length byte 0 for a zero time, 8 for time-only, 12 for time plus fractional part.
 */
@Override
public void write(final MySQLPacketPayload payload, final Object value) {
    // Convert the Time's epoch millis into wall-clock fields.
    // NOTE(review): this uses the JVM default time zone — presumably the zone the Time
    // was created in; confirm this matches the caller's expectations.
    LocalDateTime localDateTime = LocalDateTime.ofInstant(Instant.ofEpochMilli(((Time) value).getTime()), ZoneId.systemDefault());
    int hours = localDateTime.getHour();
    int minutes = localDateTime.getMinute();
    int seconds = localDateTime.getSecond();
    int nanos = localDateTime.getNano();
    boolean isTimeAbsent = 0 == hours && 0 == minutes && 0 == seconds;
    boolean isNanosAbsent = 0 == nanos;
    if (isTimeAbsent && isNanosAbsent) {
        // All-zero time: single zero length byte, no payload.
        payload.writeInt1(0);
        return;
    }
    if (isNanosAbsent) {
        // Time without fractional seconds: 8-byte form.
        payload.writeInt1(8);
        writeTime(payload, hours, minutes, seconds);
        return;
    }
    // Time with fractional seconds: 12-byte form.
    payload.writeInt1(12);
    writeTime(payload, hours, minutes, seconds);
    writeNanos(payload, nanos);
}
@Test
void assertWriteWithEightBytes() {
    MySQLTimeBinaryProtocolValue actual = new MySQLTimeBinaryProtocolValue();
    actual.write(payload, Time.valueOf("01:30:10"));
    // 8-byte form: length byte 8, then sign (0) and days (0).
    verify(payload).writeInt1(8);
    verify(payload).writeInt1(0);
    verify(payload).writeInt4(0);
    // NOTE(review): the two calls below invoke the mock directly instead of verifying it;
    // they look like leftovers — confirm whether they were meant to be verify(...) calls.
    payload.writeInt1(0);
    payload.writeInt4(0);
    // Hour, minute, second fields.
    verify(payload).writeInt1(1);
    verify(payload).writeInt1(30);
    verify(payload).writeInt1(10);
}
/** Detects a GitHub Actions environment by the presence of the GITHUB_ACTION variable. */
@Override
public boolean isDetected() {
    return StringUtils.isNotBlank(system.envVariable("GITHUB_ACTION"));
}
@Test
public void isDetected() {
    // Detection follows the presence/absence of the GITHUB_ACTION env variable.
    setEnvVariable("GITHUB_ACTION", "build");
    assertThat(underTest.isDetected()).isTrue();
    setEnvVariable("GITHUB_ACTION", null);
    assertThat(underTest.isDetected()).isFalse();
}
/**
 * HyperLogLog cardinality estimate: raw = alpha * m^2 / E, where E is computed by
 * computeE(), followed by small/large-range correction.
 */
@Override
public long estimate() {
    final double raw = (1 / computeE()) * alpha() * m * m;
    return applyRangeCorrection(raw);
}
// Estimating with an invalid (too small) memory footprint is expected to trip an
// internal assertion, hence the test requires -ea and expects AssertionError.
@RequireAssertEnabled
@Test(expected = AssertionError.class)
public void testAlpha_withInvalidMemoryFootprint() {
    DenseHyperLogLogEncoder encoder = new DenseHyperLogLogEncoder(1);
    encoder.estimate();
}
/**
 * Normalizes the given URI: forces the scheme, ensures a port and default path,
 * appends a trailing slash, and collapses redundant path segments.
 * Each helper may yield null, in which case null is returned (as for a null input).
 */
@Nullable
public static URI normalizeURI(@Nullable final URI uri, String scheme, int port, String path) {
    if (uri == null) {
        return null;
    }
    // Apply each transformation in turn, short-circuiting on null exactly like the
    // equivalent Optional.map(...) chain would.
    URI result = getUriWithScheme(uri, scheme);
    if (result != null) {
        result = getUriWithPort(result, port);
    }
    if (result != null) {
        result = getUriWithDefaultPath(result, path);
    }
    if (result != null) {
        result = Tools.uriWithTrailingSlash(result);
    }
    return result == null ? null : result.normalize();
}
@Test
public void normalizeURIReturnsNormalizedURI() {
    // Scheme is replaced, the port is added, duplicate slashes collapse, and a
    // trailing slash is appended; the default path is ignored because one exists.
    final URI uri = URI.create("foobar://example.com//foo/////bar");
    assertEquals(URI.create("quux://example.com:1234/foo/bar/"), Tools.normalizeURI(uri, "quux", 1234, "/baz"));
}
/**
 * Maintenance task: permanently removes jobs that have been in the DELETED state
 * longer than the configured retention period.
 */
@Override
protected void runTask() {
    LOGGER.trace("Looking for deleted jobs that can be deleted permanently...");
    int totalAmountOfPermanentlyDeletedJobs = storageProvider.deleteJobsPermanently(StateName.DELETED, now().minus(backgroundJobServerConfiguration().getPermanentlyDeleteDeletedJobsAfter()));
    LOGGER.debug("Found {} deleted jobs that were permanently deleted as part of JobRunr maintenance", totalAmountOfPermanentlyDeletedJobs);
}
@Test
void testTask() {
    // Running the task must delegate the permanent delete of DELETED jobs to the storage provider.
    runTask(task);

    verify(storageProvider).deleteJobsPermanently(eq(DELETED), any());
}
/**
 * Returns the PValues identified as keyed during pipeline traversal.
 *
 * @throws IllegalStateException if the pipeline has not been completely traversed yet
 */
public Set<PValue> getKeyedPValues() {
    // Equivalent to Preconditions.checkState: same exception type and message.
    if (!finalized) {
        throw new IllegalStateException(
            "can't call getKeyedPValues before a Pipeline has been completely traversed");
    }
    return keyedValues;
}
@Test
public void unkeyedInputWithKeyPreserving() {
    // A key-preserving transform applied to an UNkeyed input must not mark its
    // output as keyed.
    PCollection<KV<String, Iterable<WindowedValue<KV<String, Integer>>>>> input =
        p.apply(
            Create.of(
                    KV.of(
                        "hello",
                        (Iterable<WindowedValue<KV<String, Integer>>>)
                            Collections.<WindowedValue<KV<String, Integer>>>emptyList()))
                .withCoder(
                    KvCoder.of(
                        StringUtf8Coder.of(),
                        IterableCoder.of(
                            WindowedValue.getValueOnlyCoder(
                                KvCoder.of(StringUtf8Coder.of(), VarIntCoder.of()))))));

    PCollection<KeyedWorkItem<String, KV<String, Integer>>> unkeyed =
        input
            .apply(ParDo.of(new ParDoMultiOverrideFactory.ToKeyedWorkItem<>()))
            .setCoder(
                KeyedWorkItemCoder.of(
                    StringUtf8Coder.of(),
                    KvCoder.of(StringUtf8Coder.of(), VarIntCoder.of()),
                    GlobalWindow.Coder.INSTANCE));

    p.traverseTopologically(visitor);

    assertThat(visitor.getKeyedPValues(), not(hasItem(unkeyed)));
}
/**
 * Builds optimizer Statistics for an Iceberg table at the given version, optionally
 * enriching column NDVs from Puffin statistics and partition values when the session
 * enables it. Without an end version (no snapshot), unknown column statistics are used.
 */
public Statistics getTableStatistics(IcebergTable icebergTable, Map<ColumnRefOperator, Column> colRefToColumnMetaMap,
                                     OptimizerContext session, ScalarOperator predicate, TableVersionRange version) {
    Table nativeTable = icebergTable.getNativeTable();
    Statistics.Builder statisticsBuilder = Statistics.builder();
    String uuid = icebergTable.getUUID();
    if (version.end().isPresent()) {
        // Only primitive-typed columns can carry NDV statistics.
        Set<Integer> primitiveColumnsFieldIds = nativeTable.schema().columns().stream()
                .filter(column -> column.type().isPrimitiveType())
                .map(Types.NestedField::fieldId).collect(Collectors.toSet());

        Map<Integer, Long> colIdToNdvs = new HashMap<>();
        if (session != null && session.getSessionVariable().enableReadIcebergPuffinNdv()) {
            // NDVs read from Puffin statistics files.
            colIdToNdvs = readNumDistinctValues(icebergTable, primitiveColumnsFieldIds, version);
            if (uuidToPartitionFieldIdToValues.containsKey(uuid) && !uuidToPartitionFieldIdToValues.get(uuid).isEmpty()) {
                // For partition source columns, the number of distinct partition values
                // is used as the NDV, overriding any Puffin-derived value.
                HashMultimap<Integer, Object> partitionFieldIdToValue = uuidToPartitionFieldIdToValues.get(uuid);
                Map<Integer, Long> partitionSourceIdToNdv = new HashMap<>();
                for (PartitionField partitionField : nativeTable.spec().fields()) {
                    int sourceId = partitionField.sourceId();
                    int fieldId = partitionField.fieldId();
                    if (partitionFieldIdToValue.containsKey(fieldId)) {
                        partitionSourceIdToNdv.put(sourceId, (long) partitionFieldIdToValue.get(fieldId).size());
                    }
                }
                colIdToNdvs.putAll(partitionSourceIdToNdv);
            }
        }

        // Look up cached file-level stats for this (db, table, snapshot, predicate)
        // combination; fall back to a fresh one-record estimate when absent.
        PredicateSearchKey key = PredicateSearchKey.of(icebergTable.getRemoteDbName(),
                icebergTable.getRemoteTableName(), version.end().get(), predicate);
        IcebergFileStats icebergFileStats;
        if (!icebergFileStatistics.containsKey(key)) {
            icebergFileStats = new IcebergFileStats(1);
        } else {
            icebergFileStats = icebergFileStatistics.get(key);
        }

        statisticsBuilder.setOutputRowCount(icebergFileStats.getRecordCount());
        statisticsBuilder.addColumnStatistics(buildColumnStatistics(
                nativeTable, colRefToColumnMetaMap, icebergFileStats, colIdToNdvs));
    } else {
        // empty table (no snapshot).
        // NOTE(review): the row count is set to 1 rather than 0 — presumably to avoid
        // zero-cardinality plans; confirm this is intentional.
        statisticsBuilder.setOutputRowCount(1);
        statisticsBuilder.addColumnStatistics(buildUnknownColumnStatistics(colRefToColumnMetaMap.keySet()));
    }
    return statisticsBuilder.build();
}
@Test
public void testUnknownTableStatistics() {
    IcebergStatisticProvider statisticProvider = new IcebergStatisticProvider();
    mockedNativeTableA.newFastAppend().appendFile(FILE_A).commit();
    IcebergTable icebergTable = new IcebergTable(1, "srTableName", "iceberg_catalog", "resource_name", "db_name",
            "table_name", "", Lists.newArrayList(), mockedNativeTableA, Maps.newHashMap());
    Map<ColumnRefOperator, Column> colRefToColumnMetaMap = new HashMap<ColumnRefOperator, Column>();
    ColumnRefOperator columnRefOperator1 = new ColumnRefOperator(3, Type.INT, "id", true);
    ColumnRefOperator columnRefOperator2 = new ColumnRefOperator(4, Type.STRING, "data", true);
    colRefToColumnMetaMap.put(columnRefOperator1, new Column("id", Type.INT));
    colRefToColumnMetaMap.put(columnRefOperator2, new Column("data", Type.STRING));
    TableVersionRange version = TableVersionRange.withEnd(Optional.of(
            mockedNativeTableA.currentSnapshot().snapshotId()));
    // No cached file stats and no session: the provider falls back to its one-record estimate.
    Statistics statistics = statisticProvider.getTableStatistics(icebergTable, colRefToColumnMetaMap, null, null, version);
    Assert.assertEquals(1.0, statistics.getOutputRowCount(), 0.001);
}
/**
 * Evaluates a sprintf-style template against an API-level event by delegating to the
 * concrete-Event overload. Any other Event implementation is rejected.
 *
 * @throws IllegalStateException if the event is not the expected concrete class
 */
public static String evaluate(final co.elastic.logstash.api.Event event, final String template) throws JsonProcessingException {
    // Guard clause: only the concrete Event implementation is supported.
    if (!(event instanceof Event)) {
        throw new IllegalStateException("Unknown event concrete class: " + event.getClass().getName());
    }
    return evaluate((Event) event, template);
}
@Test
public void TestMixDateAndFields() throws IOException {
    // Templates may mix date directives (%{+YYYY}) and field references (%{bar}).
    Event event = getTestEvent();
    String path = "/full/%{+YYYY}/weeee/%{bar}";
    assertEquals("/full/2015/weeee/foo", StringInterpolation.evaluate(event, path));
}
/**
 * Serializes {@code value} with the given writer into an exactly-sized byte array.
 * If the writer under- or over-runs its own sizeInBytes() estimate, the partial output
 * is included in the raised assertion error to aid debugging.
 */
public static <T> byte[] write(Writer<T> writer, T value) {
  byte[] result = new byte[writer.sizeInBytes(value)];
  WriteBuffer b = WriteBuffer.wrap(result);
  try {
    writer.write(value, b);
  } catch (RuntimeException e) {
    // Heuristic: the first zero byte marks how far the writer got. A legitimately
    // written 0x00 would make this an under-estimate, acceptable for diagnostics.
    int lengthWritten = result.length;
    for (int i = 0; i < result.length; i++) {
      if (result[i] == 0) {
        lengthWritten = i;
        break;
      }
    }

    // Don't use value directly in the message, as its toString might be implemented using this
    // method. If that's the case, we'd stack overflow. Instead, emit what we've written so far.
    String message = format(
        "Bug found using %s to write %s as json. Wrote %s/%s bytes: %s",
        writer.getClass().getSimpleName(),
        value.getClass().getSimpleName(),
        lengthWritten,
        result.length,
        new String(result, 0, lengthWritten, UTF_8));
    throw Platform.get().assertionError(message, e);
  }
  return result;
}
@Test
void doesntStackOverflowOnToBufferWriterBug_lessThanBytes() {
    // A writer that claims 2 bytes but writes only 1 before throwing must surface an
    // AssertionError containing the partial output — even when toString() itself is
    // implemented via JsonWriter.write (the stack-overflow trap being guarded against).
    class FooWriter implements WriteBuffer.Writer<Object> {
        @Override
        public int sizeInBytes(Object value) {
            return 2;
        }

        @Override
        public void write(Object value, WriteBuffer buffer) {
            buffer.writeByte('a');
            throw new RuntimeException("buggy");
        }
    }

    class Foo {
        @Override
        public String toString() {
            return new String(JsonWriter.write(new FooWriter(), this), UTF_8);
        }
    }

    Foo foo = new Foo();
    assertThatThrownBy(foo::toString)
        .isInstanceOf(AssertionError.class)
        .hasMessage("Bug found using FooWriter to write Foo as json. Wrote 1/2 bytes: a");
}
/**
 * Normalizes a URL query string by percent-encoding parameter names and values
 * (RFC 3986) while preserving the '=' and '&amp;' structure. Handles value-less
 * parameters ({@code &a&} becomes {@code a=}) and strips a trailing '&amp;'.
 */
public static String normalizeParams(String paramPart, Charset charset) {
    if(StrUtil.isEmpty(paramPart)){
        return paramPart;
    }
    final StrBuilder builder = StrBuilder.create(paramPart.length() + 16);
    final int len = paramPart.length();
    String name = null;
    int pos = 0; // start index of the not-yet-processed characters
    char c; // current character
    int i; // index of the current character
    for (i = 0; i < len; i++) {
        c = paramPart.charAt(i);
        if (c == '=') { // key/value separator
            if (null == name) {
                // Only the first '=' before a name is defined acts as the key/value
                // separator; any later '=' is treated as an ordinary character.
                name = (pos == i) ? StrUtil.EMPTY : paramPart.substring(pos, i);
                pos = i + 1;
            }
        } else if (c == '&') { // parameter-pair separator
            if (pos != i) {
                if (null == name) {
                    // For strings like "&a&" with no value, emit name "a" with an empty value.
                    name = paramPart.substring(pos, i);
                    builder.append(RFC3986.QUERY_PARAM_NAME.encode(name, charset)).append('=');
                } else {
                    builder.append(RFC3986.QUERY_PARAM_NAME.encode(name, charset)).append('=')
                            .append(RFC3986.QUERY_PARAM_VALUE.encode(paramPart.substring(pos, i), charset)).append('&');
                }
                name = null;
            }
            pos = i + 1;
        }
    }

    // Tail handling: flush whatever remains after the last separator.
    if (null != name) {
        builder.append(URLUtil.encodeQuery(name, charset)).append('=');
    }
    if (pos != i) {
        if (null == name && pos > 0) {
            builder.append('=');
        }
        builder.append(URLUtil.encodeQuery(paramPart.substring(pos, i), charset));
    }

    // Strip a trailing '&' if present.
    int lastIndex = builder.length() - 1;
    if ('&' == builder.charAt(lastIndex)) {
        builder.delTo(lastIndex);
    }
    return builder.toString();
}
@Test
public void normalizeBlankParamsTest() {
    // An empty query string must be returned unchanged.
    final String encodeResult = HttpUtil.normalizeParams("", CharsetUtil.CHARSET_UTF_8);
    assertEquals("", encodeResult);
}
/**
 * Attempts SSH public-key authentication with the bookmark's identity file.
 * Detects the key file format (PKCS8, OpenSSH, OpenSSHv1, PuTTY), prompts for the
 * passphrase when needed, and returns whether the client is authenticated.
 * Returns false when the credentials do not use public-key authentication.
 */
@Override
public Boolean authenticate(final Host bookmark, final LoginCallback prompt, final CancelCallback cancel)
        throws BackgroundException {
    final Credentials credentials = bookmark.getCredentials();
    if(credentials.isPublicKeyAuthentication()) {
        if(log.isDebugEnabled()) {
            log.debug(String.format("Login using public key authentication with credentials %s", credentials));
        }
        final Local privKey = credentials.getIdentity();
        final Local pubKey;
        final FileKeyProvider provider;
        // Tracks whether the user cancelled the passphrase prompt inside the callback,
        // so the IOException can be translated back into a LoginCanceledException.
        final AtomicBoolean canceled = new AtomicBoolean();
        try {
            final KeyFormat format = KeyProviderUtil.detectKeyFileFormat(
                new InputStreamReader(privKey.getInputStream(), StandardCharsets.UTF_8), true);
            if(log.isInfoEnabled()) {
                log.info(String.format("Reading private key %s with key format %s", privKey, format));
            }
            // Pick the provider matching the detected format; OpenSSH formats may have a
            // companion public key file next to the private key.
            switch(format) {
                case PKCS8:
                    provider = new PKCS8KeyFile.Factory().create();
                    pubKey = null;
                    break;
                case OpenSSH: {
                    provider = new OpenSSHKeyFile.Factory().create();
                    final File f = OpenSSHKeyFileUtil.getPublicKeyFile(new File(privKey.getAbsolute()));
                    if(f != null) {
                        pubKey = LocalFactory.get(f.getAbsolutePath());
                    }
                    else {
                        pubKey = null;
                    }
                    break;
                }
                case OpenSSHv1: {
                    provider = new OpenSSHKeyV1KeyFile.Factory().create();
                    final File f = OpenSSHKeyFileUtil.getPublicKeyFile(new File(privKey.getAbsolute()));
                    if(f != null) {
                        pubKey = LocalFactory.get(f.getAbsolutePath());
                    }
                    else {
                        pubKey = null;
                    }
                    break;
                }
                case PuTTY:
                    provider = new PuTTYKeyFile.Factory().create();
                    pubKey = null;
                    break;
                default:
                    throw new InteroperabilityException(String.format("Unknown key format for file %s", privKey.getName()));
            }
            provider.init(new InputStreamReader(privKey.getInputStream(), StandardCharsets.UTF_8),
                pubKey != null ? new InputStreamReader(pubKey.getInputStream(), StandardCharsets.UTF_8) : null,
                new PasswordFinder() {
                    @Override
                    public char[] reqPassword(Resource<?> resource) {
                        // Lazily prompt for the passphrase only when no cached value exists.
                        if(StringUtils.isEmpty(credentials.getIdentityPassphrase())) {
                            try {
                                // Use password prompt
                                final Credentials input = prompt.prompt(bookmark,
                                    LocaleFactory.localizedString("Private key password protected", "Credentials"),
                                    String.format("%s (%s)",
                                        LocaleFactory.localizedString("Enter the passphrase for the private key file", "Credentials"),
                                        privKey.getAbbreviatedPath()),
                                    new LoginOptions()
                                        .icon(bookmark.getProtocol().disk())
                                        .user(false).password(true)
                                );
                                credentials.setSaved(input.isSaved());
                                credentials.setIdentityPassphrase(input.getPassword());
                            }
                            catch(LoginCanceledException e) {
                                canceled.set(true);
                                // Return null if user cancels
                                return StringUtils.EMPTY.toCharArray();
                            }
                        }
                        return credentials.getIdentityPassphrase().toCharArray();
                    }

                    @Override
                    public boolean shouldRetry(Resource<?> resource) {
                        return false;
                    }
                });
            client.auth(credentials.getUsername(), new AuthPublickey(provider));
            return client.isAuthenticated();
        }
        catch(IOException e) {
            if(canceled.get()) {
                throw new LoginCanceledException();
            }
            throw new SFTPExceptionMappingService().map(e);
        }
    }
    return false;
}
/**
 * Verifies that authenticating with a passphrase-protected OpenSSH key surfaces a
 * {@link LoginCanceledException} when the user cancels the passphrase prompt, and that
 * the prompt was actually shown (tracked via the {@code b} flag).
 */
@Test(expected = LoginCanceledException.class)
public void testAuthenticateOpenSSHKeyWithPassword() throws Exception {
    // Write an encrypted RSA private key fixture to a temporary file.
    final Local key = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
    try {
        new DefaultLocalTouchFeature().touch(key);
        IOUtils.copy(new StringReader("-----BEGIN RSA PRIVATE KEY-----\n" +
            "Proc-Type: 4,ENCRYPTED\n" +
            "DEK-Info: AES-128-CBC,356A353DAFDC2E16E8BD0EE23A6201A2\n" +
            "\n" +
            "pvpC2RA1TteVsp584fSZ6RYFz97CF8tJXfyP4/8UdzpIVM8VXXlk4g3AnyI9/JD0\n" +
            "4/0dzNqGvg/84xUpDpdJp/w8fWJ8IzE7RXf1xDfg0xavr2iegp2aZBd48KVKImwU\n" +
            "yJlzy27VmVvIvWID2zPrNhOWzr4AdnP/NprLfhHPQDHV5gRcS92s6vFktZOPzNtQ\n" +
            "3O+j3O5MAyf/MpgPH4BTubOjcuZuZg3AJCjEPxLlwrRfxqXkRXXMB7XxDFdK7LQ/\n" +
            "fQnJzikcrYXFio8+DJhBg7OyOnlAmC0I85YomZJ+8C3A3bye9PakIxHJn/qNIQew\n" +
            "BujHxPVmnezjFzStr/SyfLE2a+RZu84Jm6u9+DuJYF5/Vo6yv6+zubsVaflXp5fS\n" +
            "SAogS0quWfoqoiUfhgCuOZlqv/aeo/BEetUEdHVi4KTdeSpcfrJa4CphXd8TwEPN\n" +
            "L4NFSc+8CeGayO45o5lXeQiKa4UH2oPEBaANHK4SQPKJ9NdyTlFN/O1c77kCvG4W\n" +
            "4thchQkUvwqwTYXwx9jNW3x7FBytJwmhi9DpzHMa/LFRrnedarFPDgep4E40NjRB\n" +
            "fy877Wd+KJTlrHjyQR13wgtlGZdcTO5QzLseztxqdaD14Dn7jPF/YJBDaj65Jw1N\n" +
            "+G6EB0zN70WL7Y3+2HnSLNZWEnLhletzfwbjVqr+Vg4XB2HQKH52gCyh+ITPEjqR\n" +
            "wU00oMJvGf518U+6awxzb3zwnoxMrFwcnaLqwsZNQ5CjmYVE/yERSK47OMYCNQl4\n" +
            "0Xxa9mWYBqWlfdMurkGCD6OuUWMx5t4mcpSg30UEQNBEVfrVk6t480iztgVJprEO\n" +
            "vhepM2nw326PH5VYAoXH+OmEezjI1AmHKqpbB/y9UQv6ZjEyUT70Tbs9JBtU4dze\n" +
            "Yha1Dc0+eYkUvZ5AjENQ/Bvfdyit4xxbDrU6TbFmyHpHwMPCNkcgO0u/Mgtc5Hmc\n" +
            "Gi6RaxUaxSZ2IlpJDNkqAzmv1Xr+M9TxbF2gZY+TJHUt/mc1rFpTl2qZ/tK/Ei1U\n" +
            "8TBVJHcNNwHiHtm/NpREYTmzu0s8X602JgXrkBxkM40NGVRqd08jaULhxdWcTmzW\n" +
            "pweib9WhIrvjTNZTAjjGku625qLihDt5jtbJxspM2dLGfcG4zgYgRr4u9HA+60oD\n" +
            "l1oNjz8IfBuJLJ3rwENI6oX9FW7huKc/XV1hP72/l2VhfuxtTufdjbaiwwiwObRA\n" +
            "O+zwB8NPWRG6UYj9IAWjASoPXOoyhk/f1fzvTH7xeO35QjkCICln095T+hNMZRiC\n" +
            "VpCCKsQGY2O30D9OJlnTpylBAq/Q/HXNL8Jj2f/rZRqDGzidj2No5mun/pZ3uwzr\n" +
            "CRrEpvfFuf8g1EnPQXmdlYRi/nmtBKsiQr0GWVzIOzNRi/tgsV0tyUgBT9QL4JKt\n" +
            "/z54PrlBK74I9SWcBv9EwCAfL9YdZ7mW0iWrmUUmcpuJcRUXnKvTynTpq/l6GE8+\n" +
            "Ld5saHMVWt7GlEbM3Fjqfvj7/dbtcy3TTmy0Vx4GbKzsaPytAb2jgLGn8bQfjQzp\n" +
            "hnPC1l+r7ebV7tBR216+6PmsXQu7atqgbGjb7Dh+GP8Ak73F8v6LPtyz+tAOYwpB\n" +
            "-----END RSA PRIVATE KEY-----\n"), key.getOutputStream(false), StandardCharsets.UTF_8);
        // Records whether the passphrase prompt was invoked at all.
        final AtomicBoolean b = new AtomicBoolean();
        // Reconnect
        session.disconnect();
        session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
        session.getHost().getCredentials().setIdentity(key);
        assertTrue(new SFTPPublicKeyAuthentication(session.getClient()).authenticate(session.getHost(), new DisabledLoginCallback() {
            @Override
            public Credentials prompt(final Host bookmark, String username, String title, String reason, LoginOptions options) throws LoginCanceledException {
                // Simulate the user cancelling the passphrase dialog.
                b.set(true);
                throw new LoginCanceledException();
            }
        }, new DisabledCancelCallback()));
        assertTrue(b.get());
    }
    finally {
        // Always remove the temporary key file.
        key.delete();
    }
}
public Task schedule(BackOff backOff, ThrowingFunction<Task, Boolean, Exception> function) { final BackOffTimerTask task = new BackOffTimerTask(backOff, scheduler, function); long delay = task.next(); if (delay != BackOff.NEVER) { scheduler.schedule(task, delay, TimeUnit.MILLISECONDS); } else { task.cancel(); } return task; }
@Test public void testBackOffTimerWithMaxElapsedTime() throws Exception { final CountDownLatch latch = new CountDownLatch(1); final AtomicInteger counter = new AtomicInteger(); final ScheduledExecutorService executor = Executors.newScheduledThreadPool(3); final BackOff backOff = BackOff.builder().delay(100).maxElapsedTime(400).build(); final BackOffTimer timer = new BackOffTimer(executor); BackOffTimer.Task task = timer.schedule( backOff, context -> { assertEquals(counter.incrementAndGet(), context.getCurrentAttempts()); assertEquals(100, context.getCurrentDelay()); assertEquals(100L * counter.get(), context.getCurrentElapsedTime()); return true; }); task.whenComplete( (context, throwable) -> { assertTrue(counter.get() <= 5); assertEquals(BackOffTimer.Task.Status.Exhausted, context.getStatus()); latch.countDown(); }); latch.await(5, TimeUnit.SECONDS); executor.shutdownNow(); }
public ValidationResult validateSecretsConfig(final String pluginId, final Map<String, String> configuration) { return getVersionedSecretsExtension(pluginId).validateSecretsConfig(pluginId, configuration); }
@Test void validateSecretsConfig_shouldDelegateToVersionedExtension() { SecretsExtensionV1 secretsExtensionV1 = mock(SecretsExtensionV1.class); Map<String, VersionedSecretsExtension> secretsExtensionMap = Map.of("1.0", secretsExtensionV1); extension = new SecretsExtension(pluginManager, extensionsRegistry, secretsExtensionMap); Map<String, String> configuration = Map.of("key", "val"); when(pluginManager.resolveExtensionVersion(PLUGIN_ID, SECRETS_EXTENSION, SUPPORTED_VERSIONS)).thenReturn(SecretsExtensionV1.VERSION); this.extension.validateSecretsConfig(PLUGIN_ID, configuration); verify(secretsExtensionV1).validateSecretsConfig(PLUGIN_ID, configuration); }
@PublicEvolving public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes( MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) { return getMapReturnTypes(mapInterface, inType, null, false); }
@SuppressWarnings({"rawtypes", "unchecked"}) @Test void testValueSupertypeException() { RichMapFunction<?, ?> function = new RichMapFunction<StringValue, Value>() { private static final long serialVersionUID = 1L; @Override public Value map(StringValue value) throws Exception { return null; } }; TypeInformation<?> ti = TypeExtractor.getMapReturnTypes( function, (TypeInformation) TypeInformation.of(new TypeHint<StringValue>() {}), "name", true); assertThat(ti).isInstanceOf(MissingTypeInfo.class); assertThatThrownBy( () -> TypeExtractor.getMapReturnTypes( function, (TypeInformation) TypeInformation.of(new TypeHint<StringValue>() {}))) .isInstanceOf(InvalidTypesException.class); }
public ConsumptionRateLimiter createServerRateLimiter(PinotConfiguration serverConfig, ServerMetrics serverMetrics) { double serverRateLimit = serverConfig.getProperty(CommonConstants.Server.CONFIG_OF_SERVER_CONSUMPTION_RATE_LIMIT, CommonConstants.Server.DEFAULT_SERVER_CONSUMPTION_RATE_LIMIT); if (serverRateLimit <= 0) { LOGGER.warn("Invalid server consumption rate limit: {}, throttling is disabled", serverRateLimit); _serverRateLimiter = NOOP_RATE_LIMITER; } else { LOGGER.info("A server consumption rate limiter is set up with rate limit: {}", serverRateLimit); MetricEmitter metricEmitter = new MetricEmitter(serverMetrics, SERVER_CONSUMPTION_RATE_METRIC_KEY_NAME); _serverRateLimiter = new RateLimiterImpl(serverRateLimit, metricEmitter); } return _serverRateLimiter; }
@Test public void testCreateServerRateLimiter() { // Server config 1 ConsumptionRateLimiter rateLimiter = _consumptionRateManager.createServerRateLimiter(SERVER_CONFIG_1, null); assertEquals(5.0, ((RateLimiterImpl) rateLimiter).getRate(), DELTA); // Server config 2 rateLimiter = _consumptionRateManager.createServerRateLimiter(SERVER_CONFIG_2, null); assertEquals(2.5, ((RateLimiterImpl) rateLimiter).getRate(), DELTA); // Server config 3 rateLimiter = _consumptionRateManager.createServerRateLimiter(SERVER_CONFIG_3, null); assertEquals(rateLimiter, NOOP_RATE_LIMITER); // Server config 4 rateLimiter = _consumptionRateManager.createServerRateLimiter(SERVER_CONFIG_4, null); assertEquals(rateLimiter, NOOP_RATE_LIMITER); }
@Override public boolean tryClaim(Timestamp position) { if (position.equals(lastAttemptedPosition)) { return true; } return super.tryClaim(position, this.partition); }
@Test public void testTryClaim() { assertEquals(range, tracker.currentRestriction()); assertTrue(tracker.tryClaim(Timestamp.ofTimeMicroseconds(10L))); assertTrue(tracker.tryClaim(Timestamp.ofTimeMicroseconds(10L))); assertTrue(tracker.tryClaim(Timestamp.ofTimeMicroseconds(11L))); assertTrue(tracker.tryClaim(Timestamp.ofTimeMicroseconds(11L))); assertTrue(tracker.tryClaim(Timestamp.ofTimeMicroseconds(19L))); assertFalse(tracker.tryClaim(Timestamp.ofTimeMicroseconds(20L))); }
public boolean isJavaFunction() { return type == Type.JAVA_FUNCTION; }
@Test void testJavaFunction() { Variable v = new Variable((Function<String, String>) this::simpleFunction); assertTrue(v.isJavaFunction()); v = new Variable((BiFunction<String, String, String>) this::simpleBiFunction); // maybe we are ok with this, karate "call" can be used only with functions assertFalse(v.isJavaFunction()); }
public void processNullMessage(Null nullMessage, Afnemersbericht afnemersbericht){ if (afnemersbericht != null && afnemersbericht.getType() == Afnemersbericht.Type.Av01){ afnemersberichtRepository.delete(afnemersbericht); } logger.info("Received null message"); }
@Test public void testProcessNullMessageAv01(){ Null testNullMessage = TestDglMessagesUtil.createTestNullMessage(); when(afnemersbericht.getType()).thenReturn(Afnemersbericht.Type.Av01); classUnderTest.processNullMessage(testNullMessage, afnemersbericht); verify(afnemersberichtRepository, times(1)).delete(afnemersbericht); }
/**
 * Applies log-level configuration from pipeline options to the JUL logging tree
 * and the System.out/err stream adapters. Must be called after {@code initialize()}.
 *
 * Supports both the deprecated {@code DataflowWorkerLoggingOptions} settings and the
 * newer {@code SdkHarnessOptions}; deprecated settings take precedence and trigger a
 * warning at the end.
 *
 * @param options worker logging options (also viewed as SdkHarnessOptions)
 * @throws RuntimeException when called before initialize()
 */
public static synchronized void configure(DataflowWorkerLoggingOptions options) {
    if (!initialized) {
        throw new RuntimeException("configure() called before initialize()");
    }
    // For compatibility reason, we do not call SdkHarnessOptions.getConfiguredLoggerFromOptions
    // to config the logging for legacy worker, instead replicate the config steps used for
    // DataflowWorkerLoggingOptions for default log level and log level overrides.
    SdkHarnessOptions harnessOptions = options.as(SdkHarnessOptions.class);
    boolean usedDeprecated = false;

    // default value for both DefaultSdkHarnessLogLevel and DefaultWorkerLogLevel are INFO
    Level overrideLevel = getJulLevel(harnessOptions.getDefaultSdkHarnessLogLevel());
    // A non-default deprecated worker log level wins over the harness default.
    if (options.getDefaultWorkerLogLevel() != null && options.getDefaultWorkerLogLevel() != INFO) {
        overrideLevel = getJulLevel(options.getDefaultWorkerLogLevel());
        usedDeprecated = true;
    }
    LogManager.getLogManager().getLogger(ROOT_LOGGER_NAME).setLevel(overrideLevel);

    // Per-logger overrides: deprecated worker overrides take precedence over harness overrides.
    if (options.getWorkerLogLevelOverrides() != null) {
        for (Map.Entry<String, DataflowWorkerLoggingOptions.Level> loggerOverride :
            options.getWorkerLogLevelOverrides().entrySet()) {
            Logger logger = Logger.getLogger(loggerOverride.getKey());
            logger.setLevel(getJulLevel(loggerOverride.getValue()));
            // Keep strong references so the JUL LogManager does not GC the configured loggers.
            configuredLoggers.add(logger);
        }
        usedDeprecated = true;
    } else if (harnessOptions.getSdkHarnessLogLevelOverrides() != null) {
        for (Map.Entry<String, SdkHarnessOptions.LogLevel> loggerOverride :
            harnessOptions.getSdkHarnessLogLevelOverrides().entrySet()) {
            Logger logger = Logger.getLogger(loggerOverride.getKey());
            logger.setLevel(getJulLevel(loggerOverride.getValue()));
            configuredLoggers.add(logger);
        }
    }

    // If the options specify a level for messages logged to System.out/err, we need to reconfigure
    // the corresponding stream adapter.
    if (options.getWorkerSystemOutMessageLevel() != null) {
        System.out.close();
        System.setOut(
            JulHandlerPrintStreamAdapterFactory.create(
                loggingHandler,
                SYSTEM_OUT_LOG_NAME,
                getJulLevel(options.getWorkerSystemOutMessageLevel()),
                Charset.defaultCharset()));
    }
    if (options.getWorkerSystemErrMessageLevel() != null) {
        System.err.close();
        System.setErr(
            JulHandlerPrintStreamAdapterFactory.create(
                loggingHandler,
                SYSTEM_ERR_LOG_NAME,
                getJulLevel(options.getWorkerSystemErrMessageLevel()),
                Charset.defaultCharset()));
    }
    if (usedDeprecated) {
        LOG.warn(
            "Deprecated DataflowWorkerLoggingOptions are used for log level settings."
                + "Consider using options defined in SdkHarnessOptions for forward compatibility.");
    }
}
@Test public void testWithSdkHarnessCustomLogLevels() { SdkHarnessOptions options = PipelineOptionsFactory.as(SdkHarnessOptions.class); options.setSdkHarnessLogLevelOverrides( new SdkHarnessLogLevelOverrides() .addOverrideForName("C", SdkHarnessOptions.LogLevel.DEBUG) .addOverrideForName("D", SdkHarnessOptions.LogLevel.ERROR)); DataflowWorkerLoggingInitializer.configure(options.as(DataflowWorkerLoggingOptions.class)); Logger aLogger = LogManager.getLogManager().getLogger("C"); assertEquals(0, aLogger.getHandlers().length); assertEquals(Level.FINE, aLogger.getLevel()); assertTrue(aLogger.getUseParentHandlers()); Logger bLogger = LogManager.getLogManager().getLogger("D"); assertEquals(Level.SEVERE, bLogger.getLevel()); assertEquals(0, bLogger.getHandlers().length); assertTrue(aLogger.getUseParentHandlers()); }
public JoinedRowData replace(RowData row1, RowData row2) { this.row1 = row1; this.row2 = row2; return this; }
@Test void testReplace() { final RowData row1 = GenericRowData.of(1L); final RowData row2 = GenericRowData.of(2L); final JoinedRowData joinedRow = new JoinedRowData(row1, row2); assertThat(joinedRow).hasArity(2); joinedRow.replace(GenericRowData.of(3L), GenericRowData.of(4L, 5L)); assertThat(joinedRow).hasArity(3); assertThat(joinedRow).getLong(0).isEqualTo(3); assertThat(joinedRow).getLong(1).isEqualTo(4); assertThat(joinedRow).getLong(2).isEqualTo(5); }
/**
 * Moves a file or folder, handling the three independent server operations the EUE API
 * requires: trashing an existing target (when overwriting), moving to a different parent
 * container, and renaming when the leaf name changes.
 *
 * @param file     source path
 * @param target   destination path
 * @param status   transfer status; {@code isExists()} signals an overwrite
 * @param delete   callback invoked when the existing target is trashed
 * @param callback connection prompt callback
 * @return the target path
 * @throws BackgroundException mapped from any API failure
 */
@Override
public Path move(final Path file, final Path target, final TransferStatus status, final Delete.Callback delete, final ConnectionCallback callback) throws BackgroundException {
    try {
        final EueApiClient client = new EueApiClient(session);
        if(status.isExists()) {
            // Case-only renames must not trash the target: the backend treats them as the same file.
            if(!new CaseInsensitivePathPredicate(file).test(target)) {
                if(log.isWarnEnabled()) {
                    log.warn(String.format("Trash file %s to be replaced with %s", target, file));
                }
                new EueTrashFeature(session, fileid).delete(Collections.singletonMap(target, status), callback, delete);
            }
        }
        final String resourceId = fileid.getFileId(file);
        // Step 1: move between containers when the parent changes.
        if(!new SimplePathPredicate(file.getParent()).test(target.getParent())) {
            final ResourceMoveResponseEntries resourceMoveResponseEntries;
            final String parentResourceId = fileid.getFileId(target.getParent());
            // Root and trash are addressed by alias; ordinary folders by resource id.
            switch(parentResourceId) {
                case EueResourceIdProvider.ROOT:
                case EueResourceIdProvider.TRASH:
                    resourceMoveResponseEntries = new MoveChildrenForAliasApiApi(client)
                        .resourceAliasAliasChildrenMovePost(parentResourceId,
                            Collections.singletonList(String.format("%s/resource/%s", session.getBasePath(), resourceId)), null, null, null,
                            "rename", null);
                    break;
                default:
                    resourceMoveResponseEntries = new MoveChildrenApi(client)
                        .resourceResourceIdChildrenMovePost(parentResourceId,
                            Collections.singletonList(String.format("%s/resource/%s", session.getBasePath(), resourceId)), null, null, null,
                            "rename", null);
            }
            if(null == resourceMoveResponseEntries) {
                // Move of single file will return 200 status code with empty response body
            }
            else {
                for(ResourceMoveResponseEntry resourceMoveResponseEntry : resourceMoveResponseEntries.values()) {
                    switch(resourceMoveResponseEntry.getStatusCode()) {
                        case HttpStatus.SC_OK:
                            break;
                        default:
                            log.warn(String.format("Failure %s moving file %s", resourceMoveResponseEntries, file));
                            final ResourceCreationResponseEntryEntity entity = resourceMoveResponseEntry.getEntity();
                            if(null == entity) {
                                // No entity body: map from the HTTP reason phrase instead.
                                throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getReason(),
                                    null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders()));
                            }
                            throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getEntity().getError(),
                                null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders()));
                    }
                }
            }
        }
        // Step 2: rename when the leaf name differs.
        if(!StringUtils.equals(file.getName(), target.getName())) {
            final ResourceUpdateModel resourceUpdateModel = new ResourceUpdateModel();
            final ResourceUpdateModelUpdate resourceUpdateModelUpdate = new ResourceUpdateModelUpdate();
            final Uifs uifs = new Uifs();
            uifs.setName(target.getName());
            resourceUpdateModelUpdate.setUifs(uifs);
            resourceUpdateModel.setUpdate(resourceUpdateModelUpdate);
            final ResourceMoveResponseEntries resourceMoveResponseEntries = new UpdateResourceApi(client).resourceResourceIdPatch(resourceId,
                resourceUpdateModel, null, null, null);
            if(null == resourceMoveResponseEntries) {
                // Move of single file will return 200 status code with empty response body
            }
            else {
                for(ResourceMoveResponseEntry resourceMoveResponseEntry : resourceMoveResponseEntries.values()) {
                    switch(resourceMoveResponseEntry.getStatusCode()) {
                        case HttpStatus.SC_CREATED:
                            break;
                        default:
                            log.warn(String.format("Failure %s renaming file %s", resourceMoveResponseEntry, file));
                            throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getReason(),
                                null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders()));
                    }
                }
            }
        }
        // Invalidate the cached id for the old path; the id now belongs to the target.
        fileid.cache(file, null);
        return target;
    }
    catch(ApiException e) {
        throw new EueExceptionMappingService().map("Cannot rename {0}", e, file);
    }
}
@Test public void testMoveFileToRoot() throws Exception { final EueResourceIdProvider fileid = new EueResourceIdProvider(session); final Path sourceFolder = new EueDirectoryFeature(session, fileid).mkdir( new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus()); final Path sourceFile = new Path(sourceFolder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); createFile(fileid, sourceFile, RandomUtils.nextBytes(541)); final PathAttributes sourceAttr = new EueAttributesFinderFeature(session, fileid).find(sourceFile); assertTrue(new EueFindFeature(session, fileid).find(sourceFile)); final Path targetFile = new EueMoveFeature(session, fileid).move(sourceFile, new Path(new Path("/", EnumSet.of(Path.Type.directory, Path.Type.placeholder)), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback()); assertFalse(new EueFindFeature(session, fileid).find(sourceFile)); assertTrue(new EueFindFeature(session, fileid).find(targetFile)); assertFalse(new DefaultFindFeature(session).find(sourceFile)); assertTrue(new DefaultFindFeature(session).find(targetFile)); assertEquals(sourceAttr.getSize(), new EueAttributesFinderFeature(session, fileid).find(targetFile).getSize()); assertNotEquals(sourceAttr.getETag(), new EueAttributesFinderFeature(session, fileid).find(targetFile).getETag()); assertEquals(sourceAttr.getFileId(), new EueAttributesFinderFeature(session, fileid).find(targetFile).getFileId()); new EueDeleteFeature(session, fileid).delete(Collections.singletonList(sourceFolder), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
public int size() { return mWorkers.size(); }
@Test public void size() { WorkerIdentity worker1 = WorkerIdentityTestUtils.ofLegacyId(1); WorkerIdentity worker2 = WorkerIdentityTestUtils.ofLegacyId(2); List<WorkerInfo> workers = ImmutableList.of( new WorkerInfo().setIdentity(worker1), new WorkerInfo().setIdentity(worker2) ); WorkerClusterView view = new WorkerClusterView(workers); assertEquals(workers.size(), view.size()); assertFalse(view.isEmpty()); }
public static String formatSql(final AstNode root) { final StringBuilder builder = new StringBuilder(); new Formatter(builder).process(root, 0); return StringUtils.stripEnd(builder.toString(), "\n"); }
@Test public void shouldFormatSelectWithReservedWordAlias() { final String statementString = "CREATE STREAM S AS SELECT address AS `STREAM` FROM address;"; final Statement statement = parseSingle(statementString); assertThat(SqlFormatter.formatSql(statement), equalTo("CREATE STREAM S AS SELECT" + " ADDRESS `STREAM`\n" + "FROM ADDRESS ADDRESS\nEMIT CHANGES")); }
public static KMeans fit(double[][] data, int k) { return fit(data, k, 100, 1E-4); }
@Test public void testUSPS() throws Exception { System.out.println("USPS"); MathEx.setSeed(19650218); // to get repeatable results. double[][] x = USPS.x; int[] y = USPS.y; double[][] testx = USPS.testx; int[] testy = USPS.testy; KMeans model = KMeans.fit(x, 10, 100, 4); System.out.println(model); double r = RandIndex.of(y, model.y); double r2 = AdjustedRandIndex.of(y, model.y); System.out.format("Training rand index = %.2f%%, adjusted rand index = %.2f%%%n", 100.0 * r, 100.0 * r2); assertEquals(0.9063, r, 1E-4); assertEquals(0.5148, r2, 1E-4); System.out.format("MI = %.2f%n", MutualInformation.of(y, model.y)); System.out.format("NMI.joint = %.2f%%%n", 100 * NormalizedMutualInformation.joint(y, model.y)); System.out.format("NMI.max = %.2f%%%n", 100 * NormalizedMutualInformation.max(y, model.y)); System.out.format("NMI.min = %.2f%%%n", 100 * NormalizedMutualInformation.min(y, model.y)); System.out.format("NMI.sum = %.2f%%%n", 100 * NormalizedMutualInformation.sum(y, model.y)); System.out.format("NMI.sqrt = %.2f%%%n", 100 * NormalizedMutualInformation.sqrt(y, model.y)); int[] p = new int[testx.length]; for (int i = 0; i < testx.length; i++) { p[i] = model.predict(testx[i]); } r = RandIndex.of(testy, p); r2 = AdjustedRandIndex.of(testy, p); System.out.format("Testing rand index = %.2f%%, adjusted rand index = %.2f%%%n", 100.0 * r, 100.0 * r2); assertEquals(0.8942, r, 1E-4); assertEquals(0.4540, r2, 1E-4); java.nio.file.Path temp = Write.object(model); Read.object(temp); }
public static int compareVersion(final String versionA, final String versionB) { final String[] sA = versionA.split("\\."); final String[] sB = versionB.split("\\."); int expectSize = 3; if (sA.length != expectSize || sB.length != expectSize) { throw new IllegalArgumentException("version must be like x.y.z(-beta)"); } int first = Objects.compare(sA[0], sB[0], STRING_COMPARATOR); if (first != 0) { return first; } int second = Objects.compare(sA[1], sB[1], STRING_COMPARATOR); if (second != 0) { return second; } return Objects.compare(sA[2].split("-")[0], sB[2].split("-")[0], STRING_COMPARATOR); }
@Test void testVersionCompareEtWithChar() { assertEquals(0, VersionUtils.compareVersion("1.2.1", "1.2.1-beta")); }
@Override public String toString() { ToStringHelper helper = MoreObjects.toStringHelper("byte[]"); if (bytes != null) { helper.add("length", bytes.length) .add("hash", Arrays.hashCode(bytes)); } else { helper.addValue(bytes); } return helper.toString(); }
@Test public void testToString() { final byte[] some = new byte[] {2, 5, 0, 1 }; final String expected = "byte[]{length=" + some.length + ", hash=" + Arrays.hashCode(some) + "}"; assertEquals(expected, String.valueOf(ByteArraySizeHashPrinter.of(some))); assertNotNull(ByteArraySizeHashPrinter.orNull(some)); }
@Override public boolean markSlotActive(AllocationID allocationId) throws SlotNotFoundException { checkRunning(); TaskSlot<T> taskSlot = getTaskSlot(allocationId); if (taskSlot != null) { return markExistingSlotActive(taskSlot); } else { throw new SlotNotFoundException(allocationId); } }
@Test void testMarkSlotActiveDeactivatesSlotTimeout() throws Exception { runDeactivateSlotTimeoutTest( (taskSlotTable, jobId, allocationId) -> taskSlotTable.markSlotActive(allocationId)); }
@Operation(summary = "queryUnauthorizedProject", description = "QUERY_UNAUTHORIZED_PROJECT_NOTES") @Parameters({ @Parameter(name = "userId", description = "USER_ID", schema = @Schema(implementation = int.class, example = "100")) }) @GetMapping(value = "/unauth-project") @ResponseStatus(HttpStatus.OK) @ApiException(QUERY_UNAUTHORIZED_PROJECT_ERROR) public Result queryUnauthorizedProject(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam("userId") Integer userId) { return projectService.queryUnauthorizedProject(loginUser, userId); }
@Test public void testQueryUnauthorizedProject() { Result result = new Result(); putMsg(result, Status.SUCCESS); Mockito.when(projectService.queryUnauthorizedProject(user, 2)).thenReturn(result); Result response = projectController.queryUnauthorizedProject(user, 2); Assertions.assertEquals(Status.SUCCESS.getCode(), response.getCode().intValue()); }
@Override public void execute(String commandName, BufferedReader reader, BufferedWriter writer) throws Py4JException, IOException { String returnCommand = null; Throwable exception = (Throwable) Protocol.getObject(reader.readLine(), this.gateway); // EOQ reader.readLine(); String stackTrace = Protocol.getThrowableAsString(exception); ReturnObject rObject = ReturnObject.getPrimitiveReturnObject(stackTrace); returnCommand = Protocol.getOutputCommand(rObject); logger.finest("Returning command: " + returnCommand); writer.write(returnCommand); writer.flush(); }
@Test public void testException() { String id = null; try { throw new RuntimeException("Hello World"); } catch (Exception e) { id = "r" + gateway.putNewObject(e); } String inputCommand = id + "\ne\n"; try { command.execute("p", new BufferedReader(new StringReader(inputCommand)), writer); System.out.println("DEBUG!!!" + sWriter.toString()); assertTrue(sWriter.toString().startsWith("!ysjava.lang.RuntimeException: Hello World")); } catch (Exception e) { e.printStackTrace(); fail(); } }
public void updateEndTime(String eventName, long endTime) { synchronized (mTrackTimer) { EventTimer eventTimer = mTrackTimer.get(eventName); if (eventTimer != null) { eventTimer.setEndTime(endTime); } } }
@Test public void updateEndTime() { mInstance.addEventTimer("EventTimer", new EventTimer(TimeUnit.SECONDS, 10000L)); mInstance.updateEndTime("EventTimer", 20000); Assert.assertEquals(20000, mInstance.getEventTimer("EventTimer").getEndTime()); }
public static <T> Iterator<Class<T>> classIterator(Class<T> expectedType, String factoryId, ClassLoader classLoader) { Set<ServiceDefinition> serviceDefinitions = getServiceDefinitions(factoryId, classLoader); return new ClassIterator<>(serviceDefinitions, expectedType); }
@Test public void testClassIteratorInTomcat_whenClassesInBothLibs() throws Exception { ClassLoader launchClassLoader = this.getClass().getClassLoader(); ClassLoader webappClassLoader; // setup embedded tomcat Tomcat tomcat = new Tomcat(); tomcat.setPort(13256); // 8080 may be used by some other tests Context ctx = tomcat.addContext("", null); // Map target/classes as WEB-INF/classes, so webapp classloader // will locate compiled production classes in the webapp classpath. // The purpose of this setup is to make project classes available // to both launch classloader and webapplication classloader, // modeling a Tomcat deployment in which Hazelcast JARs are deployed // in both tomcat/lib and webapp/lib File webInfClasses = new File("target/classes"); WebResourceRoot resources = new StandardRoot(ctx); resources.addPreResources(new DirResourceSet(resources, "/WEB-INF/classes", webInfClasses.getAbsolutePath(), "/")); ctx.setResources(resources); TestServiceLoaderServlet testServlet = new TestServiceLoaderServlet(); Wrapper wrapper = tomcat.addServlet("", "testServlet", testServlet); wrapper.setLoadOnStartup(1); ctx.addServletMappingDecoded("/", "testServlet"); tomcat.start(); try { assertTrueEventually(() -> assertTrue(testServlet.isInitDone())); assertNull("No failure is expected from servlet init() method", testServlet.failure()); webappClassLoader = testServlet.getWebappClassLoader(); assertNotEquals(launchClassLoader, webappClassLoader); Iterator<? extends Class<?>> iterator = ServiceLoader.classIterator(DataSerializerHook.class, "com.hazelcast.DataSerializerHook", webappClassLoader); assertTrue(iterator.hasNext()); while (iterator.hasNext()) { Class<?> klass = iterator.next(); assertEquals(launchClassLoader, klass.getClassLoader()); } } finally { tomcat.stop(); } }
@PutMapping("/{id}") @RequiresPermissions("system:plugin:edit") public ShenyuAdminResult updatePlugin(@PathVariable("id") @Existed(message = "plugin is not existed", provider = PluginMapper.class) final String id, @Valid @ModelAttribute final PluginDTO pluginDTO) { pluginDTO.setId(id); return createPlugin(pluginDTO); }
/**
 * Exercises PUT /plugin/{id} in three variants: a valid form-encoded update,
 * an invalid content type (multipart) rejected as a bad argument, and a second
 * valid request whose service result maps through unchanged.
 */
@Test
public void testUpdatePlugin() throws Exception {
    PluginDTO pluginDTO = new PluginDTO();
    pluginDTO.setId("123");
    pluginDTO.setName("test1");
    pluginDTO.setEnabled(true);
    pluginDTO.setRole("1");
    pluginDTO.setSort(100);
    // The @Existed validator resolves PluginMapper through the Spring context.
    when(SpringBeanUtils.getInstance().getBean(PluginMapper.class)).thenReturn(pluginMapper);
    when(pluginMapper.existed(pluginDTO.getId())).thenReturn(true);
    given(this.pluginService.createOrUpdate(pluginDTO)).willReturn(ShenyuResultMessage.UPDATE_SUCCESS);
    // Happy path: form-encoded update succeeds.
    this.mockMvc.perform(MockMvcRequestBuilders.put("/plugin/{id}", pluginDTO.getId())
            .contentType(MediaType.APPLICATION_FORM_URLENCODED_VALUE)
            .param("name", pluginDTO.getName())
            .param("enabled", String.valueOf(pluginDTO.getEnabled()))
            .param("role", pluginDTO.getRole())
            .param("sort", String.valueOf(pluginDTO.getSort())))
        .andExpect(status().isOk())
        .andExpect(jsonPath("$.message", is(ShenyuResultMessage.UPDATE_SUCCESS)))
        .andReturn();

    // Existence check fails: the request is rejected as an invalid argument.
    when(pluginMapper.existed(pluginDTO.getId())).thenReturn(null);
    this.mockMvc.perform(MockMvcRequestBuilders.put("/plugin/{id}", pluginDTO.getId())
            .contentType(MediaType.MULTIPART_FORM_DATA_VALUE)
            .param("name", pluginDTO.getName())
            .param("enabled", String.valueOf(pluginDTO.getEnabled()))
            .param("role", pluginDTO.getRole())
            .param("sort", String.valueOf(pluginDTO.getSort())))
        .andExpect(jsonPath("$.message", Matchers.containsString("Request error! invalid argument")))
        .andReturn();

    // The controller passes the service's message through verbatim.
    when(pluginMapper.existed(pluginDTO.getId())).thenReturn(true);
    given(this.pluginService.createOrUpdate(pluginDTO)).willReturn(ShenyuResultMessage.CREATE_SUCCESS);
    this.mockMvc.perform(MockMvcRequestBuilders.put("/plugin/{id}", pluginDTO.getId())
            .contentType(MediaType.APPLICATION_FORM_URLENCODED_VALUE)
            .param("name", pluginDTO.getName())
            .param("enabled", String.valueOf(pluginDTO.getEnabled()))
            .param("role", pluginDTO.getRole())
            .param("sort", String.valueOf(pluginDTO.getSort())))
        .andExpect(status().isOk())
        .andExpect(jsonPath("$.message", is(ShenyuResultMessage.CREATE_SUCCESS)))
        .andReturn();
}
public static Criterion matchMplsTc(byte mplsTc) { return new MplsTcCriterion(mplsTc); }
@Test public void testMatchMplsTcMethod() { Criterion matchMplsTc = Criteria.matchMplsTc(mplsTc1); MplsTcCriterion mplsTcCriterion = checkAndConvert(matchMplsTc, Criterion.Type.MPLS_TC, MplsTcCriterion.class); assertThat(mplsTcCriterion.tc(), is(equalTo(mplsTc1))); }
static Optional<ExecutorService> lookupExecutorServiceRef( CamelContext camelContext, String name, Object source, String executorServiceRef) { ExecutorServiceManager manager = camelContext.getExecutorServiceManager(); ObjectHelper.notNull(manager, ESM_NAME); ObjectHelper.notNull(executorServiceRef, "executorServiceRef"); // lookup in registry first and use existing thread pool if exists, // or create a new thread pool, assuming that the executor service ref is a thread pool ID return lookupByNameAndType(camelContext, executorServiceRef, ExecutorService.class) .or(() -> Optional.ofNullable(manager.newThreadPool(source, name, executorServiceRef))); }
@Test void testLookupExecutorServiceRefWithNewThreadPool() { String name = "ThreadPool"; Object source = new Object(); String executorServiceRef = "NewThreadPool"; when(camelContext.getExecutorServiceManager()).thenReturn(manager); when(manager.newThreadPool(source, name, executorServiceRef)).thenReturn(newThreadPool); when(camelContext.getRegistry()).thenReturn(mockRegistry); Optional<ExecutorService> executorService = DynamicRouterRecipientListHelper.lookupExecutorServiceRef(camelContext, name, source, executorServiceRef); Assertions.assertTrue(executorService.isPresent()); Assertions.assertEquals(newThreadPool, executorService.get()); }
public static List<String> getSupportedCipherSuites() throws NoSuchAlgorithmException, KeyManagementException { // TODO Might want to cache the result. It's unlikely to change at runtime. final SSLContext context = getUninitializedSSLContext(); context.init( null, null, null ); return Arrays.asList( context.createSSLEngine().getSupportedCipherSuites() ); }
@Test public void testHasSupportedCipherSuites() throws Exception { // Setup fixture. // (not needed) // Execute system under test. final Collection<String> result = EncryptionArtifactFactory.getSupportedCipherSuites(); // Verify results. assertFalse( result.isEmpty() ); }
@Override public boolean isGenerateSQLToken(final SQLStatementContext sqlStatementContext) { if (!(sqlStatementContext instanceof InsertStatementContext)) { return false; } Optional<InsertColumnsSegment> insertColumnsSegment = ((InsertStatementContext) sqlStatementContext).getSqlStatement().getInsertColumns(); return insertColumnsSegment.isPresent() && !insertColumnsSegment.get().getColumns().isEmpty(); }
/**
 * Asserts token generation for an insert statement context.
 * NOTE(review): the fixture is built with an empty column list yet token generation is
 * expected — presumably the fixture builder supplies its own insert columns; confirm
 * against EncryptGeneratorFixtureBuilder.
 */
@Test
void assertIsGenerateSQLTokenWithInsertStatementContext() {
    assertTrue(generator.isGenerateSQLToken(EncryptGeneratorFixtureBuilder.createInsertStatementContext(Collections.emptyList())));
}
/**
 * Renders a rule description section as HTML: Markdown content is converted,
 * any other format (including HTML and null) is returned verbatim.
 */
public String toHtml(@Nullable RuleDto.Format descriptionFormat, RuleDescriptionSectionDto ruleDescriptionSectionDto) {
    final String content = ruleDescriptionSectionDto.getContent();
    return MARKDOWN.equals(descriptionFormat) ? Markdown.convertToHtml(content) : content;
}
/**
 * HTML-formatted sections must be returned unchanged (no Markdown conversion).
 */
@Test
public void toHtmlWithHtmlFormat() {
    String result = ruleDescriptionFormatter.toHtml(HTML, HTML_SECTION);
    assertThat(result).isEqualTo(HTML_SECTION.getContent());
}
/**
 * Applies the computed per-column forward-index operations to the segment, validating
 * after each rebuild that the dictionary index ends up in the expected state.
 * Forward-index deletion (DISABLE_FORWARD_INDEX) is only recorded here — the actual
 * removal happens outside this handler so other index handlers can still read it.
 *
 * @param segmentWriter writer over the segment directory being migrated
 * @throws Exception if an index rebuild fails or post-conditions are violated
 */
@Override
public void updateIndices(SegmentDirectory.Writer segmentWriter)
    throws Exception {
  Map<String, List<Operation>> columnOperationsMap = computeOperations(segmentWriter);
  if (columnOperationsMap.isEmpty()) {
    // Nothing to migrate.
    return;
  }
  for (Map.Entry<String, List<Operation>> entry : columnOperationsMap.entrySet()) {
    String column = entry.getKey();
    List<Operation> operations = entry.getValue();
    for (Operation operation : operations) {
      switch (operation) {
        case DISABLE_FORWARD_INDEX:
          // Deletion of the forward index will be handled outside the index handler to ensure that other index
          // handlers that need the forward index to construct their own indexes will have it available.
          _tmpForwardIndexColumns.add(column);
          break;
        case ENABLE_FORWARD_INDEX:
          ColumnMetadata columnMetadata = createForwardIndexIfNeeded(segmentWriter, column, false);
          // Post-condition: dictionary presence must match the column's dictionary mode.
          if (columnMetadata.hasDictionary()) {
            if (!segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(String.format(
                  "Dictionary should still exist after rebuilding forward index for dictionary column: %s", column));
            }
          } else {
            if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(
                  String.format("Dictionary should not exist after rebuilding forward index for raw column: %s",
                      column));
            }
          }
          break;
        case DISABLE_DICTIONARY:
          // If the column's forward index is also being disabled, only drop the dictionary;
          // otherwise rewrite the forward index in raw (no-dictionary) form.
          Set<String> newForwardIndexDisabledColumns =
              FieldIndexConfigsUtil.columnsWithIndexDisabled(_fieldIndexConfigs.keySet(), StandardIndexes.forward(),
                  _fieldIndexConfigs);
          if (newForwardIndexDisabledColumns.contains(column)) {
            removeDictionaryFromForwardIndexDisabledColumn(column, segmentWriter);
            if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) {
              throw new IllegalStateException(
                  String.format("Dictionary should not exist after disabling dictionary for column: %s", column));
            }
          } else {
            disableDictionaryAndCreateRawForwardIndex(column, segmentWriter);
          }
          break;
        case ENABLE_DICTIONARY:
          createDictBasedForwardIndex(column, segmentWriter);
          if (!segmentWriter.hasIndexFor(column, StandardIndexes.forward())) {
            throw new IllegalStateException(String.format("Forward index was not created for column: %s", column));
          }
          break;
        case CHANGE_INDEX_COMPRESSION_TYPE:
          rewriteForwardIndexForCompressionChange(column, segmentWriter);
          break;
        default:
          throw new IllegalStateException("Unsupported operation for column " + column);
      }
    }
  }
}
/**
 * For each dictionary-enabled column with a forward index, disables the forward index
 * (keeping the inverted index where configured), runs the handler, and verifies the
 * resulting index map and that column metadata is unchanged.
 */
@Test
public void testDisableForwardIndexForSingleDictColumn()
    throws Exception {
  Set<String> forwardIndexDisabledColumns = new HashSet<>(SV_FORWARD_INDEX_DISABLED_COLUMNS);
  forwardIndexDisabledColumns.addAll(MV_FORWARD_INDEX_DISABLED_COLUMNS);
  forwardIndexDisabledColumns.addAll(MV_FORWARD_INDEX_DISABLED_DUPLICATES_COLUMNS);
  forwardIndexDisabledColumns.addAll(FORWARD_INDEX_DISABLED_RAW_COLUMNS);
  forwardIndexDisabledColumns.add(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
  forwardIndexDisabledColumns.add(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITH_RANGE_INDEX);
  for (String column : DICT_ENABLED_COLUMNS_WITH_FORWARD_INDEX) {
    SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory);
    SegmentDirectory segmentLocalFSDirectory =
        new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap);
    SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter();
    IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig);
    forwardIndexDisabledColumns.add(column);
    indexLoadingConfig.setForwardIndexDisabledColumns(forwardIndexDisabledColumns);
    // Inverted indexes apply to all disabled columns except the raw and explicitly excluded ones.
    Set<String> invertedIndexColumns = new HashSet<>(forwardIndexDisabledColumns);
    invertedIndexColumns.removeAll(FORWARD_INDEX_DISABLED_RAW_COLUMNS);
    invertedIndexColumns.remove(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITHOUT_INV_IDX);
    invertedIndexColumns.remove(DIM_SV_FORWARD_INDEX_DISABLED_INTEGER_WITH_RANGE_INDEX);
    indexLoadingConfig.setInvertedIndexColumns(invertedIndexColumns);
    ForwardIndexHandler fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema);
    fwdIndexHandler.updateIndices(writer);
    fwdIndexHandler.postUpdateIndicesCleanup(writer);
    // Tear down before validation, because columns.psf and index map cleanup happens at segmentDirectory.close()
    segmentLocalFSDirectory.close();
    validateIndexMap(column, true, true);
    validateIndexesForForwardIndexDisabledColumns(column);
    // In column metadata, nothing should change.
    ColumnMetadata metadata = existingSegmentMetadata.getColumnMetadataFor(column);
    validateMetadataProperties(column, metadata.hasDictionary(), metadata.getColumnMaxLength(),
        metadata.getCardinality(), metadata.getTotalDocs(), metadata.getDataType(), metadata.getFieldType(),
        metadata.isSorted(), metadata.isSingleValue(), metadata.getMaxNumberOfMultiValues(),
        metadata.getTotalNumberOfEntries(), metadata.isAutoGenerated(), metadata.getMinValue(),
        metadata.getMaxValue(), false);
  }
}
/**
 * Returns the interpreter result messages.
 * NOTE(review): this exposes the internal list directly — callers can mutate it;
 * confirm whether a defensive copy or unmodifiable view is desired.
 */
public List<InterpreterResultMessage> message() {
    return msg;
}
/**
 * A %table magic word at the start of the output (followed by a space or a newline)
 * types the first message TABLE; when text precedes it, the second message is TABLE.
 */
@Test
void testSimpleMagicType() {
    InterpreterResult result = null;
    result = new InterpreterResult(InterpreterResult.Code.SUCCESS,
        "%table col1\tcol2\naaa\t123\n");
    assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());

    result = new InterpreterResult(InterpreterResult.Code.SUCCESS,
        "%table\ncol1\tcol2\naaa\t123\n");
    assertEquals(InterpreterResult.Type.TABLE, result.message().get(0).getType());

    result = new InterpreterResult(InterpreterResult.Code.SUCCESS,
        "some text before magic word\n%table col1\tcol2\naaa\t123\n");
    assertEquals(InterpreterResult.Type.TABLE, result.message().get(1).getType());
}
/**
 * Returns the portion of {@code str} starting at base-one position {@code pos}
 * through the end of the string; {@code null} inputs yield {@code null}.
 */
@Udf(description = "Returns the portion of str from pos to the end of str")
public String substring(
    @UdfParameter(description = "The source string.") final String str,
    @UdfParameter(description = "The base-one position to start from.") final Integer pos
) {
    if (str != null && pos != null) {
        // getStartIndex clamps/translates the base-one position into a valid zero-based index.
        return str.substring(getStartIndex(str.length(), pos));
    }
    return null;
}
/**
 * Positions outside the string bounds are clamped instead of throwing
 * StringIndexOutOfBoundsException (covers both the 2-arg and 3-arg overloads).
 */
@Test
public void shouldTruncateOutOfBoundIndexesOnStrings() {
    assertThat(udf.substring("a test string", 0), is("a test string"));
    assertThat(udf.substring("a test string", 100), is(""));
    assertThat(udf.substring("a test string", -100), is("a test string"));
    assertThat(udf.substring("a test string", 3, 100), is("test string"));
    assertThat(udf.substring("a test string", 3, -100), is(""));
}
/**
 * Returns the maximum capacity this buffer is allowed to grow to.
 */
@Override
public int maxCapacity() {
    return maxCapacity;
}
/**
 * maxCapacity() must report the creation-time maximum, and capacity(-1) must be
 * rejected with IllegalArgumentException; the buffer is released either way.
 */
@Test
public void testCapacityNegative() {
    final ByteBuf buffer = newBuffer(3, 13);
    assertEquals(13, buffer.maxCapacity());
    assertEquals(3, buffer.capacity());
    try {
        assertThrows(IllegalArgumentException.class, new Executable() {
            @Override
            public void execute() {
                buffer.capacity(-1);
            }
        });
    } finally {
        buffer.release();
    }
}
/**
 * Minkowski distance of order p between two integer vectors, optionally weighted:
 * (sum_i w_i * |x_i - y_i|^p)^(1/p), with w_i == 1 when no weight vector is set.
 *
 * @throws IllegalArgumentException if the vectors differ in length, or the weight
 *         vector (when present) does not match the vector length
 */
public double d(int[] x, int[] y) {
    if (x.length != y.length) {
        throw new IllegalArgumentException(String.format("Arrays have different length: x[%d], y[%d]", x.length, y.length));
    }
    // Validate weights up front; exception order matches the unweighted length check above.
    if (weight != null && x.length != weight.length) {
        throw new IllegalArgumentException(String.format("Input vectors and weight vector have different length: %d, %d", x.length, weight.length));
    }
    double sum = 0.0;
    for (int i = 0; i < x.length; i++) {
        double term = Math.pow(Math.abs(x[i] - y[i]), p);
        sum += (weight == null) ? term : weight[i] * term;
    }
    return Math.pow(sum, 1.0 / p);
}
/**
 * Minkowski distances of order 3 and 4 checked against precomputed values.
 * NOTE(review): this exercises a double[] overload while the int[] overload is shown
 * above — presumably both share the same formula; confirm.
 */
@Test
public void testDistance() {
    System.out.println("distance");
    double[] x = {-2.1968219, -0.9559913, -0.0431738, 1.0567679, 0.3853515};
    double[] y = {-1.7781325, -0.6659839, 0.9526148, -0.9460919, -0.3925300};

    MinkowskiDistance m3 = new MinkowskiDistance(3);
    MinkowskiDistance m4 = new MinkowskiDistance(4);
    assertEquals(2.124599, m3.d(x, y), 1E-6);
    assertEquals(2.044696, m4.d(x, y), 1E-6);
}
/**
 * Builds the concrete function implementation by specializing every registered
 * polymorphic choice for the bound type variables.
 */
@Override
public BuiltInScalarFunctionImplementation specialize(BoundVariables boundVariables, int arity, FunctionAndTypeManager functionAndTypeManager) {
    final ImmutableList.Builder<ScalarFunctionImplementationChoice> specialized = ImmutableList.builder();
    for (PolymorphicScalarFunctionChoice candidate : choices) {
        specialized.add(getScalarFunctionImplementationChoice(boundVariables, functionAndTypeManager, candidate));
    }
    return new BuiltInScalarFunctionImplementation(specialized.build());
}
/**
 * With two implementation choices, specialization must select the method matching
 * the bound argument types (varchar here) and apply the extra literal parameter.
 */
@Test
public void testSelectsMethodBasedOnArgumentTypes() throws Throwable {
    SqlScalarFunction function = SqlScalarFunction.builder(TestMethods.class)
        .signature(SIGNATURE)
        .deterministic(true)
        .calledOnNullInput(false)
        .choice(choice -> choice
            .implementation(methodsGroup -> methodsGroup.methods("bigintToBigintReturnExtraParameter"))
            .implementation(methodsGroup -> methodsGroup
                .methods("varcharToBigintReturnExtraParameter")
                .withExtraParameters(context -> ImmutableList.of(context.getLiteral("x")))))
        .build();

    BuiltInScalarFunctionImplementation functionImplementation =
        function.specialize(BOUND_VARIABLES, 1, FUNCTION_AND_TYPE_MANAGER);
    assertEquals(functionImplementation.getMethodHandle().invoke(INPUT_SLICE), INPUT_VARCHAR_LENGTH);
}
/**
 * Convenience overload for a single-argument lambda: wraps the one (name, type)
 * pair and delegates to the list-based {@code toJavaCode}.
 *
 * @param argName the lambda parameter name
 * @param argType the lambda parameter type
 * @param lambdaBody the Java expression forming the lambda body
 * @return the generated Java source for the lambda
 */
public static String toJavaCode(
    final String argName,
    final Class<?> argType,
    final String lambdaBody
) {
    return toJavaCode(ImmutableList.of(new Pair<>(argName, argType)), lambdaBody);
}
/**
 * The generated Java source must compile to a java.util.function.Function that
 * applies the lambda body to its argument.
 */
@Test
public void shouldGenerateFunctionCode() {
    // Given:
    final String argName = "fred";
    final Class<?> argType = Long.class;

    // When:
    final String javaCode = LambdaUtil
        .toJavaCode(argName, argType, argName + " + 1");

    // Then:
    final Object result = CodeGenTestUtil.cookAndEval(javaCode, Function.class);
    assertThat(result, is(instanceOf(Function.class)));
    assertThat(((Function<Object, Object>) result).apply(10L), is(11L));
}
/**
 * Creates an unconfigured {@link Write} transform; configuration is supplied
 * via the {@code with*} builder methods on the returned instance.
 */
public static <K, V> Write<K, V> write() {
    return new AutoValue_CdapIO_Write.Builder<K, V>().build();
}
/**
 * Windowed batch write through the CDAP sink plugin must persist exactly the created
 * records (set equality both ways) and commit the Hadoop output job once.
 */
@Test
public void testWindowedWriteCdapBatchSinkPlugin() throws IOException {
    List<KV<String, String>> data = new ArrayList<>();
    for (int i = 0; i < EmployeeInputFormat.NUM_OF_TEST_EMPLOYEE_RECORDS; i++) {
        data.add(KV.of(String.valueOf(i), EmployeeInputFormat.EMPLOYEE_NAME_PREFIX + i));
    }
    PCollection<KV<String, String>> input =
        p.apply(Create.of(data)).apply(Window.into(FixedWindows.of(WINDOW_DURATION)));
    EmployeeConfig pluginConfig =
        new ConfigWrapper<>(EmployeeConfig.class).withParams(TEST_EMPLOYEE_PARAMS_MAP).build();
    input.apply(
        "Write",
        CdapIO.<String, String>write()
            .withCdapPlugin(
                Plugin.createBatch(
                    EmployeeBatchSink.class,
                    EmployeeOutputFormat.class,
                    EmployeeOutputFormatProvider.class))
            .withPluginConfig(pluginConfig)
            .withKeyClass(String.class)
            .withValueClass(String.class)
            .withLocksDirPath(tmpFolder.getRoot().getAbsolutePath()));
    p.run();
    List<KV<String, String>> writtenOutput = EmployeeOutputFormat.getWrittenOutput();
    assertEquals(data.size(), writtenOutput.size());
    assertTrue(data.containsAll(writtenOutput));
    assertTrue(writtenOutput.containsAll(data));
    Mockito.verify(EmployeeOutputFormat.getOutputCommitter()).commitJob(Mockito.any());
}
/**
 * Returns local statistics for the replicated map with the given name,
 * delegating to the stats provider.
 */
public LocalReplicatedMapStats getLocalReplicatedMapStats(String name) {
    return statsProvider.getLocalReplicatedMapStats(name);
}
/**
 * With statistics disabled, repeated stats lookups must return the same shared
 * instance rather than allocating a new object per call.
 */
@Test
public void testGetLocalReplicatedMapStatsNoObjectGenerationIfDisabledStats() {
    String name = randomMapName();
    ReplicatedMapConfig replicatedMapConfig = new ReplicatedMapConfig();
    replicatedMapConfig.setName(name);
    replicatedMapConfig.setStatisticsEnabled(false);
    nodeEngine.getConfig().addReplicatedMapConfig(replicatedMapConfig);
    ReplicatedMapService service = new ReplicatedMapService(nodeEngine);
    LocalReplicatedMapStats stats = service.getLocalReplicatedMapStats(name);
    LocalReplicatedMapStats stats2 = service.getLocalReplicatedMapStats(name);
    LocalReplicatedMapStats stats3 = service.getLocalReplicatedMapStats(name);
    assertSame(stats, stats2);
    assertSame(stats2, stats3);
}
/**
 * Processes one NodeManager heartbeat. The ordering below is part of the protocol:
 * validity, registration, duplicate/stale detection, decommission evaluation, then
 * response construction and event dispatch. Early returns short-circuit later steps.
 */
@SuppressWarnings("unchecked")
@Override
public NodeHeartbeatResponse nodeHeartbeat(NodeHeartbeatRequest request)
    throws YarnException, IOException {
  NodeStatus remoteNodeStatus = request.getNodeStatus();
  /**
   * Here is the node heartbeat sequence...
   * 1. Check if it's a valid (i.e. not excluded) node
   * 2. Check if it's a registered node
   * 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
   * 4. Send healthStatus to RMNode
   * 5. Update node's labels if distributed Node Labels configuration is enabled
   */
  NodeId nodeId = remoteNodeStatus.getNodeId();

  // 1. Check if it's a valid (i.e. not excluded) node, if not, see if it is
  // in decommissioning.
  if (!this.nodesListManager.isValidNode(nodeId.getHost())
      && !isNodeInDecommissioning(nodeId)) {
    String message = "Disallowed NodeManager nodeId: " + nodeId + " hostname: "
        + nodeId.getHost();
    LOG.info(message);
    return YarnServerBuilderUtils.newNodeHeartbeatResponse(
        NodeAction.SHUTDOWN, message);
  }

  // 2. Check if it's a registered node
  RMNode rmNode = this.rmContext.getRMNodes().get(nodeId);
  if (rmNode == null) {
    /* node does not exist */
    String message = "Node not found resyncing " + remoteNodeStatus.getNodeId();
    LOG.info(message);
    return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC,
        message);
  }

  // Send ping
  this.nmLivelinessMonitor.receivedPing(nodeId);
  this.decommissioningWatcher.update(rmNode, remoteNodeStatus);

  // 3. Check if it's a 'fresh' heartbeat i.e. not duplicate heartbeat
  NodeHeartbeatResponse lastNodeHeartbeatResponse = rmNode.getLastNodeHeartBeatResponse();
  if (getNextResponseId(
      remoteNodeStatus.getResponseId()) == lastNodeHeartbeatResponse
          .getResponseId()) {
    // Same id re-sent: reply with the cached last response.
    LOG.info("Received duplicate heartbeat from node "
        + rmNode.getNodeAddress() + " responseId=" + remoteNodeStatus.getResponseId());
    return lastNodeHeartbeatResponse;
  } else if (remoteNodeStatus.getResponseId() != lastNodeHeartbeatResponse
      .getResponseId()) {
    // Any other mismatch means the NM is out of sync: force a resync.
    String message = "Too far behind rm response id:"
        + lastNodeHeartbeatResponse.getResponseId() + " nm response id:"
        + remoteNodeStatus.getResponseId();
    LOG.info(message);
    // TODO: Just sending reboot is not enough. Think more.
    this.rmContext.getDispatcher().getEventHandler().handle(
        new RMNodeEvent(nodeId, RMNodeEventType.REBOOTING));
    return YarnServerBuilderUtils.newNodeHeartbeatResponse(NodeAction.RESYNC,
        message);
  }

  // Evaluate whether a DECOMMISSIONING node is ready to be DECOMMISSIONED.
  if (rmNode.getState() == NodeState.DECOMMISSIONING
      && decommissioningWatcher.checkReadyToBeDecommissioned(
          rmNode.getNodeID())) {
    String message = "DECOMMISSIONING " + nodeId
        + " is ready to be decommissioned";
    LOG.info(message);
    this.rmContext.getDispatcher().getEventHandler().handle(
        new RMNodeEvent(nodeId, RMNodeEventType.DECOMMISSION));
    this.nmLivelinessMonitor.unregister(nodeId);
    return YarnServerBuilderUtils.newNodeHeartbeatResponse(
        NodeAction.SHUTDOWN, message);
  }

  if (timelineServiceV2Enabled) {
    // Check & update collectors info from request.
    updateAppCollectorsMap(request);
  }

  // Heartbeat response
  long newInterval = nextHeartBeatInterval;
  if (heartBeatIntervalScalingEnable) {
    newInterval = rmNode.calculateHeartBeatInterval(
        nextHeartBeatInterval, heartBeatIntervalMin,
        heartBeatIntervalMax, heartBeatIntervalSpeedupFactor,
        heartBeatIntervalSlowdownFactor);
  }
  NodeHeartbeatResponse nodeHeartBeatResponse =
      YarnServerBuilderUtils.newNodeHeartbeatResponse(
          getNextResponseId(lastNodeHeartbeatResponse.getResponseId()),
          NodeAction.NORMAL, null, null, null, null, newInterval);
  rmNode.setAndUpdateNodeHeartbeatResponse(nodeHeartBeatResponse);

  populateKeys(request, nodeHeartBeatResponse);

  populateTokenSequenceNo(request, nodeHeartBeatResponse);

  if (timelineServiceV2Enabled) {
    // Return collectors' map that NM needs to know
    setAppCollectorsMapToResponse(rmNode.getRunningApps(),
        nodeHeartBeatResponse);
  }

  // 4. Send status to RMNode, saving the latest response.
  RMNodeStatusEvent nodeStatusEvent =
      new RMNodeStatusEvent(nodeId, remoteNodeStatus);
  if (request.getLogAggregationReportsForApps() != null
      && !request.getLogAggregationReportsForApps().isEmpty()) {
    nodeStatusEvent.setLogAggregationReportsForApps(request
        .getLogAggregationReportsForApps());
  }
  this.rmContext.getDispatcher().getEventHandler().handle(nodeStatusEvent);

  // 5. Update node's labels to RM's NodeLabelManager.
  if (isDistributedNodeLabelsConf && request.getNodeLabels() != null) {
    try {
      updateNodeLabelsFromNMReport(
          NodeLabelsUtils.convertToStringSet(request.getNodeLabels()), nodeId);
      nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(true);
    } catch (IOException ex) {
      // ensure the error message is captured and sent across in response
      nodeHeartBeatResponse.setDiagnosticsMessage(ex.getMessage());
      nodeHeartBeatResponse.setAreNodeLabelsAcceptedByRM(false);
    }
  }

  // 6. check if node's capacity is load from dynamic-resources.xml
  // if so, send updated resource back to NM.
  String nid = nodeId.toString();
  Resource capability = loadNodeResourceFromDRConfiguration(nid);
  // sync back with new resource if not null.
  if (capability != null) {
    nodeHeartBeatResponse.setResource(capability);
  }
  // Check if we got an event (AdminService) that updated the resources
  if (rmNode.isUpdatedCapability()) {
    nodeHeartBeatResponse.setResource(rmNode.getTotalCapability());
    rmNode.resetUpdatedCapability();
  }

  // 7. Send Container Queuing Limits back to the Node. This will be used by
  // the node to truncate the number of Containers queued for execution.
  if (this.rmContext.getNodeManagerQueueLimitCalculator() != null) {
    nodeHeartBeatResponse.setContainerQueuingLimit(
        this.rmContext.getNodeManagerQueueLimitCalculator()
            .createContainerQueuingLimit());
  }

  // 8. Get node's attributes and update node-to-attributes mapping
  // in RMNodeAttributeManager.
  if (request.getNodeAttributes() != null) {
    try {
      // update node attributes if necessary then update heartbeat response
      updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes());
      nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(true);
    } catch (IOException ex) {
      //ensure the error message is captured and sent across in response
      String errorMsg =
          nodeHeartBeatResponse.getDiagnosticsMessage() == null
              ? ex.getMessage()
              : nodeHeartBeatResponse.getDiagnosticsMessage() + "\n" + ex
                  .getMessage();
      nodeHeartBeatResponse.setDiagnosticsMessage(errorMsg);
      nodeHeartBeatResponse.setAreNodeAttributesAcceptedByRM(false);
    }
  }
  return nodeHeartBeatResponse;
}
/**
 * Decommissioned-node metrics must count excluded hosts even when they never
 * registered or heartbeated, and again after an RM restart with the same config.
 */
@Test(timeout = 30000)
public void testInitDecommMetricNoRegistration() throws Exception {
    Configuration conf = new Configuration();
    rm = new MockRM(conf);
    rm.start();
    MockNM nm1 = rm.registerNode("host1:1234", 5120);
    MockNM nm2 = rm.registerNode("host2:5678", 10240);
    nm1.nodeHeartbeat(true);
    nm2.nodeHeartbeat(true);
    // host3 will not register or heartbeat
    writeToHostsFile(excludeHostFile, "host3", "host2");
    conf.set(YarnConfiguration.RM_NODES_EXCLUDE_FILE_PATH,
        excludeHostFile.getAbsolutePath());
    writeToHostsFile(hostFile, "host1", "host2");
    conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile.getAbsolutePath());
    rm.getNodesListManager().refreshNodes(conf);
    rm.drainEvents();
    Assert.assertEquals("The decommissioned nodes metric should be 1 ", 1,
        ClusterMetrics.getMetrics().getNumDecommisionedNMs());
    rm.stop();
    MockRM rm1 = new MockRM(conf);
    rm1.start();
    rm1.getNodesListManager().refreshNodes(conf);
    rm1.drainEvents();
    Assert.assertEquals("The decommissioned nodes metric should be 2 ", 2,
        ClusterMetrics.getMetrics().getNumDecommisionedNMs());
    rm1.stop();
}
/**
 * Concatenates the given varchar fields in order, skipping null elements;
 * a null input array yields null.
 */
@Udf
public String concat(@UdfParameter(
    description = "The varchar fields to concatenate") final String... inputs) {
    if (inputs == null) {
        return null;
    }
    final StringBuilder joined = new StringBuilder();
    for (final String input : inputs) {
        if (input != null) {
            joined.append(input);
        }
    }
    return joined.toString();
}
/**
 * Concatenation joins all inputs in order without separators.
 */
@Test
public void shouldConcatStrings() {
    assertThat(udf.concat("The", "Quick", "Brown", "Fox"), is("TheQuickBrownFox"));
}
/**
 * Creates or updates an asset. The tenant is always taken from the authenticated
 * user; entity-level permission checks run before persisting.
 */
@ApiOperation(value = "Create Or Update Asset (saveAsset)",
    notes = "Creates or Updates the Asset. When creating asset, platform generates Asset Id as " + UUID_WIKI_LINK +
        "The newly created Asset id will be present in the response. " +
        "Specify existing Asset id to update the asset. " +
        "Referencing non-existing Asset Id will cause 'Not Found' error. " +
        "Remove 'id', 'tenantId' and optionally 'customerId' from the request body example (below) to create new Asset entity. " +
        TENANT_OR_CUSTOMER_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAnyAuthority('TENANT_ADMIN', 'CUSTOMER_USER')")
@RequestMapping(value = "/asset", method = RequestMethod.POST)
@ResponseBody
public Asset saveAsset(@io.swagger.v3.oas.annotations.parameters.RequestBody(description = "A JSON value representing the asset.")
                       @RequestBody Asset asset) throws Exception {
    // Force the tenant from the authenticated user; clients cannot set it.
    asset.setTenantId(getTenantId());
    // Authorization/existence check for create-vs-update before saving.
    checkEntity(asset.getId(), asset, Resource.ASSET);
    return tbAssetService.save(asset, getCurrentUser());
}
/**
 * Saving a new asset must populate id/created-time/tenant, default the customer to
 * NULL_UUID, and fire ADDED then UPDATED notifications on create and rename.
 */
@Test
public void testSaveAsset() throws Exception {
    Asset asset = new Asset();
    asset.setName("My asset");
    asset.setType("default");

    Mockito.reset(tbClusterService, auditLogService);

    Asset savedAsset = doPost("/api/asset", asset, Asset.class);

    testNotifyEntityAllOneTime(savedAsset, savedAsset.getId(), savedAsset.getId(),
            savedTenant.getId(), tenantAdmin.getCustomerId(), tenantAdmin.getId(), tenantAdmin.getEmail(),
            ActionType.ADDED);

    Assert.assertNotNull(savedAsset);
    Assert.assertNotNull(savedAsset.getId());
    Assert.assertTrue(savedAsset.getCreatedTime() > 0);
    Assert.assertEquals(savedTenant.getId(), savedAsset.getTenantId());
    Assert.assertNotNull(savedAsset.getCustomerId());
    Assert.assertEquals(NULL_UUID, savedAsset.getCustomerId().getId());
    Assert.assertEquals(asset.getName(), savedAsset.getName());

    Mockito.reset(tbClusterService, auditLogService);

    savedAsset.setName("My new asset");
    doPost("/api/asset", savedAsset, Asset.class);

    testNotifyEntityAllOneTime(savedAsset, savedAsset.getId(), savedAsset.getId(),
            savedTenant.getId(), tenantAdmin.getCustomerId(), tenantAdmin.getId(), tenantAdmin.getEmail(),
            ActionType.UPDATED);

    Asset foundAsset = doGet("/api/asset/" + savedAsset.getId().getId().toString(), Asset.class);
    Assert.assertEquals(foundAsset.getName(), savedAsset.getName());
}
@Override @SuppressWarnings("rawtypes") public void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters, SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters, SortedMap<String, Timer> timers) { final long timestamp = clock.getTime() / 1000; // oh it'd be lovely to use Java 7 here try { graphite.connect(); for (Map.Entry<String, Gauge> entry : gauges.entrySet()) { reportGauge(entry.getKey(), entry.getValue(), timestamp); } for (Map.Entry<String, Counter> entry : counters.entrySet()) { reportCounter(entry.getKey(), entry.getValue(), timestamp); } for (Map.Entry<String, Histogram> entry : histograms.entrySet()) { reportHistogram(entry.getKey(), entry.getValue(), timestamp); } for (Map.Entry<String, Meter> entry : meters.entrySet()) { reportMetered(entry.getKey(), entry.getValue(), timestamp); } for (Map.Entry<String, Timer> entry : timers.entrySet()) { reportTimer(entry.getKey(), entry.getValue(), timestamp); } graphite.flush(); } catch (IOException e) { LOGGER.warn("Unable to report to Graphite", graphite, e); } finally { try { graphite.close(); } catch (IOException e1) { LOGGER.warn("Error closing Graphite", graphite, e1); } } }
/**
 * String-valued gauges cannot be graphed: the reporter must connect, skip the send
 * for the gauge, then still flush and close.
 */
@Test
public void doesNotReportStringGaugeValues() throws Exception {
    reporter.report(map("gauge", gauge("value")), map(), map(), map(), map());

    final InOrder inOrder = inOrder(graphite);
    inOrder.verify(graphite).connect();
    inOrder.verify(graphite, never()).send("prefix.gauge", "value", timestamp);
    inOrder.verify(graphite).flush();
    inOrder.verify(graphite).close();

    verifyNoMoreInteractions(graphite);
}
/**
 * Prepares the connection headers (Content-Type, Content-Length) for a POST and
 * precomputes the request body bytes where possible. Three body modes:
 * multipart/form-data (body + file headers precomputed, file contents streamed
 * later), a single file as the raw body, or url-encoded/concatenated parameters.
 *
 * @param connection the connection to set request properties on
 * @param sampler supplies encoding, arguments, files and mode flags
 * @throws IOException if encoding the body fails
 */
public void setHeaders(URLConnection connection,
                       HTTPSamplerBase sampler)
                       throws IOException {
    // Get the encoding to use for the request
    String contentEncoding = sampler.getContentEncoding();
    long contentLength = 0L;
    HTTPFileArg[] files = sampler.getHTTPFiles();

    // Check if we should do a multipart/form-data or an
    // application/x-www-form-urlencoded post request
    if(sampler.getUseMultipart()) {
        // Set the content type
        connection.setRequestProperty(
                HTTPConstants.HEADER_CONTENT_TYPE,
                HTTPConstants.MULTIPART_FORM_DATA + "; boundary=" + getBoundary()); // $NON-NLS-1$

        // Write the form section
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        OutputStreamWriter osw = new OutputStreamWriter(bos, contentEncoding);

        // Add any parameters
        for (JMeterProperty jMeterProperty : sampler.getArguments()) {
            HTTPArgument arg = (HTTPArgument) jMeterProperty.getObjectValue();
            String parameterName = arg.getName();
            if (arg.isSkippable(parameterName)) {
                continue;
            }
            // Write multipart for parameter
            writeFormMultipart(osw, contentEncoding, parameterName, arg.getValue(),
                    sampler.getDoBrowserCompatibleMultipart());
        }
        osw.flush();
        // Keep the content, will be sent later
        formDataPostBody = bos.toByteArray();
        contentLength = formDataPostBody.length;

        // Now we just construct any multipart for the files
        // We only construct the file multipart start, we do not write
        // the actual file content
        for (int i = 0; i < files.length; i++) {
            bos.reset();
            contentLength += multipartDividerBytes.length + CRLF.length;
            HTTPFileArg file = files[i];
            // Write multipart for file
            writeStartFileMultipart(osw, contentEncoding, file.getPath(), file.getParamName(), file.getMimeType());
            osw.flush();
            // Technically speaking, we should refrain from decoding the header to string
            // since we will have to encode it again when sending the request
            // However, HTTPFileArg#setHeaer(byte[]) does not exist yet
            String header = bos.toString(contentEncoding);
            // If this is not the first file we can't write its header now;
            // for simplicity we always save it, even if there is only one file
            file.setHeader(header);
            contentLength += bos.size();
            // Add also the length of the file content
            File uploadFile = new File(file.getPath());
            contentLength += uploadFile.length();
            contentLength += CRLF.length;
        }
        // Add the end of multipart
        contentLength += multipartDividerBytes.length + DASH_DASH_BYTES.length + CRLF.length;
        // Set the content length
        connection.setRequestProperty(HTTPConstants.HEADER_CONTENT_LENGTH, Long.toString(contentLength));
        // Make the connection ready for sending post data
        connection.setDoOutput(true);
        connection.setDoInput(true);
    }
    else {
        // Check if the header manager had a content type header
        // This allows the user to specify their own content-type for a POST request
        String contentTypeHeader = connection.getRequestProperty(HTTPConstants.HEADER_CONTENT_TYPE);
        boolean hasContentTypeHeader = contentTypeHeader != null && contentTypeHeader.length() > 0;
        // If there are no arguments, we can send a file as the body of the request
        if(sampler.getArguments() != null && sampler.getArguments().getArgumentCount() == 0
                && sampler.getSendFileAsPostBody()) {
            // we're sure that there is one file because of
            // getSendFileAsPostBody method's return value.
            HTTPFileArg file = files[0];
            if(!hasContentTypeHeader) {
                // Allow the mimetype of the file to control the content type
                if(file.getMimeType() != null && file.getMimeType().length() > 0) {
                    connection.setRequestProperty(HTTPConstants.HEADER_CONTENT_TYPE, file.getMimeType());
                }
                else {
                    if(HTTPAbstractImpl.ADD_CONTENT_TYPE_TO_POST_IF_MISSING) {
                        connection.setRequestProperty(HTTPConstants.HEADER_CONTENT_TYPE,
                                HTTPConstants.APPLICATION_X_WWW_FORM_URLENCODED);
                    }
                }
            }
            // Create the content length we are going to write
            File inputFile = new File(file.getPath());
            contentLength = inputFile.length();
        }
        else {
            // We create the post body content now, so we know the size
            ByteArrayOutputStream bos = new ByteArrayOutputStream();
            // If none of the arguments have a name specified, we
            // just send all the values as the post body
            String postBody = null;
            if(!sampler.getSendParameterValuesAsPostBody()) {
                // Set the content type
                if(!hasContentTypeHeader && HTTPAbstractImpl.ADD_CONTENT_TYPE_TO_POST_IF_MISSING) {
                    connection.setRequestProperty(HTTPConstants.HEADER_CONTENT_TYPE,
                            HTTPConstants.APPLICATION_X_WWW_FORM_URLENCODED);
                }
                // It is a normal post request, with parameter names and values
                postBody = sampler.getQueryString(contentEncoding);
            }
            else {
                // Allow the mimetype of the file to control the content type
                // This is not obvious in GUI if you are not uploading any files,
                // but just sending the content of nameless parameters
                // TODO: needs a multiple file upload scenario
                if(!hasContentTypeHeader) {
                    HTTPFileArg file = files.length > 0? files[0] : null;
                    if(file != null && file.getMimeType() != null && file.getMimeType().length() > 0) {
                        connection.setRequestProperty(HTTPConstants.HEADER_CONTENT_TYPE, file.getMimeType());
                    }
                    else {
                        if(HTTPAbstractImpl.ADD_CONTENT_TYPE_TO_POST_IF_MISSING) {
                            connection.setRequestProperty(HTTPConstants.HEADER_CONTENT_TYPE,
                                    HTTPConstants.APPLICATION_X_WWW_FORM_URLENCODED);
                        }
                    }
                }
                // Just append all the parameter values, and use that as the post body
                StringBuilder postBodyBuffer = new StringBuilder();
                for (JMeterProperty jMeterProperty : sampler.getArguments()) {
                    HTTPArgument arg = (HTTPArgument) jMeterProperty.getObjectValue();
                    postBodyBuffer.append(arg.getEncodedValue(contentEncoding));
                }
                postBody = postBodyBuffer.toString();
            }
            bos.write(postBody.getBytes(contentEncoding));
            bos.flush();
            bos.close();
            // Keep the content, will be sent later
            formDataUrlEncoded = bos.toByteArray();
            // NOTE(review): toByteArray() is called a second time here, producing a
            // redundant copy — formDataUrlEncoded.length would avoid it.
            contentLength = bos.toByteArray().length;
        }
        // Set the content length
        connection.setRequestProperty(HTTPConstants.HEADER_CONTENT_LENGTH, Long.toString(contentLength));
        // Make the connection ready for sending post data
        connection.setDoOutput(true);
    }
}
/**
 * POST without a filename: no content type must be set, and the content length must
 * match the url-encoded parameter string.
 */
@Test
public void testSetHeaders_NoFilename() throws IOException {
    sampler.setMethod(HTTPConstants.POST);
    setupNoFilename(sampler);
    setupFormData(sampler);

    postWriter.setHeaders(connection, sampler);

    checkNoContentType(connection);
    checkContentLength(connection, "title=mytitle&description=mydescription".length());
}
/**
 * Substitutes ${var...} placeholders in the script body with values from the Angular
 * object registry. Paragraph-scoped objects take precedence over note-scoped ones.
 * Each substituted variable is removed from {@code inputs}; unresolved variables keep
 * their placeholders.
 */
String extractVariablesFromAngularRegistry(String scriptBody, Map<String, Input> inputs,
    AngularObjectRegistry angularRegistry) {
  final String noteId = this.getNote().getId();
  final String paragraphId = this.getId();

  // Snapshot the key set so we can remove entries from 'inputs' while iterating.
  final Set<String> variableNames = new HashSet<>(inputs.keySet());
  for (String variableName : variableNames) {
    // Both scopes are always queried; paragraph scope wins when present.
    final AngularObject inParagraphScope = angularRegistry.get(variableName, noteId, paragraphId);
    final AngularObject inNoteScope = angularRegistry.get(variableName, noteId, null);
    AngularObject resolved = inParagraphScope;
    if (resolved == null) {
      resolved = inNoteScope;
    }
    if (resolved == null) {
      continue;
    }
    inputs.remove(variableName);
    // Matches ${ name } with an optional "=default" tail inside the braces.
    final String pattern = "[$][{]\\s*" + variableName + "\\s*(?:=[^}]+)?[}]";
    scriptBody = scriptBody.replaceAll(pattern, resolved.get().toString());
  }
  return scriptBody;
}
/**
 * Variables resolvable from the Angular registry (paragraph scope first, then note
 * scope) are substituted into the script; unresolved ones keep their placeholder.
 */
@Test
void should_extract_variable_from_angular_object_registry() throws Exception {
    //Given
    final String noteId = "noteId";
    final AngularObjectRegistry registry = mock(AngularObjectRegistry.class);
    final Note note = mock(Note.class);
    final Map<String, Input> inputs = new HashMap<>();
    inputs.put("name", null);
    inputs.put("age", null);
    inputs.put("job", null);
    final String scriptBody = "My name is ${name} and I am ${age=20} years old. "
        + "My occupation is ${ job = engineer | developer | artists}";
    final Paragraph paragraph = new Paragraph(note, null);
    final String paragraphId = paragraph.getId();
    final AngularObject<String> nameAO = AngularObjectBuilder.build("name", "DuyHai DOAN", noteId, paragraphId);
    final AngularObject<Integer> ageAO = AngularObjectBuilder.build("age", 34, noteId, null);
    when(note.getId()).thenReturn(noteId);
    when(registry.get("name", noteId, paragraphId)).thenReturn(nameAO);
    when(registry.get("age", noteId, null)).thenReturn(ageAO);
    // "job" stays unresolved and keeps its placeholder.
    final String expected = "My name is DuyHai DOAN and I am 34 years old. "
        + "My occupation is ${ job = engineer | developer | artists}";
    //When
    final String actual = paragraph.extractVariablesFromAngularRegistry(scriptBody, inputs, registry);
    //Then
    verify(registry).get("name", noteId, paragraphId);
    verify(registry).get("age", noteId, null);
    assertEquals(actual, expected);
}
public boolean validate(final CommandLine input) { for(Option o : input.getOptions()) { if(Option.UNINITIALIZED == o.getArgs()) { continue; } if(o.hasOptionalArg()) { continue; } if(o.getArgs() != o.getValuesList().size()) { console.printf("Missing argument for option %s%n", o.getLongOpt()); return false; } } final TerminalAction action = TerminalActionFinder.get(input); if(null == action) { console.printf("%s%n", "Missing argument"); return false; } if(input.hasOption(TerminalOptionsBuilder.Params.existing.name())) { final String arg = input.getOptionValue(TerminalOptionsBuilder.Params.existing.name()); if(null == TransferAction.forName(arg)) { final Set<TransferAction> actions = new HashSet<TransferAction>(TransferAction.forTransfer(Transfer.Type.download)); actions.add(TransferAction.cancel); console.printf("Invalid argument '%s' for option %s. Must be one of %s%n", arg, TerminalOptionsBuilder.Params.existing.name(), Arrays.toString(actions.toArray())); return false; } switch(action) { case download: if(!validate(arg, Transfer.Type.download)) { return false; } break; case upload: if(!validate(arg, Transfer.Type.upload)) { return false; } break; case synchronize: if(!validate(arg, Transfer.Type.sync)) { return false; } break; case copy: if(!validate(arg, Transfer.Type.copy)) { return false; } break; } } // Validate arguments switch(action) { case list: case download: if(!validate(input.getOptionValue(action.name()))) { return false; } break; case upload: case copy: case synchronize: if(!validate(input.getOptionValue(action.name()))) { return false; } break; } return true; }
// Exercises URI validation with both the base Swift protocol and a Rackspace
// profile registered under the same scheme: a path component is required, so
// "scheme:" and "scheme://" (empty authority, no path) are rejected while
// "scheme:/" and "scheme:///" are accepted.
@Test
public void testListContainers() throws Exception {
    final Set<Protocol> list = new HashSet<>(Arrays.asList(
            new SwiftProtocol(),
            new ProfilePlistReader(new ProtocolFactory(Collections.singleton(new SwiftProtocol())))
                    .read(this.getClass().getResourceAsStream("/Rackspace US.cyberduckprofile"))
    ));
    assertFalse(new TerminalOptionsInputValidator(new ProtocolFactory(list)).validate("rackspace:"));
    assertTrue(new TerminalOptionsInputValidator(new ProtocolFactory(list)).validate("rackspace:/"));
    assertFalse(new TerminalOptionsInputValidator(new ProtocolFactory(list)).validate("rackspace://"));
    assertTrue(new TerminalOptionsInputValidator(new ProtocolFactory(list)).validate("rackspace:///"));
}
// Opens the RocksDB instance backing this store: builds default DBOptions and
// ColumnFamilyOptions, applies an optional user-supplied RocksDBConfigSetter,
// creates the on-disk directory, wires statistics, and finally opens the DB.
@SuppressWarnings("unchecked")
void openDB(final Map<String, Object> configs, final File stateDir) {
    // initialize the default rocksdb options
    final DBOptions dbOptions = new DBOptions();
    final ColumnFamilyOptions columnFamilyOptions = new ColumnFamilyOptions();
    userSpecifiedOptions =
            new RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapter(dbOptions, columnFamilyOptions);

    // Block-based table format with an LRU block cache and a bloom filter.
    final BlockBasedTableConfigWithAccessibleCache tableConfig = new BlockBasedTableConfigWithAccessibleCache();
    cache = new LRUCache(BLOCK_CACHE_SIZE);
    tableConfig.setBlockCache(cache);
    tableConfig.setBlockSize(BLOCK_SIZE);

    filter = new BloomFilter();
    tableConfig.setFilterPolicy(filter);

    userSpecifiedOptions.optimizeFiltersForHits();
    userSpecifiedOptions.setTableFormatConfig(tableConfig);
    userSpecifiedOptions.setWriteBufferSize(WRITE_BUFFER_SIZE);
    userSpecifiedOptions.setCompressionType(COMPRESSION_TYPE);
    userSpecifiedOptions.setCompactionStyle(COMPACTION_STYLE);
    userSpecifiedOptions.setMaxWriteBufferNumber(MAX_WRITE_BUFFERS);
    userSpecifiedOptions.setCreateIfMissing(true);
    userSpecifiedOptions.setErrorIfExists(false);
    userSpecifiedOptions.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
    // this is the recommended way to increase parallelism in RocksDb
    // note that the current implementation of setIncreaseParallelism affects the number
    // of compaction threads but not flush threads (the latter remains one). Also,
    // the parallelism value needs to be at least two because of the code in
    // https://github.com/facebook/rocksdb/blob/62ad0a9b19f0be4cefa70b6b32876e764b7f3c11/util/options.cc#L580
    // subtracts one from the value passed to determine the number of compaction threads
    // (this could be a bug in the RocksDB code and their devs have been contacted).
    userSpecifiedOptions.setIncreaseParallelism(Math.max(Runtime.getRuntime().availableProcessors(), 2));

    wOptions = new WriteOptions();
    // NOTE(review): WAL is disabled here; presumably durability is provided at a
    // higher layer (e.g. the changelog) -- confirm before relying on crash safety.
    wOptions.setDisableWAL(true);

    fOptions = new FlushOptions();
    fOptions.setWaitForFlush(true);

    // A user-provided config setter may override any of the defaults set above.
    final Class<RocksDBConfigSetter> configSetterClass =
            (Class<RocksDBConfigSetter>) configs.get(StreamsConfig.ROCKSDB_CONFIG_SETTER_CLASS_CONFIG);

    if (configSetterClass != null) {
        configSetter = Utils.newInstance(configSetterClass);
        configSetter.setConfig(name, userSpecifiedOptions, configs);
    }

    dbDir = new File(new File(stateDir, parentDir), name);

    try {
        Files.createDirectories(dbDir.getParentFile().toPath());
        Files.createDirectories(dbDir.getAbsoluteFile().toPath());
    } catch (final IOException fatal) {
        throw new ProcessorStateException(fatal);
    }

    // Setup statistics before the database is opened, otherwise the statistics are not updated
    // with the measurements from Rocks DB
    setupStatistics(configs, dbOptions);
    openRocksDB(dbOptions, columnFamilyOptions);
    dbAccessor = new DirectDBAccessor(db, fOptions, wOptions);
    open = true;

    addValueProvidersToMetricsRecorder();
}
// When the user supplies no statistics, openDB must still register non-null
// value providers (including the store's own Statistics) with the recorder.
@Test
public void shouldSetStatisticsInValueProvidersWhenUserProvidesNoStatistics() throws Exception {
    rocksDBStore = getRocksDBStoreWithRocksDBMetricsRecorder();
    context = getProcessorContext(RecordingLevel.DEBUG);
    rocksDBStore.openDB(context.appConfigs(), context.stateDir());
    // cache and db value providers must both be non-null at registration time
    verify(metricsRecorder).addValueProviders(eq(DB_NAME), notNull(), notNull(), eq(getStatistics(rocksDBStore)));
}
/**
 * Returns a new {@code Keys<K>} transform instance.
 *
 * @param <K> the key type of the input {@code KV} elements
 */
public static <K> Keys<K> create() {
  return new Keys<>();
}
// Keys.create() applied to an empty input collection must yield an empty output.
@Test
@Category(ValidatesRunner.class)
public void testKeysEmpty() {
    PCollection<KV<String, Integer>> input =
            p.apply(
                    Create.of(Arrays.asList(EMPTY_TABLE))
                            .withCoder(KvCoder.of(StringUtf8Coder.of(), BigEndianIntegerCoder.of())));
    PCollection<String> output = input.apply(Keys.create());
    PAssert.that(output).empty();
    p.run();
}
// Lazily resolves the ZkService implementation matching the configured
// ZooKeeper server version. Only the 3.4.x line is handled here; anything
// else leaves zkService null and triggers the exception below.
public ZkService chooseService() {
    // Fast path: already resolved.
    if (zkService != null) {
        return zkService;
    }
    synchronized (this) {
        if (zkService == null) {
            final String version = lbConfig.getZkServerVersion();
            if (version.startsWith(VERSION_34_PREFIX)) {
                zkService = PluginServiceManager.getPluginService(ZkService34.class);
            }
        }
    }
    // NOTE(review): this is double-checked locking; it is only safe if the
    // zkService field (declared outside this method) is volatile -- confirm the
    // field declaration, otherwise the unsynchronized read above may observe a
    // partially published reference.
    if (zkService == null) {
        throw new IllegalArgumentException(String.format(Locale.ENGLISH,
                "Can not get target zookeeper client version(%s) service", lbConfig.getZkServerVersion()));
    }
    return zkService;
}
// With no plugin service registered for the configured ZooKeeper version,
// chooseService is expected to fail with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void chooseService() {
    final ZkServiceManager zkServiceManager = new ZkServiceManager();
    zkServiceManager.chooseService();
}
// Prepares the client-side Python environment: resolves the interpreter path,
// creates a temporary working directory, assembles PYTHONPATH from internal
// libs, user files and configured paths, and extracts configured archives.
static PythonEnvironment preparePythonEnvironment(
        ReadableConfig config, String entryPointScript, String tmpDir) throws IOException {
    PythonEnvironment env = new PythonEnvironment();

    // 1. set the path of python interpreter.
    // Config takes precedence over the PYFLINK_CLIENT_EXECUTABLE env var.
    String pythonExec =
            config.getOptional(PYTHON_CLIENT_EXECUTABLE)
                    .orElse(System.getenv(PYFLINK_CLIENT_EXECUTABLE));
    if (pythonExec != null) {
        env.pythonExec = pythonExec;
    }

    // 2. setup temporary local directory for the user files
    tmpDir = new File(tmpDir).getAbsolutePath();
    Path tmpDirPath = new Path(tmpDir);
    tmpDirPath.getFileSystem().mkdirs(tmpDirPath);
    env.tempDirectory = tmpDir;

    // 3. append the internal lib files to PYTHONPATH.
    if (System.getenv(ConfigConstants.ENV_FLINK_OPT_DIR) != null) {
        String pythonLibDir =
                System.getenv(ConfigConstants.ENV_FLINK_OPT_DIR) + File.separator + "python";
        env.pythonPath =
                getLibFiles(pythonLibDir).stream()
                        .map(p -> p.toFile().getAbsolutePath())
                        .collect(Collectors.joining(File.pathSeparator));
    }

    // 4. copy relevant python files to tmp dir and set them in PYTHONPATH.
    if (config.getOptional(PYTHON_FILES).isPresent()) {
        List<Path> pythonFiles =
                Arrays.stream(config.get(PYTHON_FILES).split(FILE_DELIMITER))
                        .map(Path::new)
                        .collect(Collectors.toList());
        addToPythonPath(env, pythonFiles);
    }

    // 5. set the archives directory as the working directory, then user could access the
    // content of the archives via relative path
    // Archives are only extracted when a python executable has been configured
    // explicitly (via config or the PYFLINK_CLIENT_EXECUTABLE env var).
    if (config.getOptional(PYTHON_ARCHIVES).isPresent()
            && (config.getOptional(PYTHON_CLIENT_EXECUTABLE).isPresent()
                    || !StringUtils.isNullOrWhitespaceOnly(
                            System.getenv(PYFLINK_CLIENT_EXECUTABLE)))) {
        env.archivesDirectory = String.join(File.separator, tmpDir, PYTHON_ARCHIVES_DIR);

        // extract archives to archives directory
        config.getOptional(PYTHON_ARCHIVES)
                .ifPresent(
                        pyArchives -> {
                            for (String archive : pyArchives.split(FILE_DELIMITER)) {
                                final Path archivePath;
                                final String targetDirName;
                                final String originalFileName;
                                // An archive entry may be "path#targetDir" or just "path";
                                // without a target dir the archive's own file name is used.
                                if (archive.contains(PythonDependencyUtils.PARAM_DELIMITER)) {
                                    String[] filePathAndTargetDir =
                                            archive.split(
                                                    PythonDependencyUtils.PARAM_DELIMITER, 2);
                                    archivePath = new Path(filePathAndTargetDir[0]);
                                    targetDirName = filePathAndTargetDir[1];
                                    originalFileName = archivePath.getName();
                                } else {
                                    archivePath = new Path(archive);
                                    originalFileName = archivePath.getName();
                                    targetDirName = originalFileName;
                                }

                                // Remote archives are first copied to the local temp dir.
                                Path localArchivePath = archivePath;
                                try {
                                    if (archivePath.getFileSystem().isDistributedFS()) {
                                        localArchivePath =
                                                new Path(
                                                        env.tempDirectory,
                                                        String.join(
                                                                File.separator,
                                                                UUID.randomUUID().toString(),
                                                                originalFileName));
                                        FileUtils.copy(archivePath, localArchivePath, false);
                                    }
                                } catch (IOException e) {
                                    String msg =
                                            String.format(
                                                    "Error occurred when copying %s to %s.",
                                                    archivePath, localArchivePath);
                                    throw new RuntimeException(msg, e);
                                }

                                try {
                                    CompressionUtils.extractFile(
                                            localArchivePath.getPath(),
                                            String.join(
                                                    File.separator,
                                                    env.archivesDirectory,
                                                    targetDirName),
                                            originalFileName);
                                } catch (IOException e) {
                                    throw new RuntimeException(
                                            "Extract archives to archives directory failed.", e);
                                }
                            }
                        });
    }

    // 6. append configured python.pythonpath to the PYTHONPATH.
    if (config.getOptional(PYTHON_PATH).isPresent()) {
        env.pythonPath =
                String.join(
                        File.pathSeparator,
                        config.getOptional(PYTHON_PATH).get(),
                        env.pythonPath);
    }

    // Finally make the entry-point script importable.
    if (entryPointScript != null) {
        addToPythonPath(env, Collections.singletonList(new Path(entryPointScript)));
    }
    return env;
}
// Verifies interpreter resolution precedence: platform default, then the
// PYFLINK_CLIENT_EXECUTABLE env var, then the explicit config option; also
// checks that archives are extracted only when an executable is configured.
@Test
void testSetPythonExecutable() throws IOException {
    Configuration config = new Configuration();

    // Create a minimal zip archive to exercise the archive-extraction path.
    File zipFile = new File(tmpDirPath + File.separator + "venv.zip");
    try (ZipArchiveOutputStream zipOut = new ZipArchiveOutputStream(new FileOutputStream(zipFile))) {
        ZipArchiveEntry entry = new ZipArchiveEntry("zipDir" + "/zipfile0");
        zipOut.putArchiveEntry(entry);
        zipOut.write(new byte[] {1, 1, 1, 1, 1});
        zipOut.closeArchiveEntry();
    }

    // No config, no env var: falls back to the platform's default interpreter name.
    PythonEnvUtils.PythonEnvironment env = preparePythonEnvironment(config, null, tmpDirPath);
    if (OperatingSystem.isWindows()) {
        assertThat(env.pythonExec).isEqualTo("python.exe");
    } else {
        assertThat(env.pythonExec).isEqualTo("python");
    }

    // Env var alone selects the interpreter.
    Map<String, String> systemEnv = new HashMap<>(System.getenv());
    systemEnv.put(PYFLINK_CLIENT_EXECUTABLE, "python3");
    CommonTestUtils.setEnv(systemEnv);
    try {
        env = preparePythonEnvironment(config, null, tmpDirPath);
        assertThat(env.pythonExec).isEqualTo("python3");
    } finally {
        systemEnv.remove(PYFLINK_CLIENT_EXECUTABLE);
        CommonTestUtils.setEnv(systemEnv);
    }

    // With archives configured and an env-var executable, the archive is extracted.
    config.set(PYTHON_ARCHIVES, zipFile.getPath());
    systemEnv = new HashMap<>(System.getenv());
    systemEnv.put(PYFLINK_CLIENT_EXECUTABLE, "venv.zip/venv/bin/python");
    CommonTestUtils.setEnv(systemEnv);
    try {
        env = preparePythonEnvironment(config, null, tmpDirPath);
        assertThat(env.pythonExec).isEqualTo("venv.zip/venv/bin/python");
    } finally {
        systemEnv.remove(PYFLINK_CLIENT_EXECUTABLE);
        CommonTestUtils.setEnv(systemEnv);
    }
    java.nio.file.Path[] files = FileUtils.listDirectory(new File(env.archivesDirectory).toPath());
    assertThat(files).hasSize(1);
    assertThat(files[0].getFileName().toString()).isEqualTo(zipFile.getName());

    // Explicit config option wins over everything.
    config.removeConfig(PYTHON_ARCHIVES);
    config.set(PYTHON_CLIENT_EXECUTABLE, "/usr/bin/python");
    env = preparePythonEnvironment(config, null, tmpDirPath);
    assertThat(env.pythonExec).isEqualTo("/usr/bin/python");
}
/**
 * Returns a sink that writes {@code Map.Entry} items into the map with the given
 * name, using each entry's key and value unchanged.
 *
 * @param mapName name of the target map
 */
@Nonnull
public static <K, V> Sink<Entry<K, V>> map(@Nonnull String mapName) {
    // Delegate to the keyFn/valueFn overload with identity extractors.
    return map(mapName, Entry::getKey, Entry::getValue);
}
// Writes all entries of a source map through Sinks.map(IMap) and verifies the
// sink map ends up containing exactly the expected key/value pairs.
@Test
public void map_byRef() {
    // Given
    List<Integer> input = sequence(itemCount);
    putToBatchSrcMap(input);
    IMap<String, Integer> sinkMap = hz().getMap(sinkName);

    // When
    Sink<Entry<String, Integer>> sink = Sinks.map(sinkMap);

    // Then
    p.readFrom(Sources.<String, Integer>map(srcName)).writeTo(sink);
    execute();
    List<Entry<String, Integer>> expected = input.stream()
            .map(i -> entry(String.valueOf(i), i))
            .collect(toList());
    Set<Entry<String, Integer>> actual = sinkMap.entrySet();
    assertEquals(expected.size(), actual.size());
    expected.forEach(entry -> assertTrue(actual.contains(entry)));
}
@Override public String getPrincipal() { Subject subject = org.apache.shiro.SecurityUtils.getSubject(); String principal; if (subject.isAuthenticated()) { principal = extractPrincipal(subject); if (zConf.isUsernameForceLowerCase()) { if (LOGGER.isDebugEnabled()) { LOGGER.debug("Converting principal name {} to lower case: {}", principal, principal.toLowerCase()); } principal = principal.toLowerCase(); } } else { // TODO(jl): Could be better to occur error? principal = "anonymous"; } return principal; }
// The service must return exactly the name exposed by the underlying
// java.security.Principal for an authenticated subject.
@Test
void canGetPrincipalName() {
    String expectedName = "java.security.Principal.getName()";
    setupPrincipalName(expectedName);
    assertEquals(expectedName, shiroSecurityService.getPrincipal());
}
/**
 * Converts the YAML cache-option rule configuration into a {@code CacheOption},
 * copying initial capacity and maximum size unchanged.
 *
 * @param yamlConfig YAML cache-option rule configuration
 * @return equivalent cache option
 */
@Override
public CacheOption swapToObject(final YamlSQLParserCacheOptionRuleConfiguration yamlConfig) {
    return new CacheOption(yamlConfig.getInitialCapacity(), yamlConfig.getMaximumSize());
}
// Round-trip check: YAML config values are copied verbatim into CacheOption.
@Test
void assertSwapToObject() {
    YamlSQLParserCacheOptionRuleConfiguration cacheOptionRuleConfig = new YamlSQLParserCacheOptionRuleConfiguration();
    cacheOptionRuleConfig.setInitialCapacity(2);
    cacheOptionRuleConfig.setMaximumSize(5L);
    CacheOption actual = new YamlSQLParserCacheOptionConfigurationSwapper().swapToObject(cacheOptionRuleConfig);
    assertThat(actual.getInitialCapacity(), is(2));
    assertThat(actual.getMaximumSize(), is(5L));
}
/**
 * Returns all menu records; no filtering is applied at this layer.
 */
@Override
public List<MenuDO> getMenuList() {
    return menuMapper.selectList();
}
// Inserts two menu rows and expects getMenuList() to return both of them,
// matching field-by-field in insertion order.
@Test
public void testGetMenuList_all() {
    // mock 数据
    MenuDO menu100 = randomPojo(MenuDO.class);
    menuMapper.insert(menu100);
    MenuDO menu101 = randomPojo(MenuDO.class);
    menuMapper.insert(menu101);
    // 准备参数

    // 调用
    List<MenuDO> list = menuService.getMenuList();
    // 断言
    assertEquals(2, list.size());
    assertPojoEquals(menu100, list.get(0));
    assertPojoEquals(menu101, list.get(1));
}