focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Validates {@code jsonText} against {@code schemaText} with no base URI for
 * resolving external {@code $ref}s (third argument of the full overload is null).
 *
 * @throws IOException propagated from the three-argument overload
 */
public static boolean isJsonValid(String schemaText, String jsonText) throws IOException {
   return isJsonValid(schemaText, jsonText, null);
}
// Validates a JSON array against a schema file containing an external $ref,
// supplying the base URL used to resolve that reference; any exception fails the test.
@Test
void testValidateJsonWithExternalReferenceSuccess() {
   boolean valid = false;
   String schemaText = null;
   String jsonText = "[{\"region\": \"north\", \"weather\": \"snowy\", \"temp\": -1.5, \"visibility\": 25}, "
         + "{\"region\": \"west\", \"weather\": \"rainy\", \"temp\": 12.2, \"visibility\": 300}]";
   try {
      // Load schema from file.
      schemaText = FileUtils.readFileToString(
            new File("target/test-classes/io/github/microcks/util/openapi/weather-forecasts.json"));
      // Validate Json according schema.
      valid = OpenAPISchemaValidator.isJsonValid(schemaText, jsonText,
            "https://raw.githubusercontent.com/microcks/microcks/1.6.x/commons/util/src/test/resources/io/github/microcks/util/openapi/");
   } catch (Exception e) {
      fail("Exception should not be thrown");
   }
   // Assert Json object is valid.
   assertTrue(valid);
}
// Returns the target class associated with this exception; may be null
// (the tests show it is null when the exception is built from a Type).
public Class<?> getTargetClass() {
    return targetClass;
}
// Constructing from a Type (not a Class) must set the deserialize error code and
// formatted message, but leave targetClass null.
@Test
void testConstructorWithTargetType() {
    Type type = SimpleType.constructUnsafe(NacosDeserializationExceptionTest.class);
    NacosDeserializationException exception = new NacosDeserializationException(type);
    assertEquals(Constants.Exception.DESERIALIZE_ERROR_CODE, exception.getErrCode());
    assertEquals(
            String.format("errCode: 101, errMsg: Nacos deserialize for class [%s] failed. ", type.getTypeName()),
            exception.getMessage());
    assertNull(exception.getTargetClass());
}
/**
 * Handles a successful ConsumerGroupHeartbeat response: records the broker-assigned
 * member id and epoch and, when an assignment is present and the member can take it,
 * kicks off reconciliation towards it. Responses that arrive while the member is
 * leaving, has completed leaving, or is otherwise not in the group are ignored.
 *
 * @throws IllegalArgumentException if the response unexpectedly carries an error code
 */
@Override
public void onHeartbeatSuccess(ConsumerGroupHeartbeatResponseData response) {
    if (response.errorCode() != Errors.NONE.code()) {
        String errorMessage = String.format(
                "Unexpected error in Heartbeat response. Expected no error, but received: %s",
                Errors.forCode(response.errorCode())
        );
        throw new IllegalArgumentException(errorMessage);
    }
    MemberState state = state();
    if (state == MemberState.LEAVING) {
        log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} is " +
                "already leaving the group.", memberId, memberEpoch);
        return;
    }
    if (state == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) {
        log.debug("Member {} with epoch {} received a successful response to the heartbeat " +
                "to leave the group and completed the leave operation. ", memberId, memberEpoch);
        return;
    }
    if (isNotInGroup()) {
        log.debug("Ignoring heartbeat response received from broker. Member {} is in {} state" +
                " so it's not a member of the group. ", memberId, state);
        return;
    }
    // Update the group member id label in the client telemetry reporter if the member id has
    // changed. Initially the member id is empty, and it is updated when the member joins the
    // group. This is done here to avoid updating the label on every heartbeat response. Also
    // check if the member id is null, as the schema defines it as nullable.
    if (response.memberId() != null && !response.memberId().equals(memberId)) {
        clientTelemetryReporter.ifPresent(reporter -> reporter.updateMetricsLabels(
                Collections.singletonMap(ClientTelemetryProvider.GROUP_MEMBER_ID, response.memberId())));
    }
    this.memberId = response.memberId();
    updateMemberEpoch(response.memberEpoch());
    ConsumerGroupHeartbeatResponseData.Assignment assignment = response.assignment();
    if (assignment != null) {
        if (!state.canHandleNewAssignment()) {
            // New assignment received but member is in a state where it cannot take new
            // assignments (ex. preparing to leave the group)
            log.debug("Ignoring new assignment {} received from server because member is in {} state.",
                    assignment, state);
            return;
        }
        // Index the assignment by topic id with sorted partition sets before reconciling.
        Map<Uuid, SortedSet<Integer>> newAssignment = new HashMap<>();
        assignment.topicPartitions().forEach(topicPartition ->
                newAssignment.put(topicPartition.topicId(), new TreeSet<>(topicPartition.partitions())));
        processAssignmentReceived(newAssignment);
    }
}
// First assignment (with one topic unresolved in metadata) triggers a full
// reconciliation; re-sending the extended or original assignment must not
// re-trigger reconciliation, though the same assignment is acknowledged again.
@Test
public void testSameAssignmentReconciledAgainWithMissingTopic() {
    ConsumerMembershipManager membershipManager = createMemberInStableState();
    Uuid topic1 = Uuid.randomUuid();
    Uuid topic2 = Uuid.randomUuid();
    final Assignment assignment1 = new ConsumerGroupHeartbeatResponseData.Assignment()
        .setTopicPartitions(Arrays.asList(
            new TopicPartitions().setTopicId(topic1).setPartitions(Collections.singletonList(0)),
            new TopicPartitions().setTopicId(topic2).setPartitions(Collections.singletonList(0))
        ));
    final Assignment assignment2 = new ConsumerGroupHeartbeatResponseData.Assignment()
        .setTopicPartitions(Arrays.asList(
            new TopicPartitions().setTopicId(topic1).setPartitions(Arrays.asList(0, 1)),
            new TopicPartitions().setTopicId(topic2).setPartitions(Collections.singletonList(0))
        ));
    // Only topic1 is resolvable; topic2 stays awaiting reconciliation.
    when(metadata.topicNames()).thenReturn(Collections.singletonMap(topic1, "topic1"));
    // Receive assignment - full reconciliation triggered
    // stay in RECONCILING state, since an unresolved topic is assigned
    membershipManager.onHeartbeatSuccess(createConsumerGroupHeartbeatResponse(assignment1).data());
    assertEquals(MemberState.RECONCILING, membershipManager.state());
    membershipManager.poll(time.milliseconds());
    verifyReconciliationTriggeredAndCompleted(membershipManager,
        Collections.singletonList(new TopicIdPartition(topic1, new TopicPartition("topic1", 0)))
    );
    membershipManager.onHeartbeatRequestGenerated();
    assertEquals(MemberState.RECONCILING, membershipManager.state());
    clearInvocations(membershipManager);
    // Receive extended assignment - assignment received but no reconciliation triggered
    membershipManager.onHeartbeatSuccess(createConsumerGroupHeartbeatResponse(assignment2).data());
    assertEquals(MemberState.RECONCILING, membershipManager.state());
    verifyReconciliationNotTriggered(membershipManager);
    // Receive original assignment again - full reconciliation not triggered but assignment is acked again
    membershipManager.onHeartbeatSuccess(createConsumerGroupHeartbeatResponse(assignment1).data());
    assertEquals(MemberState.RECONCILING, membershipManager.state());
    membershipManager.poll(time.milliseconds());
    assertEquals(MemberState.ACKNOWLEDGING, membershipManager.state());
    verifyReconciliationNotTriggered(membershipManager);
    assertEquals(Collections.singletonMap(topic1, mkSortedSet(0)),
        membershipManager.currentAssignment().partitions);
    assertEquals(mkSet(topic2), membershipManager.topicsAwaitingReconciliation());
}
/**
 * Builds the update requirements for a REPLACE VIEW commit. The only base
 * requirement is that the view's UUID still matches at commit time; each
 * metadata update may contribute further requirements through the builder.
 *
 * @throws IllegalArgumentException if {@code base} or {@code metadataUpdates} is null
 */
public static List<UpdateRequirement> forReplaceView(
    ViewMetadata base, List<MetadataUpdate> metadataUpdates) {
  Preconditions.checkArgument(null != base, "Invalid view metadata: null");
  Preconditions.checkArgument(null != metadataUpdates, "Invalid metadata updates: null");
  Builder requirements = new Builder(null, false);
  // Always pin the view UUID so a concurrent replace of a different view instance fails.
  requirements.require(new UpdateRequirement.AssertViewUUID(base.uuid()));
  for (MetadataUpdate update : metadataUpdates) {
    requirements.update(update);
  }
  return requirements.build();
}
// A format-version upgrade on REPLACE VIEW must produce exactly one requirement:
// the view-UUID assertion, which must validate against the current metadata.
@Test
public void upgradeFormatVersionForView() {
    List<UpdateRequirement> requirements = UpdateRequirements.forReplaceView(
        viewMetadata, ImmutableList.of(new MetadataUpdate.UpgradeFormatVersion(2)));
    requirements.forEach(req -> req.validate(viewMetadata));
    assertThat(requirements)
        .hasSize(1)
        .hasOnlyElementsOfType(UpdateRequirement.AssertViewUUID.class);
    assertViewUUID(requirements);
}
// Replays blocks in [startBlock, endBlock]; delegates with the final flag set to
// true (presumably "ascending order" — confirm against the four-argument overload).
public Flowable<EthBlock> replayBlocksFlowable(
        DefaultBlockParameter startBlock,
        DefaultBlockParameter endBlock,
        boolean fullTransactionObjects) {
    return replayBlocksFlowable(startBlock, endBlock, fullTransactionObjects, true);
}
// Replaying past blocks [0..2] must emit the three stubbed blocks in order,
// then complete; latches bound the asynchronous wait to one second each.
@Test
public void testReplayBlocksFlowable() throws Exception {
    List<EthBlock> ethBlocks = Arrays.asList(createBlock(0), createBlock(1), createBlock(2));
    OngoingStubbing<EthBlock> stubbing =
        when(web3jService.send(any(Request.class), eq(EthBlock.class)));
    for (EthBlock ethBlock : ethBlocks) {
        stubbing = stubbing.thenReturn(ethBlock);
    }
    Flowable<EthBlock> flowable = web3j.replayPastBlocksFlowable(
        new DefaultBlockParameterNumber(BigInteger.ZERO),
        new DefaultBlockParameterNumber(BigInteger.valueOf(2)),
        false);
    CountDownLatch transactionLatch = new CountDownLatch(ethBlocks.size());
    CountDownLatch completedLatch = new CountDownLatch(1);
    List<EthBlock> results = new ArrayList<>(ethBlocks.size());
    Disposable subscription = flowable.subscribe(
        result -> {
            results.add(result);
            transactionLatch.countDown();
        },
        throwable -> fail(throwable.getMessage()),
        () -> completedLatch.countDown());
    transactionLatch.await(1, TimeUnit.SECONDS);
    assertEquals(results, (ethBlocks));
    subscription.dispose();
    completedLatch.await(1, TimeUnit.SECONDS);
    assertTrue(subscription.isDisposed());
}
/**
 * Stores the task result in this holder; may be called at most once.
 *
 * @throws NullPointerException  if {@code taskResult} is null
 * @throws IllegalStateException if a result has already been set
 */
@Override
public void setResult(CeTaskResult taskResult) {
    if (taskResult == null) {
        throw new NullPointerException("taskResult can not be null");
    }
    if (this.result != null) {
        throw new IllegalStateException("CeTaskResult has already been set in the holder");
    }
    this.result = taskResult;
}
// A second setResult on the same holder must fail with the documented
// IllegalStateException message.
@Test
public void setResult_throws_ISE_if_called_twice() {
    underTest.setResult(mock(CeTaskResult.class));
    assertThatThrownBy(() -> underTest.setResult(mock(CeTaskResult.class)))
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("CeTaskResult has already been set in the holder");
}
/**
 * Returns the partition count to use for split planning. Precedence: the
 * per-session override, then the connector-level configuration, and finally the
 * sum of Cassandra's size estimates for the table.
 */
public long getTotalPartitionsCount(String keyspace, String table, Optional<Long> sessionSplitsPerNode) {
    // Session-level override wins over the connector config.
    Optional<Long> override = sessionSplitsPerNode.isPresent() ? sessionSplitsPerNode : configSplitsPerNode;
    if (override.isPresent()) {
        return override.get();
    }
    long totalPartitions = 0;
    for (SizeEstimate estimate : session.getSizeEstimates(keyspace, table)) {
        totalPartitions += estimate.getPartitionsCount();
    }
    return totalPartitions;
}
// Precedence of split-count sources: session override > connector config >
// size estimates (a freshly created empty table yields 0 partitions).
@Test
public void testPartitionCountOverride() throws Exception {
    String tableName = "partition_count_override_table";
    session.execute(format("CREATE TABLE %s.%s (key text PRIMARY KEY)", KEYSPACE, tableName));
    EmbeddedCassandra.refreshSizeEstimates(KEYSPACE, tableName);
    CassandraTokenSplitManager onlyConfigSplitsPerNode =
        new CassandraTokenSplitManager(session, SPLIT_SIZE, Optional.of(12_345L));
    assertEquals(12_345L, onlyConfigSplitsPerNode.getTotalPartitionsCount(KEYSPACE, tableName, Optional.empty()));
    CassandraTokenSplitManager onlySessionSplitsPerNode =
        new CassandraTokenSplitManager(session, SPLIT_SIZE, Optional.empty());
    assertEquals(67_890L, onlySessionSplitsPerNode.getTotalPartitionsCount(KEYSPACE, tableName, Optional.of(67_890L)));
    CassandraTokenSplitManager sessionOverrideConfig =
        new CassandraTokenSplitManager(session, SPLIT_SIZE, Optional.of(12_345L));
    assertEquals(67_890L, sessionOverrideConfig.getTotalPartitionsCount(KEYSPACE, tableName, Optional.of(67_890L)));
    CassandraTokenSplitManager defaultSplitManager =
        new CassandraTokenSplitManager(session, SPLIT_SIZE, Optional.empty());
    assertEquals(0, defaultSplitManager.getTotalPartitionsCount(KEYSPACE, tableName, Optional.empty()));
}
// Registers the fallback transformer used when no specific table-entry type is
// registered; simply delegates to the data table type registry.
@Override
public void setDefaultDataTableEntryTransformer(
        TableEntryByTypeTransformer defaultDataTableEntryByTypeTransformer
) {
    dataTableTypeRegistry.setDefaultDataTableEntryTransformer(defaultDataTableEntryByTypeTransformer);
}
// Registering a default table-entry transformer must be accepted.
// NOTE(review): no assertion — this only verifies registration does not throw;
// consider asserting the registry state if it is observable.
@Test
void should_set_default_table_entry_transformer() {
    TableEntryByTypeTransformer expected = (entry, toValueType, tableCellByTypeTransformer) -> null;
    registry.setDefaultDataTableEntryTransformer(expected);
}
/**
 * Attempts to log the subject in, reusing {@code currentSessionId} when it is
 * non-blank. Returns the created session on success, an empty Optional on bad
 * credentials, and rethrows (after sending an audit failure event) when the
 * authentication service is unavailable.
 */
public Optional<Session> login(@Nullable String currentSessionId, String host,
                               ActorAwareAuthenticationToken authToken)
        throws AuthenticationServiceUnavailableException {
    // Blank session ids are treated as "no previous session".
    final String previousSessionId = StringUtils.defaultIfBlank(currentSessionId, null);
    final Subject subject = new Subject.Builder().sessionId(previousSessionId).host(host).buildSubject();
    ThreadContext.bind(subject);
    try {
        final Session session = subject.getSession();
        subject.login(authToken);
        return createSession(subject, session, host);
    } catch (AuthenticationServiceUnavailableException e) {
        log.info("Session creation failed due to authentication service being unavailable. Actor: \"{}\"",
                authToken.getActor().urn());
        final Map<String, Object> auditEventContext = ImmutableMap.of(
                "remote_address", host,
                "message", "Authentication service unavailable: " + e.getMessage()
        );
        auditEventSender.failure(authToken.getActor(), SESSION_CREATE, auditEventContext);
        // Service outages are surfaced to the caller, unlike plain bad credentials.
        throw e;
    } catch (AuthenticationException e) {
        log.info("Invalid credentials in session create request. Actor: \"{}\"", authToken.getActor().urn());
        final Map<String, Object> auditEventContext = ImmutableMap.of(
                "remote_address", host
        );
        auditEventSender.failure(authToken.getActor(), SESSION_CREATE, auditEventContext);
        return Optional.empty();
    }
}
// Logging in with invalid credentials must emit an audit failure event
// attributed to the actor of the token that was actually used.
@Test
public void invalidAuthToken() {
    sessionCreator.login(null, "host", invalidToken);
    // Fix: verify against invalidToken's actor, not validToken's. The focal
    // method audits authToken.getActor() for the token passed to login(), so
    // verifying validToken here only passed when both fixtures shared an actor.
    verify(auditEventSender).failure(eq(invalidToken.getActor()), anyString(), anyMap());
}
/**
 * Computes the geometric-mean growth per complete time frame:
 * (product over frames of (1 + r_i)) ^ (1 / completeTimeFrames).
 */
public Num getTotalReturn() {
    Num totalProduct = one;
    int completeTimeFrames = (getBarSeries().getBarCount() / barCount);
    for (int i = 1; i <= completeTimeFrames; i++) {
        int index = i * barCount;
        Num currentReturn = getValue(index);
        // Skip NaN at the end of a series
        // NOTE(review): reference comparison — presumably NaN is a shared singleton
        // Num instance here; confirm, otherwise this should be an isNaN()/equals check.
        if (currentReturn != NaN) {
            currentReturn = currentReturn.plus(one);
            totalProduct = totalProduct.multipliedBy(currentReturn);
        }
    }
    return totalProduct.pow(one.dividedBy(numOf(completeTimeFrames)));
}
// Growth rate over the shared closePrice fixture with a 5-bar frame must match
// the precomputed expected value.
@Test
public void testGetTotalReturn() {
    PeriodicalGrowthRateIndicator gri = new PeriodicalGrowthRateIndicator(this.closePrice, 5);
    Num result = gri.getTotalReturn();
    assertNumEquals(0.9564, result);
}
/**
 * Copies up to {@code len} bytes of this message, starting at {@code index},
 * into {@code dst} at {@code off}. Returns the number of bytes actually copied
 * (clamped so the read never runs past the end of the message).
 */
public int getBytes(int index, byte[] dst, int off, int len) {
    int toCopy = Math.min(len, size - index);
    if (buf.hasArray()) {
        // Heap buffer: copy straight from the backing array.
        System.arraycopy(buf.array(), buf.arrayOffset() + index, dst, off, toCopy);
        return toCopy;
    }
    // Direct buffer: read through an independent duplicate so the original
    // buffer's position/limit are left untouched.
    ByteBuffer view = buf.duplicate();
    view.position(index);
    view.get(dst, off, toCopy);
    return toCopy;
}
// Reading the first three bytes of the fixture message must yield {0, 1, 2}.
@Test
public void testGetBytes() {
    final Msg msg = initMsg();
    final byte[] dst = new byte[3];
    msg.getBytes(0, dst, 0, 3);
    assertThat(dst, is(new byte[] { 0, 1, 2 }));
}
// Event-time timer iteration is deliberately unsupported in this batch-execution
// time service; callers must not rely on it in the State Processor API.
@Override
public void forEachEventTimeTimer(BiConsumerWithException<N, Long, Exception> consumer) {
    throw new UnsupportedOperationException(
            "The BatchExecutionInternalTimeService should not be used in State Processor API.");
}
// forEachEventTimeTimer must throw UnsupportedOperationException with the
// documented message; the consumer callback must never run.
@Test
void testForEachEventTimeTimerUnsupported() {
    BatchExecutionInternalTimeService<Object, Object> timeService =
        new BatchExecutionInternalTimeService<>(
            new TestProcessingTimeService(),
            LambdaTrigger.eventTimeTrigger(timer -> {}));
    assertThatThrownBy(
            () -> timeService.forEachEventTimeTimer(
                    (o, aLong) -> fail(
                            "The forEachEventTimeTimer() should not be supported")))
        .isInstanceOf(UnsupportedOperationException.class)
        .hasMessageContaining(
            "The BatchExecutionInternalTimeService should not be used in State Processor API");
}
/**
 * Returns the completed value, or {@code valueIfAbsent} if this future has not
 * completed yet (see the accompanying test).
 *
 * @throws ExecutionException   declared for exceptional completion — see implementations
 * @throws InterruptedException declared by the contract — see implementations
 */
public abstract T getNow(T valueIfAbsent) throws InterruptedException, ExecutionException;
// Before completion getNow must return the fallback; after the completer thread
// fails the future, it must be observed as failed with the original exception.
@Test
public void testCompletingFuturesExceptionally() throws Exception {
    final KafkaFutureImpl<String> future = new KafkaFutureImpl<>();
    CompleterThread<String> myThread =
        new CompleterThread<>(future, null, new RuntimeException("Ultimate efficiency achieved."));
    assertIsNotCompleted(future);
    assertEquals("I am ready", future.getNow("I am ready"));
    myThread.start();
    awaitAndAssertFailure(future, RuntimeException.class, "Ultimate efficiency achieved.");
    assertIsFailed(future);
    myThread.join();
    assertNull(myThread.testException);
}
// Package-private accessor for the consumer Properties held by this options object
// (presumably assembled from the parsed command line — see the accompanying test).
Properties consumerProps() {
    return consumerProps;
}
// A client.id passed via --consumer-property must survive into the final
// consumer Properties.
@Test
public void testClientIdOverride() throws IOException {
    String[] args = new String[]{
        "--bootstrap-server", "localhost:9092",
        "--topic", "test",
        "--from-beginning",
        "--consumer-property", "client.id=consumer-1"
    };
    ConsoleConsumerOptions config = new ConsoleConsumerOptions(args);
    Properties consumerProperties = config.consumerProps();
    assertEquals("consumer-1", consumerProperties.getProperty(ConsumerConfig.CLIENT_ID_CONFIG));
}
// Binds the socket with no port-range configuration: delegates with a null
// Configuration and a null range-config key.
public static void bind(ServerSocket socket, InetSocketAddress address, int backlog) throws IOException {
    bind(socket, address, backlog, null, null);
}
// An empty port-range value must not prevent binding; the socket still binds to
// an ephemeral port. The socket is closed in finally to avoid leaking it.
@Test
public void testEmptyConfig() throws Exception {
    Configuration conf = new Configuration();
    conf.set("TestRange", "");
    ServerSocket socket = new ServerSocket();
    InetSocketAddress address = new InetSocketAddress("0.0.0.0", 0);
    try {
        Server.bind(socket, address, 10, conf, "TestRange");
        assertTrue(socket.isBound());
    } finally {
        socket.close();
    }
}
public String getClientLatency() { if (!enabled) { return null; } Instant trackerStart = Instant.now(); String latencyDetails = queue.poll(); // non-blocking pop if (LOG.isDebugEnabled()) { Instant stop = Instant.now(); long elapsed = Duration.between(trackerStart, stop).toMillis(); LOG.debug("Dequeued latency info [{} ms]: {}", elapsed, latencyDetails); } return latencyDetails; }
// A tracker constructed disabled must return null both before and after an
// operation is registered against it.
@Test
public void verifyDisablingOfTracker() throws Exception {
    // verify that disabling of the tracker works
    AbfsPerfTracker abfsPerfTracker = new AbfsPerfTracker(accountName, filesystemName, false);
    String latencyDetails = abfsPerfTracker.getClientLatency();
    assertThat(latencyDetails).describedAs("AbfsPerfTracker should be empty").isNull();
    try (AbfsPerfInfo tracker = new AbfsPerfInfo(abfsPerfTracker, "disablingCaller", "disablingCallee")) {
        AbfsJdkHttpOperation op = new AbfsJdkHttpOperation(url, "GET", new ArrayList<>(),
            Duration.ofMillis(DEFAULT_HTTP_CONNECTION_TIMEOUT),
            Duration.ofMillis(DEFAULT_HTTP_READ_TIMEOUT));
        tracker.registerResult(op).registerSuccess(true);
    }
    latencyDetails = abfsPerfTracker.getClientLatency();
    assertThat(latencyDetails).describedAs("AbfsPerfTracker should return no record").isNull();
}
/**
 * Drives the client side of the MySQL connection-phase handshake: answers the
 * initial handshake packet, auth-switch requests, and caching_sha2 "more data"
 * packets; completes the auth callback on OK and fails the pipeline on an error
 * packet. Each packet type returns early; anything else is expected to be an
 * error packet.
 */
@SneakyThrows(NoSuchAlgorithmException.class)
@Override
public void channelRead(final ChannelHandlerContext ctx, final Object msg) {
    if (msg instanceof MySQLHandshakePacket) {
        MySQLHandshakePacket handshake = (MySQLHandshakePacket) msg;
        MySQLHandshakeResponse41Packet handshakeResponsePacket =
                new MySQLHandshakeResponse41Packet(MAX_PACKET_SIZE, CHARACTER_SET, username);
        handshakeResponsePacket.setAuthResponse(
                generateAuthResponse(handshake.getAuthPluginData().getAuthenticationPluginData()));
        handshakeResponsePacket.setCapabilityFlags(generateClientCapability());
        handshakeResponsePacket.setAuthPluginName(MySQLAuthenticationMethod.NATIVE);
        ctx.channel().writeAndFlush(handshakeResponsePacket);
        // Remember the server version for the success callback below.
        serverVersion = new MySQLServerVersion(handshake.getServerVersion());
        return;
    }
    if (msg instanceof MySQLAuthSwitchRequestPacket) {
        MySQLAuthSwitchRequestPacket authSwitchRequest = (MySQLAuthSwitchRequestPacket) msg;
        ctx.channel().writeAndFlush(new MySQLAuthSwitchResponsePacket(getAuthPluginResponse(authSwitchRequest)));
        // Keep the new seed for subsequent auth computations.
        seed = authSwitchRequest.getAuthPluginData().getAuthenticationPluginData();
        return;
    }
    if (msg instanceof MySQLAuthMoreDataPacket) {
        MySQLAuthMoreDataPacket authMoreData = (MySQLAuthMoreDataPacket) msg;
        handleCachingSha2Auth(ctx, authMoreData);
        return;
    }
    if (msg instanceof MySQLOKPacket) {
        // Authentication finished: this handler's job is done, remove it.
        ctx.channel().pipeline().remove(this);
        authResultCallback.setSuccess(serverVersion);
        return;
    }
    // Any remaining packet type is treated as an error response.
    MySQLErrPacket error = (MySQLErrPacket) msg;
    ctx.channel().close();
    throw new PipelineInternalException(error.getErrorMessage());
}
// Receiving the initial handshake must trigger a HandshakeResponse41 write and
// record the parsed server version (fixture default is 5.7.22, checked via reflection).
@Test
void assertChannelReadHandshakeInitPacket() throws ReflectiveOperationException {
    MySQLHandshakePacket handshakePacket =
        new MySQLHandshakePacket(0, false, new MySQLAuthenticationPluginData(new byte[8], new byte[12]));
    handshakePacket.setAuthPluginName(MySQLAuthenticationMethod.NATIVE);
    mysqlNegotiateHandler.channelRead(channelHandlerContext, handshakePacket);
    verify(channel).writeAndFlush(ArgumentMatchers.any(MySQLHandshakeResponse41Packet.class));
    MySQLServerVersion serverVersion = (MySQLServerVersion) Plugins.getMemberAccessor()
        .get(MySQLNegotiateHandler.class.getDeclaredField("serverVersion"), mysqlNegotiateHandler);
    assertThat(Plugins.getMemberAccessor().get(MySQLServerVersion.class.getDeclaredField("major"), serverVersion), is(5));
    assertThat(Plugins.getMemberAccessor().get(MySQLServerVersion.class.getDeclaredField("minor"), serverVersion), is(7));
    assertThat(Plugins.getMemberAccessor().get(MySQLServerVersion.class.getDeclaredField("series"), serverVersion), is(22));
}
/**
 * Extracts the schema name from a metadata node path of the form
 * {@code <metaDataNode>/<database>/schemas/<schema>}, case-insensitively.
 * Returns empty when the path does not match.
 */
public static Optional<String> getSchemaName(final String path) {
    Matcher matcher = Pattern
            .compile(getMetaDataNode() + "/([\\w\\-]+)/schemas/([\\w\\-]+)$", Pattern.CASE_INSENSITIVE)
            .matcher(path);
    // Group 2 is the schema segment; group 1 is the database segment.
    return matcher.find() ? Optional.of(matcher.group(2)) : Optional.empty();
}
// A well-formed metadata schema path must yield the trailing schema segment.
@Test
void assertGetSchemaName() {
    Optional<String> actual = DatabaseMetaDataNode.getSchemaName("/metadata/foo_db/schemas/foo_schema");
    assertTrue(actual.isPresent());
    assertThat(actual.get(), is("foo_schema"));
}
/**
 * Prints the vespa-stat usage text to standard output. Formatting is delegated
 * to commons-cli; the footer is empty and no auto-usage line is generated.
 */
public void printHelp() {
    new HelpFormatter().printHelp(
            "vespa-stat [options]",
            "Fetch statistics about a specific user, group, bucket, gid or document.",
            options,
            "",
            false);
}
// printHelp writes to System.out, so stdout is captured and restored in finally;
// the output must contain the usage header.
@Test
void testPrintHelp() {
    ByteArrayOutputStream outContent = new ByteArrayOutputStream();
    PrintStream oldOut = System.out;
    System.setOut(new PrintStream(outContent));
    try {
        CommandLineOptions options = new CommandLineOptions();
        options.printHelp();
        String output = outContent.toString();
        assertTrue(output.contains("vespa-stat [options]"));
    } finally {
        System.setOut(oldOut);
        outContent.reset();
    }
}
// Returns the configured host whitelist.
// NOTE(review): exposes the internal list reference — callers can mutate it;
// consider returning an unmodifiable view if that is not intended.
public List<String> getHostWhitelist() {
    return hostWhitelist;
}
// The fixture config must parse into a non-null, two-entry host whitelist.
@Test
public void testConfigList() {
    Assert.assertNotNull(routerConfig.getHostWhitelist());
    Assert.assertEquals(routerConfig.getHostWhitelist().size(), 2);
}
/**
 * Creates an in-memory TransformEvaluator for the given Flatten application.
 * The {@code inputBundle} parameter is unused here.
 */
@Override
public <InputT> TransformEvaluator<InputT> forApplication(
        AppliedPTransform<?, ?, ?> application, CommittedBundle<?> inputBundle) {
    // Raw/unchecked cast bridges the wildcard application type to the caller's
    // InputT; safe as long as the application's input element type matches InputT.
    @SuppressWarnings({"cast", "unchecked", "rawtypes"})
    TransformEvaluator<InputT> evaluator =
            (TransformEvaluator<InputT>) createInMemoryEvaluator((AppliedPTransform) application);
    return evaluator;
}
// Two evaluators for the same Flatten (one per input bundle) must each forward
// their elements, window/timestamp/pane intact, into their own output bundle.
@Test
public void testFlattenInMemoryEvaluator() throws Exception {
    PCollection<Integer> left = p.apply("left", Create.of(1, 2, 4));
    PCollection<Integer> right = p.apply("right", Create.of(-1, 2, -4));
    PCollectionList<Integer> list = PCollectionList.of(left).and(right);
    PCollection<Integer> flattened = list.apply(Flatten.pCollections());
    CommittedBundle<Integer> leftBundle = bundleFactory.createBundle(left).commit(Instant.now());
    CommittedBundle<Integer> rightBundle = bundleFactory.createBundle(right).commit(Instant.now());
    EvaluationContext context = mock(EvaluationContext.class);
    UncommittedBundle<Integer> flattenedLeftBundle = bundleFactory.createBundle(flattened);
    UncommittedBundle<Integer> flattenedRightBundle = bundleFactory.createBundle(flattened);
    // Successive createBundle calls hand out the left then the right output bundle.
    when(context.createBundle(flattened)).thenReturn(flattenedLeftBundle, flattenedRightBundle);
    FlattenEvaluatorFactory factory = new FlattenEvaluatorFactory(context);
    AppliedPTransform<?, ?, ?> flattenedProducer = DirectGraphs.getProducer(flattened);
    TransformEvaluator<Integer> leftSideEvaluator = factory.forApplication(flattenedProducer, leftBundle);
    TransformEvaluator<Integer> rightSideEvaluator = factory.forApplication(flattenedProducer, rightBundle);
    leftSideEvaluator.processElement(WindowedValue.valueInGlobalWindow(1));
    rightSideEvaluator.processElement(WindowedValue.valueInGlobalWindow(-1));
    leftSideEvaluator.processElement(
        WindowedValue.timestampedValueInGlobalWindow(2, new Instant(1024)));
    leftSideEvaluator.processElement(WindowedValue.valueInGlobalWindow(4, PaneInfo.NO_FIRING));
    rightSideEvaluator.processElement(
        WindowedValue.valueInGlobalWindow(2, PaneInfo.ON_TIME_AND_ONLY_FIRING));
    rightSideEvaluator.processElement(
        WindowedValue.timestampedValueInGlobalWindow(-4, new Instant(-4096)));
    TransformResult<Integer> rightSideResult = rightSideEvaluator.finishBundle();
    TransformResult<Integer> leftSideResult = leftSideEvaluator.finishBundle();
    assertThat(rightSideResult.getOutputBundles(), Matchers.contains(flattenedRightBundle));
    assertThat(
        rightSideResult.getTransform(),
        Matchers.<AppliedPTransform<?, ?, ?>>equalTo(flattenedProducer));
    assertThat(leftSideResult.getOutputBundles(), Matchers.contains(flattenedLeftBundle));
    assertThat(
        leftSideResult.getTransform(),
        Matchers.<AppliedPTransform<?, ?, ?>>equalTo(flattenedProducer));
    assertThat(
        flattenedLeftBundle.commit(Instant.now()).getElements(),
        containsInAnyOrder(
            WindowedValue.timestampedValueInGlobalWindow(2, new Instant(1024)),
            WindowedValue.valueInGlobalWindow(4, PaneInfo.NO_FIRING),
            WindowedValue.valueInGlobalWindow(1)));
    assertThat(
        flattenedRightBundle.commit(Instant.now()).getElements(),
        containsInAnyOrder(
            WindowedValue.valueInGlobalWindow(2, PaneInfo.ON_TIME_AND_ONLY_FIRING),
            WindowedValue.timestampedValueInGlobalWindow(-4, new Instant(-4096)),
            WindowedValue.valueInGlobalWindow(-1)));
}
// Static facade: delegates AND-filter construction to the configured singleton
// instance.
public static BaseFilterOperator getAndFilterOperator(QueryContext queryContext,
        List<BaseFilterOperator> filterOperators, int numDocs) {
    return _instance.getAndFilterOperator(queryContext, filterOperators, numDocs);
}
@Test(dataProvider = "priorities") public void testPriority(BaseFilterOperator highPriorty, BaseFilterOperator lowerPriorty) { ArrayList<BaseFilterOperator> unsorted = Lists.newArrayList(lowerPriorty, highPriorty); BaseFilterOperator filterOperator = FilterOperatorUtils.getAndFilterOperator(QUERY_CONTEXT, unsorted, NUM_DOCS); assertTrue(filterOperator instanceof AndFilterOperator); List<Operator> actualChildOperators = ((AndFilterOperator) filterOperator).getChildOperators(); assertEquals(actualChildOperators, Lists.newArrayList(highPriorty, lowerPriorty), "Filter " + highPriorty + " should have more priority than filter " + lowerPriorty); }
/**
 * Reads a single timeline entity from its file under
 * cluster/flowRun/app/entityType, filtered down to the requested fields.
 * Returns null (mapped to HTTP 404 upstream) when the path cannot be resolved
 * or the entity file does not exist.
 */
@Override
public TimelineEntity getEntity(TimelineReaderContext context,
        TimelineDataToRetrieve dataToRetrieve) throws IOException {
    String flowRunPathStr = getFlowRunPath(context.getUserId(), context.getClusterId(),
        context.getFlowName(), context.getFlowRunId(), context.getAppId());
    Path clusterIdPath = new Path(entitiesPath, context.getClusterId());
    Path flowRunPath = new Path(clusterIdPath, flowRunPathStr);
    Path appIdPath = new Path(flowRunPath, context.getAppId());
    Path entityTypePath = new Path(appIdPath, context.getEntityType());
    Path entityFilePath = getNormalPath(new Path(entityTypePath,
        context.getEntityId() + TIMELINE_SERVICE_STORAGE_EXTENSION));
    if (entityFilePath == null) {
        return null;
    }
    try (BufferedReader reader = new BufferedReader(new InputStreamReader(
            fs.open(entityFilePath), StandardCharsets.UTF_8))) {
        TimelineEntity entity = readEntityFromFile(reader);
        // Trim the entity to only the fields the caller asked for.
        return createEntityToBeReturned(entity, dataToRetrieve.getFieldsToRetrieve());
    } catch (FileNotFoundException e) {
        // Missing file is an expected "not found" case, not an error.
        LOG.info("Cannot find entity {id:" + context.getEntityId() + " , type:"
            + context.getEntityType() + "}. Will send HTTP 404 in response.");
        return null;
    }
}
// Requesting Field.ALL must return the entity with identifier, created time,
// configs, metrics and events all populated from the fixture file.
@Test
void testGetEntityAllFields() throws Exception {
    // All fields of TimelineEntity will be returned.
    TimelineEntity result = reader.getEntity(
        new TimelineReaderContext("cluster1", "user1", "flow1", 1L, "app1", "app", "id_1"),
        new TimelineDataToRetrieve(null, null, EnumSet.of(Field.ALL), null, null, null));
    assertEquals(
        (new TimelineEntity.Identifier("app", "id_1")).toString(),
        result.getIdentifier().toString());
    assertEquals((Long) 1425016502000L, result.getCreatedTime());
    assertEquals(3, result.getConfigs().size());
    assertEquals(3, result.getMetrics().size());
    // All fields including events will be returned.
    assertEquals(2, result.getEvents().size());
}
/**
 * Picks the statistics node a flow rule applies to, based on the rule's
 * limitApp (specific origin, "default", or "other") and its relation strategy.
 * Returns null when the rule does not apply to the current request origin.
 */
static Node selectNodeByRequesterAndStrategy(/*@NonNull*/ FlowRule rule, Context context, DefaultNode node) {
    // The limit app should not be empty.
    String limitApp = rule.getLimitApp();
    int strategy = rule.getStrategy();
    String origin = context.getOrigin();
    if (limitApp.equals(origin) && filterOrigin(origin)) {
        if (strategy == RuleConstant.STRATEGY_DIRECT) {
            // Matches limit origin, return origin statistic node.
            return context.getOriginNode();
        }
        return selectReferenceNode(rule, context, node);
    } else if (RuleConstant.LIMIT_APP_DEFAULT.equals(limitApp)) {
        if (strategy == RuleConstant.STRATEGY_DIRECT) {
            // Return the cluster node.
            return node.getClusterNode();
        }
        return selectReferenceNode(rule, context, node);
    } else if (RuleConstant.LIMIT_APP_OTHER.equals(limitApp)
            && FlowRuleManager.isOtherOrigin(origin, rule.getResource())) {
        // "other" only applies when the origin is not named by any other rule
        // on the same resource.
        if (strategy == RuleConstant.STRATEGY_DIRECT) {
            return context.getOriginNode();
        }
        return selectReferenceNode(rule, context, node);
    }
    return null;
}
// An "other"-scoped rule must select the origin node for origins not named by
// any other rule, and select nothing for an origin covered by a specific rule.
@Test
public void testOtherOriginFlowSelectNode() {
    String originA = "appA";
    String originB = "appB";
    DefaultNode node = mock(DefaultNode.class);
    DefaultNode originNode = mock(DefaultNode.class);
    ClusterNode cn = mock(ClusterNode.class);
    when(node.getClusterNode()).thenReturn(cn);
    Context context = mock(Context.class);
    when(context.getOriginNode()).thenReturn(originNode);
    FlowRule ruleA = new FlowRule("testOtherOriginFlowSelectNode").setCount(1);
    ruleA.setLimitApp(originA);
    FlowRule ruleB = new FlowRule("testOtherOriginFlowSelectNode").setCount(2);
    ruleB.setLimitApp(RuleConstant.LIMIT_APP_OTHER);
    FlowRuleManager.loadRules(Arrays.asList(ruleA, ruleB));
    // Origin matches other, return the origin node.
    when(context.getOrigin()).thenReturn(originB);
    assertEquals(originNode, FlowRuleChecker.selectNodeByRequesterAndStrategy(ruleB, context, node));
    // Origin matches limitApp of an existing rule, so no nodes are selected.
    when(context.getOrigin()).thenReturn(originA);
    assertNull(FlowRuleChecker.selectNodeByRequesterAndStrategy(ruleB, context, node));
}
/**
 * Handles a ZooKeeper watch event for this elector: connection-state changes
 * (connect/disconnect/expire/SASL) and lock-node changes (deleted/data changed).
 * Stale-client events are dropped; unexpected states are fatal.
 */
synchronized void processWatchEvent(ZooKeeper zk, WatchedEvent event) {
    Event.EventType eventType = event.getType();
    if (isStaleClient(zk)) return;
    if (LOG.isDebugEnabled()) {
        LOG.debug("Watcher event type: " + eventType + " with state:"
            + event.getState() + " for path:" + event.getPath()
            + " connectionState: " + zkConnectionState
            + " for " + this);
    }
    if (eventType == Event.EventType.None) {
        // the connection state has changed
        switch (event.getState()) {
        case SyncConnected:
            LOG.info("Session connected.");
            // if the listener was asked to move to safe state then it needs to
            // be undone
            ConnectionState prevConnectionState = zkConnectionState;
            zkConnectionState = ConnectionState.CONNECTED;
            if (prevConnectionState == ConnectionState.DISCONNECTED && wantToBeInElection) {
                monitorActiveStatus();
            }
            break;
        case Disconnected:
            LOG.info("Session disconnected. Entering neutral mode...");
            // ask the app to move to safe state because zookeeper connection
            // is not active and we dont know our state
            zkConnectionState = ConnectionState.DISCONNECTED;
            enterNeutralMode();
            break;
        case Expired:
            // the connection got terminated because of session timeout
            // call listener to reconnect
            LOG.info("Session expired. Entering neutral mode and rejoining...");
            enterNeutralMode();
            reJoinElection(0);
            break;
        case SaslAuthenticated:
            LOG.info("Successfully authenticated to ZooKeeper using SASL.");
            break;
        default:
            fatalError("Unexpected Zookeeper watch event state: " + event.getState());
            break;
        }
        return;
    }
    // a watch on lock path in zookeeper has fired. so something has changed on
    // the lock. ideally we should check that the path is the same as the lock
    // path but trusting zookeeper for now
    String path = event.getPath();
    if (path != null) {
        switch (eventType) {
        case NodeDeleted:
            // Lock holder went away: drop to neutral if we were active, then
            // try to take the lock ourselves.
            if (state == State.ACTIVE) {
                enterNeutralMode();
            }
            joinElectionInternal();
            break;
        case NodeDataChanged:
            monitorActiveStatus();
            break;
        default:
            if (LOG.isDebugEnabled()) {
                LOG.debug("Unexpected node event: " + eventType + " for path: " + path);
            }
            monitorActiveStatus();
        }
        return;
    }
    // some unexpected error has occurred
    fatalError("Unexpected watch error from Zookeeper");
}
// Session expiry before the service is reported healthy (no appData yet) must
// not attempt to create the lock node, i.e. must not join the election.
@Test
public void testBecomeActiveBeforeServiceHealthy() throws Exception {
    mockNoPriorActive();
    WatchedEvent mockEvent = Mockito.mock(WatchedEvent.class);
    Mockito.when(mockEvent.getType()).thenReturn(Event.EventType.None);
    // session expired should enter safe mode
    // But for first time, before the SERVICE_HEALTY i.e. appData is set,
    // should not enter the election.
    Mockito.when(mockEvent.getState()).thenReturn(Event.KeeperState.Expired);
    elector.processWatchEvent(mockZK, mockEvent);
    // joinElection should not be called.
    Mockito.verify(mockZK, Mockito.times(0)).create(ZK_LOCK_NAME, null,
        Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL, elector, mockZK);
}
// Returns true when the string matches the URL_REGEX pattern; delegates to the
// shared isMatch helper.
public static boolean isURL(String url) {
    return isMatch(URL_REGEX, url);
}
// http/https/ftp URLs are accepted; a malformed scheme (missing "//") is rejected.
@Test
public void testURl() {
    Assert.assertEquals(true, PatternKit.isURL("http://biezhi.me"));
    Assert.assertEquals(true, PatternKit.isURL("https://biezhi.me"));
    Assert.assertEquals(true, PatternKit.isURL("ftp://192.168.1.2"));
    Assert.assertEquals(false, PatternKit.isURL("http:192.168.1.2"));
}
@Override public Sensor addLatencyRateTotalSensor(final String scopeName, final String entityName, final String operationName, final Sensor.RecordingLevel recordingLevel, final String... tags) { final String threadId = Thread.currentThread().getName(); final String group = groupNameFromScope(scopeName); final Map<String, String> tagMap = customizedTags(threadId, scopeName, entityName, tags); final Sensor sensor = customInvocationRateAndCountSensor(threadId, group, entityName, operationName, tagMap, recordingLevel); addAvgAndMaxToSensor( sensor, group, tagMap, operationName + LATENCY_SUFFIX, AVG_LATENCY_DESCRIPTION + operationName, MAX_LATENCY_DESCRIPTION + operationName ); return sensor; }
@Test public void shouldAddLatencyRateTotalSensor() { final StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(metrics, CLIENT_ID, VERSION, time); shouldAddCustomSensor( streamsMetrics.addLatencyRateTotalSensor(SCOPE_NAME, ENTITY_NAME, OPERATION_NAME, RecordingLevel.DEBUG), streamsMetrics, Arrays.asList( OPERATION_NAME + LATENCY_SUFFIX + AVG_SUFFIX, OPERATION_NAME + LATENCY_SUFFIX + MAX_SUFFIX, OPERATION_NAME + TOTAL_SUFFIX, OPERATION_NAME + RATE_SUFFIX ) ); }
private String toStringString( boolean pad ) { String retval = null; if ( value == null ) { return null; } if ( value.getLength() <= 0 ) { // No length specified! if ( isNull() || value.getString() == null ) { retval = Const.NULL_STRING; } else { retval = value.getString(); } } else { if ( pad ) { StringBuilder ret = null; if ( isNull() || value.getString() == null ) { ret = new StringBuilder( Const.NULL_STRING ); } else { ret = new StringBuilder( value.getString() ); } int length = value.getLength(); if ( length > 16384 ) { length = 16384; // otherwise we get OUT OF MEMORY errors for CLOBS. } Const.rightPad( ret, length ); retval = ret.toString(); } else { if ( isNull() || value.getString() == null ) { retval = Const.NULL_STRING; } else { retval = value.getString(); } } } return retval; }
@Test public void testToStringString() { String result = null; Value vs = new Value( "Name", Value.VALUE_TYPE_STRING ); vs.setValue( "test string" ); result = vs.toString( true ); assertEquals( "test string", result ); vs.setLength( 20 ); result = vs.toString( true ); // padding assertEquals( "test string ", result ); vs.setLength( 4 ); result = vs.toString( true ); // truncate assertEquals( "test", result ); vs.setLength( 0 ); result = vs.toString( true ); // on 0 => full string assertEquals( "test string", result ); // no padding result = vs.toString( false ); assertEquals( "test string", result ); vs.setLength( 20 ); result = vs.toString( false ); assertEquals( "test string", result ); vs.setLength( 4 ); result = vs.toString( false ); assertEquals( "test string", result ); vs.setLength( 0 ); result = vs.toString( false ); assertEquals( "test string", result ); vs.setLength( 4 ); vs.setNull(); result = vs.toString( false ); assertEquals( "", result ); Value vs1 = new Value( "Name", Value.VALUE_TYPE_STRING ); assertEquals( "", vs1.toString() ); // Just to get 100% coverage Value vs2 = new Value( "Name", Value.VALUE_TYPE_NONE ); assertEquals( "", vs2.toString() ); }
@Override public Set<RuleDescriptionSectionDto> generateSections(RulesDefinition.Rule rule) { Set<RuleDescriptionSectionDto> advancedSections = rule.ruleDescriptionSections().stream() .map(this::toRuleDescriptionSectionDto) .collect(Collectors.toSet()); return addLegacySectionToAdvancedSections(advancedSections, rule); }
@Test public void generateSections_whenTwoSections_createsTwoSectionsAndDefault() { when(rule.ruleDescriptionSections()).thenReturn(List.of(SECTION_1, SECTION_2)); Set<RuleDescriptionSectionDto> ruleDescriptionSectionDtos = generator.generateSections(rule); assertThat(ruleDescriptionSectionDtos) .usingRecursiveFieldByFieldElementComparator() .containsExactlyInAnyOrder(EXPECTED_SECTION_1, EXPECTED_SECTION_2, LEGACY_SECTION); }
@Deprecated @Restricted(DoNotUse.class) public static String resolve(ConfigurationContext context, String toInterpolate) { return context.getSecretSourceResolver().resolve(toInterpolate); }
@Test public void resolve_FileWithSpace() throws Exception { String path = getPath("some secret.json").toAbsolutePath().toString(); String output = resolve("${readFile:" + path + "}"); assertThat(output, equalTo(FILE.lookup(path))); assertThat(output, containsString("\"Our secret\": \"Hello World\"")); }
public static String getAttributesXml( Map<String, Map<String, String>> attributesMap ) { return getAttributesXml( attributesMap, XML_TAG ); }
@Test public void testGetAttributesXml_DefaultTag_EmptyMap() { try ( MockedStatic<AttributesUtil> attributesUtilMockedStatic = mockStatic( AttributesUtil.class ) ) { attributesUtilMockedStatic.when( () -> AttributesUtil.getAttributesXml( anyMap() ) ).thenCallRealMethod(); attributesUtilMockedStatic.when( () -> AttributesUtil.getAttributesXml( anyMap(), anyString() ) ) .thenCallRealMethod(); Map<String, Map<String, String>> attributesMap = new HashMap<>(); String attributesXml = AttributesUtil.getAttributesXml( attributesMap ); assertNotNull( attributesXml ); // Check that it's not an empty XML fragment assertTrue( attributesXml.contains( AttributesUtil.XML_TAG ) ); } }
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) { return api.send(request); }
@Test public void getMe() { GetMeResponse response = bot.execute(new GetMe()); User user = response.user(); UserTest.checkUser(user); assertTrue(user.isBot()); assertFalse(user.canJoinGroups()); // can be changed via BotFather assertTrue(user.canReadAllGroupMessages()); assertTrue(user.supportsInlineQueries()); assertFalse(user.isPremium()); assertFalse(user.addedToAttachmentMenu()); }
@Override public void emit(OutboundPacket packet) { DeviceId devId = packet.sendThrough(); String scheme = devId.toString().split(":")[0]; if (!scheme.equals(this.id().scheme())) { throw new IllegalArgumentException( "Don't know how to handle Device with scheme " + scheme); } Dpid dpid = Dpid.dpid(devId.uri()); OpenFlowSwitch sw = controller.getSwitch(dpid); if (sw == null) { log.warn("Device {} isn't available?", devId); return; } OFPort inPort; if (packet.inPort() != null) { inPort = portDesc(packet.inPort()).getPortNo(); } else { inPort = OFPort.CONTROLLER; } //Ethernet eth = new Ethernet(); //eth.deserialize(packet.data().array(), 0, packet.data().array().length); OFPortDesc p = null; for (Instruction inst : packet.treatment().allInstructions()) { if (inst.type().equals(Instruction.Type.OUTPUT)) { p = portDesc(((OutputInstruction) inst).port()); OFPacketOut po = packetOut(sw, packet.data().array(), p.getPortNo(), inPort); sw.sendMsg(po); } } }
/**
 * Exercises the provider's emit(): a normal packet-out, an explicit IN_PORT
 * treatment, and a packet addressed to a missing switch.
 */
@Test
public void emit() {

    MacAddress mac1 = MacAddress.of("00:00:00:11:00:01");
    MacAddress mac2 = MacAddress.of("00:00:00:22:00:02");

    // Minimal ARP-over-Ethernet payload carried through the provider.
    ARP arp = new ARP();
    arp.setSenderProtocolAddress(ANY)
            .setSenderHardwareAddress(mac1.getBytes())
            .setTargetHardwareAddress(mac2.getBytes())
            .setTargetProtocolAddress(ANY)
            .setHardwareType((short) 0)
            .setProtocolType((short) 0)
            .setHardwareAddressLength((byte) 6)
            .setProtocolAddressLength((byte) 4)
            .setOpCode((byte) 0);

    Ethernet eth = new Ethernet();
    eth.setVlanID(VLANID)
            .setEtherType(Ethernet.TYPE_ARP)
            .setSourceMACAddress("00:00:00:11:00:01")
            .setDestinationMACAddress("00:00:00:22:00:02")
            .setPayload(arp);

    //the should-be working setup.
    OutboundPacket passPkt = outPacket(DID, TR, eth);
    sw.setRole(RoleState.MASTER);
    provider.emit(passPkt);
    assertEquals("invalid switch", sw, controller.current);
    assertEquals("message not sent", PLIST.size(), sw.sent.size());
    sw.sent.clear();

    //Send with different IN_PORT
    OutboundPacket inPortPkt = outPacket(DID, TR_ALL, eth, IN_PORT);
    sw.setRole(RoleState.MASTER);
    provider.emit(inPortPkt);
    assertEquals("invalid switch", sw, controller.current);
    assertEquals("message not sent", PLIST_ALL.size(), sw.sent.size());
    OFMessage ofMessage = sw.sent.get(0);
    assertEquals("Wrong OF message type", OFType.PACKET_OUT, ofMessage.getType());
    OFPacketOut packetOut = (OFPacketOut) ofMessage;
    assertEquals("Wrong in port", OFPort.of(IN_PORT_PN), packetOut.getInPort());
    assertEquals("Unexpected number of actions", 1, packetOut.getActions().size());
    OFAction ofAction = packetOut.getActions().get(0);
    assertEquals("Packet out action should be type output", OFActionType.OUTPUT, ofAction.getType());
    OFActionOutput ofActionOutput = (OFActionOutput) ofAction;
    assertEquals("Output should be ALL", OFPort.ALL, ofActionOutput.getPort());
    sw.sent.clear();

    // Disabled scenario: emitting with a non-MASTER role.
    //wrong Role
    //sw.setRole(RoleState.SLAVE);
    //provider.emit(passPkt);
    //assertEquals("invalid switch", sw, controller.current);
    //assertEquals("message sent incorrectly", 0, sw.sent.size());
    //sw.setRole(RoleState.MASTER);

    //missing switch
    OutboundPacket swFailPkt = outPacket(DID_MISSING, TR, eth);
    provider.emit(swFailPkt);
    assertNull("invalid switch", controller.current);
    assertEquals("message sent incorrectly", 0, sw.sent.size());

    // Disabled scenario: output to a missing port.
    //to missing port
    //OutboundPacket portFailPkt = outPacket(DID, TR_MISSING, eth);
    //provider.emit(portFailPkt);
    //assertEquals("extra message sent", 1, sw.sent.size());
}
public static String getPrxName(final MetaData metaData) { return metaData.getPath().replace("/", "") + metaData.getMethodName() + "Prx"; }
@Test public void testGetPrxName() { final MetaData metaData = new MetaData("id", "appName", "contextPath", "/path", "rpcType", "serviceName", "methodName", "parameterTypes", "rpcExt", false); final String result = PrxInfoUtil.getPrxName(metaData); assertEquals("pathmethodNamePrx", result); }
@Override public ValidationTaskResult validateImpl(Map<String, String> optionMap) throws InterruptedException { String hadoopVersion; try { hadoopVersion = getHadoopVersion(); } catch (IOException e) { return new ValidationTaskResult(ValidationUtils.State.FAILED, getName(), String.format("Failed to get hadoop version:%n%s.", ExceptionUtils.asPlainText(e)), "Please check if hadoop is on your PATH."); } String version = mConf.getString(PropertyKey.UNDERFS_VERSION); for (String prefix : new String[] {CDH_PREFIX, HADOOP_PREFIX}) { if (version.startsWith(prefix)) { version = version.substring(prefix.length()); break; } } if (hadoopVersion.contains(version)) { return new ValidationTaskResult(ValidationUtils.State.OK, getName(), String.format("Hadoop version %s contains UFS version defined in alluxio %s=%s.", hadoopVersion, PropertyKey.UNDERFS_VERSION, version), ""); } return new ValidationTaskResult(ValidationUtils.State.FAILED, getName(), String.format("Hadoop version %s does not match %s=%s.", hadoopVersion, PropertyKey.UNDERFS_VERSION, version), String.format("Please configure %s to match the HDFS version.", PropertyKey.UNDERFS_VERSION)); }
@Test public void versionNotMatchedDefault() throws Exception { PowerMockito.mockStatic(ShellUtils.class); String[] cmd = new String[]{"hadoop", "version"}; BDDMockito.given(ShellUtils.execCommand(cmd)).willReturn("Hadoop 2.2"); HdfsVersionValidationTask task = new HdfsVersionValidationTask(CONF); ValidationTaskResult result = task.validateImpl(ImmutableMap.of()); assertEquals(ValidationUtils.State.FAILED, result.getState()); assertThat(result.getResult(), containsString("2.2 does not match alluxio.underfs.version")); assertThat(result.getAdvice(), containsString("configure alluxio.underfs.version")); }
public int getMaxConnections() { return maxConnections; }
@Test public void testGetMaximumMongoDBConnectionsDefault() throws RepositoryException, ValidationException { MongoDbConfiguration configuration = new MongoDbConfiguration(); new JadConfig(new InMemoryRepository(), configuration).process(); assertEquals(1000, configuration.getMaxConnections()); }
public FontMetrics parse() throws IOException { return parseFontMetric(false); }
@Test void testHelveticaFontMetrics() throws IOException { AFMParser parser = new AFMParser( new FileInputStream("src/test/resources/afm/Helvetica.afm")); checkHelveticaFontMetrics(parser.parse()); }
@Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof FileLocationOptions)) { return false; } FileLocationOptions that = (FileLocationOptions) o; return Objects.equal(mOffset, that.mOffset); }
@Test public void equalsTest() throws Exception { CommonUtils.testEquals(FileLocationOptions.class); }
@Override public <T extends State> T state(StateNamespace namespace, StateTag<T> address) { return workItemState.get(namespace, address, StateContexts.nullContext()); }
@Test public void testMultimapEntriesAndKeysMergeLocalRemove() { final String tag = "multimap"; StateTag<MultimapState<byte[], Integer>> addr = StateTags.multimap(tag, ByteArrayCoder.of(), VarIntCoder.of()); MultimapState<byte[], Integer> multimapState = underTest.state(NAMESPACE, addr); final byte[] key1 = "key1".getBytes(StandardCharsets.UTF_8); final byte[] key2 = "key2".getBytes(StandardCharsets.UTF_8); final byte[] key3 = "key3".getBytes(StandardCharsets.UTF_8); SettableFuture<Iterable<Map.Entry<ByteString, Iterable<Integer>>>> entriesFuture = SettableFuture.create(); when(mockReader.multimapFetchAllFuture( false, key(NAMESPACE, tag), STATE_FAMILY, VarIntCoder.of())) .thenReturn(entriesFuture); SettableFuture<Iterable<Map.Entry<ByteString, Iterable<Integer>>>> keysFuture = SettableFuture.create(); when(mockReader.multimapFetchAllFuture( true, key(NAMESPACE, tag), STATE_FAMILY, VarIntCoder.of())) .thenReturn(keysFuture); ReadableState<Iterable<Map.Entry<byte[], Integer>>> entriesResult = multimapState.entries().readLater(); ReadableState<Iterable<byte[]>> keysResult = multimapState.keys().readLater(); waitAndSet( entriesFuture, Arrays.asList(multimapEntry(key1, 1, 2, 3), multimapEntry(key2, 2, 3, 4)), 30); waitAndSet(keysFuture, Arrays.asList(multimapEntry(key1), multimapEntry(key2)), 30); multimapState.remove(dup(key1)); multimapState.put(key2, 8); multimapState.put(dup(key3), 8); Iterable<Map.Entry<byte[], Integer>> entries = entriesResult.read(); assertEquals(5, Iterables.size(entries)); assertThat( entries, Matchers.containsInAnyOrder( multimapEntryMatcher(key2, 4), multimapEntryMatcher(key2, 2), multimapEntryMatcher(key2, 3), multimapEntryMatcher(key2, 8), multimapEntryMatcher(key3, 8))); Iterable<byte[]> keys = keysResult.read(); assertThat(keys, Matchers.containsInAnyOrder(key2, key3)); }
@Override public <T extends State> T state(StateNamespace namespace, StateTag<T> address) { return workItemState.get(namespace, address, StateContexts.nullContext()); }
@Test public void testWatermarkAddBeforeReadLatest() throws Exception { StateTag<WatermarkHoldState> addr = StateTags.watermarkStateInternal("watermark", TimestampCombiner.LATEST); WatermarkHoldState bag = underTest.state(NAMESPACE, addr); SettableFuture<Instant> future = SettableFuture.create(); when(mockReader.watermarkFuture(key(NAMESPACE, "watermark"), STATE_FAMILY)).thenReturn(future); // Suggesting we will read it later should get a future from the underlying WindmillStateReader bag.readLater(); // Actually reading it will request another future, and get the same one, from // WindmillStateReader bag.add(new Instant(3000)); waitAndSet(future, new Instant(2000), 200); assertThat(bag.read(), Matchers.equalTo(new Instant(3000))); Mockito.verify(mockReader, times(2)).watermarkFuture(key(NAMESPACE, "watermark"), STATE_FAMILY); Mockito.verifyNoMoreInteractions(mockReader); // Adding another value doesn't create another future, but does update the result. bag.add(new Instant(3000)); assertThat(bag.read(), Matchers.equalTo(new Instant(3000))); Mockito.verifyNoMoreInteractions(mockReader); }
protected boolean update(final Local file, final NSImage icon) { synchronized(NSWorkspace.class) { // Specify 0 if you want to generate icons in all available icon representation formats if(workspace.setIcon_forFile_options(icon, file.getAbsolute(), new NSUInteger(0))) { workspace.noteFileSystemChanged(new NFDNormalizer().normalize(file.getAbsolute()).toString()); return true; } return false; } }
@Test public void testSetProgressFolder() throws Exception { final WorkspaceIconService s = new WorkspaceIconService(); final Local file = new Local(PreferencesFactory.get().getProperty("tmp.dir"), UUID.randomUUID().toString()); new DefaultLocalDirectoryFeature().mkdir(file); assertTrue(s.update(file, NSImage.imageWithContentsOfFile("../../img/download0.icns"))); }
@Override public AppResponse process(Flow flow, AppSessionRequest request) { Map<String, Object> result = new HashMap<>(digidClient.getAccountRequestGbaStatus(appSession.getRegistrationId())); if (result.get(lowerUnderscore(STATUS)).equals("OK")) { return new OkResponse(); } else if (result.get(lowerUnderscore(STATUS)).equals("PENDING")) { setValid(false); // gba check in progress, do not transition to next step yet return new StatusResponse("PENDING"); } else { if (result.get(lowerUnderscore(STATUS)).equals("NOK") && result.get(ERROR) != null) { return new PollBrpResponse((String) result.get(ERROR), result); } return new NokResponse(); } }
@Test void processNOKTest() { when(digidClientMock.getAccountRequestGbaStatus(1337L)).thenReturn(Map.of( lowerUnderscore(STATUS), "NOK" )); AppResponse appResponse = pollBrp.process(flowMock, null); assertTrue(appResponse instanceof NokResponse); assertEquals("NOK", ((NokResponse) appResponse).getStatus()); }
public List<Block> chunk(String resourceId, List<TokensLine> fragments) { List<TokensLine> filtered = new ArrayList<>(); int i = 0; while (i < fragments.size()) { TokensLine first = fragments.get(i); int j = i + 1; while (j < fragments.size() && fragments.get(j).getValue().equals(first.getValue())) { j++; } filtered.add(fragments.get(i)); if (i < j - 1) { filtered.add(fragments.get(j - 1)); } i = j; } fragments = filtered; if (fragments.size() < blockSize) { return new ArrayList<>(); } TokensLine[] fragmentsArr = fragments.toArray(new TokensLine[fragments.size()]); List<Block> blocks = new ArrayList<>(fragmentsArr.length - blockSize + 1); long hash = 0; int first = 0; int last = 0; for (; last < blockSize - 1; last++) { hash = hash * PRIME_BASE + fragmentsArr[last].getHashCode(); } Block.Builder blockBuilder = Block.builder().setResourceId(resourceId); for (; last < fragmentsArr.length; last++, first++) { TokensLine firstFragment = fragmentsArr[first]; TokensLine lastFragment = fragmentsArr[last]; // add last statement to hash hash = hash * PRIME_BASE + lastFragment.getHashCode(); // create block Block block = blockBuilder .setBlockHash(new ByteArray(hash)) .setIndexInFile(first) .setLines(firstFragment.getStartLine(), lastFragment.getEndLine()) .setUnit(firstFragment.getStartUnit(), lastFragment.getEndUnit()) .build(); blocks.add(block); // remove first statement from hash hash -= power * firstFragment.getHashCode(); } return blocks; }
@Test public void shouldBuildBlocks() { TokensLine line1 = new TokensLine(0, 9, 1, Character.toString((char) 1)); TokensLine line2 = new TokensLine(10, 19, 2, Character.toString((char) 2)); TokensLine line3 = new TokensLine(20, 29, 3, Character.toString((char) 3)); List<Block> blocks = new PmdBlockChunker(2).chunk("resourceId", Arrays.asList(line1, line2, line3)); assertThat(blocks.size(), is(2)); Block block = blocks.get(0); // assertThat(block.getLengthInUnits(), is(11)); assertThat(block.getStartLine(), is(1)); assertThat(block.getEndLine(), is(2)); assertThat(block.getBlockHash(), is(new ByteArray(1L * 31 + 2))); block = blocks.get(1); // assertThat(block.getLengthInUnits(), is(33)); assertThat(block.getStartLine(), is(2)); assertThat(block.getEndLine(), is(3)); assertThat(block.getBlockHash(), is(new ByteArray(2L * 31 + 3))); }
public List<DirectEncryptedPseudonymType> provideDep(ProvideDEPsRequest request) throws BsnkException { try { return ((BSNKDEPPort) this.bindingProvider).bsnkProvideDEPs(request).getDirectEncryptedPseudonyms(); } catch (SOAPFaultException ex) { if (ex.getCause().getMessage().equals("The signature or decryption was invalid")) { throw new BsnkException("SignatureValidationFault", ex.getCause().getMessage(), ex.getCause()); } throw new BsnkException("BSNKProvideDEPFault", ex.getMessage(), ex); } catch (WebServiceException ex) { throw new BsnkException("Could not send bsnkProvidePPPPCAOptimized", ex.getCause().getMessage(), ex.getCause()); } catch (BSNKProvideDEPFault ex) { throw new BsnkException("BSNKProvideDEPFault", ex.getCause().getMessage(), ex.getCause()); } }
@Test public void testResponseWithX509KeyIdentifier() throws IOException { String ppResponseTemplate = Resources.toString(provideDepResponseFile.getURL(), StandardCharsets.UTF_8); this.mockResponseBody = this.signingHelper.sign(ppResponseTemplate, WSConstants.X509_KEY_IDENTIFIER); setupWireMock(); try { assertEquals(1, client.provideDep(request).size()); } catch (BsnkException ex) { fail(ex.getMessage()); } }
public static void validateValue(Schema schema, Object value) { validateValue(null, schema, value); }
@Test public void testValidateValueMismatchTime() { assertThrows(DataException.class, () -> ConnectSchema.validateValue(Time.SCHEMA, 1000L)); }
public List<String> toPrefix(String in) { List<String> tokens = buildTokens(alignINClause(in)); List<String> output = new ArrayList<>(); List<String> stack = new ArrayList<>(); for (String token : tokens) { if (isOperand(token)) { if (token.equals(")")) { while (openParanthesesFound(stack)) { output.add(stack.remove(stack.size() - 1)); } if (!stack.isEmpty()) { // temporarily fix for issue #189 stack.remove(stack.size() - 1); } } else { while (openParanthesesFound(stack) && !hasHigherPrecedence(token, stack.get(stack.size() - 1))) { output.add(stack.remove(stack.size() - 1)); } stack.add(token); } } else { output.add(token); } } while (!stack.isEmpty()) { output.add(stack.remove(stack.size() - 1)); } return output; }
@Test public void testNotEqual1() { String query = "b != 30"; List<String> list = parser.toPrefix(query); assertEquals(Arrays.asList("b", "30", "!="), list); }
@Override public final Environment getEnvironment() { return environment; }
@Test void testMailboxMetricsMeasurement() throws Exception { final int numMails = 10, sleepTime = 5; StreamTaskMailboxTestHarnessBuilder<Integer> builder = new StreamTaskMailboxTestHarnessBuilder<>( OneInputStreamTask::new, BasicTypeInfo.INT_TYPE_INFO) .addInput(BasicTypeInfo.INT_TYPE_INFO) .setupOutputForSingletonOperatorChain( new TestBoundedOneInputStreamOperator()); try (StreamTaskMailboxTestHarness<Integer> harness = builder.build()) { Histogram mailboxLatencyMetric = harness.streamTask .getEnvironment() .getMetricGroup() .getIOMetricGroup() .getMailboxLatency(); Gauge<Integer> mailboxSizeMetric = harness.streamTask .getEnvironment() .getMetricGroup() .getIOMetricGroup() .getMailboxSize(); long startTime = SystemClock.getInstance().relativeTimeMillis(); harness.streamTask.mailboxProcessor.getMailboxMetricsControl().measureMailboxLatency(); for (int i = 0; i < numMails; ++i) { harness.streamTask.mainMailboxExecutor.execute( () -> Thread.sleep(sleepTime), "add value"); } harness.streamTask.mailboxProcessor.getMailboxMetricsControl().measureMailboxLatency(); assertThat(mailboxSizeMetric.getValue()).isGreaterThanOrEqualTo(numMails); assertThat(mailboxLatencyMetric.getCount()).isZero(); harness.processAll(); long endTime = SystemClock.getInstance().relativeTimeMillis(); assertThat(mailboxSizeMetric.getValue()).isZero(); assertThat(mailboxLatencyMetric.getCount()).isEqualTo(2L); assertThat(mailboxLatencyMetric.getStatistics().getMax()) .isBetween((long) (sleepTime * numMails), endTime - startTime); } }
@Override public String name() { return name; }
@Test public void testValidDomainNameTrailingDot() { String name = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."; AbstractDnsRecord record = new AbstractDnsRecord(name, DnsRecordType.A, 0) { }; assertEquals(name, record.name()); }
@Override public KTable<K, V> reduce(final Reducer<V> adder, final Reducer<V> subtractor, final Materialized<K, V, KeyValueStore<Bytes, byte[]>> materialized) { return reduce(adder, subtractor, NamedInternal.empty(), materialized); }
@Test public void shouldReduceAndMaterializeResults() { final KeyValueMapper<String, Number, KeyValue<String, Integer>> intProjection = (key, value) -> KeyValue.pair(key, value.intValue()); final KTable<String, Integer> reduced = builder .table( topic, Consumed.with(Serdes.String(), Serdes.Double())) .groupBy(intProjection) .reduce( MockReducer.INTEGER_ADDER, MockReducer.INTEGER_SUBTRACTOR, Materialized.<String, Integer, KeyValueStore<Bytes, byte[]>>as("reduce") .withKeySerde(Serdes.String()) .withValueSerde(Serdes.Integer())); final MockApiProcessorSupplier<String, Integer, Void, Void> supplier = getReducedResults(reduced); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { assertReduced(supplier.theCapturedProcessor().lastValueAndTimestampPerKey(), topic, driver); { final KeyValueStore<String, Integer> reduce = driver.getKeyValueStore("reduce"); assertThat(reduce.get("A"), equalTo(5)); assertThat(reduce.get("B"), equalTo(6)); } { final KeyValueStore<String, ValueAndTimestamp<Integer>> reduce = driver.getTimestampedKeyValueStore("reduce"); assertThat(reduce.get("A"), equalTo(ValueAndTimestamp.make(5, 50L))); assertThat(reduce.get("B"), equalTo(ValueAndTimestamp.make(6, 30L))); } } }
@Override public String getTaskExecutionBody(TaskConfig config, TaskExecutionContext taskExecutionContext) { Map requestBody = new HashMap(); Map contextMap = new HashMap(); contextMap.put("environmentVariables", taskExecutionContext.environment().asMap()); contextMap.put("workingDirectory", taskExecutionContext.workingDir()); requestBody.put("context", contextMap); requestBody.put("config", configPropertiesAsMap(config)); return new Gson().toJson(requestBody); }
@Test public void shouldReturnRequestBodyForTaskExecution() { TaskExecutionContext context = mock(TaskExecutionContext.class); String workingDir = "working-dir"; TaskConfig config = new TaskConfig(); config.add(new TaskConfigProperty("Property1", "Value1")); config.add(new TaskConfigProperty("Property2", "Value2")); when(context.workingDir()).thenReturn(workingDir); when(context.environment()).thenReturn(getEnvironmentVariables()); String requestBody = new JsonBasedTaskExtensionHandler_V1().getTaskExecutionBody(config, context); Map result = (Map) new GsonBuilder().create().fromJson(requestBody, Object.class); Map taskExecutionContextFromRequest = (Map) result.get("context"); assertThat(taskExecutionContextFromRequest.get("workingDirectory"), is(workingDir)); Map environmentVariables = (Map) taskExecutionContextFromRequest.get("environmentVariables"); assertThat(environmentVariables.size(), is(2)); assertThat(environmentVariables.get("ENV1").toString(), is("VAL1")); assertThat(environmentVariables.get("ENV2").toString(), is("VAL2")); Map<String,Object> taskConfigMap = (Map<String,Object>) result.get("config"); assertThat(taskConfigMap.size(), is(2)); Map property1 = (Map) taskConfigMap.get("Property1"); Map property2 = (Map) taskConfigMap.get("Property2"); assertThat(property1.get("value").toString(), is("Value1")); assertThat(property2.get("value").toString(), is("Value2")); }
public void putUserProperty(final String name, final String value) { if (MessageConst.STRING_HASH_SET.contains(name)) { throw new RuntimeException(String.format( "The Property<%s> is used by system, input another please", name)); } if (value == null || value.trim().isEmpty() || name == null || name.trim().isEmpty()) { throw new IllegalArgumentException( "The name or value of property can not be null or blank string!" ); } this.putProperty(name, value); }
@Test(expected = RuntimeException.class) public void putUserPropertyWithRuntimeException() throws Exception { Message m = new Message(); m.putUserProperty(PROPERTY_TRACE_SWITCH, ""); }
@Override public long size() { return mSize.longValue(); }
@Test public void longRunningIterAndCheckpoint() throws Exception { // Manually set this flag, otherwise an exception will be thrown when the exclusive lock // is forced. Configuration.set(PropertyKey.TEST_MODE, false); prepareBlocks(FILE_NUMBER); // Create a bunch of long running iterators on the InodeStore CountDownLatch readerLatch = new CountDownLatch(THREAD_NUMBER); CountDownLatch restoreLatch = new CountDownLatch(1); ArrayBlockingQueue<Exception> errors = new ArrayBlockingQueue<>(THREAD_NUMBER); ArrayBlockingQueue<Integer> results = new ArrayBlockingQueue<>(THREAD_NUMBER); List<Future<Void>> futures = submitIterJob(THREAD_NUMBER, errors, results, readerLatch, restoreLatch); // Await for the 20 threads to be iterating in the middle, then trigger the shutdown event readerLatch.await(); File checkpointFile = File.createTempFile("checkpoint-for-recovery", ""); try (BufferedOutputStream out = new BufferedOutputStream(new FileOutputStream(checkpointFile))) { mStore.writeToCheckpoint(out); } assertTrue(Files.size(checkpointFile.toPath()) > 0); // Verify that the iterators can still run restoreLatch.countDown(); waitForReaders(futures); // All iterators should abort because the RocksDB contents have changed assertEquals(0, errors.size()); long completed = results.stream().filter(n -> n == FILE_NUMBER).count(); assertEquals(THREAD_NUMBER, completed); }
@Override protected void encodeInitialLine(ByteBuf buf, HttpRequest request) throws Exception { ByteBufUtil.copy(request.method().asciiName(), buf); String uri = request.uri(); if (uri.isEmpty()) { // Add " / " as absolute path if uri is not present. // See https://tools.ietf.org/html/rfc2616#section-5.1.2 ByteBufUtil.writeMediumBE(buf, SPACE_SLASH_AND_SPACE_MEDIUM); } else { CharSequence uriCharSequence = uri; boolean needSlash = false; int start = uri.indexOf("://"); if (start != -1 && uri.charAt(0) != SLASH) { start += 3; // Correctly handle query params. // See https://github.com/netty/netty/issues/2732 int index = uri.indexOf(QUESTION_MARK, start); if (index == -1) { if (uri.lastIndexOf(SLASH) < start) { needSlash = true; } } else { if (uri.lastIndexOf(SLASH, index) < start) { uriCharSequence = new StringBuilder(uri).insert(index, SLASH); } } } buf.writeByte(SP).writeCharSequence(uriCharSequence, CharsetUtil.UTF_8); if (needSlash) { // write "/ " after uri ByteBufUtil.writeShortBE(buf, SLASH_AND_SPACE_SHORT); } else { buf.writeByte(SP); } } request.protocolVersion().encode(buf); ByteBufUtil.writeShortBE(buf, CRLF_SHORT); }
@Test public void testQueryStringPath() throws Exception { for (ByteBuf buffer : getBuffers()) { HttpRequestEncoder encoder = new HttpRequestEncoder(); encoder.encodeInitialLine(buffer, new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/?url=http://example.com")); String req = buffer.toString(Charset.forName("US-ASCII")); assertEquals("GET /?url=http://example.com HTTP/1.1\r\n", req); buffer.release(); } }
public static Metric metric(String name) { return MetricsImpl.metric(name, Unit.COUNT); }
@Test public void typicalUsage() { pipeline.readFrom(TestSources.items(5L, 4L, 3L, 2L, 1L, 0L)) .filter(l -> { boolean pass = l % 2 == 0; if (pass) { Metrics.metric("single-flip-flop").decrement(); Metrics.metric("multi-flip-flop").decrement(10); } else { Metrics.metric("dropped").increment(); Metrics.metric("single-flip-flop").increment(); Metrics.metric("multi-flip-flop").increment(10); } Metrics.metric("total").increment(); Metrics.metric("last").set(l); return pass; }) .writeTo(Sinks.noop()); Job job = runPipeline(pipeline.toDag()); JobMetricsChecker checker = new JobMetricsChecker(job); checker.assertSummedMetricValue("dropped", 3); checker.assertSummedMetricValue("total", 6); checker.assertSummedMetricValue("single-flip-flop", 0); checker.assertSummedMetricValue("multi-flip-flop", 0); }
@Override protected CompletableFuture<EmptyResponseBody> handleRequest( @Nonnull final HandlerRequest<EmptyRequestBody> request, @Nonnull final RestfulGateway gateway) throws RestHandlerException { final String jarId = request.getPathParameter(JarIdPathParameter.class); return CompletableFuture.supplyAsync( () -> { final Path jarToDelete = jarDir.resolve(jarId); if (!Files.exists(jarToDelete)) { throw new CompletionException( new RestHandlerException( String.format( "File %s does not exist in %s.", jarId, jarDir), HttpResponseStatus.BAD_REQUEST)); } else { try { Files.delete(jarToDelete); return EmptyResponseBody.getInstance(); } catch (final IOException e) { throw new CompletionException( new RestHandlerException( String.format("Failed to delete jar %s.", jarToDelete), HttpResponseStatus.INTERNAL_SERVER_ERROR, e)); } } }, executor); }
/** Deleting an existing jar by id removes the file from the jar directory. */
@Test
void testDeleteJarById() throws Exception {
    assertThat(Files.exists(jarDir.resolve(TEST_JAR_NAME))).isTrue();
    final HandlerRequest<EmptyRequestBody> request = createRequest(TEST_JAR_NAME);
    jarDeleteHandler.handleRequest(request, restfulGateway).get();
    assertThat(Files.exists(jarDir.resolve(TEST_JAR_NAME))).isFalse();
}
@Udf(description = "Converts a string representation of a date in the given format"
    + " into the number of days since 1970-01-01 00:00:00 UTC/GMT.")
public int stringToDate(
    @UdfParameter(
        description = "The string representation of a date.") final String formattedDate,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.time.format.DateTimeFormatter.") final String formatPattern) {
  // NB: We deliberately do not null-check here, preferring to throw an exception,
  // as there is no sentinel value that could represent a "null" Date.
  try {
    // Formatters are cached per pattern (compiling DateTimeFormatter is expensive).
    final DateTimeFormatter formatter = formatters.get(formatPattern);
    return ((int)LocalDate.parse(formattedDate, formatter).toEpochDay());
  } catch (final ExecutionException | RuntimeException e) {
    throw new KsqlFunctionException("Failed to parse date '" + formattedDate
        + "' with formatter '" + formatPattern
        + "': " + e.getMessage(), e);
  }
}
/** Literal text quoted in the pattern ('Fred') is accepted inside the input. */
@Test
public void shouldSupportEmbeddedChars() {
    // When:
    final Object result = udf.stringToDate("2021-12-01Fred", "yyyy-MM-dd'Fred'");
    // Then: 18962 is the epoch-day of 2021-12-01.
    assertThat(result, is(18962));
}
/** Varargs convenience overload; delegates to the {@code List}-based parse. */
public RuntimeOptionsBuilder parse(String... args) {
    return parse(Arrays.asList(args));
}
/** The deprecated "default_summary" plugin argument still yields the DefaultSummaryPrinter. */
@Test
void creates_default_summary_printer_for_deprecated_default_summary_argument() {
    RuntimeOptions options = parser
        .parse("--plugin default_summary")
        .addDefaultSummaryPrinterIfNotDisabled()
        .build();
    Plugins plugins = new Plugins(new PluginFactory(), options);
    plugins.setEventBusOnEventListenerPlugins(new TimeServiceEventBus(Clock.systemUTC(), UUID::randomUUID));
    assertThat(plugins.getPlugins(), hasItem(plugin("io.cucumber.core.plugin.DefaultSummaryPrinter")));
}
/**
 * Fits deterministic annealing clustering with default hyper-parameters
 * (annealing/iteration/tolerance values delegated below — see the full
 * overload for their meaning).
 */
public static DeterministicAnnealing fit(double[][] data, int Kmax) {
    return fit(data, Kmax, 0.9, 100, 1E-4, 1E-2);
}
/**
 * Clusters the USPS digits and checks (seeded, repeatable) rand-index scores
 * on both training and held-out data, then round-trips the model through
 * serialization.
 */
@Test
public void testUSPS() throws Exception {
    System.out.println("USPS");
    MathEx.setSeed(19650218); // to get repeatable results.
    double[][] x = USPS.x;
    int[] y = USPS.y;
    double[][] testx = USPS.testx;
    int[] testy = USPS.testy;
    DeterministicAnnealing model = DeterministicAnnealing.fit(x, 10, 0.8, 100, 1E-4, 1E-2);
    System.out.println(model);
    double r = RandIndex.of(y, model.y);
    double r2 = AdjustedRandIndex.of(y, model.y);
    System.out.format("Training rand index = %.2f%%, adjusted rand index = %.2f%%%n", 100.0 * r, 100.0 * r2);
    assertEquals(0.8975, r, 1E-4);
    assertEquals(0.4701, r2, 1E-4);
    System.out.format("MI = %.2f%n", MutualInformation.of(y, model.y));
    System.out.format("NMI.joint = %.2f%%%n", 100 * NormalizedMutualInformation.joint(y, model.y));
    System.out.format("NMI.max = %.2f%%%n", 100 * NormalizedMutualInformation.max(y, model.y));
    System.out.format("NMI.min = %.2f%%%n", 100 * NormalizedMutualInformation.min(y, model.y));
    System.out.format("NMI.sum = %.2f%%%n", 100 * NormalizedMutualInformation.sum(y, model.y));
    System.out.format("NMI.sqrt = %.2f%%%n", 100 * NormalizedMutualInformation.sqrt(y, model.y));
    int[] p = new int[testx.length];
    for (int i = 0; i < testx.length; i++) {
        p[i] = model.predict(testx[i]);
    }
    r = RandIndex.of(testy, p);
    r2 = AdjustedRandIndex.of(testy, p);
    System.out.format("Testing rand index = %.2f%%, adjusted rand index = %.2f%%%n", 100.0 * r, 100.0 * r2);
    assertEquals(0.8995, r, 1E-4);
    assertEquals(0.4745, r2, 1E-4);
    // Serialization round-trip must not throw.
    java.nio.file.Path temp = Write.object(model);
    Read.object(temp);
}
/**
 * Normalizes empty strings to {@code null}.
 *
 * @param string the value to normalize, may be {@code null}
 * @return {@code null} when the input is {@code null} or empty, otherwise the input unchanged
 */
public static @Nullable String emptyToNull(@Nullable String string) {
    if (stringIsNullOrEmpty(string)) {
        return null;
    }
    return string;
}
/** emptyToNull maps "" to null and passes non-empty strings through unchanged. */
@Test
public void testStringEmptyToNull() {
    assertNull(StringUtils.emptyToNull(""));
    assertEquals("Test String", StringUtils.emptyToNull("Test String"));
}
/**
 * Hash over all identity-relevant registry-config fields using the
 * conventional 31-multiplier accumulation. Must stay consistent with equals:
 * include exactly the fields equals compares.
 */
@Override
public int hashCode() {
    int result = protocol.hashCode();
    result = 31 * result + (address != null ? address.hashCode() : 0);
    result = 31 * result + (index != null ? index.hashCode() : 0);
    result = 31 * result + (register ? 1 : 0);
    result = 31 * result + (subscribe ? 1 : 0);
    result = 31 * result + timeout;
    result = 31 * result + connectTimeout;
    result = 31 * result + (file != null ? file.hashCode() : 0);
    result = 31 * result + (batch ? 1 : 0);
    result = 31 * result + batchSize;
    result = 31 * result + heartbeatPeriod;
    result = 31 * result + reconnectPeriod;
    result = 31 * result + (parameters != null ? parameters.hashCode() : 0);
    return result;
}
/** Configs differing only in protocol hash differently; identical configs hash the same. */
@Test
public void testHashCode() {
    RegistryConfig config1 = new RegistryConfig();
    RegistryConfig config2 = new RegistryConfig();
    config1.setAddress("127.0.0.1:1234").setProtocol("xxx");
    config2.setAddress("127.0.0.1:1234").setProtocol("yyy");
    Assert.assertFalse(config1.hashCode() == config2.hashCode());
    config2.setProtocol("xxx");
    Assert.assertTrue(config1.hashCode() == config2.hashCode());
}
/** Renders this instance's address as {@code host:port}. */
public String toInetAddr() {
    return String.format("%s:%s", ip, port);
}
/** toInetAddr joins ip and port with a colon. */
@Test
void testToInetAddr() {
    Instance instance = new Instance();
    setInstance(instance);
    assertEquals("1.1.1.1:1000", instance.toInetAddr());
}
/**
 * Returns whether instance metadata is registered for the given service under
 * the given metadata id.
 * NOTE(review): containsKey followed by get is two lookups and not atomic; if
 * instanceMetadataMap is mutated concurrently, a removal between the calls
 * could NPE — confirm whether a single get() with a null check is preferable.
 */
public boolean containInstanceMetadata(Service service, String metadataId) {
    return instanceMetadataMap.containsKey(service) && instanceMetadataMap.get(service).containsKey(metadataId);
}
/** Metadata registered in the fixture is reported as present. */
@Test
void testContainInstanceMetadata() {
    boolean result = namingMetadataManager.containInstanceMetadata(service, METADATA_ID);
    assertTrue(result);
}
/**
 * Transaction-abort callback. Under the write lock: cancels the coordinator
 * (unless this is a sync stream load), marks every channel CANCELLED,
 * transitions the task to CANCELLED and deregisters the txn callback.
 * Query info is always unregistered after the lock is released.
 */
@Override
public void afterAborted(TransactionState txnState, boolean txnOperated, String txnStatusChangeReason)
        throws UserException {
    // Nothing to undo if the transaction was never actually operated on.
    if (!txnOperated) {
        return;
    }
    writeLock();
    try {
        // Already FINISHED/CANCELLED — abort is a no-op.
        if (isFinalState()) {
            return;
        }
        if (coord != null && !isSyncStreamLoad) {
            coord.cancel(txnStatusChangeReason);
            QeProcessorImpl.INSTANCE.unregisterQuery(loadId);
        }
        for (int i = 0; i < channelNum; i++) {
            this.channels.set(i, State.CANCELLED);
        }
        endTimeMs = System.currentTimeMillis();
        state = State.CANCELLED;
        errorMsg = txnState.getReason();
        gcObject();
        GlobalStateMgr.getCurrentState().getGlobalTransactionMgr().getCallbackFactory().removeCallback(id);
    } finally {
        writeUnlock();
        // sync stream load related query info should unregister here
        QeProcessorImpl.INSTANCE.unregisterQuery(loadId);
    }
}
/** Aborting the transaction unregisters the coordinator from the QE processor. */
@Test
public void testAfterAborted() throws UserException {
    TransactionState txnState = new TransactionState();
    boolean txnOperated = true;
    TUniqueId labelId = new TUniqueId(2, 3);
    streamLoadTask.setTUniqueId(labelId);
    QeProcessorImpl.INSTANCE.registerQuery(streamLoadTask.getTUniqueId(), coord);
    Assert.assertEquals(1, QeProcessorImpl.INSTANCE.getCoordinatorCount());
    streamLoadTask.afterAborted(txnState, txnOperated, "");
    Assert.assertEquals(0, QeProcessorImpl.INSTANCE.getCoordinatorCount());
}
/**
 * Derives a Java package name from the OpenAPI host by reversing its labels
 * (e.g. {@code api.example.com} becomes {@code com.example.api}).
 * A missing host or "localhost" falls back to {@code DEFAULT_PACKAGE_NAME}.
 */
static String generatePackageName(final OpenAPI document) {
    final String host = RestDslGenerator.determineHostFrom(document);
    if (ObjectHelper.isNotEmpty(host)) {
        final StringBuilder packageName = new StringBuilder();
        // Strip any ":port" suffix before splitting into dot-separated labels.
        final String hostWithoutPort = host.replaceFirst(":.*", "");
        if ("localhost".equalsIgnoreCase(hostWithoutPort)) {
            return DEFAULT_PACKAGE_NAME;
        }
        final String[] parts = hostWithoutPort.split("\\.");
        // Append labels in reverse order, dot-joined.
        for (int i = parts.length - 1; i >= 0; i--) {
            packageName.append(parts[i]);
            if (i != 0) {
                packageName.append('.');
            }
        }
        return packageName.toString();
    }
    return DEFAULT_PACKAGE_NAME;
}
/** An OpenAPI document without a host yields the default package name. */
@Test
public void shouldUseDefaultPackageNameIfNoHostIsSpecified() {
    final OpenAPI openapi = new OpenAPI();
    assertThat(RestDslSourceCodeGenerator.generatePackageName(openapi))
        .isEqualTo(RestDslSourceCodeGenerator.DEFAULT_PACKAGE_NAME);
}
/**
 * Merges an ACL modification spec into an existing ACL: spec entries replace
 * matching existing entries (same scope/type/name), remaining spec entries are
 * added, defaults are copied where needed, and masks are recalculated for any
 * scope the spec touched unless a mask was provided explicitly.
 *
 * @param existingAcl the current, valid ACL
 * @param inAclSpec entries to merge in
 * @return the merged, validated ACL
 * @throws AclException if the resulting ACL would be invalid
 */
public static List<AclEntry> mergeAclEntries(List<AclEntry> existingAcl,
    List<AclEntry> inAclSpec) throws AclException {
  ValidatedAclSpec aclSpec = new ValidatedAclSpec(inAclSpec);
  ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES);
  List<AclEntry> foundAclSpecEntries =
      Lists.newArrayListWithCapacity(MAX_ENTRIES);
  EnumMap<AclEntryScope, AclEntry> providedMask =
      Maps.newEnumMap(AclEntryScope.class);
  EnumSet<AclEntryScope> maskDirty = EnumSet.noneOf(AclEntryScope.class);
  EnumSet<AclEntryScope> scopeDirty = EnumSet.noneOf(AclEntryScope.class);
  for (AclEntry existingEntry: existingAcl) {
    AclEntry aclSpecEntry = aclSpec.findByKey(existingEntry);
    if (aclSpecEntry != null) {
      // Spec entry replaces the existing one; masks are tracked separately.
      foundAclSpecEntries.add(aclSpecEntry);
      scopeDirty.add(aclSpecEntry.getScope());
      if (aclSpecEntry.getType() == MASK) {
        providedMask.put(aclSpecEntry.getScope(), aclSpecEntry);
        maskDirty.add(aclSpecEntry.getScope());
      } else {
        aclBuilder.add(aclSpecEntry);
      }
    } else {
      // Keep the untouched existing entry (mask entries held aside).
      if (existingEntry.getType() == MASK) {
        providedMask.put(existingEntry.getScope(), existingEntry);
      } else {
        aclBuilder.add(existingEntry);
      }
    }
  }
  // ACL spec entries that were not replacements are new additions.
  for (AclEntry newEntry: aclSpec) {
    if (Collections.binarySearch(foundAclSpecEntries, newEntry,
        ACL_ENTRY_COMPARATOR) < 0) {
      scopeDirty.add(newEntry.getScope());
      if (newEntry.getType() == MASK) {
        providedMask.put(newEntry.getScope(), newEntry);
        maskDirty.add(newEntry.getScope());
      } else {
        aclBuilder.add(newEntry);
      }
    }
  }
  copyDefaultsIfNeeded(aclBuilder);
  calculateMasks(aclBuilder, providedMask, maskDirty, scopeDirty);
  return buildAndValidateAcl(aclBuilder);
}
/** Merging named entries recalculates the access mask as the union of group/named perms. */
@Test
public void testMergeAclEntriesAccessMaskCalculated() throws AclException {
    List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
        .add(aclEntry(ACCESS, USER, ALL))
        .add(aclEntry(ACCESS, USER, "bruce", READ))
        .add(aclEntry(ACCESS, GROUP, READ))
        .add(aclEntry(ACCESS, MASK, READ))
        .add(aclEntry(ACCESS, OTHER, READ))
        .build();
    List<AclEntry> aclSpec = Lists.newArrayList(
        aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
        aclEntry(ACCESS, USER, "diana", READ));
    List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
        .add(aclEntry(ACCESS, USER, ALL))
        .add(aclEntry(ACCESS, USER, "bruce", READ_EXECUTE))
        .add(aclEntry(ACCESS, USER, "diana", READ))
        .add(aclEntry(ACCESS, GROUP, READ))
        .add(aclEntry(ACCESS, MASK, READ_EXECUTE))
        .add(aclEntry(ACCESS, OTHER, READ))
        .build();
    assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
/** Convenience overload; delegates with the boolean flag set to {@code false}. */
public Analysis analyze(Statement statement) {
    return analyze(statement, false);
}
/** A constant join condition (1 = 1) must analyze without error. */
@Test
public void testJoinOnConstantExpression() {
    analyze("SELECT * FROM t1 JOIN t2 ON 1 = 1");
}
/**
 * Decides whether a Triple method needs wrapper serialization (true) or can
 * pass protobuf messages straight through (false).
 *
 * <p>Generic ($invoke/$invokeAsync) and $echo calls are always wrapped.
 * Otherwise the decision is driven by how many protobuf / plain-Java /
 * StreamObserver parameters the method has and by the method's RPC type.
 *
 * @throws IllegalStateException for unsupported parameter combinations
 */
static boolean needWrap(MethodDescriptor methodDescriptor, Class<?>[] parameterClasses, Class<?> returnClass) {
    String methodName = methodDescriptor.getMethodName();
    // generic call must be wrapped
    if (CommonConstants.$INVOKE.equals(methodName) || CommonConstants.$INVOKE_ASYNC.equals(methodName)) {
        return true;
    }
    // echo must be wrapped
    if ($ECHO.equals(methodName)) {
        return true;
    }
    boolean returnClassProtobuf = isProtobufClass(returnClass);
    // Response foo()
    if (parameterClasses.length == 0) {
        return !returnClassProtobuf;
    }
    int protobufParameterCount = 0;
    int javaParameterCount = 0;
    int streamParameterCount = 0;
    boolean secondParameterStream = false;
    // count normal and protobuf param
    for (int i = 0; i < parameterClasses.length; i++) {
        Class<?> parameterClass = parameterClasses[i];
        if (isProtobufClass(parameterClass)) {
            protobufParameterCount++;
        } else {
            if (isStreamType(parameterClass)) {
                if (i == 1) {
                    secondParameterStream = true;
                }
                streamParameterCount++;
            } else {
                javaParameterCount++;
            }
        }
    }
    // more than one stream param
    if (streamParameterCount > 1) {
        throw new IllegalStateException("method params error: more than one Stream params. method=" + methodName);
    }
    // protobuf only support one param
    if (protobufParameterCount >= 2) {
        throw new IllegalStateException("method params error: more than one protobuf params. method=" + methodName);
    }
    // server stream support one normal param and one stream param
    if (streamParameterCount == 1) {
        if (javaParameterCount + protobufParameterCount > 1) {
            throw new IllegalStateException(
                    "method params error: server stream does not support more than one normal param."
                            + " method=" + methodName);
        }
        // server stream: void foo(Request, StreamObserver<Response>)
        if (!secondParameterStream) {
            throw new IllegalStateException(
                    "method params error: server stream's second param must be StreamObserver."
                            + " method=" + methodName);
        }
    }
    if (methodDescriptor.getRpcType() != MethodDescriptor.RpcType.UNARY) {
        if (MethodDescriptor.RpcType.SERVER_STREAM == methodDescriptor.getRpcType()) {
            if (!secondParameterStream) {
                throw new IllegalStateException(
                        "method params error:server stream's second param must be StreamObserver."
                                + " method=" + methodName);
            }
        }
        // param type must be consistent
        if (returnClassProtobuf) {
            if (javaParameterCount > 0) {
                throw new IllegalStateException(
                        "method params error: both normal and protobuf param found. method=" + methodName);
            }
        } else {
            if (protobufParameterCount > 0) {
                throw new IllegalStateException("method params error method=" + methodName);
            }
        }
    } else {
        if (streamParameterCount > 0) {
            throw new IllegalStateException(
                    "method params error: unary method should not contain any StreamObserver."
                            + " method=" + methodName);
        }
        if (protobufParameterCount > 0 && returnClassProtobuf) {
            return false;
        }
        // handler reactor or rxjava only consider gen by proto
        if (isMono(returnClass) || isRx(returnClass)) {
            return false;
        }
        if (protobufParameterCount <= 0 && !returnClassProtobuf) {
            return true;
        }
        // handle grpc stub only consider gen by proto
        if (GRPC_ASYNC_RETURN_CLASS.equalsIgnoreCase(returnClass.getName()) && protobufParameterCount == 1) {
            return false;
        }
        // handle dubbo generated method
        if (TRI_ASYNC_RETURN_CLASS.equalsIgnoreCase(returnClass.getName())) {
            Class<?> actualReturnClass =
                    (Class<?>) ((ParameterizedType) methodDescriptor.getMethod().getGenericReturnType())
                            .getActualTypeArguments()[0];
            boolean actualReturnClassProtobuf = isProtobufClass(actualReturnClass);
            if (actualReturnClassProtobuf && protobufParameterCount == 1) {
                return false;
            }
            if (!actualReturnClassProtobuf && protobufParameterCount == 0) {
                return true;
            }
        }
        // todo remove this in future
        boolean ignore = checkNeedIgnore(returnClass);
        if (ignore) {
            return protobufParameterCount != 1;
        }
        throw new IllegalStateException("method params error method=" + methodName);
    }
    // java param should be wrapped
    return javaParameterCount > 0;
}
/** A bidirectional-stream method taking only a StreamObserver is classified BI_STREAM and wrapped. */
@Test
void testWrapperBiStream() throws Exception {
    Method method = DescriptorService.class.getMethod("wrapBidirectionalStream", StreamObserver.class);
    ReflectionMethodDescriptor descriptor = new ReflectionMethodDescriptor(method);
    Assertions.assertEquals(1, descriptor.getParameterClasses().length);
    assertEquals(MethodDescriptor.RpcType.BI_STREAM, descriptor.getRpcType());
    assertTrue(needWrap(descriptor));
}
/** Convenience overload; delegates with the boolean flag set to {@code false}. */
public Analysis analyze(Statement statement) {
    return analyze(statement, false);
}
/** A WITH alias declared as AB must resolve case-insensitively when referenced as ab. */
@Test
public void testWithCaseInsensitiveResolution() {
    // TODO: verify output
    analyze("WITH AB AS (SELECT * FROM t1) SELECT * FROM ab");
}
/**
 * Returns true when the cluster is the cluster-controller service cluster.
 * Constant-first equals keeps this null-safe if serviceType() returns null.
 */
public static boolean isClusterController(ServiceCluster cluster) {
    return ServiceType.CLUSTER_CONTROLLER.equals(cluster.serviceType());
}
/** A cluster of type CLUSTER_CONTROLLER is recognized as the controller cluster. */
@Test
public void verifyControllerClusterIsRecognized() {
    ServiceCluster cluster = createServiceCluster(ServiceType.CLUSTER_CONTROLLER);
    assertTrue(VespaModelUtil.isClusterController(cluster));
}
/**
 * Builds the full search key {@code [prefix || searchKeyBytes]} as a
 * {@link ByteString}: a single-byte prefix followed by the raw key bytes.
 */
@VisibleForTesting
static ByteString getFullSearchKeyByteString(final byte prefix, final byte[] searchKeyBytes) {
    // Assemble in a plain array; ByteString.copyFrom takes its own copy.
    final byte[] fullSearchKey = new byte[searchKeyBytes.length + 1];
    fullSearchKey[0] = prefix;
    System.arraycopy(searchKeyBytes, 0, fullSearchKey, 1, searchKeyBytes.length);
    return ByteString.copyFrom(fullSearchKey);
}
/** The full search key is the ACI prefix byte followed by the compact ACI bytes. */
@Test
void getFullSearchKey() {
    final byte[] charBytes = new byte[]{KeyTransparencyController.ACI_PREFIX};
    final byte[] aci = ACI.toCompactByteArray();
    final byte[] expectedFullSearchKey = new byte[aci.length + 1];
    System.arraycopy(charBytes, 0, expectedFullSearchKey, 0, charBytes.length);
    System.arraycopy(aci, 0, expectedFullSearchKey, charBytes.length, aci.length);
    assertArrayEquals(expectedFullSearchKey,
        KeyTransparencyController.getFullSearchKeyByteString(KeyTransparencyController.ACI_PREFIX, aci).toByteArray());
}
/** Unsubscribes the listener from the service with an empty (all-clusters) cluster list. */
@Override
public void unsubscribe(String serviceName, EventListener listener) throws NacosException {
    unsubscribe(serviceName, new ArrayList<>(), listener);
}
/** Unsubscribing deregisters the selector wrapper and, with no remaining subscribers, calls the proxy. */
@Test
void testUnSubscribe3() throws NacosException {
    //given
    String serviceName = "service1";
    List<String> clusterList = Arrays.asList("cluster1", "cluster2");
    EventListener listener = event -> {
    };
    when(changeNotifier.isSubscribed(Constants.DEFAULT_GROUP, serviceName)).thenReturn(false);
    //when
    client.unsubscribe(serviceName, clusterList, listener);
    NamingSelectorWrapper wrapper =
        new NamingSelectorWrapper(NamingSelectorFactory.newClusterSelector(clusterList), listener);
    //then
    verify(changeNotifier, times(1)).deregisterListener(Constants.DEFAULT_GROUP, serviceName, wrapper);
    verify(proxy, times(1)).unsubscribe(serviceName, Constants.DEFAULT_GROUP, Constants.NULL);
}
/**
 * Lazily creates the mail configuration on first access.
 * NOTE(review): not synchronized — concurrent first calls could each create a
 * configuration instance; confirm single-threaded initialization is guaranteed.
 */
public MailConfiguration getConfiguration() {
    if (configuration == null) {
        configuration = new MailConfiguration(getCamelContext());
    }
    return configuration;
}
/** A URI carrying many options populates every corresponding configuration field. */
@Test
public void testManyConfigurations() {
    MailEndpoint endpoint = checkEndpoint("smtp://james@myhost:30/subject?password=secret"
            + "&from=me@camelriders.org&delete=true&folderName=riders"
            + "&contentType=text/html&unseen=false");
    MailConfiguration config = endpoint.getConfiguration();
    assertEquals("smtp", config.getProtocol(), "getProtocol()");
    assertEquals("myhost", config.getHost(), "getHost()");
    assertEquals(30, config.getPort(), "getPort()");
    assertEquals("james", config.getUsername(), "getUsername()");
    assertEquals("james@myhost", config.getRecipients().get(Message.RecipientType.TO),
            "getRecipients().get(Message.RecipientType.TO)");
    assertEquals("riders", config.getFolderName(), "folder");
    assertEquals("me@camelriders.org", config.getFrom(), "from");
    assertEquals("secret", config.getPassword(), "password");
    assertEquals(true, config.isDelete());
    assertFalse(config.isIgnoreUriScheme());
    assertEquals(-1, config.getFetchSize(), "fetchSize");
    assertFalse(config.isUnseen(), "unseen");
    assertEquals("text/html", config.getContentType(), MailConstants.MAIL_CONTENT_TYPE);
    assertFalse(config.isDebugMode());
}
/**
 * Classifies a message by its properties, in precedence order:
 * transaction flag > sharding key (FIFO) > any delay/timer property > NORMAL.
 */
public static TopicMessageType getMessageType(SendMessageRequestHeader requestHeader) {
    Map<String, String> properties = MessageDecoder.string2messageProperties(requestHeader.getProperties());
    String traFlag = properties.get(MessageConst.PROPERTY_TRANSACTION_PREPARED);
    TopicMessageType topicMessageType = TopicMessageType.NORMAL;
    if (Boolean.parseBoolean(traFlag)) {
        topicMessageType = TopicMessageType.TRANSACTION;
    } else if (properties.containsKey(MessageConst.PROPERTY_SHARDING_KEY)) {
        topicMessageType = TopicMessageType.FIFO;
    } else if (properties.get("__STARTDELIVERTIME") != null
        || properties.get(MessageConst.PROPERTY_DELAY_TIME_LEVEL) != null
        || properties.get(MessageConst.PROPERTY_TIMER_DELIVER_MS) != null
        || properties.get(MessageConst.PROPERTY_TIMER_DELAY_SEC) != null
        || properties.get(MessageConst.PROPERTY_TIMER_DELAY_MS) != null) {
        topicMessageType = TopicMessageType.DELAY;
    }
    return topicMessageType;
}
/** A message with the TIMER_DELAY_SEC property is classified as DELAY. */
@Test
public void testGetMessageTypeAsDelaySEC() {
    SendMessageRequestHeader requestHeader = new SendMessageRequestHeader();
    Map<String, String> map = new HashMap<>();
    map.put(MessageConst.PROPERTY_TIMER_DELAY_SEC, "1");
    requestHeader.setProperties(MessageDecoder.messageProperties2String(map));
    TopicMessageType result = BrokerMetricsManager.getMessageType(requestHeader);
    assertThat(TopicMessageType.DELAY).isEqualTo(result);
}
/** Converts an Avro schema to a BigQuery schema by converting each field. */
@VisibleForTesting
Schema convertSchema(org.apache.avro.Schema schema) {
    return Schema.of(getFields(schema));
}
/** Avro logical types (date, times, timestamps, decimal) map to the expected BigQuery types. */
@Test
void convertSchema_logicalTypes() {
    String schemaString =
        "{\"type\":\"record\",\"name\":\"logicalTypes\",\"fields\":[{\"name\":\"int_date\",\"type\":{\"type\":\"int\",\"logicalType\":\"date\"}},"
            + "{\"name\":\"int_time_millis\",\"type\":{\"type\":\"int\",\"logicalType\":\"time-millis\"}},{\"name\":\"long_time_micros\",\"type\":{\"type\":\"long\",\"logicalType\":\"time-micros\"}},"
            + "{\"name\":\"long_timestamp_millis\",\"type\":{\"type\":\"long\",\"logicalType\":\"timestamp-millis\"}},"
            + "{\"name\":\"long_timestamp_micros\",\"type\":{\"type\":\"long\",\"logicalType\":\"timestamp-micros\"}},"
            + "{\"name\":\"long_timestamp_millis_local\",\"type\":{\"type\":\"long\",\"logicalType\":\"local-timestamp-millis\"}},"
            + "{\"name\":\"long_timestamp_micros_local\",\"type\":{\"type\":\"long\",\"logicalType\":\"local-timestamp-micros\"}},"
            + "{\"name\":\"bytes_decimal\",\"type\":{\"type\":\"bytes\",\"logicalType\":\"decimal\", \"precision\": 4, \"scale\": 2}}]}";
    Schema.Parser parser = new Schema.Parser();
    Schema input = parser.parse(schemaString);
    com.google.cloud.bigquery.Schema expected = com.google.cloud.bigquery.Schema.of(
        Field.newBuilder("int_date", StandardSQLTypeName.DATE).setMode(Field.Mode.REQUIRED).build(),
        Field.newBuilder("int_time_millis", StandardSQLTypeName.TIME).setMode(Field.Mode.REQUIRED).build(),
        Field.newBuilder("long_time_micros", StandardSQLTypeName.TIME).setMode(Field.Mode.REQUIRED).build(),
        Field.newBuilder("long_timestamp_millis", StandardSQLTypeName.TIMESTAMP).setMode(Field.Mode.REQUIRED).build(),
        Field.newBuilder("long_timestamp_micros", StandardSQLTypeName.TIMESTAMP).setMode(Field.Mode.REQUIRED).build(),
        // Local (zone-less) timestamps have no BigQuery equivalent and stay INT64.
        Field.newBuilder("long_timestamp_millis_local", StandardSQLTypeName.INT64).setMode(Field.Mode.REQUIRED).build(),
        Field.newBuilder("long_timestamp_micros_local", StandardSQLTypeName.INT64).setMode(Field.Mode.REQUIRED).build(),
        Field.newBuilder("bytes_decimal", StandardSQLTypeName.NUMERIC).setMode(Field.Mode.REQUIRED).build());
    Assertions.assertEquals(expected, SCHEMA_RESOLVER.convertSchema(input));
}
/** Creates a new 8-byte buffer containing the given double. */
public static ByteBuf copyDouble(double value) {
    ByteBuf buf = buffer(8);
    buf.writeDouble(value);
    return buf;
}
/** copyDouble varargs: values round-trip in order; null and empty arrays yield empty buffers. */
@Test
public void testWrapDouble() {
    ByteBuf buffer = copyDouble(1, 4);
    assertEquals(16, buffer.capacity());
    assertEquals(1, buffer.readDouble(), 0.01);
    assertEquals(4, buffer.readDouble(), 0.01);
    assertFalse(buffer.isReadable());
    buffer.release();
    buffer = copyDouble(null);
    assertEquals(0, buffer.capacity());
    buffer.release();
    buffer = copyDouble(new double[] {});
    assertEquals(0, buffer.capacity());
    buffer.release();
}
/**
 * Looks up a custom request-context value by key.
 *
 * @param key the context key; {@code null} or empty keys yield empty
 * @return the value wrapped in an {@link Optional}, or empty when the context
 *         map is uninitialized, the key is absent, or the key maps to null
 */
@Override
public Optional<Object> getCustomContextData(String key) {
    if (_customRequestContext == null || key == null || key.isEmpty()) {
        return Optional.empty();
    }
    // Single lookup instead of containsKey+get; ofNullable also avoids the
    // NullPointerException Optional.of would throw for a key mapped to null.
    return Optional.ofNullable(_customRequestContext.get(key));
}
/** Looking up a key in a fresh context returns an empty Optional. */
@Test
public void testGetEmptyCustomContextData() throws RestLiSyntaxException {
    final ResourceContextImpl context = new ResourceContextImpl();
    Optional<Object> foo = context.getCustomContextData("foo");
    Assert.assertFalse(foo.isPresent());
}
/** Reports the Kafka build version via AppInfoParser. */
@Override
public String version() {
    return AppInfoParser.getVersion();
}
/** The converter's version matches AppInfoParser's reported version. */
@Test
public void testInheritedVersionRetrievedFromAppInfoParser() {
    assertEquals(AppInfoParser.getVersion(), converter.version());
}
/**
 * Interprets the bytecode stream one instruction at a time against the wizard
 * array and the operand stack.
 *
 * <p>Note: the locals declared in the first cases (value/amount/wizard/a/b)
 * share the switch scope and are reassigned by later cases — keep declaration
 * order intact when editing.
 *
 * @throws IllegalArgumentException on an unknown instruction value
 */
public void execute(int[] bytecode) {
    for (var i = 0; i < bytecode.length; i++) {
        Instruction instruction = Instruction.getInstruction(bytecode[i]);
        switch (instruction) {
            case LITERAL:
                // Read the next byte from the bytecode.
                int value = bytecode[++i];
                // Push the next value to stack
                stack.push(value);
                break;
            case SET_AGILITY:
                var amount = stack.pop();
                var wizard = stack.pop();
                setAgility(wizard, amount);
                break;
            case SET_WISDOM:
                amount = stack.pop();
                wizard = stack.pop();
                setWisdom(wizard, amount);
                break;
            case SET_HEALTH:
                amount = stack.pop();
                wizard = stack.pop();
                setHealth(wizard, amount);
                break;
            case GET_HEALTH:
                wizard = stack.pop();
                stack.push(getHealth(wizard));
                break;
            case GET_AGILITY:
                wizard = stack.pop();
                stack.push(getAgility(wizard));
                break;
            case GET_WISDOM:
                wizard = stack.pop();
                stack.push(getWisdom(wizard));
                break;
            case ADD:
                var a = stack.pop();
                var b = stack.pop();
                stack.push(a + b);
                break;
            case DIVIDE:
                a = stack.pop();
                b = stack.pop();
                // Operands pop in reverse order: b is the dividend, a the divisor.
                stack.push(b / a);
                break;
            case PLAY_SOUND:
                wizard = stack.pop();
                getWizards()[wizard].playSound();
                break;
            case SPAWN_PARTICLES:
                wizard = stack.pop();
                getWizards()[wizard].spawnParticles();
                break;
            default:
                throw new IllegalArgumentException("Invalid instruction value");
        }
        LOGGER.info("Executed " + instruction.name() + ", Stack contains " + getStack());
    }
}
/** LITERAL pushes its operand byte onto the stack. */
@Test
void testLiteral() {
    var bytecode = new int[2];
    bytecode[0] = LITERAL.getIntValue();
    bytecode[1] = 10;
    var vm = new VirtualMachine();
    vm.execute(bytecode);
    assertEquals(1, vm.getStack().size());
    assertEquals(Integer.valueOf(10), vm.getStack().pop());
}
/**
 * Starts an asynchronous fetch of the URL; the callback is stored so the
 * request listener can deliver the result or failure later.
 */
@Override
public void loadData(Priority priority, DataCallback<? super T> callback) {
    this.callback = callback;
    serializer.startRequest(priority, url, this);
}
/** An HTTP 403 completion reports an auth error to the data callback. */
@Test
public void testRequestComplete_withUnauthorizedStatusCode_callsCallbackWithAuthError() throws Exception {
    UrlResponseInfo info = getInfo(0, HttpURLConnection.HTTP_FORBIDDEN);
    fetcher.loadData(Priority.LOW, callback);
    UrlRequest.Callback urlCallback = urlRequestListenerCaptor.getValue();
    succeed(info, urlCallback, ByteBuffer.allocateDirect(0));
    verifyAuthError();
}
/** Opens a new JDBC cursor over this record set's split and columns. */
@Override
public RecordCursor cursor() {
    return new JdbcRecordCursor(jdbcClient, session, split, columnHandles);
}
/** Columns may be selected out of order and repeated; values stay consistent per row. */
@Test
public void testCursorMixedOrder() {
    RecordSet recordSet = new JdbcRecordSet(jdbcClient, session, split, ImmutableList.of(
        columnHandles.get("value"),
        columnHandles.get("value"),
        columnHandles.get("text")));
    try (RecordCursor cursor = recordSet.cursor()) {
        assertEquals(cursor.getType(0), BIGINT);
        assertEquals(cursor.getType(1), BIGINT);
        assertEquals(cursor.getType(2), VARCHAR);
        Map<String, Long> data = new LinkedHashMap<>();
        while (cursor.advanceNextPosition()) {
            // The duplicated "value" column must return the same long twice.
            assertEquals(cursor.getLong(0), cursor.getLong(1));
            data.put(cursor.getSlice(2).toStringUtf8(), cursor.getLong(0));
        }
        assertEquals(data, ImmutableMap.<String, Long>builder()
            .put("one", 1L)
            .put("two", 2L)
            .put("three", 3L)
            .put("ten", 10L)
            .put("eleven", 11L)
            .put("twelve", 12L)
            .build());
    }
}
/**
 * Matches when every tag expression evaluates to true for the pickle's tags;
 * with no expressions configured, everything matches.
 */
@Override
public boolean test(Pickle pickle) {
    if (expressions.isEmpty()) {
        return true;
    }
    List<String> tags = pickle.getTags();
    return expressions.stream()
            .allMatch(expression -> expression.evaluate(tags));
}
/** An "and" expression matches a pickle carrying both tags. */
@Test
void and_tag_predicate_matches_pickle_with_all_tags() {
    Pickle pickle = createPickleWithTags("@FOO", "@BAR");
    TagPredicate predicate = createPredicate("@FOO and @BAR");
    assertTrue(predicate.test(pickle));
}
/**
 * Writes the protocol as Avro IDL. The namespace is derived from the
 * protocol's full name: no dot falls back to {@code protocol.getNamespace()};
 * a dot at position 0 means an explicit null namespace; otherwise everything
 * before the last dot is used.
 *
 * @throws IOException if writing fails
 */
public static void writeIdlProtocol(Writer writer, Protocol protocol) throws IOException {
    final String protocolFullName = protocol.getName();
    final int lastDotPos = protocolFullName.lastIndexOf(".");
    final String protocolNameSpace;
    if (lastDotPos < 0) {
        protocolNameSpace = protocol.getNamespace();
    } else if (lastDotPos > 0) {
        protocolNameSpace = protocolFullName.substring(0, lastDotPos);
    } else {
        // Name starts with '.' — treat the namespace as absent.
        protocolNameSpace = null;
    }
    writeIdlProtocol(writer, protocol, protocolNameSpace, protocolFullName.substring(lastDotPos + 1),
            protocol.getTypes(), protocol.getMessages().values());
}
/** Writing a record containing an empty union must fail with AvroRuntimeException. */
@Test
public void cannotWriteEmptyUnionTypes() {
    assertThrows(AvroRuntimeException.class, () -> IdlUtils.writeIdlProtocol(new StringWriter(),
        Schema.createRecord("Single", null, "naming", false,
            singletonList(new Schema.Field("field", Schema.createUnion())))));
}
/**
 * Parses the entire input. The explicit "input == null" message is part of
 * the API contract (asserted by tests), so keep it when refactoring.
 */
public <T> boolean parse(Handler<T> handler, T target, CharSequence input) {
    if (input == null) throw new NullPointerException("input == null");
    return parse(handler, target, input, 0, input.length());
}
/** Each invalid argument produces its dedicated exception type and exact message. */
@Test
void parse_badParameters() {
    assertThatThrownBy(() -> entrySplitter.parse(null, map, ""))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("handler == null");
    assertThatThrownBy(() -> entrySplitter.parse(parseIntoMap, null, ""))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("target == null");
    assertThatThrownBy(() -> entrySplitter.parse(parseIntoMap, map, null))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("input == null");
    assertThatThrownBy(() -> entrySplitter.parse(parseIntoMap, map, "", -1, 1))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("beginIndex < 0");
    assertThatThrownBy(() -> entrySplitter.parse(parseIntoMap, map, "", 0, 2))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("endIndex > input.length()");
}
/**
 * Creates a converter from a Hadoop Writable (or AvroKey/AvroValue wrapper)
 * input type to the corresponding Avro datum type. For Avro wrappers in
 * map-only jobs the map-output schema is preferred, falling back to the
 * job-output schema.
 *
 * @throws IllegalStateException if a required writer schema was not configured
 * @throws UnsupportedOperationException for unrecognized input types
 */
@SuppressWarnings("unchecked")
public <IN, OUT> AvroDatumConverter<IN, OUT> create(Class<IN> inputClass) {
    boolean isMapOnly = ((JobConf) getConf()).getNumReduceTasks() == 0;
    if (AvroKey.class.isAssignableFrom(inputClass)) {
        Schema schema;
        if (isMapOnly) {
            schema = AvroJob.getMapOutputKeySchema(getConf());
            if (null == schema) {
                schema = AvroJob.getOutputKeySchema(getConf());
            }
        } else {
            schema = AvroJob.getOutputKeySchema(getConf());
        }
        if (null == schema) {
            throw new IllegalStateException("Writer schema for output key was not set. Use AvroJob.setOutputKeySchema().");
        }
        return (AvroDatumConverter<IN, OUT>) new AvroWrapperConverter(schema);
    }
    if (AvroValue.class.isAssignableFrom(inputClass)) {
        Schema schema;
        if (isMapOnly) {
            schema = AvroJob.getMapOutputValueSchema(getConf());
            if (null == schema) {
                schema = AvroJob.getOutputValueSchema(getConf());
            }
        } else {
            schema = AvroJob.getOutputValueSchema(getConf());
        }
        if (null == schema) {
            throw new IllegalStateException("Writer schema for output value was not set. Use AvroJob.setOutputValueSchema().");
        }
        return (AvroDatumConverter<IN, OUT>) new AvroWrapperConverter(schema);
    }
    // Plain Writable types map to their dedicated converters.
    if (BooleanWritable.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new BooleanWritableConverter();
    }
    if (BytesWritable.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new BytesWritableConverter();
    }
    if (ByteWritable.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new ByteWritableConverter();
    }
    if (DoubleWritable.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new DoubleWritableConverter();
    }
    if (FloatWritable.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new FloatWritableConverter();
    }
    if (IntWritable.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new IntWritableConverter();
    }
    if (LongWritable.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new LongWritableConverter();
    }
    if (NullWritable.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new NullWritableConverter();
    }
    if (Text.class.isAssignableFrom(inputClass)) {
        return (AvroDatumConverter<IN, OUT>) new TextConverter();
    }
    throw new UnsupportedOperationException("Unsupported input type: " + inputClass.getName());
}
/** A Text input converts to an equivalent CharSequence. */
@Test
void convertText() {
    AvroDatumConverter<Text, CharSequence> converter = mFactory.create(Text.class);
    assertEquals("foo", converter.convert(new Text("foo")).toString());
}
/** Convenience overload; delegates with the boolean flag set to {@code true}. */
public static Statement sanitize(
    final Statement node,
    final MetaStore metaStore) {
    return sanitize(node, metaStore, true);
}
/** An explicit column alias (AS BOB) survives sanitization untouched. */
@Test
public void shouldPreserveAliasIfPresent() {
    // Given:
    final Statement stmt = givenQuery("SELECT COL1 AS BOB FROM TEST1;");
    // When:
    final Query result = (Query) AstSanitizer.sanitize(stmt, META_STORE);
    // Then:
    assertThat(result.getSelect(), is(new Select(ImmutableList.of(
        new SingleColumn(column(TEST1_NAME, "COL1"), Optional.of(ColumnName.of("BOB")))
    ))));
}
/**
 * Executes a configured plan. A KsqlStatementException propagates unchanged;
 * any other KsqlException is re-wrapped so it carries the statement text for
 * better error reporting.
 */
@Override
public ExecuteResult execute(final ServiceContext serviceContext, final ConfiguredKsqlPlan plan,
    final boolean restoreInProgress) {
    try {
        final ExecuteResult result = EngineExecutor
            .create(primaryContext, serviceContext, plan.getConfig())
            .execute(plan.getPlan(), restoreInProgress);
        return result;
    } catch (final KsqlStatementException e) {
        throw e;
    } catch (final KsqlException e) {
        // add the statement text to the KsqlException
        throw new KsqlStatementException(
            e.getMessage(),
            e.getMessage(),
            plan.getPlan().getStatementText(),
            e.getCause()
        );
    }
}
/** INSERT INTO with a mismatched schema fails with a statement-aware exception. */
@Test
public void shouldThrowWhenInsertIntoSchemaDoesNotMatch() {
    // Given:
    setupKsqlEngineWithSharedRuntimeEnabled();
    // Given:
    execute(
        serviceContext, ksqlEngine,
        "create stream bar as select * from orders;",
        ksqlConfig, emptyMap()
    );
    // When:
    final KsqlStatementException e = assertThrows(
        KsqlStatementException.class,
        () -> execute(
            serviceContext, ksqlEngine,
            "insert into bar select orderTime, itemid from orders;",
            ksqlConfig, emptyMap()
        )
    );
    // Then: the error carries both the cause and the offending statement text.
    assertThat(e, rawMessage(
        containsString(
            "Incompatible schema between results and sink.")));
    assertThat(e, statementText(
        is("insert into bar select orderTime, itemid from orders;")));
}
@PostMapping("/login") public Object login(@RequestParam String username, @RequestParam String password, HttpServletResponse response, HttpServletRequest request) throws AccessException, IOException { if (AuthSystemTypes.NACOS.name().equalsIgnoreCase(authConfigs.getNacosAuthSystemType()) || AuthSystemTypes.LDAP.name().equalsIgnoreCase(authConfigs.getNacosAuthSystemType())) { NacosUser user = iAuthenticationManager.authenticate(request); response.addHeader(AuthConstants.AUTHORIZATION_HEADER, AuthConstants.TOKEN_PREFIX + user.getToken()); ObjectNode result = JacksonUtils.createEmptyJsonNode(); result.put(Constants.ACCESS_TOKEN, user.getToken()); result.put(Constants.TOKEN_TTL, jwtTokenManager.getTokenTtlInSeconds(user.getToken())); result.put(Constants.GLOBAL_ADMIN, iAuthenticationManager.hasGlobalAdminRole(user)); result.put(Constants.USERNAME, user.getUserName()); return result; } // create Authentication class through username and password, the implement class is UsernamePasswordAuthenticationToken UsernamePasswordAuthenticationToken authenticationToken = new UsernamePasswordAuthenticationToken(username, password); try { // use the method authenticate of AuthenticationManager(default implement is ProviderManager) to valid Authentication Authentication authentication = authenticationManager.authenticate(authenticationToken); // bind SecurityContext to Authentication SecurityContextHolder.getContext().setAuthentication(authentication); // generate Token String token = jwtTokenManager.createToken(authentication); // write Token to Http header response.addHeader(AuthConstants.AUTHORIZATION_HEADER, "Bearer " + token); return RestResultUtils.success("Bearer " + token); } catch (BadCredentialsException authentication) { return RestResultUtils.failed(HttpStatus.UNAUTHORIZED.value(), null, "Login failed"); } }
@Test void testLoginWithAuthedUser() throws AccessException, IOException { when(authenticationManager.authenticate(request)).thenReturn(user); when(authenticationManager.hasGlobalAdminRole(user)).thenReturn(true); when(authConfigs.getNacosAuthSystemType()).thenReturn(AuthSystemTypes.NACOS.name()); when(tokenManagerDelegate.getTokenTtlInSeconds(anyString())).thenReturn(18000L); Object actual = userController.login("nacos", "nacos", response, request); assertTrue(actual instanceof JsonNode); String actualString = actual.toString(); assertTrue(actualString.contains("\"accessToken\":\"1234567890\"")); assertTrue(actualString.contains("\"tokenTtl\":18000")); assertTrue(actualString.contains("\"globalAdmin\":true")); }
public void setUpNewTable(TableConfig tableConfig, IdealState idealState) { Preconditions.checkState(!_isStopping, "Segment manager is stopping"); String realtimeTableName = tableConfig.getTableName(); LOGGER.info("Setting up new LLC table: {}", realtimeTableName); _flushThresholdUpdateManager.clearFlushThresholdUpdater(realtimeTableName); StreamConfig streamConfig = new StreamConfig(tableConfig.getTableName(), IngestionConfigUtils.getStreamConfigMap(tableConfig)); InstancePartitions instancePartitions = getConsumingInstancePartitions(tableConfig); List<PartitionGroupMetadata> newPartitionGroupMetadataList = getNewPartitionGroupMetadataList(streamConfig, Collections.emptyList()); int numPartitionGroups = newPartitionGroupMetadataList.size(); int numReplicas = getNumReplicas(tableConfig, instancePartitions); SegmentAssignment segmentAssignment = SegmentAssignmentFactory.getSegmentAssignment(_helixManager, tableConfig, _controllerMetrics); Map<InstancePartitionsType, InstancePartitions> instancePartitionsMap = Collections.singletonMap(InstancePartitionsType.CONSUMING, instancePartitions); long currentTimeMs = getCurrentTimeMs(); Map<String, Map<String, String>> instanceStatesMap = idealState.getRecord().getMapFields(); for (PartitionGroupMetadata partitionGroupMetadata : newPartitionGroupMetadataList) { String segmentName = setupNewPartitionGroup(tableConfig, streamConfig, partitionGroupMetadata, currentTimeMs, instancePartitions, numPartitionGroups, numReplicas); updateInstanceStatesForNewConsumingSegment(instanceStatesMap, null, segmentName, segmentAssignment, instancePartitionsMap); } setIdealState(realtimeTableName, idealState); }
@Test public void testSetUpNewTable() { // Insufficient instances - 2 replicas, 1 instance, 4 partitions testSetUpNewTable(2, 1, 4, true); // Noop path - 2 replicas, 3 instances, 0 partition testSetUpNewTable(2, 3, 0, false); // Happy paths // 2 replicas, 3 instances, 4 partitions testSetUpNewTable(2, 3, 4, false); // 2 replicas, 3 instances, 8 partitions testSetUpNewTable(2, 3, 8, false); // 8 replicas, 10 instances, 4 partitions testSetUpNewTable(8, 10, 4, false); }
@Override public PlanNode optimize( PlanNode maxSubplan, ConnectorSession session, VariableAllocator variableAllocator, PlanNodeIdAllocator idAllocator) { return rewriteWith(new Rewriter(session, idAllocator), maxSubplan); }
@Test public void testJdbcComputePushdownNotOperator() { String table = "test_table"; String schema = "test_schema"; String expression = "c1 AND NOT(c2)"; TypeProvider typeProvider = TypeProvider.copyOf(ImmutableMap.of("c1", BOOLEAN, "c2", BOOLEAN)); RowExpression rowExpression = sqlToRowExpressionTranslator.translateAndOptimize(expression(expression), typeProvider); PlanNode original = filter(jdbcTableScan(schema, table, BOOLEAN, "c1", "c2"), rowExpression); Set<ColumnHandle> columns = Stream.of("c1", "c2").map(TestJdbcComputePushdown::booleanJdbcColumnHandle).collect(Collectors.toSet()); JdbcTableHandle jdbcTableHandle = new JdbcTableHandle(CONNECTOR_ID, new SchemaTableName(schema, table), CATALOG_NAME, schema, table); ConnectorSession session = new TestingConnectorSession(ImmutableList.of()); JdbcTableLayoutHandle jdbcTableLayoutHandle = new JdbcTableLayoutHandle( session.getSqlFunctionProperties(), jdbcTableHandle, TupleDomain.none(), Optional.of(new JdbcExpression("(('c1') AND ((NOT('c2'))))"))); PlanNode actual = this.jdbcComputePushdown.optimize(original, session, null, ID_ALLOCATOR); assertPlanMatch(actual, PlanMatchPattern.filter( expression, JdbcTableScanMatcher.jdbcTableScanPattern(jdbcTableLayoutHandle, columns))); }
@VisibleForTesting @Override public String getFailureDomain() { final String responsePayload = getAzureInstanceMetadata(); // For a sample response payload, // check https://docs.microsoft.com/en-us/azure/virtual-machines/windows/instance-metadata-service?tabs=linux try { final JsonNode jsonNode = JsonUtils.stringToJsonNode(responsePayload); final JsonNode computeNode = jsonNode.path(COMPUTE); if (computeNode.isMissingNode()) { throw new RuntimeException( "[AzureEnvironmentProvider]: Compute node is missing in the payload. Cannot retrieve failure domain " + "information"); } final JsonNode platformFailureDomainNode = computeNode.path(PLATFORM_FAULT_DOMAIN); if (platformFailureDomainNode.isMissingNode() || !platformFailureDomainNode.isTextual()) { throw new RuntimeException("[AzureEnvironmentProvider]: Json node platformFaultDomain is missing or is invalid." + " No failure domain information retrieved for given server instance"); } return platformFailureDomainNode.textValue(); } catch (IOException ex) { throw new RuntimeException(String.format( "[AzureEnvironmentProvider]: Errors when parsing response payload from Azure Instance Metadata Service: %s", responsePayload), ex); } }
@Test(expectedExceptions = RuntimeException.class, expectedExceptionsMessageRegExp = "\\[AzureEnvironmentProvider\\]: Json node platformFaultDomain is missing or is invalid." + " No failure domain information retrieved for given server instance") public void testMissingFaultDomainResponse() throws IOException { mockUtil(); when(_mockHttpEntity.getContent()).thenReturn( getClass().getClassLoader().getResourceAsStream(IMDS_RESPONSE_WITHOUT_FAULT_DOMAIN_INFO)); _azureEnvironmentProviderWithParams.getFailureDomain(); }
@Override public void createNode(KubevirtNode node) { checkNotNull(node, ERR_NULL_NODE); KubevirtNode intNode; KubevirtNode tunNode; if (node.intgBridge() == null) { String deviceIdStr = genDpidFromName(INTEGRATION_BRIDGE + "-" + node.hostname()); checkNotNull(deviceIdStr, ERR_NULL_DEVICE_ID); intNode = node.updateIntgBridge(DeviceId.deviceId(deviceIdStr)); checkArgument(!hasIntgBridge(intNode.intgBridge(), intNode.hostname()), NOT_DUPLICATED_MSG, intNode.intgBridge()); } else { intNode = node; checkArgument(!hasIntgBridge(intNode.intgBridge(), intNode.hostname()), NOT_DUPLICATED_MSG, intNode.intgBridge()); } if (node.tunBridge() == null) { String deviceIdStr = genDpidFromName(TUNNEL_BRIDGE + "-" + node.hostname()); checkNotNull(deviceIdStr, ERR_NULL_DEVICE_ID); tunNode = intNode.updateTunBridge(DeviceId.deviceId(deviceIdStr)); checkArgument(!hasTunBridge(tunNode.tunBridge(), tunNode.hostname()), NOT_DUPLICATED_MSG, tunNode.tunBridge()); } else { tunNode = intNode; checkArgument(!hasTunBridge(tunNode.tunBridge(), tunNode.hostname()), NOT_DUPLICATED_MSG, tunNode.tunBridge()); } nodeStore.createNode(tunNode); log.info(String.format(MSG_NODE, tunNode.hostname(), MSG_CREATED)); }
@Test(expected = IllegalArgumentException.class) public void testCreateDuplicateNode() { target.createNode(WORKER_1); target.createNode(WORKER_1); }
@CheckForNull public String clientId() { return config.get(CONSUMER_KEY).orElse(null); }
@Test public void return_client_id() { settings.setProperty("sonar.auth.bitbucket.clientId.secured", "id"); assertThat(underTest.clientId()).isEqualTo("id"); }
public static Builder forPage(int page) { return new Builder(page); }
@Test void forPage_fails_with_IAE_if_page_is_less_than_0() { assertThatThrownBy(() -> forPage(-Math.abs(new Random().nextInt()) - 1)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("page index must be >= 1"); }
private Function<KsqlConfig, Kudf> getUdfFactory( final Method method, final UdfDescription udfDescriptionAnnotation, final String functionName, final FunctionInvoker invoker, final String sensorName ) { return ksqlConfig -> { final Object actualUdf = FunctionLoaderUtils.instantiateFunctionInstance( method.getDeclaringClass(), udfDescriptionAnnotation.name()); if (actualUdf instanceof Configurable) { ExtensionSecurityManager.INSTANCE.pushInUdf(); try { ((Configurable) actualUdf) .configure(ksqlConfig.getKsqlFunctionsConfigProps(functionName)); } finally { ExtensionSecurityManager.INSTANCE.popOutUdf(); } } final PluggableUdf theUdf = new PluggableUdf(invoker, actualUdf); return metrics.<Kudf>map(m -> new UdfMetricProducer( m.getSensor(sensorName), theUdf, Time.SYSTEM )).orElse(theUdf); }; }
@Test public void shouldLoadBadFunctionButNotLetItExit() { // Given: final List<SqlArgument> argList = singletonList(SqlArgument.of(SqlTypes.STRING)); // We do need to set up the ExtensionSecurityManager for our test. // This is controlled by a feature flag and in this test, we just directly enable it. SecurityManager manager = System.getSecurityManager(); System.setSecurityManager(ExtensionSecurityManager.INSTANCE); final UdfFactory function = FUNC_REG.getUdfFactory(FunctionName.of("bad_test_udf")); assertThat(function, not(nullValue())); KsqlScalarFunction ksqlScalarFunction = function.getFunction(argList); // When: final Exception e1 = assertThrows( KsqlException.class, () -> ksqlScalarFunction.getReturnType(argList) ); // Then: assertThat(e1.getMessage(), containsString( "Cannot invoke the schema provider method exit for UDF bad_test_udf.")); System.setSecurityManager(manager); assertEquals(System.getSecurityManager(), manager); }
@JsonAnyGetter public Map<String, PartitionsSpec> get() { return map; }
@Test public void testPartitionsSpec() throws Exception { String text = "{\"numPartitions\": 5, \"configs\": {\"foo\": \"bar\"}}"; PartitionsSpec spec = JsonUtil.JSON_SERDE.readValue(text, PartitionsSpec.class); assertEquals(5, spec.numPartitions()); assertEquals("bar", spec.configs().get("foo")); assertEquals(1, spec.configs().size()); }
public static void disablePullConsumption(DefaultLitePullConsumerWrapper wrapper, Set<String> topics) { Set<String> subscribedTopic = wrapper.getSubscribedTopics(); if (subscribedTopic.stream().anyMatch(topics::contains)) { suspendPullConsumer(wrapper); return; } resumePullConsumer(wrapper); }
@Test public void testDisablePullConsumptionWithSubTractTopics() { subscribedTopics = new HashSet<>(); subscribedTopics.add("test-topic-1"); subscribedTopics.add("test-topic-2"); pullConsumerWrapper.setSubscribedTopics(subscribedTopics); pullConsumerWrapper.setProhibition(false); pullConsumerWrapper.setSubscriptionType(SubscriptionType.SUBSCRIBE); RocketMqPullConsumerController.disablePullConsumption(pullConsumerWrapper, prohibitionTopics); Assert.assertTrue(pullConsumerWrapper.isProhibition()); // 禁消费后,再次下发禁消费 MQClientInstance clientFactory = pullConsumerWrapper.getClientFactory(); Mockito.reset(clientFactory); RocketMqPullConsumerController.disablePullConsumption(pullConsumerWrapper, prohibitionTopics); Mockito.verify(clientFactory, Mockito.times(0)) .unregisterConsumer("test-group"); }
@Override public void doPushWithCallback(String clientId, Subscriber subscriber, PushDataWrapper data, NamingPushCallback callBack) { ServiceInfo actualServiceInfo = getServiceInfo(data, subscriber); callBack.setActualServiceInfo(actualServiceInfo); pushService.pushWithCallback(clientId, NotifySubscriberRequest.buildNotifySubscriberRequest(actualServiceInfo), callBack, GlobalExecutor.getCallbackExecutor()); }
@Test void testDoPushWithCallback() { doAnswer(new CallbackAnswer()).when(pushService) .pushWithCallback(eq(rpcClientId), any(NotifySubscriberRequest.class), eq(pushCallBack), eq(GlobalExecutor.getCallbackExecutor())); pushExecutor.doPushWithCallback(rpcClientId, subscriber, pushData, pushCallBack); verify(pushCallBack).onSuccess(); }
@Override public DataflowPipelineJob run(Pipeline pipeline) { // Multi-language pipelines and pipelines that include upgrades should automatically be upgraded // to Runner v2. if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) { List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList()); if (!experiments.contains("use_runner_v2")) { LOG.info( "Automatically enabling Dataflow Runner v2 since the pipeline used cross-language" + " transforms or pipeline needed a transform upgrade."); options.setExperiments( ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build()); } } if (useUnifiedWorker(options)) { if (hasExperiment(options, "disable_runner_v2") || hasExperiment(options, "disable_runner_v2_until_2023") || hasExperiment(options, "disable_prime_runner_v2")) { throw new IllegalArgumentException( "Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set."); } List<String> experiments = new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true if (!experiments.contains("use_runner_v2")) { experiments.add("use_runner_v2"); } if (!experiments.contains("use_unified_worker")) { experiments.add("use_unified_worker"); } if (!experiments.contains("beam_fn_api")) { experiments.add("beam_fn_api"); } if (!experiments.contains("use_portable_job_submission")) { experiments.add("use_portable_job_submission"); } options.setExperiments(ImmutableList.copyOf(experiments)); } logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline); logWarningIfBigqueryDLQUnused(pipeline); if (shouldActAsStreaming(pipeline)) { options.setStreaming(true); if (useUnifiedWorker(options)) { options.setEnableStreamingEngine(true); List<String> experiments = new 
ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true if (!experiments.contains("enable_streaming_engine")) { experiments.add("enable_streaming_engine"); } if (!experiments.contains("enable_windmill_service")) { experiments.add("enable_windmill_service"); } } } if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) { ProjectionPushdownOptimizer.optimize(pipeline); } LOG.info( "Executing pipeline on the Dataflow Service, which will have billing implications " + "related to Google Compute Engine usage and other Google Cloud Services."); DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class); String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions); // This incorrectly puns the worker harness container image (which implements v1beta3 API) // with the SDK harness image (which implements Fn API). // // The same Environment is used in different and contradictory ways, depending on whether // it is a v1 or v2 job submission. RunnerApi.Environment defaultEnvironmentForDataflow = Environments.createDockerEnvironment(workerHarnessContainerImageURL); // The SdkComponents for portable an non-portable job submission must be kept distinct. Both // need the default environment. SdkComponents portableComponents = SdkComponents.create(); portableComponents.registerEnvironment( defaultEnvironmentForDataflow .toBuilder() .addAllDependencies(getDefaultArtifacts()) .addAllCapabilities(Environments.getJavaCapabilities()) .build()); RunnerApi.Pipeline portablePipelineProto = PipelineTranslation.toProto(pipeline, portableComponents, false); // Note that `stageArtifacts` has to be called before `resolveArtifact` because // `resolveArtifact` updates local paths to staged paths in pipeline proto. 
portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto); List<DataflowPackage> packages = stageArtifacts(portablePipelineProto); portablePipelineProto = resolveArtifacts(portablePipelineProto); portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options); if (LOG.isDebugEnabled()) { LOG.debug( "Portable pipeline proto:\n{}", TextFormat.printer().printToString(portablePipelineProto)); } // Stage the portable pipeline proto, retrieving the staged pipeline path, then update // the options on the new job // TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation()); byte[] serializedProtoPipeline = portablePipelineProto.toByteArray(); DataflowPackage stagedPipeline = options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME); dataflowOptions.setPipelineUrl(stagedPipeline.getLocation()); if (useUnifiedWorker(options)) { LOG.info("Skipping v1 transform replacements since job will run on v2."); } else { // Now rewrite things to be as needed for v1 (mutates the pipeline) // This way the job submitted is valid for v1 and v2, simultaneously replaceV1Transforms(pipeline); } // Capture the SdkComponents for look up during step translations SdkComponents dataflowV1Components = SdkComponents.create(); dataflowV1Components.registerEnvironment( defaultEnvironmentForDataflow .toBuilder() .addAllDependencies(getDefaultArtifacts()) .addAllCapabilities(Environments.getJavaCapabilities()) .build()); // No need to perform transform upgrading for the Runner v1 proto. RunnerApi.Pipeline dataflowV1PipelineProto = PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false); if (LOG.isDebugEnabled()) { LOG.debug( "Dataflow v1 pipeline proto:\n{}", TextFormat.printer().printToString(dataflowV1PipelineProto)); } // Set a unique client_request_id in the CreateJob request. 
// This is used to ensure idempotence of job creation across retried // attempts to create a job. Specifically, if the service returns a job with // a different client_request_id, it means the returned one is a different // job previously created with the same job name, and that the job creation // has been effectively rejected. The SDK should return // Error::Already_Exists to user in that case. int randomNum = new Random().nextInt(9000) + 1000; String requestId = DateTimeFormat.forPattern("YYYYMMddHHmmssmmm") .withZone(DateTimeZone.UTC) .print(DateTimeUtils.currentTimeMillis()) + "_" + randomNum; JobSpecification jobSpecification = translator.translate( pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages); if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) { List<String> experiments = firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()); if (!experiments.contains("use_staged_dataflow_worker_jar")) { dataflowOptions.setExperiments( ImmutableList.<String>builder() .addAll(experiments) .add("use_staged_dataflow_worker_jar") .build()); } } Job newJob = jobSpecification.getJob(); try { newJob .getEnvironment() .setSdkPipelineOptions( MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class)); } catch (IOException e) { throw new IllegalArgumentException( "PipelineOptions specified failed to serialize to JSON.", e); } newJob.setClientRequestId(requestId); DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo(); String version = dataflowRunnerInfo.getVersion(); checkState( !"${pom.version}".equals(version), "Unable to submit a job to the Dataflow service with unset version ${pom.version}"); LOG.info("Dataflow SDK version: {}", version); newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties()); // The Dataflow Service may write to the temporary directory directly, so // must be verified. 
if (!isNullOrEmpty(options.getGcpTempLocation())) { newJob .getEnvironment() .setTempStoragePrefix( dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation())); } newJob.getEnvironment().setDataset(options.getTempDatasetId()); if (options.getWorkerRegion() != null) { newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion()); } if (options.getWorkerZone() != null) { newJob.getEnvironment().setWorkerZone(options.getWorkerZone()); } if (options.getFlexRSGoal() == DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) { newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED"); } else if (options.getFlexRSGoal() == DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) { newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED"); } // Represent the minCpuPlatform pipeline option as an experiment, if not already present. if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) { List<String> experiments = firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()); List<String> minCpuFlags = experiments.stream() .filter(p -> p.startsWith("min_cpu_platform")) .collect(Collectors.toList()); if (minCpuFlags.isEmpty()) { dataflowOptions.setExperiments( ImmutableList.<String>builder() .addAll(experiments) .add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform()) .build()); } else { LOG.warn( "Flag min_cpu_platform is defined in both top level PipelineOption, " + "as well as under experiments. Proceed using {}.", minCpuFlags.get(0)); } } newJob .getEnvironment() .setExperiments( ImmutableList.copyOf( firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()))); // Set the Docker container image that executes Dataflow worker harness, residing in Google // Container Registry. Translator is guaranteed to create a worker pool prior to this point. // For runner_v1, only worker_harness_container is set. 
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same // value. String containerImage = getContainerImageForJob(options); for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) { workerPool.setWorkerHarnessContainerImage(containerImage); } configureSdkHarnessContainerImages(options, portablePipelineProto, newJob); newJob.getEnvironment().setVersion(getEnvironmentVersion(options)); if (hooks != null) { hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment()); } // enable upload_graph when the graph is too large byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8); int jobGraphByteSize = jobGraphBytes.length; if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES && !hasExperiment(options, "upload_graph") && !useUnifiedWorker(options)) { List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList()); options.setExperiments( ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build()); LOG.info( "The job graph size ({} in bytes) is larger than {}. Automatically add " + "the upload_graph option to experiments.", jobGraphByteSize, CREATE_JOB_REQUEST_LIMIT_BYTES); } if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) { ArrayList<String> experiments = new ArrayList<>(options.getExperiments()); while (experiments.remove("upload_graph")) {} options.setExperiments(experiments); LOG.warn( "The upload_graph experiment was specified, but it does not apply " + "to runner v2 jobs. Option has been automatically removed."); } // Upload the job to GCS and remove the graph object from the API call. The graph // will be downloaded from GCS by the service. 
if (hasExperiment(options, "upload_graph")) { DataflowPackage stagedGraph = options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME); newJob.getSteps().clear(); newJob.setStepsLocation(stagedGraph.getLocation()); } if (!isNullOrEmpty(options.getDataflowJobFile()) || !isNullOrEmpty(options.getTemplateLocation())) { boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation()); if (isTemplate) { checkArgument( isNullOrEmpty(options.getDataflowJobFile()), "--dataflowJobFile and --templateLocation are mutually exclusive."); } String fileLocation = firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile()); checkArgument( fileLocation.startsWith("/") || fileLocation.startsWith("gs://"), "Location must be local or on Cloud Storage, got %s.", fileLocation); ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */); String workSpecJson = DataflowPipelineTranslator.jobToString(newJob); try (PrintWriter printWriter = new PrintWriter( new BufferedWriter( new OutputStreamWriter( Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)), UTF_8)))) { printWriter.print(workSpecJson); LOG.info("Printed job specification to {}", fileLocation); } catch (IOException ex) { String error = String.format("Cannot create output file at %s", fileLocation); if (isTemplate) { throw new RuntimeException(error, ex); } else { LOG.warn(error, ex); } } if (isTemplate) { LOG.info("Template successfully created."); return new DataflowTemplateJob(); } } String jobIdToUpdate = null; if (options.isUpdate()) { jobIdToUpdate = getJobIdFromName(options.getJobName()); newJob.setTransformNameMapping(options.getTransformNameMapping()); newJob.setReplaceJobId(jobIdToUpdate); } if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) { newJob.setTransformNameMapping(options.getTransformNameMapping()); newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot()); } Job jobResult; try { 
jobResult = dataflowClient.createJob(newJob); } catch (GoogleJsonResponseException e) { String errorMessages = "Unexpected errors"; if (e.getDetails() != null) { if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) { errorMessages = "The size of the serialized JSON representation of the pipeline " + "exceeds the allowable limit. " + "For more information, please see the documentation on job submission:\n" + "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs"; } else { errorMessages = e.getDetails().getMessage(); } } throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e); } catch (IOException e) { throw new RuntimeException("Failed to create a workflow job", e); } // Use a raw client for post-launch monitoring, as status calls may fail // regularly and need not be retried automatically. DataflowPipelineJob dataflowPipelineJob = new DataflowPipelineJob( DataflowClient.create(options), jobResult.getId(), options, jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(), portablePipelineProto); // If the service returned client request id, the SDK needs to compare it // with the original id generated in the request, if they are not the same // (i.e., the returned job is not created by this request), throw // DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException // depending on whether this is a reload or not. if (jobResult.getClientRequestId() != null && !jobResult.getClientRequestId().isEmpty() && !jobResult.getClientRequestId().equals(requestId)) { // If updating a job. 
if (options.isUpdate()) { throw new DataflowJobAlreadyUpdatedException( dataflowPipelineJob, String.format( "The job named %s with id: %s has already been updated into job id: %s " + "and cannot be updated again.", newJob.getName(), jobIdToUpdate, jobResult.getId())); } else { throw new DataflowJobAlreadyExistsException( dataflowPipelineJob, String.format( "There is already an active job named %s with id: %s. If you want to submit a" + " second job, try again by setting a different name using --jobName.", newJob.getName(), jobResult.getId())); } } LOG.info( "To access the Dataflow monitoring console, please navigate to {}", MonitoringUtil.getJobMonitoringPageURL( options.getProject(), options.getRegion(), jobResult.getId())); LOG.info("Submitted job: {}", jobResult.getId()); LOG.info( "To cancel the job using the 'gcloud' tool, run:\n> {}", MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId())); return dataflowPipelineJob; }
@Test public void testRunStreamingJobUsingPAssertThatSucceeds() throws Exception { options.setStreaming(true); Pipeline p = TestPipeline.create(options); PCollection<Integer> pc = p.apply(Create.of(1, 2, 3)); PAssert.that(pc).containsInAnyOrder(1, 2, 3); DataflowPipelineJob mockJob = Mockito.mock(DataflowPipelineJob.class); when(mockJob.getState()).thenReturn(State.DONE); when(mockJob.waitUntilFinish(any(Duration.class), any(JobMessagesHandler.class))) .thenReturn(State.DONE); when(mockJob.getProjectId()).thenReturn("test-project"); when(mockJob.getJobId()).thenReturn("test-job"); DataflowRunner mockRunner = Mockito.mock(DataflowRunner.class); when(mockRunner.run(any(Pipeline.class))).thenReturn(mockJob); when(mockClient.getJobMetrics(anyString())) .thenReturn(generateMockMetricResponse(true /* success */, true /* tentative */)); TestDataflowRunner runner = TestDataflowRunner.fromOptionsAndClient(options, mockClient); runner.run(p, mockRunner); }
synchronized void add(int splitCount) { int pos = count % history.length; history[pos] = splitCount; count += 1; }
@Test public void testExactFullHistory() { EnumerationHistory history = new EnumerationHistory(3); history.add(1); history.add(2); history.add(3); int[] expectedHistorySnapshot = {1, 2, 3}; testHistory(history, expectedHistorySnapshot); }
public static List<TargetInfo> parseOptTarget(CommandLine cmd, AlluxioConfiguration conf) throws IOException { String[] targets; if (cmd.hasOption(TARGET_OPTION_NAME)) { String argTarget = cmd.getOptionValue(TARGET_OPTION_NAME); if (StringUtils.isBlank(argTarget)) { throw new IOException("Option " + TARGET_OPTION_NAME + " can not be blank."); } else if (argTarget.contains(TARGET_SEPARATOR)) { targets = argTarget.split(TARGET_SEPARATOR); } else { targets = new String[]{argTarget}; } } else { // By default we set on all targets (master/workers/job_master/job_workers) targets = new String[]{ROLE_MASTER, ROLE_JOB_MASTER, ROLE_WORKERS, ROLE_JOB_WORKERS}; } return getTargetInfos(targets, conf); }
@Test public void parsetManualTargets() throws Exception { // Successfully guess all targets // One extra comma at the end // Some extra whitespace String allTargets = "masters-1:" + MASTER_WEB_PORT + " ,masters-2:" + JOB_MASTER_WEB_PORT + " ,\tworkers-1:" + WORKER_WEB_PORT + ",workers-2:" + WORKER_WEB_PORT + ",workers-3:" + JOB_WORKER_WEB_PORT + ",workers-4:" + JOB_WORKER_WEB_PORT + ", "; CommandLine mockCommandLine = mock(CommandLine.class); String[] mockArgs = new String[]{"--target", allTargets}; when(mockCommandLine.getArgs()).thenReturn(mockArgs); when(mockCommandLine.hasOption(LogLevel.TARGET_OPTION_NAME)).thenReturn(true); when(mockCommandLine.getOptionValue(LogLevel.TARGET_OPTION_NAME)).thenReturn(mockArgs[1]); List<LogLevel.TargetInfo> targets = LogLevel.parseOptTarget(mockCommandLine, mConf); assertEquals(6, targets.size()); assertEquals(new HashSet<>(Arrays.asList( new LogLevel.TargetInfo("masters-1", MASTER_WEB_PORT, "master"), new LogLevel.TargetInfo("masters-2", JOB_MASTER_WEB_PORT, "job_master"), new LogLevel.TargetInfo("workers-1", WORKER_WEB_PORT, "worker"), new LogLevel.TargetInfo("workers-2", WORKER_WEB_PORT, "worker"), new LogLevel.TargetInfo("workers-3", JOB_WORKER_WEB_PORT, "job_worker"), new LogLevel.TargetInfo("workers-4", JOB_WORKER_WEB_PORT, "job_worker"))), new HashSet<>(targets)); }