focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Intentional no-op: this implementation does not track property plugins,
// so unregistering (even null) has no effect and must not throw.
@Override public void unregisterPropertyPlugin(SAPropertyPlugin plugin) { }
// Smoke test: passing null must be a silent no-op (no NullPointerException).
@Test public void unregisterPropertyPlugin() { mSensorsAPI.unregisterPropertyPlugin(null); }
public String getDBPrefix(InterpreterContext context) { Map<String, String> localProperties = context.getLocalProperties(); // It is recommended to use this kind of format: %jdbc(db=mysql) if (localProperties.containsKey("db")) { return localProperties.get("db"); } // %jdbc(mysql) is only for backward compatibility for (Map.Entry<String, String> entry : localProperties.entrySet()) { if (entry.getKey().equals(entry.getValue())) { return entry.getKey(); } } return DEFAULT_KEY; }
// Covers the three prefix-resolution paths of getDBPrefix:
// (1) no local properties -> DEFAULT_KEY, (2) explicit db=... entry,
// (3) legacy key==value entry.
@Test void testForParsePropertyKey() {
    JDBCInterpreter t = new JDBCInterpreter(new Properties());

    // (1) empty properties fall back to the default prefix
    Map<String, String> localProperties = new HashMap<>();
    InterpreterContext interpreterContext = InterpreterContext.builder()
        .setLocalProperties(localProperties)
        .build();
    assertEquals(JDBCInterpreter.DEFAULT_KEY, t.getDBPrefix(interpreterContext));

    // (2) recommended form: %jdbc(db=mysql)
    localProperties = new HashMap<>();
    localProperties.put("db", "mysql");
    interpreterContext = InterpreterContext.builder()
        .setLocalProperties(localProperties)
        .build();
    assertEquals("mysql", t.getDBPrefix(interpreterContext));

    // (3) legacy form: %jdbc(hive) stored as key == value
    localProperties = new HashMap<>();
    localProperties.put("hive", "hive");
    interpreterContext = InterpreterContext.builder()
        .setLocalProperties(localProperties)
        .build();
    assertEquals("hive", t.getDBPrefix(interpreterContext));
}
/**
 * Decides whether a Triple (gRPC) method's payload must be wrapped, i.e. whether its
 * parameters/return value are plain Java objects that need an envelope rather than
 * protobuf messages that can go on the wire directly.
 *
 * <p>Returns {@code true} when wrapping is required, {@code false} when the method can use
 * raw protobuf. Throws {@link IllegalStateException} for parameter combinations that are
 * not representable (mixed protobuf/Java params, multiple stream params, etc.).
 * NOTE(review): the branch order below is significant — validation must run before the
 * UNARY/stream classification; do not reorder.
 */
static boolean needWrap(MethodDescriptor methodDescriptor, Class<?>[] parameterClasses, Class<?> returnClass) {
    String methodName = methodDescriptor.getMethodName();
    // generic call must be wrapped
    if (CommonConstants.$INVOKE.equals(methodName) || CommonConstants.$INVOKE_ASYNC.equals(methodName)) {
        return true;
    }
    // echo must be wrapped
    if ($ECHO.equals(methodName)) {
        return true;
    }
    boolean returnClassProtobuf = isProtobufClass(returnClass);
    // Response foo() — no params: wrap only when the return type is not protobuf.
    if (parameterClasses.length == 0) {
        return !returnClassProtobuf;
    }
    int protobufParameterCount = 0;
    int javaParameterCount = 0;
    int streamParameterCount = 0;
    boolean secondParameterStream = false;
    // count normal and protobuf param
    for (int i = 0; i < parameterClasses.length; i++) {
        Class<?> parameterClass = parameterClasses[i];
        if (isProtobufClass(parameterClass)) {
            protobufParameterCount++;
        } else {
            if (isStreamType(parameterClass)) {
                // Position matters: a stream param is only legal as the second argument.
                if (i == 1) {
                    secondParameterStream = true;
                }
                streamParameterCount++;
            } else {
                javaParameterCount++;
            }
        }
    }
    // more than one stream param
    if (streamParameterCount > 1) {
        throw new IllegalStateException("method params error: more than one Stream params. method=" + methodName);
    }
    // protobuf only support one param
    if (protobufParameterCount >= 2) {
        throw new IllegalStateException("method params error: more than one protobuf params. method=" + methodName);
    }
    // server stream support one normal param and one stream param
    if (streamParameterCount == 1) {
        if (javaParameterCount + protobufParameterCount > 1) {
            throw new IllegalStateException(
                "method params error: server stream does not support more than one normal param."
                    + " method=" + methodName);
        }
        // server stream: void foo(Request, StreamObserver<Response>)
        if (!secondParameterStream) {
            throw new IllegalStateException(
                "method params error: server stream's second param must be StreamObserver."
                    + " method=" + methodName);
        }
    }
    if (methodDescriptor.getRpcType() != MethodDescriptor.RpcType.UNARY) {
        if (MethodDescriptor.RpcType.SERVER_STREAM == methodDescriptor.getRpcType()) {
            if (!secondParameterStream) {
                throw new IllegalStateException(
                    "method params error:server stream's second param must be StreamObserver."
                        + " method=" + methodName);
            }
        }
        // param type must be consistent: streaming methods may not mix protobuf
        // and plain-Java payloads between params and return type.
        if (returnClassProtobuf) {
            if (javaParameterCount > 0) {
                throw new IllegalStateException(
                    "method params error: both normal and protobuf param found. method=" + methodName);
            }
        } else {
            if (protobufParameterCount > 0) {
                throw new IllegalStateException("method params error method=" + methodName);
            }
        }
    } else {
        // UNARY: no stream params allowed at all.
        if (streamParameterCount > 0) {
            throw new IllegalStateException(
                "method params error: unary method should not contain any StreamObserver."
                    + " method=" + methodName);
        }
        // Pure protobuf in and out — no wrapping needed.
        if (protobufParameterCount > 0 && returnClassProtobuf) {
            return false;
        }
        // handler reactor or rxjava only consider gen by proto
        if (isMono(returnClass) || isRx(returnClass)) {
            return false;
        }
        // Pure Java in and out — must be wrapped.
        if (protobufParameterCount <= 0 && !returnClassProtobuf) {
            return true;
        }
        // handle grpc stub only consider gen by proto
        if (GRPC_ASYNC_RETURN_CLASS.equalsIgnoreCase(returnClass.getName()) && protobufParameterCount == 1) {
            return false;
        }
        // handle dubbo generated method: inspect the CompletableFuture's type argument
        // to decide whether the async payload is protobuf.
        if (TRI_ASYNC_RETURN_CLASS.equalsIgnoreCase(returnClass.getName())) {
            Class<?> actualReturnClass = (Class<?>)
                ((ParameterizedType) methodDescriptor.getMethod().getGenericReturnType())
                    .getActualTypeArguments()[0];
            boolean actualReturnClassProtobuf = isProtobufClass(actualReturnClass);
            if (actualReturnClassProtobuf && protobufParameterCount == 1) {
                return false;
            }
            if (!actualReturnClassProtobuf && protobufParameterCount == 0) {
                return true;
            }
        }
        // todo remove this in future
        boolean ignore = checkNeedIgnore(returnClass);
        if (ignore) {
            return protobufParameterCount != 1;
        }
        throw new IllegalStateException("method params error method=" + methodName);
    }
    // java param should be wrapped
    return javaParameterCount > 0;
}
// Verifies that stream/reactive method shapes (iterator server-stream, Reactor Mono,
// RxJava Single) are all classified as not needing a wrapper.
@Test void testIgnoreMethod() throws NoSuchMethodException {
    Method method = DescriptorService.class.getMethod("iteratorServerStream", HelloReply.class);
    MethodDescriptor descriptor = new ReflectionMethodDescriptor(method);
    Assertions.assertFalse(needWrap(descriptor));

    Method method2 = DescriptorService.class.getMethod("reactorMethod", HelloReply.class);
    MethodDescriptor descriptor2 = new ReflectionMethodDescriptor(method2);
    Assertions.assertFalse(needWrap(descriptor2));

    Method method3 = DescriptorService.class.getMethod("reactorMethod2", Mono.class);
    MethodDescriptor descriptor3 = new ReflectionMethodDescriptor(method3);
    Assertions.assertFalse(needWrap(descriptor3));

    Method method4 = DescriptorService.class.getMethod("rxJavaMethod", Single.class);
    MethodDescriptor descriptor4 = new ReflectionMethodDescriptor(method4);
    Assertions.assertFalse(needWrap(descriptor4));
}
/**
 * Handles a NodeManager registration request. Validates the NM (version, resolvable
 * hostname, inclusion list, minimum allocation), registers or reconnects the RMNode,
 * and records node labels/attributes reported by the NM.
 *
 * <p>Any validation failure short-circuits with a SHUTDOWN response carrying a
 * diagnostic message; a successful registration returns NORMAL with the current
 * master keys and RM identity. NOTE(review): side-effect ordering below (event
 * dispatch, monitor register/unregister, key removal) is significant — do not reorder.
 */
@SuppressWarnings("unchecked")
@Override
public RegisterNodeManagerResponse registerNodeManager(
    RegisterNodeManagerRequest request) throws YarnException, IOException {
  NodeId nodeId = request.getNodeId();
  String host = nodeId.getHost();
  int cmPort = nodeId.getPort();
  int httpPort = request.getHttpPort();
  Resource capability = request.getResource();
  String nodeManagerVersion = request.getNMVersion();
  Resource physicalResource = request.getPhysicalResource();
  NodeStatus nodeStatus = request.getNodeStatus();

  RegisterNodeManagerResponse response = recordFactory
      .newRecordInstance(RegisterNodeManagerResponse.class);

  // Reject NMs older than the configured minimum version ("EqualToRM" pins it
  // to the RM's own version; "NONE" disables the check).
  if (!minimumNodeManagerVersion.equals("NONE")) {
    if (minimumNodeManagerVersion.equals("EqualToRM")) {
      minimumNodeManagerVersion = YarnVersionInfo.getVersion();
    }
    if ((nodeManagerVersion == null) ||
        (VersionUtil.compareVersions(nodeManagerVersion,minimumNodeManagerVersion)) < 0) {
      String message = "Disallowed NodeManager Version " + nodeManagerVersion
          + ", is less than the minimum version " + minimumNodeManagerVersion
          + " sending SHUTDOWN signal to " + "NodeManager.";
      LOG.info(message);
      response.setDiagnosticsMessage(message);
      response.setNodeAction(NodeAction.SHUTDOWN);
      return response;
    }
  }

  if (checkIpHostnameInRegistration) {
    InetSocketAddress nmAddress = NetUtils.createSocketAddrForHost(host, cmPort);
    InetAddress inetAddress = Server.getRemoteIp();
    if (inetAddress != null && nmAddress.isUnresolved()) {
      // Reject registration of unresolved nm to prevent resourcemanager
      // getting stuck at allocations.
      final String message = "hostname cannot be resolved (ip="
          + inetAddress.getHostAddress() + ", hostname=" + host + ")";
      LOG.warn("Unresolved nodemanager registration: " + message);
      response.setDiagnosticsMessage(message);
      response.setNodeAction(NodeAction.SHUTDOWN);
      return response;
    }
  }

  // Check if this node is a 'valid' node
  if (!this.nodesListManager.isValidNode(host)
      && !isNodeInDecommissioning(nodeId)) {
    String message = "Disallowed NodeManager from  " + host
        + ", Sending SHUTDOWN signal to the NodeManager.";
    LOG.info(message);
    response.setDiagnosticsMessage(message);
    response.setNodeAction(NodeAction.SHUTDOWN);
    return response;
  }

  // check if node's capacity is load from dynamic-resources.xml
  String nid = nodeId.toString();
  Resource dynamicLoadCapability = loadNodeResourceFromDRConfiguration(nid);
  if (dynamicLoadCapability != null) {
    LOG.debug("Resource for node: {} is adjusted from: {} to: {} due to"
        + " settings in dynamic-resources.xml.", nid, capability,
        dynamicLoadCapability);
    capability = dynamicLoadCapability;
    // sync back with new resource.
    response.setResource(capability);
  }

  // Check if this node has minimum allocations
  if (capability.getMemorySize() < minAllocMb
      || capability.getVirtualCores() < minAllocVcores) {
    String message = "NodeManager from  " + host
        + " doesn't satisfy minimum allocations, Sending SHUTDOWN"
        + " signal to the NodeManager. Node capabilities are " + capability
        + "; minimums are " + minAllocMb + "mb and " + minAllocVcores
        + " vcores";
    LOG.info(message);
    response.setDiagnosticsMessage(message);
    response.setNodeAction(NodeAction.SHUTDOWN);
    return response;
  }

  response.setContainerTokenMasterKey(containerTokenSecretManager
      .getCurrentKey());
  response.setNMTokenMasterKey(nmTokenSecretManager
      .getCurrentKey());

  RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort, httpPort,
      resolve(host), capability, nodeManagerVersion, physicalResource);

  // putIfAbsent distinguishes first-time registration (oldNode == null)
  // from a reconnect of a known node.
  RMNode oldNode = this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode);
  if (oldNode == null) {
    RMNodeStartedEvent startEvent = new RMNodeStartedEvent(nodeId,
        request.getNMContainerStatuses(),
        request.getRunningApplications(), nodeStatus);
    if (request.getLogAggregationReportsForApps() != null
        && !request.getLogAggregationReportsForApps().isEmpty()) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Found the number of previous cached log aggregation "
            + "status from nodemanager:" + nodeId + " is :"
            + request.getLogAggregationReportsForApps().size());
      }
      startEvent.setLogAggregationReportsForApps(request
          .getLogAggregationReportsForApps());
    }
    this.rmContext.getDispatcher().getEventHandler().handle(
        startEvent);
  } else {
    LOG.info("Reconnect from the node at: " + host);
    this.nmLivelinessMonitor.unregister(nodeId);

    if (CollectionUtils.isEmpty(request.getRunningApplications())
        && rmNode.getState() != NodeState.DECOMMISSIONING
        && rmNode.getHttpPort() != oldNode.getHttpPort()) {
      // Reconnected node differs, so replace old node and start new node
      switch (rmNode.getState()) {
      case RUNNING:
        ClusterMetrics.getMetrics().decrNumActiveNodes();
        break;
      case UNHEALTHY:
        ClusterMetrics.getMetrics().decrNumUnhealthyNMs();
        break;
      default:
        LOG.debug("Unexpected Rmnode state");
      }
      this.rmContext.getDispatcher().getEventHandler()
          .handle(new NodeRemovedSchedulerEvent(rmNode));
      this.rmContext.getRMNodes().put(nodeId, rmNode);
      this.rmContext.getDispatcher().getEventHandler()
          .handle(new RMNodeStartedEvent(nodeId, null, null, nodeStatus));
    } else {
      // Reset heartbeat ID since node just restarted.
      oldNode.resetLastNodeHeartBeatResponse();

      this.rmContext.getDispatcher().getEventHandler()
          .handle(new RMNodeReconnectEvent(nodeId, rmNode,
              request.getRunningApplications(),
              request.getNMContainerStatuses()));
    }
  }
  // On every node manager register we will be clearing NMToken keys if
  // present for any running application.
  this.nmTokenSecretManager.removeNodeKey(nodeId);
  this.nmLivelinessMonitor.register(nodeId);

  // Handle received container status, this should be processed after new
  // RMNode inserted
  if (!rmContext.isWorkPreservingRecoveryEnabled()) {
    if (!request.getNMContainerStatuses().isEmpty()) {
      LOG.info("received container statuses on node manager register :"
          + request.getNMContainerStatuses());
      for (NMContainerStatus status : request.getNMContainerStatuses()) {
        handleNMContainerStatus(status, nodeId);
      }
    }
  }

  // Update node's labels to RM's NodeLabelManager.
  Set<String> nodeLabels = NodeLabelsUtils.convertToStringSet(
      request.getNodeLabels());
  if (isDistributedNodeLabelsConf && nodeLabels != null) {
    try {
      updateNodeLabelsFromNMReport(nodeLabels, nodeId);
      response.setAreNodeLabelsAcceptedByRM(true);
    } catch (IOException ex) {
      // Ensure the exception is captured in the response
      response.setDiagnosticsMessage(ex.getMessage());
      response.setAreNodeLabelsAcceptedByRM(false);
    }
  } else if (isDelegatedCentralizedNodeLabelsConf) {
    this.rmContext.getRMDelegatedNodeLabelsUpdater().updateNodeLabels(nodeId);
  }

  // Update node's attributes to RM's NodeAttributesManager.
  if (request.getNodeAttributes() != null) {
    try {
      // update node attributes if necessary then update heartbeat response
      updateNodeAttributesIfNecessary(nodeId, request.getNodeAttributes());
      response.setAreNodeAttributesAcceptedByRM(true);
    } catch (IOException ex) {
      //ensure the error message is captured and sent across in response
      String errorMsg = response.getDiagnosticsMessage() == null
          ? ex.getMessage()
          : response.getDiagnosticsMessage() + "\n" + ex.getMessage();
      response.setDiagnosticsMessage(errorMsg);
      response.setAreNodeAttributesAcceptedByRM(false);
    }
  }

  // Summarize the successful registration for the RM log.
  StringBuilder message = new StringBuilder();
  message.append("NodeManager from node ").append(host).append("(cmPort: ")
      .append(cmPort).append(" httpPort: ");
  message.append(httpPort).append(") ")
      .append("registered with capability: ").append(capability);
  message.append(", assigned nodeId ").append(nodeId);
  if (response.getAreNodeLabelsAcceptedByRM()) {
    message.append(", node labels { ").append(
        StringUtils.join(",", nodeLabels) + " } ");
  }
  if (response.getAreNodeAttributesAcceptedByRM()) {
    message.append(", node attributes { ")
        .append(request.getNodeAttributes() + " } ");
  }
  LOG.info(message.toString());
  response.setNodeAction(NodeAction.NORMAL);
  response.setRMIdentifier(ResourceManager.getClusterTimeStamp());
  response.setRMVersion(YarnVersionInfo.getVersion());
  return response;
}
// End-to-end: registering an NM that reports distributed node attributes must
// yield NORMAL, persist the attributes in the NodeAttributesManager, and flag
// them as accepted in the response.
@Test
public void testNodeRegistrationWithAttributes() throws Exception {
  writeToHostsFile("host2");
  Configuration conf = new Configuration();
  conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH,
      hostFile.getAbsolutePath());
  conf.setClass(YarnConfiguration.FS_NODE_ATTRIBUTE_STORE_IMPL_CLASS,
      FileSystemNodeAttributeStore.class, NodeAttributeStore.class);
  // Temp dir trick: create a file, delete it, recreate as a directory.
  File tempDir = File.createTempFile("nattr", ".tmp");
  tempDir.delete();
  tempDir.mkdirs();
  tempDir.deleteOnExit();
  conf.set(YarnConfiguration.FS_NODE_ATTRIBUTE_STORE_ROOT_DIR,
      tempDir.getAbsolutePath());
  rm = new MockRM(conf);
  rm.start();

  ResourceTrackerService resourceTrackerService =
      rm.getResourceTrackerService();
  RegisterNodeManagerRequest registerReq =
      Records.newRecord(RegisterNodeManagerRequest.class);
  NodeId nodeId = NodeId.newInstance("host2", 1234);
  Resource capability = Resources.createResource(1024);
  NodeAttribute nodeAttribute1 = NodeAttribute
      .newInstance(NodeAttribute.PREFIX_DISTRIBUTED, "Attr1",
          NodeAttributeType.STRING, "V1");
  NodeAttribute nodeAttribute2 = NodeAttribute
      .newInstance(NodeAttribute.PREFIX_DISTRIBUTED, "Attr2",
          NodeAttributeType.STRING, "V2");
  registerReq.setResource(capability);
  registerReq.setNodeId(nodeId);
  registerReq.setHttpPort(1234);
  registerReq.setNMVersion(YarnVersionInfo.getVersion());
  registerReq.setNodeAttributes(toSet(nodeAttribute1, nodeAttribute2));
  RegisterNodeManagerResponse response =
      resourceTrackerService.registerNodeManager(registerReq);

  Assert.assertEquals("Action should be normal on valid Node Attributes",
      NodeAction.NORMAL, response.getNodeAction());
  Assert.assertTrue(NodeLabelUtil.isNodeAttributesEquals(
      rm.getRMContext().getNodeAttributesManager()
          .getAttributesForNode(nodeId.getHost()).keySet(),
      registerReq.getNodeAttributes()));
  Assert.assertTrue("Valid Node Attributes were not accepted by RM",
      response.getAreNodeAttributesAcceptedByRM());

  if (rm != null) {
    rm.stop();
  }
}
/**
 * Splits {@code str} around matches of {@code separatorChars} (a regular
 * expression, per {@link String#split(String)}).
 *
 * @param str            the string to split; {@code null} yields {@code null}
 * @param separatorChars the separator regex; {@code null} defaults to {@code " +"}
 *                       (one or more spaces)
 * @return the split parts, an empty array for an empty input, or {@code null}
 *         for a {@code null} input
 */
@SuppressWarnings("checkstyle:WhitespaceAround")
public static String[] split(final String str, String separatorChars) {
    if (str == null) {
        return null;
    }
    if (str.isEmpty()) {
        return new String[0];
    }
    // Default separator: runs of spaces.
    final String pattern = (separatorChars == null) ? " +" : separatorChars;
    return str.split(pattern);
}
// Exercises split(): null input, empty input, null separator (defaults to
// space runs), and an explicit separator.
@Test void testSplit() {
    assertNull(StringUtils.split(null, ","));
    assertArrayEquals(new String[0], StringUtils.split("", ","));
    assertArrayEquals(new String[] {"ab", "cd", "ef"}, StringUtils.split("ab cd ef", null));
    assertArrayEquals(new String[] {"ab", "cd", "ef"}, StringUtils.split("ab  cd ef", null));
    assertArrayEquals(new String[] {"ab", "cd", "ef"}, StringUtils.split("ab:cd:ef", ":"));
}
// Checks the inner condition against every element of the collection, gathering
// both satisfied and violated sub-events; only when at least one sub-event was
// recorded is a single aggregate OnlyConditionEvent added to the output events.
// (An empty collection therefore contributes no event at all.)
@Override public void check(Collection<? extends T> collection, ConditionEvents events) { ViolatedAndSatisfiedConditionEvents subEvents = new ViolatedAndSatisfiedConditionEvents(); for (T item : collection) { condition.check(item, subEvents); } if (!subEvents.getAllowed().isEmpty() || !subEvents.getViolating().isEmpty()) { events.add(new OnlyConditionEvent(collection, subEvents)); } }
// A satisfied containOnlyElementsThat condition must report no violation, and
// its never(...)-inverted form must report the corresponding violations.
@Test
public void inverting_works() {
    ConditionEvents events = ConditionEvents.Factory.create();
    ArchCondition<Collection<?>> condition = containOnlyElementsThat(IS_SERIALIZABLE);

    condition.check(TWO_SERIALIZABLE_OBJECTS, events);
    assertThat(events).containNoViolation();

    events = ConditionEvents.Factory.create();
    never(condition).check(TWO_SERIALIZABLE_OBJECTS, events);
    assertThat(events).containViolations(messageForTwoTimes(isSerializableMessageFor(SerializableObject.class)));
}
/**
 * Drains up to {@code fetchConfig.maxPollRecords} records from the fetch buffer into a
 * single {@link Fetch}. Completed fetches for paused partitions are set aside and put
 * back into the buffer afterwards so they can be returned once the partition resumes.
 *
 * <p>A {@link KafkaException} mid-drain is swallowed if some records were already
 * collected (they are returned; the error will resurface on the next poll) and rethrown
 * only when nothing was collected.
 */
public Fetch<K, V> collectFetch(final FetchBuffer fetchBuffer) {
    final Fetch<K, V> fetch = Fetch.empty();
    final Queue<CompletedFetch> pausedCompletedFetches = new ArrayDeque<>();
    int recordsRemaining = fetchConfig.maxPollRecords;

    try {
        while (recordsRemaining > 0) {
            final CompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch();

            if (nextInLineFetch == null || nextInLineFetch.isConsumed()) {
                // Nothing in flight (or it is exhausted): promote the next buffered fetch.
                final CompletedFetch completedFetch = fetchBuffer.peek();

                if (completedFetch == null)
                    break;

                if (!completedFetch.isInitialized()) {
                    try {
                        fetchBuffer.setNextInLineFetch(initialize(completedFetch));
                    } catch (Exception e) {
                        // Remove a completedFetch upon a parse with exception if (1) it contains no completedFetch, and
                        // (2) there are no fetched completedFetch with actual content preceding this exception.
                        // The first condition ensures that the completedFetches is not stuck with the same completedFetch
                        // in cases such as the TopicAuthorizationException, and the second condition ensures that no
                        // potential data loss due to an exception in a following record.
                        if (fetch.isEmpty() && FetchResponse.recordsOrFail(completedFetch.partitionData).sizeInBytes() == 0)
                            fetchBuffer.poll();

                        throw e;
                    }
                } else {
                    fetchBuffer.setNextInLineFetch(completedFetch);
                }

                fetchBuffer.poll();
            } else if (subscriptions.isPaused(nextInLineFetch.partition)) {
                // when the partition is paused we add the records back to the completedFetches queue instead of draining
                // them so that they can be returned on a subsequent poll if the partition is resumed at that time
                log.debug("Skipping fetching records for assigned partition {} because it is paused", nextInLineFetch.partition);
                pausedCompletedFetches.add(nextInLineFetch);
                fetchBuffer.setNextInLineFetch(null);
            } else {
                // Drain records from the in-flight fetch, bounded by the remaining budget.
                final Fetch<K, V> nextFetch = fetchRecords(nextInLineFetch, recordsRemaining);
                recordsRemaining -= nextFetch.numRecords();
                fetch.add(nextFetch);
            }
        }
    } catch (KafkaException e) {
        // Only propagate if nothing was collected; otherwise return what we have.
        if (fetch.isEmpty())
            throw e;
    } finally {
        // add any polled completed fetches for paused partitions back to the completed fetches queue to be
        // re-evaluated in the next poll
        fetchBuffer.addAll(pausedCompletedFetches);
    }

    return fetch;
}
// An OFFSET_OUT_OF_RANGE error while a preferred read replica is set must
// produce an empty Fetch and clear the preferred-read-replica selection.
@Test
public void testFetchWithOffsetOutOfRangeWithPreferredReadReplica() {
    int records = 10;
    buildDependencies(records);
    assignAndSeek(topicAPartition0);

    // Set the preferred read replica and just to be safe, verify it was set.
    int preferredReadReplicaId = 67;
    subscriptions.updatePreferredReadReplica(topicAPartition0, preferredReadReplicaId, time::milliseconds);
    assertNotNull(subscriptions.preferredReadReplica(topicAPartition0, time.milliseconds()));
    assertEquals(Optional.of(preferredReadReplicaId), subscriptions.preferredReadReplica(topicAPartition0, time.milliseconds()));

    CompletedFetch completedFetch = completedFetchBuilder
        .error(Errors.OFFSET_OUT_OF_RANGE)
        .build();
    fetchBuffer.add(completedFetch);
    Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer);

    // The Fetch and read replica settings should be empty.
    assertTrue(fetch.isEmpty());
    assertEquals(Optional.empty(), subscriptions.preferredReadReplica(topicAPartition0, time.milliseconds()));
}
/**
 * Decodes a hex-encoded private transaction, dispatching on the detected
 * transaction type: EIP-1559 payloads and legacy payloads are handled by
 * their respective decoders.
 *
 * @param hexTransaction hex string of the RLP-encoded private transaction
 * @return the decoded {@link RawPrivateTransaction}
 */
public static RawPrivateTransaction decode(final String hexTransaction) {
    final byte[] rawBytes = Numeric.hexStringToByteArray(hexTransaction);
    return getPrivateTransactionType(rawBytes) == TransactionType.EIP1559
        ? decodePrivateTransaction1559(rawBytes)
        : decodeLegacyPrivateTransaction(rawBytes);
}
// Round-trip: encode an EIP-1559 private transaction, decode the hex, and
// verify every field (nonce, chainId, fee caps, gas limit, to, data, enclave
// key, privateFor, restriction) survives unchanged.
@Test
public void testDecoding1559() {
    final BigInteger nonce = BigInteger.ZERO;
    final long chainId = 2018;
    final BigInteger gasLimit = BigInteger.TEN;
    final BigInteger maxPriorityFeePerGas = BigInteger.ONE;
    final BigInteger maxFeePerGas = BigInteger.ONE;
    final String to = "0x0add5355";
    final PrivateTransaction1559 privateTx =
        PrivateTransaction1559.createTransaction(
            chainId,
            nonce,
            gasLimit,
            to,
            "",
            maxPriorityFeePerGas,
            maxFeePerGas,
            MOCK_ENCLAVE_KEY,
            MOCK_PRIVATE_FOR,
            RESTRICTED);
    byte[] encodedMessage = PrivateTransactionEncoder.encode(new RawPrivateTransaction(privateTx));
    final String hexMessage = Numeric.toHexString(encodedMessage);
    final PrivateTransaction1559 result =
        (PrivateTransaction1559)
            PrivateTransactionDecoder.decode(hexMessage).getPrivateTransaction();
    assertNotNull(result);
    assertEquals(nonce, result.getNonce());
    assertEquals(chainId, result.getChainId());
    assertEquals(maxPriorityFeePerGas, result.getMaxPriorityFeePerGas());
    assertEquals(maxFeePerGas, result.getMaxFeePerGas());
    assertEquals(gasLimit, result.getGasLimit());
    assertEquals(to, result.getTo());
    // Empty payload encodes/decodes as "0x".
    assertEquals("0x", result.getData());
    assertEquals(MOCK_ENCLAVE_KEY, result.getPrivateFrom());
    assertEquals(MOCK_PRIVATE_FOR, result.getPrivateFor().get());
    assertEquals(RESTRICTED, result.getRestriction());
}
public void checkCoinBaseHeight(final int height) throws VerificationException { checkArgument(height >= Block.BLOCK_HEIGHT_GENESIS); checkState(isCoinBase()); // Check block height is in coinbase input script final TransactionInput in = this.getInput(0); final ScriptBuilder builder = new ScriptBuilder(); builder.number(height); final byte[] expected = builder.build().program(); final byte[] actual = in.getScriptBytes(); if (actual.length < expected.length) { throw new VerificationException.CoinbaseHeightMismatch("Block height mismatch in coinbase."); } for (int scriptIdx = 0; scriptIdx < expected.length; scriptIdx++) { if (actual[scriptIdx] != expected[scriptIdx]) { throw new VerificationException.CoinbaseHeightMismatch("Block height mismatch in coinbase."); } } }
// A real mainnet coinbase (block 300,000) must pass the BIP34 height check
// without throwing.
@Test
public void testCoinbaseHeightCheck() {
    // Coinbase transaction from block 300,000
    ByteBuffer transactionBytes = ByteBuffer.wrap(ByteUtils.parseHex(
        "01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff4803e09304062f503253482f0403c86d53087ceca141295a00002e522cfabe6d6d7561cf262313da1144026c8f7a43e3899c44f6145f39a36507d36679a8b7006104000000000000000000000001c8704095000000001976a91480ad90d403581fa3bf46086a91b2d9d4125db6c188ac00000000"));
    final int height = 300000;
    final Transaction transaction = TESTNET.getDefaultSerializer().makeTransaction(transactionBytes);
    transaction.checkCoinBaseHeight(height);
}
// Builds a Statement from the parse tree, first extracting the data sources
// referenced by the tree and passing them along to the main build routine.
public Statement buildStatement(final ParserRuleContext parseTree) { return build(Optional.of(getSources(parseTree)), parseTree); }
// A SELECT without an explicit alias must produce a SingleColumn whose alias
// is Optional.empty() rather than a synthesized name.
@Test
public void shouldOmitSelectAliasIfNotPresent() {
    // Given:
    final SingleStatementContext stmt = givenQuery("SELECT COL0 FROM TEST1;");

    // When:
    final Query result = (Query) builder.buildStatement(stmt);

    // Then:
    assertThat(result.getSelect(), is(new Select(ImmutableList.of(
        new SingleColumn(
            column("COL0"),
            Optional.empty())
    ))));
}
/**
 * Handles an unexpected exception on the client response pipeline: logs it with the
 * derived INTERNAL status, cancels the call towards the remote side, and closes the
 * channel context.
 */
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
    final TriRpcStatus internalStatus = TriRpcStatus.INTERNAL.withCause(cause);
    final String description = "Meet Exception on ClientResponseHandler, status code is: " + internalStatus.code;
    LOGGER.warn(PROTOCOL_FAILED_SERIALIZE_TRIPLE, "", "", description, cause);
    // Abort the stream remotely before tearing down the local channel context.
    transportListener.cancelByRemote(Http2Error.INTERNAL_ERROR.code());
    ctx.close();
}
// exceptionCaught must both cancel the transport with INTERNAL_ERROR and close
// the channel context.
@Test
void testExceptionCaught() {
    RuntimeException exception = new RuntimeException();
    handler.exceptionCaught(ctx, exception);
    Mockito.verify(ctx).close();
    Mockito.verify(transportListener).cancelByRemote(Http2Error.INTERNAL_ERROR.code());
}
/**
 * Validates a client's stream-management acknowledgement value 'h' against the
 * server-side state, after range-checking it ([0, MASK], i.e. an unsigned 32-bit
 * value).
 *
 * @param h the acknowledged stanza count reported by the client
 * @return whether the acknowledgement is consistent with the server's bookkeeping
 * @throws IllegalArgumentException if h is negative or exceeds 2^32 - 1
 */
private synchronized boolean validateClientAcknowledgement(long h) {
    if (h < 0) {
        throw new IllegalArgumentException("Argument 'h' cannot be negative, but was: " + h);
    }
    if (h > MASK) {
        throw new IllegalArgumentException("Argument 'h' cannot be larger than 2^32 -1, but was: " + h);
    }

    final long previouslyProcessed = clientProcessedStanzas.get();

    // 'x' value of the newest stanza that is still awaiting acknowledgement,
    // or null when nothing is outstanding.
    final Long newestUnackedX;
    if (unacknowledgedServerStanzas.isEmpty()) {
        newestUnackedX = null;
    } else {
        newestUnackedX = unacknowledgedServerStanzas.getLast().x;
    }

    return validateClientAcknowledgement(h, previouslyProcessed, newestUnackedX);
}
// Rollover edge case: client ack wrapped past 2^32-1 back to 0 while the
// newest unacked stanza is at x=4; the acknowledgement must still validate.
@Test
public void testValidateClientAcknowledgement_rollover_edgecase3a() throws Exception {
    // Setup test fixture.
    final long MAX = new BigInteger( "2" ).pow( 32 ).longValue() - 1;
    final long h = 0;
    final long oldH = MAX-2;
    final Long lastUnackedX = 4L;

    // Execute system under test.
    final boolean result = StreamManager.validateClientAcknowledgement(h, oldH, lastUnackedX);

    // Verify results.
    assertTrue(result);
}
/**
 * Recursively cleans a parameter-definition map: MAP-typed params get their nested
 * values cleaned, optional params without a value/expression are dropped, required
 * params are validated to be populated, and intermediate metadata is stripped from
 * the final result.
 *
 * @param params parameter definitions keyed by name; may be null or empty
 * @return the cleaned map (the same instance is returned for null/empty input)
 * @throws RuntimeException via {@code Checks.checkTrue} when a required parameter has
 *         neither a value nor an expression
 */
@SuppressWarnings({"SimplifyBooleanReturn"})
public static Map<String, ParamDefinition> cleanupParams(Map<String, ParamDefinition> params) {
  if (params == null || params.isEmpty()) {
    return params;
  }

  // Pass 1: rebuild MAP-typed params with recursively cleaned nested values.
  Map<String, ParamDefinition> mapped =
      params.entrySet().stream()
          .collect(
              MapHelper.toListMap(
                  Map.Entry::getKey,
                  p -> {
                    ParamDefinition param = p.getValue();
                    if (param.getType() == ParamType.MAP) {
                      MapParamDefinition mapParamDef = param.asMapParamDef();
                      // Optional map without a value: keep as-is, nothing to recurse into.
                      if (mapParamDef.getValue() == null
                          && (mapParamDef.getInternalMode() == InternalParamMode.OPTIONAL)) {
                        return mapParamDef;
                      }
                      // BUGFIX: the builder previously called .name(...) twice;
                      // the redundant duplicate call was removed.
                      return MapParamDefinition.builder()
                          .name(mapParamDef.getName())
                          .value(cleanupParams(mapParamDef.getValue()))
                          .expression(mapParamDef.getExpression())
                          .validator(mapParamDef.getValidator())
                          .tags(mapParamDef.getTags())
                          .mode(mapParamDef.getMode())
                          .meta(mapParamDef.getMeta())
                          .build();
                    } else {
                      return param;
                    }
                  }));

  // Pass 2: drop empty optional params; enforce that required params are populated.
  Map<String, ParamDefinition> filtered =
      mapped.entrySet().stream()
          .filter(
              p -> {
                ParamDefinition param = p.getValue();
                if (param.getInternalMode() == InternalParamMode.OPTIONAL) {
                  if (param.getValue() == null && param.getExpression() == null) {
                    return false;
                  } else if (param.getType() == ParamType.MAP
                      && param.asMapParamDef().getValue() != null
                      && param.asMapParamDef().getValue().isEmpty()) {
                    // An optional MAP that cleaned down to an empty map is dropped too.
                    return false;
                  } else {
                    return true;
                  }
                } else {
                  Checks.checkTrue(
                      param.getValue() != null || param.getExpression() != null,
                      String.format(
                          "[%s] is a required parameter (type=[%s])",
                          p.getKey(), param.getType()));
                  return true;
                }
              })
          .collect(MapHelper.toListMap(Map.Entry::getKey, Map.Entry::getValue));

  return cleanIntermediateMetadata(filtered);
}
// Nested-map cleanup: for every ParamMode, both children of the outer map
// (a valued STRING and an expression-backed MAP) must survive cleanupParams.
@Test
public void testCleanupWithMapInsideMap() throws JsonProcessingException {
    for (ParamMode mode : ParamMode.values()) {
        Map<String, ParamDefinition> allParams =
            parseParamDefMap(
                String.format(
                    "{'map': {'type': 'MAP','value': {'present': {'type': 'STRING', 'mode': '%s', 'value': 'hello'},"
                        + " 'm':{'type':'MAP','mode': '%s', 'expression':'job_param'}}}}",
                    mode, mode));
        Map<String, ParamDefinition> cleanedParams = ParamsMergeHelper.cleanupParams(allParams);
        assertEquals(2, cleanedParams.get("map").asMapParamDef().getValue().size());
    }
}
/**
 * Validates that a {@link CSVFormat} is usable for parsing: a non-empty header with
 * non-blank, effectively unique column names, exact header-case matching, and no
 * header-record skipping.
 *
 * <p>Check order is significant: each failed check raises immediately, so callers
 * see the first violated constraint.
 */
static void validateCsvFormat(CSVFormat format) {
    String[] header =
        checkArgumentNotNull(format.getHeader(), "Illegal %s: header is required", CSVFormat.class);

    checkArgument(header.length > 0, "Illegal %s: header cannot be empty", CSVFormat.class);

    checkArgument(
        !format.getAllowMissingColumnNames(),
        "Illegal %s: cannot allow missing column names",
        CSVFormat.class);

    checkArgument(
        !format.getIgnoreHeaderCase(), "Illegal %s: cannot ignore header case", CSVFormat.class);

    checkArgument(
        !format.getAllowDuplicateHeaderNames(),
        "Illegal %s: cannot allow duplicate header names",
        CSVFormat.class);

    // Every declared column must have a usable (non-null, non-empty) name.
    for (int i = 0; i < header.length; i++) {
      checkArgument(
          !Strings.isNullOrEmpty(header[i]),
          "Illegal %s: column name is required",
          CSVFormat.class);
    }

    checkArgument(
        !format.getSkipHeaderRecord(),
        "Illegal %s: cannot skip header record because the header is already accounted for",
        CSVFormat.class);
}
// A format with ignoreHeaderCase enabled must be rejected with the exact
// diagnostic message.
@Test
public void givenCSVFormatThatIgnoresHeaderCase_throwsException() {
    CSVFormat format = csvFormatWithHeader().withIgnoreHeaderCase(true);
    String gotMessage =
        assertThrows(
            IllegalArgumentException.class, () -> CsvIOParseHelpers.validateCsvFormat(format))
            .getMessage();
    assertEquals(
        "Illegal class org.apache.commons.csv.CSVFormat: cannot ignore header case", gotMessage);
}
/**
 * Renders a Redis command and its parameters for logging, masking the parameters
 * of AUTH so credentials never reach the logs.
 */
public static String toString(RedisCommand<?> command, Object... params) {
    final boolean mustMaskParams = RedisCommands.AUTH.equals(command);
    if (mustMaskParams) {
        return "command: " + command + ", params: (password masked)";
    }
    return "command: " + command + ", params: " + LogHelper.toString(params);
}
// Collections longer than 10 elements must be rendered truncated to the first
// ten entries followed by "...", for every common element type.
@Test
public void toStringWithBigCollections() {
    List<String> strings = Collections.nCopies(15, "0");
    List<Integer> ints = Collections.nCopies(15, 1);
    List<Long> longs = Collections.nCopies(15, 2L);
    List<Double> doubles = Collections.nCopies(15, 3.1D);
    List<Float> floats = Collections.nCopies(15, 4.2F);
    List<Byte> bytes = Collections.nCopies(15, (byte)5);
    List<Character> chars = Collections.nCopies(15, '6');

    assertThat(LogHelper.toString(strings)).isEqualTo("[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ...]");
    assertThat(LogHelper.toString(ints)).isEqualTo("[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...]");
    assertThat(LogHelper.toString(longs)).isEqualTo("[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, ...]");
    assertThat(LogHelper.toString(doubles)).isEqualTo("[3.1, 3.1, 3.1, 3.1, 3.1, 3.1, 3.1, 3.1, 3.1, 3.1, ...]");
    assertThat(LogHelper.toString(floats)).isEqualTo("[4.2, 4.2, 4.2, 4.2, 4.2, 4.2, 4.2, 4.2, 4.2, 4.2, ...]");
    assertThat(LogHelper.toString(bytes)).isEqualTo("[5, 5, 5, 5, 5, 5, 5, 5, 5, 5, ...]");
    assertThat(LogHelper.toString(chars)).isEqualTo("[6, 6, 6, 6, 6, 6, 6, 6, 6, 6, ...]");
}
/**
 * Correlates an incoming response with its pending request (by opaque id) and
 * completes it: either through the registered async callback, or by fulfilling
 * the synchronous waiter and releasing its permit. Unmatched responses are
 * logged and dropped.
 */
public void processResponseCommand(ChannelHandlerContext ctx, RemotingCommand cmd) {
    final int opaque = cmd.getOpaque();
    final ResponseFuture future = responseTable.get(opaque);

    if (future == null) {
        // No pending request for this opaque id — likely timed out and was cleaned up.
        log.warn("receive response, cmd={}, but not matched any request, address={}", cmd,
            RemotingHelper.parseChannelRemoteAddr(ctx.channel()));
        return;
    }

    future.setResponseCommand(cmd);
    responseTable.remove(opaque);

    if (future.getInvokeCallback() != null) {
        executeInvokeCallback(future);
    } else {
        future.putResponse(cmd);
        future.release();
    }
}
// Verifies that processResponseCommand completes a registered ResponseFuture and that the
// flow-control semaphore permit is released exactly once via the callback path.
@Test
public void testProcessResponseCommand() throws InterruptedException {
    final Semaphore semaphore = new Semaphore(0);
    ResponseFuture responseFuture = new ResponseFuture(null, 1, 3000, new InvokeCallback() {
        @Override
        public void operationComplete(ResponseFuture responseFuture) {
        }

        @Override
        public void operationSucceed(RemotingCommand response) {
            assertThat(semaphore.availablePermits()).isEqualTo(0);
        }

        @Override
        public void operationFail(Throwable throwable) {
        }
    }, new SemaphoreReleaseOnlyOnce(semaphore));
    remotingAbstract.responseTable.putIfAbsent(1, responseFuture);
    RemotingCommand response = RemotingCommand.createResponseCommand(0, "Foo");
    response.setOpaque(1);
    remotingAbstract.processResponseCommand(null, response);
    // Acquire the release permit after call back
    semaphore.acquire(1);
    assertThat(semaphore.availablePermits()).isEqualTo(0);
}
// Exposes the wrapped SchemaTransformer (package-private, primarily for tests).
SchemaTransformer delegate() {
    return transformer;
}
// Verifies that the "*:5" spec produces a SinceVersionSchemaTransformer delegate
// applying since-version 5 to all schemas.
@Test
void shouldAllSchemasAsOnlyInput() {
    final SchemaTransformerFactory schemaTransformerFactory = new SchemaTransformerFactory("*:5");
    final SchemaTransformer delegate = schemaTransformerFactory.delegate();
    assertInstanceOf(SinceVersionSchemaTransformer.class, delegate);
    final SinceVersionSchemaTransformer transformer = (SinceVersionSchemaTransformer) delegate;
    assertEquals(5, transformer.sinceVersion());
}
@CanIgnoreReturnValue @SuppressWarnings("deprecation") // TODO(b/134064106): design an alternative to no-arg check() public final Ordered containsExactly() { return check().about(iterableEntries()).that(checkNotNull(actual).entries()).containsExactly(); }
// Verifies the key/value vararg form of containsExactly together with inOrder().
@Test
public void containsExactlyVarargInOrder() {
    ImmutableMultimap<Integer, String> actual =
        ImmutableMultimap.of(3, "one", 3, "six", 3, "two", 4, "five", 4, "four");
    assertThat(actual)
        .containsExactly(3, "one", 3, "six", 3, "two", 4, "five", 4, "four")
        .inOrder();
}
// Returns the plan summary for the given step, delegating to the recursive overload
// with an empty indentation prefix.
public String summarize(final ExecutionStep<?> step) {
    return summarize(step, "").summary;
}
// Verifies that summarizing an unknown ExecutionStep implementation fails loudly.
@Test
public void shouldThrowOnUnsupportedStepType() {
    // Given:
    final ExecutionStep<?> step = mock(ExecutionStep.class);

    // When:
    assertThrows(
        UnsupportedOperationException.class,
        () -> planSummaryBuilder.summarize(step)
    );
}
/**
 * Entry point for the FS-to-CS configuration converter.
 *
 * <p>Exits through the injectable {@code exitFunction} (rather than calling
 * {@code System.exit} directly) so tests can intercept the exit code. Any throwable is
 * logged as FATAL and mapped to exit code -1.
 */
public static void main(String[] args) {
    try {
        FSConfigToCSConfigArgumentHandler fsConfigConversionArgumentHandler =
            new FSConfigToCSConfigArgumentHandler();
        int exitCode = fsConfigConversionArgumentHandler.parseAndConvert(args);
        if (exitCode != 0) {
            LOG.error(FATAL, "Error while starting FS configuration conversion, "
                + "see previous error messages for details!");
        }
        exitFunction.accept(exitCode);
    } catch (Throwable t) {
        LOG.error(FATAL, "Error while starting FS configuration conversion!", t);
        exitFunction.accept(-1);
    }
}
// Verifies that the short -h switch prints help text and exits with code 0.
@Test
public void testShortHelpSwitch() {
    FSConfigToCSConfigConverterMain.main(new String[] {"-h"});

    verifyHelpText();
    assertEquals("Exit code", 0, exitFunc.exitCode);
}
/**
 * Resolves and caches a stable textual name for a method in the form
 * {@code declaringClass:methodName(paramType1,paramType2,...)} using canonical parameter
 * type names.
 *
 * <p>Uses double-checked locking over {@code methodNameMap} so the name is built at most
 * once per method. NOTE(review): this is only safe if methodNameMap is a thread-safe map
 * (e.g. ConcurrentHashMap) — confirm the field declaration.
 *
 * @param method the method to name; must not be null
 * @return the cached, fully qualified method signature string
 * @throws IllegalArgumentException if {@code method} is null
 */
public static String resolveMethodName(Method method) {
    if (method == null) {
        throw new IllegalArgumentException("Null method");
    }
    String methodName = methodNameMap.get(method);
    if (methodName == null) {
        synchronized (LOCK) {
            // Re-check under the lock: another thread may have populated the cache.
            methodName = methodNameMap.get(method);
            if (methodName == null) {
                StringBuilder sb = new StringBuilder();
                String className = method.getDeclaringClass().getName();
                String name = method.getName();
                Class<?>[] params = method.getParameterTypes();
                sb.append(className).append(":").append(name);
                sb.append("(");
                int paramPos = 0;
                for (Class<?> clazz : params) {
                    // Canonical names render arrays as "long[]" and nested classes with dots.
                    sb.append(clazz.getCanonicalName());
                    if (++paramPos < params.length) {
                        sb.append(",");
                    }
                }
                sb.append(")");
                methodName = sb.toString();
                methodNameMap.put(method, methodName);
            }
        }
    }
    return methodName;
}
// Verifies the "Class:name(paramTypes)" signature format, including array and boxed
// parameter types rendered via canonical names.
@Test
public void testResolveMethodName() {
    Method fooMethod = null;
    for (Method m : GoodClass.class.getMethods()) {
        if (m.getName().contains("foo")) {
            fooMethod = m;
            break;
        }
    }
    assertNotNull(fooMethod);
    assertEquals("com.alibaba.csp.sentinel.util.MethodUtilTest$GoodClass:foo(long[],java.lang.String,java.lang.Integer[])",
        MethodUtil.resolveMethodName(fooMethod));
    Method bazMethod = null;
    for (Method m : GoodClass.class.getMethods()) {
        if (m.getName().contains("baz")) {
            bazMethod = m;
            break;
        }
    }
    assertNotNull(bazMethod);
    assertEquals("com.alibaba.csp.sentinel.util.MethodUtilTest$GoodClass:baz(double)",
        MethodUtil.resolveMethodName(bazMethod));
}
@Override public Integer getCategoryLevel(Long id) { if (Objects.equals(id, PARENT_ID_NULL)) { return 0; } int level = 1; // for 的原因,是因为避免脏数据,导致可能的死循环。一般不会超过 100 层哈 for (int i = 0; i < Byte.MAX_VALUE; i++) { // 如果没有父节点,break 结束 ProductCategoryDO category = productCategoryMapper.selectById(id); if (category == null || Objects.equals(category.getParentId(), PARENT_ID_NULL)) { break; } // 继续递归父节点 level++; id = category.getParentId(); } return level; }
// Verifies level computation for a three-deep category chain.
@Test
public void testGetCategoryLevel() {
    // Mock data: build a root -> child -> grandchild chain
    ProductCategoryDO category1 = randomPojo(ProductCategoryDO.class, o -> o.setParentId(PARENT_ID_NULL));
    productCategoryMapper.insert(category1);
    ProductCategoryDO category2 = randomPojo(ProductCategoryDO.class, o -> o.setParentId(category1.getId()));
    productCategoryMapper.insert(category2);
    ProductCategoryDO category3 = randomPojo(ProductCategoryDO.class, o -> o.setParentId(category2.getId()));
    productCategoryMapper.insert(category3);
    // Call and assert
    assertEquals(productCategoryService.getCategoryLevel(category1.getId()), 1);
    assertEquals(productCategoryService.getCategoryLevel(category2.getId()), 2);
    assertEquals(productCategoryService.getCategoryLevel(category3.getId()), 3);
}
/**
 * Tries to reserve slots for the given execution graph according to its scheduling plan.
 *
 * <p>The graph is started on the main-thread executor and wired to report internal task
 * failures back to this scheduler before reservation is attempted. Returns a successful
 * assignment only when the slot allocator could reserve the required resources.
 *
 * @param executionGraphWithVertexParallelism graph plus its vertex parallelism / plan
 * @return success with the slot-assigned graph, or notPossible if reservation failed
 */
@Override
public CreatingExecutionGraph.AssignmentResult tryToAssignSlots(
        CreatingExecutionGraph.ExecutionGraphWithVertexParallelism executionGraphWithVertexParallelism) {
    final ExecutionGraph executionGraph =
            executionGraphWithVertexParallelism.getExecutionGraph();
    executionGraph.start(componentMainThreadExecutor);
    executionGraph.setInternalTaskFailuresListener(
            new UpdateSchedulerNgOnInternalFailuresListener(this));

    final JobSchedulingPlan jobSchedulingPlan =
            executionGraphWithVertexParallelism.getJobSchedulingPlan();

    return slotAllocator
            .tryReserveResources(jobSchedulingPlan)
            .map(reservedSlots -> assignSlotsToExecutionGraph(executionGraph, reservedSlots))
            .map(CreatingExecutionGraph.AssignmentResult::success)
            .orElseGet(CreatingExecutionGraph.AssignmentResult::notPossible);
}
// Verifies that tryToAssignSlots reports "not possible" when the slot allocator
// cannot reserve the requested resources.
@Test
void testTryToAssignSlotsReturnsNotPossibleIfExpectedResourcesAreNotAvailable()
        throws Exception {
    final TestingSlotAllocator slotAllocator =
            TestingSlotAllocator.newBuilder()
                    .setTryReserveResourcesFunction(ignored -> Optional.empty())
                    .build();

    final AdaptiveScheduler adaptiveScheduler =
            new AdaptiveSchedulerBuilder(
                            createJobGraph(), mainThreadExecutor, EXECUTOR_RESOURCE.getExecutor())
                    .setSlotAllocator(slotAllocator)
                    .build();

    final CreatingExecutionGraph.AssignmentResult assignmentResult =
            adaptiveScheduler.tryToAssignSlots(
                    CreatingExecutionGraph.ExecutionGraphWithVertexParallelism.create(
                            new StateTrackingMockExecutionGraph(), JobSchedulingPlan.empty()));

    assertThat(assignmentResult.isSuccess()).isFalse();
}
// Scheduled-task entry point: each invocation performs a single health-check pass.
@Override
public void run() {
    doHealthCheck();
}
// Verifies that an unhealthy instance that has not yet expired is kept after a check pass.
@Test
void testRunUnhealthyInstanceWithoutExpire() {
    injectInstance(false, 0);
    beatCheckTask.run();
    assertFalse(client.getAllInstancePublishInfo().isEmpty());
}
/**
 * Decides whether another record may be written without exceeding the max file size.
 *
 * <p>Size checks are amortized: the actual data size is only sampled once the written
 * record count reaches {@code recordCountForNextSizeCheck}, and the next check point is
 * rescheduled based on the observed average record size (bounded by the Parquet
 * min/max record-count-for-check constants).
 *
 * @return false when the file is within ~2 average records of the size limit
 */
public boolean canWrite() {
    long writtenCount = getWrittenRecordCount();
    if (writtenCount >= recordCountForNextSizeCheck) {
        long dataSize = getDataSize();
        // In some very extreme cases, like all records are same value, then it's possible
        // the dataSize is much lower than the writtenRecordCount(high compression ratio),
        // causing avgRecordSize to 0, we'll force the avgRecordSize to 1 for such cases.
        long avgRecordSize = Math.max(dataSize / writtenCount, 1);
        // Follow the parquet block size check logic here, return false
        // if it is within ~2 records of the limit
        if (dataSize > (maxFileSize - avgRecordSize * 2)) {
            return false;
        }
        recordCountForNextSizeCheck = writtenCount + Math.min(
            // Do check it in the halfway
            Math.max(ParquetProperties.DEFAULT_MINIMUM_RECORD_COUNT_FOR_CHECK,
                (maxFileSize / avgRecordSize - writtenCount) / 2),
            ParquetProperties.DEFAULT_MAXIMUM_RECORD_COUNT_FOR_CHECK);
    }
    return true;
}
// Verifies the amortized size-check behavior of canWrite(): the initial check point, the
// rescheduling at max/halfway cadence depending on average record size, and the final
// refusal once the data size reaches the max file size.
@Test
public void testCanWrite() throws IOException {
    BloomFilter filter = BloomFilterFactory.createBloomFilter(1000, 0.0001, 10000,
        BloomFilterTypeCode.DYNAMIC_V0.name());
    StorageConfiguration conf = HoodieTestUtils.getDefaultStorageConfWithDefaults();
    Schema schema = new Schema.Parser().parse(HoodieTestDataGenerator.TRIP_EXAMPLE_SCHEMA);
    HoodieAvroWriteSupport writeSupport =
        new HoodieAvroWriteSupport(new AvroSchemaConverter().convert(schema), schema,
            Option.of(filter), new Properties());
    long maxFileSize = 2 * 1024 * 1024;
    HoodieParquetConfig<HoodieAvroWriteSupport> parquetConfig =
        new HoodieParquetConfig<>(writeSupport, CompressionCodecName.GZIP,
            ParquetWriter.DEFAULT_BLOCK_SIZE, ParquetWriter.DEFAULT_PAGE_SIZE, maxFileSize,
            conf, 0, true);

    StoragePath filePath = new StoragePath(new StoragePath(tempDir.toUri()), "test_fileSize.parquet");
    try (MockHoodieParquetWriter writer = new MockHoodieParquetWriter(filePath, parquetConfig)) {
        // doesn't start write, should return true
        assertTrue(writer.canWrite());
        // recordCountForNextSizeCheck should be DEFAULT_MINIMUM_RECORD_COUNT_FOR_CHECK
        assertEquals(writer.getRecordCountForNextSizeCheck(), DEFAULT_MINIMUM_RECORD_COUNT_FOR_CHECK);

        // 10 bytes per record
        writer.setCurrentDataSize(1000);
        writer.setWrittenRecordCount(writer.getRecordCountForNextSizeCheck());
        assertTrue(writer.canWrite());
        // Should check it with more DEFAULT_MAXIMUM_RECORD_COUNT_FOR_CHECK records
        assertEquals(writer.getRecordCountForNextSizeCheck(),
            writer.getWrittenRecordCount() + DEFAULT_MAXIMUM_RECORD_COUNT_FOR_CHECK);

        // 80 bytes per record
        writer.setCurrentDataSize(808000);
        writer.setWrittenRecordCount(writer.getRecordCountForNextSizeCheck());
        assertTrue(writer.canWrite());
        // Should check it half way, not DEFAULT_MAXIMUM_RECORD_COUNT_FOR_CHECK
        long avgRecordSize = writer.getDataSize() / writer.getWrittenRecordCount();
        long recordsDelta = (maxFileSize / avgRecordSize - writer.getWrittenRecordCount()) / 2;
        assertEquals(writer.getRecordCountForNextSizeCheck(),
            writer.getWrittenRecordCount() + recordsDelta);

        writer.setCurrentDataSize(maxFileSize);
        writer.setWrittenRecordCount(writer.getRecordCountForNextSizeCheck());
        assertFalse(writer.canWrite(),
            "The writer stops write new records while the file doesn't reach the max file size limit");
    }
}
// Factory for a DeleteOptions instance with every option at its default value.
public static DeleteOptions defaults() {
    return new DeleteOptions();
}
// Verifies that the default DeleteOptions are non-recursive.
@Test
public void defaults() throws IOException {
    DeleteOptions options = DeleteOptions.defaults();
    assertEquals(false, options.isRecursive());
}
/**
 * Returns the (cached) OkHttpClient configured for the given option combination.
 *
 * @param keepAlive whether sockets should use TCP keep-alive
 * @param skipTLSVerify whether TLS certificate verification is disabled
 * @return the client for this parameter combination
 * @throws RuntimeException wrapping any failure while building the cached client
 */
public OkHttpClient get(boolean keepAlive, boolean skipTLSVerify) {
    final Parameters cacheKey = Parameters.fromBoolean(keepAlive, skipTLSVerify);
    try {
        return cache.get(cacheKey);
    } catch (ExecutionException e) {
        // Cache loaders throw checked ExecutionException; surface the cause unchecked.
        throw new RuntimeException(e);
    }
}
// Verifies that a client built with keepAlive + skipTLSVerify has SO_KEEPALIVE enabled
// and can complete a request against the test server.
@Test
public void testWithSkipTlsVerifyAndKeepAlive()
        throws IOException, NoSuchAlgorithmException, KeyStoreException, KeyManagementException {
    final ParameterizedHttpClientProvider provider = new ParameterizedHttpClientProvider(client(null));
    final OkHttpClient okHttpClient = provider.get(true, true);

    assertThat(okHttpClient.sslSocketFactory().createSocket().getOption(StandardSocketOptions.SO_KEEPALIVE)).isTrue();

    try (Response response = okHttpClient.newCall(new Request.Builder().url(server.url("/")).get().build()).execute()) {
        assertThat(response.isSuccessful()).isTrue();
    }
}
/**
 * Sends a JSON-RPC request over the websocket and returns a future for its typed reply.
 *
 * <p>The pending request is registered under its id BEFORE the bytes are sent so a fast
 * reply cannot race past the registration. If sending fails, the registered request is
 * closed with the IOException, which completes the future exceptionally.
 *
 * @param request the JSON-RPC request to send
 * @param responseType the class used to deserialize the reply
 * @return a future completed when the matching response (or a send failure) arrives
 */
@Override
public <T extends Response> CompletableFuture<T> sendAsync(
        Request request, Class<T> responseType) {
    CompletableFuture<T> result = new CompletableFuture<>();
    long requestId = request.getId();
    requestForId.put(requestId, new WebSocketRequest<>(result, responseType));
    try {
        sendRequest(request, requestId);
    } catch (IOException e) {
        closeRequest(requestId, e);
    }
    return result;
}
// Verifies that a request is completed exceptionally when the scheduled timeout fires
// (the stubbed scheduler runs the timeout task immediately).
@Test
public void testCancelRequestAfterTimeout() {
    when(executorService.schedule(
            any(Runnable.class),
            eq(WebSocketService.REQUEST_TIMEOUT),
            eq(TimeUnit.SECONDS)))
        .then(
            invocation -> {
                Runnable runnable = invocation.getArgument(0, Runnable.class);
                runnable.run();
                return null;
            });

    CompletableFuture<Web3ClientVersion> reply = service.sendAsync(request, Web3ClientVersion.class);

    assertTrue(reply.isDone());
    assertThrows(ExecutionException.class, () -> reply.get());
}
/**
 * Returns the array builder registered under {@code name}, creating and caching it on
 * first access. The name is validated against the config definition before creation.
 *
 * @param name the config field name of the array
 * @return the (cached) array builder for that field
 */
public Array getArray(String name) {
    Array existing = arrayMap.get(name);
    if (existing != null) {
        return existing;
    }
    validateArray(name);
    Array created = new Array(configDefinition, name);
    arrayMap.put(name, created);
    return created;
}
// Verifies that sparse array indices (3, 2, 6, 4) are compacted into a dense slime array
// preserving insertion order of set() calls.
@Test
public void require_that_arrays_can_be_indexed_simple_values() {
    ConfigPayloadBuilder builder = new ConfigPayloadBuilder();
    ConfigPayloadBuilder.Array array = builder.getArray("foo");
    array.set(3, "bar");
    array.set(2, "baz");
    array.set(6, "bim");
    array.set(4, "bum");
    Cursor root = createSlime(builder);
    Cursor a = root.field("foo");
    assertEquals("bar", a.entry(0).asString());
    assertEquals("baz", a.entry(1).asString());
    assertEquals("bim", a.entry(2).asString());
    assertEquals("bum", a.entry(3).asString());
}
/**
 * Cumulative distribution function P(X &lt;= k) of the binomial distribution.
 *
 * @param k the evaluation point
 * @return 0 below the support, 1 at or above n, otherwise the regularized incomplete
 *         beta expression I_{1-p}(n - k, k + 1)
 */
@Override
public double cdf(double k) {
    // Short-circuit values outside the distribution's support.
    if (k < 0) {
        return 0.0;
    }
    if (k >= n) {
        return 1.0;
    }
    return Beta.regularizedIncompleteBetaFunction(n - k, k + 1, 1 - p);
}
// Verifies Binomial(100, 0.3) CDF values against reference values (computed with R).
@Test
public void testCdf() {
    System.out.println("cdf");
    BinomialDistribution instance = new BinomialDistribution(100, 0.3);
    instance.rand();
    assertEquals(3.234477e-16, instance.cdf(0), 1E-20);
    assertEquals(1.418549e-14, instance.cdf(1), 1E-18);
    assertEquals(1.555566e-06, instance.cdf(10), 1E-10);
    assertEquals(0.01646285, instance.cdf(20), 1E-7);
    assertEquals(0.5491236, instance.cdf(30), 1E-7);
    assertEquals(1.0, instance.cdf(100), 1E-7);
}
// Synchronously executes this request through the configured Web3j service and
// deserializes the reply into the request's response type.
public T send() throws IOException {
    return web3jService.send(this, responseType);
}
// Verifies the exact JSON-RPC payload produced for eth_getTransactionByHash.
@Test
public void testEthGetTransactionByHash() throws Exception {
    web3j.ethGetTransactionByHash(
            "0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238")
        .send();

    verifyResult(
        "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getTransactionByHash\",\"params\":["
            + "\"0xb903239f8543d04b5dc1ba6579132b143087c68db1b2168786408fcbce568238\"],"
            + "\"id\":1}");
}
/**
 * Transitions the engine from NEW to RUNNING and starts all reactors.
 *
 * <p>The CAS loop guarantees exactly one caller wins the NEW -&gt; RUNNING transition;
 * any caller observing a non-NEW state fails fast.
 *
 * @throws IllegalStateException if the engine is not in the NEW state
 */
public void start() {
    logger.info("Starting " + reactorCount + " reactors of type [" + reactorType() + "]");

    for (; ; ) {
        State oldState = state.get();
        if (oldState != NEW) {
            throw new IllegalStateException("Can't start TpcEngine, it isn't in NEW state.");
        }
        // Retry on CAS failure: a concurrent transition may have raced us.
        if (state.compareAndSet(oldState, RUNNING)) {
            break;
        }
    }

    for (Reactor reactor : reactors) {
        reactor.start();
    }
}
// Verifies that starting an already-running engine fails with IllegalStateException.
@Test(expected = IllegalStateException.class)
public void start_whenRunning() {
    engine = new TpcEngine();
    engine.start();
    engine.start();
}
/**
 * Validates a function-config update against the existing config and returns the merged
 * result.
 *
 * <p>Identity fields (tenant/namespace/name) and operationally immutable fields
 * (input topics and their regex flag, output serde/schema, processing guarantees,
 * ordering flags, runtime, autoAck, subscription name) must not change; any attempt
 * throws {@link IllegalArgumentException}. All other non-empty/non-null fields of
 * {@code newConfig} overwrite the corresponding fields of the merged copy.
 *
 * @param existingConfig the currently deployed function config
 * @param newConfig the requested update (may be partial)
 * @return a merged config based on {@code existingConfig} with permitted overrides applied
 * @throws IllegalArgumentException if an immutable field differs between the two configs
 */
public static FunctionConfig validateUpdate(FunctionConfig existingConfig, FunctionConfig newConfig) {
    FunctionConfig mergedConfig = existingConfig.toBuilder().build();
    // Identity must be stable across updates.
    if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
        throw new IllegalArgumentException("Tenants differ");
    }
    if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
        throw new IllegalArgumentException("Namespaces differ");
    }
    if (!existingConfig.getName().equals(newConfig.getName())) {
        throw new IllegalArgumentException("Function Names differ");
    }
    if (!StringUtils.isEmpty(newConfig.getClassName())) {
        mergedConfig.setClassName(newConfig.getClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getJar())) {
        mergedConfig.setJar(newConfig.getJar());
    }
    if (newConfig.getInputSpecs() == null) {
        newConfig.setInputSpecs(new HashMap<>());
    }
    if (mergedConfig.getInputSpecs() == null) {
        mergedConfig.setInputSpecs(new HashMap<>());
    }
    // Normalize all four ways of declaring inputs into newConfig.getInputSpecs().
    if (newConfig.getInputs() != null) {
        newConfig.getInputs().forEach((topicName -> {
            newConfig.getInputSpecs().put(topicName,
                    ConsumerConfig.builder().isRegexPattern(false).build());
        }));
    }
    if (newConfig.getTopicsPattern() != null && !newConfig.getTopicsPattern().isEmpty()) {
        newConfig.getInputSpecs().put(newConfig.getTopicsPattern(), ConsumerConfig.builder()
                .isRegexPattern(true)
                .build());
    }
    if (newConfig.getCustomSerdeInputs() != null) {
        newConfig.getCustomSerdeInputs().forEach((topicName, serdeClassName) -> {
            newConfig.getInputSpecs().put(topicName, ConsumerConfig.builder()
                    .serdeClassName(serdeClassName)
                    .isRegexPattern(false)
                    .build());
        });
    }
    if (newConfig.getCustomSchemaInputs() != null) {
        newConfig.getCustomSchemaInputs().forEach((topicName, schemaClassname) -> {
            newConfig.getInputSpecs().put(topicName, ConsumerConfig.builder()
                    .schemaType(schemaClassname)
                    .isRegexPattern(false)
                    .build());
        });
    }
    // Input topics may only be re-configured, never added; the regex flag is immutable.
    if (!newConfig.getInputSpecs().isEmpty()) {
        newConfig.getInputSpecs().forEach((topicName, consumerConfig) -> {
            if (!existingConfig.getInputSpecs().containsKey(topicName)) {
                throw new IllegalArgumentException("Input Topics cannot be altered");
            }
            if (consumerConfig.isRegexPattern() != existingConfig.getInputSpecs().get(topicName).isRegexPattern()) {
                throw new IllegalArgumentException(
                        "isRegexPattern for input topic " + topicName + " cannot be altered");
            }
            mergedConfig.getInputSpecs().put(topicName, consumerConfig);
        });
    }
    if (!StringUtils.isEmpty(newConfig.getOutputSerdeClassName()) && !newConfig.getOutputSerdeClassName()
            .equals(existingConfig.getOutputSerdeClassName())) {
        throw new IllegalArgumentException("Output Serde mismatch");
    }
    if (!StringUtils.isEmpty(newConfig.getOutputSchemaType()) && !newConfig.getOutputSchemaType()
            .equals(existingConfig.getOutputSchemaType())) {
        throw new IllegalArgumentException("Output Schema mismatch");
    }
    if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
        mergedConfig.setLogTopic(newConfig.getLogTopic());
    }
    if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
            .equals(existingConfig.getProcessingGuarantees())) {
        throw new IllegalArgumentException("Processing Guarantees cannot be altered");
    }
    if (newConfig.getRetainOrdering() != null && !newConfig.getRetainOrdering()
            .equals(existingConfig.getRetainOrdering())) {
        throw new IllegalArgumentException("Retain Ordering cannot be altered");
    }
    if (newConfig.getRetainKeyOrdering() != null && !newConfig.getRetainKeyOrdering()
            .equals(existingConfig.getRetainKeyOrdering())) {
        throw new IllegalArgumentException("Retain Key Ordering cannot be altered");
    }
    if (!StringUtils.isEmpty(newConfig.getOutput())) {
        mergedConfig.setOutput(newConfig.getOutput());
    }
    if (newConfig.getUserConfig() != null) {
        mergedConfig.setUserConfig(newConfig.getUserConfig());
    }
    if (newConfig.getSecrets() != null) {
        mergedConfig.setSecrets(newConfig.getSecrets());
    }
    if (newConfig.getRuntime() != null && !newConfig.getRuntime().equals(existingConfig.getRuntime())) {
        throw new IllegalArgumentException("Runtime cannot be altered");
    }
    if (newConfig.getAutoAck() != null && !newConfig.getAutoAck().equals(existingConfig.getAutoAck())) {
        throw new IllegalArgumentException("AutoAck cannot be altered");
    }
    if (newConfig.getMaxMessageRetries() != null) {
        mergedConfig.setMaxMessageRetries(newConfig.getMaxMessageRetries());
    }
    if (!StringUtils.isEmpty(newConfig.getDeadLetterTopic())) {
        mergedConfig.setDeadLetterTopic(newConfig.getDeadLetterTopic());
    }
    if (!StringUtils.isEmpty(newConfig.getSubName()) && !newConfig.getSubName()
            .equals(existingConfig.getSubName())) {
        throw new IllegalArgumentException("Subscription Name cannot be altered");
    }
    if (newConfig.getParallelism() != null) {
        mergedConfig.setParallelism(newConfig.getParallelism());
    }
    if (newConfig.getResources() != null) {
        mergedConfig
                .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
    }
    if (newConfig.getWindowConfig() != null) {
        mergedConfig.setWindowConfig(newConfig.getWindowConfig());
    }
    if (newConfig.getTimeoutMs() != null) {
        mergedConfig.setTimeoutMs(newConfig.getTimeoutMs());
    }
    if (newConfig.getCleanupSubscription() != null) {
        mergedConfig.setCleanupSubscription(newConfig.getCleanupSubscription());
    }
    if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
        mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
    }
    if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
        mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
    }
    if (newConfig.getProducerConfig() != null) {
        mergedConfig.setProducerConfig(newConfig.getProducerConfig());
    }
    return mergedConfig;
}
// Verifies that renaming a function during update is rejected.
@Test(expectedExceptions = IllegalArgumentException.class, expectedExceptionsMessageRegExp = "Function Names differ")
public void testMergeDifferentName() {
    FunctionConfig functionConfig = createFunctionConfig();
    FunctionConfig newFunctionConfig = createUpdatedFunctionConfig("name", "Different");
    FunctionConfigUtils.validateUpdate(functionConfig, newFunctionConfig);
}
/**
 * Writes an int at the current position via direct memory access, growing the buffer
 * first if needed and advancing the position by 4 bytes.
 * NOTE(review): MEM.putInt writes in the platform's native byte order — confirm against
 * the MEM accessor implementation if endianness matters to callers.
 */
@Override
public void writeInt(final int v) throws IOException {
    ensureAvailable(INT_SIZE_IN_BYTES);
    MEM.putInt(buffer, ARRAY_BYTE_BASE_OFFSET + pos, v);
    pos += INT_SIZE_IN_BYTES;
}
// Verifies the explicit-byte-order writeInt overload round-trips through Bits.readInt
// for both little-endian and big-endian writes.
@Test
public void testWriteIntForVByteOrder() throws Exception {
    int expected = 100;

    out.writeInt(expected, LITTLE_ENDIAN);
    out.writeInt(expected, BIG_ENDIAN);

    int actual1 = Bits.readInt(out.buffer, 0, false);
    int actual2 = Bits.readInt(out.buffer, 4, true);

    assertEquals(expected, actual1);
    assertEquals(expected, actual2);
}
/**
 * Checks whether any of the given schema names (or, for schema-less dialects, the
 * database name itself) refers to a system schema of the database type.
 *
 * @param databaseType the database type whose system schemas are consulted
 * @param schemaNames the schema names to test
 * @param database the database the statement targets
 * @return true when a system schema is referenced
 */
public static boolean containsSystemSchema(final DatabaseType databaseType,
                                           final Collection<String> schemaNames,
                                           final ShardingSphereDatabase database) {
    DialectDatabaseMetaData dialectDatabaseMetaData =
            new DatabaseTypeRegistry(databaseType).getDialectDatabaseMetaData();
    boolean schemaLessDialect = !dialectDatabaseMetaData.getDefaultSchema().isPresent();
    // A fully configured database on a schema-less dialect is user-defined, so its
    // schemas are never treated as system schemas.
    if (database.isComplete() && schemaLessDialect) {
        return false;
    }
    SystemDatabase systemDatabase = new SystemDatabase(databaseType);
    if (schemaNames.stream().anyMatch(systemDatabase.getSystemSchemas()::contains)) {
        return true;
    }
    // For schema-less dialects the database name plays the role of the schema name.
    return schemaLessDialect && systemDatabase.getSystemSchemas().contains(database.getName());
}
// Verifies system-schema detection for MySQL: built-in schemas are detected, user
// databases are not, and a "complete" (customized) database never counts as system.
@Test
void assertContainsSystemSchemaForMySQL() {
    DatabaseType databaseType = TypedSPILoader.getService(DatabaseType.class, "MySQL");
    ShardingSphereDatabase informationSchemaDatabase = mockShardingSphereDatabase("information_schema", false);
    assertTrue(SystemSchemaUtils.containsSystemSchema(databaseType, Arrays.asList("information_schema", "mysql"), informationSchemaDatabase));
    ShardingSphereDatabase shardingSchemaDatabase = mockShardingSphereDatabase("sharding_db", false);
    assertFalse(SystemSchemaUtils.containsSystemSchema(databaseType, Collections.singletonList("sharding_db"), shardingSchemaDatabase));
    ShardingSphereDatabase customizedInformationSchemaDatabase = mockShardingSphereDatabase("information_schema", true);
    assertFalse(SystemSchemaUtils.containsSystemSchema(databaseType, Arrays.asList("information_schema", "mysql"), customizedInformationSchemaDatabase));
}
/**
 * Registers a cluster listener. Registration is performed asynchronously on the
 * property-event executor so it is serialized with other state mutations; duplicate
 * registrations of the same listener instance are ignored.
 *
 * @param listener the listener to add
 */
@Override
public void registerClusterListener(final LoadBalancerClusterListener listener) {
    trace(_log, "register listener: ", listener);
    _executor.execute(new PropertyEvent("add cluster listener for state") {
        @Override
        public void innerRun() {
            if (!_clusterListeners.contains(listener)) {
                // don't allow duplicates, there's no need for a cluster listener to be registered twice.
                _clusterListeners.add(listener);
            }
        }
    });
}
// Verifies that a registered cluster listener is notified once per clusterRegistry put
// (both initial add and subsequent update).
@Test
public void testRegisterClusterListener() {
    reset();
    MockClusterListener clusterListener = new MockClusterListener();
    _state.registerClusterListener(clusterListener);
    assertEquals(clusterListener.getClusterAddedCount(CLUSTER1_CLUSTER_NAME), 0,
        "expected zero count since no action has been triggered");

    // first add a cluster
    _state.listenToCluster(CLUSTER1_CLUSTER_NAME, new NullStateListenerCallback());
    _clusterRegistry.put(CLUSTER1_CLUSTER_NAME, new ClusterProperties(CLUSTER1_CLUSTER_NAME));
    assertEquals(clusterListener.getClusterAddedCount(CLUSTER1_CLUSTER_NAME), 1,
        "expected 1 call after clusterRegistry put");

    // then update the cluster
    _clusterRegistry.put(CLUSTER1_CLUSTER_NAME, new ClusterProperties(CLUSTER1_CLUSTER_NAME));
    assertEquals(clusterListener.getClusterAddedCount(CLUSTER1_CLUSTER_NAME), 2,
        "expected 2 calls after additional clusterRegistry put");
}
/**
 * Filters {@code candidates} down to those matching at least one positive pattern and
 * none of the negative patterns.
 *
 * <p>A null or empty positive set falls back to {@code MATCH_ALL_PATTERN}; a null
 * negative set falls back to {@code EMPTY_PATTERN}. A null or empty candidate list
 * yields an empty list.
 *
 * @param candidates the values to filter (may be null)
 * @param positivePatterns patterns a candidate must match one of (may be null/empty)
 * @param negativePatterns patterns a candidate must match none of (may be null)
 * @return the surviving candidates, in their original order
 */
public static List<String> filterMatches(@Nullable List<String> candidates,
                                         @Nullable Pattern[] positivePatterns,
                                         @Nullable Pattern[] negativePatterns) {
    if (candidates == null || candidates.isEmpty()) {
        return Collections.emptyList();
    }
    final Pattern[] include = (positivePatterns == null || positivePatterns.length == 0)
            ? MATCH_ALL_PATTERN : positivePatterns;
    final Pattern[] exclude = negativePatterns == null ? EMPTY_PATTERN : negativePatterns;
    return candidates.stream()
            .filter(candidate ->
                    Arrays.stream(include).anyMatch(p -> p.matcher(candidate).matches())
                            && Arrays.stream(exclude).noneMatch(p -> p.matcher(candidate).matches()))
            .collect(Collectors.toList());
}
// Verifies that a positive pattern keeps only matching candidates.
@Test
public void filterMatchesPositive() {
    List<String> candidates = ImmutableList.of("foo", "bar");
    List<String> expected = ImmutableList.of("foo");
    assertThat(filterMatches(candidates, new Pattern[]{Pattern.compile("f.*")}, null), is(expected));
}
// Returns the raw address bytes backing this instance (the field itself, not a
// defensive copy — callers must not mutate it).
public byte[] address() {
    return address;
}
// Verifies that decoding a 24-byte native address buffer (16 address bytes + scope id
// + port) yields an Inet6Address whose host name excludes the scope id.
@Test
public void testAddressNotIncludeScopeId() {
    int port = 80;
    ByteBuffer buffer = ByteBuffer.wrap(new byte[24]);
    buffer.put(new byte[] {'0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '0', '1'});
    buffer.putInt(0);
    buffer.putInt(port);
    InetSocketAddress address = NativeInetAddress.address(buffer.array(), 0, buffer.capacity());
    assertEquals(port, address.getPort());
    assertInstanceOf(Inet6Address.class, address.getAddress());
    assertFalse(address.getAddress().isLinkLocalAddress());
    assertEquals("3030:3030:3030:3030:3030:3030:3030:3031", address.getAddress().getHostName());
}
// Returns the registered config-service instances. The appId and clientIp request
// parameters are accepted for API compatibility but are currently unused.
@RequestMapping("/config")
public List<ServiceDTO> getConfigService(
    @RequestParam(value = "appId", defaultValue = "") String appId,
    @RequestParam(value = "ip", required = false) String clientIp) {
    return discoveryService.getServiceInstances(ServiceNameConsts.APOLLO_CONFIGSERVICE);
}
// Verifies that the controller delegates to the discovery service regardless of the
// appId/clientIp parameters.
@Test
public void testGetConfigService() {
    String someAppId = "someAppId";
    String someClientIp = "someClientIp";
    when(discoveryService.getServiceInstances(ServiceNameConsts.APOLLO_CONFIGSERVICE))
        .thenReturn(someServices);
    assertEquals(someServices, serviceController.getConfigService(someAppId, someClientIp));
}
// Static factory for a fresh Builder instance.
public static Builder builder() {
    return new Builder();
}
// Verifies equals/hashCode: two identically built DivideUpstream instances collapse
// to a single element in a set.
@Test
public void testEqualsAndHashCode() {
    DivideUpstream upstream1 = DivideUpstream.builder().protocol("protocol").upstreamHost("host").upstreamUrl("url")
        .status(true).warmup(50).timestamp(1650549243L).weight(100).build();
    DivideUpstream upstream2 = DivideUpstream.builder().protocol("protocol").upstreamHost("host").upstreamUrl("url")
        .status(true).warmup(50).timestamp(1650549243L).weight(100).build();
    assertThat(ImmutableSet.of(upstream1, upstream2), hasSize(1));
}
/**
 * Imports a batch of counter updates.
 *
 * <p>The three arrays are parallel: entry {@code i} names a counter, gives its kind
 * ({@code "sum"}, {@code "max"} or {@code "min"}) and the delta to apply. Counter names
 * are prefixed with {@code counterPrefix} before lookup.
 *
 * @param counterNames the unprefixed counter names
 * @param counterKinds the aggregation kind per counter
 * @param counterDeltas the value to fold into each counter
 * @throws AssertionError if the three arrays differ in length
 * @throws IllegalArgumentException if a kind is not one of sum/max/min
 */
public void importCounters(String[] counterNames, String[] counterKinds, long[] counterDeltas) {
    final int count = counterNames.length;
    if (counterKinds.length != count || counterDeltas.length != count) {
        throw new AssertionError("array lengths do not match");
    }
    for (int i = 0; i < count; ++i) {
        final CounterName name = CounterName.named(counterPrefix + counterNames[i]);
        final long delta = counterDeltas[i];
        switch (counterKinds[i]) {
            case "sum":
                counterFactory.longSum(name).addValue(delta);
                break;
            case "max":
                counterFactory.longMax(name).addValue(delta);
                break;
            case "min":
                counterFactory.longMin(name).addValue(delta);
                break;
            default:
                throw new IllegalArgumentException("unsupported counter kind: " + counterKinds[i]);
        }
    }
}
// Verifies that an unknown counter kind is rejected with a message naming the kind.
@Test
public void testUnsupportedKind() throws Exception {
    String[] names = {"sum_counter"};
    String[] kinds = {"sum_int"};
    long[] deltas = {122};

    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("sum_int");
    counters.importCounters(names, kinds, deltas);
}
@VisibleForTesting public void validateConfigKeyUnique(Long id, String key) { ConfigDO config = configMapper.selectByKey(key); if (config == null) { return; } // 如果 id 为空,说明不用比较是否为相同 id 的参数配置 if (id == null) { throw exception(CONFIG_KEY_DUPLICATE); } if (!config.getId().equals(id)) { throw exception(CONFIG_KEY_DUPLICATE); } }
// Verifies that creating (id == null) with a key already owned by another row fails.
@Test
public void testValidateConfigKeyUnique_keyDuplicateForCreate() {
    // Prepare parameters
    String key = randomString();
    // Mock data: an existing row already owns the key
    configMapper.insert(randomConfigDO(o -> o.setConfigKey(key)));
    // Call and assert the expected service exception
    assertServiceException(() -> configService.validateConfigKeyUnique(null, key), CONFIG_KEY_DUPLICATE);
}
/**
 * Exports the event definition identified by the descriptor as a content-pack entity.
 *
 * @param entityDescriptor descriptor whose model id names the event definition
 * @param entityDescriptorIds id mapping used while serializing references
 * @return the exported entity, or empty if no such event definition exists
 */
@Override
public Optional<Entity> exportEntity(EntityDescriptor entityDescriptor,
                                     EntityDescriptorIds entityDescriptorIds) {
    final ModelId modelId = entityDescriptor.id();
    final Optional<EventDefinitionDto> eventDefinition = eventDefinitionService.get(modelId.id());
    if (!eventDefinition.isPresent()) {
        // Missing definitions are not an error here; the caller decides how to react.
        LOG.debug("Couldn't find event definition {}", entityDescriptor);
        return Optional.empty();
    }

    return Optional.of(exportNativeEntity(eventDefinition.get(), entityDescriptorIds));
}
// Verifies exporting an event definition that has no scheduler job: all fields are
// serialized and isScheduled resolves to false.
@Test
@MongoDBFixtures("EventDefinitionFacadeTest.json")
public void exportEntityWithoutScheduling() {
    final ModelId id = ModelId.of("5d4032513d2746703d1467f6");

    when(jobDefinitionService.getByConfigField(eq("event_definition_id"), eq(id.id())))
        .thenReturn(Optional.empty());

    final EntityDescriptor descriptor = EntityDescriptor.create(id, ModelTypes.EVENT_DEFINITION_V1);
    final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor);
    final Optional<Entity> entity = facade.exportEntity(descriptor, entityDescriptorIds);

    assertThat(entity).isPresent();
    final EntityV1 entityV1 = (EntityV1) entity.get();

    final EventDefinitionEntity eventDefinitionEntity =
        objectMapper.convertValue(entityV1.data(), EventDefinitionEntity.class);

    assertThat(eventDefinitionEntity.title().asString()).isEqualTo("title");
    assertThat(eventDefinitionEntity.description().asString()).isEqualTo("description");
    assertThat(eventDefinitionEntity.remediationSteps().asString()).isEqualTo(REMEDIATION_STEPS);
    assertThat(eventDefinitionEntity.config().type()).isEqualTo(AggregationEventProcessorConfigEntity.TYPE_NAME);
    assertThat(eventDefinitionEntity.isScheduled().asBoolean(ImmutableMap.of())).isFalse();
}
/**
 * Updates the published position and, when subscribers have advanced far enough,
 * the publisher limit for this publication.
 *
 * <p>With subscribers present, the limit is advanced off the slowest subscriber
 * position plus the term window, and cleaned buffer space is reclaimed up to that
 * slowest position. With no subscribers, the limit is clamped down to the consumer
 * position so the publisher cannot run ahead unobserved.
 *
 * @return 1 if the publisher limit was advanced (work was done), 0 otherwise
 */
int updatePublisherPositionAndLimit() {
    int workCount = 0;

    if (State.ACTIVE == state) {
        final long producerPosition = producerPosition();
        publisherPos.setOrdered(producerPosition);

        if (subscriberPositions.length > 0) {
            // Track both extremes: the slowest subscriber gates the limit,
            // the fastest drives the consumer position forward.
            long minSubscriberPosition = Long.MAX_VALUE;
            long maxSubscriberPosition = consumerPosition;

            for (final ReadablePosition subscriberPosition : subscriberPositions) {
                final long position = subscriberPosition.getVolatile();
                minSubscriberPosition = Math.min(minSubscriberPosition, position);
                maxSubscriberPosition = Math.max(maxSubscriberPosition, position);
            }

            if (maxSubscriberPosition > consumerPosition) {
                consumerPosition = maxSubscriberPosition;
            }

            final long proposedLimit = minSubscriberPosition + termWindowLength;
            // Only move the limit once it has passed the trip point, to amortize the
            // cost of cleaning and the ordered write.
            if (proposedLimit > tripLimit) {
                cleanBufferTo(minSubscriberPosition);
                publisherLimit.setOrdered(proposedLimit);
                tripLimit = proposedLimit + tripGain;

                workCount = 1;
            }
        } else if (publisherLimit.get() > consumerPosition) {
            // No subscribers: clamp the limit to what has been consumed.
            tripLimit = consumerPosition;
            publisherLimit.setOrdered(consumerPosition);
            cleanBufferTo(consumerPosition);
        }
    }

    return workCount;
}
// Verifies that with no subscribers the publisher limit stays at zero after an update.
@Test
void shouldKeepPublisherLimitZeroOnNoSubscriptionUpdate() {
    ipcPublication.updatePublisherPositionAndLimit();
    assertThat(publisherLimit.get(), is(0L));
}
/**
 * Builds the CREATE TABLE statement (plus COMMENT ON COLUMN statements) for the table.
 *
 * <p>The primary-key constraint is emitted inline in the column list only when index
 * creation is enabled. Column comments are appended as separate statements joined with
 * ";\n" (note: the final statement carries no trailing semicolon).
 *
 * @param tablePath the schema/table the statement targets (identifiers quoted with ")
 * @return the full DDL text
 */
public String build(TablePath tablePath) {
    StringBuilder createTableSql = new StringBuilder();
    createTableSql
            .append("CREATE TABLE ")
            .append(tablePath.getSchemaAndTableName("\""))
            .append(" (\n");

    List<String> columnSqls =
            columns.stream()
                    .map(column -> CatalogUtils.getFieldIde(buildColumnSql(column), fieldIde))
                    .collect(Collectors.toList());

    // Add primary key directly in the create table statement
    if (createIndex
            && primaryKey != null
            && primaryKey.getColumnNames() != null
            && primaryKey.getColumnNames().size() > 0) {
        columnSqls.add(buildPrimaryKeySql(primaryKey));
    }

    createTableSql.append(String.join(",\n", columnSqls));
    createTableSql.append("\n)");

    List<String> commentSqls =
            columns.stream()
                    .filter(column -> StringUtils.isNotBlank(column.getComment()))
                    .map(
                            column ->
                                    buildColumnCommentSql(
                                            column, tablePath.getSchemaAndTableName("\"")))
                    .collect(Collectors.toList());

    if (!commentSqls.isEmpty()) {
        createTableSql.append(";\n");
        createTableSql.append(String.join(";\n", commentSqls));
    }

    return createTableSql.toString();
}
/**
 * End-to-end check of the generated CREATE TABLE DDL for a representative
 * schema (PK, index keys, comments), in both createIndex=true and
 * createIndex=false modes. The randomly generated PK constraint name
 * ("id_xxxx") is normalized before comparison.
 */
@Test
public void testBuild() {
    String dataBaseName = "test_database";
    String tableName = "test_table";
    TablePath tablePath = TablePath.of(dataBaseName, tableName);
    TableSchema tableSchema =
            TableSchema.builder()
                    .column(PhysicalColumn.of("id", BasicType.LONG_TYPE, 22, false, null, "id"))
                    .column(
                            PhysicalColumn.of(
                                    "name", BasicType.STRING_TYPE, 128, false, null, "name"))
                    .column(
                            PhysicalColumn.of(
                                    "age", BasicType.INT_TYPE, (Long) null, true, null, "age"))
                    .column(
                            PhysicalColumn.of(
                                    "blob_v",
                                    PrimitiveByteArrayType.INSTANCE,
                                    Long.MAX_VALUE,
                                    true,
                                    null,
                                    "blob_v"))
                    .column(
                            PhysicalColumn.of(
                                    "createTime",
                                    LocalTimeType.LOCAL_DATE_TIME_TYPE,
                                    3,
                                    true,
                                    null,
                                    "createTime"))
                    .column(
                            PhysicalColumn.of(
                                    "lastUpdateTime",
                                    LocalTimeType.LOCAL_DATE_TIME_TYPE,
                                    3,
                                    true,
                                    null,
                                    "lastUpdateTime"))
                    .primaryKey(PrimaryKey.of("id", Lists.newArrayList("id")))
                    .constraintKey(
                            Arrays.asList(
                                    ConstraintKey.of(
                                            ConstraintKey.ConstraintType.INDEX_KEY,
                                            "name",
                                            Lists.newArrayList(
                                                    ConstraintKey.ConstraintKeyColumn.of(
                                                            "name", null))),
                                    ConstraintKey.of(
                                            ConstraintKey.ConstraintType.INDEX_KEY,
                                            "blob_v",
                                            Lists.newArrayList(
                                                    ConstraintKey.ConstraintKeyColumn.of(
                                                            "blob_v", null)))))
                    .build();
    CatalogTable catalogTable =
            CatalogTable.of(
                    TableIdentifier.of("test_catalog", dataBaseName, tableName),
                    tableSchema,
                    new HashMap<>(),
                    new ArrayList<>(),
                    "User table");

    XuguCreateTableSqlBuilder xuguCreateTableSqlBuilder =
            new XuguCreateTableSqlBuilder(catalogTable, true);
    String createTableSql = xuguCreateTableSqlBuilder.build(tablePath);
    // create table sql is change; The old unit tests are no longer applicable
    String expect =
            "CREATE TABLE \"test_table\" (\n"
                    + "\"id\" BIGINT NOT NULL,\n"
                    + "\"name\" VARCHAR(128) NOT NULL,\n"
                    + "\"age\" INTEGER,\n"
                    + "\"blob_v\" BLOB,\n"
                    + "\"createTime\" TIMESTAMP,\n"
                    + "\"lastUpdateTime\" TIMESTAMP,\n"
                    + "CONSTRAINT id_88a3 PRIMARY KEY (\"id\")\n"
                    + ");\n"
                    + "COMMENT ON COLUMN \"test_table\".\"id\" IS 'id';\n"
                    + "COMMENT ON COLUMN \"test_table\".\"name\" IS 'name';\n"
                    + "COMMENT ON COLUMN \"test_table\".\"age\" IS 'age';\n"
                    + "COMMENT ON COLUMN \"test_table\".\"blob_v\" IS 'blob_v';\n"
                    + "COMMENT ON COLUMN \"test_table\".\"createTime\" IS 'createTime';\n"
                    + "COMMENT ON COLUMN \"test_table\".\"lastUpdateTime\" IS 'lastUpdateTime'";

    // replace "CONSTRAINT id_xxxx" because it's dynamically generated(random)
    String regex = "id_\\w+";
    String replacedStr1 = createTableSql.replaceAll(regex, "id_");
    String replacedStr2 = expect.replaceAll(regex, "id_");
    CONSOLE.println(replacedStr2);
    Assertions.assertEquals(replacedStr2, replacedStr1);

    // skip index
    XuguCreateTableSqlBuilder xuguCreateTableSqlBuilderSkipIndex =
            new XuguCreateTableSqlBuilder(catalogTable, false);
    String createTableSqlSkipIndex = xuguCreateTableSqlBuilderSkipIndex.build(tablePath);
    String expectSkipIndex =
            "CREATE TABLE \"test_table\" (\n"
                    + "\"id\" BIGINT NOT NULL,\n"
                    + "\"name\" VARCHAR(128) NOT NULL,\n"
                    + "\"age\" INTEGER,\n"
                    + "\"blob_v\" BLOB,\n"
                    + "\"createTime\" TIMESTAMP,\n"
                    + "\"lastUpdateTime\" TIMESTAMP\n"
                    + ");\n"
                    + "COMMENT ON COLUMN \"test_table\".\"id\" IS 'id';\n"
                    + "COMMENT ON COLUMN \"test_table\".\"name\" IS 'name';\n"
                    + "COMMENT ON COLUMN \"test_table\".\"age\" IS 'age';\n"
                    + "COMMENT ON COLUMN \"test_table\".\"blob_v\" IS 'blob_v';\n"
                    + "COMMENT ON COLUMN \"test_table\".\"createTime\" IS 'createTime';\n"
                    + "COMMENT ON COLUMN \"test_table\".\"lastUpdateTime\" IS 'lastUpdateTime'";
    CONSOLE.println(expectSkipIndex);
    Assertions.assertEquals(expectSkipIndex, createTableSqlSkipIndex);
}
/**
 * Returns the cached value, first refreshing it via {@code loadValue()}
 * whenever the caching policy ({@code shouldLoad()}) reports it is stale.
 *
 * @return the (possibly just refreshed) cached value
 */
@Override
public T getValue() {
    // Assignment expression: the freshly loaded value is cached and returned
    // in a single step when a reload is due.
    return shouldLoad() ? (value = loadValue()) : value;
}
/**
 * The gauge must serve the cached value (1) until the reload interval
 * elapses, then load a fresh value (2) exactly once and cache it.
 * NOTE(review): relies on a real Thread.sleep(150), so this test is
 * timing-sensitive on slow CI machines.
 */
@Test
public void reloadsTheCachedValueAfterTheGivenPeriod() throws Exception {
    assertThat(gauge.getValue())
            .isEqualTo(1);

    Thread.sleep(150);

    assertThat(gauge.getValue())
            .isEqualTo(2);

    // A second read inside the same interval must serve the cached value.
    assertThat(gauge.getValue())
            .isEqualTo(2);
}
/**
 * Executes a LIST CONNECTORS statement against the Connect cluster.
 * <p>
 * Lists all connector names, then describes each one; connectors whose type
 * matches the statement scope are returned with status details. Describe
 * failures produce a warning (and, in scope ALL, a placeholder entry of
 * UNKNOWN type).
 *
 * @throws KsqlRestException if the initial connector listing fails
 */
@SuppressWarnings("OptionalGetWithoutIsPresent")
public static StatementExecutorResponse execute(
    final ConfiguredStatement<ListConnectors> configuredStatement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext ksqlExecutionContext,
    final ServiceContext serviceContext
) {
  final ConnectClient connectClient = serviceContext.getConnectClient();
  // Fix: reuse the client captured above instead of fetching a second
  // instance from the service context for the listing call.
  final ConnectResponse<List<String>> connectors = connectClient.connectors();

  if (connectors.error().isPresent()) {
    final String errorMsg = "Failed to list connectors: " + connectors.error().get();
    throw new KsqlRestException(EndpointResponse.create()
        .status(connectors.httpCode())
        .entity(new KsqlErrorMessage(Errors.toErrorCode(connectors.httpCode()), errorMsg))
        .build()
    );
  }

  final List<SimpleConnectorInfo> infos = new ArrayList<>();
  final List<KsqlWarning> warnings = new ArrayList<>();
  final Scope scope = configuredStatement.getStatement().getScope();

  for (final String name : connectors.datum().get()) {
    final ConnectResponse<ConnectorInfo> response = connectClient.describe(name);

    if (response.datum().filter(i -> inScope(i.type(), scope)).isPresent()) {
      // In-scope connector: enrich with current runtime status.
      final ConnectResponse<ConnectorStateInfo> status = connectClient.status(name);
      infos.add(fromConnectorInfoResponse(name, response, status));
    } else if (response.error().isPresent()) {
      // Describe failed: keep the name visible in scope ALL, and always warn.
      if (scope == Scope.ALL) {
        infos.add(new SimpleConnectorInfo(name, ConnectorType.UNKNOWN, null, null));
      }
      warnings.add(
          new KsqlWarning(
              String.format(
                  "Could not describe connector %s: %s",
                  name,
                  response.error().get())));
    }
  }

  return StatementExecutorResponse.handled(Optional.of(
      new ConnectorList(
          configuredStatement.getMaskedStatementText(),
          warnings,
          infos)
  ));
}
/**
 * When the statement scope is SINK but neither connector matches it, the
 * result must be an empty connector list with no warnings.
 */
@Test
public void shouldFilterNonMatchingConnectors() {
    // Given:
    when(connectClient.connectors())
        .thenReturn(ConnectResponse.success(ImmutableList.of("connector", "connector2"),
            HttpStatus.SC_OK));
    final ConfiguredStatement<ListConnectors> statement = ConfiguredStatement
        .of(PreparedStatement.of("", new ListConnectors(Optional.empty(), Scope.SINK)),
            SessionConfig.of(new KsqlConfig(ImmutableMap.of()), ImmutableMap.of())
        );

    // When:
    final Optional<KsqlEntity> entity = ListConnectorsExecutor
        .execute(statement, mock(SessionProperties.class), engine, serviceContext).getEntity();

    // Then:
    assertThat("expected response!", entity.isPresent());
    final ConnectorList connectorList = (ConnectorList) entity.get();

    assertThat(connectorList, is(new ConnectorList(
        "",
        ImmutableList.of(),
        ImmutableList.of()
    )));
}
/**
 * Records a sample at the default position (the dimension-less point for
 * this metric).
 *
 * @param x the sampled value
 */
public void sample(double x) {
    sample(x, defaultPosition);
}
/**
 * A single double sample on a gauge must surface in the next snapshot under
 * the metric name, at the empty point, with count 1 and last value 1.0.
 */
@Test
final void testSampleDouble() throws InterruptedException {
    final String metricName = "unitTestGauge";
    Gauge g = receiver.declareGauge(metricName);
    g.sample(1.0d);
    Bucket b = receiver.getSnapshot();
    final Map<String, List<Entry<Point, UntypedMetric>>> valuesByMetricName =
            b.getValuesByMetricName();
    assertEquals(1, valuesByMetricName.size());
    List<Entry<Point, UntypedMetric>> x = valuesByMetricName.get(metricName);
    assertEquals(1, x.size());
    assertEquals(Point.emptyPoint(), x.get(0).getKey());
    assertEquals(1L, x.get(0).getValue().getCount());
    assertEquals(1.0d, x.get(0).getValue().getLast(), 0.0d);
}
/**
 * Renders the ALTER TABLE ... ADD statement for the configured columns,
 * using the dialect-specific syntax:
 * PostgreSQL repeats "ADD COLUMN " per column, MS SQL uses a single bare
 * "ADD ", and every other dialect wraps the column list in "ADD (...)".
 *
 * @return the ALTER TABLE statement
 * @throws IllegalStateException if no column has been defined
 */
public String build() {
    if (columnDefs.isEmpty()) {
        throw new IllegalStateException("No column has been defined");
    }

    StringBuilder sql = new StringBuilder("ALTER TABLE ").append(tableName).append(" ");
    String dialectId = dialect.getId();
    if (PostgreSql.ID.equals(dialectId)) {
        addColumns(sql, "ADD COLUMN ");
    } else if (MsSql.ID.equals(dialectId)) {
        sql.append("ADD ");
        addColumns(sql, "");
    } else {
        sql.append("ADD (");
        addColumns(sql, "");
        sql.append(")");
    }
    return sql.toString();
}
/**
 * MS SQL dialect uses a single bare "ADD" keyword followed by a
 * comma-separated column list without surrounding parentheses.
 */
@Test
public void add_columns_on_mssql() {
    assertThat(createSampleBuilder(new MsSql()).build())
            .isEqualTo("ALTER TABLE issues ADD date_in_ms BIGINT NULL, name NVARCHAR (10) NOT NULL, col_with_default BIT DEFAULT 0 NOT NULL, varchar_col_with_default NVARCHAR (3) DEFAULT 'foo' NOT NULL");
}
public static String generateNameFromStagePayload(ExecutableStagePayload stagePayload) { StringBuilder sb = new StringBuilder(); RunnerApi.Components components = stagePayload.getComponents(); final int transformsCount = stagePayload.getTransformsCount(); sb.append("[").append(transformsCount).append("]"); Collection<String> names = new ArrayList<>(); for (int i = 0; i < transformsCount; i++) { String name = components.getTransformsOrThrow(stagePayload.getTransforms(i)).getUniqueName(); // Java: Remove the 'ParMultiDo(Anonymous)' suffix which just makes the name longer name = name.replaceFirst("/ParMultiDo\\(Anonymous\\)$", ""); names.add(name); } sb.append(generateNameFromTransformNames(names, true)); return sb.toString(); }
/** Test for generating readable operator names during translation. */
@Test
public void testOperatorNameGeneration() throws Exception {
    Pipeline p = Pipeline.create();
    p.apply(Impulse.create())
        // Anonymous ParDo — its '/ParMultiDo(Anonymous)' suffix should be stripped
        .apply(
            ParDo.of(
                new DoFn<byte[], String>() {
                    @ProcessElement
                    public void processElement(
                        ProcessContext processContext, OutputReceiver<String> outputReceiver) {}
                }))
        // Name ParDo — explicit name should be used verbatim
        .apply(
            "MyName",
            ParDo.of(
                new DoFn<String, Integer>() {
                    @ProcessElement
                    public void processElement(
                        ProcessContext processContext, OutputReceiver<Integer> outputReceiver) {}
                }))
        .apply(
            // Avoid nested Anonymous ParDo — only the composite's first segment should appear
            "Composite/Nested/ParDo",
            ParDo.of(
                new DoFn<Integer, Integer>() {
                    @ProcessElement
                    public void processElement(
                        ProcessContext processContext, OutputReceiver<Integer> outputReceiver) {}
                }));

    ExecutableStage firstEnvStage =
        GreedyPipelineFuser.fuse(PipelineTranslation.toProto(p)).getFusedStages().stream()
            .findFirst()
            .get();
    RunnerApi.ExecutableStagePayload basePayload =
        RunnerApi.ExecutableStagePayload.parseFrom(
            firstEnvStage.toPTransform("foo").getSpec().getPayload());

    String executableStageName =
        ExecutableStageTranslation.generateNameFromStagePayload(basePayload);

    assertThat(executableStageName, is("[3]{ParDo(Anonymous), MyName, Composite}"));
}
static public boolean createMissingParentDirectories(File file) { File parent = file.getParentFile(); if (parent == null) { // Parent directory not specified, therefore it's a request to // create nothing. Done! ;) return true; } // File.mkdirs() creates the parent directories only if they don't // already exist; and it's okay if they do. parent.mkdirs(); return parent.exists(); }
/**
 * Creating a file path several directories deep: the parents must not exist
 * beforehand, createMissingParentDirectories must report success, and the
 * immediate parent must exist afterwards. Created files/dirs are registered
 * for cleanup.
 */
@Test
public void checkDeeperParentCreationInquiryAndSubsequentCreation() {

    File file = new File(CoreTestConstants.OUTPUT_DIR_PREFIX + "/fu" + diff + "/bla/testing.txt");
    // these will be deleted later
    cleanupList.add(file);
    cleanupList.add(file.getParentFile());
    cleanupList.add(file.getParentFile().getParentFile());

    assertFalse(file.getParentFile().exists());
    assertTrue(FileUtil.createMissingParentDirectories(file));
    assertTrue(file.getParentFile().exists());
}
/**
 * Parses one line of an ACL definition file.
 * <p>
 * Recognized keywords (first whitespace-separated token, case-insensitive):
 * "topic" creates an Authorization in the current section; "user" switches
 * into a user-specific section for the named user and returns null;
 * "pattern" switches into a pattern-specific section and also creates an
 * Authorization.
 * <p>
 * NOTE(review): a bare "user" line with no user name would throw
 * ArrayIndexOutOfBoundsException on tokens[1] rather than ParseException —
 * confirm inputs are pre-validated upstream.
 *
 * @param line the raw ACL line
 * @return the parsed Authorization, or null for section-switching "user" lines
 * @throws ParseException if the keyword is not recognized
 */
protected Authorization parseAuthLine(String line) throws ParseException {
    String[] tokens = line.split("\\s+");
    String keyword = tokens[0].toLowerCase();
    switch (keyword) {
        case "topic":
            return createAuthorization(line, tokens);
        case "user":
            m_parsingUsersSpecificSection = true;
            m_currentUser = tokens[1];
            m_parsingPatternSpecificSection = false;
            return null;
        case "pattern":
            m_parsingUsersSpecificSection = false;
            m_currentUser = "";
            m_parsingPatternSpecificSection = true;
            return createAuthorization(line, tokens);
        default:
            throw new ParseException(String.format("invalid line definition found %s", line), 1);
    }
}
/**
 * A "topic write &lt;topic&gt;" line must parse into the expected write
 * authorization for the anemometer topic.
 */
@Test
public void testParseAuthLineValid_write() throws ParseException {
    Authorization authorization = authorizator.parseAuthLine("topic write /weather/italy/anemometer");

    // Verify
    assertEquals(W_ANEMOMETER, authorization);
}
/**
 * Incrementally iterates the sorted set at {@code key} via the Redis ZSCAN
 * command, honoring the MATCH pattern and COUNT hint from the options.
 * <p>
 * The cursor pins subsequent iterations to the Redis node that served the
 * first page (see the {@code client} field) so the scan cursor stays valid.
 *
 * @throws UnsupportedOperationException when invoked inside a pipeline or
 *     transaction, where ZSCAN cannot run
 */
@Override
public Cursor<Tuple> zScan(byte[] key, ScanOptions options) {
    return new KeyBoundCursor<Tuple>(key, 0, options) {

        private RedisClient client;

        @Override
        protected ScanIteration<Tuple> doScan(byte[] key, long cursorId, ScanOptions options) {
            if (isQueueing() || isPipelined()) {
                throw new UnsupportedOperationException("'ZSCAN' cannot be called in pipeline / transaction mode.");
            }

            // Cursor ids are unsigned 64-bit values on the Redis side.
            List<Object> args = new ArrayList<Object>();
            args.add(key);
            args.add(Long.toUnsignedString(cursorId));
            if (options.getPattern() != null) {
                args.add("MATCH");
                args.add(options.getPattern());
            }
            if (options.getCount() != null) {
                args.add("COUNT");
                args.add(options.getCount());
            }

            RFuture<ListScanResult<Tuple>> f = executorService.readAsync(client, key, ByteArrayCodec.INSTANCE, ZSCAN, args.toArray());
            ListScanResult<Tuple> res = syncFuture(f);
            // Remember which node answered so the next page uses the same one.
            client = res.getRedisClient();
            return new ScanIteration<Tuple>(Long.parseUnsignedLong(res.getPos()), res.getValues());
        }
    }.open();
}
/**
 * Scanning a two-member sorted set must yield both values.
 * NOTE(review): asserts a specific iteration order (value1 then value2) —
 * holds here because ZSCAN on a small set returns members in score order,
 * but SCAN ordering is not generally guaranteed.
 */
@Test
public void testZScan() {
    connection.zAdd("key".getBytes(), 1, "value1".getBytes());
    connection.zAdd("key".getBytes(), 2, "value2".getBytes());

    Cursor<RedisZSetCommands.Tuple> t = connection.zScan("key".getBytes(), ScanOptions.scanOptions().build());
    assertThat(t.hasNext()).isTrue();
    assertThat(t.next().getValue()).isEqualTo("value1".getBytes());
    assertThat(t.hasNext()).isTrue();
    assertThat(t.next().getValue()).isEqualTo("value2".getBytes());
}
/**
 * Builds the result rows for SHOW MASK RULES.
 * <p>
 * Tables are filtered by the optional table name in the statement (no name
 * means all tables), then each table's column/algorithm configuration is
 * flattened into result rows.
 */
@Override
public Collection<LocalDataQueryResultRow> getRows(final ShowMaskRulesStatement sqlStatement, final ContextManager contextManager) {
    return rule.getConfiguration().getTables().stream()
            // null table name in the statement means "no filter".
            .filter(each -> null == sqlStatement.getTableName() || each.getName().equals(sqlStatement.getTableName()))
            .map(each -> buildColumnData(each, rule.getConfiguration().getMaskAlgorithms()))
            .flatMap(Collection::stream)
            .collect(Collectors.toList());
}
/**
 * A single configured mask rule must be rendered as one row:
 * table, column, algorithm name, and empty algorithm properties.
 */
@Test
void assertGetRowData() throws SQLException {
    engine.executeQuery();
    Collection<LocalDataQueryResultRow> actual = engine.getRows();
    assertThat(actual.size(), is(1));
    Iterator<LocalDataQueryResultRow> iterator = actual.iterator();
    LocalDataQueryResultRow row = iterator.next();
    assertThat(row.getCell(1), is("t_mask"));
    assertThat(row.getCell(2), is("user_id"));
    assertThat(row.getCell(3), is("md5"));
    assertThat(row.getCell(4), is(""));
}
/**
 * One iteration of the herder's main loop.
 * <p>
 * Order matters: (1) catch up on the config topic if a previous read failed,
 * (2) ensure group membership and handle any completed rebalance, (3) recover
 * config-topic write privileges if fenced, (4) rotate the session key when
 * due, (5) drain due external requests and restart requests, (6) apply
 * config/target-state/task-config updates per the active protocol, and
 * finally (7) poll the group coordinator until the next scheduled deadline.
 */
public void tick() {
    // The main loop does two primary things: 1) drive the group membership protocol, responding to rebalance events
    // as they occur, and 2) handle external requests targeted at the leader. All the "real" work of the herder is
    // performed in this thread, which keeps synchronization straightforward at the cost of some operations possibly
    // blocking up this thread (especially those in callbacks due to rebalance events).

    try {
        // if we failed to read to end of log before, we need to make sure the issue was resolved before joining group
        // Joining and immediately leaving for failure to read configs is exceedingly impolite
        if (!canReadConfigs) {
            if (readConfigToEnd(workerSyncTimeoutMs)) {
                canReadConfigs = true;
            } else {
                return; // Safe to return and tick immediately because readConfigToEnd will do the backoff for us
            }
        }

        log.debug("Ensuring group membership is still active");
        String stageDescription = "ensuring membership in the cluster";
        member.ensureActive(() -> new TickThreadStage(stageDescription));
        completeTickThreadStage();
        // Ensure we're in a good state in our group. If not restart and everything should be setup to rejoin
        if (!handleRebalanceCompleted()) return;
    } catch (WakeupException e) {
        // May be due to a request from another thread, or might be stopping. If the latter, we need to check the
        // flag immediately. If the former, we need to re-run the ensureActive call since we can't handle requests
        // unless we're in the group.
        log.trace("Woken up while ensure group membership is still active");
        return;
    }

    if (fencedFromConfigTopic) {
        if (isLeader()) {
            // We were accidentally fenced out, possibly by a zombie leader
            try {
                log.debug("Reclaiming write privileges for config topic after being fenced out");
                try (TickThreadStage stage = new TickThreadStage("reclaiming write privileges for the config topic")) {
                    configBackingStore.claimWritePrivileges();
                }
                fencedFromConfigTopic = false;
                log.debug("Successfully reclaimed write privileges for config topic after being fenced out");
            } catch (Exception e) {
                log.warn("Unable to claim write privileges for config topic. Will backoff and possibly retry if still the leader", e);
                backoff(CONFIG_TOPIC_WRITE_PRIVILEGES_BACKOFF_MS);
                return;
            }
        } else {
            log.trace("Relinquished write privileges for config topic after being fenced out, since worker is no longer the leader of the cluster");
            // We were meant to be fenced out because we fell out of the group and a new leader was elected
            fencedFromConfigTopic = false;
        }
    }

    long now = time.milliseconds();

    if (checkForKeyRotation(now)) {
        log.debug("Distributing new session key");
        keyExpiration = Long.MAX_VALUE;
        try {
            SessionKey newSessionKey = new SessionKey(keyGenerator.generateKey(), now);
            writeToConfigTopicAsLeader(
                    "writing a new session key to the config topic",
                    () -> configBackingStore.putSessionKey(newSessionKey)
            );
        } catch (Exception e) {
            log.info("Failed to write new session key to config topic; forcing a read to the end of the config topic before possibly retrying", e);
            canReadConfigs = false;
            return;
        }
    }

    // Process any external requests
    // TODO: Some of these can be performed concurrently or even optimized away entirely.
    // For example, if three different connectors are slated to be restarted, it's fine to
    // restart all three at the same time instead.
    // Another example: if multiple configurations are submitted for the same connector,
    // the only one that actually has to be written to the config topic is the
    // most-recently one.
    Long scheduledTick = null;
    while (true) {
        final DistributedHerderRequest next = peekWithoutException();
        if (next == null) {
            break;
        } else if (now >= next.at) {
            currentRequest = requests.pollFirst();
        } else {
            // Earliest pending request is still in the future; remember when
            // it is due so the coordinator poll below wakes up in time.
            scheduledTick = next.at;
            break;
        }

        runRequest(next.action(), next.callback());
    }

    // Process all pending connector restart requests
    processRestartRequests();

    if (scheduledRebalance < Long.MAX_VALUE) {
        scheduledTick = scheduledTick != null ? Math.min(scheduledTick, scheduledRebalance) : scheduledRebalance;
        rebalanceResolved = false;
        log.debug("Scheduled rebalance at: {} (now: {} scheduledTick: {}) ",
                scheduledRebalance, now, scheduledTick);
    }
    if (isLeader() && internalRequestValidationEnabled() && keyExpiration < Long.MAX_VALUE) {
        scheduledTick = scheduledTick != null ? Math.min(scheduledTick, keyExpiration) : keyExpiration;
        log.debug("Scheduled next key rotation at: {} (now: {} scheduledTick: {}) ",
                keyExpiration, now, scheduledTick);
    }

    // Process any configuration updates
    AtomicReference<Set<String>> connectorConfigUpdatesCopy = new AtomicReference<>();
    AtomicReference<Set<String>> connectorTargetStateChangesCopy = new AtomicReference<>();
    AtomicReference<Set<ConnectorTaskId>> taskConfigUpdatesCopy = new AtomicReference<>();

    boolean shouldReturn;
    if (member.currentProtocolVersion() == CONNECT_PROTOCOL_V0) {
        shouldReturn = updateConfigsWithEager(connectorConfigUpdatesCopy,
                connectorTargetStateChangesCopy);
        // With eager protocol we should return immediately if needsReconfigRebalance has
        // been set to retain the old workflow
        if (shouldReturn) {
            return;
        }

        if (connectorConfigUpdatesCopy.get() != null) {
            processConnectorConfigUpdates(connectorConfigUpdatesCopy.get());
        }

        if (connectorTargetStateChangesCopy.get() != null) {
            processTargetStateChanges(connectorTargetStateChangesCopy.get());
        }
    } else {
        shouldReturn = updateConfigsWithIncrementalCooperative(connectorConfigUpdatesCopy,
                connectorTargetStateChangesCopy, taskConfigUpdatesCopy);

        if (connectorConfigUpdatesCopy.get() != null) {
            processConnectorConfigUpdates(connectorConfigUpdatesCopy.get());
        }

        if (connectorTargetStateChangesCopy.get() != null) {
            processTargetStateChanges(connectorTargetStateChangesCopy.get());
        }

        if (taskConfigUpdatesCopy.get() != null) {
            processTaskConfigUpdatesWithIncrementalCooperative(taskConfigUpdatesCopy.get());
        }

        if (shouldReturn) {
            return;
        }
    }

    // Let the group take any actions it needs to
    try {
        long nextRequestTimeoutMs = scheduledTick != null ? Math.max(scheduledTick - time.milliseconds(), 0L) : Long.MAX_VALUE;
        log.trace("Polling for group activity; will wait for {}ms or until poll is interrupted by "
                + "either config backing store updates or a new external request",
                nextRequestTimeoutMs);
        String pollDurationDescription = scheduledTick != null ? "for up to " + nextRequestTimeoutMs + "ms or " : "";
        String stageDescription = "polling the group coordinator " + pollDurationDescription + "until interrupted";
        member.poll(nextRequestTimeoutMs, () -> new TickThreadStage(stageDescription));
        completeTickThreadStage();
        // Ensure we're in a good state in our group. If not restart and everything should be setup to rejoin
        handleRebalanceCompleted();
    } catch (WakeupException e) { // FIXME should not be WakeupException
        log.trace("Woken up while polling for group activity");
        // Ignore. Just indicates we need to check the exit flag, for requested actions, etc.
    }
}
/**
 * A failure to write a new session key must force a re-read to the end of
 * the config topic before the write is retried on the next tick; the second
 * attempt succeeds and the herder proceeds to poll for group activity.
 */
@Test
public void testFailedToWriteSessionKey() {
    // First tick -- after joining the group, we try to write a new
    // session key to the config topic, and fail
    when(member.memberId()).thenReturn("leader");
    when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2);
    expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true);
    expectConfigRefreshAndSnapshot(SNAPSHOT);
    doThrow(new ConnectException("Oh no!")).when(configBackingStore).putSessionKey(any(SessionKey.class));

    herder.tick();

    // Second tick -- we read to the end of the config topic first,
    // then ensure we're still active in the group
    // then try a second time to write a new session key,
    // then finally begin polling for group activity
    expectConfigRefreshAndSnapshot(SNAPSHOT);
    doNothing().when(configBackingStore).putSessionKey(any(SessionKey.class));

    herder.tick();

    verify(member, times(2)).ensureActive(any());
    verify(member, times(1)).poll(anyLong(), any());
    verify(configBackingStore, times(2)).putSessionKey(any(SessionKey.class));
}
/**
 * Blocking size query: delegates to the async variant and waits for its
 * result.
 */
@Override
public int size() {
    return get(sizeAsync());
}
/**
 * Duplicate adds must not grow the set: seven adds containing two duplicates
 * (3 and 5) yield a size of five.
 */
@Test
public void testSize() {
    Set<Integer> set = redisson.getSet("set");
    set.add(1);
    set.add(2);
    set.add(3);
    set.add(3);
    set.add(4);
    set.add(5);
    set.add(5);

    Assertions.assertEquals(5, set.size());
}
/**
 * Derives the identifier for a user by hashing their email address.
 *
 * @param user the user; must be non-null and carry a non-empty email
 * @return the hash of the user's email
 * @throws NullPointerException if the user is null or the email is null/empty
 */
@Override
public String create(UserDto user) {
    requireNonNull(user, "User cannot be null");
    // An empty email is normalized to null so it fails the same null check.
    final String email = requireNonNull(emptyToNull(user.getEmail()), "Email cannot be null");
    return hash(email);
}
/**
 * Creating an identifier for a user without an email must fail fast with a
 * NullPointerException carrying the documented message.
 */
@Test
public void fail_with_NP_when_email_is_null() {
    assertThatThrownBy(() -> underTest.create(UserTesting.newUserDto("john", "John", null)))
            .isInstanceOf(NullPointerException.class)
            .hasMessage("Email cannot be null");
}
/**
 * Picks the "smallest" upsert key from the given set: the key with the
 * fewest columns, with a positional comparison as tie-break for equal
 * lengths.
 * <p>
 * NOTE(review): the equal-length tie-break returns k1 at the first index
 * where k1[index] &lt; k2[index] but keeps scanning past indices where
 * k1[index] &gt; k2[index], so it is not a strict lexicographic comparison —
 * confirm this asymmetry is intentional.
 *
 * @param upsertKeys candidate keys, may be null or empty
 * @return the chosen key as a sorted int array, or empty when no keys exist
 */
public static Optional<int[]> smallestKey(@Nullable Set<ImmutableBitSet> upsertKeys) {
    if (null == upsertKeys || upsertKeys.isEmpty()) {
        return Optional.empty();
    }
    return upsertKeys.stream()
            .map(ImmutableBitSet::toArray)
            .reduce(
                    (k1, k2) -> {
                        // Fewer columns wins outright.
                        if (k1.length < k2.length) {
                            return k1;
                        }
                        if (k1.length == k2.length) {
                            for (int index = 0; index < k1.length; index++) {
                                if (k1[index] < k2[index]) {
                                    return k1;
                                }
                            }
                        }
                        return k2;
                    });
}
/**
 * Null or empty key sets yield the empty key; otherwise the key with the
 * fewest columns wins, whether competing against an equal-length or a longer
 * alternative.
 */
@Test
void testSmallestKey() {
    assertThat(UpsertKeyUtil.getSmallestKey(null)).isEqualTo(emptyKey);
    assertThat(UpsertKeyUtil.getSmallestKey(new HashSet<>())).isEqualTo(emptyKey);
    ImmutableBitSet smallestKey = ImmutableBitSet.of(0, 1);
    ImmutableBitSet middleKey = ImmutableBitSet.of(0, 2);
    ImmutableBitSet longKey = ImmutableBitSet.of(0, 1, 2);
    Set<ImmutableBitSet> upsertKeys = new HashSet<>();
    upsertKeys.add(smallestKey);
    upsertKeys.add(middleKey);
    assertThat(UpsertKeyUtil.getSmallestKey(upsertKeys)).isEqualTo(smallestKey.toArray());
    upsertKeys.clear();
    upsertKeys.add(smallestKey);
    upsertKeys.add(longKey);
    assertThat(UpsertKeyUtil.getSmallestKey(upsertKeys)).isEqualTo(smallestKey.toArray());
}
/**
 * Reads one byte and interprets any non-zero value as {@code true}.
 */
@Override
public boolean readBoolean() {
    return readByte() != 0;
}
/**
 * Reading a boolean from a released buffer must fail with
 * IllegalReferenceCountException rather than returning stale data.
 */
@Test
public void testReadBooleanAfterRelease() {
    assertThrows(IllegalReferenceCountException.class, new Executable() {
        @Override
        public void execute() {
            releasedBuffer().readBoolean();
        }
    });
}
/**
 * Two ServerConfigAbility instances are equal when they are of the exact
 * same class and agree on the supportRemoteMetrics flag.
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    // Strict class check (not instanceof) so subclasses never compare equal.
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    return supportRemoteMetrics == ((ServerConfigAbility) o).supportRemoteMetrics;
}
/**
 * Covers the equals contract: reflexivity, null inequality, different-class
 * inequality, flag-mismatch inequality, and equality once the flags agree.
 */
@Test
void testEquals() {
    ServerConfigAbility ability = new ServerConfigAbility();
    ability.setSupportRemoteMetrics(true);
    assertEquals(ability, ability);
    assertNotEquals(null, ability);
    assertNotEquals(ability, new ClientConfigAbility());
    ServerConfigAbility newOne = new ServerConfigAbility();
    assertNotEquals(ability, newOne);
    newOne.setSupportRemoteMetrics(true);
    assertEquals(ability, newOne);
}
/**
 * Demonstrates the Specification pattern in three flavors: hard-coded
 * selectors, parameterized selectors, and composite selectors built with
 * and/or/not combinators.
 */
public static void main(String[] args) {
    // initialize creatures list
    var creatures = List.of(
            new Goblin(),
            new Octopus(),
            new Dragon(),
            new Shark(),
            new Troll(),
            new KillerBee()
    );
    // so-called "hard-coded" specification
    LOGGER.info("Demonstrating hard-coded specification :");
    // find all walking creatures
    LOGGER.info("Find all walking creatures");
    print(creatures, new MovementSelector(Movement.WALKING));
    // find all dark creatures
    LOGGER.info("Find all dark creatures");
    print(creatures, new ColorSelector(Color.DARK));
    LOGGER.info("\n");
    // so-called "parameterized" specification
    LOGGER.info("Demonstrating parameterized specification :");
    // find all creatures heavier than 600kg
    LOGGER.info("Find all creatures heavier than 600kg");
    print(creatures, new MassGreaterThanSelector(600.0));
    // find all creatures lighter than or weighing exactly 500kg
    LOGGER.info("Find all creatures lighter than or weighing exactly 500kg");
    print(creatures, new MassSmallerThanOrEqSelector(500.0));
    LOGGER.info("\n");
    // so-called "composite" specification
    LOGGER.info("Demonstrating composite specification :");
    // find all red and flying creatures
    LOGGER.info("Find all red and flying creatures");
    var redAndFlying = new ColorSelector(Color.RED).and(new MovementSelector(Movement.FLYING));
    print(creatures, redAndFlying);
    // find all creatures dark or red, non-swimming, and heavier than or equal to 400kg
    LOGGER.info("Find all scary creatures");
    var scaryCreaturesSelector = new ColorSelector(Color.DARK)
            .or(new ColorSelector(Color.RED)).and(new MovementSelector(Movement.SWIMMING).not())
            .and(new MassGreaterThanSelector(400.0).or(new MassEqualSelector(400.0)));
    print(creatures, scaryCreaturesSelector);
}
/**
 * Smoke test: running the demo application end-to-end must not throw.
 */
@Test
void shouldExecuteWithoutException() {
    assertDoesNotThrow(() -> App.main(new String[]{}));
}
/**
 * Resolves the AWS region to use: an explicitly configured region wins;
 * otherwise the region is derived from the availability zone reported by the
 * metadata API appropriate for the runtime environment (ECS vs EC2).
 *
 * @return the resolved AWS region name
 */
static String resolveRegion(AwsConfig awsConfig, AwsMetadataApi metadataApi, Environment environment) {
    final String configuredRegion = awsConfig.getRegion();
    if (!isNullOrEmptyAfterTrim(configuredRegion)) {
        return configuredRegion;
    }

    // No explicit configuration: ask the platform-appropriate metadata API.
    final String availabilityZone = environment.isRunningOnEcs()
            ? metadataApi.availabilityZoneEcs()
            : metadataApi.availabilityZoneEc2();
    return regionFrom(availabilityZone);
}
/**
 * With no configured region and an ECS environment, the region must be
 * derived from the ECS availability zone ("us-east-1a" -> "us-east-1").
 */
@Test
public void resolveRegionEcsMetadata() {
    // given
    AwsConfig awsConfig = AwsConfig.builder().build();
    AwsMetadataApi awsMetadataApi = mock(AwsMetadataApi.class);
    Environment environment = mock(Environment.class);
    given(environment.isRunningOnEcs()).willReturn(true);
    given(awsMetadataApi.availabilityZoneEcs()).willReturn("us-east-1a");

    // when
    String result = resolveRegion(awsConfig, awsMetadataApi, environment);

    // then
    assertEquals("us-east-1", result);
}
/**
 * Converts an evaluated quality-gate condition into its API representation,
 * combining the static condition definition with the recorded status.
 *
 * @throws IllegalStateException if no status was recorded for the condition
 */
@Override
public QualityGate.Condition apply(Condition input) {
    String metricKey = input.getMetric().getKey();
    ConditionStatus conditionStatus = statusPerConditions.get(input);
    checkState(conditionStatus != null, "Missing ConditionStatus for condition on metric key %s", metricKey);
    return builder
            .setStatus(convert(conditionStatus.getStatus()))
            .setMetricKey(metricKey)
            .setOperator(convert(input.getOperator()))
            .setErrorThreshold(input.getErrorThreshold())
            .setValue(conditionStatus.getValue())
            .build();
}
/**
 * A condition with no recorded status must fail fast with an
 * IllegalStateException naming the offending metric key.
 */
@Test
public void apply_throws_ISE_if_there_is_no_ConditionStatus_for_Condition_argument() {
    ConditionToCondition underTest = new ConditionToCondition(NO_STATUS_PER_CONDITIONS);

    assertThatThrownBy(() -> underTest.apply(SOME_CONDITION))
            .isInstanceOf(IllegalStateException.class)
            .hasMessage("Missing ConditionStatus for condition on metric key " + METRIC_KEY);
}
/**
 * Entry point for ABI type generation.
 * <p>
 * The single optional argument is the destination directory; without it,
 * sources are generated into {@code <cwd>/abi/src/main/java/}.
 */
public static void main(String[] args) throws Exception {
    final AbiTypesGenerator generator = new AbiTypesGenerator();
    final String destination = args.length == 1
            ? args[0]
            : System.getProperty("user.dir") + "/abi/src/main/java/";
    generator.generate(destination);
}
/**
 * Smoke test: generating the ABI types into a temp directory must complete
 * without throwing.
 */
@Test
public void testGeneration() throws Exception {
    AbiTypesGenerator.main(new String[] {tempDirPath});
}
/**
 * Returns the configured property value parsed as a long.
 *
 * @param key the property key to look up
 * @return the property value parsed as a long
 * @throws InvalidSettingException if the value is absent or not a valid long
 *     (Long.parseLong throws NumberFormatException for both cases)
 */
public long getLong(@NotNull final String key) throws InvalidSettingException {
    try {
        return Long.parseLong(getString(key));
    } catch (NumberFormatException ex) {
        throw new InvalidSettingException("Could not convert property '" + key + "' to a long.", ex);
    }
}
/**
 * A property stored as the string "300" must be returned as the long 300.
 */
@Test
public void testGetLong() throws InvalidSettingException {
    String key = "SomeNumber";
    long expResult = 300L;
    getSettings().setString(key, "300");
    long result = getSettings().getLong(key);
    Assert.assertEquals(expResult, result);
}
/**
 * Resets this buffer list (clearing contents and capacity) and returns it to
 * the recycler pool for reuse.
 *
 * @return true if the recycler accepted the instance back into the pool
 */
@Override
public boolean recycle() {
    // Reset state before pooling so the next borrower starts clean.
    clear();
    this.capacity = 0;
    return recyclers.recycle(this, handle);
}
/**
 * A recycled instance must be handed back out by the next newInstance()
 * call — same object identity proves the pool reuse path works.
 */
@Test
public void testRecycle() {
    final RecyclableByteBufferList object = RecyclableByteBufferList.newInstance();
    object.recycle();
    final RecyclableByteBufferList object2 = RecyclableByteBufferList.newInstance();
    Assert.assertSame(object, object2);
    object2.recycle();
}
/**
 * Convenience overload that registers the queue's metrics with the default
 * metrics system singleton.
 *
 * @return the QueueMetrics for the named queue
 */
public synchronized static QueueMetrics forQueue(String queueName, Queue parent,
    boolean enableUserMetrics,
    Configuration conf) {
  return forQueue(DefaultMetricsSystem.instance(), queueName, parent,
      enableUserMetrics, conf);
}
/**
 * Partial metric collection must trip the "exactly one metric" assertion in
 * the checker, while collecting all metrics must pass both before and after
 * the failed attempt.
 */
@Test
public void testCollectAllMetrics() {
    String queueName = "single";
    QueueMetrics.forQueue(ms, queueName, null, false, conf);
    MetricsSource queueSource = queueSource(ms, queueName);

    AppMetricsChecker.create()
            .checkAgainst(queueSource, true);
    try {
        // do not collect all metrics
        AppMetricsChecker.create()
                .checkAgainst(queueSource, false);
        Assert.fail();
    } catch (AssertionError e) {
        Assert.assertTrue(
                e.getMessage().contains("Expected exactly one metric for name "));
    }
    // collect all metrics
    AppMetricsChecker.create()
            .checkAgainst(queueSource, true);
}
/**
 * Intentional no-op: this implementation discards profile appends.
 * NOTE(review): presumably this is the disabled/empty variant of the API —
 * confirm callers do not expect the append to reach the profile store.
 */
@Override
public void profileAppend(String property, String value) {
}
/**
 * With the no-op API variant, profileAppend must never trigger the track
 * event callback — the callback body fails the test if it ever fires.
 */
@Test
public void testProfileAppend() {
    mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() {
        @Override
        public boolean onTrackEvent(String eventName, JSONObject eventProperties) {
            Assert.fail();
            return false;
        }
    });
    Set<String> values = new HashSet<>();
    values.add("12");
    values.add("123");
    mSensorsAPI.profileAppend("abcde", values);
}
/**
 * Fetches partition column statistics for the given partitions/columns,
 * grouped per statistics engine.
 * <p>
 * First queries the distinct engines that have stats stored for the table,
 * then collects per-engine statistics; each engine's result becomes one
 * element of the outer list.
 *
 * @return one List&lt;ColumnStatistics&gt; per engine that has statistics
 */
@Override
public List<List<ColumnStatistics>> getPartitionColumnStatistics(String catName, String dbName, String tableName,
    List<String> partNames, List<String> colNames) throws MetaException, NoSuchObjectException {
  // Note: this will get stats without verifying ACID.
  boolean committed = false;
  Query query = null;
  List<List<ColumnStatistics>> result = new ArrayList<>();
  try {
    openTransaction();
    // Distinct engines that have stored stats for this table's partitions.
    query = pm.newQuery(MPartitionColumnStatistics.class);
    query.setFilter("partition.table.tableName == t1 && partition.table.database.name == t2 && partition.table.database.catalogName == t3");
    query.declareParameters("java.lang.String t1, java.lang.String t2, java.lang.String t3");
    query.setResult("DISTINCT engine");
    Collection names = (Collection) query.execute(tableName, dbName, catName);
    List<String> engines = new ArrayList<>();
    for (Iterator i = names.iterator(); i.hasNext();) {
      engines.add((String) i.next());
    }
    for (String e : engines) {
      List<ColumnStatistics> cs = getPartitionColumnStatisticsInternal(
          catName, dbName, tableName, partNames, colNames, e, true, true);
      if (cs != null) {
        result.add(cs);
      }
    }
    committed = commitTransaction();
    return result;
  } finally {
    LOG.debug("Done executing getTableColumnStatistics with status : {}", committed);
    // Rolls back if the commit above did not happen; also closes the query.
    rollbackAndCleanup(committed, query);
  }
}
/**
 * Statistics for three partitions of a single-engine table must come back as
 * one engine group of three ColumnStatistics, with the expected aggregate
 * values for the single stats column.
 */
@Test
public void testGetPartitionStatistics() throws Exception {
    createPartitionedTable(true, true);

    List<List<ColumnStatistics>> stat;
    try (AutoCloseable c = deadline()) {
        stat = objectStore.getPartitionColumnStatistics(DEFAULT_CATALOG_NAME, DB1, TABLE1,
                Arrays.asList("test_part_col=a0", "test_part_col=a1", "test_part_col=a2"),
                Collections.singletonList("test_part_col"));
    }

    Assert.assertEquals(1, stat.size());
    Assert.assertEquals(3, stat.get(0).size());
    Assert.assertEquals(ENGINE, stat.get(0).get(0).getEngine());
    Assert.assertEquals(1, stat.get(0).get(0).getStatsObj().size());

    ColumnStatisticsData computedStats = stat.get(0).get(0).getStatsObj().get(0).getStatsData();
    ColumnStatisticsData expectedStats = new ColStatsBuilder<>(long.class).numNulls(1).numDVs(2)
            .low(3L).high(4L).hll(3, 4).kll(3, 4).build();
    assertEqualStatistics(expectedStats, computedStats);
}
@Override public <T> ResponseFuture<T> sendRequest(Request<T> request, RequestContext requestContext) { doEvaluateDisruptContext(request, requestContext); return _client.sendRequest(request, requestContext); }
@Test public void testSendRequest8() { when(_builder.build()).thenReturn(_request); when(_controller.getDisruptContext(any(String.class), any(ResourceMethod.class))).thenReturn(_disrupt); _client.sendRequest(_builder, _callback); verify(_underlying, times(1)).sendRequest(eq(_request), any(RequestContext.class), eq(_callback)); }
public Optional<Measure> toMeasure(@Nullable LiveMeasureDto measureDto, Metric metric) { requireNonNull(metric); if (measureDto == null) { return Optional.empty(); } Double value = measureDto.getValue(); String data = measureDto.getDataAsString(); switch (metric.getType().getValueType()) { case INT: return toIntegerMeasure(value, data); case LONG: return toLongMeasure(value, data); case DOUBLE: return toDoubleMeasure(value, data); case BOOLEAN: return toBooleanMeasure(value, data); case STRING: return toStringMeasure(data); case LEVEL: return toLevelMeasure(data); case NO_VALUE: return toNoValueMeasure(); default: throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType()); } }
@Test public void toMeasure_returns_int_part_of_value_in_dto_for_Int_Metric() { Optional<Measure> measure = underTest.toMeasure(new LiveMeasureDto().setValue(1.5d), SOME_INT_METRIC); assertThat(measure).isPresent(); assertThat(measure.get().getValueType()).isEqualTo(Measure.ValueType.INT); assertThat(measure.get().getIntValue()).isOne(); }
public String onCancelOption() { return hasCancelTask() ? getTask().getTypeForDisplay() : ""; }
@Test public void shouldReturnTheOnCancelOptionBasedOnWhatTheOnCancelTaskIs() { assertThat(new OnCancelConfig().onCancelOption(), is("")); assertThat(new OnCancelConfig(new ExecTask()).onCancelOption(), is("Custom Command")); assertThat(new OnCancelConfig(new AntTask()).onCancelOption(), is("Ant")); assertThat(new OnCancelConfig(new RakeTask()).onCancelOption(), is("Rake")); }
@ApiOperation(value = "Delete a member from a group", tags = { "Groups" }, code = 204) @ApiResponses(value = { @ApiResponse(code = 204, message = "Indicates the group was found and the member has been deleted. The response body is left empty intentionally."), @ApiResponse(code = 404, message = "Indicates the requested group was not found or that the user is not a member of the group. The status description contains additional information about the error.") }) @DeleteMapping("/identity/groups/{groupId}/members/{userId}") @ResponseStatus(HttpStatus.NO_CONTENT) public void deleteMembership(@ApiParam(name = "groupId") @PathVariable("groupId") String groupId, @ApiParam(name = "userId") @PathVariable("userId") String userId) { Group group = getGroupFromRequest(groupId); // Check if user is not a member of group since API does not return typed exception if (identityService.createUserQuery().memberOfGroup(group.getId()).userId(userId).count() != 1) { throw new FlowableObjectNotFoundException("User '" + userId + "' is not part of group '" + group.getId() + "'.", null); } identityService.deleteMembership(userId, group.getId()); }
@Test public void testDeleteMembership() throws Exception { try { Group testGroup = identityService.newGroup("testgroup"); testGroup.setName("Test group"); testGroup.setType("Test type"); identityService.saveGroup(testGroup); User testUser = identityService.newUser("testuser"); identityService.saveUser(testUser); identityService.createMembership("testuser", "testgroup"); HttpDelete httpDelete = new HttpDelete(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_GROUP_MEMBERSHIP, "testgroup", "testuser")); CloseableHttpResponse response = executeRequest(httpDelete, HttpStatus.SC_NO_CONTENT); closeResponse(response); // Check if membership is actually deleted assertThat(identityService.createUserQuery().memberOfGroup("testgroup").singleResult()).isNull(); } finally { try { identityService.deleteGroup("testgroup"); } catch (Throwable ignore) { // Ignore, since the group may not have been created in the test // or already deleted } try { identityService.deleteUser("testuser"); } catch (Throwable ignore) { // Ignore, since the group may not have been created in the test // or already deleted } } }
@Override public double calculate(double amount) { return amount * TAX_PERCENTAGE / 100.0; }
@Test void testTaxCalculation() { target = new ForeignTaxCalculator(); var tax = target.calculate(100.0); Assertions.assertEquals(tax, 60.0); }
@Override public T build(ConfigurationSourceProvider provider, String path) throws IOException, ConfigurationException { try (InputStream input = provider.open(requireNonNull(path))) { final JsonNode node = mapper.readTree(createParser(input)); if (node == null) { throw ConfigurationParsingException .builder("Configuration at " + path + " must not be empty") .build(path); } return build(node, path); } catch (JsonParseException e) { throw ConfigurationParsingException .builder("Malformed " + formatName) .setCause(e) .setLocation(e.getLocation()) .setDetail(e.getMessage()) .build(path); } }
@Test void throwsAnExceptionOnMalformedFiles() { assertThatExceptionOfType(ConfigurationParsingException.class) .isThrownBy(() -> factory.build(configurationSourceProvider, malformedFile)) .withMessageContaining(malformedFileError); }
public Optional<ShardingTable> findShardingTableByActualTable(final String actualTableName) { for (ShardingTable each : shardingTables.values()) { if (each.isExisted(actualTableName)) { return Optional.of(each); } } return Optional.empty(); }
@Test void assertNotFindTableRuleByActualTable() { assertFalse(createMaximumShardingRule().findShardingTableByActualTable("table_3").isPresent()); }
public static List<CharSequence> unescapeCsvFields(CharSequence value) { List<CharSequence> unescaped = new ArrayList<CharSequence>(2); StringBuilder current = InternalThreadLocalMap.get().stringBuilder(); boolean quoted = false; int last = value.length() - 1; for (int i = 0; i <= last; i++) { char c = value.charAt(i); if (quoted) { switch (c) { case DOUBLE_QUOTE: if (i == last) { // Add the last field and return unescaped.add(current.toString()); return unescaped; } char next = value.charAt(++i); if (next == DOUBLE_QUOTE) { // 2 double-quotes should be unescaped to one current.append(DOUBLE_QUOTE); break; } if (next == COMMA) { // This is the end of a field. Let's start to parse the next field. quoted = false; unescaped.add(current.toString()); current.setLength(0); break; } // double-quote followed by other character is invalid throw newInvalidEscapedCsvFieldException(value, i - 1); default: current.append(c); } } else { switch (c) { case COMMA: // Start to parse the next field unescaped.add(current.toString()); current.setLength(0); break; case DOUBLE_QUOTE: if (current.length() == 0) { quoted = true; break; } // double-quote appears without being enclosed with double-quotes // fall through case LINE_FEED: // fall through case CARRIAGE_RETURN: // special characters appears without being enclosed with double-quotes throw newInvalidEscapedCsvFieldException(value, i); default: current.append(c); } } } if (quoted) { throw newInvalidEscapedCsvFieldException(value, last); } unescaped.add(current.toString()); return unescaped; }
@Test public void unescapeCsvFieldsWithQuote3() { assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() { unescapeCsvFields("a\"b,a"); } }); }
public static Identifier parse(String stringValue) { return parse(stringValue, -1); }
@Test(expected = IllegalArgumentException.class) public void testParseIntegerWayTooBig() { Identifier.parse("3133742"); }
@InvokeOnHeader(Web3jConstants.SHH_ADD_TO_GROUP) void shhAddToGroup(Message message) throws IOException { String identityAddress = message.getHeader(Web3jConstants.ADDRESS, configuration::getAddress, String.class); Request<?, ShhAddToGroup> request = web3j.shhAddToGroup(identityAddress); setRequestId(message, request); ShhAddToGroup response = request.send(); boolean hasError = checkForError(message, response); if (!hasError) { message.setBody(response.addedToGroup()); } }
@Test public void shhAddToGroupTest() throws Exception { ShhAddToGroup response = Mockito.mock(ShhAddToGroup.class); Mockito.when(mockWeb3j.shhAddToGroup(any())).thenReturn(request); Mockito.when(request.send()).thenReturn(response); Mockito.when(response.addedToGroup()).thenReturn(Boolean.TRUE); Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.SHH_ADD_TO_GROUP); template.send(exchange); Boolean body = exchange.getIn().getBody(Boolean.class); assertTrue(body); }
public static String create() { return UuidFactoryImpl.INSTANCE.create(); }
@Test public void create_unique() { Set<String> all = new HashSet<>(); for (int i = 0; i < 50; i++) { String uuid = Uuids.create(); assertThat(uuid).isNotEmpty(); all.add(uuid); } assertThat(all).hasSize(50); }
@Override public void isEqualTo(@Nullable Object expected) { if (sameClassMessagesWithDifferentDescriptors(actual, expected)) { // This can happen with DynamicMessages, and it's very confusing if they both have the // same string. failWithoutActual( simpleFact("Not true that messages compare equal; they have different descriptors."), fact("expected", expected), fact("with descriptor", ((Message) expected).getDescriptorForType()), fact("but was", actual), fact("with descriptor", actual.getDescriptorForType())); } else if (notMessagesWithSameDescriptor(actual, expected)) { super.isEqualTo(expected); } else { DiffResult diffResult = makeDifferencer((Message) expected).diffMessages(actual, (Message) expected); if (!diffResult.isMatched()) { failWithoutActual( simpleFact( "Not true that messages compare equal.\n" + diffResult.printToString(config.reportMismatchesOnly()))); } } }
@Test public void testFullDiffOnlyWhenRelevant() { // There are no matches, so 'Full diff' should not be printed. expectFailureWhenTesting().that(parse("o_int: 3")).isEqualTo(parse("o_int: 4")); expectThatFailure().hasMessageThat().doesNotContain("Full diff"); // r_string is matched, so the 'Full diff' contains extra information. expectFailureWhenTesting() .that(parse("o_int: 3 r_string: 'abc'")) .isEqualTo(parse("o_int: 4 r_string: 'abc'")); expectThatFailure().hasMessageThat().contains("Full diff"); }
@Override public void onMsg(TbContext ctx, TbMsg msg) { locks.computeIfAbsent(msg.getOriginator(), SemaphoreWithTbMsgQueue::new) .addToQueueAndTryProcess(msg, ctx, this::processMsgAsync); }
@Test public void test_2_plus_2_meta() { var node = initNode(TbRuleNodeMathFunctionType.ADD, new TbMathResult(TbMathArgumentType.MESSAGE_METADATA, "result", 0, false, false, null), new TbMathArgument(TbMathArgumentType.MESSAGE_BODY, "a"), new TbMathArgument(TbMathArgumentType.MESSAGE_BODY, "b") ); TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, originator, TbMsgMetaData.EMPTY, JacksonUtil.newObjectNode().put("a", 2).put("b", 2).toString()); node.onMsg(ctx, msg); ArgumentCaptor<TbMsg> msgCaptor = ArgumentCaptor.forClass(TbMsg.class); verify(ctx, timeout(TIMEOUT)).tellSuccess(msgCaptor.capture()); TbMsg resultMsg = msgCaptor.getValue(); assertNotNull(resultMsg); assertNotNull(resultMsg.getData()); assertNotNull(resultMsg.getMetaData()); var result = resultMsg.getMetaData().getValue("result"); assertNotNull(result); assertEquals("4", result); }
@Override @ManagedOperation(description = "Adds the key to the store") public boolean add(String key) { if (!contains(key)) { return setOperations.add(repositoryName, key) != null; } else { return false; } }
@Test public void shouldAddKey() { idempotentRepository.add(KEY); verify(setOperations).add(REPOSITORY, KEY); }
@SuppressWarnings("PMD.AvoidInstantiatingObjectsInLoops") @VisibleForTesting static List<WorkflowRollupOverview> getRollupsForStepsFromPreviousRuns( String workflowId, Long workflowInstanceId, WorkflowInstanceAggregatedInfo aggregatedInfo, Map<String, StepTransition> runtimeDag, Set<String> foreachAndSubworkflowStepIds) { List<WorkflowRollupOverview> rollupOverviews = new ArrayList<>(); Set<Map.Entry<String, StepAggregatedView>> aggregatedStepsInfo = aggregatedInfo.getStepAggregatedViews().entrySet(); for (Map.Entry<String, StepAggregatedView> entry : aggregatedStepsInfo) { String stepId = entry.getKey(); if (runtimeDag.containsKey(stepId) || (foreachAndSubworkflowStepIds != null && foreachAndSubworkflowStepIds.contains(stepId))) { // we want to reset any steps that would've been restarted, so we don't want to add those // we also want to skip foreach and subworkflow steps from aggregated view because // we calculate rollups for those separately continue; } StepInstance.Status status = entry.getValue().getStatus(); WorkflowRollupOverview.CountReference ref = new WorkflowRollupOverview.CountReference(); ref.setCnt(1); if (status.isOverview()) { ref.setRef( Collections.singletonMap( getReference(workflowId, entry.getValue().getWorkflowRunId()), Collections.singletonList(getReference(workflowInstanceId, stepId, 0L)))); } rollupOverviews.add(WorkflowRollupOverview.of(1L, Collections.singletonMap(status, ref))); } return rollupOverviews; }
@Test public void testGetRollupsForStepsFromPreviousRuns() { Map<String, StepAggregatedView> stepMap = new HashMap<>(); stepMap.put( "step1", StepAggregatedView.builder() .status(StepInstance.Status.SUCCEEDED) .workflowRunId(1L) .build()); stepMap.put( "step2", StepAggregatedView.builder() .status(StepInstance.Status.FATALLY_FAILED) .workflowRunId(1L) .build()); WorkflowInstanceAggregatedInfo aggregatedInfo = new WorkflowInstanceAggregatedInfo(); aggregatedInfo.setStepAggregatedViews(stepMap); Map<String, StepTransition> runtimeDag = new LinkedHashMap<>(); runtimeDag.put("step3", new StepTransition()); runtimeDag.put("step4", new StepTransition()); runtimeDag.put("step5", new StepTransition()); Set<String> foreachAndSubworkflowStepIds = new HashSet<>(Collections.singletonList("step4")); List<WorkflowRollupOverview> rollups = RollupAggregationHelper.getRollupsForStepsFromPreviousRuns( "workflow_id", 1L, aggregatedInfo, runtimeDag, foreachAndSubworkflowStepIds); assertEquals(2, rollups.size()); assertNotNull(rollups.get(0).getOverview().get(StepInstance.Status.FATALLY_FAILED)); assertNotNull(rollups.get(1).getOverview().get(StepInstance.Status.SUCCEEDED)); // empty aggregated info steps rollups = RollupAggregationHelper.getRollupsForStepsFromPreviousRuns( "workflow_id", 1L, new WorkflowInstanceAggregatedInfo(), runtimeDag, foreachAndSubworkflowStepIds); assertEquals(0, rollups.size()); // empty foreach and subworkflow steps list rollups = RollupAggregationHelper.getRollupsForStepsFromPreviousRuns( "workflow_id", 1L, aggregatedInfo, runtimeDag, new HashSet<>()); assertEquals(2, rollups.size()); // empty runtimeDAG rollups = RollupAggregationHelper.getRollupsForStepsFromPreviousRuns( "workflow_id", 1L, aggregatedInfo, new HashMap<>(), foreachAndSubworkflowStepIds); assertEquals(2, rollups.size()); // scenario where some skipped because in runtimeDag, and some because of foreach or subworkflow stepMap.put( "step3", StepAggregatedView.builder() 
.status(StepInstance.Status.SUCCEEDED) .workflowRunId(1L) .build()); stepMap.put( "step4", StepAggregatedView.builder() .status(StepInstance.Status.SUCCEEDED) .workflowRunId(1L) .build()); stepMap.put( "step5", StepAggregatedView.builder() .status(StepInstance.Status.SUCCEEDED) .workflowRunId(1L) .build()); stepMap.put( "step6", StepAggregatedView.builder() .status(StepInstance.Status.SUCCEEDED) .workflowRunId(1L) .build()); stepMap.put( "step7", StepAggregatedView.builder().status(StepInstance.Status.STOPPED).workflowRunId(1L).build()); foreachAndSubworkflowStepIds.add("step6"); rollups = RollupAggregationHelper.getRollupsForStepsFromPreviousRuns( "workflow_id", 1L, aggregatedInfo, runtimeDag, foreachAndSubworkflowStepIds); assertEquals(3, rollups.size()); assertNotNull(rollups.get(0).getOverview().get(StepInstance.Status.STOPPED)); }
public boolean matches(KeyEvent e) { return matches(e, false); }
@Test public void testModifierOnlyKeybindReleaseWithOtherModifierKeyHeldDown() { Keybind keybind = new Keybind(VK_CONTROL, CTRL_DOWN_MASK); // when a modifier key is pressed, the keyevent will have the modifier in the modifiers field and in the keyCode field. // when it is released, it will only be in the keyCode field. Assert.assertTrue(keybind.matches(createKeyEvent(KEY_PRESSED, CTRL_DOWN_MASK, VK_CONTROL))); Assert.assertFalse(keybind.matches(createKeyEvent(KEY_PRESSED, SHIFT_DOWN_MASK | CTRL_DOWN_MASK, VK_SHIFT))); Assert.assertTrue(keybind.matches(createKeyEvent(KEY_RELEASED, SHIFT_DOWN_MASK, VK_CONTROL))); Assert.assertFalse(keybind.matches(createKeyEvent(KEY_RELEASED, 0, VK_SHIFT))); }
public static <K, V> Write<K, V> write() { return new AutoValue_KafkaIO_Write.Builder<K, V>() .setWriteRecordsTransform(writeRecords()) .build(); }
@Test public void testSinkWithSerializationErrors() throws Exception { // Attempt to write 10 elements to Kafka, but they will all fail to serialize, and be sent to // the DLQ int numElements = 10; try (MockProducerWrapper producerWrapper = new MockProducerWrapper(new FailingLongSerializer())) { ProducerSendCompletionThread completionThread = new ProducerSendCompletionThread(producerWrapper.mockProducer).start(); String topic = "test"; BadRecordErrorHandler<PCollection<Long>> eh = p.registerBadRecordErrorHandler(new ErrorSinkTransform()); p.apply(mkKafkaReadTransform(numElements, new ValueAsTimestampFn()).withoutMetadata()) .apply( KafkaIO.<Integer, Long>write() .withBootstrapServers("none") .withTopic(topic) .withKeySerializer(IntegerSerializer.class) .withValueSerializer(FailingLongSerializer.class) .withInputTimestamp() .withProducerFactoryFn(new ProducerFactoryFn(producerWrapper.producerKey)) .withBadRecordErrorHandler(eh)); eh.close(); PAssert.thatSingleton(Objects.requireNonNull(eh.getOutput())).isEqualTo(10L); p.run(); completionThread.shutdown(); verifyProducerRecords(producerWrapper.mockProducer, topic, 0, false, true); } }
private Mono<ServerResponse> search(ServerRequest request) { return Mono.fromSupplier( () -> new SearchParam(request.queryParams())) .map(param -> { var option = new SearchOption(); option.setIncludeTypes(List.of(PostHaloDocumentsProvider.POST_DOCUMENT_TYPE)); option.setKeyword(param.getKeyword()); option.setLimit(param.getLimit()); option.setHighlightPreTag(param.getHighlightPreTag()); option.setHighlightPostTag(param.getHighlightPostTag()); return option; }) .flatMap(this::performSearch) .flatMap(result -> ServerResponse.ok().bodyValue(result)); }
@Test void shouldResponseBadRequestIfRequestBodyValidationFailed() { var option = new SearchOption(); var errors = mock(Errors.class); when(searchService.search(any(SearchOption.class))) .thenReturn(Mono.error(new RequestBodyValidationException(errors))); client.post().uri("/indices/-/search") .bodyValue(option) .exchange() .expectStatus().isBadRequest(); }
@Override public void callExtensionPoint( LogChannelInterface log, Object object ) throws KettleException { if ( !( object instanceof JobExecutionExtension ) ) { return; } JobExecutionExtension extension = (JobExecutionExtension) object; Job job = extension.job; JobMeta jobMeta = job.getJobMeta(); final EmbeddedMetaStore embeddedMetaStore = jobMeta.getEmbeddedMetaStore(); RunConfigurationManager embeddedRunConfigurationManager = EmbeddedRunConfigurationManager.build( embeddedMetaStore ); //will load and save to meta all run configurations for ( JobEntryTrans trans : job.getActiveJobEntryTransformations().values() ) { RunConfiguration loadedRunConfiguration = runConfigurationManager.load( jobMeta.environmentSubstitute( trans.getRunConfiguration() ) ); embeddedRunConfigurationManager.save( loadedRunConfiguration ); } for ( JobEntryJob subJob : job.getActiveJobEntryJobs().values() ) { RunConfiguration loadedRunConfiguration = runConfigurationManager.load( jobMeta.environmentSubstitute( subJob.getRunConfiguration() ) ); embeddedRunConfigurationManager.save( loadedRunConfiguration ); } }
@Test public void testCallExtensionPoint() throws Exception { runConfigurationInjectExtensionPoint.callExtensionPoint( log, executionExt ); verify( runConfigurationManager, times( 2 ) ).load( eq( runConfName ) ); }
public static CreateSourceAsProperties from(final Map<String, Literal> literals) { try { return new CreateSourceAsProperties(literals, false); } catch (final ConfigException e) { final String message = e.getMessage().replace( "configuration", "property" ); throw new KsqlException(message, e); } }
@Test public void shouldThrowIfKeyFormatAndFormatProvided() { // When: final Exception e = assertThrows( KsqlException.class, () -> CreateSourceAsProperties.from( ImmutableMap.<String, Literal>builder() .put(KEY_FORMAT_PROPERTY, new StringLiteral("KAFKA")) .put(FORMAT_PROPERTY, new StringLiteral("JSON")) .build()) ); // Then: assertThat(e.getMessage(), containsString("Cannot supply both 'KEY_FORMAT' and 'FORMAT' properties, " + "as 'FORMAT' sets both key and value formats.")); assertThat(e.getMessage(), containsString("Either use just 'FORMAT', or use 'KEY_FORMAT' and 'VALUE_FORMAT'.")); }
@ConstantFunction(name = "divide", argTypes = {DOUBLE, DOUBLE}, returnType = DOUBLE) public static ConstantOperator divideDouble(ConstantOperator first, ConstantOperator second) { if (second.getDouble() == 0.0) { return ConstantOperator.createNull(Type.DOUBLE); } return ConstantOperator.createDouble(first.getDouble() / second.getDouble()); }
@Test public void divideDouble() { assertEquals(1.0, ScalarOperatorFunctions.divideDouble(O_DOUBLE_100, O_DOUBLE_100).getDouble(), 1); }
public long maximumTimeToWait() { return cachedMaximumTimeToWait; }
@Test public void testMaximumTimeToWait() { final int defaultHeartbeatIntervalMs = 1000; // Initial value before runOnce has been called assertEquals(ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS, consumerNetworkThread.maximumTimeToWait()); when(requestManagers.entries()).thenReturn(Collections.singletonList(Optional.of(heartbeatRequestManager))); when(heartbeatRequestManager.maximumTimeToWait(time.milliseconds())).thenReturn((long) defaultHeartbeatIntervalMs); consumerNetworkThread.runOnce(); // After runOnce has been called, it takes the default heartbeat interval from the heartbeat request manager assertEquals(defaultHeartbeatIntervalMs, consumerNetworkThread.maximumTimeToWait()); }
public static String getRmPrincipal(Configuration conf) throws IOException { String principal = conf.get(YarnConfiguration.RM_PRINCIPAL); String prepared = null; if (principal != null) { prepared = getRmPrincipal(principal, conf); } return prepared; }
@Test public void testGetRMPrincipalHA_Configuration() throws IOException { Configuration conf = new Configuration(); conf.set(YarnConfiguration.RM_ADDRESS, "myhost"); conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true); String result = YarnClientUtils.getRmPrincipal(conf); assertNull("The hostname translation did return null when the principal is " + "missing from the conf: " + result, result); conf = new Configuration(); conf.set(YarnConfiguration.RM_ADDRESS + ".rm0", "myhost"); conf.set(YarnConfiguration.RM_PRINCIPAL, "test/_HOST@REALM"); conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true); conf.set(YarnConfiguration.RM_HA_IDS, "rm0"); result = YarnClientUtils.getRmPrincipal(conf); assertEquals("The hostname translation did not produce the expected " + "results: " + result, "test/myhost@REALM", result); conf = new Configuration(); conf.set(YarnConfiguration.RM_ADDRESS + ".rm0", "myhost"); conf.set(YarnConfiguration.RM_PRINCIPAL, "test/_HOST@REALM"); conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true); try { result = YarnClientUtils.getRmPrincipal(conf); fail("The hostname translation succeeded even though no RM ids were " + "set: " + result); } catch (IOException ex) { // Expected } conf = new Configuration(); conf.set(YarnConfiguration.RM_ADDRESS + ".rm0", "myhost"); conf.set(YarnConfiguration.RM_PRINCIPAL, "test/_HOST@REALM"); conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true); conf.set(YarnConfiguration.RM_HA_ID, "rm0"); result = YarnClientUtils.getRmPrincipal(conf); assertEquals("The hostname translation did not produce the expected " + "results: " + result, "test/myhost@REALM", result); conf.set(YarnConfiguration.RM_PRINCIPAL, "test/yourhost@REALM"); result = YarnClientUtils.getRmPrincipal(conf); assertEquals("The hostname translation did not produce the expected " + "results: " + result, "test/yourhost@REALM", result); }
@CanIgnoreReturnValue public final Ordered containsExactlyEntriesIn(Multimap<?, ?> expectedMultimap) { checkNotNull(expectedMultimap, "expectedMultimap"); checkNotNull(actual); ListMultimap<?, ?> missing = difference(expectedMultimap, actual); ListMultimap<?, ?> extra = difference(actual, expectedMultimap); // TODO(kak): Possible enhancement: Include "[1 copy]" if the element does appear in // the subject but not enough times. Similarly for unexpected extra items. if (!missing.isEmpty()) { if (!extra.isEmpty()) { boolean addTypeInfo = hasMatchingToStringPair(missing.entries(), extra.entries()); // Note: The usage of countDuplicatesAndAddTypeInfo() below causes entries no longer to be // grouped by key in the 'missing' and 'unexpected items' parts of the message (we still // show the actual and expected multimaps in the standard format). String missingDisplay = addTypeInfo ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(missing).entries()) : countDuplicatesMultimap(annotateEmptyStringsMultimap(missing)); String extraDisplay = addTypeInfo ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(extra).entries()) : countDuplicatesMultimap(annotateEmptyStringsMultimap(extra)); failWithActual( fact("missing", missingDisplay), fact("unexpected", extraDisplay), simpleFact("---"), fact("expected", annotateEmptyStringsMultimap(expectedMultimap))); return ALREADY_FAILED; } else { failWithActual( fact("missing", countDuplicatesMultimap(annotateEmptyStringsMultimap(missing))), simpleFact("---"), fact("expected", annotateEmptyStringsMultimap(expectedMultimap))); return ALREADY_FAILED; } } else if (!extra.isEmpty()) { failWithActual( fact("unexpected", countDuplicatesMultimap(annotateEmptyStringsMultimap(extra))), simpleFact("---"), fact("expected", annotateEmptyStringsMultimap(expectedMultimap))); return ALREADY_FAILED; } return new MultimapInOrder(/* allowUnexpected = */ false, expectedMultimap); }
@Test public void containsExactlyRespectsDuplicates() { ImmutableListMultimap<Integer, String> actual = ImmutableListMultimap.of(3, "one", 3, "two", 3, "one", 4, "five", 4, "five"); ImmutableListMultimap<Integer, String> expected = ImmutableListMultimap.of(3, "two", 4, "five", 3, "one", 4, "five", 3, "one"); assertThat(actual).containsExactlyEntriesIn(expected); }
public String title() { return title; }
@Test public void changeTitle() { basic(); pp.title(TITLE_NEW); assertEquals("wrong title", TITLE_NEW, pp.title()); }
@Override public final Object getValue(final int columnIndex, final Class<?> type) throws SQLException { ShardingSpherePreconditions.checkNotContains(INVALID_MEMORY_TYPES, type, () -> new SQLFeatureNotSupportedException(String.format("Get value from `%s`", type.getName()))); Object result = currentResultSetRow.getCell(columnIndex); wasNull = null == result; return result; }
@Test void assertGetValueForBlob() { assertThrows(SQLFeatureNotSupportedException.class, () -> memoryMergedResult.getValue(1, Blob.class)); }
public void close(ThreadLocal<DelegatingDbSessionSupplier> dbSessionThreadLocal, String label) { DelegatingDbSessionSupplier delegatingDbSessionSupplier = dbSessionThreadLocal.get(); boolean getCalled = delegatingDbSessionSupplier.isPopulated(); if (getCalled) { try { DbSession res = delegatingDbSessionSupplier.get(); res.close(); } catch (Exception e) { LOG.error(format("Failed to close %s connection in %s", label, currentThread()), e); } } }
@Test void openSession_with_caching_returns_DbSession_that_does_not_roll_back_on_close_if_any_mutation_call_was_followed_by_rollback_with_parameters() throws SQLException { boolean force = random.nextBoolean(); DbSession dbSession = openSessionAndDoSeveralMutatingAndNeutralCalls(); dbSession.rollback(force); dbSession.close(); verify(myBatisDbSession, times(1)).rollback(force); verify(myBatisDbSession, times(0)).rollback(); }
static int encodingBytesLength(String fieldName) { Preconditions.checkArgument(fieldName.length() > 0); for (int i = 0; i < fieldName.length(); i++) { char c = fieldName.charAt(i); if (c < 48) { return 8; } if (c > 57 && c < 65) { return 8; } if (c > 90 && c < 97) { return 8; } if (c > 122) { return 8; } } // Every char range: 10 + 26 * 2 = 62, which can be represented by 6 bits(0~63(0b111111)) return (int) Math.ceil(fieldName.length() * 6.0 / 8); }
@Test public void testEncodingBytesLength() { Assert.assertEquals(encodingBytesLength("abc"), 3); Assert.assertEquals(encodingBytesLength("abcd12345"), 7); Assert.assertEquals(encodingBytesLength("abcd1234abcd"), 9); }
@Override public List<Predicate> getOperands() { return operands; }
@Test void requireThatConstructorsWork() { Predicate foo = SimplePredicates.newString("foo"); Predicate bar = SimplePredicates.newString("bar"); Disjunction node = new Disjunction(foo, bar); assertEquals(List.of(foo, bar), node.getOperands()); node = new Disjunction(List.of(foo, bar)); assertEquals(List.of(foo, bar), node.getOperands()); }
@Override public void onMsg(TbContext ctx, TbMsg msg) { ctx.logJsEvalRequest(); withCallback(scriptEngine.executeFilterAsync(msg), filterResult -> { ctx.logJsEvalResponse(); ctx.tellNext(msg, filterResult ? TbNodeConnectionType.TRUE : TbNodeConnectionType.FALSE); }, t -> { ctx.tellFailure(msg, t); ctx.logJsEvalFailure(); }, ctx.getDbCallbackExecutor()); }
@Test public void falseEvaluationDoNotSendMsg() throws TbNodeException { initWithScript(); TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, null, TbMsgMetaData.EMPTY, TbMsgDataType.JSON, TbMsg.EMPTY_JSON_OBJECT, ruleChainId, ruleNodeId); when(scriptEngine.executeFilterAsync(msg)).thenReturn(Futures.immediateFuture(false)); node.onMsg(ctx, msg); verify(ctx).getDbCallbackExecutor(); verify(ctx).tellNext(msg, TbNodeConnectionType.FALSE); }
public static UIf create( UExpression condition, UStatement thenStatement, UStatement elseStatement) { return new AutoValue_UIf(condition, thenStatement, elseStatement); }
@Test public void inlineWithElse() { UIf ifTree = UIf.create( UFreeIdent.create("cond"), UBlock.create( UExpressionStatement.create( UAssign.create(UFreeIdent.create("x"), UFreeIdent.create("y")))), UBlock.create( UExpressionStatement.create( UAssign.create(UFreeIdent.create("x"), UFreeIdent.create("z"))))); bind(new UFreeIdent.Key("cond"), parseExpression("true")); bind(new UFreeIdent.Key("x"), parseExpression("x")); bind(new UFreeIdent.Key("y"), parseExpression("\"foo\"")); bind(new UFreeIdent.Key("z"), parseExpression("\"bar\"")); assertInlines( Joiner.on(System.lineSeparator()) .join( "if (true) {", // " x = \"foo\";", "} else {", " x = \"bar\";", "}"), ifTree); }