focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Runs CONFIG GET <pattern> against the given cluster node and converts the
// flat key/value reply list into Properties. Returns null when the reply is null.
// NOTE(review): uses writeAsync for a read-only command — presumably to route to
// the node's write connection; confirm intent.
@Override public Properties getConfig(RedisClusterNode node, String pattern) { RedisClient entry = getEntry(node); RFuture<List<String>> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CONFIG_GET, pattern); List<String> r = syncFuture(f); if (r != null) { return Converters.toProperties(r); } return null; }
// Smoke-tests getConfig against the first master: a live Redis returns well over
// 20 CONFIG entries for pattern "*".
@Test public void testGetConfig() { RedisClusterNode master = getFirstMaster(); Properties config = connection.getConfig(master, "*"); assertThat(config.size()).isGreaterThan(20); }
static int encodedBufferSize(int len, boolean breakLines) { // Cast len to long to prevent overflow long len43 = ((long) len << 2) / 3; // Account for padding long ret = (len43 + 3) & ~3; if (breakLines) { ret += len43 / MAX_LINE_LENGTH; } return ret < Integer.MAX_VALUE ? (int) ret : Integer.MAX_VALUE; }
// Verifies the overflow clamp: the largest possible input length must produce
// Integer.MAX_VALUE rather than a wrapped-around negative size, with and
// without line breaks.
@Test public void testOverflowEncodedBufferSize() { assertEquals(Integer.MAX_VALUE, Base64.encodedBufferSize(Integer.MAX_VALUE, true)); assertEquals(Integer.MAX_VALUE, Base64.encodedBufferSize(Integer.MAX_VALUE, false)); }
public static boolean isMonomorphic(Class<?> targetType) { if (targetType.isArray()) { return isMonomorphic(targetType.getComponentType()); } // Although enum itself can be non-final, but it's subclass can only be anonymous // inner class, which is final, and we serialize enum by value, which already // identify the actual class, so we can take enum as monomorphic. return targetType.isEnum() || Modifier.isFinal(targetType.getModifiers()); }
// Enums and arrays of enums must all be classified as monomorphic.
@Test public void testMonomorphic() { Assert.assertTrue(ReflectionUtils.isMonomorphic(MonomorphicTestEnum1.class)); Assert.assertTrue(ReflectionUtils.isMonomorphic(MonomorphicTestEnum2.class)); Assert.assertTrue(ReflectionUtils.isMonomorphic(MonomorphicTestEnum1[].class)); Assert.assertTrue(ReflectionUtils.isMonomorphic(MonomorphicTestEnum2[].class)); }
// Sets the task type, rejecting values longer than 40 characters (presumably
// a DB column limit — TODO confirm). Returns this for chaining.
public CeQueueDto setTaskType(String s) {
    final int length = s.length();
    checkArgument(length <= 40, "Value of task type is too long: %s", s);
    this.taskType = s;
    return this;
}
// Accepting values: empty string, short string, a 15-char string, and the
// exact 40-char boundary that setTaskType's checkArgument allows.
// NOTE(review): the test name mentions 15 chars but the production limit is 40;
// the boundary case below closes that coverage gap.
@Test void setTaskType_accepts_empty_and_string_15_chars_or_less() {
    assertThatNoException().isThrownBy(() -> underTest.setTaskType(""));
    assertThatNoException().isThrownBy(() -> underTest.setTaskType("bar"));
    assertThatNoException().isThrownBy(() -> underTest.setTaskType(STR_15_CHARS));
    // Boundary: exactly 40 characters is still accepted.
    assertThatNoException().isThrownBy(() -> underTest.setTaskType("a".repeat(40)));
}
// Chooses the address to bind a local socket to: null (meaning the wildcard
// address) when wildcard binding is requested, otherwise the given local address.
public static InetAddress bindToLocalAddress(InetAddress localAddr, boolean bindWildCardAddress) {
    return bindWildCardAddress ? null : localAddr;
}
// Non-wildcard binding keeps the concrete address; wildcard binding yields null.
@Test public void testBindToLocalAddress() throws Exception { assertNotNull(NetUtils .bindToLocalAddress(NetUtils.getLocalInetAddress("127.0.0.1"), false)); assertNull(NetUtils .bindToLocalAddress(NetUtils.getLocalInetAddress("127.0.0.1"), true)); }
// Convenience overload: registers the instance into the default cluster.
@Override public void registerInstance(String serviceName, String ip, int port) throws NacosException { registerInstance(serviceName, ip, port, Constants.DEFAULT_CLUSTER_NAME); }
// Verifies the (serviceName, Instance) overload delegates to the proxy with the
// default group. NOTE(review): this exercises a different overload than the
// (serviceName, ip, port) variant — confirm it is paired with the right focal method.
@Test void testRegisterInstance5() throws NacosException { //given String serviceName = "service1"; Instance instance = new Instance(); //when client.registerInstance(serviceName, instance); //then verify(proxy, times(1)).registerService(serviceName, Constants.DEFAULT_GROUP, instance); }
// Builds a single INSERT statement persisting one row of sample statistics per
// complex-type column. Values are joined as "(...)" tuples; dbName/tableName are
// SQL-escaped before being embedded in the combined "db.table" literal.
// NOTE(review): most values are concatenated directly rather than parameterized —
// acceptable only because inputs come from internal catalog metadata; confirm.
public String generateComplexTypeColumnTask(long tableId, long dbId, String tableName, String dbName, List<ColumnStats> complexTypeStats) { String sep = ", "; String prefix = "INSERT INTO " + STATISTICS_DB_NAME + "." + SAMPLE_STATISTICS_TABLE_NAME + " VALUES "; StringJoiner joiner = new StringJoiner(sep, prefix, ";"); for (ColumnStats columnStats : complexTypeStats) { StringBuilder builder = new StringBuilder(); builder.append("("); builder.append(tableId).append(sep); builder.append(addSingleQuote(columnStats.getColumnName())).append(sep); builder.append(dbId).append(sep); builder.append("'").append(StringEscapeUtils.escapeSql(dbName)).append(".") .append(StringEscapeUtils.escapeSql(tableName)).append("'").append(sep); builder.append(addSingleQuote(dbName)).append(sep); builder.append(columnStats.getRowCount()).append(sep); builder.append(columnStats.getDateSize()).append(sep); builder.append(columnStats.getDistinctCount(0L)).append(sep); builder.append(columnStats.getNullCount()).append(sep); builder.append(columnStats.getMax()).append(sep); builder.append(columnStats.getMin()).append(sep); builder.append("NOW()"); builder.append(")"); joiner.add(builder); } return joiner.toString(); }
// Parses the generated SQL and checks it is a VALUES-based INSERT with the
// expected row count (3 complex columns) and 12 values per row.
@Test public void generateComplexTypeColumnTask() { SampleInfo sampleInfo = tabletSampleManager.generateSampleInfo("test", "t_struct"); List<String> columnNames = table.getColumns().stream().map(Column::getName).collect(Collectors.toList()); List<Type> columnTypes = table.getColumns().stream().map(Column::getType).collect(Collectors.toList()); ColumnSampleManager columnSampleManager = ColumnSampleManager.init(columnNames, columnTypes, table, sampleInfo); String complexSql = sampleInfo.generateComplexTypeColumnTask(table.getId(), db.getId(), table.getName(), db.getFullName(), columnSampleManager.getComplexTypeStats()); List<StatementBase> stmt = SqlParser.parse(complexSql, connectContext.getSessionVariable()); Assert.assertTrue(stmt.get(0) instanceof InsertStmt); InsertStmt insertStmt = (InsertStmt) stmt.get(0); Assert.assertTrue(insertStmt.getQueryStatement().getQueryRelation() instanceof ValuesRelation); ValuesRelation valuesRelation = (ValuesRelation) insertStmt.getQueryStatement().getQueryRelation(); Assert.assertTrue(valuesRelation.getRows().size() == 3); Assert.assertTrue(valuesRelation.getRows().get(0).size() == 12); }
// Formats an epoch-millis timestamp with the configured formatter; when no
// formatter is set, the raw long value is passed through (boxed).
public Object format(long timestamp) {
    if (dateTimeFormatter == null) {
        return timestamp;
    }
    return dateTimeFormatter.format(Instant.ofEpochMilli(timestamp));
}
// A null formatter must yield the raw timestamp, regardless of the zone argument.
@Test void testDoNotFormatTimestamp() { TimestampFormatter timestampFormatter = new TimestampFormatter(null, ZoneId.of("GMT+01:00")); assertThat(timestampFormatter.format(timestamp)).isEqualTo(timestamp); }
// Returns the external storage directory path only while media is mounted
// (read-write or read-only); any other state yields null.
public String getMountedExternalStorageDirectoryPath() {
    final String state = Environment.getExternalStorageState();
    final boolean mounted = Environment.MEDIA_MOUNTED.equals(state)
            || Environment.MEDIA_MOUNTED_READ_ONLY.equals(state);
    return mounted ? getExternalStorageDirectoryPath() : null;
}
// MEDIA_EJECTING is not a mounted state, so the path must be null.
@Test public void getMountedExternalStorageDirectoryPathReturnsNullWhenEjecting() { ShadowEnvironment.setExternalStorageState(Environment.MEDIA_EJECTING); assertThat(contextUtil.getMountedExternalStorageDirectoryPath(), is(nullValue())); }
// Returns the next record by advancing the shared `row`'s row id and returning
// that same mutable instance. The `reuse` argument is ignored — callers must not
// hold references across calls.
@Override public RowData nextRecord(RowData reuse) { // return the next row row.setRowId(this.nextRow++); return row; }
// Reads the whole flat test file across 4 splits with a projected/reordered
// column set and checks total row count plus an aggregate over column f0.
@Test void testReadFileWithPartitionValues() throws IOException { FileInputSplit[] splits = createSplits(testFileFlat, 4); long cnt = 0; long totalF0 = 0; // read all splits for (FileInputSplit split : splits) { try (OrcColumnarRowSplitReader reader = createReader(new int[] {2, 0, 1}, testSchemaFlat, new HashMap<>(), split)) { // read and count all rows while (!reader.reachedEnd()) { RowData row = reader.nextRecord(null); assertThat(row.isNullAt(0)).isFalse(); assertThat(row.isNullAt(1)).isFalse(); assertThat(row.isNullAt(2)).isFalse(); assertThat(row.getString(0).toString()).isNotNull(); totalF0 += row.getInt(1); assertThat(row.getString(2).toString()).isNotNull(); cnt++; } } } // check that all rows have been read assertThat(cnt).isEqualTo(1920800); assertThat(totalF0).isEqualTo(1844737280400L); }
// Runs the executor under the global-lock flag and its own lock config.
// Binds the flag only when not already bound (supports nesting), swaps in the
// executor's config, and in finally restores both: the flag is unbound only by
// the outermost caller, and the previous config is either restored or removed.
// The ordering here (flag first, then config; reverse on cleanup) is deliberate.
public Object execute(GlobalLockExecutor executor) throws Throwable { boolean alreadyInGlobalLock = RootContext.requireGlobalLock(); if (!alreadyInGlobalLock) { RootContext.bindGlobalLockFlag(); } // set my config to config holder so that it can be access in further execution // for example, LockRetryController can access it with config holder GlobalLockConfig myConfig = executor.getGlobalLockConfig(); GlobalLockConfig previousConfig = GlobalLockConfigHolder.setAndReturnPrevious(myConfig); try { return executor.execute(); } finally { // only unbind when this is the root caller. // otherwise, the outer caller would lose global lock flag if (!alreadyInGlobalLock) { RootContext.unbindGlobalLockFlag(); } // if previous config is not null, we need to set it back // so that the outer logic can still use their config if (previousConfig != null) { GlobalLockConfigHolder.setAndReturnPrevious(previousConfig); } else { GlobalLockConfigHolder.remove(); } } }
// Nested execution: the inner executor sees its own config and the flag stays
// bound; after the inner call returns, the outer flag and config are restored.
@Test void testNested() { assertDoesNotThrow(() -> { template.execute(new GlobalLockExecutor() { @Override public Object execute() { assertTrue(RootContext.requireGlobalLock(), "fail to bind global lock flag"); assertSame(config1, GlobalLockConfigHolder.getCurrentGlobalLockConfig(), "global lock config changed during execution"); assertDoesNotThrow(() -> { template.execute(new GlobalLockExecutor() { @Override public Object execute() { assertTrue(RootContext.requireGlobalLock(), "inner lost global lock flag"); assertSame(config2, GlobalLockConfigHolder.getCurrentGlobalLockConfig(), "fail to set inner global lock config"); return null; } @Override public GlobalLockConfig getGlobalLockConfig() { return config2; } }); }); assertTrue(RootContext.requireGlobalLock(), "outer lost global lock flag"); assertSame(config1, GlobalLockConfigHolder.getCurrentGlobalLockConfig(), "outer global lock config was not restored"); return null; } @Override public GlobalLockConfig getGlobalLockConfig() { return config1; } }); }); }
// Encodes an ECDSA (r, s) signature as a DER SEQUENCE of two ASN.1 INTEGERs.
// On IOException the error is swallowed and an empty array is returned —
// callers must treat byte[0] as failure.
// NOTE(review): `seq` is not in the try-with-resources; if addObject throws,
// seq.close() is skipped (baos is still closed). Confirm DERSequenceGenerator
// holds no resource beyond the stream.
public static byte[] toDerFormat(ECDSASignature signature) { try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) { DERSequenceGenerator seq = new DERSequenceGenerator(baos); seq.addObject(new ASN1Integer(signature.r)); seq.addObject(new ASN1Integer(signature.s)); seq.close(); return baos.toByteArray(); } catch (IOException ex) { return new byte[0]; } }
// Byte-for-byte comparison against a known-good DER encoding of the example signature.
@Test void toDerFormat() { byte[] signDER = CryptoUtils.toDerFormat(ecdsaSignatureExample); assertArrayEquals(Numeric.hexStringToByteArray(TX_SIGN_FORMAT_DER_HEX), signDER); }
// Exposes the parts read-only: the returned iterator rejects remove().
@Override public Iterator<Part> iterator() {
    final Iterator<Part> inner = parts.iterator();
    return Iterators.unmodifiableIterator(inner);
}
// The spec string must be tokenized into alternating variable ({x}) and literal
// parts, in order.
@Test public void testBasic() { UriSpec spec = new UriSpec("{one}{two}three-four-five{six}seven{eight}"); Iterator<UriSpec.Part> iterator = spec.iterator(); checkPart(iterator.next(), "one", true); checkPart(iterator.next(), "two", true); checkPart(iterator.next(), "three-four-five", false); checkPart(iterator.next(), "six", true); checkPart(iterator.next(), "seven", false); checkPart(iterator.next(), "eight", true); }
// Value equality over acl, createParent, ensureAtomic, owner, group, and mode.
// NOTE(review): hashCode() is not visible in this chunk — confirm it covers the
// same fields so the equals/hashCode contract holds.
@Override public boolean equals(Object o) { if (this == o) { return true; } if (!(o instanceof CreateOptions)) { return false; } CreateOptions that = (CreateOptions) o; return Objects.equal(mAcl, that.mAcl) && (mCreateParent == that.mCreateParent) && (mEnsureAtomic == that.mEnsureAtomic) && Objects.equal(mOwner, that.mOwner) && Objects.equal(mGroup, that.mGroup) && Objects.equal(mMode, that.mMode); }
// Two default instances built from the same configuration must be equal
// (EqualsTester also checks hashCode consistency and the null/reflexive cases).
@Test public void equalsTest() throws Exception { new EqualsTester() .addEqualityGroup( CreateOptions.defaults(mConfiguration), CreateOptions.defaults(mConfiguration)) .testEquals(); }
// Decorates the callable with the observation and invokes it immediately,
// propagating any exception the callable throws.
static <T> T executeCallable(Observation observation, Callable<T> callable) throws Exception {
    Callable<T> decorated = decorateCallable(observation, callable);
    return decorated.call();
}
// The callable's value is returned unchanged, the observation completes without
// errors, and the underlying service is invoked exactly once.
@Test public void shouldExecuteCallable() throws Throwable { given(helloWorldService.returnHelloWorldWithException()).willReturn("Hello world"); String value = Observations .executeCallable(observation, helloWorldService::returnHelloWorldWithException); assertThatObservationWasStartedAndFinishedWithoutErrors(); assertThat(value).isEqualTo("Hello world"); then(helloWorldService).should(times(1)).returnHelloWorldWithException(); }
// Reconstructs a MessageExt from the reply-message request header and body,
// decompressing the body when the sys-flag indicates compression (decompression
// failure is logged and the compressed body is used as-is), stamps the arrival
// time, and hands the message to processReplyMessage. Any exception maps to a
// SYSTEM_ERROR response; otherwise SUCCESS.
private RemotingCommand receiveReplyMessage(ChannelHandlerContext ctx, RemotingCommand request) throws RemotingCommandException { final RemotingCommand response = RemotingCommand.createResponseCommand(null); long receiveTime = System.currentTimeMillis(); ReplyMessageRequestHeader requestHeader = (ReplyMessageRequestHeader) request.decodeCommandCustomHeader(ReplyMessageRequestHeader.class); try { MessageExt msg = new MessageExt(); msg.setTopic(requestHeader.getTopic()); msg.setQueueId(requestHeader.getQueueId()); msg.setStoreTimestamp(requestHeader.getStoreTimestamp()); if (requestHeader.getBornHost() != null) { msg.setBornHost(NetworkUtil.string2SocketAddress(requestHeader.getBornHost())); } if (requestHeader.getStoreHost() != null) { msg.setStoreHost(NetworkUtil.string2SocketAddress(requestHeader.getStoreHost())); } byte[] body = request.getBody(); int sysFlag = requestHeader.getSysFlag(); if ((sysFlag & MessageSysFlag.COMPRESSED_FLAG) == MessageSysFlag.COMPRESSED_FLAG) { try { Compressor compressor = CompressorFactory.getCompressor(MessageSysFlag.getCompressionType(sysFlag)); body = compressor.decompress(body); } catch (IOException e) { logger.warn("err when uncompress constant", e); } } msg.setBody(body); msg.setFlag(requestHeader.getFlag()); MessageAccessor.setProperties(msg, MessageDecoder.string2messageProperties(requestHeader.getProperties())); MessageAccessor.putProperty(msg, MessageConst.PROPERTY_REPLY_MESSAGE_ARRIVE_TIME, String.valueOf(receiveTime)); msg.setBornTimestamp(requestHeader.getBornTimestamp()); msg.setReconsumeTimes(requestHeader.getReconsumeTimes() == null ? 0 : requestHeader.getReconsumeTimes()); logger.debug("receive reply message :{}", msg); processReplyMessage(msg); response.setCode(ResponseCode.SUCCESS); response.setRemark(null); } catch (Exception e) { logger.warn("unknown err when receiveReplyMsg", e); response.setCode(ResponseCode.SYSTEM_ERROR); response.setRemark("process reply message fail"); } return response; }
// A well-formed reply-message request must produce a SUCCESS response.
// NOTE(review): getBody() is stubbed twice; only the second stub (new byte[1])
// takes effect — the getMessageResult() stub is dead code.
@Test public void testReceiveReplyMessage() throws Exception { ChannelHandlerContext ctx = mock(ChannelHandlerContext.class); RemotingCommand request = mock(RemotingCommand.class); when(request.getCode()).thenReturn(RequestCode.PUSH_REPLY_MESSAGE_TO_CLIENT); when(request.getBody()).thenReturn(getMessageResult()); when(request.decodeCommandCustomHeader(ReplyMessageRequestHeader.class)).thenReturn(createReplyMessageRequestHeader()); when(request.getBody()).thenReturn(new byte[1]); RemotingCommand command = processor.processRequest(ctx, request); assertNotNull(command); assertEquals(ResponseCode.SUCCESS, command.getCode()); }
// Encrypts a password via the shared encoder.
// NOTE(review): the meaning of the boolean flag (false) is not visible here —
// presumably "do not add an obfuscation prefix"; confirm against the encoder API.
public static final String encryptPassword( String password ) { return encoder.encode( password, false ); }
// Golden-value tests: null/empty map to "", and several representative inputs
// (spaces, symbols, digits) must produce these exact stable ciphertexts —
// changing the encoder would break stored passwords.
@Test public void testEncryptPassword() throws KettleValueException { String encryption; encryption = Encr.encryptPassword( null ); assertTrue( "".equals( encryption ) ); encryption = Encr.encryptPassword( "" ); assertTrue( "".equals( encryption ) ); encryption = Encr.encryptPassword( " " ); assertTrue( "2be98afc86aa7f2e4cb79ce309ed2ef9a".equals( encryption ) ); encryption = Encr.encryptPassword( "Test of different encryptions!!@#$%" ); assertTrue( "54657374206f6620646966666572656e742067d0fbddb11ad39b8ba50aef31fed1eb9f".equals( encryption ) ); encryption = Encr.encryptPassword( " Spaces left" ); assertTrue( "2be98afe84af48285a81cbd30d297a9ce".equals( encryption ) ); encryption = Encr.encryptPassword( "Spaces right" ); assertTrue( "2be98afc839d79387ae0aee62d795a7ce".equals( encryption ) ); encryption = Encr.encryptPassword( " Spaces " ); assertTrue( "2be98afe84a87d2c49809af73db81ef9a".equals( encryption ) ); encryption = Encr.encryptPassword( "1234567890" ); assertTrue( "2be98afc86aa7c3d6f84dfb2689caf68a".equals( encryption ) ); }
// Requests a stop of the given data node by publishing a STOP lifecycle event.
// Throws NodeNotFoundException for unknown ids and IllegalArgumentException
// when the node is not in AVAILABLE state. Returns the node as looked up
// (state not yet updated — the stop is asynchronous via the event bus).
@Override public DataNodeDto stopNode(String nodeId) throws NodeNotFoundException { final DataNodeDto node = nodeService.byNodeId(nodeId); if (node.getDataNodeStatus() != DataNodeStatus.AVAILABLE) { throw new IllegalArgumentException("Only running data nodes can be stopped."); } DataNodeLifecycleEvent e = DataNodeLifecycleEvent.create(node.getNodeId(), DataNodeLifecycleTrigger.STOP); clusterEventBus.post(e); return node; }
// Stopping an AVAILABLE node must publish exactly the STOP lifecycle event.
@Test public void stopNodePublishesClusterEvent() throws NodeNotFoundException { final String testNodeId = "node"; nodeService.registerServer(buildTestNode(testNodeId, DataNodeStatus.AVAILABLE)); classUnderTest.stopNode(testNodeId); verify(clusterEventBus).post(DataNodeLifecycleEvent.create(testNodeId, DataNodeLifecycleTrigger.STOP)); }
public static void main(String[] args) { // initialize and wire the system var menuStore = new MenuStore(); Dispatcher.getInstance().registerStore(menuStore); var contentStore = new ContentStore(); Dispatcher.getInstance().registerStore(contentStore); var menuView = new MenuView(); menuStore.registerView(menuView); var contentView = new ContentView(); contentStore.registerView(contentView); // render initial view menuView.render(); contentView.render(); // user clicks another menu item // this triggers action dispatching and eventually causes views to render with new content menuView.itemClicked(MenuItem.COMPANY); }
// Smoke test: the whole wiring-and-render flow must complete without throwing.
@Test void shouldExecuteWithoutException() { assertDoesNotThrow(() -> App.main(new String[]{})); }
// Resolves this output field's reason code from the ordered list using the
// 1-based `rank`. A null rank yields null; an out-of-range rank resolves to a
// null code, which commonEvaluate then maps according to dataType.
public Object evaluateReasonCodeValue(final ProcessingDTO processingDTO) {
    if (rank == null) {
        return null;
    }
    final List<String> orderedReasonCodes = processingDTO.getOrderedReasonCodes();
    final int index = rank - 1;
    String resultCode = null;
    // Guard both bounds: the original only checked the upper bound, so a rank
    // of 0 or less triggered orderedReasonCodes.get(-1) -> IndexOutOfBoundsException.
    if (index >= 0 && index < orderedReasonCodes.size()) {
        resultCode = orderedReasonCodes.get(index);
    }
    return commonEvaluate(resultCode, dataType);
}
// Rank 4 with only 3 reason codes yields null; after a fourth code is added,
// evaluation returns exactly that code.
@Test void evaluateReasonCodeValue() { KiePMMLOutputField kiePMMLOutputField = KiePMMLOutputField.builder("outputfield", Collections.emptyList()) .withResultFeature(RESULT_FEATURE.REASON_CODE) .withRank(4) .build(); final List<String> reasonCodes = IntStream.range(0, 3).mapToObj(i -> "reasonCode-" + i) .collect(Collectors.toList()); ProcessingDTO processingDTO = getProcessingDTO(Collections.emptyList(), Collections.emptyList(), reasonCodes); assertThat(kiePMMLOutputField.evaluate(processingDTO)).isNull(); final String variableValue = "reasonCode-3"; reasonCodes.add(variableValue); processingDTO = getProcessingDTO(Collections.emptyList(), Collections.emptyList(), reasonCodes); Object retrieved = kiePMMLOutputField.evaluate(processingDTO); assertThat(retrieved).isNotNull(); assertThat(retrieved).isEqualTo(variableValue); }
// Builds a signed JWT bearer assertion for the Salesforce OAuth JWT flow:
// header.claims are base64url-encoded, then signed with the single private key
// found in the configured keystore (exactly one key entry is enforced).
// Private key material is destroyed after signing (destroy failure only logged).
// Checked keystore/crypto errors are rethrown as IllegalStateException.
// NOTE(review): `alias` stays null when the keystore has no key entry, which
// would surface as an exception from getKey — consider a clearer error.
String generateJwtAssertion() { final long utcPlusWindow = Clock.systemUTC().millis() / 1000 + JWT_CLAIM_WINDOW; final String audience = config.getJwtAudience() != null ? config.getJwtAudience() : config.getLoginUrl(); final StringBuilder claim = new StringBuilder().append("{\"iss\":\"").append(config.getClientId()) .append("\",\"sub\":\"").append(config.getUserName()) .append("\",\"aud\":\"").append(audience).append("\",\"exp\":\"").append(utcPlusWindow) .append("\"}"); final StringBuilder token = new StringBuilder(JWT_HEADER).append('.') .append(Base64.getUrlEncoder().encodeToString(claim.toString().getBytes(StandardCharsets.UTF_8))); final KeyStoreParameters keyStoreParameters = config.getKeystore(); keyStoreParameters.setCamelContext(camelContext); try { final KeyStore keystore = keyStoreParameters.createKeyStore(); final Enumeration<String> aliases = keystore.aliases(); String alias = null; while (aliases.hasMoreElements()) { String tmp = aliases.nextElement(); if (keystore.isKeyEntry(tmp)) { if (alias == null) { alias = tmp; } else { throw new IllegalArgumentException( "The given keystore `" + keyStoreParameters.getResource() + "` contains more than one key entry, expecting only one"); } } } PrivateKey key = (PrivateKey) keystore.getKey(alias, keyStoreParameters.getPassword().toCharArray()); Signature signature = Signature.getInstance(JWT_SIGNATURE_ALGORITHM); signature.initSign(key); signature.update(token.toString().getBytes(StandardCharsets.UTF_8)); byte[] signed = signature.sign(); token.append('.').append(Base64.getUrlEncoder().encodeToString(signed)); // Clean the private key from memory try { key.destroy(); } catch (javax.security.auth.DestroyFailedException ex) { LOG.debug("Error destroying private key: {}", ex.getMessage()); } } catch (IOException | GeneralSecurityException e) { throw new IllegalStateException(e); } return token.toString(); }
// Generates an assertion from a JWT-enabled login config; only checks non-null
// (structure/signature are not validated here).
@Test public void shouldGenerateJwtTokens() { final SalesforceLoginConfig config = new SalesforceLoginConfig("https://login.salesforce.com", "ABCD", "username", parameters, true); final SalesforceSession session = new SalesforceSession(new DefaultCamelContext(), mock(SalesforceHttpClient.class), TIMEOUT, config); final String jwtAssertion = session.generateJwtAssertion(); assertNotNull(jwtAssertion); }
// Loads one config-history record by nid, verifies it matches the requested
// dataId/group/namespace (permission check may throw AccessException), decrypts
// the content in place, and returns it. Returns null when the nid is unknown.
public ConfigHistoryInfo getConfigHistoryInfo(String dataId, String group, String namespaceId, Long nid) throws AccessException { ConfigHistoryInfo configHistoryInfo = historyConfigInfoPersistService.detailConfigHistory(nid); if (Objects.isNull(configHistoryInfo)) { return null; } // check if history config match the input checkHistoryInfoPermission(configHistoryInfo, dataId, group, namespaceId); String encryptedDataKey = configHistoryInfo.getEncryptedDataKey(); Pair<String, String> pair = EncryptionHandler .decryptHandler(dataId, encryptedDataKey, configHistoryInfo.getContent()); configHistoryInfo.setContent(pair.getSecond()); return configHistoryInfo; }
// Stubs the persistence layer and checks the returned record carries through
// dataId, group, and (decrypted) content, and that the DAO was queried once.
@Test void testGetConfigHistoryInfo() throws Exception { ConfigHistoryInfo configHistoryInfo = new ConfigHistoryInfo(); configHistoryInfo.setDataId(TEST_DATA_ID); configHistoryInfo.setGroup(TEST_GROUP); configHistoryInfo.setContent(TEST_CONTENT); configHistoryInfo.setTenant(TEST_TENANT); configHistoryInfo.setCreatedTime(new Timestamp(new Date().getTime())); configHistoryInfo.setLastModifiedTime(new Timestamp(new Date().getTime())); when(historyConfigInfoPersistService.detailConfigHistory(1L)).thenReturn(configHistoryInfo); ConfigHistoryInfo resConfigHistoryInfo = historyService.getConfigHistoryInfo(TEST_DATA_ID, TEST_GROUP, TEST_TENANT, 1L); verify(historyConfigInfoPersistService).detailConfigHistory(1L); assertEquals(configHistoryInfo.getDataId(), resConfigHistoryInfo.getDataId()); assertEquals(configHistoryInfo.getGroup(), resConfigHistoryInfo.getGroup()); assertEquals(configHistoryInfo.getContent(), resConfigHistoryInfo.getContent()); }
// Toggles whether full exception details are included in error responses.
public void setSendFullErrorException(boolean sendFullErrorException) { this.sendFullErrorException = sendFullErrorException; }
// With sendFullErrorException disabled, the 415 body must contain the generic
// message plus the exception's own message, but no stack trace.
@Test void handleFlowableContentNotSupportedExceptionWithoutSendFullErrorException() throws Exception { testController.exceptionSupplier = () -> new FlowableContentNotSupportedException("other test content not supported"); handlerAdvice.setSendFullErrorException(false); String body = mockMvc.perform(get("/")) .andExpect(status().isUnsupportedMediaType()) .andReturn() .getResponse() .getContentAsString(); assertThatJson(body) .isEqualTo("{" + " message: 'Content is not supported'," + " exception: 'other test content not supported'" + "}"); }
// Produces a descriptor for an operation, unwrapping PartitionIteratingOperation
// and Backup to expose the wrapped operation's class name. Descriptors are
// cached in DESCRIPTORS keyed by the wrapped class name.
// computeIfAbsent replaces the original's get/put pair, which both raced under
// concurrency (possible duplicate string builds) and, in the Backup branch,
// called getBackupOp() twice.
public static String toOperationDesc(Operation op) {
    Class<? extends Operation> operationClass = op.getClass();
    if (PartitionIteratingOperation.class.isAssignableFrom(operationClass)) {
        PartitionIteratingOperation partitionIteratingOperation = (PartitionIteratingOperation) op;
        OperationFactory operationFactory = partitionIteratingOperation.getOperationFactory();
        String factoryClassName = operationFactory.getClass().getName();
        return DESCRIPTORS.computeIfAbsent(factoryClassName,
                name -> PartitionIteratingOperation.class.getSimpleName() + "(" + name + ")");
    } else if (Backup.class.isAssignableFrom(operationClass)) {
        Backup backup = (Backup) op;
        Operation backupOperation = backup.getBackupOp();
        String backupOpClassName = backupOperation.getClass().getName();
        return DESCRIPTORS.computeIfAbsent(backupOpClassName,
                name -> Backup.class.getSimpleName() + "(" + name + ")");
    } else {
        return operationClass.getName();
    }
}
// A Backup wrapper must be described as "Backup(<wrapped op class name>)".
@Test public void testBackupOperation() throws UnknownHostException { Backup backup = new Backup(new DummyBackupOperation(), new Address("127.0.0.1", 5701), new long[]{}, false); String result = toOperationDesc(backup); assertEquals(format("Backup(%s)", DummyBackupOperation.class.getName()), result); }
// Looks up the binary protocol value handler for a column type, failing fast
// with a descriptive message when the type has no registered handler.
public static PostgreSQLBinaryProtocolValue getBinaryProtocolValue(final BinaryColumnType binaryColumnType) {
    final boolean supported = BINARY_PROTOCOL_VALUES.containsKey(binaryColumnType);
    Preconditions.checkArgument(supported,
            "Cannot find PostgreSQL type '%s' in column type when process binary protocol value", binaryColumnType);
    return BINARY_PROTOCOL_VALUES.get(binaryColumnType);
}
// INT2 must resolve to the Int2 binary protocol value implementation.
@Test void assertGetInt2BinaryProtocolValue() { PostgreSQLBinaryProtocolValue binaryProtocolValue = PostgreSQLBinaryProtocolValueFactory.getBinaryProtocolValue(PostgreSQLColumnType.INT2); assertThat(binaryProtocolValue, instanceOf(PostgreSQLInt2BinaryProtocolValue.class)); }
// Convenience overload: registers the input descriptors for the PValue using
// the default message-stream registration strategy.
public <OutT> void registerInputMessageStreams( PValue pvalue, List<? extends InputDescriptor<KV<?, OpMessage<OutT>>, ?>> inputDescriptors) { registerInputMessageStreams(pvalue, inputDescriptors, this::registerMessageStream); }
// After registering two input descriptors for an output PCollection, a merged
// message stream must be retrievable for that output.
@Test public void testRegisterInputMessageStreams() { final PCollection output = mock(PCollection.class); List<String> topics = Arrays.asList("stream1", "stream2"); List inputDescriptors = topics.stream() .map(topicName -> createSamzaInputDescriptor(topicName, topicName)) .collect(Collectors.toList()); translationContext.registerInputMessageStreams(output, inputDescriptors); assertNotNull(translationContext.getMessageStream(output)); }
// Greedily chains adjacent Python-operator transformations. Maintains:
// - outputMap: transformation -> downstream consumers, for chainability checks;
// - an identity set of already-processed transformations (identity, not equals,
//   so distinct-but-equal transformations are handled separately);
// - a work queue seeded with all transformations; each successful chain pushes
//   the merged transformation and its inputs back for further chaining.
// The LinkedHashSet preserves a deterministic result order while allowing
// replaced (old) transformations to be removed.
public static List<Transformation<?>> optimize(List<Transformation<?>> transformations) { final Map<Transformation<?>, Set<Transformation<?>>> outputMap = buildOutputMap(transformations); final LinkedHashSet<Transformation<?>> chainedTransformations = new LinkedHashSet<>(); final Set<Transformation<?>> alreadyTransformed = Sets.newIdentityHashSet(); final Queue<Transformation<?>> toTransformQueue = Queues.newArrayDeque(transformations); while (!toTransformQueue.isEmpty()) { final Transformation<?> transformation = toTransformQueue.poll(); if (!alreadyTransformed.contains(transformation)) { alreadyTransformed.add(transformation); final ChainInfo chainInfo = chainWithInputIfPossible(transformation, outputMap); chainedTransformations.add(chainInfo.newTransformation); chainedTransformations.removeAll(chainInfo.oldTransformations); alreadyTransformed.addAll(chainInfo.oldTransformations); // Add the chained transformation and its inputs to the to-optimize list toTransformQueue.add(chainInfo.newTransformation); toTransformQueue.addAll(chainInfo.newTransformation.getInputs()); } } return new ArrayList<>(chainedTransformations); }
// source -> keyedProcess(f1) -> process(f2) -> process(f3) must collapse into
// source + one chained keyed operator whose function chain is f3(f2(f1)), with
// input/output types matching the ends of the original pipeline.
@Test void testChainingMultipleOperators() { ExternalPythonKeyedProcessOperator<?> keyedProcessOperator = createKeyedProcessOperator( "f1", new RowTypeInfo(Types.INT(), Types.INT()), Types.STRING()); ExternalPythonProcessOperator<?, ?> processOperator1 = createProcessOperator("f2", Types.STRING(), Types.LONG()); ExternalPythonProcessOperator<?, ?> processOperator2 = createProcessOperator("f3", Types.LONG(), Types.INT()); Transformation<?> sourceTransformation = mock(SourceTransformation.class); OneInputTransformation<?, ?> keyedProcessTransformation = new OneInputTransformation( sourceTransformation, "keyedProcess", keyedProcessOperator, keyedProcessOperator.getProducedType(), 2); Transformation<?> processTransformation1 = new OneInputTransformation( keyedProcessTransformation, "process", processOperator1, processOperator1.getProducedType(), 2); Transformation<?> processTransformation2 = new OneInputTransformation( processTransformation1, "process", processOperator2, processOperator2.getProducedType(), 2); List<Transformation<?>> transformations = new ArrayList<>(); transformations.add(sourceTransformation); transformations.add(keyedProcessTransformation); transformations.add(processTransformation1); transformations.add(processTransformation2); List<Transformation<?>> optimized = PythonOperatorChainingOptimizer.optimize(transformations); assertThat(optimized).hasSize(2); OneInputTransformation<?, ?> chainedTransformation = (OneInputTransformation<?, ?>) optimized.get(1); assertThat(sourceTransformation.getOutputType()) .isEqualTo(chainedTransformation.getInputType()); assertThat(processOperator2.getProducedType()) .isEqualTo(chainedTransformation.getOutputType()); OneInputStreamOperator<?, ?> chainedOperator = chainedTransformation.getOperator(); assertThat(chainedOperator).isInstanceOf(ExternalPythonKeyedProcessOperator.class); validateChainedPythonFunctions( ((ExternalPythonKeyedProcessOperator<?>) chainedOperator).getPythonFunctionInfo(), "f3", "f2", "f1"); }
// Creates a loan with a fresh random id in CREATED status, computes its
// installment schedule, persists installments then the loan, and re-reads
// installments from the DAO so the response reflects stored state.
// NOTE(review): inserts are not wrapped in a transaction (see TODO) — a failure
// mid-way leaves orphaned installments.
public GetLoanResponse createLoan(CreateLoanRequest createLoanRequest) { String loanId = UUID.randomUUID().toString(); LoanDto loanDto = new LoanDto( loanId, createLoanRequest.term(), createLoanRequest.originatedAmount(), createLoanRequest.currency(), createLoanRequest.targetInterestRate(), createLoanRequest.effectiveInterestRate(), createLoanRequest.externalReference(), createLoanRequest.startDate(), createLoanRequest.endDate(), LoanStatus.CREATED, createLoanRequest.timezone(), createLoanRequest.region(), createLoanRequest.state() ); List<Installment> newInstallments = calculator.newInstallments(LoanTransformer.transformForNewInstallments(createLoanRequest)); List<LoanInstallmentDto> loanInstallmentDtos = LoanInstallmentTransformer.transform(newInstallments, loanId); // TODO(hubert): Add transactions loanInstallmentDtos.forEach(loanInstallmentDao::insert); loanDao.insert(loanDto); loanInstallmentDtos = loanInstallmentDao.findByLoanId(loanId); LOGGER.info("Creating new Loan: %s\nInstallments: %s".formatted(loanDto, loanInstallmentDtos)); return new GetLoanResponse( LoanTransformer.transformToLoanInfo( loanDto, loanInstallmentDtos )); }
// BNPL (0% interest) loan: response must echo the request fields and produce
// 4 OWED installments of $25.00 principal and $0.00 interest each.
@Test public void testCreateLoan_BNPL() { int term = 4; BigDecimal originatedAmount = BigDecimal.valueOf(100.0); String currency = "USD"; String externalReference = UUID.randomUUID().toString(); LocalDate startDate = LocalDate.of(2023, 1, 1); LocalDate endDate = startDate; String timezone = "America/Los_Angeles"; String region = "USA"; String state = "CA"; CreateLoanRequest createLoanRequest = new CreateLoanRequest( term, originatedAmount, currency, BigDecimal.valueOf(0), BigDecimal.valueOf(0), externalReference, startDate, endDate, timezone, region, state ); GetLoanResponse loanResponse = loanResourceManager.createLoan(createLoanRequest); // Verify that all expected fields are set correctly in the response assertEquals(loanResponse.loanInfo().term(), term); assertEquals(loanResponse.loanInfo().targetInterestRate(), BigDecimal.valueOf(0)); assertEquals(loanResponse.loanInfo().effectiveInterestRate(), BigDecimal.valueOf(0)); assertEquals(loanResponse.loanInfo().currency(), currency); assertEquals(loanResponse.loanInfo().externalReference(), externalReference); assertEquals(loanResponse.loanInfo().startDate(), startDate); assertEquals(loanResponse.loanInfo().endDate(), endDate); assertEquals(loanResponse.loanInfo().timezone(), timezone); assertEquals(loanResponse.loanInfo().region(), region); assertEquals(loanResponse.loanInfo().state(), state); // Verify that the installment schedule is correct List<LoanInstallmentInfo> loanInstallmentInfoList = loanResponse.loanInfo().loanInstallments(); assertEquals(loanInstallmentInfoList.size(), 4); // Each installment's principal should be $100/4 = $25 and its interest should be $0 for (int i = 0; i < 4; i++) { LoanInstallmentInfo installment = loanInstallmentInfoList.get(i); assertEquals(installment.loanId(), loanResponse.loanInfo().loanId()); assertEquals(installment.status(), InstallmentStatus.OWED); assertEquals(installment.principalAmount(), BigDecimal.valueOf(25).setScale(2)); assertEquals(installment.interestAmount(), 
BigDecimal.valueOf(0).setScale(2)); } }
// Unregisters this instance from every configured naming server: cancels the
// heartbeat task first, validates and records the transaction endpoint, then
// POSTs the instance JSON to each server's unregister endpoint with unit/
// cluster/namespace as query params. Per-server failures are logged, not thrown,
// so one bad server does not block unregistration from the rest.
@Override public void unregister(InetSocketAddress address) { // stop heartbeat if (heartBeatScheduledFuture != null && !heartBeatScheduledFuture.isCancelled()) { heartBeatScheduledFuture.cancel(true); } NetUtil.validAddress(address); Instance instance = Instance.getInstance(); instance.setTransaction(new Node.Endpoint(address.getAddress().getHostAddress(), address.getPort(), "netty")); for (String urlSuffix : getNamingAddrs()) { String url = HTTP_PREFIX + urlSuffix + "/naming/v1/unregister?"; String unit = instance.getUnit(); String jsonBody = instance.toJsonString(); String params = "unit=" + unit; params = params + "&cluster=" + instance.getClusterName(); params = params + "&namespace=" + instance.getNamespace(); url += params; Map<String, String> header = new HashMap<>(); header.put(HTTP.CONTENT_TYPE, ContentType.APPLICATION_JSON.getMimeType()); try (CloseableHttpResponse response = HttpClientUtil.doPost(url, jsonBody, header, 3000)) { int statusCode = response.getStatusLine().getStatusCode(); if (statusCode == 200) { LOGGER.info("instance has been unregistered successfully:{}", statusCode); } else { LOGGER.warn("instance has been unregistered unsuccessfully:{}", statusCode); } } catch (Exception e) { LOGGER.error("instance has been unregistered failed in namingserver {}", url, e); } } }
// End-to-end check: an instance registered with the naming server is visible
// via lookup, and disappears after unregister.
@Test
public void testUnregister() throws Exception {
    RegistryService registryService = new NamingserverRegistryProvider().provide();
    InetSocketAddress inetSocketAddress1 = new InetSocketAddress("127.0.0.1", 8088);
    //1.register
    registryService.register(inetSocketAddress1);
    //2.create vGroup in cluster
    String namespace = FILE_CONFIG.getConfig("registry.namingserver.namespace");
    createGroupInCluster(namespace, "group1", "cluster1");
    //3.get instances
    List list = registryService.lookup("group1");
    assertEquals(list.size(), 1);
    //4.unregister
    registryService.unregister(inetSocketAddress1);
    //5.get instances
    List list1 = registryService.lookup("group1");
    assertEquals(list1.size(), 0);
}
/**
 * Finds the steps that feed into the given step, including informational
 * (info-stream) hops. Delegates to {@link #findPreviousSteps(StepMeta, boolean)}
 * with {@code info = true}.
 *
 * @param stepMeta the step whose predecessors are wanted
 * @return the list of preceding steps (possibly empty)
 */
public List<StepMeta> findPreviousSteps( StepMeta stepMeta ) {
  return findPreviousSteps( stepMeta, true );
}
// A null step must yield an empty (non-null) predecessor list.
@Test
public void findPreviousStepsNullMeta( ) {
  TransMeta transMeta = new TransMeta( new Variables() );
  List<StepMeta> result = transMeta.findPreviousSteps( null, false );
  // Fixed: Hamcrest assertThat takes (actual, matcher); the original passed the
  // expected constant as the "actual" argument, producing misleading failure output.
  assertThat( result.size(), equalTo( 0 ) );
  assertThat( result, equalTo( new ArrayList<>() ) );
}
/**
 * Moves a file by copying it to the new name and then deleting the source.
 * Used because the backend has no native rename; the copy carries over the
 * transfer status (e.g. encryption settings) and the delete removes the original.
 *
 * @return the path of the copied (renamed) file
 */
@Override
public Path move(final Path source, final Path renamed, final TransferStatus status, final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException {
    final Path copy = proxy.copy(source, renamed, status, connectionCallback, new DisabledStreamListener());
    // Only delete the source after the copy succeeded, so a failed copy never loses data.
    delete.delete(Collections.singletonMap(source, status), connectionCallback, callback);
    return copy;
}
// Moving a server-side-encrypted object must remove the source and create the
// target in the same bucket.
@Test
public void testMoveWithServerSideEncryptionBucketPolicy() throws Exception {
    final Path container = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path test = new Path(container, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file));
    final GoogleStorageTouchFeature touch = new GoogleStorageTouchFeature(session);
    final TransferStatus status = new TransferStatus();
    // Create the source with AES256 server-side encryption enabled.
    status.setEncryption(new Encryption.Algorithm("AES256", null));
    touch.touch(test, status);
    assertTrue(new GoogleStorageFindFeature(session).find(test));
    final Path renamed = new Path(container, new AsciiRandomStringService().random(), EnumSet.of(Path.Type.file));
    new GoogleStorageMoveFeature(session).move(test, renamed, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
    // Source gone, target present after the move.
    assertFalse(new GoogleStorageFindFeature(session).find(test));
    assertTrue(new GoogleStorageFindFeature(session).find(renamed));
    // Cleanup.
    new GoogleStorageDeleteFeature(session).delete(Collections.singletonList(renamed), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Serializes the given object to its JSON representation encoded as UTF-8 bytes.
 *
 * @param obj the object to serialize; may be {@code null}
 * @return UTF-8 encoded JSON, or {@code null} when {@code obj} is {@code null}
 */
public static byte[] encode(final Object obj) {
    if (obj == null) {
        return null;
    }
    // Non-pretty-printed JSON, then encode with the class-level UTF-8 charset.
    return toJson(obj, false).getBytes(CHARSET_UTF8);
}
// Default serialization emits JSON that a strict parser rejects (unquoted long
// map keys); the BrowserCompatible/QuoteFieldNames feature set must produce
// strictly-valid JSON.
@Test
public void testEncode() {
    class Foo extends RemotingSerializable {
        Map<Long, String> map = new HashMap<>();

        Foo() {
            map.put(0L, "Test");
        }

        public Map<Long, String> getMap() {
            return map;
        }
    }
    Foo foo = new Foo();
    // Fixed: encode() always emits UTF-8 bytes, so decode with UTF-8 explicitly.
    // Using Charset.defaultCharset() corrupts the string on non-UTF-8 platforms.
    String invalid = new String(foo.encode(), java.nio.charset.StandardCharsets.UTF_8);
    String valid = new String(foo.encode(SerializerFeature.BrowserCompatible, SerializerFeature.QuoteFieldNames, SerializerFeature.MapSortField),
        java.nio.charset.StandardCharsets.UTF_8);
    Gson gson = new Gson();
    final TypeAdapter<JsonElement> strictAdapter = gson.getAdapter(JsonElement.class);
    try {
        strictAdapter.fromJson(invalid);
        Assert.fail("Should have thrown");
    } catch (IOException ignore) {
        // expected: strict parser rejects the default encoding
    }
    try {
        strictAdapter.fromJson(valid);
    } catch (IOException ignore) {
        Assert.fail("Should not throw");
    }
}
/**
 * Withdraws this node from the leader election.
 *
 * @param needFence when {@code false} and this node is currently ACTIVE, the
 *                  permanent breadcrumb znode is removed so the next active
 *                  node does not fence us; when {@code true} the znode is left
 *                  in place so fencing can occur.
 */
public synchronized void quitElection(boolean needFence) {
    LOG.info("Yielding from election");
    if (!needFence && state == State.ACTIVE) {
        // If active is gracefully going back to standby mode, remove
        // our permanent znode so no one fences us.
        tryDeleteOwnBreadCrumbNode();
    }
    // Drop ZK state and stop participating until joinElection is called again.
    reset();
    wantToBeInElection = false;
}
// Quitting the election must close the ZooKeeper handle; a subsequent
// joinElection must create a fresh client and resume normal standby behavior.
@Test
public void testQuitElection() throws Exception {
    elector.joinElection(data);
    Mockito.verify(mockZK, Mockito.times(0)).close();
    elector.quitElection(true);
    Mockito.verify(mockZK, Mockito.times(1)).close();
    // no watches added
    verifyExistCall(0);
    byte[] data = new byte[8];
    elector.joinElection(data);
    // getNewZooKeeper called 2 times. once in constructor and once now
    Assert.assertEquals(2, count);
    // Lock node already exists -> this node becomes standby and watches the lock.
    elector.processResult(Code.NODEEXISTS.intValue(), ZK_LOCK_NAME, mockZK, ZK_LOCK_NAME);
    Mockito.verify(mockApp, Mockito.times(1)).becomeStandby();
    verifyExistCall(1);
}
/**
 * Reads OpenAPI annotations from the given resource class using default
 * settings: the resolved application path, no parent metadata, and empty
 * tag/parameter/scanned-class accumulators.
 *
 * @param cls the JAX-RS resource class to scan
 * @return the populated OpenAPI model
 */
public OpenAPI read(Class<?> cls) {
    return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
}
// Regression test for ticket #3149: default methods inherited from a generic
// resource interface (and their overrides) must all appear in the generated
// spec with the correct paths and schemas.
@Test(description = "overridden generic resource interface default methods")
public void testTicket3149() {
    Reader reader = new Reader(new OpenAPI());
    OpenAPI openAPI = reader.read(MainResource.class);
    // Expected spec: three endpoints (one overridden, one inherited default,
    // one renamed original) plus the two DTO schemas.
    String yaml = "openapi: 3.0.1\n" +
        "paths:\n" +
        "  /test:\n" +
        "    post:\n" +
        "      tags:\n" +
        "      - Test inheritance on default implementation in interfaces\n" +
        "      operationId: firstEndpoint\n" +
        "      requestBody:\n" +
        "        content:\n" +
        "          '*/*':\n" +
        "            schema:\n" +
        "              $ref: '#/components/schemas/SampleDTO'\n" +
        "      responses:\n" +
        "        \"201\":\n" +
        "          description: Created\n" +
        "        \"400\":\n" +
        "          description: Bad Request\n" +
        "        \"403\":\n" +
        "          description: Forbidden\n" +
        "        \"404\":\n" +
        "          description: Not Found\n" +
        "  /test/{id}:\n" +
        "    get:\n" +
        "      tags:\n" +
        "      - Test inheritance on default implementation in interfaces\n" +
        "      operationId: secondEnpoint\n" +
        "      requestBody:\n" +
        "        content:\n" +
        "          '*/*':\n" +
        "            schema:\n" +
        "              $ref: '#/components/schemas/SampleOtherDTO'\n" +
        "      responses:\n" +
        "        \"200\":\n" +
        "          description: OK\n" +
        "        \"400\":\n" +
        "          description: Bad Request\n" +
        "        \"403\":\n" +
        "          description: Forbidden\n" +
        "        \"404\":\n" +
        "          description: Not Found\n" +
        "  /test/original/{id}:\n" +
        "    get:\n" +
        "      tags:\n" +
        "      - Test inheritance on default implementation in interfaces\n" +
        "      operationId: originalEndpoint\n" +
        "      requestBody:\n" +
        "        content:\n" +
        "          '*/*':\n" +
        "            schema:\n" +
        "              $ref: '#/components/schemas/SampleOtherDTO'\n" +
        "      responses:\n" +
        "        \"200\":\n" +
        "          description: OK\n" +
        "        \"400\":\n" +
        "          description: Bad Request\n" +
        "        \"403\":\n" +
        "          description: Forbidden\n" +
        "        \"404\":\n" +
        "          description: Not Found\n" +
        "components:\n" +
        "  schemas:\n" +
        "    SampleDTO:\n" +
        "      type: object\n" +
        "      properties:\n" +
        "        name:\n" +
        "          type: string\n" +
        "    SampleOtherDTO:\n" +
        "      type: object\n" +
        "      properties:\n" +
        "        label:\n" +
        "          type: string";
    SerializationMatchers.assertEqualsToYaml(openAPI, yaml);
}
/**
 * MyBatis interceptor that rewrites MySQL-flavored SQL into Oracle-compatible
 * SQL just before statement preparation: quoting conventions, multi-row
 * INSERT ... VALUES rewriting (via SELECT ... FROM dual UNION ALL), and
 * LIMIT 1 -> ROWNUM translation. The rewritten text is written back into the
 * BoundSql via reflection, then execution proceeds normally.
 */
@Override
public Object intercept(final Invocation invocation) throws Throwable {
    StatementHandler statementHandler = (StatementHandler) invocation.getTarget();
    //Elegant access to object properties through MetaObject, here is access to the properties of statementHandler;
    //MetaObject is an object provided by Mybatis for easy and elegant access to object properties,
    // through which you can simplify the code, no need to try/catch various reflect exceptions,
    // while it supports the operation of JavaBean, Collection, Map three types of object operations.
    // MetaObject metaObject = MetaObject
    // .forObject(statementHandler, SystemMetaObject.DEFAULT_OBJECT_FACTORY, SystemMetaObject.DEFAULT_OBJECT_WRAPPER_FACTORY,
    // new DefaultReflectorFactory());
    //First intercept to RoutingStatementHandler, there is a StatementHandler type delegate variable,
    // its implementation class is BaseStatementHandler, and then to the BaseStatementHandler member variable mappedStatement
    // MappedStatement mappedStatement = (MappedStatement) metaObject.getValue("delegate.mappedStatement");
    // String id = mappedStatement.getId(); mapper method full path
    // String sqlCommandType = mappedStatement.getSqlCommandType().toString(); sql method eg: insert update delete select
    BoundSql boundSql = statementHandler.getBoundSql(); // get original sql file
    // reflect modify sql file
    Field field = boundSql.getClass().getDeclaredField("sql");
    field.setAccessible(true);
    // replace desc and true
    // NOTE(review): the whole statement is lower-cased and "true" is replaced
    // everywhere, which would also affect literals inside string values —
    // presumably acceptable for this project's generated SQL; confirm.
    String replaceSql = boundSql.getSql().toLowerCase()
        .replace("`desc`", "\"desc\"")
        .replace("true", "'true'");
    // replace`
    replaceSql = replaceSql.replace("`", "");
    // replace resource
    // "resource" is an Oracle reserved word, so quote it when used as a table name.
    if (replaceSql.contains("resource")) {
        replaceSql = replaceSql.replace("into resource", "into \"resource\"")
            .replace("from resource", "from \"resource\"")
            .replace("update resource", "update \"resource\"");
    }
    // replace batch insert into
    // Heuristic: more than 3 '(' groups indicates a multi-row VALUES insert,
    // which Oracle does not support; rewrite it as SELECT ... FROM dual UNION ALL.
    if (replaceSql.contains("insert into") && replaceSql.split("\\(").length > 3) {
        replaceSql = replaceSql.replaceAll("\r|\n|\\s", "")
            .replace("insertinto", "insert into ")
            .replace("values", " SELECT * FROM (")
            .replace("(?", " SELECT ?")
            .replace("),", " FROM dual UNION ALL")
            .replace("?)", " ? FROM dual)");
    }
    // replace limit 1
    // Oracle has no LIMIT; translate to a ROWNUM predicate, appending with AND
    // when a WHERE clause already exists.
    if (replaceSql.contains("select")) {
        if (replaceSql.contains("where")) {
            replaceSql = replaceSql.replace("limit 1", "and rownum = 1");
        } else {
            replaceSql = replaceSql.replace("limit 1", "where rownum = 1");
        }
    }
    field.set(boundSql, replaceSql);
    return invocation.proceed();
}
// Smoke-tests the Oracle SQL rewriter against the main rewrite branches
// (limit-with-where, limit-without-where, batch insert, reserved table name);
// each rewrite must complete without throwing.
@Test
public void interceptTest() {
    final OracleSQLPrepareInterceptor oracleSQLPrepareInterceptor = new OracleSQLPrepareInterceptor();
    final Invocation invocation = mock(Invocation.class);
    final StatementHandler statementHandler = mock(StatementHandler.class);
    when(invocation.getTarget()).thenReturn(statementHandler);
    final BoundSql boundSql = mock(BoundSql.class);
    when(statementHandler.getBoundSql()).thenReturn(boundSql);
    // LIMIT 1 with an existing WHERE clause.
    when(boundSql.getSql()).thenReturn("select * from users where name = 1 limit 1");
    Assertions.assertDoesNotThrow(() -> oracleSQLPrepareInterceptor.intercept(invocation));
    // LIMIT 1 without a WHERE clause.
    when(boundSql.getSql()).thenReturn("select * from users limit 1");
    Assertions.assertDoesNotThrow(() -> oracleSQLPrepareInterceptor.intercept(invocation));
    // Multi-row batch INSERT.
    when(boundSql.getSql()).thenReturn("INSERT INTO `users` VALUES ('1529402613204172899', '34')('1529402613204172899', '34')('1529402613204172899', '34'); ");
    Assertions.assertDoesNotThrow(() -> oracleSQLPrepareInterceptor.intercept(invocation));
    // Batch INSERT into the Oracle-reserved table name "resource".
    when(boundSql.getSql()).thenReturn("INSERT INTO `resource` VALUES ('1529402613204172899', '34')('1529402613204172899', '34')('1529402613204172899', '34'); ");
    Assertions.assertDoesNotThrow(() -> oracleSQLPrepareInterceptor.intercept(invocation));
}
/**
 * Reads a raw query string from the supplier, parses it into name/value
 * pairs, and returns the pairs serialized as a JSON object.
 *
 * @param supplier provides the raw query string (e.g. {@code "a=1&b=2"})
 * @return JSON representation of the parsed parameters
 */
public static String ofString(final Supplier<String> supplier) {
    final String rawQuery = supplier.get();
    return GsonUtils.getInstance().toJson(initQueryParams(rawQuery));
}
// A query string should round-trip into a JSON object keyed by parameter name.
@Test
public void testOfString() {
    assertEquals("{\"a\":\"1\",\"b\":\"2\"}", HttpParamConverter.ofString(() -> "a=1&b=2"));
}
/**
 * Factory for per-column ORC/DWRF writers. Dispatches on the ORC type of the
 * node, choosing dictionary vs. direct encodings based on the writer options,
 * enforcing format restrictions (several logical types are not supported by
 * DWRF), and recursing for nested LIST/MAP/STRUCT types. Flat maps (DWRF only)
 * get a dedicated writer that creates value writers lazily per sequence.
 *
 * @throws IllegalArgumentException for unsupported type/encoding combinations
 */
public static ColumnWriter createColumnWriter(
        int nodeIndex,
        int sequence,
        List<OrcType> orcTypes,
        Type type,
        ColumnWriterOptions columnWriterOptions,
        OrcEncoding orcEncoding,
        DateTimeZone hiveStorageTimeZone,
        DwrfEncryptionInfo dwrfEncryptors,
        MetadataWriter metadataWriter)
{
    requireNonNull(type, "type is null");
    OrcType orcType = orcTypes.get(nodeIndex);
    Optional<DwrfDataEncryptor> dwrfEncryptor = dwrfEncryptors.getEncryptorByNodeId(nodeIndex);
    switch (orcType.getOrcTypeKind()) {
        case BOOLEAN:
            return new BooleanColumnWriter(nodeIndex, sequence, type, columnWriterOptions, dwrfEncryptor, metadataWriter);
        case FLOAT:
            return new FloatColumnWriter(nodeIndex, sequence, type, columnWriterOptions, dwrfEncryptor, metadataWriter);
        case DOUBLE:
            return new DoubleColumnWriter(nodeIndex, sequence, type, columnWriterOptions, dwrfEncryptor, metadataWriter);
        case BYTE:
            return new ByteColumnWriter(nodeIndex, sequence, type, columnWriterOptions, dwrfEncryptor, metadataWriter);
        case DATE:
            checkArgument(orcEncoding != DWRF, "DWRF does not support %s type", type);
            // NOTE(review): DATE passes DEFAULT_SEQUENCE_ID while every other case
            // passes `sequence`. Non-default sequences appear tied to DWRF flat maps
            // and DATE is rejected for DWRF above, so this is likely unreachable with
            // a non-default sequence — confirm before relying on it.
            return new LongColumnWriter(nodeIndex, DEFAULT_SEQUENCE_ID, type, columnWriterOptions, dwrfEncryptor, orcEncoding, DateStatisticsBuilder::new, metadataWriter);
        case SHORT:
            return new LongColumnWriter(nodeIndex, sequence, type, columnWriterOptions, dwrfEncryptor, orcEncoding, IntegerStatisticsBuilder::new, metadataWriter);
        case INT:
        case LONG:
            if (columnWriterOptions.isIntegerDictionaryEncodingEnabled() && orcEncoding == DWRF) {
                // ORC V1 does not support Integer Dictionary encoding. DWRF supports Integer dictionary encoding.
                return new LongDictionaryColumnWriter(nodeIndex, sequence, type, columnWriterOptions, dwrfEncryptor, orcEncoding, metadataWriter);
            }
            return new LongColumnWriter(nodeIndex, sequence, type, columnWriterOptions, dwrfEncryptor, orcEncoding, IntegerStatisticsBuilder::new, metadataWriter);
        case DECIMAL:
            checkArgument(orcEncoding != DWRF, "DWRF does not support %s type", type);
            return new DecimalColumnWriter(nodeIndex, type, columnWriterOptions, orcEncoding, metadataWriter);
        case TIMESTAMP:
        case TIMESTAMP_MICROSECONDS:
            return new TimestampColumnWriter(nodeIndex, sequence, type, columnWriterOptions, dwrfEncryptor, orcEncoding, hiveStorageTimeZone, metadataWriter);
        case BINARY:
            return new SliceDirectColumnWriter(nodeIndex, sequence, type, columnWriterOptions, dwrfEncryptor, orcEncoding, BinaryStatisticsBuilder::new, metadataWriter);
        case CHAR:
            checkArgument(orcEncoding != DWRF, "DWRF does not support %s type", type);
            // fall through
        case VARCHAR:
        case STRING:
            if (columnWriterOptions.isStringDictionaryEncodingEnabled()) {
                return new SliceDictionaryColumnWriter(nodeIndex, sequence, type, columnWriterOptions, dwrfEncryptor, orcEncoding, metadataWriter);
            }
            // Capture the limit so the stats-builder lambda does not hold the options object.
            int stringStatisticsLimit = columnWriterOptions.getStringStatisticsLimit();
            return new SliceDirectColumnWriter(
                    nodeIndex,
                    sequence,
                    type,
                    columnWriterOptions,
                    dwrfEncryptor,
                    orcEncoding,
                    () -> new StringStatisticsBuilder(stringStatisticsLimit),
                    metadataWriter);
        case LIST: {
            // Recurse for the single element child.
            Type fieldType = type.getTypeParameters().get(0);
            ColumnWriter elementWriter = createColumnWriter(
                    orcType.getFieldTypeIndex(0),
                    sequence,
                    orcTypes,
                    fieldType,
                    columnWriterOptions,
                    orcEncoding,
                    hiveStorageTimeZone,
                    dwrfEncryptors,
                    metadataWriter);
            return new ListColumnWriter(nodeIndex, sequence, columnWriterOptions, dwrfEncryptor, orcEncoding, elementWriter, metadataWriter);
        }
        case MAP: {
            if (columnWriterOptions.getFlattenedNodes().contains(nodeIndex)) {
                // Flat maps are a DWRF-only encoding.
                checkArgument(orcEncoding == DWRF, "%s does not support flat maps", orcEncoding);
                Type valueType = type.getTypeParameters().get(1);
                OrcType keyOrcType = orcTypes.get(orcType.getFieldTypeIndex(0));
                Supplier<StatisticsBuilder> keyStatisticsBuilderSupplier = createStatisticsBuilderSupplier(keyOrcType, columnWriterOptions);

                // value writers should not create their own expensive dictionaries, instead they should use shared dictionaries
                ColumnWriterOptions valueWriterColumnWriterOptions = columnWriterOptions.copyWithDisabledDictionaryEncoding();

                // Flat map writer needs to provide column statistics for the value node(s) even if there are no values.
                // This lambda will provide empty column statistics right away instead of creating an expensive empty
                // value writer and getting empty stats from it.
                int valueNodeIndex = orcType.getFieldTypeIndex(1);
                Supplier<Map<Integer, ColumnStatistics>> emptyColumnStatisticsSupplier = () -> createEmptyColumnStatistics(orcTypes, valueNodeIndex, columnWriterOptions);

                // Value writers are created lazily, one per encountered map-key sequence.
                IntFunction<ColumnWriter> valueWriterSupplier = (valueSequence) -> createColumnWriter(
                        valueNodeIndex,
                        valueSequence,
                        orcTypes,
                        valueType,
                        valueWriterColumnWriterOptions,
                        orcEncoding,
                        hiveStorageTimeZone,
                        dwrfEncryptors,
                        metadataWriter);

                return new MapFlatColumnWriter(
                        nodeIndex,
                        orcType.getFieldTypeIndex(0),
                        valueNodeIndex,
                        type.getTypeParameters().get(0),
                        valueType,
                        keyStatisticsBuilderSupplier,
                        columnWriterOptions,
                        dwrfEncryptor,
                        metadataWriter,
                        valueWriterSupplier,
                        emptyColumnStatisticsSupplier);
            }
            // Regular (non-flat) map: recurse for key and value children.
            ColumnWriter keyWriter = createColumnWriter(
                    orcType.getFieldTypeIndex(0),
                    sequence,
                    orcTypes,
                    type.getTypeParameters().get(0),
                    columnWriterOptions,
                    orcEncoding,
                    hiveStorageTimeZone,
                    dwrfEncryptors,
                    metadataWriter);
            ColumnWriter valueWriter = createColumnWriter(
                    orcType.getFieldTypeIndex(1),
                    sequence,
                    orcTypes,
                    type.getTypeParameters().get(1),
                    columnWriterOptions,
                    orcEncoding,
                    hiveStorageTimeZone,
                    dwrfEncryptors,
                    metadataWriter);
            return new MapColumnWriter(nodeIndex, sequence, columnWriterOptions, dwrfEncryptor, orcEncoding, keyWriter, valueWriter, metadataWriter);
        }
        case STRUCT: {
            // One child writer per struct field, in declaration order.
            ImmutableList.Builder<ColumnWriter> fieldWriters = ImmutableList.builder();
            for (int fieldId = 0; fieldId < orcType.getFieldCount(); fieldId++) {
                int childNodeIndex = orcType.getFieldTypeIndex(fieldId);
                Type fieldType = type.getTypeParameters().get(fieldId);
                fieldWriters.add(createColumnWriter(
                        childNodeIndex,
                        sequence,
                        orcTypes,
                        fieldType,
                        columnWriterOptions,
                        orcEncoding,
                        hiveStorageTimeZone,
                        dwrfEncryptors,
                        metadataWriter));
            }
            return new StructColumnWriter(nodeIndex, sequence, columnWriterOptions, dwrfEncryptor, fieldWriters.build(), metadataWriter);
        }
    }
    throw new IllegalArgumentException("Unsupported type: " + type);
}
// For every column-writer type produced by the factory, the sequence id passed
// at creation must be propagated to all index and data streams it emits.
@Test(dataProvider = "dataForSequenceIdTest")
public void testSequenceIdPassedAllColumnWriters(List<OrcType> orcTypes, Type type, Block block)
        throws IOException {
    ColumnWriterOptions columnWriterOptions = ColumnWriterOptions.builder()
        .setCompressionKind(CompressionKind.ZLIB)
        .build();
    int nodeId = 0;
    ColumnWriter columnWriter = createColumnWriter(
        nodeId,
        SEQUENCE,
        orcTypes,
        type,
        columnWriterOptions,
        DWRF,
        UTC,
        UNENCRYPTED,
        DWRF.createMetadataWriter());
    // Write one row group so the writer actually produces streams.
    columnWriter.beginRowGroup();
    columnWriter.writeBlock(block);
    columnWriter.finishRowGroup();
    columnWriter.close();
    ImmutableList<StreamDataOutput> streams = ImmutableList.<StreamDataOutput>builder()
        .addAll(columnWriter.getIndexStreams(Optional.empty()))
        .addAll(columnWriter.getDataStreams())
        .build();
    for (StreamDataOutput stream : streams) {
        assertEquals(stream.getStream().getSequence(), SEQUENCE);
    }
}
@Override public ExecuteResult execute(final ServiceContext serviceContext, final ConfiguredKsqlPlan plan, final boolean restoreInProgress) { try { final ExecuteResult result = EngineExecutor .create(primaryContext, serviceContext, plan.getConfig()) .execute(plan.getPlan(), restoreInProgress); return result; } catch (final KsqlStatementException e) { throw e; } catch (final KsqlException e) { // add the statement text to the KsqlException throw new KsqlStatementException( e.getMessage(), e.getMessage(), plan.getPlan().getStatementText(), e.getCause() ); } }
// When a query references a source whose name differs only in case from an
// existing quoted source, the error must suggest the correctly-cased name.
@Test
public void shouldShowHintWhenFailingToCreateQueryIfSelectingFromSourceNameWithMisspelling() {
    // Given: a case-sensitive (quoted) stream named "Bar" exists.
    setupKsqlEngineWithSharedRuntimeEnabled();
    KsqlEngineTestUtil.execute(
        serviceContext,
        ksqlEngine,
        "create stream \"Bar\" as select * from test1;",
        ksqlConfig,
        Collections.emptyMap()
    );
    // When: selecting from the wrongly-cased "bAr".
    final KsqlStatementException e = assertThrows(
        KsqlStatementException.class,
        () -> KsqlEngineTestUtil.execute(
            serviceContext,
            ksqlEngine,
            "select * from \"bAr\";",
            ksqlConfig,
            Collections.emptyMap()
        )
    );
    // Then: the message carries the "did you mean" hint and the failing statement.
    assertThat(e, rawMessage(is(
        "Exception while preparing statement: bAr does not exist.\n"
            + "Did you mean \"Bar\"? Hint: wrap the source name in double quotes to make it case-sensitive.")));
    assertThat(e, statementText(is("select * from \"bAr\";")));
}
/**
 * Handles a transaction-coordinator client connect request: validates the
 * connection state and that transactions are enabled, then asynchronously
 * connects the client to the requested transaction metadata store, replying
 * with success or a mapped error code.
 */
@Override
protected void handleTcClientConnectRequest(CommandTcClientConnectRequest command) {
    // Connect requests are only valid on a fully-established connection.
    checkArgument(state == State.Connected);
    final long requestId = command.getRequestId();
    final TransactionCoordinatorID tcId = TransactionCoordinatorID.get(command.getTcId());
    if (log.isDebugEnabled()) {
        log.debug("Receive tc client connect request {} to transaction meta store {} from {}.",
                requestId, tcId, remoteAddress);
    }
    // Sends an error response itself when transactions are disabled.
    if (!checkTransactionEnableAndSendError(requestId)) {
        return;
    }
    TransactionMetadataStoreService transactionMetadataStoreService =
            service.pulsar().getTransactionMetadataStoreService();
    transactionMetadataStoreService.handleTcClientConnect(tcId).thenAccept(connection -> {
        if (log.isDebugEnabled()) {
            log.debug("Handle tc client connect request {} to transaction meta store {} from {} success.",
                    requestId, tcId, remoteAddress);
        }
        commandSender.sendTcClientConnectResponse(requestId);
    }).exceptionally(e -> {
        log.error("Handle tc client connect request {} to transaction meta store {} from {} fail.",
                requestId, tcId, remoteAddress, e.getCause());
        // Map the failure to a client-visible error code instead of dropping it.
        commandSender.sendTcClientConnectResponse(requestId,
                BrokerServiceException.getClientErrorCode(e), e.getMessage());
        return null;
    });
}
// A connect request on a non-Connected (Failed) connection must be rejected
// with IllegalArgumentException by the state precondition.
@Test(expectedExceptions = IllegalArgumentException.class)
public void shouldFailHandleTcClientConnectRequest() throws Exception {
    ServerCnx serverCnx = mock(ServerCnx.class, CALLS_REAL_METHODS);
    Field stateUpdater = ServerCnx.class.getDeclaredField("state");
    stateUpdater.setAccessible(true);
    stateUpdater.set(serverCnx, ServerCnx.State.Failed);
    // Fixed: pass null instead of Mockito's any() — argument matchers are only
    // valid inside when()/verify() and otherwise leak onto the matcher stack,
    // corrupting later tests. The state check throws before the command is
    // dereferenced, so null is safe here.
    serverCnx.handleTcClientConnectRequest(null);
}
/**
 * Queries a service by name in the default group. Delegates to
 * {@link #queryService(String, String)} with {@code Constants.DEFAULT_GROUP}.
 *
 * @param serviceName the service to look up
 * @return the matching service
 * @throws NacosException on lookup failure
 */
@Override
public Service queryService(String serviceName) throws NacosException {
    return queryService(serviceName, Constants.DEFAULT_GROUP);
}
// The maintain service must forward service queries (name + group) to the
// server proxy exactly once.
@Test
void testQueryService1() throws NacosException {
    //given
    String serviceName = "service1";
    String groupName = "group1";
    //when
    nacosNamingMaintainService.queryService(serviceName, groupName);
    //then
    verify(serverProxy, times(1)).queryService(serviceName, groupName);
}
/**
 * Creates a deep copy of this directory status via the copy constructor.
 *
 * @return a new {@link UfsDirectoryStatus} equal to this one
 */
@Override
public UfsDirectoryStatus copy() {
    return new UfsDirectoryStatus(this);
}
// copy() must produce a status equal to the original.
@Test
public void copy() {
    short mode = 077;
    UfsDirectoryStatus statusToCopy = new UfsDirectoryStatus("name", "owner", "group", mode);
    // Fixed: exercise the copy() method under test rather than invoking the
    // copy constructor directly (copy() delegates to it, but the method itself
    // was previously never covered).
    UfsDirectoryStatus status = statusToCopy.copy();
    assertEquals(statusToCopy, status);
}
/**
 * Hides every model positioned after the given model (exclusive); the given
 * model itself stays visible.
 *
 * @param model the boundary model; all models after it are hidden
 */
protected void hideAllAfterModel(EpoxyModel<?> model) {
    hideModels(getAllModelsAfter(model));
}
// hideAllAfterModel must hide exactly the models after the boundary index,
// notify a change for each of them, and leave earlier models (and the
// boundary itself) visible.
@Test
public void testHideAllAfterModel() {
    List<TestModel> models = new ArrayList<>();
    int modelCount = 10;
    for (int i = 0; i < modelCount; i++) {
        TestModel model = new TestModel();
        models.add(model);
        testAdapter.addModels(model);
    }
    int hideIndex = 5;
    testAdapter.hideAllAfterModel(models.get(hideIndex));
    // One change notification per hidden model.
    for (int i = hideIndex + 1; i < modelCount; i++) {
        verify(observer).onItemRangeChanged(i, 1, null);
    }
    // Models up to and including the boundary remain shown.
    for (int i = 0; i < modelCount; i++) {
        assertEquals(i <= hideIndex, models.get(i).isShown());
    }
    checkDifferState();
}
/**
 * Evaluates {@code realData} against the given condition using the predicate
 * registered for the condition's operator.
 *
 * <p>Returns {@code false} when the condition is null or has no operator, or
 * when {@code realData} is blank and the predicate is not the blank-aware
 * {@link BlankPredicateJudge}.
 *
 * @param conditionData the condition to evaluate; may be {@code null}
 * @param realData      the actual value to test
 * @return the predicate's verdict, or {@code false} for the guard cases above
 */
public static Boolean judge(final ConditionData conditionData, final String realData) {
    if (Objects.isNull(conditionData) || StringUtils.isBlank(conditionData.getOperator())) {
        return false;
    }
    final PredicateJudge predicateJudge = newInstance(conditionData.getOperator());
    // Only the blank-aware predicate may evaluate blank input.
    final boolean blankAllowed = predicateJudge instanceof BlankPredicateJudge;
    if (!blankAllowed && StringUtils.isBlank(realData)) {
        return false;
    }
    return predicateJudge.judge(conditionData, realData);
}
// TIME_BEFORE must hold when the real time is before the condition's time,
// both when the operator is matched by alias and by enum name.
@Test
public void testTimerBeforeJudge() {
    conditionData.setOperator(OperatorEnum.TIME_BEFORE.getAlias());
    //Because the realData can't be empty, so the realDate must fill in the values
    conditionData.setParamValue(MAX_TIME);
    assertTrue(PredicateJudgeFactory.judge(conditionData, MAX_TIME));
    conditionData.setParamValue(FIRST_TIME);
    assertFalse(PredicateJudgeFactory.judge(conditionData, MAX_TIME));
    // Same checks with the parameter name set to the enum constant's name.
    conditionData.setParamName(OperatorEnum.TIME_BEFORE.name());
    conditionData.setParamValue(MAX_TIME);
    assertTrue(PredicateJudgeFactory.judge(conditionData, FIRST_TIME));
    assertFalse(PredicateJudgeFactory.judge(conditionData, MAX_TIME));
}
/**
 * Opens {@code f} for writing with the requested permission bits, using the
 * secure native path unless security checks are globally skipped.
 *
 * @param f           file to create
 * @param permissions POSIX permission bits for the new file
 * @return an output stream for the newly created file
 * @throws IOException on creation failure (including when the file exists)
 */
public static FileOutputStream createForWrite(File f, int permissions) throws IOException {
    return skipSecurity
        ? insecureCreateForWrite(f, permissions)
        : NativeIO.getCreateForWriteFileOutputStream(f, permissions);
}
// createForWrite must refuse to create a file that already exists.
@Test(timeout = 10000)
public void testCreateForWrite() throws IOException {
    try {
        SecureIOUtils.createForWrite(testFilePathIs, 0777);
        fail("Was able to create file at " + testFilePathIs);
    } catch (SecureIOUtils.AlreadyExistsException aee) {
        // expected
    }
}
/**
 * Parses the given textual flow definition into an instance of {@code cls},
 * delegating to the reader with the type token derived from the class.
 *
 * @param input the raw flow definition text
 * @param cls   the target type to deserialize into
 * @return the parsed instance
 */
public <T> T parse(String input, Class<T> cls) {
    return readFlow(input, cls, type(cls));
}
// Parsing a flow with an unknown task type must fail validation and report
// the missing plugin class in the violation message.
@Test
void invalidTask() {
    ConstraintViolationException exception = assertThrows(
        ConstraintViolationException.class,
        () -> this.parse("flows/invalids/invalid-task.yaml")
    );
    assertThat(exception.getConstraintViolations().size(), is(2));
    assertThat(
        exception.getConstraintViolations().stream()
            .filter(e -> e.getMessage().contains("Invalid type"))
            .findFirst().orElseThrow().getMessage(),
        containsString("Invalid type: io.kestra.plugin.core.debug.MissingOne"));
}
/**
 * Refreshes the cached serialized lengths of the key and value from their
 * currently wrapped objects.
 *
 * @param key   holder whose {@code length} is updated from its value {@code v}
 * @param value holder whose {@code length} is updated from its value {@code v}
 * @throws IOException if a serializer fails to compute a length
 */
@Override
public void updateLength(SizedWritable<?> key, SizedWritable<?> value) throws IOException {
    key.length = keySerializer.getLength(key.v);
    value.length = valueSerializer.getLength(value.v);
    // (redundant trailing `return;` removed)
}
@Test public void testUpdateLength() throws IOException { Mockito.mock(DataOutputStream.class); int kvLength = 0; for (int i = 0; i < inputArraySize; i++) { key.reset(inputArray[i].key); value.reset(inputArray[i].value); serializer.updateLength(key, value); // verify whether the size increase Assert.assertTrue(key.length + value.length > kvLength); kvLength = key.length + value.length; } }
/**
 * Creates a consistent-hash ring from the given host-to-points map by
 * delegating to the configured ring factory.
 *
 * @param pointsMap hosts mapped to their point counts (weights)
 * @return the constructed ring
 */
@Override
public Ring<T> createRing(Map<T, Integer> pointsMap) {
    return _ringFactory.createRing(pointsMap);
}
// Repeatedly rebuilds the ring with random point counts and checks that the
// factory's cached point lists track the maximum points ever requested per
// host (rounded up to a multiple of 4), and that lookups keep working.
@Test(groups = { "small", "back-end" })
public void testRandomChangePoints() throws URISyntaxException {
    int pointNum = 5;
    int loopNum = 100;
    Map<String, Integer> pointsMp = buildPointsMap(pointNum);
    Map<String, Integer> maxPoints = new HashMap<>(pointNum);
    Random random = new Random();
    for (String uri : pointsMp.keySet()) {
        maxPoints.put(uri, 100);
    }
    PointBasedConsistentHashRingFactory<String> ringFactory =
        new PointBasedConsistentHashRingFactory<>(new DegraderLoadBalancerStrategyConfig(1L));
    Ring<String> ring = ringFactory.createRing(pointsMp);
    assertNotNull(ring.get(1000));
    for (int i = 0; i < loopNum; ++i) {
        // new point list
        for (String uri : pointsMp.keySet()) {
            int newPoints = random.nextInt(200);
            // Zero points would remove the host entirely; skip to keep the fixture stable.
            if (newPoints == 0) {
                continue;
            }
            pointsMp.put(uri, newPoints);
            if (newPoints > maxPoints.get(uri)) {
                // Expected cache growth: factory rounds capacity up to a multiple of 4.
                maxPoints.put(uri, ((newPoints + 3) / 4) * 4);
            }
        }
        ring = ringFactory.createRing(pointsMp);
        assertNotNull(ring.get(1000));
        Map<String, List<Point<String>>> pointList = ringFactory.getPointsMap();
        for (String uri : pointsMp.keySet()) {
            assertEquals((int) maxPoints.get(uri), pointList.get(uri).size());
        }
    }
}
/**
 * Collects the pipeline-option property specs reachable from the given
 * options interface: every getter declared on the interface or any of its
 * super-interfaces that extend {@link PipelineOptions}, optionally excluding
 * interfaces annotated {@link Hidden}.
 *
 * @param optionsInterface the options interface to inspect
 * @param skipHidden       whether to omit properties declared on Hidden interfaces
 * @return the discovered property specs
 */
static Set<PipelineOptionSpec> getOptionSpecs(
        Class<? extends PipelineOptions> optionsInterface, boolean skipHidden) {
    Iterable<Method> methods = ReflectHelpers.getClosureOfMethodsOnInterface(optionsInterface);
    Multimap<String, Method> propsToGetters = getPropertyNamesToGetters(methods);
    ImmutableSet.Builder<PipelineOptionSpec> setBuilder = ImmutableSet.builder();
    for (Map.Entry<String, Method> propAndGetter : propsToGetters.entries()) {
        String prop = propAndGetter.getKey();
        Method getter = propAndGetter.getValue();
        @SuppressWarnings("unchecked")
        Class<? extends PipelineOptions> declaringClass =
                (Class<? extends PipelineOptions>) getter.getDeclaringClass();
        // Skip getters inherited from non-PipelineOptions interfaces (e.g. Object-like mixins).
        if (!PipelineOptions.class.isAssignableFrom(declaringClass)) {
            continue;
        }
        if (skipHidden && declaringClass.isAnnotationPresent(Hidden.class)) {
            continue;
        }
        setBuilder.add(PipelineOptionSpec.of(declaringClass, prop, getter));
    }
    return setBuilder.build();
}
// A property overridden in a sub-interface must be reported for both the base
// and the extending interface, and new properties only for the extender.
@Test
public void testBaseClassOptions() {
    Set<PipelineOptionSpec> props =
        PipelineOptionsReflector.getOptionSpecs(ExtendsSimpleOptions.class, true);
    assertThat(props, hasItem(allOf(hasName("foo"), hasClass(SimpleOptions.class))));
    assertThat(props, hasItem(allOf(hasName("foo"), hasClass(ExtendsSimpleOptions.class))));
    assertThat(props, hasItem(allOf(hasName("bar"), hasClass(ExtendsSimpleOptions.class))));
}
/**
 * Renders a short, human-readable description of the given type (no package
 * qualification), delegating the actual formatting to {@code format}.
 *
 * @param input the type to describe
 * @return the formatted description
 */
public static String simpleTypeDescription(Type input) {
    final StringBuilder description = new StringBuilder();
    format(description, input);
    return description.toString();
}
// Plain classes must format as their simple names; primitives keep their keyword.
@Test
public void testTypeFormatterOnClasses() throws Exception {
    assertEquals("Integer", ReflectHelpers.simpleTypeDescription(Integer.class));
    assertEquals("int", ReflectHelpers.simpleTypeDescription(int.class));
    assertEquals("Map", ReflectHelpers.simpleTypeDescription(Map.class));
    assertEquals(getClass().getSimpleName(), ReflectHelpers.simpleTypeDescription(getClass()));
}
/**
 * Builds the concrete MySQL command packet for the given command type,
 * deserializing from the payload where the command carries data. Unknown
 * commands are wrapped in {@link MySQLUnsupportedCommandPacket}.
 *
 * @param commandPacketType the parsed command type byte
 * @param payload           the remaining packet payload
 * @param connectionSession session used to resolve prepared-statement metadata
 * @return the constructed command packet
 */
public static MySQLCommandPacket newInstance(final MySQLCommandPacketType commandPacketType, final MySQLPacketPayload payload,
                                             final ConnectionSession connectionSession) {
    switch (commandPacketType) {
        case COM_QUIT:
            return new MySQLComQuitPacket();
        case COM_INIT_DB:
            return new MySQLComInitDbPacket(payload);
        case COM_FIELD_LIST:
            return new MySQLComFieldListPacket(payload);
        case COM_QUERY:
            return new MySQLComQueryPacket(payload);
        case COM_STMT_PREPARE:
            return new MySQLComStmtPreparePacket(payload);
        case COM_STMT_EXECUTE:
            // Peek the statement id (little-endian int at the current reader index,
            // without consuming it) to look up the prepared statement and learn
            // its parameter count before parsing the execute payload.
            MySQLServerPreparedStatement serverPreparedStatement =
                    connectionSession.getServerPreparedStatementRegistry().getPreparedStatement(payload.getByteBuf().getIntLE(payload.getByteBuf().readerIndex()));
            return new MySQLComStmtExecutePacket(payload, serverPreparedStatement.getSqlStatementContext().getSqlStatement().getParameterCount());
        case COM_STMT_SEND_LONG_DATA:
            return new MySQLComStmtSendLongDataPacket(payload);
        case COM_STMT_RESET:
            return new MySQLComStmtResetPacket(payload);
        case COM_STMT_CLOSE:
            return new MySQLComStmtClosePacket(payload);
        case COM_SET_OPTION:
            return new MySQLComSetOptionPacket(payload);
        case COM_PING:
            return new MySQLComPingPacket();
        case COM_RESET_CONNECTION:
            return new MySQLComResetConnectionPacket();
        default:
            return new MySQLUnsupportedCommandPacket(commandPacketType);
    }
}
// COM_PING must map to a MySQLComPingPacket instance.
@Test
void assertNewInstanceWithComPingPacket() {
    assertThat(MySQLCommandPacketFactory.newInstance(MySQLCommandPacketType.COM_PING, payload, connectionSession),
        instanceOf(MySQLComPingPacket.class));
}
/**
 * Exports every rule from the rule repository into the project dump's RULES
 * stream, preserving the repository's iteration order. On any failure the
 * exception message reports how many rules had been written successfully.
 */
@Override
public void execute(Context context) {
    long count = 0;
    // StreamWriter is closed via try-with-resources even on failure.
    try (StreamWriter<ProjectDump.Rule> writer = dumpWriter.newStreamWriter(DumpElement.RULES)) {
        // Reuse a single protobuf builder across all rules to avoid per-rule allocation.
        ProjectDump.Rule.Builder ruleBuilder = ProjectDump.Rule.newBuilder();
        for (Rule rule : ruleRepository.getAll()) {
            ProjectDump.Rule ruleMessage = toRuleMessage(ruleBuilder, rule);
            writer.write(ruleMessage);
            count++;
        }
        LoggerFactory.getLogger(getClass()).debug("{} rules exported", count);
    } catch (Exception e) {
        throw new IllegalStateException(format("Rule Export failed after processing %d rules successfully", count), e);
    }
}
// The export step must write every repository rule, in repository order,
// each tagged with the repository identifier.
@Test
public void execute_writes_all_rules_in_order_returned_by_repository() {
    String[] keys = new String[10];
    for (int i = 0; i < 10; i++) {
        String key = "key_" + i;
        ruleRepository.add(key);
        keys[i] = key;
    }
    underTest.execute(new TestComputationStepContext());
    List<ProjectDump.Rule> rules = dumpWriter.getWrittenMessagesOf(DumpElement.RULES);
    assertThat(rules).extracting(ProjectDump.Rule::getKey).containsExactly(keys);
    assertThat(rules).extracting(ProjectDump.Rule::getRepository).containsOnly(REPOSITORY);
}
/**
 * Validates that every given post id exists and is enabled. An empty or null
 * id collection is accepted as a no-op. Throws POST_NOT_FOUND for a missing
 * id, or POST_NOT_ENABLE (with the post name) for a disabled post.
 */
@Override
public void validatePostList(Collection<Long> ids) {
    if (CollUtil.isEmpty(ids)) {
        return;
    }
    // 获得岗位信息
    List<PostDO> posts = postMapper.selectBatchIds(ids);
    Map<Long, PostDO> postMap = convertMap(posts, PostDO::getId);
    // 校验
    ids.forEach(id -> {
        PostDO post = postMap.get(id);
        if (post == null) {
            throw exception(POST_NOT_FOUND);
        }
        if (!CommonStatusEnum.ENABLE.getStatus().equals(post.getStatus())) {
            throw exception(POST_NOT_ENABLE, post.getName());
        }
    });
}
// Validating an id that does not exist must raise POST_NOT_FOUND.
@Test
public void testValidatePostList_notFound() {
    // 准备参数
    List<Long> ids = singletonList(randomLongId());
    // 调用, 并断言异常
    assertServiceException(() -> postService.validatePostList(ids), POST_NOT_FOUND);
}
/**
 * Derives the tuple domain for a column from its ORC statistics, used for predicate pushdown.
 * Falls back to an all-values domain (plus null flag) whenever no type-specific statistics
 * are available for the column's type.
 */
@VisibleForTesting
public static Domain getDomain(Type type, long rowCount, ColumnStatistics columnStatistics)
{
    // no rows at all: nothing can match
    if (rowCount == 0) {
        return Domain.none(type);
    }
    // no statistics: cannot prune anything
    if (columnStatistics == null) {
        return Domain.all(type);
    }
    // zero non-null values recorded: column holds only nulls
    if (columnStatistics.hasNumberOfValues() && columnStatistics.getNumberOfValues() == 0) {
        return Domain.onlyNull(type);
    }
    // any gap between row count and non-null value count implies nulls are present
    boolean hasNullValue = columnStatistics.getNumberOfValues() != rowCount;
    if (type.getJavaType() == boolean.class && columnStatistics.getBooleanStatistics() != null) {
        BooleanStatistics booleanStatistics = columnStatistics.getBooleanStatistics();
        boolean hasTrueValues = (booleanStatistics.getTrueValueCount() != 0);
        // false count is implied: non-null values minus true values
        boolean hasFalseValues = (columnStatistics.getNumberOfValues() != booleanStatistics.getTrueValueCount());
        if (hasTrueValues && hasFalseValues) {
            return Domain.all(BOOLEAN);
        }
        if (hasTrueValues) {
            return Domain.create(ValueSet.of(BOOLEAN, true), hasNullValue);
        }
        if (hasFalseValues) {
            return Domain.create(ValueSet.of(BOOLEAN, false), hasNullValue);
        }
    }
    // decimals are rescaled to the column's declared scale before comparison
    else if (isShortDecimal(type) && columnStatistics.getDecimalStatistics() != null) {
        return createDomain(type, hasNullValue, columnStatistics.getDecimalStatistics(), value -> rescale(value, (DecimalType) type).unscaledValue().longValue());
    }
    else if (isLongDecimal(type) && columnStatistics.getDecimalStatistics() != null) {
        return createDomain(type, hasNullValue, columnStatistics.getDecimalStatistics(), value -> encodeUnscaledValue(rescale(value, (DecimalType) type).unscaledValue()));
    }
    // CHAR semantics require truncation and trailing-space trimming of string bounds
    else if (isCharType(type) && columnStatistics.getStringStatistics() != null) {
        return createDomain(type, hasNullValue, columnStatistics.getStringStatistics(), value -> truncateToLengthAndTrimSpaces(value, type));
    }
    else if (isVarcharType(type) && columnStatistics.getStringStatistics() != null) {
        return createDomain(type, hasNullValue, columnStatistics.getStringStatistics());
    }
    else if (type.getTypeSignature().getBase().equals(StandardTypes.DATE) && columnStatistics.getDateStatistics() != null) {
        return createDomain(type, hasNullValue, columnStatistics.getDateStatistics(), value -> (long) value);
    }
    else if (type.getJavaType() == long.class && columnStatistics.getIntegerStatistics() != null) {
        return createDomain(type, hasNullValue, columnStatistics.getIntegerStatistics());
    }
    else if (type.getJavaType() == double.class && columnStatistics.getDoubleStatistics() != null) {
        return createDomain(type, hasNullValue, columnStatistics.getDoubleStatistics());
    }
    // REAL is stored as a long holding the raw float bits
    else if (REAL.equals(type) && columnStatistics.getDoubleStatistics() != null) {
        return createDomain(type, hasNullValue, columnStatistics.getDoubleStatistics(), value -> (long) floatToRawIntBits(value.floatValue()));
    }
    return Domain.create(ValueSet.all(type), hasNullValue);
}
// Exercises getDomain for DOUBLE across all statistic shapes: no stats, empty stats,
// null-only, not-null-only, single value, closed range, and half-open ranges with and
// without nulls present.
@Test
public void testDouble() {
    assertEquals(getDomain(DOUBLE, 0, null), Domain.none(DOUBLE));
    assertEquals(getDomain(DOUBLE, 10, null), Domain.all(DOUBLE));
    assertEquals(getDomain(DOUBLE, 0, doubleColumnStats(null, null, null)), Domain.none(DOUBLE));
    assertEquals(getDomain(DOUBLE, 0, doubleColumnStats(0L, null, null)), Domain.none(DOUBLE));
    assertEquals(getDomain(DOUBLE, 0, doubleColumnStats(0L, 42.24, 42.24)), Domain.none(DOUBLE));
    assertEquals(getDomain(DOUBLE, 10, doubleColumnStats(0L, null, null)), onlyNull(DOUBLE));
    assertEquals(getDomain(DOUBLE, 10, doubleColumnStats(10L, null, null)), notNull(DOUBLE));
    assertEquals(getDomain(DOUBLE, 10, doubleColumnStats(10L, 42.24, 42.24)), singleValue(DOUBLE, 42.24));
    assertEquals(getDomain(DOUBLE, 10, doubleColumnStats(10L, 3.3, 42.24)), create(ValueSet.ofRanges(range(DOUBLE, 3.3, true, 42.24, true)), false));
    assertEquals(getDomain(DOUBLE, 10, doubleColumnStats(10L, null, 42.24)), create(ValueSet.ofRanges(lessThanOrEqual(DOUBLE, 42.24)), false));
    assertEquals(getDomain(DOUBLE, 10, doubleColumnStats(10L, 3.3, null)), create(ValueSet.ofRanges(greaterThanOrEqual(DOUBLE, 3.3)), false));
    // 5 of 10 values non-null: same ranges but with the null flag set
    assertEquals(getDomain(DOUBLE, 10, doubleColumnStats(5L, 3.3, 42.24)), create(ValueSet.ofRanges(range(DOUBLE, 3.3, true, 42.24, true)), true));
    assertEquals(getDomain(DOUBLE, 10, doubleColumnStats(5L, null, 42.24)), create(ValueSet.ofRanges(lessThanOrEqual(DOUBLE, 42.24)), true));
    assertEquals(getDomain(DOUBLE, 10, doubleColumnStats(5L, 3.3, null)), create(ValueSet.ofRanges(greaterThanOrEqual(DOUBLE, 3.3)), true));
}
/**
 * TINYINT addition. The sum is computed in long arithmetic (cannot overflow there)
 * and then range-checked against the signed byte range; out-of-range results map to
 * a NUMERIC_VALUE_OUT_OF_RANGE error.
 */
@ScalarOperator(ADD)
@SqlType(StandardTypes.TINYINT)
public static long add(@SqlType(StandardTypes.TINYINT) long left, @SqlType(StandardTypes.TINYINT) long right)
{
    long sum = left + right;
    try {
        return SignedBytes.checkedCast(sum);
    }
    catch (IllegalArgumentException e) {
        throw new PrestoException(NUMERIC_VALUE_OUT_OF_RANGE, format("tinyint addition overflow: %s + %s", left, right), e);
    }
}
// In-range TINYINT additions succeed; MAX_VALUE + 1 must raise the overflow error.
@Test
public void testAdd() {
    assertFunction("TINYINT'37' + TINYINT'37'", TINYINT, (byte) (37 + 37));
    assertFunction("TINYINT'37' + TINYINT'17'", TINYINT, (byte) (37 + 17));
    assertFunction("TINYINT'17' + TINYINT'37'", TINYINT, (byte) (17 + 37));
    assertFunction("TINYINT'17' + TINYINT'17'", TINYINT, (byte) (17 + 17));
    assertNumericOverflow(format("TINYINT'%s' + TINYINT'1'", Byte.MAX_VALUE), "tinyint addition overflow: 127 + 1");
}
/**
 * Writes a single unsigned byte.
 *
 * @param value the value to write; must be in [0, 255]
 * @throws ExceptionWithContext if the value is outside the unsigned byte range
 * @throws IOException if the underlying write fails
 */
public void writeUbyte(int value) throws IOException {
    // any bit set above the low 8 (including the sign bit) means out of range
    if ((value & ~0xFF) != 0) {
        throw new ExceptionWithContext("Unsigned byte value out of range: %d", value);
    }
    write(value);
}
// -1 is below the unsigned-byte range, so writeUbyte must throw ExceptionWithContext.
@Test(expected=ExceptionWithContext.class)
public void testWriteUbyteOutOfBounds() throws IOException {
    writer.writeUbyte(-1);
}
/**
 * Resolves the property through the usual string lookup chain and parses it as an int.
 *
 * @throws NumberFormatException if the resolved value is not a valid integer
 */
public int getInteger(HazelcastProperty property) {
    final String value = getString(property);
    return Integer.parseInt(value);
}
// With no system/config value set, the property's function-based default supplies the value.
@Test
public void testGet_whenFunctionAvailable_andNoOtherSettings() {
    Properties props = new Properties();
    HazelcastProperty p = new HazelcastProperty("key", (Function<HazelcastProperties, Integer>) properties -> 23);
    HazelcastProperties properties = new HazelcastProperties(props);
    assertEquals(23, properties.getInteger(p));
}
/**
 * Constant folding for years_sub(DATETIME, INT): subtracts the given number of years
 * from the datetime, returning NULL (via createDatetimeOrNull) when the result is out of range.
 */
@ConstantFunction(name = "years_sub", argTypes = {DATETIME, INT}, returnType = DATETIME, isMonotonic = true)
public static ConstantOperator yearsSub(ConstantOperator date, ConstantOperator year) {
    int yearsToSubtract = year.getInt();
    return ConstantOperator.createDatetimeOrNull(date.getDatetime().minusYears(yearsToSubtract));
}
// 2015-03-23 09:23:55 minus 10 years must yield 2005-03-23T09:23:55.
@Test
public void yearsSub() {
    assertEquals("2005-03-23T09:23:55", ScalarOperatorFunctions.yearsSub(O_DT_20150323_092355, O_INT_10).getDatetime().toString());
}
/**
 * Delegates the asynchronous put straight to the wrapped map (no expiry-policy variant here).
 */
@Override
public CompletionStage<V> putAsync(K key, V value) {
    return map.putAsync(key, value);
}
// The map adapter does not support the ExpiryPolicy overload of putAsync,
// so the call must fail with MethodNotAvailableException.
@Test(expected = MethodNotAvailableException.class)
public void testPutAsyncWithExpiryPolicy() {
    ExpiryPolicy expiryPolicy = new HazelcastExpiryPolicy(1, 1, 1, TimeUnit.MILLISECONDS);
    adapter.putAsync(42, "value", expiryPolicy);
}
/**
 * Encodes a decimal as its unscaled long value at exactly the given scale.
 * Rescaling uses RoundingMode.UNNECESSARY, so a value that cannot be represented
 * exactly at that scale throws ArithmeticException; a negative scale throws
 * IllegalArgumentException; a result outside the long range also throws
 * ArithmeticException (longValueExact).
 */
public static long encodeShortScaledValue(BigDecimal value, int scale) {
    if (scale < 0) {
        throw new IllegalArgumentException();
    }
    BigDecimal rescaled = value.setScale(scale, java.math.RoundingMode.UNNECESSARY);
    return rescaled.unscaledValue().longValueExact();
}
// Covers exact-scale values, values needing zero-padding to the target scale,
// and negative values; all must round-trip to the expected unscaled long.
@Test
public void testEncodeShortScaledValue() {
    assertEquals(encodeShortScaledValue(new BigDecimal("2.00"), 2), 200L);
    assertEquals(encodeShortScaledValue(new BigDecimal("2.13"), 2), 213L);
    assertEquals(encodeShortScaledValue(new BigDecimal("172.60"), 2), 17260L);
    assertEquals(encodeShortScaledValue(new BigDecimal("2"), 2), 200L);
    assertEquals(encodeShortScaledValue(new BigDecimal("172.6"), 2), 17260L);
    assertEquals(encodeShortScaledValue(new BigDecimal("-2.00"), 2), -200L);
    assertEquals(encodeShortScaledValue(new BigDecimal("-2.13"), 2), -213L);
    assertEquals(encodeShortScaledValue(new BigDecimal("-2"), 2), -200L);
}
/**
 * Builds the output filename. When the KETTLE_JSON_OUTPUT_FORCE_SAME_OUTPUT_FILE
 * system property is "Y", a single date-based filename is reused; otherwise the
 * name is derived from the transformation, step copy, and split number.
 */
public String buildFilename() {
  String forceFlag = System.getProperty( Const.KETTLE_JSON_OUTPUT_FORCE_SAME_OUTPUT_FILE, "N" );
  if ( "Y".equalsIgnoreCase( forceFlag ) ) {
    return meta.buildFilename( environmentSubstitute( meta.getFileName() ), startProcessingDate );
  }
  return meta.buildFilename( meta.getParentStepMeta().getParentTransMeta(), getCopy() + "", null, data.splitnr + "", false );
}
// With the force-same-output-file flag set to "N", buildFilename must take the
// transformation-based path and never call the date-based overload.
@Test
public void testBuildFilenameWithoutForceSameOutputFile() {
    StepMeta stepMeta = mock( StepMeta.class );
    JsonOutputData jsonOutputData = mock( JsonOutputData.class );
    JsonOutputMeta jsonOutputMeta = mock( JsonOutputMeta.class );
    int copyNr = 1;
    TransMeta transMeta = mock( TransMeta.class );
    Trans trans = mock( Trans.class );
    RowMetaInterface rowMetaInterface = mock( RowMetaInterface.class );
    when( stepMeta.getName() ).thenReturn( UUID.randomUUID().toString() );
    when( stepMeta.hasTerminator() ).thenReturn( false );
    when( transMeta.findStep( any( String.class ) ) ).thenReturn( stepMeta );
    when( stepMeta.getStepMetaInterface() ).thenReturn( jsonOutputMeta );
    when( jsonOutputMeta.getParentStepMeta() ).thenReturn( stepMeta );
    JsonOutput jsonOutput = spy( new JsonOutput( stepMeta, jsonOutputData, copyNr, transMeta, trans ) );
    // inject the mocked meta/data directly since the step was not initialized normally
    ReflectionTestUtils.setField( jsonOutput, "meta", jsonOutputMeta );
    ReflectionTestUtils.setField( jsonOutput, "data", jsonOutputData );
    System.setProperty( Const.KETTLE_JSON_OUTPUT_FORCE_SAME_OUTPUT_FILE, "N" );
    jsonOutput.buildFilename();
    // the date-based overload belongs to the forced path and must not be reached
    verify( jsonOutputMeta, times( 0 ) ).buildFilename( anyString(), any( Date.class ) );
}
/**
 * Reads the column as a {@link Time}, converting the merged result set's raw value if needed.
 */
@Override
public Time getTime(final int columnIndex) throws SQLException {
    return (Time) ResultSetUtils.convertValue(mergeResultSet.getValue(columnIndex, Time.class), Time.class);
}
// getTime(label, calendar) must resolve the label to column index 1 and delegate
// to the merged result set's calendar-aware accessor.
@Test
void assertGetTimeAndCalendarWithColumnLabel() throws SQLException {
    Calendar calendar = Calendar.getInstance();
    when(mergeResultSet.getCalendarValue(1, Time.class, calendar)).thenReturn(new Time(0L));
    assertThat(shardingSphereResultSet.getTime("label", calendar), is(new Time(0L)));
}
/**
 * Round-robin partition assignment: walks all partitions in sorted order and hands each
 * one to the next member (in sorted member order) that is subscribed to its topic.
 */
@Override
public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic,
                                                Map<String, Subscription> subscriptions) {
    Map<String, List<TopicPartition>> assignment = new HashMap<>();
    List<MemberInfo> members = new ArrayList<>();
    subscriptions.forEach((memberId, subscription) -> {
        assignment.put(memberId, new ArrayList<>());
        members.add(new MemberInfo(memberId, subscription.groupInstanceId()));
    });
    // cycle through members in a stable (sorted) order
    CircularIterator<MemberInfo> assigner = new CircularIterator<>(Utils.sorted(members));
    for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) {
        // skip members that are not subscribed to this partition's topic
        while (!subscriptions.get(assigner.peek().memberId).topics().contains(partition.topic()))
            assigner.next();
        assignment.get(assigner.next().memberId).add(partition);
    }
    return assignment;
}
// Three consumers with overlapping subscriptions: topic1 partitions rotate across all
// three, while topic2 partitions can only land on consumer2 (the sole subscriber).
@Test
public void testMultipleConsumersMixedTopics() {
    String topic1 = "topic1";
    String topic2 = "topic2";
    String consumer1 = "consumer1";
    String consumer2 = "consumer2";
    String consumer3 = "consumer3";
    Map<String, Integer> partitionsPerTopic = setupPartitionsPerTopicWithTwoTopics(3, 2);
    Map<String, Subscription> consumers = new HashMap<>();
    consumers.put(consumer1, new Subscription(topics(topic1)));
    consumers.put(consumer2, new Subscription(topics(topic1, topic2)));
    consumers.put(consumer3, new Subscription(topics(topic1)));
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers);
    assertEquals(partitions(tp(topic1, 0)), assignment.get(consumer1));
    assertEquals(partitions(tp(topic1, 1), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumer2));
    assertEquals(partitions(tp(topic1, 2)), assignment.get(consumer3));
}
/** Returns the scheduler that tracks this manager's pending and running task runs. */
public TaskRunScheduler getTaskRunScheduler() {
    return taskRunScheduler;
}
// Exercises redundant-task-run merging in the pending queue: sync submissions are never
// merged, then fills the queue to its configured capacity and verifies further
// submissions are rejected.
@Test
public void testTaskRunMergeRedundant1() {
    TaskRunManager taskRunManager = new TaskRunManager();
    Task task = new Task("test");
    task.setDefinition("select 1");
    long taskId = 1;
    TaskRun taskRun1 = makeTaskRun(taskId, task, makeExecuteOption(true, false));
    TaskRun taskRun2 = makeTaskRun(taskId, task, makeExecuteOption(true, true));
    // If it's a sync refresh, no merge redundant anyway
    SubmitResult result = taskRunManager.submitTaskRun(taskRun1, taskRun1.getExecuteOption());
    Assert.assertTrue(result.getStatus() == SubmitResult.SubmitStatus.SUBMITTED);
    result = taskRunManager.submitTaskRun(taskRun2, taskRun2.getExecuteOption());
    Assert.assertTrue(result.getStatus() == SubmitResult.SubmitStatus.SUBMITTED);
    TaskRunScheduler taskRunScheduler = taskRunManager.getTaskRunScheduler();
    Collection<TaskRun> taskRuns = taskRunScheduler.getPendingTaskRunsByTaskId(taskId);
    Assert.assertTrue(taskRuns != null);
    Assert.assertEquals(2, taskRunScheduler.getPendingQueueCount());
    Assert.assertEquals(2, taskRunScheduler.getPendingTaskRunsByTaskId(taskId).size());
    // If it's a sync refresh, no merge redundant anyway
    TaskRun taskRun3 = makeTaskRun(taskId, task, makeExecuteOption(false, true));
    result = taskRunManager.submitTaskRun(taskRun3, taskRun3.getExecuteOption());
    Assert.assertTrue(result.getStatus() == SubmitResult.SubmitStatus.SUBMITTED);
    Assert.assertEquals(3, taskRunScheduler.getPendingQueueCount());
    Assert.assertEquals(3, taskRunScheduler.getPendingTaskRunsByTaskId(taskId).size());
    // merge it
    TaskRun taskRun4 = makeTaskRun(taskId, task, makeExecuteOption(true, false));
    result = taskRunManager.submitTaskRun(taskRun4, taskRun4.getExecuteOption());
    Assert.assertTrue(result.getStatus() == SubmitResult.SubmitStatus.SUBMITTED);
    Assert.assertEquals(3, taskRunScheduler.getPendingQueueCount());
    Assert.assertEquals(3, taskRunScheduler.getPendingTaskRunsByTaskId(taskId).size());
    // no merge it
    TaskRun taskRun5 = makeTaskRun(taskId, task, makeExecuteOption(false, false));
    result = taskRunManager.submitTaskRun(taskRun5, taskRun5.getExecuteOption());
    Assert.assertTrue(result.getStatus() == SubmitResult.SubmitStatus.SUBMITTED);
    Assert.assertEquals(4, taskRunScheduler.getPendingQueueCount());
    Assert.assertEquals(4, taskRunScheduler.getPendingTaskRunsByTaskId(taskId).size());
    // fill the pending queue up to its configured capacity
    for (int i = 4; i < Config.task_runs_queue_length; i++) {
        TaskRun taskRun = makeTaskRun(taskId, task, makeExecuteOption(false, false));
        result = taskRunManager.submitTaskRun(taskRun, taskRun.getExecuteOption());
        Assert.assertTrue(result.getStatus() == SubmitResult.SubmitStatus.SUBMITTED);
        Assert.assertEquals(i + 1, taskRunScheduler.getPendingQueueCount());
        Assert.assertEquals(i + 1, taskRunScheduler.getPendingTaskRunsByTaskId(taskId).size());
    }
    // no assign it: exceed queue's size
    TaskRun taskRun6 = makeTaskRun(taskId, task, makeExecuteOption(false, false));
    result = taskRunManager.submitTaskRun(taskRun6, taskRun6.getExecuteOption());
    Assert.assertTrue(result.getStatus() == SubmitResult.SubmitStatus.REJECTED);
    Assert.assertEquals(Config.task_runs_queue_length, taskRunScheduler.getPendingQueueCount());
    Assert.assertEquals(Config.task_runs_queue_length, taskRunScheduler.getPendingTaskRunsByTaskId(taskId).size());
    // no assign it: exceed queue's size
    TaskRun taskRun7 = makeTaskRun(taskId, task, makeExecuteOption(false, false));
    result = taskRunManager.submitTaskRun(taskRun7, taskRun7.getExecuteOption());
    Assert.assertTrue(result.getStatus() == SubmitResult.SubmitStatus.REJECTED);
    Assert.assertEquals(Config.task_runs_queue_length, taskRunScheduler.getPendingQueueCount());
    Assert.assertEquals(Config.task_runs_queue_length, taskRunScheduler.getPendingTaskRunsByTaskId(taskId).size());
}
/**
 * Fans the getLabelsToNodes call out to every sub-cluster concurrently, merges the
 * per-cluster responses into one, and records success/failure metrics and audit logs.
 *
 * @throws YarnException if the request is null or any sub-cluster invocation fails
 */
@Override
public GetLabelsToNodesResponse getLabelsToNodes(
    GetLabelsToNodesRequest request) throws YarnException, IOException {
  if (request == null) {
    routerMetrics.incrLabelsToNodesFailedRetrieved();
    // NOTE(review): message says "getNodesToLabels" in a getLabelsToNodes path -- looks copy-pasted; confirm.
    String msg = "Missing getNodesToLabels request.";
    RouterAuditLogger.logFailure(user.getShortUserName(), GET_LABELSTONODES, UNKNOWN,
        TARGET_CLIENT_RM_SERVICE, msg);
    RouterServerUtil.logAndThrowException(msg, null);
  }
  long startTime = clock.getTime();
  ClientMethod remoteMethod = new ClientMethod("getLabelsToNodes",
      new Class[] {GetLabelsToNodesRequest.class}, new Object[] {request});
  Collection<GetLabelsToNodesResponse> labelNodes = null;
  try {
    // invoke the remote method on all sub-clusters in parallel
    labelNodes = invokeConcurrent(remoteMethod, GetLabelsToNodesResponse.class);
  } catch (Exception ex) {
    routerMetrics.incrLabelsToNodesFailedRetrieved();
    String msg = "Unable to get label node due to exception.";
    RouterAuditLogger.logFailure(user.getShortUserName(), GET_LABELSTONODES, UNKNOWN,
        TARGET_CLIENT_RM_SERVICE, msg);
    RouterServerUtil.logAndThrowException(msg, ex);
  }
  long stopTime = clock.getTime();
  routerMetrics.succeededGetLabelsToNodesRetrieved(stopTime - startTime);
  RouterAuditLogger.logSuccess(user.getShortUserName(), GET_LABELSTONODES,
      TARGET_CLIENT_RM_SERVICE);
  // Merge the LabelsToNodesResponse
  return RouterYarnClientUtils.mergeLabelsToNodes(labelNodes);
}
// A null request must fail fast with a YarnException; a normal request against a
// cluster with no labels merges to an empty mapping.
@Test
public void testGetLabelsToNodesRequest() throws Exception {
    LOG.info("Test FederationClientInterceptor : Get Labels To Node request.");
    // null request
    LambdaTestUtils.intercept(YarnException.class, "Missing getLabelsToNodes request.",
        () -> interceptor.getLabelsToNodes(null));
    // normal request.
    GetLabelsToNodesResponse response = interceptor.getLabelsToNodes(GetLabelsToNodesRequest.newInstance());
    Assert.assertEquals(0, response.getLabelsToNodes().size());
}
/**
 * Dissects an archive control request log record into a human-readable form.
 * Writes the common log header, decodes the SBE message header, then wraps the
 * matching request decoder for the command and appends its fields to the builder.
 * Unknown commands append ": unknown command".
 */
@SuppressWarnings("MethodLength")
static void dissectControlRequest(
    final ArchiveEventCode eventCode,
    final MutableDirectBuffer buffer,
    final int offset,
    final StringBuilder builder)
{
    int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);
    // the SBE message header follows the log header; each case decoder starts after it
    HEADER_DECODER.wrap(buffer, offset + encodedLength);
    encodedLength += MessageHeaderDecoder.ENCODED_LENGTH;
    switch (eventCode)
    {
        case CMD_IN_CONNECT:
            CONNECT_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendConnect(builder);
            break;
        case CMD_IN_CLOSE_SESSION:
            CLOSE_SESSION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendCloseSession(builder);
            break;
        case CMD_IN_START_RECORDING:
            START_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartRecording(builder);
            break;
        case CMD_IN_STOP_RECORDING:
            STOP_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecording(builder);
            break;
        case CMD_IN_REPLAY:
            REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplay(builder);
            break;
        case CMD_IN_STOP_REPLAY:
            STOP_REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopReplay(builder);
            break;
        case CMD_IN_LIST_RECORDINGS:
            LIST_RECORDINGS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordings(builder);
            break;
        case CMD_IN_LIST_RECORDINGS_FOR_URI:
            LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordingsForUri(builder);
            break;
        case CMD_IN_LIST_RECORDING:
            LIST_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecording(builder);
            break;
        case CMD_IN_EXTEND_RECORDING:
            EXTEND_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendExtendRecording(builder);
            break;
        case CMD_IN_RECORDING_POSITION:
            RECORDING_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendRecordingPosition(builder);
            break;
        case CMD_IN_TRUNCATE_RECORDING:
            TRUNCATE_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendTruncateRecording(builder);
            break;
        case CMD_IN_STOP_RECORDING_SUBSCRIPTION:
            STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecordingSubscription(builder);
            break;
        case CMD_IN_STOP_POSITION:
            STOP_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopPosition(builder);
            break;
        case CMD_IN_FIND_LAST_MATCHING_RECORD:
            FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendFindLastMatchingRecord(builder);
            break;
        case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS:
            LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordingSubscriptions(builder);
            break;
        case CMD_IN_START_BOUNDED_REPLAY:
            BOUNDED_REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartBoundedReplay(builder);
            break;
        case CMD_IN_STOP_ALL_REPLAYS:
            STOP_ALL_REPLAYS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopAllReplays(builder);
            break;
        case CMD_IN_REPLICATE:
            REPLICATE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplicate(builder);
            break;
        case CMD_IN_STOP_REPLICATION:
            STOP_REPLICATION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopReplication(builder);
            break;
        case CMD_IN_START_POSITION:
            START_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartPosition(builder);
            break;
        case CMD_IN_DETACH_SEGMENTS:
            DETACH_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendDetachSegments(builder);
            break;
        case CMD_IN_DELETE_DETACHED_SEGMENTS:
            DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendDeleteDetachedSegments(builder);
            break;
        case CMD_IN_PURGE_SEGMENTS:
            PURGE_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendPurgeSegments(builder);
            break;
        case CMD_IN_ATTACH_SEGMENTS:
            ATTACH_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendAttachSegments(builder);
            break;
        case CMD_IN_MIGRATE_SEGMENTS:
            MIGRATE_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendMigrateSegments(builder);
            break;
        case CMD_IN_AUTH_CONNECT:
            AUTH_CONNECT_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendAuthConnect(builder);
            break;
        case CMD_IN_KEEP_ALIVE:
            KEEP_ALIVE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendKeepAlive(builder);
            break;
        case CMD_IN_TAGGED_REPLICATE:
            TAGGED_REPLICATE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendTaggedReplicate(builder);
            break;
        case CMD_IN_START_RECORDING2:
            START_RECORDING_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartRecording2(builder);
            break;
        case CMD_IN_EXTEND_RECORDING2:
            EXTEND_RECORDING_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendExtendRecording2(builder);
            break;
        case CMD_IN_STOP_RECORDING_BY_IDENTITY:
            STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecordingByIdentity(builder);
            break;
        case CMD_IN_PURGE_RECORDING:
            PURGE_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendPurgeRecording(builder);
            break;
        case CMD_IN_REPLICATE2:
            REPLICATE_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplicate2(builder);
            break;
        case CMD_IN_REQUEST_REPLAY_TOKEN:
            REPLAY_TOKEN_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplayToken(builder);
            break;
        default:
            builder.append(": unknown command");
    }
}
// Encodes a StartRecordingRequest2 after a log header and verifies the dissected
// string reproduces the header timestamp, lengths, and every request field.
@Test
void controlRequestStartRecording2() {
    internalEncodeLogHeader(buffer, 0, 32, 64, () -> 5_600_000_000L);
    final StartRecordingRequest2Encoder requestEncoder = new StartRecordingRequest2Encoder();
    requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder)
        .controlSessionId(5)
        .correlationId(13)
        .streamId(7)
        .sourceLocation(SourceLocation.REMOTE)
        .autoStop(BooleanType.TRUE)
        .channel("foo");
    dissectControlRequest(CMD_IN_START_RECORDING2, buffer, 0, builder);
    assertEquals("[5.600000000] " + CONTEXT + ": " + CMD_IN_START_RECORDING2.name() + " [32/64]:" +
        " controlSessionId=5" +
        " correlationId=13" +
        " streamId=7" +
        " sourceLocation=" + SourceLocation.REMOTE +
        " autoStop=" + BooleanType.TRUE +
        " channel=foo",
        builder.toString());
}
/**
 * Deletes the file or directory (recursively), swallowing errors.
 * A null argument is a no-op that reports {@code false}.
 */
public static boolean deleteQuietly(@Nullable File file) {
    return file != null && deleteQuietly(file.toPath());
}
// Builds a two-level directory tree and verifies deleteQuietly removes the root
// and every nested file and directory.
@Test
public void deleteQuietly_deletes_directory_and_content() throws IOException {
    Path target = temporaryFolder.newFolder().toPath();
    Path childFile1 = Files.createFile(target.resolve("file1.txt"));
    Path childDir1 = Files.createDirectory(target.resolve("subDir1"));
    Path childFile2 = Files.createFile(childDir1.resolve("file2.txt"));
    Path childDir2 = Files.createDirectory(childDir1.resolve("subDir2"));
    // sanity-check the fixture before deleting
    assertThat(target).isDirectory();
    assertThat(childFile1).isRegularFile();
    assertThat(childDir1).isDirectory();
    assertThat(childFile2).isRegularFile();
    assertThat(childDir2).isDirectory();
    FileUtils.deleteQuietly(target.toFile());
    assertThat(target).doesNotExist();
    assertThat(childFile1).doesNotExist();
    assertThat(childDir1).doesNotExist();
    assertThat(childFile2).doesNotExist();
    assertThat(childDir2).doesNotExist();
}
/**
 * Returns the schema for the given id, reading and caching it on first access.
 * For fragment ids ("...#/path"), the base document is resolved and cached first,
 * then the fragment is resolved inside it as a child schema.
 * Synchronized to keep the shared cache consistent.
 */
public synchronized Schema create(URI id, String refFragmentPathDelimiters) {
    URI normalizedId = id.normalize();
    if (!schemas.containsKey(normalizedId)) {
        // resolve and cache the base document (id without its '#fragment') first
        URI baseId = removeFragment(id).normalize();
        if (!schemas.containsKey(baseId)) {
            logger.debug("Reading schema: " + baseId);
            final JsonNode baseContent = contentResolver.resolve(baseId);
            schemas.put(baseId, new Schema(baseId, baseContent, null));
        }
        final Schema baseSchema = schemas.get(baseId);
        if (normalizedId.toString().contains("#")) {
            // locate the fragment within the base document and cache it as a child schema
            JsonNode childContent = fragmentResolver.resolve(baseSchema.getContent(), '#' + id.getFragment(), refFragmentPathDelimiters);
            schemas.put(normalizedId, new Schema(normalizedId, childContent, baseSchema));
        }
    }
    return schemas.get(normalizedId);
}
// Resolving a fragment pointer relative to an already-loaded base schema must return
// the inner node with an id of base-uri + fragment.
@Test
public void createWithFragmentResolution() throws URISyntaxException {
    URI addressSchemaUri = getClass().getResource("/schema/address.json").toURI();
    SchemaStore schemaStore = new SchemaStore();
    Schema addressSchema = schemaStore.create(addressSchemaUri, "#/.");
    Schema innerSchema = schemaStore.create(addressSchema, "#/properties/post-office-box", "#/.");
    String expectedUri = addressSchemaUri.toString() + "#/properties/post-office-box";
    assertThat(innerSchema, is(notNullValue()));
    assertThat(innerSchema.getId(), is(equalTo(URI.create(expectedUri))));
    assertThat(innerSchema.getContent().has("type"), is(true));
    assertThat(innerSchema.getContent().get("type").asText(), is("string"));
}
/**
 * Removes and returns the minimum element (the list tail), or null when the set is empty.
 * Updates the size and aggregate-count bookkeeping and drops the element from the lookup map.
 */
protected T removeMin() {
    if (tail == null) {
        return null;
    }
    size--;
    count -= tail.count;
    T minElement = tail.element;
    // unlink the old tail; its predecessor (if any) becomes the new tail
    tail = tail.prev;
    if (tail != null) {
        tail.next = null;
    }
    sampleMap.remove(minElement);
    return minElement;
}
// removeMin on an empty set returns null; after inserting elements with multiplicities
// 1/2/3, successive removeMin calls return them in order and drain size and count to zero.
@Test
public void testRemoveMin() {
    // Empty set
    assertNull(set.removeMin());
    assertEquals(0, set.size());
    assertEquals(0L, set.count());
    // Maintaining order
    set.put(e[0]);
    for (int i = 0; i < 2; i++) {
        set.put(e[1]);
    }
    for (int i = 0; i < 3; i++) {
        set.put(e[2]);
    }
    assertEquals(3, set.size());
    assertEquals(6L, set.count());
    assertEquals(e[0], set.removeMin());
    assertEquals(2, set.size());
    assertEquals(5L, set.count());
    assertEquals(e[1], set.removeMin());
    assertEquals(1, set.size());
    assertEquals(3L, set.count());
    assertEquals(e[2], set.removeMin());
    assertEquals(0, set.size());
    assertEquals(0L, set.count());
    assertEquals(null, set.removeMin());
    assertEquals(0, set.size());
    assertEquals(0L, set.count());
}
/**
 * Sets the list of CVE identifiers.
 * NOTE(review): the list is stored without a defensive copy -- callers share the reference.
 */
public void setCve(List<String> cve) {
    this.cve = cve;
}
// Intentionally empty placeholder: the setter is covered elsewhere; kept so the IDE
// does not regenerate a stub. S2699 (no assertion) is suppressed for that reason.
@Test
@SuppressWarnings("squid:S2699")
public void testSetCve() {
    //already tested, this is just left so the IDE doesn't recreate it.
}
/**
 * Lists tables, views, materialized views, and foreign tables in the given schema
 * via JDBC metadata, scoped to the connection's current catalog.
 */
@Override
public ResultSet getTables(Connection connection, String dbName) throws SQLException {
    String[] tableTypes = {"TABLE", "VIEW", "MATERIALIZED VIEW", "FOREIGN TABLE"};
    String catalog = connection.getCatalog();
    return connection.getMetaData().getTables(catalog, dbName, null, tableTypes);
}
// Stubs JDBC metadata to return a fixed table result and verifies listTableNames
// surfaces the expected names for schema "test".
@Test
public void testListTableNames() throws SQLException {
    new Expectations() {
        {
            dataSource.getConnection();
            result = connection;
            minTimes = 0;
            connection.getCatalog();
            result = "t1";
            minTimes = 0;
            connection.getMetaData().getTables("t1", "test", null,
                new String[] {"TABLE", "VIEW", "MATERIALIZED VIEW", "FOREIGN TABLE"});
            result = tableResult;
            minTimes = 0;
        }
    };
    try {
        JDBCMetadata jdbcMetadata = new JDBCMetadata(properties, "catalog", dataSource);
        List<String> result = jdbcMetadata.listTableNames("test");
        List<String> expectResult = Lists.newArrayList("tbl1", "tbl2", "tbl3");
        Assert.assertEquals(expectResult, result);
    } catch (Exception e) {
        Assert.fail();
    }
}
/**
 * Looks up a serializer by its case-insensitive type name (e.g. "JSON").
 *
 * @param type the serializer type name; must not be null
 * @return the registered serializer, or null if none matches
 */
public static Serializer getSerializer(String type) {
    // Locale.ROOT avoids locale-sensitive case mapping (e.g. the Turkish dotless-i,
    // where "JSON".toLowerCase() would not produce "json" under tr-TR).
    return SERIALIZER_MAP.get(type.toLowerCase(java.util.Locale.ROOT));
}
// Lookup is case-insensitive: the upper-case key "JSON" must resolve to the
// Jackson-based serializer.
@Test
void testGetSerializer() {
    Serializer serializer = SerializeFactory.getSerializer("JSON");
    assertTrue(serializer instanceof JacksonSerializer);
}
/**
 * Convenience overload: loads all edits from the stream starting at the expected txid,
 * with no txid upper bound and no recovery context or progress callback.
 */
long loadFSEdits(EditLogInputStream edits, long expectedStartingTxId) throws IOException {
    return loadFSEdits(edits, expectedStartingTxId, Long.MAX_VALUE, null, null);
}
// Verifies the edit-log load logging is rate limited: the first load logs fully,
// a load inside the throttle interval is suppressed, and a later load reports the
// suppression count and cumulative totals.
@Test
public void testLoadFSEditLogThrottling() throws Exception {
    FSNamesystem namesystem = mock(FSNamesystem.class);
    namesystem.dir = mock(FSDirectory.class);
    FakeTimer timer = new FakeTimer();
    FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0, timer);
    FSEditLogLoader.LOAD_EDITS_LOG_HELPER.reset();
    LogCapturer capture = LogCapturer.captureLogs(FSImage.LOG);
    loader.loadFSEdits(getFakeEditLogInputStream(1, 10), 1);
    assertTrue(capture.getOutput().contains("Start loading edits file " + FAKE_EDIT_STREAM_NAME));
    assertTrue(capture.getOutput().contains("Loaded 1 edits file(s)"));
    assertFalse(capture.getOutput().contains("suppressed"));
    // half the throttle interval: this load's logging must be suppressed
    timer.advance(FSEditLogLoader.LOAD_EDIT_LOG_INTERVAL_MS / 2);
    capture.clearOutput();
    loader.loadFSEdits(getFakeEditLogInputStream(11, 20), 11);
    assertFalse(capture.getOutput().contains("Start loading edits file"));
    assertFalse(capture.getOutput().contains("edits file(s)"));
    // past the interval: logging resumes and reports what was suppressed
    timer.advance(FSEditLogLoader.LOAD_EDIT_LOG_INTERVAL_MS);
    capture.clearOutput();
    loader.loadFSEdits(getFakeEditLogInputStream(21, 30), 21);
    assertTrue(capture.getOutput().contains("Start loading edits file " + FAKE_EDIT_STREAM_NAME));
    assertTrue(capture.getOutput().contains("suppressed logging 1 times"));
    assertTrue(capture.getOutput().contains("Loaded 2 edits file(s)"));
    assertTrue(capture.getOutput().contains("total size 2.0"));
}
/** Sends the request asynchronously with no extra context (delegates to the two-arg overload). */
@Override
public ListenableFuture<HttpResponse> sendAsync(HttpRequest httpRequest) {
    return sendAsync(httpRequest, null);
}
// A request to an unresolvable host must not throw synchronously: the failure
// surfaces as an IOException wrapped in the future's ExecutionException.
@Test
public void sendAsync_whenRequestFailed_returnsFutureWithException() {
    ListenableFuture<HttpResponse> responseFuture = httpClient.sendAsync(get("http://unknownhost/path").withEmptyHeaders().build());
    ExecutionException ex = assertThrows(ExecutionException.class, responseFuture::get);
    assertThat(ex).hasCauseThat().isInstanceOf(IOException.class);
}
/**
 * Reports whether the object's class is NOT annotated with {@code @NoAutoStart}.
 * Enums and their subclasses are taken as unmarked unless the annotation is found;
 * a null object reports {@code false}.
 */
public static boolean notMarkedWithNoAutoStart(Object o) {
    if (o == null) {
        return false;
    }
    // absent annotation means the object may auto-start
    return findAnnotation(o.getClass(), NoAutoStart.class) == null;
}
@Test
public void commonObject() {
    // a plain Object carries no @NoAutoStart annotation, so it is not marked
    assertTrue(NoAutoStartUtil.notMarkedWithNoAutoStart(new Object()));
}
/**
 * Encodes the byte array. In whole-stream context no length prefix is written and,
 * when the stream supports it, ownership of the array is transferred to avoid a copy;
 * otherwise the length is written as a VarInt followed by the bytes.
 */
public void encodeAndOwn(byte[] value, OutputStream outStream, Context context) throws IOException, CoderException {
    if (context.isWholeStream) {
        if (outStream instanceof ExposedByteArrayOutputStream) {
            // transfer ownership of the array instead of copying it
            ((ExposedByteArrayOutputStream) outStream).writeAndOwn(value);
        } else {
            outStream.write(value);
        }
    } else {
        // nested context: prefix with the length so the value can be framed on read
        VarInt.encode(value.length, outStream);
        outStream.write(value);
    }
}
// The ownership-transferring fast path must produce byte-for-byte the same
// encoding as the regular (copying) encode path for every test value.
@Test
public void testEncodeAndOwn() throws Exception {
  for (byte[] value : TEST_VALUES) {
    byte[] encodedSlow = CoderUtils.encodeToByteArray(TEST_CODER, value);
    byte[] encodedFast = encodeToByteArrayAndOwn(TEST_CODER, value);
    assertThat(encodedSlow, equalTo(encodedFast));
  }
}
/**
 * Reports whether this format version writes the magic header.
 * Every supported version does, so this is unconditionally {@code true}.
 * (The previous implementation switched on {@code getVersion()} but returned
 * {@code true} on both the DEFAULT_VERSION case and the default case; the
 * redundant branching is removed without any behavior change.)
 */
@Override
public boolean hasMagicHeader() {
  return true;
}
// Both the default-version and current-version instances must report the magic header.
@Test
public void testHasMagicHeader() {
  assertTrue(verDefault.hasMagicHeader());
  assertTrue(verCurrent.hasMagicHeader());
}
/**
 * Opens a read stream for the given file, honoring the byte range implied by
 * an append (resumed) transfer. Manta cannot stream empty objects, so that
 * case is detected via the thrown UnsupportedOperationException and a
 * zero-length stream is returned instead of issuing a HEAD for every read.
 *
 * @throws AccessDeniedException if streaming failed and the object is not actually empty
 */
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback connectionCallback) throws BackgroundException {
  final MantaHttpHeaders headers = new MantaHttpHeaders();
  try {
    try {
      if(status.isAppend()) {
        // Resume: request only the remaining bytes. An open-ended range
        // (end < 0) is sent with no explicit end offset.
        final HttpRange range = HttpRange.withStatus(status);
        headers.setByteRange(range.getStart(), range.getEnd() < 0 ? null : range.getEnd());
      }
      // Requesting an empty file as an InputStream doesn't work, but we also don't want to
      // perform a HEAD request for every read so we'll opt to handle the exception instead
      // see https://github.com/joyent/java-manta/issues/248
      return session.getClient().getAsInputStream(file.getAbsolute(), headers);
    }
    catch(UnsupportedOperationException e) {
      final MantaObject probablyEmptyFile = session.getClient().head(file.getAbsolute());
      if(probablyEmptyFile.getContentLength() != 0) {
        // Not the empty-file case after all; treat the object as inaccessible.
        throw new AccessDeniedException();
      }
      return new NullInputStream(0L);
    }
  }
  catch(MantaException e) {
    throw new MantaExceptionMappingService().map("Download {0} failed", e, file);
  }
  catch(MantaClientHttpResponseException e) {
    throw new MantaHttpExceptionMappingService().map("Download {0} failed", e, file);
  }
  catch(IOException e) {
    throw new DefaultIOExceptionMappingService().map("Download {0} failed", e, file);
  }
}
// Reading a nonexistent object must raise NotfoundException carrying the
// backend's detail message.
@Test(expected = NotfoundException.class)
public void testReadNotFound() throws Exception {
  final TransferStatus status = new TransferStatus();
  try {
    final Path drive = new MantaDirectoryFeature(session).mkdir(randomDirectory(), new TransferStatus());
    new MantaReadFeature(session).read(new Path(drive, "nosuchname", EnumSet.of(Path.Type.file)), status,
        new DisabledConnectionCallback());
  } catch(NotfoundException e) {
    assertEquals("Not Found. Please contact your web hosting service provider for assistance.", e.getDetail());
    throw e;
  }
}
// Returns the currently accumulated value; null when nothing has been
// accumulated yet.
@Override
public Object collect() {
  return value;
}
// A freshly constructed MIN aggregation has no accumulated value.
@Test
public void test_default() {
  MinSqlAggregation aggregation = new MinSqlAggregation();
  assertThat(aggregation.collect()).isNull();
}
/**
 * Lists reservations in the plan of the requested queue, optionally narrowed
 * to a single reservation id and/or a [startTime, endTime] interval. ACLs are
 * checked against the queue before the plan is queried.
 */
@Override
public ReservationListResponse listReservations(
    ReservationListRequest requestInfo) throws YarnException, IOException {
  // Check if reservation system is enabled
  checkReservationSystem();
  ReservationListResponse response =
      recordFactory.newRecordInstance(ReservationListResponse.class);
  Plan plan = rValidator.validateReservationListRequest(
      reservationSystem, requestInfo);
  boolean includeResourceAllocations = requestInfo
      .getIncludeResourceAllocations();
  ReservationId reservationId = null;
  if (requestInfo.getReservationId() != null && !requestInfo
      .getReservationId().isEmpty()) {
    reservationId = ReservationId.parseReservationId(
        requestInfo.getReservationId());
  }
  checkReservationACLs(requestInfo.getQueue(),
      AuditConstants.LIST_RESERVATION_REQUEST, reservationId);
  // Negative start times are clamped to 0; an end time of -1 or lower means
  // "no upper bound".
  long startTime = Math.max(requestInfo.getStartTime(), 0);
  long endTime = requestInfo.getEndTime() <= -1? Long.MAX_VALUE : requestInfo
      .getEndTime();
  Set<ReservationAllocation> reservations;
  reservations = plan.getReservations(reservationId, new ReservationInterval(
      startTime, endTime));
  List<ReservationAllocationState> info =
      ReservationSystemUtil.convertAllocationsToReservationInfo(
          reservations, includeResourceAllocations);
  response.setReservationAllocationState(info);
  return response;
}
// Listing by a concrete reservation id must return exactly that reservation,
// and with includeResourceAllocations=false the allocation requests are empty.
@Test
public void testListReservationsByReservationId() {
  resourceManager = setupResourceManager();
  ClientRMService clientService = resourceManager.getClientRMService();
  Clock clock = new UTCClock();
  long arrival = clock.getTime();
  long duration = 60000;
  long deadline = (long) (arrival + 1.05 * duration);
  ReservationSubmissionRequest sRequest =
      submitReservationTestHelper(clientService, arrival, deadline, duration);
  ReservationId reservationID = sRequest.getReservationId();
  ReservationListResponse response = null;
  ReservationListRequest request = ReservationListRequest.newInstance(
      ReservationSystemTestUtil.reservationQ, reservationID.toString(), -1, -1, false);
  try {
    response = clientService.listReservations(request);
  } catch (Exception e) {
    Assert.fail(e.getMessage());
  }
  Assert.assertNotNull(response);
  Assert.assertEquals(1, response.getReservationAllocationState().size());
  Assert.assertEquals(response.getReservationAllocationState().get(0)
      .getReservationId().getId(), reservationID.getId());
  Assert.assertEquals(response.getReservationAllocationState().get(0)
      .getResourceAllocationRequests().size(), 0);
}
// Collects all configuration entries prefixed with STATUS_STORAGE_PREFIX,
// with the prefix stripped, as the status-storage topic settings.
public Map<String, Object> statusStorageTopicSettings() {
  return topicSettings(STATUS_STORAGE_PREFIX);
}
// Prefixed status-topic config entries must round-trip through DistributedConfig
// with the prefix stripped.
@Test
public void shouldAllowSettingStatusTopicSettings() {
  Map<String, String> topicSettings = new HashMap<>();
  topicSettings.put("foo", "foo value");
  topicSettings.put("bar", "bar value");
  topicSettings.put("baz.bim", "100");
  Map<String, String> settings = configs();
  topicSettings.forEach((k, v) -> settings.put(DistributedConfig.STATUS_STORAGE_PREFIX + k, v));
  DistributedConfig config = new DistributedConfig(settings);
  assertEquals(topicSettings, config.statusStorageTopicSettings());
}
/**
 * Parses a single CSV cell into the Java value matching the field's schema
 * type.
 *
 * @throws UnsupportedOperationException for schema types with no built-in parser
 * @throws IllegalArgumentException when the cell text does not parse as the
 *         field's type (the field name is appended for diagnosis)
 */
static Object parseCell(String cell, Schema.Field field) {
  Schema.FieldType fieldType = field.getType();
  try {
    switch (fieldType.getTypeName()) {
      case STRING:
        return cell;
      case INT16:
        return Short.parseShort(cell);
      case INT32:
        return Integer.parseInt(cell);
      case INT64:
        return Long.parseLong(cell);
      case BOOLEAN:
        return Boolean.parseBoolean(cell);
      case BYTE:
        return Byte.parseByte(cell);
      case DECIMAL:
        return new BigDecimal(cell);
      case DOUBLE:
        return Double.parseDouble(cell);
      case FLOAT:
        return Float.parseFloat(cell);
      case DATETIME:
        return Instant.parse(cell);
      default:
        throw new UnsupportedOperationException(
            "Unsupported type: " + fieldType + ", consider using withCustomRecordParsing");
    }
  } catch (IllegalArgumentException e) {
    // Re-throw with the field name so the caller can tell which column failed.
    throw new IllegalArgumentException(
        e.getMessage() + " field " + field.getName() + " was received -- type mismatch");
  }
}
// Float.parseFloat tolerates surrounding whitespace, so a padded cell still
// parses to the expected float value.
@Test
public void givenFloatWithSurroundingSpaces_parses() {
  Float floatNum = Float.parseFloat("3.141592");
  DefaultMapEntry cellToExpectedValue = new DefaultMapEntry(" 3.141592 ", floatNum);
  Schema schema =
      Schema.builder()
          .addFloatField("a_float")
          .addInt32Field("an_integer")
          .addStringField("a_string")
          .build();
  assertEquals(
      cellToExpectedValue.getValue(),
      CsvIOParseHelpers.parseCell(
          cellToExpectedValue.getKey().toString(), schema.getField("a_float")));
}
/**
 * Sets the module configuration and returns this builder for chaining.
 */
public B module(ModuleConfig module) {
  this.module = module;
  return getThis();
}
// The module config passed to the builder must appear on the built interface.
@Test
void module() {
  ModuleConfig moduleConfig = new ModuleConfig();
  InterfaceBuilder builder = new InterfaceBuilder();
  builder.module(moduleConfig);
  Assertions.assertEquals(moduleConfig, builder.build().getModule());
}
/**
 * Merges DAL query results across shards. SHOW DATABASES is answered locally
 * with the logical database name; the recognized MySQL SHOW statements get a
 * sharding-aware merged result; anything else passes through untouched.
 */
@Override
public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext,
                          final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException {
  SQLStatement dalStatement = sqlStatementContext.getSqlStatement();
  if (dalStatement instanceof MySQLShowDatabasesStatement) {
    return new LocalDataMergedResult(Collections.singleton(new LocalDataQueryResultRow(databaseName)));
  }
  ShardingSphereSchema schema = getSchema(sqlStatementContext, database);
  if (dalStatement instanceof MySQLShowTablesStatement) {
    return new LogicTablesMergedResult(shardingRule, sqlStatementContext, schema, queryResults);
  }
  if (dalStatement instanceof MySQLShowTableStatusStatement) {
    return new ShowTableStatusMergedResult(shardingRule, sqlStatementContext, schema, queryResults);
  }
  if (dalStatement instanceof MySQLShowIndexStatement) {
    return new ShowIndexMergedResult(shardingRule, sqlStatementContext, schema, queryResults);
  }
  if (dalStatement instanceof MySQLShowCreateTableStatement) {
    return new ShowCreateTableMergedResult(shardingRule, sqlStatementContext, schema, queryResults);
  }
  // Unrecognized DAL statement: return the first shard's result as-is.
  return new TransparentMergedResult(queryResults.get(0));
}
// An unrecognized SHOW statement must fall through to the transparent (pass-through) result.
@Test
void assertMergeForShowOtherStatement() throws SQLException {
  DALStatement dalStatement = new MySQLShowOtherStatement();
  SQLStatementContext sqlStatementContext = mockSQLStatementContext(dalStatement);
  ShardingDALResultMerger resultMerger = new ShardingDALResultMerger(DefaultDatabase.LOGIC_NAME, null);
  ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
  when(database.getName()).thenReturn(DefaultDatabase.LOGIC_NAME);
  assertThat(resultMerger.merge(queryResults, sqlStatementContext, database, mock(ConnectionContext.class)),
      instanceOf(TransparentMergedResult.class));
}
/**
 * Synchronously notifies the configured job-end URL for jobs run by the local
 * runner. Any non-200 response or exception is logged and retried after the
 * notification's retry interval, until a 200 arrives or retries are exhausted.
 */
public static void localRunnerNotification(JobConf conf, JobStatus status) {
  JobEndStatusInfo notification = createNotification(conf, status);
  if (notification != null) {
    do {
      try {
        int code = httpNotification(notification.getUri(),
            notification.getTimeout());
        if (code != 200) {
          // Treat any non-200 response as a failure so it goes through the
          // same logging/retry path as an I/O error.
          throw new IOException("Invalid response status code: " + code);
        }
        else {
          break;  // success — stop retrying
        }
      }
      catch (IOException ioex) {
        LOG.error("Notification error [" + notification.getUri() + "]", ioex);
      }
      catch (Exception ex) {
        LOG.error("Notification error [" + notification.getUri() + "]", ex);
      }
      try {
        Thread.sleep(notification.getRetryInterval());
      }
      catch (InterruptedException iex) {
        LOG.error("Notification retry error [" + notification + "]", iex);
      }
    } while (notification.configureForRetry());
  }
}
// $jobId and $jobStatus placeholders in the notification URI must be
// substituted with the actual job id and status before the request is sent.
@Test
public void testLocalJobRunnerUriSubstitution() throws InterruptedException {
  JobStatus jobStatus = createTestJobStatus(
      "job_20130313155005308_0001", JobStatus.SUCCEEDED);
  JobConf jobConf = createTestJobConf(
      new Configuration(), 0,
      baseUrl + "jobend?jobid=$jobId&status=$jobStatus");
  JobEndNotifier.localRunnerNotification(jobConf, jobStatus);
  // No need to wait for the notification to go through since calls are
  // synchronous
  // Validate params
  assertEquals(1, JobEndServlet.calledTimes);
  assertEquals("jobid=job_20130313155005308_0001&status=SUCCEEDED",
      JobEndServlet.requestUri.getQuery());
}
/**
 * Updates an existing sensitive word after validating that it exists and that
 * its name remains unique, then refreshes the local cache so readers see the
 * change immediately.
 */
@Override
public void updateSensitiveWord(SensitiveWordSaveVO updateReqVO) {
  // Validate existence and name uniqueness
  validateSensitiveWordExists(updateReqVO.getId());
  validateSensitiveWordNameUnique(updateReqVO.getId(), updateReqVO.getName());
  // Perform the update
  SensitiveWordDO updateObj = BeanUtils.toBean(updateReqVO, SensitiveWordDO.class);
  sensitiveWordMapper.updateById(updateObj);
  // Refresh the local cache
  initLocalCache();
}
// Updating a sensitive word whose id does not exist must fail with
// SENSITIVE_WORD_NOT_EXISTS.
@Test
public void testUpdateSensitiveWord_notExists() {
  // Prepare parameters
  SensitiveWordSaveVO reqVO = randomPojo(SensitiveWordSaveVO.class);
  // Invoke and assert the expected service exception
  assertServiceException(() -> sensitiveWordService.updateSensitiveWord(reqVO), SENSITIVE_WORD_NOT_EXISTS);
}
// Returns the identifier assigned to this NETCONF session.
@Override
public String getSessionId() {
  return sessionID;
}
// An edit-config over a chunked-framing session must succeed and the session
// must have a valid id beforehand.
@Test
public void testEditConfigRequestWithChunkedFraming() {
  log.info("Starting edit-config async");
  assertNotNull("Incorrect sessionId", session3.getSessionId());
  try {
    assertTrue("NETCONF edit-config command failed",
        session3.editConfig(RUNNING, null, SAMPLE_REQUEST));
  } catch (NetconfException e) {
    e.printStackTrace();
    fail("NETCONF edit-config test failed: " + e.getMessage());
  }
  log.info("Finishing edit-config async");
}
// Delegates to the hash of the underlying SQL text so predicates with equal
// SQL hash equally (consistent with equals).
@Override
public int hashCode() {
  return sql.hashCode();
}
// The predicate's hash must equal the hash of its SQL string.
@Test
public void testHashCode() {
  SqlPredicate sql = new SqlPredicate("foo='bar'");
  assertEquals("foo='bar'".hashCode(), sql.hashCode());
}
// Blocks on the asynchronous counter map and returns the value held for the
// key BEFORE the decrement is applied.
@Override
public long getAndDecrement(K key) {
  return complete(asyncCounterMap.getAndDecrement(key));
}
// getAndDecrement must return the pre-decrement value and leave the stored
// value decremented by one.
@Test
public void testGetAndDecrement() {
  atomicCounterMap.put(KEY1, VALUE1);
  Long beforeDecrement = atomicCounterMap.getAndDecrement(KEY1);
  assertThat(beforeDecrement, is(VALUE1));
  Long afterDecrement = atomicCounterMap.get(KEY1);
  assertThat(afterDecrement, is(VALUE1 - 1));
}
/**
 * Generates the Java source of a decoder method for the protobuf message at
 * the head of {@code queue} and records it in {@code msgDecodeCode} keyed by
 * the message's fully-qualified Java name. Nested message types encountered
 * while generating are appended to {@code queue} (unless already generated)
 * so the caller can process them in later iterations.
 *
 * @param msgDecodeCode accumulator of generated decoder methods per message type
 * @param queue         work list of message descriptors still to generate
 * @param fieldsToRead  optional whitelist of field names; null/empty means all fields
 */
void generateDecodeCodeForAMessage(Map<String, MessageDecoderMethod> msgDecodeCode,
    Queue<Descriptors.Descriptor> queue, Set<String> fieldsToRead) {
  Descriptors.Descriptor descriptor = queue.remove();
  String fullyQualifiedMsgName = ProtoBufUtils.getFullJavaName(descriptor);
  int varNum = 1;
  // Already generated for this type — nothing to do.
  if (msgDecodeCode.containsKey(fullyQualifiedMsgName)) {
    return;
  }
  StringBuilder code = new StringBuilder();
  String methodNameOfDecoder = getDecoderMethodName(fullyQualifiedMsgName);
  int indent = 1;
  // Creates decoder method for a message type. Example method signature:
  // public static Map<String, Object> decodeSample_SampleRecordMessage(Sample.SampleRecord msg)
  code.append(addIndent(
      String.format("public static Map<String, Object> %s(%s msg) {", methodNameOfDecoder, fullyQualifiedMsgName),
      indent));
  code.append(completeLine("Map<String, Object> msgMap = new HashMap<>()", ++indent));
  List<Descriptors.FieldDescriptor> descriptorsToDerive = new ArrayList<>();
  if (fieldsToRead != null && !fieldsToRead.isEmpty()) {
    // Restrict generation to the requested fields (sorted for deterministic output).
    for (String fieldName: fieldsToRead.stream().sorted().collect(Collectors.toList())) {
      if (null == descriptor.findFieldByName(fieldName)) {
        LOGGER.debug("Field " + fieldName + " not found in the descriptor");
      } else {
        descriptorsToDerive.add(descriptor.findFieldByName(fieldName));
      }
    }
  } else {
    descriptorsToDerive = descriptor.getFields();
  }
  for (Descriptors.FieldDescriptor desc : descriptorsToDerive) {
    Descriptors.FieldDescriptor.Type type = desc.getType();
    String fieldNameInCode = ProtobufInternalUtils.underScoreToCamelCase(desc.getName(), true);
    switch (type) {
      case STRING:
      case INT32:
      case INT64:
      case UINT64:
      case FIXED64:
      case FIXED32:
      case UINT32:
      case SFIXED32:
      case SFIXED64:
      case SINT32:
      case SINT64:
      case DOUBLE:
      case FLOAT:
        /* Generate code for scalar field extraction
        Example:
        If field has presence
          if (msg.hasEmail()) {
            msgMap.put("email", msg.getEmail());
          }
        OR if no presence:
          msgMap.put("email", msg.getEmail());
        OR if repeated:
          if (msg.getEmailCount() > 0) {
            msgMap.put("email", msg.getEmailList().toArray());
          }
        */
        code.append(codeForScalarFieldExtraction(desc, fieldNameInCode, indent));
        break;
      case BOOL:
        /* Generate code for boolean field extraction
        Example:
        If field has presence
          if (msg.hasIsRegistered()) {
            msgMap.put("is_registered", String.valueOf(msg.getIsRegistered()));
          }
        OR if no presence:
          msgMap.put("is_registered", String.valueOf(msg.getIsRegistered()));
        OR if repeated:
          List<Object> list1 = new ArrayList<>();
          for (String row: msg.getIsRegisteredList()) {
            list3.add(String.valueOf(row));
          }
          if (!list1.isEmpty()) {
            msgMap.put("is_registered", list1.toArray());
          }
        */
        code.append(codeForComplexFieldExtraction(
            desc, fieldNameInCode, "String", indent, ++varNum, "String.valueOf", ""));
        break;
      case BYTES:
        /* Generate code for bytes field extraction
        Example:
        If field has presence
          if (msg.hasEmail()) {
            msgMap.put("email", msg.getEmail().toByteArray());
          }
        OR if no presence:
          msgMap.put("email", msg.getEmail().toByteArray());
        OR if repeated:
          List<Object> list1 = new ArrayList<>();
          for (com.google.protobuf.ByteString row: msg.getEmailList()) {
            list1.add(row.toByteArray());
          }
          if (!list1.isEmpty()) {
            msgMap.put("email", list1.toArray());
          }
        */
        code.append(codeForComplexFieldExtraction(
            desc, fieldNameInCode, "com.google.protobuf.ByteString", indent, ++varNum, "", ".toByteArray()"));
        break;
      case ENUM:
        /* Generate code for enum field extraction
        Example:
        If field has presence
          if (msg.hasStatus()) {
            msgMap.put("status", msg.getStatus().name());
          }
        OR if no presence:
          msgMap.put("status", msg.getStatus().name());
        OR if repeated:
          List<Object> list1 = new ArrayList<>();
          for (Status row: msg.getStatusList()) {
            list1.add(row.name());
          }
          if (!list1.isEmpty()) {
            msgMap.put("status", list1.toArray());
          }
        */
        code.append(codeForComplexFieldExtraction(
            desc, fieldNameInCode, ProtoBufUtils.getFullJavaNameForEnum(desc.getEnumType()),
            indent, ++varNum, "", ".name()"));
        break;
      case MESSAGE:
        String messageType = ProtoBufUtils.getFullJavaName(desc.getMessageType());
        if (desc.isMapField()) {
          // Generated code for Map extraction. The key for the map is always a scalar object in Protobuf.
          Descriptors.FieldDescriptor valueDescriptor = desc.getMessageType().findFieldByName("value");
          if (valueDescriptor.getType() == Descriptors.FieldDescriptor.Type.MESSAGE) {
            /* Generate code for map field extraction if the value type is a message
            Example:
            If field has presence
              if (msg.hasComplexMap()) {
                Map<Object, Map<String, Object>> map1 = new HashMap<>();
                for (Map.Entry<String, ComplexTypes.TestMessage.NestedMessage> entry: msg.getComplexMapMap()
                .entrySet()) {
                  map1.put(entry.getKey(), decodeComplexTypes_TestMessage_NestedMessageMessage(entry.getValue()));
                }
                msgMap.put("complex_map", map1);
              }
            OR if no presence:
              Map<Object, Map<String, Object>> map1 = new HashMap<>();
              for (Map.Entry<String, ComplexTypes.TestMessage.NestedMessage> entry: msg.getComplexMapMap().entrySet()) {
                map1.put(entry.getKey(), decodeComplexTypes_TestMessage_NestedMessageMessage(entry.getValue()));
              }
              msgMap.put("complex_map", map1);
            */
            String valueDescClassName = ProtoBufUtils.getFullJavaName(valueDescriptor.getMessageType());
            // Queue the nested value type for decoder generation if not seen yet.
            if (!msgDecodeCode.containsKey(valueDescClassName)) {
              queue.add(valueDescriptor.getMessageType());
            }
            code.append(codeForMapWithValueMessageType(desc, fieldNameInCode, valueDescClassName, indent, varNum));
            break;
          } else {
            /* Generate code for map field extraction if the value type is a scalar
              msgMap.put("simple_map", msg.getSimpleMapMap());
            */
            code.append(completeLine(putFieldInMsgMapCode(desc.getName(),
                getProtoFieldMethodName(fieldNameInCode + "Map"), null, null), indent));
          }
        } else {
          // Plain nested message: queue it for generation and delegate decoding
          // to its (eventually generated) decoder method.
          if (!msgDecodeCode.containsKey(messageType)) {
            queue.add(desc.getMessageType());
          }
          code.append(codeForComplexFieldExtraction(desc, fieldNameInCode, messageType, indent, ++varNum,
              getDecoderMethodName(messageType), ""));
        }
        break;
      default:
        LOGGER.error(String.format("Protobuf type %s is not supported by pinot yet. Skipping this field %s", type,
            desc.getName()));
        break;
    }
  }
  code.append(completeLine("return msgMap", indent));
  code.append(addIndent("}", --indent));
  msgDecodeCode.put(fullyQualifiedMsgName, new MessageDecoderMethod(methodNameOfDecoder, code.toString()));
}
// With a field whitelist, only the requested fields are generated, only the
// nested types they reference are queued, and the emitted code matches the
// golden file byte-for-byte.
@Test
public void testGenerateDecodeCodeForAMessageForOnlySomeFieldsToRead()
    throws URISyntaxException, IOException {
  MessageCodeGen messageCodeGen = new MessageCodeGen();
  Queue<Descriptors.Descriptor> queue = new ArrayDeque<>();
  Map<String, MessageCodeGen.MessageDecoderMethod> msgDecodeCode = new HashMap<>();
  Set<String> fieldsToRead =
      Set.of(STRING_FIELD, COMPLEX_MAP, REPEATED_NESTED_MESSAGES, REPEATED_BYTES, NULLABLE_DOUBLE_FIELD);
  queue.add(ComplexTypes.TestMessage.getDescriptor());
  messageCodeGen.generateDecodeCodeForAMessage(msgDecodeCode, queue, fieldsToRead);
  Set<String> nameList = queue.stream()
      .map(Descriptors.Descriptor::getName)
      .collect(Collectors.toSet());
  assertEquals(nameList, Set.of("NestedMessage"));
  assertEquals(msgDecodeCode.size(), 1);
  URL resource = getClass().getClassLoader().getResource("codegen_output/complex_type_some_method.txt");
  String expectedCodeOutput = new String(Files.readAllBytes(Paths.get(resource.toURI())));
  MessageCodeGen.MessageDecoderMethod messageDecoderMethod =
      msgDecodeCode.get("org.apache.pinot.plugin.inputformat.protobuf.ComplexTypes.TestMessage");
  assertEquals(messageDecoderMethod.getCode(), expectedCodeOutput);
  assertEquals(messageDecoderMethod.getMethodName(),
      "decodeorg_apache_pinot_plugin_inputformat_protobuf_ComplexTypes_TestMessageMessage");
}
/**
 * Loads the dictionary type with the given id, failing with
 * DICT_TYPE_NOT_EXISTS when no such row exists. A null id is treated as
 * "nothing to validate" and yields null.
 */
@VisibleForTesting
DictTypeDO validateDictTypeExists(Long id) {
  if (id == null) {
    return null;
  }
  final DictTypeDO found = dictTypeMapper.selectById(id);
  if (found != null) {
    return found;
  }
  throw exception(DICT_TYPE_NOT_EXISTS);
}
// Validating an id that exists in the table must complete without throwing.
@Test
public void testValidateDictDataExists_success() {
  // Mock data
  DictTypeDO dbDictType = randomDictTypeDO();
  dictTypeMapper.insert(dbDictType);// @Sql: insert an existing row first
  // Invocation succeeds
  dictTypeService.validateDictTypeExists(dbDictType.getId());
}
/**
 * Plans the next batch of splits after refreshing the table metadata. The
 * first call (no prior position) performs initial discovery; subsequent calls
 * discover only the increments since {@code lastPosition}.
 */
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
  table.refresh();
  return lastPosition == null
      ? discoverInitialSplits()
      : discoverIncrementalSplits(lastPosition);
}
// Starting incrementally FROM a snapshot id (inclusive) must position the
// enumerator at that snapshot's parent, then discover exactly the files the
// snapshot appended, and continue cleanly through further empty cycles.
@Test
public void testIncrementalFromSnapshotId() throws Exception {
  appendTwoSnapshots();
  ScanContext scanContext =
      ScanContext.builder()
          .startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_SNAPSHOT_ID)
          .startSnapshotId(snapshot2.snapshotId())
          .build();
  ContinuousSplitPlannerImpl splitPlanner =
      new ContinuousSplitPlannerImpl(TABLE_RESOURCE.tableLoader().clone(), scanContext, null);
  ContinuousEnumerationResult initialResult = splitPlanner.planSplits(null);
  assertThat(initialResult.fromPosition()).isNull();
  // For inclusive behavior of snapshot2, the initial result should point to snapshot1 (as
  // snapshot2's parent)
  assertThat(initialResult.toPosition().snapshotId().longValue())
      .isEqualTo(snapshot1.snapshotId());
  assertThat(initialResult.toPosition().snapshotTimestampMs().longValue())
      .isEqualTo(snapshot1.timestampMillis());
  assertThat(initialResult.splits()).isEmpty();
  ContinuousEnumerationResult secondResult = splitPlanner.planSplits(initialResult.toPosition());
  assertThat(secondResult.fromPosition().snapshotId().longValue())
      .isEqualTo(snapshot1.snapshotId());
  assertThat(secondResult.fromPosition().snapshotTimestampMs().longValue())
      .isEqualTo(snapshot1.timestampMillis());
  assertThat(secondResult.toPosition().snapshotId().longValue())
      .isEqualTo(snapshot2.snapshotId());
  assertThat(secondResult.toPosition().snapshotTimestampMs().longValue())
      .isEqualTo(snapshot2.timestampMillis());
  IcebergSourceSplit split = Iterables.getOnlyElement(secondResult.splits());
  assertThat(split.task().files()).hasSize(1);
  Set<String> discoveredFiles =
      split.task().files().stream()
          .map(fileScanTask -> fileScanTask.file().path().toString())
          .collect(Collectors.toSet());
  // should discover dataFile2 appended in snapshot2
  Set<String> expectedFiles = ImmutableSet.of(dataFile2.path().toString());
  assertThat(discoveredFiles).containsExactlyElementsOf(expectedFiles);
  IcebergEnumeratorPosition lastPosition = secondResult.toPosition();
  for (int i = 0; i < 3; ++i) {
    lastPosition = verifyOneCycle(splitPlanner, lastPosition).lastPosition;
  }
}
// Returns the total length of the underlying source in bytes.
// @throws IOException if this reader has already been closed
@Override
public long length() throws IOException {
  checkClosed();
  return size;
}
// Constructing from a Path must expose the file's actual byte length.
@Test
void testPathConstructor() throws IOException, URISyntaxException {
  try (RandomAccessRead randomAccessSource = new RandomAccessReadMemoryMappedFile(
      Paths.get(getClass().getResource("RandomAccessReadFile1.txt").toURI()))) {
    assertEquals(130, randomAccessSource.length());
  }
}
/**
 * Looks up the binary protocol value handler registered for the given column
 * type.
 *
 * @throws IllegalArgumentException if no handler is registered for the type
 */
public static MySQLBinaryProtocolValue getBinaryProtocolValue(final BinaryColumnType binaryColumnType) {
  Preconditions.checkArgument(BINARY_PROTOCOL_VALUES.containsKey(binaryColumnType),
      "Cannot find MySQL type '%s' in column type when process binary protocol value", binaryColumnType);
  return BINARY_PROTOCOL_VALUES.get(binaryColumnType);
}
// TINY must resolve to the 1-byte integer protocol value handler.
@Test
void assertGetBinaryProtocolValueWithMySQLTypeTiny() {
  assertThat(MySQLBinaryProtocolValueFactory.getBinaryProtocolValue(MySQLBinaryColumnType.TINY),
      instanceOf(MySQLInt1BinaryProtocolValue.class));
}
/**
 * Creates a directory. A top-level path becomes a B2 bucket (using the
 * requested region, or the configured default bucket type); any deeper path
 * becomes a zero-byte placeholder object created via the touch feature.
 */
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
  try {
    if(containerService.isContainer(folder)) {
      // Bucket creation: region from the transfer status wins over the default.
      final B2BucketResponse response = session.getClient().createBucket(containerService.getContainer(folder).getName(),
          null == status.getRegion() ? BucketType.valueOf(new B2BucketTypeFeature(session, fileid).getDefault().getIdentifier())
              : BucketType.valueOf(status.getRegion()));
      final EnumSet<Path.Type> type = EnumSet.copyOf(folder.getType());
      type.add(Path.Type.volume);
      return folder.withType(type).withAttributes(new B2AttributesFinderFeature(session, fileid).toAttributes(response));
    }
    else {
      // Non-bucket directory: materialize as an empty placeholder object.
      final EnumSet<Path.Type> type = EnumSet.copyOf(folder.getType());
      type.add(Path.Type.placeholder);
      return new B2TouchFeature(session, fileid).touch(folder.withType(type), status
          .withMime(MimeTypeService.DEFAULT_CONTENT_TYPE)
          .withChecksum(writer.checksum(folder, status).compute(new NullInputStream(0L), status)));
    }
  }
  catch(B2ApiException e) {
    throw new B2ExceptionMappingService(fileid).map("Cannot create folder {0}", e, folder);
  }
  catch(IOException e) {
    throw new DefaultIOExceptionMappingService().map("Cannot create folder {0}", e, folder);
  }
}
// Creating a bucket must succeed once and then conflict on a duplicate create;
// the bucket is cleaned up afterwards.
@Test
public void testCreateBucket() throws Exception {
  final Path bucket = new Path(new AlphanumericRandomStringService().random(),
      EnumSet.of(Path.Type.directory, Path.Type.volume));
  final B2VersionIdProvider fileid = new B2VersionIdProvider(session);
  final B2DirectoryFeature feature = new B2DirectoryFeature(session, fileid);
  assertTrue(feature.isSupported(bucket.getParent(), bucket.getName()));
  feature.mkdir(bucket, new TransferStatus());
  assertThrows(ConflictException.class, () -> feature.mkdir(bucket, new TransferStatus()));
  new B2DeleteFeature(session, fileid).delete(Collections.singletonList(bucket),
      new DisabledLoginCallback(), new Delete.DisabledCallback());
}
// Evaluates the compiled term against the given row; exposed for tests.
@VisibleForTesting
Object evaluate(final GenericRow row) {
  return term.getValue(new TermEvaluationContext(row));
}
// A struct-construction expression must evaluate to a Connect Struct whose
// schema and field values mirror the expression's fields.
@Test
public void shouldEvaluateStruct() {
  // Given:
  final Expression expression1 = new CreateStructExpression(
      ImmutableList.of(
          new Field("A", new IntegerLiteral(10)),
          new Field("B", new StringLiteral("abc"))
      )
  );
  // When:
  InterpretedExpression interpreter1 = interpreter(expression1);
  // Then:
  assertThat(interpreter1.evaluate(ROW), is(
      new Struct(SchemaBuilder.struct().optional()
          .field("A", SchemaBuilder.int32().optional().build())
          .field("B", SchemaBuilder.string().optional().build())
          .build())
          .put("A", 10)
          .put("B", "abc")));
}
public void completeTx(SendRequest req) throws InsufficientMoneyException, CompletionException { lock.lock(); try { checkArgument(!req.completed, () -> "given SendRequest has already been completed"); log.info("Completing send tx with {} outputs totalling {} and a fee of {}/vkB", req.tx.getOutputs().size(), req.tx.getOutputSum().toFriendlyString(), req.feePerKb.toFriendlyString()); // Calculate a list of ALL potential candidates for spending and then ask a coin selector to provide us // with the actual outputs that'll be used to gather the required amount of value. In this way, users // can customize coin selection policies. The call below will ignore immature coinbases and outputs // we don't have the keys for. List<TransactionOutput> prelimCandidates = calculateAllSpendCandidates(true, req.missingSigsMode == MissingSigsMode.THROW); // Connect (add a value amount) unconnected inputs List<TransactionInput> inputs = connectInputs(prelimCandidates, req.tx.getInputs()); req.tx.clearInputs(); inputs.forEach(req.tx::addInput); // Warn if there are remaining unconnected inputs whose value we do not know // TODO: Consider throwing if there are inputs that we don't have a value for if (req.tx.getInputs().stream() .map(TransactionInput::getValue) .anyMatch(Objects::isNull)) log.warn("SendRequest transaction already has inputs but we don't know how much they are worth - they will be added to fee."); // If any inputs have already been added, we don't need to get their value from wallet Coin totalInput = req.tx.getInputSum(); // Calculate the amount of value we need to import. Coin valueNeeded = req.tx.getOutputSum().subtract(totalInput); // Enforce the OP_RETURN limit if (req.tx.getOutputs().stream() .filter(o -> ScriptPattern.isOpReturn(o.getScriptPubKey())) .count() > 1) // Only 1 OP_RETURN per transaction allowed. 
throw new MultipleOpReturnRequested(); // Check for dusty sends if (req.ensureMinRequiredFee && !req.emptyWallet) { // Min fee checking is handled later for emptyWallet. if (req.tx.getOutputs().stream().anyMatch(TransactionOutput::isDust)) throw new DustySendRequested(); } // Filter out candidates that are already included in the transaction inputs List<TransactionOutput> candidates = prelimCandidates.stream() .filter(output -> alreadyIncluded(req.tx.getInputs(), output)) .collect(StreamUtils.toUnmodifiableList()); CoinSelection bestCoinSelection; TransactionOutput bestChangeOutput = null; List<Coin> updatedOutputValues = null; if (!req.emptyWallet) { // This can throw InsufficientMoneyException. FeeCalculation feeCalculation = calculateFee(req, valueNeeded, req.ensureMinRequiredFee, candidates); bestCoinSelection = feeCalculation.bestCoinSelection; bestChangeOutput = feeCalculation.bestChangeOutput; updatedOutputValues = feeCalculation.updatedOutputValues; } else { // We're being asked to empty the wallet. What this means is ensuring "tx" has only a single output // of the total value we can currently spend as determined by the selector, and then subtracting the fee. checkState(req.tx.getOutputs().size() == 1, () -> "empty wallet TX must have a single output only"); CoinSelector selector = req.coinSelector == null ? coinSelector : req.coinSelector; bestCoinSelection = selector.select((Coin) network.maxMoney(), candidates); candidates = null; // Selector took ownership and might have changed candidates. Don't access again. 
req.tx.getOutput(0).setValue(bestCoinSelection.totalValue()); log.info(" emptying {}", bestCoinSelection.totalValue().toFriendlyString()); } bestCoinSelection.outputs() .forEach(req.tx::addInput); if (req.emptyWallet) { if (!adjustOutputDownwardsForFee(req.tx, bestCoinSelection, req.feePerKb, req.ensureMinRequiredFee)) throw new CouldNotAdjustDownwards(); } if (updatedOutputValues != null) { for (int i = 0; i < updatedOutputValues.size(); i++) { req.tx.getOutput(i).setValue(updatedOutputValues.get(i)); } } if (bestChangeOutput != null) { req.tx.addOutput(bestChangeOutput); log.info(" with {} change", bestChangeOutput.getValue().toFriendlyString()); } // Now shuffle the outputs to obfuscate which is the change. if (req.shuffleOutputs) req.tx.shuffleOutputs(); // Now sign the inputs, thus proving that we are entitled to redeem the connected outputs. if (req.signInputs) signTransaction(req); // Check size. final int size = req.tx.messageSize(); if (size > Transaction.MAX_STANDARD_TX_SIZE) throw new ExceededMaxTransactionSize(); // Label the transaction as being self created. We can use this later to spend its change output even before // the transaction is confirmed. We deliberately won't bother notifying listeners here as there's not much // point - the user isn't interested in a confidence transition they made themselves. getConfidence(req.tx).setSource(TransactionConfidence.Source.SELF); // Label the transaction as being a user requested payment. This can be used to render GUI wallet // transaction lists more appropriately, especially when the wallet starts to generate transactions itself // for internal purposes. req.tx.setPurpose(Transaction.Purpose.USER_PAYMENT); // Record the exchange rate that was valid when the transaction was completed. req.tx.setExchangeRate(req.exchangeRate); req.tx.setMemo(req.memo); req.completed = true; log.info(" completed: {}", req.tx); } finally { lock.unlock(); } }
// Tests sending dust and OP_RETURN without value, should throw DustySendRequested
// because sending dust is not allowed in any case.
@Test(expected = Wallet.DustySendRequested.class)
public void sendDustAndOpReturnWithoutValueTest() throws Exception {
  receiveATransactionAmount(wallet, myAddress, Coin.COIN);
  Transaction tx = new Transaction();
  tx.addOutput(Coin.ZERO, ScriptBuilder.createOpReturnScript("hello world!".getBytes()));
  tx.addOutput(Coin.SATOSHI, OTHER_ADDRESS);
  SendRequest request = SendRequest.forTx(tx);
  request.ensureMinRequiredFee = true;
  wallet.completeTx(request);
}
/**
 * Renders this column as "<SimpleClassName> <type> r:<repetitionLevel>
 * d:<definitionLevel> [field, path]" for debugging output.
 */
@Override
public String toString() {
  StringBuilder description = new StringBuilder(this.getClass().getSimpleName());
  description.append(" ").append(type.getName());
  description.append(" r:").append(repetitionLevel);
  description.append(" d:").append(definitionLevel);
  description.append(" ").append(Arrays.toString(fieldPath));
  return description.toString();
}
// The schema's string rendering must match the original schema text.
@Test
public void testSchema() {
  assertEquals(schemaString, schema.toString());
}
// Returns the underlying resolved file object this wrapper delegates to.
@NonNull
@Override
public FileObject getResolvedFileObject() {
  return resolvedFileObject;
}
// The wrapper must hand back the exact resolved file object it was given.
@Test
public void testGetResolvedFileObject() {
  assertEquals( resolvedFileObject, fileObject.getResolvedFileObject() );
}
// Initializes the Compute Engine exactly once on server start; subsequent
// start notifications are no-ops after initialization has completed.
@Override
public void onServerStart(Server server) {
  if (!done) {
    initCe();
    this.done = true;
  }
}
// Server start must kick off both the processing and cleaning schedulers.
@Test
public void clean_queue_then_start_scheduler_of_workers() {
  underTest.onServerStart(server);
  verify(processingScheduler).startScheduling();
  verify(cleaningScheduler).startScheduling();
}
/**
 * Fetches the target schema from the schema registry, falling back to the
 * source registry URL when no dedicated target URL is configured.
 *
 * @throws HoodieSchemaFetchException wrapping any registry failure; the URL in
 *         the message is truncated to avoid leaking embedded credentials
 */
@Override
public Schema getTargetSchema() {
  String registryUrl = getStringWithAltKeys(config, HoodieSchemaProviderConfig.SRC_SCHEMA_REGISTRY_URL);
  String targetRegistryUrl =
      getStringWithAltKeys(config, HoodieSchemaProviderConfig.TARGET_SCHEMA_REGISTRY_URL, registryUrl);
  try {
    return parseSchemaFromRegistry(targetRegistryUrl);
  } catch (Exception e) {
    throw new HoodieSchemaFetchException(String.format(
        "Error reading target schema from registry. Please check %s is configured correctly. If that is not configured then check %s. Truncated URL: %s",
        Config.SRC_SCHEMA_REGISTRY_URL_PROP,
        Config.TARGET_SCHEMA_REGISTRY_URL_PROP,
        StringUtils.truncate(targetRegistryUrl, 10, 10)), e);
  }
}
// Fetching the target schema from a credential-free URL must not attach any
// HTTP auth headers to the registry request.
@Test
public void testGetTargetSchemaShouldRequestSchemaWithoutCreds() throws Exception {
  TypedProperties props = getProps();
  props.put("hoodie.deltastreamer.schemaprovider.registry.url", "http://localhost/subjects/test/versions/latest");
  SchemaRegistryProvider underTest = getUnderTest(props, -1, true);
  Schema actual = underTest.getTargetSchema();
  assertNotNull(actual);
  assertEquals(getExpectedConvertedSchema(), actual);
  verify(mockRestService, never()).setHttpHeaders(any());
}