focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override public void decorate(final ShardingRule shardingRule, final ConfigurationProperties props, final SQLRewriteContext sqlRewriteContext, final RouteContext routeContext) { SQLStatementContext sqlStatementContext = sqlRewriteContext.getSqlStatementContext(); if (!isAlterOrDropIndexStatement(sqlStatementContext) && !isCursorAvailableStatement(sqlStatementContext) && !containsShardingTable(shardingRule, sqlStatementContext)) { return; } if (!sqlRewriteContext.getParameters().isEmpty()) { Collection<ParameterRewriter> parameterRewriters = new ShardingParameterRewriterBuilder(routeContext, sqlRewriteContext.getDatabase().getSchemas(), sqlStatementContext).getParameterRewriters(); rewriteParameters(sqlRewriteContext, parameterRewriters); } sqlRewriteContext.addSQLTokenGenerators(new ShardingTokenGenerateBuilder(shardingRule, routeContext, sqlStatementContext).getSQLTokenGenerators()); }
@Test void assertDecorate() { SQLRewriteContext sqlRewriteContext = mock(SQLRewriteContext.class); when(sqlRewriteContext.getDatabase()).thenReturn(mock(ShardingSphereDatabase.class)); when(sqlRewriteContext.getParameters()).thenReturn(Collections.singletonList(new Object())); when(sqlRewriteContext.getSqlStatementContext()).thenReturn(mock(SQLStatementContext.class, RETURNS_DEEP_STUBS)); new ShardingSQLRewriteContextDecorator().decorate(mock(ShardingRule.class), mock(ConfigurationProperties.class), sqlRewriteContext, mock(RouteContext.class)); assertTrue(sqlRewriteContext.getSqlTokens().isEmpty()); }
@Override public TopicAssignment place( PlacementSpec placement, ClusterDescriber cluster ) throws InvalidReplicationFactorException { RackList rackList = new RackList(random, cluster.usableBrokers()); throwInvalidReplicationFactorIfNonPositive(placement.numReplicas()); throwInvalidReplicationFactorIfZero(rackList.numUnfencedBrokers()); throwInvalidReplicationFactorIfTooFewBrokers(placement.numReplicas(), rackList.numTotalBrokers()); List<List<Integer>> placements = new ArrayList<>(placement.numPartitions()); for (int partition = 0; partition < placement.numPartitions(); partition++) { placements.add(rackList.place(placement.numReplicas())); } return new TopicAssignment( placements.stream().map(replicas -> new PartitionAssignment(replicas, cluster)).collect(Collectors.toList()) ); }
@Test public void testAvoidFencedReplicaIfPossibleOnSingleRack() { MockRandom random = new MockRandom(); RackList rackList = new RackList(random, Arrays.asList( new UsableBroker(3, Optional.empty(), false), new UsableBroker(1, Optional.empty(), true), new UsableBroker(0, Optional.empty(), false), new UsableBroker(4, Optional.empty(), false), new UsableBroker(2, Optional.empty(), false)).iterator()); assertEquals(5, rackList.numTotalBrokers()); assertEquals(4, rackList.numUnfencedBrokers()); assertEquals(Collections.singletonList(Optional.empty()), rackList.rackNames()); assertThrows(InvalidReplicationFactorException.class, () -> rackList.place(0)); assertThrows(InvalidReplicationFactorException.class, () -> rackList.place(-1)); assertEquals(Arrays.asList(3, 4, 0, 2), rackList.place(4)); assertEquals(Arrays.asList(4, 0, 2, 3), rackList.place(4)); assertEquals(Arrays.asList(0, 2, 3, 4), rackList.place(4)); assertEquals(Arrays.asList(2, 3, 4, 0), rackList.place(4)); assertEquals(Arrays.asList(0, 4, 3, 2), rackList.place(4)); }
public static List<String> getPythonUdfList(String udfFile) { return getPythonUdfList(SystemConfiguration.getInstances().getPythonHome(), udfFile); }
@Test @Ignore @Disabled("this is local test!") void pythonTest() throws ExecutionException { String pythonPath = "python"; String udfFile = "C:\\project\\companyProjects\\dinky-quickstart-python\\udtf.py"; List<String> pythonUdfList = UDFUtil.getPythonUdfList(pythonPath, udfFile); }
@Override public ResultSet getTables(Connection connection, String dbName) throws SQLException { return connection.getMetaData().getTables(connection.getCatalog(), dbName, null, new String[] {"TABLE", "VIEW", "MATERIALIZED VIEW", "FOREIGN TABLE"}); }
@Test public void testListTableNames() throws SQLException { new Expectations() { { dataSource.getConnection(); result = connection; minTimes = 0; connection.getCatalog(); result = "t1"; minTimes = 0; connection.getMetaData().getTables("t1", "test", null, new String[] {"TABLE", "VIEW", "MATERIALIZED VIEW", "FOREIGN TABLE"}); result = tableResult; minTimes = 0; } }; try { JDBCMetadata jdbcMetadata = new JDBCMetadata(properties, "catalog", dataSource); List<String> result = jdbcMetadata.listTableNames("test"); List<String> expectResult = Lists.newArrayList("tbl1", "tbl2", "tbl3"); Assert.assertEquals(expectResult, result); } catch (Exception e) { Assert.fail(); } }
@Override public void uncaughtException(Thread thread, Throwable e) { try { logger.error("Uncaught exception in main thread. Exiting with status code 1.", e); System.err.println("Uncaught exception in main thread. Exiting with status code 1."); e.printStackTrace(); } catch (Throwable t) { PrintStream originalStdErr = DataflowWorkerLoggingInitializer.getOriginalStdErr(); if (originalStdErr != null) { originalStdErr.println("Uncaught exception in main thread. Exiting with status code 1."); e.printStackTrace(originalStdErr); originalStdErr.println( "UncaughtExceptionHandler caused another exception to be thrown, as follows:"); t.printStackTrace(originalStdErr); } } finally { runtime.halt(1); } }
@Test public void testUncaughtExceptionHandlerForciblyHaltsRuntime() { JvmRuntime runtime = mock(JvmRuntime.class); WorkerUncaughtExceptionHandler handler = new WorkerUncaughtExceptionHandler(runtime, LOG); try { handler.uncaughtException(Thread.currentThread(), new Exception("oh noes!")); } catch (Exception e) { // Ignore any exceptions being thrown and validate that the runtime is halted below. } verify(runtime).halt(1); }
@Override // NameNode public void stop() { stop(true); }
@Test public void testCanReadData() throws IOException { Path file1 = new Path("/fileToRead.dat"); Configuration conf = new HdfsConfiguration(); MiniDFSCluster cluster = null; FileSystem fileSys = null; BackupNode backup = null; try { // Start NameNode and BackupNode cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(0).format(true).build(); fileSys = cluster.getFileSystem(); long txid = cluster.getNameNodeRpc().getTransactionID(); backup = startBackupNode(conf, StartupOption.BACKUP, 1); waitCheckpointDone(cluster, txid); // Setup dual NameNode configuration for DataNodes String rpcAddrKeyPreffix = DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + ".bnCluster"; String nnAddr = cluster.getNameNode().getNameNodeAddressHostPortString(); conf.get(DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY); String bnAddr = backup.getNameNodeAddressHostPortString(); conf.set(DFSConfigKeys.DFS_NAMESERVICES, "bnCluster"); conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, "bnCluster"); conf.set(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + ".bnCluster", "nnActive, nnBackup"); conf.set(rpcAddrKeyPreffix + ".nnActive", nnAddr); conf.set(rpcAddrKeyPreffix + ".nnBackup", bnAddr); cluster.startDataNodes(conf, 3, true, StartupOption.REGULAR, null); DFSTestUtil.createFile( fileSys, file1, fileSize, fileSize, blockSize, (short)3, seed); // Read the same file from file systems pointing to NN and BN FileSystem bnFS = FileSystem.get( new Path("hdfs://" + bnAddr).toUri(), conf); String nnData = DFSTestUtil.readFile(fileSys, file1); String bnData = DFSTestUtil.readFile(bnFS, file1); assertEquals("Data read from BackupNode and NameNode is not the same.", nnData, bnData); } catch(IOException e) { LOG.error("Error in TestBackupNode: ", e); assertTrue(e.getLocalizedMessage(), false); } finally { if(fileSys != null) fileSys.close(); if(backup != null) backup.stop(); if(cluster != null) cluster.shutdown(); } }
public static ServiceInstances transferServersToServiceInstances(Flux<List<ServiceInstance>> servers, InstanceTransformer instanceTransformer) { List<Instance> instanceList = Collections.synchronizedList(new ArrayList<>()); servers.flatMap((Function<List<ServiceInstance>, Publisher<?>>) serviceInstances -> Flux.fromIterable(serviceInstances.stream() .map(instanceTransformer::transform) .collect(Collectors.toList()))).subscribe(instance -> instanceList.add((Instance) instance)); String serviceName = ""; Map<String, String> serviceMetadata = new HashMap<>(); if (!CollectionUtils.isEmpty(instanceList)) { serviceName = instanceList.get(0).getService(); serviceMetadata = instanceList.get(0).getServiceMetadata(); } ServiceKey serviceKey = new ServiceKey(MetadataContext.LOCAL_NAMESPACE, serviceName); return new DefaultServiceInstances(serviceKey, instanceList, serviceMetadata); }
@Test public void testTransferNotEmptyInstances() { try ( MockedStatic<ApplicationContextAwareUtils> mockedApplicationContextAwareUtils = mockStatic(ApplicationContextAwareUtils.class); MockedStatic<MetadataContextHolder> mockedMetadataContextHolder = mockStatic(MetadataContextHolder.class) ) { mockedApplicationContextAwareUtils.when(() -> ApplicationContextAwareUtils.getProperties(anyString())) .thenReturn(testNamespaceAndService); MetadataContext metadataContext = Mockito.mock(MetadataContext.class); mockedMetadataContextHolder.when(MetadataContextHolder::get).thenReturn(metadataContext); int instanceSize = 100; int weight = 50; List<ServiceInstance> instances = new ArrayList<>(); for (int i = 0; i < instanceSize; i++) { DefaultInstance instance = new DefaultInstance(); instance.setService(testNamespaceAndService); instance.setId("ins" + i); instance.setPort(8080); instance.setHost("127.0.0." + i); instance.setWeight(weight); instances.add(new PolarisServiceInstance(instance)); } ServiceInstances serviceInstances = RouterUtils.transferServersToServiceInstances(Flux.just(instances), new PolarisInstanceTransformer()); assertThat(serviceInstances.getInstances()).isNotNull(); assertThat(serviceInstances.getInstances().size()).isEqualTo(instanceSize); List<Instance> polarisInstances = serviceInstances.getInstances(); for (int i = 0; i < instanceSize; i++) { Instance instance = polarisInstances.get(i); assertThat(instance.getNamespace()).isEqualTo(testNamespaceAndService); assertThat(instance.getService()).isEqualTo(testNamespaceAndService); assertThat(instance.getId()).isEqualTo("ins" + i); assertThat(instance.getHost()).isEqualTo("127.0.0." + i); assertThat(instance.getPort()).isEqualTo(8080); assertThat(instance.getWeight()).isEqualTo(weight); } } }
protected Destination createDestination(String destName) throws JMSException { String simpleName = getSimpleName(destName); byte destinationType = getDestinationType(destName); if (destinationType == ActiveMQDestination.QUEUE_TYPE) { LOG.info("Creating queue: {}", destName); return getSession().createQueue(simpleName); } else if (destinationType == ActiveMQDestination.TOPIC_TYPE) { LOG.info("Creating topic: {}", destName); return getSession().createTopic(simpleName); } else { return createTemporaryDestination(destName); } }
@Test public void testCreateDestination_topic() throws JMSException { assertDestinationNameType("dest", TOPIC_TYPE, asAmqDest(jmsClient.createDestination("topic://dest"))); }
public BigMatrix tm(BigMatrix B) { if (m != B.m) { throw new IllegalArgumentException(String.format("Matrix multiplication A' * B: %d x %d vs %d x %d", m, n, B.m, B.n)); } BigMatrix C = new BigMatrix(n, B.n); C.mm(TRANSPOSE, this, NO_TRANSPOSE, B); return C; }
@Test public void testTm() { System.out.println("tm"); double[][] A = { {4.0, 1.2, 0.8}, {1.2, 9.0, 1.2}, {0.8, 1.2, 16.0} }; double[] B = {-4.0, 1.0, -3.0}; double[] C = {-1.0505, 0.2719, -0.1554}; BigMatrix a = BigMatrix.of(A).inverse(); BigMatrix b = BigMatrix.column(B); assertTrue(MathEx.equals((b.tm(a)).toArray()[0], C, 1E-4)); assertTrue(MathEx.equals((b.transpose().mm(a)).toArray()[0], C, 1E-4)); assertTrue(MathEx.equals((b.transpose(false).mm(a)).toArray()[0], C, 1E-4)); }
private VlanId() { super(UNTAGGED); }
@Test(expected = IllegalArgumentException.class) public void testIllicitVlanString() { VlanId.vlanId("5000"); }
@Override public ClassLoader getDefaultClassLoader() { return DEFAULT_CLASS_LOADER; }
@Test public void resources_found() { runWithClassloader(provider -> { try { var resources = provider.getDefaultClassLoader().getResources(""); assertThat(Collections.list(resources)).isNotEmpty(); } catch (IOException e) { throw new UncheckedIOException(e); } }); }
<K, V> ShareInFlightBatch<K, V> fetchRecords(final Deserializers<K, V> deserializers, final int maxRecords, final boolean checkCrcs) { // Creating an empty ShareInFlightBatch ShareInFlightBatch<K, V> inFlightBatch = new ShareInFlightBatch<>(partition); if (cachedBatchException != null) { // If the event that a CRC check fails, reject the entire record batch because it is corrupt. rejectRecordBatch(inFlightBatch, currentBatch); inFlightBatch.setException(cachedBatchException); cachedBatchException = null; return inFlightBatch; } if (cachedRecordException != null) { inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE); inFlightBatch.setException(cachedRecordException); cachedRecordException = null; return inFlightBatch; } if (isConsumed) return inFlightBatch; initializeNextAcquired(); try { int recordsInBatch = 0; while (recordsInBatch < maxRecords) { lastRecord = nextFetchedRecord(checkCrcs); if (lastRecord == null) { // Any remaining acquired records are gaps while (nextAcquired != null) { inFlightBatch.addGap(nextAcquired.offset); nextAcquired = nextAcquiredRecord(); } break; } while (nextAcquired != null) { if (lastRecord.offset() == nextAcquired.offset) { // It's acquired, so we parse it and add it to the batch Optional<Integer> leaderEpoch = maybeLeaderEpoch(currentBatch.partitionLeaderEpoch()); TimestampType timestampType = currentBatch.timestampType(); ConsumerRecord<K, V> record = parseRecord(deserializers, partition, leaderEpoch, timestampType, lastRecord, nextAcquired.deliveryCount); inFlightBatch.addRecord(record); recordsRead++; bytesRead += lastRecord.sizeInBytes(); recordsInBatch++; nextAcquired = nextAcquiredRecord(); break; } else if (lastRecord.offset() < nextAcquired.offset) { // It's not acquired, so we skip it break; } else { // It's acquired, but there's no non-control record at this offset, so it's a gap inFlightBatch.addGap(nextAcquired.offset); } nextAcquired = nextAcquiredRecord(); } } } catch 
(SerializationException se) { nextAcquired = nextAcquiredRecord(); if (inFlightBatch.isEmpty()) { inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE); inFlightBatch.setException(se); } else { cachedRecordException = se; inFlightBatch.setHasCachedException(true); } } catch (CorruptRecordException e) { if (inFlightBatch.isEmpty()) { // If the event that a CRC check fails, reject the entire record batch because it is corrupt. rejectRecordBatch(inFlightBatch, currentBatch); inFlightBatch.setException(e); } else { cachedBatchException = e; inFlightBatch.setHasCachedException(true); } } return inFlightBatch; }
@Test public void testNegativeFetchCount() { long firstMessageId = 0; int startingOffset = 0; int numRecords = 10; ShareFetchResponseData.PartitionData partitionData = new ShareFetchResponseData.PartitionData() .setRecords(newRecords(startingOffset, numRecords, firstMessageId)) .setAcquiredRecords(acquiredRecords(0L, 10)); try (final Deserializers<String, String> deserializers = newStringDeserializers()) { ShareCompletedFetch completedFetch = newShareCompletedFetch(partitionData); ShareInFlightBatch<String, String> batch = completedFetch.fetchRecords(deserializers, -10, true); List<ConsumerRecord<String, String>> records = batch.getInFlightRecords(); assertEquals(0, records.size()); Acknowledgements acknowledgements = batch.getAcknowledgements(); assertEquals(0, acknowledgements.size()); } }
public static FileRewriteCoordinator get() { return INSTANCE; }
@Test public void testCommitMultipleRewrites() throws NoSuchTableException, IOException { sql("CREATE TABLE %s (id INT, data STRING) USING iceberg", tableName); Dataset<Row> df = newDF(1000); // add first two files df.coalesce(1).writeTo(tableName).append(); df.coalesce(1).writeTo(tableName).append(); Table table = validationCatalog.loadTable(tableIdent); String firstFileSetID = UUID.randomUUID().toString(); long firstFileSetSnapshotId = table.currentSnapshot().snapshotId(); ScanTaskSetManager taskSetManager = ScanTaskSetManager.get(); try (CloseableIterable<FileScanTask> tasks = table.newScan().planFiles()) { // stage first 2 files for compaction taskSetManager.stageTasks(table, firstFileSetID, Lists.newArrayList(tasks)); } // add two more files df.coalesce(1).writeTo(tableName).append(); df.coalesce(1).writeTo(tableName).append(); table.refresh(); String secondFileSetID = UUID.randomUUID().toString(); try (CloseableIterable<FileScanTask> tasks = table.newScan().appendsAfter(firstFileSetSnapshotId).planFiles()) { // stage 2 more files for compaction taskSetManager.stageTasks(table, secondFileSetID, Lists.newArrayList(tasks)); } ImmutableSet<String> fileSetIDs = ImmutableSet.of(firstFileSetID, secondFileSetID); for (String fileSetID : fileSetIDs) { // read and pack 2 files into 1 split Dataset<Row> scanDF = spark .read() .format("iceberg") .option(SparkReadOptions.SCAN_TASK_SET_ID, fileSetID) .option(SparkReadOptions.SPLIT_SIZE, Long.MAX_VALUE) .load(tableName); // write the combined data as one file scanDF .writeTo(tableName) .option(SparkWriteOptions.REWRITTEN_FILE_SCAN_TASK_SET_ID, fileSetID) .append(); } // commit both rewrites at the same time FileRewriteCoordinator rewriteCoordinator = FileRewriteCoordinator.get(); Set<DataFile> rewrittenFiles = fileSetIDs.stream() .flatMap(fileSetID -> taskSetManager.fetchTasks(table, fileSetID).stream()) .map(t -> t.asFileScanTask().file()) .collect(Collectors.toSet()); Set<DataFile> addedFiles = fileSetIDs.stream() 
.flatMap(fileSetID -> rewriteCoordinator.fetchNewFiles(table, fileSetID).stream()) .collect(Collectors.toSet()); table.newRewrite().rewriteFiles(rewrittenFiles, addedFiles).commit(); table.refresh(); Assert.assertEquals("Should produce 5 snapshots", 5, Iterables.size(table.snapshots())); Map<String, String> summary = table.currentSnapshot().summary(); Assert.assertEquals("Deleted files count must match", "4", summary.get("deleted-data-files")); Assert.assertEquals("Added files count must match", "2", summary.get("added-data-files")); Object rowCount = scalarSql("SELECT count(*) FROM %s", tableName); Assert.assertEquals("Row count must match", 4000L, rowCount); }
public void setConfiguration(InfinispanEmbeddedConfiguration configuration) { this.configuration = configuration; }
@Test public void checkAggregationFromOneRoute() throws Exception { InfinispanEmbeddedConfiguration configuration = new InfinispanEmbeddedConfiguration(); configuration.setCacheContainer(cacheContainer); InfinispanEmbeddedAggregationRepository repo = new InfinispanEmbeddedAggregationRepository(getCacheName()); repo.setConfiguration(configuration); context.addRoutes(new RouteBuilder() { @Override public void configure() { from("direct:start") .aggregate(header(CORRELATOR_HEADER)) .aggregationRepository(repo) .aggregationStrategy((oldExchange, newExchange) -> { if (oldExchange == null) { return newExchange; } else { Integer n = newExchange.getIn().getBody(Integer.class); Integer o = oldExchange.getIn().getBody(Integer.class); Integer v = (o == null ? 0 : o) + (n == null ? 0 : n); oldExchange.getIn().setBody(v, Integer.class); return oldExchange; } }) .completionSize(COMPLETION_SIZE) .to("mock:result"); } }); MockEndpoint mock = getMockEndpoint("mock:result"); mock.expectedMessageCount(2); mock.expectedBodiesReceived(1 + 3 + 4 + 5, 6 + 7 + 20 + 21); template.sendBodyAndHeader("direct:start", 1, CORRELATOR_HEADER, CORRELATOR_HEADER); template.sendBodyAndHeader("direct:start", 3, CORRELATOR_HEADER, CORRELATOR_HEADER); template.sendBodyAndHeader("direct:start", 4, CORRELATOR_HEADER, CORRELATOR_HEADER); template.sendBodyAndHeader("direct:start", 5, CORRELATOR_HEADER, CORRELATOR_HEADER); template.sendBodyAndHeader("direct:start", 6, CORRELATOR_HEADER, CORRELATOR_HEADER); template.sendBodyAndHeader("direct:start", 7, CORRELATOR_HEADER, CORRELATOR_HEADER); template.sendBodyAndHeader("direct:start", 20, CORRELATOR_HEADER, CORRELATOR_HEADER); template.sendBodyAndHeader("direct:start", 21, CORRELATOR_HEADER, CORRELATOR_HEADER); mock.assertIsSatisfied(); }
Map<String, Object> sourceProducerConfig(String role) { Map<String, Object> props = new HashMap<>(); props.putAll(originalsWithPrefix(SOURCE_CLUSTER_PREFIX)); props.keySet().retainAll(MirrorClientConfig.CLIENT_CONFIG_DEF.names()); props.putAll(originalsWithPrefix(PRODUCER_CLIENT_PREFIX)); props.putAll(originalsWithPrefix(SOURCE_PREFIX + PRODUCER_CLIENT_PREFIX)); addClientId(props, role); return props; }
@Test public void testSourceProducerConfigWithSourcePrefix() { String prefix = MirrorConnectorConfig.SOURCE_PREFIX + MirrorConnectorConfig.PRODUCER_CLIENT_PREFIX; Map<String, String> connectorProps = makeProps(prefix + "acks", "1"); MirrorConnectorConfig config = new TestMirrorConnectorConfig(connectorProps); Map<String, Object> connectorProducerProps = config.sourceProducerConfig("test"); Map<String, Object> expectedProducerProps = new HashMap<>(); expectedProducerProps.put("acks", "1"); expectedProducerProps.put("client.id", "source1->target2|ConnectorName|test"); assertEquals(expectedProducerProps, connectorProducerProps, prefix + " source producer config not matching"); }
public long getIntervalInMs() { return this.intervalInMs; }
@Test void testIntervalInMs() { Pane<?> pane = mock(Pane.class); when(pane.getIntervalInMs()).thenReturn(100L); assertEquals(100L, pane.getIntervalInMs()); }
@Parameter(hidden = true) @DeleteMapping(value = "/{id}") @ResponseStatus(HttpStatus.OK) @ApiException(DELETE_ACCESS_TOKEN_ERROR) public Result<Boolean> delAccessTokenById(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @PathVariable(value = "id") int id) { accessTokenService.deleteAccessTokenById(loginUser, id); return Result.success(true); }
@Test public void testDelAccessTokenById() throws Exception { testCreateToken(); MvcResult mvcResult = mockMvc.perform(delete("/access-tokens/1") .header("sessionId", sessionId)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assertions.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); }
@Override public void subscribe(Subscriber<? super T>[] subscribers) { if (!validate(subscribers)) { return; } @SuppressWarnings("unchecked") Subscriber<? super T>[] newSubscribers = new Subscriber[subscribers.length]; for (int i = 0; i < subscribers.length; i++) { AutoDisposingSubscriberImpl<? super T> subscriber = new AutoDisposingSubscriberImpl<>(scope, subscribers[i]); newSubscribers[i] = subscriber; } source.subscribe(newSubscribers); }
@Test public void autoDispose_withScopeProviderCompleted_shouldNotReportDoubleSubscriptions() { TestSubscriber<Object> firstSubscriber = new TestSubscriber<>(); TestSubscriber<Object> secondSubscriber = new TestSubscriber<>(); //noinspection unchecked Subscriber<Object>[] subscribers = new Subscriber[] {firstSubscriber, secondSubscriber}; PublishProcessor.create() .parallel(DEFAULT_PARALLELISM) .to(autoDisposable(ScopeProvider.UNBOUND)) .subscribe(subscribers); firstSubscriber.assertNoValues(); firstSubscriber.assertNoErrors(); secondSubscriber.assertNoValues(); secondSubscriber.assertNoErrors(); rule.assertNoErrors(); }
@VisibleForTesting static <T, CoderT extends Coder<T>, CandidateT> void verifyCompatible( CoderT coder, Type candidateType) throws IncompatibleCoderException { // Various representations of the coder's class @SuppressWarnings("unchecked") Class<CoderT> coderClass = (Class<CoderT>) coder.getClass(); TypeDescriptor<CoderT> coderDescriptor = TypeDescriptor.of(coderClass); // Various representations of the actual coded type @SuppressWarnings("unchecked") TypeDescriptor<T> codedDescriptor = CoderUtils.getCodedType(coderDescriptor); @SuppressWarnings("unchecked") Class<T> codedClass = (Class<T>) codedDescriptor.getRawType(); Type codedType = codedDescriptor.getType(); // Various representations of the candidate type @SuppressWarnings("unchecked") TypeDescriptor<CandidateT> candidateDescriptor = (TypeDescriptor<CandidateT>) TypeDescriptor.of(candidateType); @SuppressWarnings("unchecked") Class<CandidateT> candidateClass = (Class<CandidateT>) candidateDescriptor.getRawType(); // If coder has type Coder<T> where the actual value of T is lost // to erasure, then we cannot rule it out. if (candidateType instanceof TypeVariable) { return; } // If the raw types are not compatible, we can certainly rule out // coder compatibility if (!codedClass.isAssignableFrom(candidateClass)) { throw new IncompatibleCoderException( String.format( "Cannot encode elements of type %s with coder %s because the" + " coded type %s is not assignable from %s", candidateType, coder, codedClass, candidateType), coder, candidateType); } // we have established that this is a covariant upcast... though // coders are invariant, we are just checking one direction @SuppressWarnings("unchecked") TypeDescriptor<T> candidateOkDescriptor = (TypeDescriptor<T>) candidateDescriptor; // If the coded type is a parameterized type where any of the actual // type parameters are not compatible, then the whole thing is certainly not // compatible. 
if ((codedType instanceof ParameterizedType) && !isNullOrEmpty(coder.getCoderArguments())) { ParameterizedType parameterizedSupertype = (ParameterizedType) candidateOkDescriptor.getSupertype(codedClass).getType(); Type[] typeArguments = parameterizedSupertype.getActualTypeArguments(); List<? extends Coder<?>> typeArgumentCoders = coder.getCoderArguments(); if (typeArguments.length < typeArgumentCoders.size()) { throw new IncompatibleCoderException( String.format( "Cannot encode elements of type %s with coder %s:" + " the generic supertype %s has %s type parameters, which is less than the" + " number of coder arguments %s has (%s).", candidateOkDescriptor, coder, parameterizedSupertype, typeArguments.length, coder, typeArgumentCoders.size()), coder, candidateOkDescriptor.getType()); } for (int i = 0; i < typeArgumentCoders.size(); i++) { try { Coder<?> typeArgumentCoder = typeArgumentCoders.get(i); verifyCompatible( typeArgumentCoder, candidateDescriptor.resolveType(typeArguments[i]).getType()); } catch (IncompatibleCoderException exn) { throw new IncompatibleCoderException( String.format( "Cannot encode elements of type %s with coder %s" + " because some component coder is incompatible", candidateType, coder), coder, candidateType, exn); } } } }
@Test public void testIntVersusStringIncompatibility() throws Exception { thrown.expect(IncompatibleCoderException.class); thrown.expectMessage("not assignable"); CoderRegistry.verifyCompatible(BigEndianIntegerCoder.of(), String.class); }
@Override public Optional<IndexSetConfig> get(String id) { return get(new ObjectId(id)); }
@Test @MongoDBFixtures("MongoIndexSetServiceTest.json") public void getReturnsExistingIndexSetConfig() throws Exception { final Optional<IndexSetConfig> indexSetConfig = indexSetService.get(new ObjectId("57f3d721a43c2d59cb750001")); assertThat(indexSetConfig) .isPresent() .contains( IndexSetConfig.create( "57f3d721a43c2d59cb750001", "Test 1", "This is the index set configuration for Test 1", true, true, "test_1", 4, 1, MessageCountRotationStrategy.class.getCanonicalName(), MessageCountRotationStrategyConfig.create(1000), NoopRetentionStrategy.class.getCanonicalName(), NoopRetentionStrategyConfig.create(10), ZonedDateTime.of(2016, 10, 4, 17, 0, 0, 0, ZoneOffset.UTC), "standard", "test_1", null, 1, false ) ); }
public static <KLeftT, KRightT> KTableHolder<KLeftT> build( final KTableHolder<KLeftT> left, final KTableHolder<KRightT> right, final ForeignKeyTableTableJoin<KLeftT, KRightT> join, final RuntimeBuildContext buildContext ) { final LogicalSchema leftSchema = left.getSchema(); final LogicalSchema rightSchema = right.getSchema(); final ProcessingLogger logger = buildContext.getProcessingLogger( join.getProperties().getQueryContext() ); final ExpressionEvaluator expressionEvaluator; final CodeGenRunner codeGenRunner = new CodeGenRunner( leftSchema, buildContext.getKsqlConfig(), buildContext.getFunctionRegistry() ); final Optional<ColumnName> leftColumnName = join.getLeftJoinColumnName(); final Optional<Expression> leftJoinExpression = join.getLeftJoinExpression(); if (leftColumnName.isPresent()) { expressionEvaluator = codeGenRunner.buildCodeGenFromParseTree( new UnqualifiedColumnReferenceExp(leftColumnName.get()), "Left Join Expression" ); } else if (leftJoinExpression.isPresent()) { expressionEvaluator = codeGenRunner.buildCodeGenFromParseTree( leftJoinExpression.get(), "Left Join Expression" ); } else { throw new IllegalStateException("Both leftColumnName and leftJoinExpression are empty."); } final ForeignKeyJoinParams<KRightT> joinParams = ForeignKeyJoinParamsFactory .create(expressionEvaluator, leftSchema, rightSchema, logger); final Formats formats = join.getFormats(); final PhysicalSchema physicalSchema = PhysicalSchema.from( joinParams.getSchema(), formats.getKeyFeatures(), formats.getValueFeatures() ); final Serde<KLeftT> keySerde = left.getExecutionKeyFactory().buildKeySerde( formats.getKeyFormat(), physicalSchema, join.getProperties().getQueryContext() ); final Serde<GenericRow> valSerde = buildContext.buildValueSerde( formats.getValueFormat(), physicalSchema, join.getProperties().getQueryContext() ); final KTable<KLeftT, GenericRow> result; switch (join.getJoinType()) { case INNER: result = left.getTable().join( right.getTable(), 
joinParams.getKeyExtractor(), joinParams.getJoiner(), buildContext.getMaterializedFactory().create(keySerde, valSerde) ); break; case LEFT: result = left.getTable().leftJoin( right.getTable(), joinParams.getKeyExtractor(), joinParams.getJoiner(), buildContext.getMaterializedFactory().create(keySerde, valSerde) ); break; default: throw new IllegalStateException("invalid join type: " + join.getJoinType()); } return KTableHolder.unmaterialized( result, joinParams.getSchema(), left.getExecutionKeyFactory() ); }
@Test @SuppressWarnings({"unchecked", "rawtypes"}) public void shouldDoInnerJoinOnKey() { // Given: givenInnerJoin(left, L_KEY); // When: final KTableHolder<Struct> result = join.build(planBuilder, planInfo); // Then: final ArgumentCaptor<KsqlKeyExtractor> ksqlKeyExtractor = ArgumentCaptor.forClass(KsqlKeyExtractor.class); verify(leftKTable).join( same(rightKTable), ksqlKeyExtractor.capture(), eq(new KsqlValueJoiner(LEFT_SCHEMA.value().size(), RIGHT_SCHEMA.value().size(), 0)), any(Materialized.class) ); verifyNoMoreInteractions(leftKTable, rightKTable, resultKTable); final GenericKey extractedKey = GenericKey.genericKey(LEFT_KEY); assertThat(ksqlKeyExtractor.getValue().apply(LEFT_ROW), is(extractedKey)); assertThat(result.getTable(), is(resultKTable)); assertThat(result.getExecutionKeyFactory(), is(executionKeyFactory)); }
void truncateTable() throws IOException { if ( table == null ) { return; } Cursor tableRows = Cursor.createCursor( table ); while ( tableRows.moveToNextRow() ) { tableRows.deleteCurrentRow(); } }
@Test public void testTruncateTable() throws IOException { data.createDatabase( mdbFile ); data.createTable( "TruncatingThisTable", generateRowMeta() ); data.addRowsToTable( generateRowData( 10 ) ); assertEquals( 10, data.table.getRowCount() ); data.truncateTable(); assertEquals( 0, data.table.getRowCount() ); data.addRowToTable( generateRowData( 1 ).get( 0 ) ); assertEquals( 1, data.table.getRowCount() ); data.closeDatabase(); }
@Udf(description = "Returns the INT base raised to the INT exponent.") public Double power( @UdfParameter( value = "base", description = "the base of the power." ) final Integer base, @UdfParameter( value = "exponent", description = "the exponent of the power." ) final Integer exponent ) { return power( base == null ? null : base.doubleValue(), exponent == null ? null : exponent.doubleValue() ); }
// Verifies POWER supports negative exponents (reciprocal powers) uniformly
// across the int, long and double overloads.
@Test
public void shouldHandleNegativeExponent() {
    assertThat(udf.power(15, -1), closeTo(0.06666666666666667, 0.000000000000001));
    assertThat(udf.power(15L, -1L), closeTo(0.06666666666666667, 0.000000000000001));
    assertThat(udf.power(15.0, -1.0), closeTo(0.06666666666666667, 0.000000000000001));
    assertThat(udf.power(15, -2), closeTo(0.0044444444444444444, 0.000000000000001));
    assertThat(udf.power(15L, -2L), closeTo(0.0044444444444444444, 0.000000000000001));
    assertThat(udf.power(15.0, -2.0), closeTo(0.0044444444444444444, 0.000000000000001));
}
/**
 * Prepares the download status for a file, resolving local name collisions.
 * When the local target already exists, probes "name-1", "name-2", ... until
 * a free name is found and clears the exists flag. When the parent directory
 * was renamed, the file's rename target follows the renamed parent.
 *
 * Fix: the rename log in the else branch previously ran unconditionally,
 * emitting "Changed download target from X to null" when no rename applied;
 * it is now only logged when a rename actually occurred.
 *
 * @param file     remote file being downloaded
 * @param local    proposed local target
 * @param parent   status of the parent transfer item (may carry a rename)
 * @param progress progress listener
 * @return status, possibly carrying a renamed local target
 * @throws BackgroundException from the superclass preparation
 */
@Override
public TransferStatus prepare(final Path file, final Local local, final TransferStatus parent, final ProgressListener progress) throws BackgroundException {
    final TransferStatus status = super.prepare(file, local, parent, progress);
    if(status.isExists()) {
        // Probe name-1, name-2, ... (keeping the extension) until unused.
        final String filename = file.getName();
        int no = 0;
        do {
            String proposal = String.format("%s-%d", FilenameUtils.getBaseName(filename), ++no);
            if(StringUtils.isNotBlank(Path.getExtension(filename))) {
                proposal += String.format(".%s", Path.getExtension(filename));
            }
            status.withRename(LocalFactory.get(local.getParent(), proposal));
        }
        while(status.getRename().local.exists());
        if(log.isInfoEnabled()) {
            log.info(String.format("Changed download target from %s to %s", local, status.getRename().local));
        }
        if(log.isDebugEnabled()) {
            log.debug(String.format("Clear exist flag for file %s", local));
        }
        // The renamed target does not exist, so downstream must not skip it.
        status.setExists(false);
    }
    else {
        if(parent.getRename().local != null) {
            // Follow the parent's rename so the file lands in the renamed folder.
            status.withRename(LocalFactory.get(parent.getRename().local, file.getName()));
            if(log.isInfoEnabled()) {
                log.info(String.format("Changed download target from %s to %s", local, status.getRename().local));
            }
        }
    }
    return status;
}
// Verifies that when a directory's local target exists it is renamed to
// "t-1" and that a child file's rename target follows the renamed parent.
@Test
public void testDirectoryDownload() throws Exception {
    RenameFilter f = new RenameFilter(new DisabledDownloadSymlinkResolver(), new NullTransferSession(new Host(new TestProtocol())));
    final String name = new AsciiRandomStringService().random();
    // Local stub that reports "exists" only for the original name, forcing a rename.
    final NullLocal local = new NullLocal("/tmp", name) {
        @Override
        public boolean exists() {
            return name.equals(this.getName());
        }

        @Override
        public boolean isFile() {
            return false;
        }

        @Override
        public boolean isDirectory() {
            return true;
        }
    };
    final Path directory = new Path("t", EnumSet.of(Path.Type.directory));
    final Path file = new Path(directory, name, EnumSet.of(Path.Type.file));
    final TransferStatus directoryStatus = f.prepare(directory, local, new TransferStatus().exists(true), new DisabledProgressListener());
    final TransferStatus fileStatus = f.prepare(file, new NullLocal(local, "f"), directoryStatus, new DisabledProgressListener());
    assertNotNull(fileStatus.getRename().local);
    final String s = System.getProperty("file.separator");
    assertEquals(String.format("%stmp%st-1%s%s", s, s, s, name), fileStatus.getRename().local.getAbsolute());
}
/**
 * Logs a CATALOG_RESIZE event (old and new catalog length, two longs) into
 * the event ring buffer. Best-effort: if tryClaim cannot reserve space
 * (index <= 0) the event is silently dropped.
 *
 * @param oldCatalogLength catalog length before the resize, in bytes
 * @param newCatalogLength catalog length after the resize, in bytes
 */
public void logCatalogResize(final long oldCatalogLength, final long newCatalogLength) {
    final int length = SIZE_OF_LONG * 2;
    final int captureLength = captureLength(length);
    final int encodedLength = encodedLength(captureLength);
    final ManyToOneRingBuffer ringBuffer = this.ringBuffer;
    final int index = ringBuffer.tryClaim(CATALOG_RESIZE.toEventCodeId(), encodedLength);

    if (index > 0) {
        try {
            encodeCatalogResize(
                (UnsafeBuffer)ringBuffer.buffer(),
                index,
                captureLength,
                length,
                oldCatalogLength,
                newCatalogLength);
        }
        finally {
            // A claimed slot must always be committed, even if encoding throws.
            ringBuffer.commit(index);
        }
    }
}
// Verifies the CATALOG_RESIZE event is written at the buffer tail with the
// correct header and both lengths encoded little-endian after the header.
@Test
void logCatalogResize() {
    final int offset = ALIGNMENT * 3;
    logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, offset);
    final int captureLength = SIZE_OF_LONG * 2;
    final long catalogLength = 42;
    final long newCatalogLength = 142;

    logger.logCatalogResize(catalogLength, newCatalogLength);

    verifyLogHeader(logBuffer, offset, CATALOG_RESIZE.toEventCodeId(), captureLength, captureLength);
    assertEquals(catalogLength,
        logBuffer.getLong(encodedMsgOffset(offset + LOG_HEADER_LENGTH), LITTLE_ENDIAN));
    assertEquals(newCatalogLength,
        logBuffer.getLong(encodedMsgOffset(offset + LOG_HEADER_LENGTH + SIZE_OF_LONG), LITTLE_ENDIAN));
}
/**
 * Computes the migration status by comparing the last migration recorded in
 * the database with the highest migration number known to this build.
 */
public Status getStatus() {
    return getStatus(migrationHistory.getLastMigrationNumber(), migrationSteps.getMaxMigrationNumber());
}
// Verifies that when the database holds a higher migration number than this
// build knows about, the status is REQUIRES_DOWNGRADE.
@Test
public void getStatus_returns_REQUIRES_DOWNGRADE_when_max_migration_number_in_table_is_greater_than_max_migration_number_in_configuration() {
    mockMaxMigrationNumberInDb(200L);
    mockMaxMigrationNumberInConfig(150L);

    assertThat(underTest.getStatus()).isEqualTo(REQUIRES_DOWNGRADE);
}
/**
 * @return the phase in which this analyzer runs ({@code ANALYSIS_PHASE})
 */
@Override
public AnalysisPhase getAnalysisPhase() {
    return ANALYSIS_PHASE;
}
// Verifies the file-name analyzer runs in the INFORMATION_COLLECTION phase.
@Test
public void testGetAnalysisPhase() {
    FileNameAnalyzer instance = new FileNameAnalyzer();
    AnalysisPhase expResult = AnalysisPhase.INFORMATION_COLLECTION;
    AnalysisPhase result = instance.getAnalysisPhase();
    assertEquals(expResult, result);
}
/**
 * @return the proxy instance that records invocations against the template type
 */
public T getRecordingProxy() {
    return _templateProxy;
}
// Verifies that calling a method the recording proxy does not support
// (schema()) raises UnsupportedOperationException.
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testUnsupportedMethod() {
    makeOne().getRecordingProxy().schema();
}
/**
 * Returns all recorded query details whose event time is strictly after the
 * given timestamp.
 *
 * @param eventTime exclusive lower bound on the event time (milliseconds)
 * @return matching query details, in recorded order
 */
public static List<QueryDetail> getQueryDetailsAfterTime(long eventTime) {
    List<QueryDetail> results = Lists.newArrayList();
    for (QueryDetail queryDetail : TOTAL_QUERIES) {
        if (queryDetail.getEventTime() > eventTime) {
            results.add(queryDetail);
        }
    }
    return results;
}
// End-to-end check that executing a query records both a RUNNING and a
// FINISHED detail in the queue, each carrying the original SQL text.
// The config flag is toggled on for the test and restored afterwards.
@Test
public void testExecutor() throws Exception {
    boolean old = Config.enable_collect_query_detail_info;
    Config.enable_collect_query_detail_info = true;
    starRocksAssert.withDatabase("db1")
            .useDatabase("db1")
            .withTable("create table test_running_detail (c1 int, c2 int) " +
                    "properties('replication_num'='1') ");
    String sql = "select * from test_running_detail";
    QueryStatement parsedStmt = (QueryStatement) UtFrameUtils.parseStmtWithNewParser(sql, connectContext);
    StmtExecutor executor = new StmtExecutor(connectContext, parsedStmt);
    long startTime = System.currentTimeMillis();
    executor.addRunningQueryDetail(parsedStmt);
    executor.execute();
    executor.addFinishedQueryDetail();
    List<QueryDetail> queryDetails = QueryDetailQueue.getQueryDetailsAfterTime(startTime);
    QueryDetail runningDetail = queryDetails.get(0);
    Assert.assertEquals(QueryDetail.QueryMemState.RUNNING, runningDetail.getState());
    Assert.assertEquals(sql, runningDetail.getSql());
    QueryDetail finishedDetail = queryDetails.get(1);
    Assert.assertEquals(QueryDetail.QueryMemState.FINISHED, finishedDetail.getState());
    Assert.assertEquals(sql, finishedDetail.getSql());
    Config.enable_collect_query_detail_info = old;
}
/**
 * Flattens a nested map into a single-level map with dotted keys for nested
 * maps (e.g. {@code map.key}) and indexed keys for lists (e.g.
 * {@code list[0]}), preserving encounter order.
 *
 * @param source the possibly nested source map
 * @return a new flattened {@link LinkedHashMap}
 */
public static Map<String, Object> getFlattenedMap(Map<String, Object> source) {
    Map<String, Object> result = new LinkedHashMap<>();
    buildFlattenedMap(result, source, null);
    return result;
}
// Verifies nested maps flatten to dotted keys and lists to indexed keys.
@Test
public void testGetFlattenedMap() {
    Map<String, Object> source = new HashMap<>();
    source.put("map", Collections.singletonMap("key", "abc"));
    source.put("list", Collections.singletonList(123));
    Map<String, Object> result = new LinkedHashMap<>();
    result.put("list[0]", 123);
    result.put("map.key", "abc");
    Map<String, Object> map = MapUtil.getFlattenedMap(source);
    Assertions.assertEquals(result, map);
}
/**
 * @return the number of parameters of this distribution, i.e. 2
 */
@Override
public int length() {
    return 2;
}
// Verifies the negative binomial distribution reports 2 parameters,
// including after sampling (rand() must not affect length()).
@Test
public void testLength() {
    System.out.println("length");
    NegativeBinomialDistribution instance = new NegativeBinomialDistribution(3, 0.3);
    instance.rand();
    assertEquals(2, instance.length());
}
/**
 * Determines the replay id to use for the given topic, consulting in
 * priority order: the endpoint's explicit replayId, the endpoint's
 * per-topic map (short name or full channel name), the component's
 * per-topic map, then the endpoint and component defaults.
 *
 * @param endpoint  the Salesforce endpoint
 * @param topicName the topic (short) name
 * @return the first configured replay id, or empty if none configured
 */
static Optional<Long> determineReplayIdFor(final SalesforceEndpoint endpoint, final String topicName) {
    final String channelName = getChannelName(topicName);

    final Long replayId = endpoint.getReplayId();

    final SalesforceComponent component = endpoint.getComponent();

    final SalesforceEndpointConfig endpointConfiguration = endpoint.getConfiguration();
    final Map<String, Long> endpointInitialReplayIdMap = endpointConfiguration.getInitialReplayIdMap();
    // Short topic name takes precedence over the /topic/... channel name.
    final Long endpointReplayId
            = endpointInitialReplayIdMap.getOrDefault(topicName, endpointInitialReplayIdMap.get(channelName));
    final Long endpointDefaultReplayId = endpointConfiguration.getDefaultReplayId();

    final SalesforceEndpointConfig componentConfiguration = component.getConfig();
    final Map<String, Long> componentInitialReplayIdMap = componentConfiguration.getInitialReplayIdMap();
    final Long componentReplayId
            = componentInitialReplayIdMap.getOrDefault(topicName, componentInitialReplayIdMap.get(channelName));
    final Long componentDefaultReplayId = componentConfiguration.getDefaultReplayId();

    // the endpoint values have priority over component values, and the
    // default values priority over given topic values
    return Stream.of(replayId, endpointReplayId, componentReplayId, endpointDefaultReplayId, componentDefaultReplayId)
            .filter(Objects::nonNull).findFirst();
}
// Verifies the initial replay id map accepts both "my-topic" and
// "/topic/my-topic" key syntaxes, that the short name wins when both are
// present, and that the default replay id is used for unknown topics.
@Test
public void shouldSupportInitialConfigMapWithTwoKeySyntaxes() throws Exception {
    final Map<String, Long> initialReplayIdMap = new HashMap<>();
    initialReplayIdMap.put("my-topic-1", 10L);
    initialReplayIdMap.put("/topic/my-topic-1", 20L);
    initialReplayIdMap.put("/topic/my-topic-2", 30L);

    final SalesforceEndpointConfig config = new SalesforceEndpointConfig();
    config.setDefaultReplayId(14L);
    config.setInitialReplayIdMap(initialReplayIdMap);

    final SalesforceComponent component = mock(SalesforceComponent.class);
    final SalesforceEndpoint endpoint = mock(SalesforceEndpoint.class);

    when(endpoint.getReplayId()).thenReturn(null);
    when(endpoint.getComponent()).thenReturn(component);
    when(endpoint.getConfiguration()).thenReturn(config);
    when(component.getConfig()).thenReturn(new SalesforceEndpointConfig());

    assertEquals(Optional.of(10L), determineReplayIdFor(endpoint, "my-topic-1"),
            "Expecting replayId for `my-topic-1` to be 10, as short topic names have priority");

    assertEquals(Optional.of(30L), determineReplayIdFor(endpoint, "my-topic-2"),
            "Expecting replayId for `my-topic-2` to be 30, the only one given");

    assertEquals(Optional.of(14L), determineReplayIdFor(endpoint, "my-topic-3"),
            "Expecting replayId for `my-topic-3` to be 14, the default");
}
/**
 * Creates the upsert target for primitive (single-value) targets; the
 * evaluation context is not used for primitives.
 */
@Override
public UpsertTarget create(ExpressionEvalContext evalContext) {
    return new PrimitiveUpsertTarget();
}
// Verifies the singleton descriptor creates a PrimitiveUpsertTarget.
@Test
public void test_create() {
    PrimitiveUpsertTargetDescriptor descriptor = PrimitiveUpsertTargetDescriptor.INSTANCE;

    // when
    UpsertTarget target = descriptor.create(mock());

    // then
    assertThat(target).isInstanceOf(PrimitiveUpsertTarget.class);
}
@SuppressWarnings("unchecked") @Override public <T extends Statement> ConfiguredStatement<T> inject( final ConfiguredStatement<T> statement ) { try { if (statement.getStatement() instanceof CreateAsSelect) { registerForCreateAs((ConfiguredStatement<? extends CreateAsSelect>) statement); } else if (statement.getStatement() instanceof CreateSource) { registerForCreateSource((ConfiguredStatement<? extends CreateSource>) statement); } } catch (final KsqlStatementException e) { throw e; } catch (final KsqlException e) { throw new KsqlStatementException( ErrorMessageUtil.buildErrorMessage(e), statement.getMaskedStatementText(), e.getCause()); } // Remove schema id from SessionConfig return stripSchemaIdConfig(statement); }
// Verifies that without a Schema Registry URL configured, a CREATE AS
// SELECT whose format needs registration fails with a clear error.
@Test
public void shouldNotRegisterSchemaForSchemaRegistryDisabledFormatCreateAsSelect() {
    // Given:
    config = new KsqlConfig(ImmutableMap.of());
    givenStatement("CREATE STREAM sink WITH(value_format='DELIMITED') AS SELECT * FROM SOURCE;");

    // When:
    final KsqlSchemaRegistryNotConfiguredException e =
        assertThrows(KsqlSchemaRegistryNotConfiguredException.class, () -> injector.inject(statement));

    // Then:
    assertThat(e.getMessage(),
        containsString("Cannot create topic 'SINK' with format AVRO without configuring"));
}
/**
 * Collapses a batch of change records by primary key: consecutive INSERT,
 * UPDATE and DELETE operations on the same key are merged into a single
 * record via the per-operation merge helpers. Records of any other
 * operation type are ignored.
 *
 * @param dataRecords the raw change records, in arrival order
 * @return the merged records (one per surviving key)
 */
public List<DataRecord> merge(final List<DataRecord> dataRecords) {
    Map<DataRecord.Key, DataRecord> result = new HashMap<>();
    for (DataRecord record : dataRecords) {
        switch (record.getType()) {
            case INSERT:
                mergeInsert(record, result);
                break;
            case UPDATE:
                mergeUpdate(record, result);
                break;
            case DELETE:
                mergeDelete(record, result);
                break;
            default:
                // Other operation types are not merged, matching prior behavior.
                break;
        }
    }
    return new ArrayList<>(result.values());
}
// Verifies two UPDATEs on the same key merge into one UPDATE that keeps the
// first record's before-image (old value 50) and the last record's
// after-image (new value 200).
@Test
void assertUpdateBeforeUpdate() {
    DataRecord beforeDataRecord = mockUpdateDataRecord(1, 1, 10, 100);
    DataRecord afterDataRecord = mockUpdateDataRecord(1, 1, 10, 200);
    Collection<DataRecord> actual = groupEngine.merge(Arrays.asList(beforeDataRecord, afterDataRecord));
    assertThat(actual.size(), is(1));
    DataRecord dataRecord = actual.iterator().next();
    assertThat(dataRecord.getType(), is(PipelineSQLOperationType.UPDATE));
    assertThat(dataRecord.getTableName(), is("order"));
    assertThat(dataRecord.getActualTableName(), is("order_0"));
    assertThat(dataRecord.getCommitTime(), is(456L));
    assertColumnsMatched(dataRecord.getColumn(0), new Column("id", 1, 1, false, true));
    assertColumnsMatched(dataRecord.getColumn(1), new Column("user_id", 10, 10, false, false));
    assertColumnsMatched(dataRecord.getColumn(2), new Column("total_price", 50, 200, true, false));
}
/**
 * Returns the snapshot value at the given quantile. The {@code quantiles}
 * array is parallel to {@code values}; a binary search locates the
 * pre-computed quantile at or just below the requested one, clamping to the
 * first/last value at the edges. An empty snapshot yields 0.0.
 *
 * @param quantile a value in [0..1]
 * @throws IllegalArgumentException if quantile is outside [0..1] or NaN
 */
@Override
public double getValue(double quantile) {
    if (quantile < 0.0 || quantile > 1.0 || Double.isNaN(quantile)) {
        throw new IllegalArgumentException(quantile + " is not in [0..1]");
    }

    if (values.length == 0) {
        return 0.0;
    }

    int posx = Arrays.binarySearch(quantiles, quantile);
    if (posx < 0)
        // No exact match: binarySearch returns -(insertionPoint) - 1;
        // recover the index just below the insertion point.
        posx = ((-posx) - 1) - 1;

    if (posx < 1) {
        return values[0];
    }

    if (posx >= values.length) {
        return values[values.length - 1];
    }

    return values[posx];
}
// Verifies the 1.0 quantile clamps to the last (largest) snapshot value.
@Test
public void bigQuantilesAreTheLastValue() {
    assertThat(snapshot.getValue(1.0))
            .isEqualTo(5.0, offset(0.1));
}
/**
 * Removes the configuration of the given class for the given subject.
 * Requires write permission; both arguments must be non-null.
 *
 * @param subject     the configuration subject
 * @param configClass the configuration class to clear
 */
@Override
public <S, C extends Config<S>> void removeConfig(S subject, Class<C> configClass) {
    checkPermission(CONFIG_WRITE);
    checkNotNull(subject, NULL_SUBJECT_MSG);
    checkNotNull(configClass, NULL_CCLASS_MSG);
    store.clearConfig(subject, configClass);
}
// Verifies removeConfig(): the no-arg overload clears all subjects, and the
// per-subject overload clears configs for each removed subject.
@Test
public void testRemoveConfig() {
    assertThat(configService.getSubjectFactory(String.class), nullValue());
    assertThat(configService.getSubjectFactory("key"), nullValue());

    registry.registerConfigFactory(config1Factory);
    registry.registerConfigFactory(config2Factory);
    configService.applyConfig("configKey", BasicConfig1.class, new ObjectMapper().createObjectNode());

    configService.applyConfig("key1", "key", "config1", new ObjectMapper().createObjectNode());
    configService.applyConfig("key1", "keyxx", "config3", new ObjectMapper().createObjectNode());
    configService.applyConfig("key2", "key1", "config4", new ObjectMapper().createObjectNode());

    configService.removeConfig();

    Set<String> subjects = configService.getSubjects(factory1.subjectClass());
    assertThat(subjects.size(), is(0));

    Set<String> subjects2 = configService.getSubjects(factory2.subjectClass());
    assertThat(subjects2.size(), is(0));

    configService.applyConfig("key1", "key", "config1", new ObjectMapper().createObjectNode());
    configService.applyConfig("key1", "keyxx", "config3", new ObjectMapper().createObjectNode());
    configService.applyConfig("key1", "key1", "config4", new ObjectMapper().createObjectNode());

    @SuppressWarnings("unchecked")
    Set<String> configs = configService.getSubjects(
            configService.getSubjectFactory("key1").subjectClass());

    configs.forEach(c -> configService.removeConfig(c));
    Set<String> newConfig1 = configService.getSubjects(factory1.subjectClass());
    assertThat(newConfig1, notNullValue());
}
/**
 * Sets the CVSS score thresholds used for filtering.
 * NOTE(review): the list is stored by reference, not copied — callers
 * should not mutate it after passing it in.
 *
 * @param cvssBelow the CVSS thresholds
 */
public void setCvssBelow(List<Double> cvssBelow) {
    this.cvssBelow = cvssBelow;
}
// Deliberately empty placeholder (squid:S2699 suppressed): the setter is
// covered by other tests; kept so the IDE does not regenerate a stub.
@Test
@SuppressWarnings("squid:S2699")
public void testSetCvssBelow() {
    //already tested, this is just left so the IDE doesn't recreate it.
}
/**
 * Opens an upload stream for the file, with recovery for two WebDAV quirks
 * when the Expect: 100-continue header is in use:
 * a 412 Precondition Failed is retried with the (presumably expired) lock
 * token removed, and a 417 Expectation Failed is retried without the
 * Expect header. Other failures propagate unchanged.
 *
 * @param file     remote file to write
 * @param status   transfer status (may carry a lock id)
 * @param callback connection callback
 * @return the response output stream for the upload
 * @throws BackgroundException on unrecoverable failures
 */
@Override
public HttpResponseOutputStream<Void> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    try {
        return this.write(file, this.toHeaders(file, status, expect), status);
    }
    catch(ConflictException e) {
        if(expect) {
            if(null != status.getLockId()) {
                // Handle 412 Precondition Failed with expired token
                log.warn(String.format("Retry failure %s with lock id %s removed", e, status.getLockId()));
                return this.write(file, this.toHeaders(file, status.withLockId(null), expect), status);
            }
        }
        throw e;
    }
    catch(InteroperabilityException e) {
        if(expect) {
            // Handle 417 Expectation Failed
            log.warn(String.format("Retry failure %s with Expect: Continue removed", e));
            return this.write(file, this.toHeaders(file, status.withLockId(null), false), status);
        }
        throw e;
    }
}
// Verifies chunked/resumed uploads: writes the first 1024 bytes, then the
// remainder with an offset in append mode, and checks the reassembled file
// matches the original content before cleaning up.
@Test
public void testWriteContentRange() throws Exception {
    final DAVWriteFeature feature = new DAVWriteFeature(session);
    final Path test = new Path(new DefaultHomeFinderService(session).find(),
            new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final byte[] content = RandomUtils.nextBytes(64000);
    {
        final TransferStatus status = new TransferStatus();
        status.setOffset(0L);
        status.setLength(1024L);
        final HttpResponseOutputStream<Void> out = feature.write(test, status, new DisabledConnectionCallback());
        // Write first 1024
        new StreamCopier(status, status).withOffset(status.getOffset()).withLimit(status.getLength()).transfer(new ByteArrayInputStream(content), out);
        out.close();
    }
    assertTrue(new DAVFindFeature(session).find(test));
    assertEquals(1024L, new DefaultAttributesFinderFeature(session).find(test).getSize());
    {
        // Remaining chunked transfer with offset
        final TransferStatus status = new TransferStatus();
        status.setLength(content.length - 1024L);
        status.setOffset(1024L);
        status.setAppend(true);
        final HttpResponseOutputStream<Void> out = feature.write(test, status, new DisabledConnectionCallback());
        new StreamCopier(status, status).withOffset(status.getOffset()).withLimit(status.getLength()).transfer(new ByteArrayInputStream(content), out);
        out.close();
    }
    final ByteArrayOutputStream out = new ByteArrayOutputStream(content.length);
    IOUtils.copy(new DAVReadFeature(session).read(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback()), out);
    assertArrayEquals(content, out.toByteArray());
    new DAVDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Formats an expression to its SQL text using default format options
 * (no identifier escaping).
 */
public static String formatExpression(final Expression expression) {
    return formatExpression(expression, FormatOptions.of(s -> false));
}
// Verifies a CAST expression formats as "CAST(<value> AS <type>)".
@Test
public void shouldFormatCast() {
    // Given:
    final Cast cast = new Cast(
        new LongLiteral(1),
        new Type(SqlTypes.DOUBLE));

    // When:
    final String result = ExpressionFormatter.formatExpression(cast);

    // Then:
    assertThat(result, equalTo("CAST(1 AS DOUBLE)"));
}
/**
 * Parses a resource configuration value with no upper bound on the memory
 * component (delegates with {@code Long.MAX_VALUE}).
 *
 * @throws AllocationConfigurationException if the value is malformed
 */
public static ConfigurableResource parseResourceConfigValue(String value)
        throws AllocationConfigurationException {
    return parseResourceConfigValue(value, Long.MAX_VALUE);
}
// Verifies that specifying memory twice in an absolute resource string
// ("2048 1024 mb") is rejected as an invalid resource.
@Test
public void testDuplicateMemoryDefinitionAbsolute() throws Exception {
    expectInvalidResource("memory");
    parseResourceConfigValue("2048 1024 mb, 2 vcores");
}
/**
 * Builds the physical source for the given data source, dispatching on the
 * source type (stream vs. table) and on whether the key format is windowed.
 *
 * @param buildContext   the plan build context
 * @param dataSource     the logical data source
 * @param contextStacker query context stacker for naming
 * @return the schema stream/table wrapper for the source
 * @throws UnsupportedOperationException for unknown source types
 */
public static SchemaKStream<?> buildSource(
    final PlanBuildContext buildContext,
    final DataSource dataSource,
    final QueryContext.Stacker contextStacker
) {
  final boolean windowed = dataSource.getKsqlTopic().getKeyFormat().isWindowed();
  switch (dataSource.getDataSourceType()) {
    case KSTREAM:
      if (windowed) {
        return buildWindowedStream(buildContext, dataSource, contextStacker);
      }
      return buildStream(buildContext, dataSource, contextStacker);

    case KTABLE:
      if (windowed) {
        return buildWindowedTable(buildContext, dataSource, contextStacker);
      }
      return buildTable(buildContext, dataSource, contextStacker);

    default:
      throw new UnsupportedOperationException(
          "Source type:" + dataSource.getDataSourceType());
  }
}
// Verifies that when an existing query used the legacy pseudocolumn
// version, rebuilding the source keeps that version (no silent upgrade)
// and still produces a valid schema.
@Test
public void shouldReplaceNonWindowedStreamSourceWithMatchingPseudoColumnVersion() {
    // Given:
    givenNonWindowedStream();
    givenExistingQueryWithOldPseudoColumnVersion(streamSource);

    // When:
    final SchemaKStream<?> result = SchemaKSourceFactory.buildSource(
        buildContext,
        dataSource,
        contextStacker
    );

    // Then:
    assertThat(((StreamSource) result.getSourceStep()).getPseudoColumnVersion(),
        equalTo(LEGACY_PSEUDOCOLUMN_VERSION_NUMBER));
    assertValidSchema(result);
}
static public int facilityStringToint(String facilityStr) { if ("KERN".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_KERN; } else if ("USER".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_USER; } else if ("MAIL".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_MAIL; } else if ("DAEMON".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_DAEMON; } else if ("AUTH".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_AUTH; } else if ("SYSLOG".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_SYSLOG; } else if ("LPR".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LPR; } else if ("NEWS".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_NEWS; } else if ("UUCP".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_UUCP; } else if ("CRON".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_CRON; } else if ("AUTHPRIV".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_AUTHPRIV; } else if ("FTP".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_FTP; } else if ("NTP".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_NTP; } else if ("AUDIT".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_AUDIT; } else if ("ALERT".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_ALERT; } else if ("CLOCK".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_CLOCK; } else if ("LOCAL0".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LOCAL0; } else if ("LOCAL1".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LOCAL1; } else if ("LOCAL2".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LOCAL2; } else if ("LOCAL3".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LOCAL3; } else if ("LOCAL4".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LOCAL4; } else if ("LOCAL5".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LOCAL5; } else if ("LOCAL6".equalsIgnoreCase(facilityStr)) { 
return SyslogConstants.LOG_LOCAL6; } else if ("LOCAL7".equalsIgnoreCase(facilityStr)) { return SyslogConstants.LOG_LOCAL7; } else { throw new IllegalArgumentException(facilityStr + " is not a valid syslog facility string"); } }
// Exhaustively verifies every supported facility name maps to its
// SyslogConstants code.
@Test
public void testFacilityStringToint() throws InterruptedException {
    assertEquals(SyslogConstants.LOG_KERN, SyslogAppenderBase.facilityStringToint("KERN"));
    assertEquals(SyslogConstants.LOG_USER, SyslogAppenderBase.facilityStringToint("USER"));
    assertEquals(SyslogConstants.LOG_MAIL, SyslogAppenderBase.facilityStringToint("MAIL"));
    assertEquals(SyslogConstants.LOG_DAEMON, SyslogAppenderBase.facilityStringToint("DAEMON"));
    assertEquals(SyslogConstants.LOG_AUTH, SyslogAppenderBase.facilityStringToint("AUTH"));
    assertEquals(SyslogConstants.LOG_SYSLOG, SyslogAppenderBase.facilityStringToint("SYSLOG"));
    assertEquals(SyslogConstants.LOG_LPR, SyslogAppenderBase.facilityStringToint("LPR"));
    assertEquals(SyslogConstants.LOG_NEWS, SyslogAppenderBase.facilityStringToint("NEWS"));
    assertEquals(SyslogConstants.LOG_UUCP, SyslogAppenderBase.facilityStringToint("UUCP"));
    assertEquals(SyslogConstants.LOG_CRON, SyslogAppenderBase.facilityStringToint("CRON"));
    assertEquals(SyslogConstants.LOG_AUTHPRIV, SyslogAppenderBase.facilityStringToint("AUTHPRIV"));
    assertEquals(SyslogConstants.LOG_FTP, SyslogAppenderBase.facilityStringToint("FTP"));
    assertEquals(SyslogConstants.LOG_NTP, SyslogAppenderBase.facilityStringToint("NTP"));
    assertEquals(SyslogConstants.LOG_AUDIT, SyslogAppenderBase.facilityStringToint("AUDIT"));
    assertEquals(SyslogConstants.LOG_ALERT, SyslogAppenderBase.facilityStringToint("ALERT"));
    assertEquals(SyslogConstants.LOG_CLOCK, SyslogAppenderBase.facilityStringToint("CLOCK"));
    assertEquals(SyslogConstants.LOG_LOCAL0, SyslogAppenderBase.facilityStringToint("LOCAL0"));
    assertEquals(SyslogConstants.LOG_LOCAL1, SyslogAppenderBase.facilityStringToint("LOCAL1"));
    assertEquals(SyslogConstants.LOG_LOCAL2, SyslogAppenderBase.facilityStringToint("LOCAL2"));
    assertEquals(SyslogConstants.LOG_LOCAL3, SyslogAppenderBase.facilityStringToint("LOCAL3"));
    assertEquals(SyslogConstants.LOG_LOCAL4, SyslogAppenderBase.facilityStringToint("LOCAL4"));
    assertEquals(SyslogConstants.LOG_LOCAL5, SyslogAppenderBase.facilityStringToint("LOCAL5"));
    assertEquals(SyslogConstants.LOG_LOCAL6, SyslogAppenderBase.facilityStringToint("LOCAL6"));
    assertEquals(SyslogConstants.LOG_LOCAL7, SyslogAppenderBase.facilityStringToint("LOCAL7"));
}
/**
 * Creates sliding windows with the given maximum time difference between
 * records and the given grace period after window end. Both durations are
 * validated to be representable in (non-negative) milliseconds.
 *
 * @param timeDifference max time difference between records in a window
 * @param afterWindowEnd grace period after window end
 * @return the configured {@code SlidingWindows}
 * @throws IllegalArgumentException if either duration is invalid
 */
public static SlidingWindows ofTimeDifferenceAndGrace(final Duration timeDifference, final Duration afterWindowEnd) throws IllegalArgumentException {
    final long timeDifferenceMs = validateMillisecondDuration(
        timeDifference, prepareMillisCheckFailMsgPrefix(timeDifference, "timeDifference"));
    final long afterWindowEndMs = validateMillisecondDuration(
        afterWindowEnd, prepareMillisCheckFailMsgPrefix(afterWindowEnd, "afterWindowEnd"));
    return new SlidingWindows(timeDifferenceMs, afterWindowEndMs);
}
// Verifies a negative grace period is rejected with IllegalArgumentException.
@Test
public void gracePeriodMustNotBeNegative() {
    assertThrows(IllegalArgumentException.class,
        () -> SlidingWindows.ofTimeDifferenceAndGrace(ofMillis(10), ofMillis(-1)));
}
/**
 * Looks up the value mapped to the given user key by delegating to the
 * wrapped state.
 */
@Override
public UV get(UK key) throws Exception {
    return delegatedState.get(key);
}
// Verifies a read-only get() does not register any change with the logger.
@Test
public void testGetNotRecorded() throws Exception {
    testRecorded(
        singletonMap("x", "y"),
        state -> state.get("x"),
        logger -> assertFalse(logger.anythingChanged()));
}
/**
 * Handles a job execution-result request: fetches the job status and, only
 * for globally terminal jobs, fetches and returns the job result; otherwise
 * reports in-progress. Failures of either gateway call are translated via
 * {@code propagateException} into a REST error.
 *
 * @param request the request carrying the job id path parameter
 * @param gateway the gateway to query
 * @return future completing with the (in-progress or created) response body
 * @throws RestHandlerException on request handling errors
 */
@Override
protected CompletableFuture<JobExecutionResultResponseBody> handleRequest(
        @Nonnull final HandlerRequest<EmptyRequestBody> request,
        @Nonnull final RestfulGateway gateway)
        throws RestHandlerException {
    final JobID jobId = request.getPathParameter(JobIDPathParameter.class);
    final CompletableFuture<JobStatus> jobStatusFuture = gateway.requestJobStatus(jobId, timeout);
    return jobStatusFuture
            .thenCompose(
                    jobStatus -> {
                        if (jobStatus.isGloballyTerminalState()) {
                            return gateway.requestJobResult(jobId, timeout)
                                    .thenApply(JobExecutionResultResponseBody::created);
                        } else {
                            return CompletableFuture.completedFuture(
                                    JobExecutionResultResponseBody.inProgress());
                        }
                    })
            .exceptionally(
                    throwable -> {
                        throw propagateException(throwable);
                    });
}
// Verifies that a FINISHED job yields a COMPLETED queue status with the job
// execution result attached, and that the gateway is queried for this job id.
@Test
void testCompletedResult() throws Exception {
    final JobStatus jobStatus = JobStatus.FINISHED;
    final ArchivedExecutionGraph executionGraph =
            new ArchivedExecutionGraphBuilder()
                    .setJobID(TEST_JOB_ID)
                    .setState(jobStatus)
                    .build();

    final TestingRestfulGateway testingRestfulGateway =
            new TestingRestfulGateway.Builder()
                    .setRequestJobStatusFunction(
                            jobId -> {
                                assertThat(jobId).isEqualTo(TEST_JOB_ID);
                                return CompletableFuture.completedFuture(jobStatus);
                            })
                    .setRequestJobResultFunction(
                            jobId -> {
                                assertThat(jobId).isEqualTo(TEST_JOB_ID);
                                return CompletableFuture.completedFuture(
                                        JobResult.createFrom(executionGraph));
                            })
                    .build();

    final JobExecutionResultResponseBody responseBody =
            jobExecutionResultHandler.handleRequest(testRequest, testingRestfulGateway).get();
    assertThat(responseBody.getStatus().getId()).isEqualTo(QueueStatus.Id.COMPLETED);
    assertThat(responseBody.getJobExecutionResult()).isNotNull();
}
@SuppressWarnings("unchecked") @Override public <T extends Statement> ConfiguredStatement<T> inject( final ConfiguredStatement<T> statement ) { try { if (statement.getStatement() instanceof CreateAsSelect) { registerForCreateAs((ConfiguredStatement<? extends CreateAsSelect>) statement); } else if (statement.getStatement() instanceof CreateSource) { registerForCreateSource((ConfiguredStatement<? extends CreateSource>) statement); } } catch (final KsqlStatementException e) { throw e; } catch (final KsqlException e) { throw new KsqlStatementException( ErrorMessageUtil.buildErrorMessage(e), statement.getMaskedStatementText(), e.getCause()); } // Remove schema id from SessionConfig return stripSchemaIdConfig(statement); }
// Verifies a PROTOBUF-valued CREATE STREAM registers the value schema under
// the "<topic>-value" subject in the Schema Registry.
@Test
public void shouldRegisterDependenciesForProtobuf() throws Exception {
    // Given:
    givenStatement("CREATE STREAM source (f1 TIMESTAMP) "
        + "WITH ("
        + "  kafka_topic='expectedName', "
        + "  key_format='KAFKA', "
        + "  value_format='PROTOBUF', "
        + "  partitions=1 "
        + ");");

    // When:
    injector.inject(statement);

    // Then:
    verify(schemaRegistryClient).register("expectedName-value", PROTOBUF_SCHEMA);
}
@Udf(schema = "ARRAY<STRUCT<K STRING, V STRING>>") public List<Struct> entriesString( @UdfParameter(description = "The map to create entries from") final Map<String, String> map, @UdfParameter(description = "If true then the resulting entries are sorted by key") final boolean sorted ) { return entries(map, STRING_STRUCT_SCHEMA, sorted); }
// Verifies a null input map yields a null result rather than an exception.
@Test
public void shouldReturnNullListForNullMapString() {
    assertNull(entriesUdf.entriesString(null, false));
}
/**
 * Evaluates this expression against a row; a new visitor is created per
 * call.
 */
public boolean eval(StructLike data) {
    return new EvalVisitor().eval(data);
}
// Verifies notEqual evaluation on a top-level column and on a deeply nested
// struct field, for both the equal (false) and unequal (true) cases.
@Test
public void testNotEqual() {
    assertThat(notEqual("x", 5).literals().size()).isEqualTo(1);

    Evaluator evaluator = new Evaluator(STRUCT, notEqual("x", 7));
    assertThat(evaluator.eval(TestHelpers.Row.of(7, 8, null))).as("7 != 7 => false").isFalse();
    assertThat(evaluator.eval(TestHelpers.Row.of(6, 8, null))).as("6 != 7 => true").isTrue();

    Evaluator structEvaluator = new Evaluator(STRUCT, notEqual("s1.s2.s3.s4.i", 7));
    assertThat(
            structEvaluator.eval(
                TestHelpers.Row.of(
                    7,
                    8,
                    null,
                    TestHelpers.Row.of(
                        TestHelpers.Row.of(TestHelpers.Row.of(TestHelpers.Row.of(7)))))))
        .as("7 != 7 => false")
        .isFalse();
    assertThat(
            structEvaluator.eval(
                TestHelpers.Row.of(
                    6,
                    8,
                    null,
                    TestHelpers.Row.of(
                        TestHelpers.Row.of(TestHelpers.Row.of(TestHelpers.Row.of(6)))))))
        .as("6 != 7 => true")
        .isTrue();
}
/**
 * Sets the active farming contract (null clears it), persists it, and
 * refreshes the derived contract state.
 *
 * @param contract the contract produce, or null to clear
 */
public void setContract(@Nullable Produce contract) {
    this.contract = contract;
    setStoredContract(contract);
    handleContractState();
}
// Verifies that a redberries contract reports OCCUPIED when the guild bush
// patch is predicted to contain a dead crop of a different produce.
@Test
public void redberriesContractCadavaDead() {
    // Get the bush patch
    final FarmingPatch patch = farmingGuildPatches.get(Varbits.FARMING_4772);
    assertNotNull(patch);

    when(farmingTracker.predictPatch(patch))
        .thenReturn(new PatchPrediction(Produce.CADAVABERRIES, CropState.DEAD, 0, 2, 3));

    farmingContractManager.setContract(Produce.REDBERRIES);
    assertEquals(SummaryState.OCCUPIED, farmingContractManager.getSummary());
}
/**
 * Builds artifact descriptors for the given staging files. Input paths are
 * de-duplicated (order-preserving); a "stagedName=path" entry pins the
 * staged name, otherwise one is derived from the file and its hash.
 * Directories are zipped before hashing. Missing files are skipped with a
 * log (warn when explicitly named, info otherwise) rather than failing.
 *
 * @param stagingFiles paths, optionally prefixed with "stagedName="
 * @return artifact descriptors for every existing input file
 */
public static List<ArtifactInformation> getArtifacts(List<String> stagingFiles) {
    ImmutableList.Builder<ArtifactInformation> artifactsBuilder = ImmutableList.builder();
    Set<String> deduplicatedStagingFiles = new LinkedHashSet<>(stagingFiles);
    for (String path : deduplicatedStagingFiles) {
        File file;
        String stagedName = null;
        if (path.contains("=")) {
            // Explicit "stagedName=path" form.
            String[] components = path.split("=", 2);
            file = new File(components[1]);
            stagedName = components[0];
        } else {
            file = new File(path);
        }
        // Spurious items get added to the classpath, but ignoring silently can cause confusion.
        // Therefore, issue logs if a file does not exist before ignoring. The level will be warning
        // if they have a staged name, as those are likely to cause problems or unintended behavior
        // (e.g., dataflow-worker.jar, windmill_main).
        if (!file.exists()) {
            if (stagedName != null) {
                LOG.warn(
                    "Stage Artifact '{}' with the name '{}' was not found, staging will be ignored.",
                    file,
                    stagedName);
            } else {
                LOG.info("Stage Artifact '{}' was not found, staging will be ignored.", file);
            }
            continue;
        }

        ArtifactInformation.Builder artifactBuilder = ArtifactInformation.newBuilder();
        artifactBuilder.setTypeUrn(BeamUrns.getUrn(StandardArtifacts.Types.FILE));
        artifactBuilder.setRoleUrn(BeamUrns.getUrn(StandardArtifacts.Roles.STAGING_TO));
        HashCode hashCode;
        if (file.isDirectory()) {
            // Directories are staged as a zip; hash the zip, not the directory.
            File zippedFile;
            try {
                zippedFile = zipDirectory(file);
                hashCode = Files.asByteSource(zippedFile).hash(Hashing.sha256());
            } catch (IOException e) {
                throw new RuntimeException(e);
            }

            artifactBuilder.setTypePayload(
                RunnerApi.ArtifactFilePayload.newBuilder()
                    .setPath(zippedFile.getPath())
                    .setSha256(hashCode.toString())
                    .build()
                    .toByteString());

        } else {
            try {
                hashCode = Files.asByteSource(file).hash(Hashing.sha256());
            } catch (IOException e) {
                throw new RuntimeException(e);
            }

            artifactBuilder.setTypePayload(
                RunnerApi.ArtifactFilePayload.newBuilder()
                    .setPath(file.getPath())
                    .setSha256(hashCode.toString())
                    .build()
                    .toByteString());
        }
        if (stagedName == null) {
            // No explicit name: derive one from the file name and content hash.
            stagedName = createStagingFileName(file, hashCode);
        }
        artifactBuilder.setRolePayload(
            RunnerApi.ArtifactStagingToRolePayload.newBuilder()
                .setStagedName(stagedName)
                .build()
                .toByteString());
        artifactsBuilder.add(artifactBuilder.build());
    }

    return artifactsBuilder.build();
}
// Verifies that a "name=path" staging entry pointing at a missing file is skipped,
// and that the skip is logged at WARN (named artifacts get a louder level than
// unnamed ones because dropping them is more likely to cause real problems).
@Test
public void testGetArtifactsBadNamedFileLogsWarn() throws Exception {
  File file1 = File.createTempFile("file1-", ".txt");
  file1.deleteOnExit();
  List<ArtifactInformation> artifacts =
      Environments.getArtifacts(
          ImmutableList.of(file1.getAbsolutePath(), "file_name=spurious_file"));
  // Only the existing temp file is staged; the missing named entry is dropped.
  assertThat(artifacts, hasSize(1));
  expectedLogs.verifyWarn("name 'file_name' was not found");
}
/**
 * Converts an Avro record schema into the equivalent Parquet {@link MessageType}.
 *
 * @param avroSchema the Avro schema; its root must be a record
 * @return the Parquet message type named after the record's full name
 * @throws IllegalArgumentException if the root schema is not a record
 */
public MessageType convert(Schema avroSchema) {
  Schema.Type rootType = avroSchema.getType();
  if (!rootType.equals(Schema.Type.RECORD)) {
    throw new IllegalArgumentException("Avro schema must be a record.");
  }
  return new MessageType(avroSchema.getFullName(), convertFields(avroSchema.getFields(), ""));
}
// Verifies that fixed(12) Avro fields listed in WRITE_FIXED_AS_INT96 (including nested
// record, array, and map paths) convert to Parquet int96, while unlisted fixed fields
// stay fixed_len_byte_array. Also checks that listing a fixed field whose size is not
// 12 bytes is rejected, since int96 requires exactly 12 bytes.
@Test
public void testAvroFixed12AsParquetInt96Type() throws Exception {
  Schema schema = new Schema.Parser()
      .parse(Resources.getResource("fixedToInt96.avsc").openStream());
  Configuration conf = new Configuration();
  conf.setStrings(
      WRITE_FIXED_AS_INT96,
      "int96",
      "mynestedrecord.int96inrecord",
      "mynestedrecord.myarrayofoptional",
      "mynestedrecord.mymap");
  testAvroToParquetConversion(
      conf,
      schema,
      "message org.apache.parquet.avro.fixedToInt96 {\n"
          + "  required int96 int96;\n"
          + "  required fixed_len_byte_array(12) notanint96;\n"
          + "  required group mynestedrecord {\n"
          + "    required int96 int96inrecord;\n"
          + "    required group myarrayofoptional (LIST) {\n"
          + "      repeated int96 array;\n"
          + "    }\n"
          + "    required group mymap (MAP) {\n"
          + "      repeated group key_value (MAP_KEY_VALUE) {\n"
          + "        required binary key (STRING);\n"
          + "        required int96 value;\n"
          + "      }\n"
          + "    }\n"
          + "  }\n"
          + "  required fixed_len_byte_array(1) onebytefixed;\n"
          + "}");
  // "onebytefixed" is fixed(1): converting it to int96 must fail.
  conf.setStrings(WRITE_FIXED_AS_INT96, "onebytefixed");
  assertThrows(
      "Exception should be thrown for fixed types to be converted to INT96 where the size is not 12 bytes",
      IllegalArgumentException.class,
      () -> new AvroSchemaConverter(conf).convert(schema));
}
/**
 * Routes requests through the broker interceptor, except for upload payloads
 * (multipart form data or a raw octet stream), which pass straight down the chain
 * untouched.
 */
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
    throws IOException, ServletException {
  final String contentType = request.getContentType();
  final boolean isUploadPayload =
      StringUtils.containsIgnoreCase(contentType, MediaType.MULTIPART_FORM_DATA)
          || StringUtils.containsIgnoreCase(contentType, MediaType.APPLICATION_OCTET_STREAM);
  if (isUploadPayload) {
    chain.doFilter(request, response);
  } else {
    interceptor.onFilter(request, response, chain);
  }
}
// Verifies that upload content types (multipart form data, octet stream) bypass the
// interceptor and go straight down the filter chain.
@Test
public void testChainDoFilter() throws ServletException, IOException {
  PulsarService mockPulsarService = Mockito.mock(PulsarService.class);
  BrokerInterceptor spyInterceptor = Mockito.mock(BrokerInterceptor.class);
  HttpServletResponse mockHttpServletResponse = Mockito.mock(HttpServletResponse.class);
  ServiceConfiguration config = new ServiceConfiguration();
  FilterChain spyFilterChain = Mockito.spy(FilterChain.class);
  Mockito.doReturn(spyInterceptor).when(mockPulsarService).getBrokerInterceptor();
  Mockito.doReturn(config).when(mockPulsarService).getConfig();
  // request has MULTIPART_FORM_DATA content-type
  config.setBrokerInterceptors(Sets.newHashSet("Interceptor1", "Interceptor2"));
  HttpServletRequest mockHttpServletRequest2 = Mockito.mock(HttpServletRequest.class);
  Mockito.doReturn(MediaType.MULTIPART_FORM_DATA).when(mockHttpServletRequest2).getContentType();
  ProcessHandlerFilter processHandlerFilter2 =
      new ProcessHandlerFilter(mockPulsarService.getBrokerInterceptor());
  processHandlerFilter2.doFilter(mockHttpServletRequest2, mockHttpServletResponse, spyFilterChain);
  // The chain must be invoked directly, not the interceptor.
  Mockito.verify(spyFilterChain).doFilter(mockHttpServletRequest2, mockHttpServletResponse);
  // Reset verification counts before re-checking with the second content type.
  Mockito.clearInvocations(spyFilterChain);
  // request has APPLICATION_OCTET_STREAM content-type
  Mockito.doReturn(MediaType.APPLICATION_OCTET_STREAM).when(mockHttpServletRequest2).getContentType();
  processHandlerFilter2.doFilter(mockHttpServletRequest2, mockHttpServletResponse, spyFilterChain);
  Mockito.verify(spyFilterChain).doFilter(mockHttpServletRequest2, mockHttpServletResponse);
}
/**
 * Returns the top-level field names of a JSON object, in document order.
 *
 * @param jsonObj a JSON document as a string
 * @return the object's field names; {@code null} for null input, unparsable JSON,
 *     or any JSON value that is not an object
 */
@Udf
public List<String> keys(@UdfParameter final String jsonObj) {
  if (jsonObj == null) {
    return null;
  }
  final JsonNode parsed = UdfJsonMapper.parseJson(jsonObj);
  // A missing node is never an object, so a single object check also covers it.
  if (!parsed.isObject() || parsed.isMissingNode()) {
    return null;
  }
  final List<String> fieldNames = new ArrayList<>();
  parsed.fieldNames().forEachRemaining(fieldNames::add);
  return fieldNames;
}
@Test public void shouldReturnObjectKeys() { // When: final List<String> result = udf.keys("{\"a\": \"abc\", \"b\": { \"c\": \"a\" }, \"d\": 1}"); // Then: assertEquals(Arrays.asList("a", "b", "d"), result); }
/**
 * Parses an HTTP Accept header into its individual media-type entries.
 *
 * <p>Delegates to {@code parseAcceptTypeStream} and materializes the result; invalid
 * headers surface as whatever that method throws (see the invalid-types test, which
 * expects {@code InvalidMimeTypeException}).
 */
public static List<String> parseAcceptType(final String header) {
  return parseAcceptTypeStream(header).collect(Collectors.toList());
}
// Each invalid Accept header from the data provider must be rejected with
// InvalidMimeTypeException rather than parsed leniently.
@Test(dataProvider = "sampleInvalidAcceptHeaders", expectedExceptions = InvalidMimeTypeException.class)
public void testParseAcceptInvalidTypes(String header) {
  MIMEParse.parseAcceptType(header);
}
/**
 * Keys the first input by the given selector; the key's type information is extracted
 * from the selector against the first input's type.
 *
 * @param keySelector must be non-null
 */
public <KEY> Where<KEY> where(KeySelector<T1, KEY> keySelector) {
  Preconditions.checkNotNull(keySelector);
  return where(keySelector, TypeExtractor.getKeySelectorTypes(keySelector, input1.getType()));
}
// Verifies that allowedLateness(Duration) is recorded on the windowed co-group and
// readable back via getAllowedLatenessDuration().
@Test
void testSetAllowedLateness() {
  Duration lateness = Duration.ofMillis(42L);
  CoGroupedStreams.WithWindow<String, String, String, TimeWindow> withLateness =
      dataStream1
          .coGroup(dataStream2)
          .where(keySelector)
          .equalTo(keySelector)
          .window(tsAssigner)
          .allowedLateness(lateness);
  assertThat(withLateness.getAllowedLatenessDuration()).hasValue(lateness);
}
/**
 * Varargs convenience overload: wraps the keys in an unmodifiable list and delegates
 * to the list-based {@code importKeys}.
 *
 * @return the number of keys actually imported (duplicates are not re-imported)
 */
public int importKeys(ECKey... keys) {
  final List<ECKey> asList = Arrays.asList(keys);
  return importKeys(Collections.unmodifiableList(asList));
}
// Exercises key import end to end: bulk import fires the listener with the imported
// keys, duplicates are silently ignored (and don't fire events), and imported keys are
// findable by pub-key hash and by pub key. Uses a mock clock so earliestKeyCreationTime
// is deterministic.
@Test
public void importKeys() {
  Instant now = TimeUtils.currentTime().truncatedTo(ChronoUnit.SECONDS);
  TimeUtils.setMockClock(now);
  final ECKey key1 = new ECKey();
  // key2 is created a day later so key1 pins the earliest creation time.
  TimeUtils.rollMockClock(Duration.ofDays(1));
  final ECKey key2 = new ECKey();
  final ArrayList<ECKey> keys = Lists.newArrayList(key1, key2);
  // Import two keys, check the event is correct.
  assertEquals(2, chain.importKeys(keys));
  assertEquals(2, chain.numKeys());
  assertTrue(onKeysAddedRan.getAndSet(false));
  assertArrayEquals(keys.toArray(), onKeysAdded.get().toArray());
  assertEquals(now, chain.earliestKeyCreationTime());
  // Check we ignore duplicates.
  final ECKey newKey = new ECKey();
  keys.add(newKey);
  // Only the new key counts; the listener sees just that one key.
  assertEquals(1, chain.importKeys(keys));
  assertTrue(onKeysAddedRan.getAndSet(false));
  assertEquals(newKey, onKeysAdded.getAndSet(null).get(0));
  // A fully duplicate import is a no-op and must not fire the listener.
  assertEquals(0, chain.importKeys(keys));
  assertFalse(onKeysAddedRan.getAndSet(false));
  assertNull(onKeysAdded.get());
  assertTrue(chain.hasKey(key1));
  assertTrue(chain.hasKey(key2));
  assertEquals(key1, chain.findKeyFromPubHash(key1.getPubKeyHash()));
  assertEquals(key2, chain.findKeyFromPubKey(key2.getPubKey()));
  // Looking up by pub key with a hash (wrong input kind) must find nothing.
  assertNull(chain.findKeyFromPubKey(key2.getPubKeyHash()));
}
/**
 * Returns one {@link FieldValueSetter} per schema field for the given type, caching the
 * list so repeated lookups for the same (type, schema) pair are free.
 *
 * <p>The cache key combines the type descriptor with the schema, so the same Java type
 * used with different schemas yields distinct setter lists.
 */
public static List<FieldValueSetter> getSetters(
    TypeDescriptor<?> typeDescriptor,
    Schema schema,
    FieldValueTypeSupplier fieldValueTypeSupplier,
    TypeConversionsFactory typeConversionsFactory) {
  return CACHED_SETTERS.computeIfAbsent(
      TypeDescriptorWithSchema.create(typeDescriptor, schema),
      cacheKey ->
          fieldValueTypeSupplier.get(typeDescriptor, schema).stream()
              .map(typeInfo -> createSetter(typeInfo, typeConversionsFactory))
              .collect(Collectors.toList()));
}
// Exercises every generated setter for SimpleBean by position (the setter list order
// follows SIMPLE_BEAN_SCHEMA's field order) and then reads each value back through the
// bean's getters, including the type conversions (Instant -> DateTime, byte[] ->
// ByteBuffer, String -> StringBuilder).
@Test
public void testGeneratedSimpleSetters() {
  SimpleBean simpleBean = new SimpleBean();
  List<FieldValueSetter> setters =
      JavaBeanUtils.getSetters(
          new TypeDescriptor<SimpleBean>() {},
          SIMPLE_BEAN_SCHEMA,
          new SetterTypeSupplier(),
          new DefaultTypeConversionsFactory());
  assertEquals(12, setters.size());
  // Set one value per schema field, in schema order.
  setters.get(0).set(simpleBean, "field1");
  setters.get(1).set(simpleBean, (byte) 41);
  setters.get(2).set(simpleBean, (short) 42);
  setters.get(3).set(simpleBean, (int) 43);
  setters.get(4).set(simpleBean, (long) 44);
  setters.get(5).set(simpleBean, true);
  setters.get(6).set(simpleBean, DateTime.parse("1979-03-14").toInstant());
  setters.get(7).set(simpleBean, DateTime.parse("1979-03-15").toInstant());
  setters.get(8).set(simpleBean, "bytes1".getBytes(StandardCharsets.UTF_8));
  setters.get(9).set(simpleBean, "bytes2".getBytes(StandardCharsets.UTF_8));
  setters.get(10).set(simpleBean, new BigDecimal(42));
  setters.get(11).set(simpleBean, "stringBuilder");
  // Read everything back through the bean's typed getters.
  assertEquals("field1", simpleBean.getStr());
  assertEquals((byte) 41, simpleBean.getaByte());
  assertEquals((short) 42, simpleBean.getaShort());
  assertEquals((int) 43, simpleBean.getAnInt());
  assertEquals((long) 44, simpleBean.getaLong());
  assertTrue(simpleBean.isaBoolean());
  assertEquals(DateTime.parse("1979-03-14"), simpleBean.getDateTime());
  assertEquals(DateTime.parse("1979-03-15").toInstant(), simpleBean.getInstant());
  assertArrayEquals(
      "Unexpected bytes", "bytes1".getBytes(StandardCharsets.UTF_8), simpleBean.getBytes());
  assertEquals(
      ByteBuffer.wrap("bytes2".getBytes(StandardCharsets.UTF_8)), simpleBean.getByteBuffer());
  assertEquals(new BigDecimal(42), simpleBean.getBigDecimal());
  assertEquals("stringBuilder", simpleBean.getStringBuilder().toString());
}
/**
 * Copies any allowed-values and type-constraint restrictions from the DMN simple type
 * onto the OpenAPI schema, using the x-dmn-* extension keys. Absent or empty
 * restrictions are skipped.
 */
static void populateSchemaWithConstraints(Schema toPopulate, SimpleTypeImpl t) {
  final boolean hasAllowedValues = t.getAllowedValues() != null && !t.getAllowedValues().isEmpty();
  if (hasAllowedValues) {
    parseSimpleType(DMNOASConstants.X_DMN_ALLOWED_VALUES, toPopulate, t.getAllowedValuesFEEL(), t.getAllowedValues());
  }
  final boolean hasTypeConstraint = t.getTypeConstraint() != null && !t.getTypeConstraint().isEmpty();
  if (hasTypeConstraint) {
    parseSimpleType(DMNOASConstants.X_DMN_TYPE_CONSTRAINTS, toPopulate, t.getTypeConstraintFEEL(), t.getTypeConstraint());
  }
}
// Verifies that range-style type constraints ("(>1)", "(<=10)") populate the schema's
// numeric min/max (with the correct exclusivity flags) and are also recorded verbatim
// (minus parentheses) under the x-dmn-type-constraints extension.
@Test
void populateSchemaWithRangesForTypeConstraints() {
  List<Object> toRange = Arrays.asList("(>1)", "(<=10)");
  String typeConstraintsString =
      String.join(",", toRange.stream().map(toMap -> String.format("%s", toMap)).toList());
  SimpleTypeImpl toRead = getSimpleType(null, typeConstraintsString, FEEL_STRING, BuiltInType.STRING);
  AtomicReference<Schema> toPopulate = new AtomicReference<>(getSchemaForSimpleType(toRead));
  DMNTypeSchemas.populateSchemaWithConstraints(toPopulate.get(), toRead);
  // "(>1)" -> exclusive minimum 1; "(<=10)" -> inclusive maximum 10.
  assertEquals(BigDecimal.ONE, toPopulate.get().getMinimum());
  assertTrue(toPopulate.get().getExclusiveMinimum());
  assertEquals(BigDecimal.TEN, toPopulate.get().getMaximum());
  assertFalse(toPopulate.get().getExclusiveMaximum());
  assertTrue(toPopulate.get().getExtensions().containsKey(DMNOASConstants.X_DMN_TYPE_CONSTRAINTS));
  // The extension echoes the constraints without parentheses or spaces.
  String retrieved =
      ((String) toPopulate.get().getExtensions().get(DMNOASConstants.X_DMN_TYPE_CONSTRAINTS))
          .replace(" ", "");
  String expected = typeConstraintsString.replace("(", "").replace(")", "");
  assertEquals(expected, retrieved);
}
/**
 * Builds {@link CreateSourceProperties} from the WITH-clause literals.
 *
 * <p>Low-level {@link ConfigException}s are rethrown as {@link KsqlException}s with
 * "configuration" rewritten to "property" so the message matches user-facing wording.
 */
public static CreateSourceProperties from(final Map<String, Literal> literals) {
  try {
    return new CreateSourceProperties(literals, DurationParser::parse, false);
  } catch (final ConfigException e) {
    throw new KsqlException(e.getMessage().replace("configuration", "property"), e);
  }
}
// SESSION windows have no fixed size, so supplying WINDOW_SIZE alongside a SESSION
// window type must be rejected with a clear message.
@Test
public void shouldThrowOnSessionWindowWithSize() {
  // When:
  final Exception e = assertThrows(
      KsqlException.class,
      () -> CreateSourceProperties.from(
          ImmutableMap.<String, Literal>builder()
              .putAll(MINIMUM_VALID_PROPS)
              .put(WINDOW_TYPE_PROPERTY, new StringLiteral("SESSION"))
              .put(WINDOW_SIZE_PROPERTY, new StringLiteral("2 MILLISECONDS"))
              .build())
  );

  // Then:
  assertThat(e.getMessage(), containsString("'WINDOW_SIZE' should not be set for SESSION windows."));
}
/**
 * Looks a rule up by key from the lazily-initialized in-memory index.
 *
 * <p>The key is validated (rejected if null) before initialization, so no repository
 * access happens for invalid arguments.
 */
@Override
public Optional<Rule> findByKey(final RuleKey key) {
  verifyKeyArgument(key);
  ensureInitialized();
  final Rule rule = rulesByKey.get(key);
  return Optional.ofNullable(rule);
}
// Verifies that a null key is rejected up front and that no repository call is made.
@Test
public void findByKey_does_not_call_DB_if_key_argument_is_null() {
  try {
    underTest.findByKey(null);
  } catch (NullPointerException e) {
    // Expected: a null key must be rejected before any repository access.
  }
  // Verify outside the catch so the check runs unconditionally. The original only
  // asserted inside the catch block, so the test silently passed (asserting nothing)
  // whenever no exception was thrown.
  assertNoCallToDb();
}
public static BigDecimal toBigDecimal(Number number) { if (null == number) { return BigDecimal.ZERO; } // issue#3423@Github of CVE-2023-51080 Assert.isTrue(isValidNumber(number), "Number is invalid!"); if (number instanceof BigDecimal) { return (BigDecimal) number; } else if (number instanceof Long) { return new BigDecimal((Long) number); } else if (number instanceof Integer) { return new BigDecimal((Integer) number); } else if (number instanceof BigInteger) { return new BigDecimal((BigInteger) number); } // Float、Double等有精度问题,转换为字符串后再转换 return new BigDecimal(number.toString()); }
// Covers double input, comma-grouped strings, a "D" double suffix, and scientific
// notation being preserved as-is.
@Test
public void toBigDecimalTest() {
  final double a = 3.14;
  BigDecimal bigDecimal = NumberUtil.toBigDecimal(a);
  assertEquals("3.14", bigDecimal.toString());
  // Grouping commas are stripped when parsing strings.
  bigDecimal = NumberUtil.toBigDecimal("1,234.55");
  assertEquals("1234.55", bigDecimal.toString());
  // A trailing "D" (double literal suffix) is tolerated.
  bigDecimal = NumberUtil.toBigDecimal("1,234.56D");
  assertEquals("1234.56", bigDecimal.toString());
  // Scientific notation keeps its scale rather than being expanded.
  assertEquals(new BigDecimal("9.0E+7"), NumberUtil.toBigDecimal("9.0E+7"));
}
/**
 * Retries {@code runnable} with backoff, treating any of {@code passThroughExceptions}
 * as non-retryable.
 *
 * <p>Convenience overload: adapts each exception class into an instance-check predicate
 * and delegates to the main overload with a never-true stop supplier.
 *
 * @param maxRetries maximum number of attempts before giving up
 * @param initialWaitMs initial backoff delay in milliseconds
 * @param maxWaitMs upper bound on the backoff delay in milliseconds
 * @param runnable the action to retry
 * @param passThroughExceptions exception types that should propagate without retrying
 */
public static void retryWithBackoff(
    final int maxRetries,
    final int initialWaitMs,
    final int maxWaitMs,
    final Runnable runnable,
    final Class<?>... passThroughExceptions) {
  retryWithBackoff(
      maxRetries,
      initialWaitMs,
      maxWaitMs,
      runnable,
      () -> false,
      Arrays.stream(passThroughExceptions)
          .map(c -> (Predicate<Exception>) c::isInstance)
          .collect(Collectors.toList())
  );
}
// When the runnable succeeds on the first attempt, it must run exactly once — no retries.
@Test
public void shouldReturnOnSuccess() {
  RetryUtil.retryWithBackoff(10, 0, 0, runnable);
  verify(runnable, times(1)).run();
}
/** Creates a builder for configuring a propagation {@code Factory}. */
public static FactoryBuilder newFactoryBuilder() {
  return new FactoryBuilder();
}
// Verifies that the builder records the configured single-header inject format on the
// built factory's injector function.
@Test
void injectFormat() {
  B3Propagation.Factory factory = (B3Propagation.Factory) B3Propagation.newFactoryBuilder()
      .injectFormat(Format.SINGLE)
      .build();

  assertThat(factory.injectorFactory).extracting("injectorFunction")
      .isEqualTo(Format.SINGLE);
}
/**
 * Returns the current data encryption key, lazily (re)generating it from the block
 * token secret manager when absent or expired.
 *
 * @return a valid encryption key, or {@code null} when data transfer encryption is
 *     disabled
 */
@Override
public DataEncryptionKey newDataEncryptionKey() {
  if (encryptDataTransfer) {
    // Synchronize the check-and-regenerate so concurrent callers don't race on
    // replacing an expired key.
    synchronized (this) {
      if (encryptionKey == null || encryptionKey.expiryDate < timer.now()) {
        // Encryption Key (EK) is generated from Block Key (BK).
        // Check if EK is expired, and generate a new one using the current BK
        // if so, otherwise continue to use the previously generated EK.
        //
        // It's important to make sure that when EK is not expired, the BK
        // used to generate the EK is not expired and removed, because
        // the same BK will be used to re-generate the EK
        // by BlockTokenSecretManager.
        //
        // The current implementation ensures that when an EK is not expired
        // (within tokenLifetime), the BK that's used to generate it
        // still has at least "keyUpdateInterval" of life time before
        // the BK gets expired and removed.
        // See BlockTokenSecretManager for details.
        LOG.debug("Generating new data encryption key because current key "
            + (encryptionKey == null ? "is null." : "expired on " + encryptionKey.expiryDate));
        encryptionKey = blockTokenSecretManager.generateDataEncryptionKey();
      }
      return encryptionKey;
    }
  } else {
    return null;
  }
}
// Drives KeyManager with a fake timer: a fresh data encryption key must expire after
// exactly the configured lifetime, and once expired, asking again must regenerate a
// new, still-valid key from the current block key.
@Test
public void testNewDataEncryptionKey() throws Exception {
  final Configuration conf = new HdfsConfiguration();
  // Enable data transport encryption and access token
  conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  final long keyUpdateInterval = 2 * 1000;
  final long tokenLifeTime = keyUpdateInterval;
  final String blockPoolId = "bp-foo";
  FakeTimer fakeTimer = new FakeTimer();
  BlockTokenSecretManager btsm = new BlockTokenSecretManager(
      keyUpdateInterval, tokenLifeTime, 0, 1, blockPoolId, null, false);
  // Inject the fake timer so key expiry is fully controlled by the test.
  Whitebox.setInternalState(btsm, "timer", fakeTimer);
  // When KeyManager asks for block keys, return them from btsm directly
  NamenodeProtocol namenode = mock(NamenodeProtocol.class);
  when(namenode.getBlockKeys()).thenReturn(btsm.exportKeys());
  // Instantiate a KeyManager instance and get data encryption key.
  KeyManager keyManager = new KeyManager(blockPoolId, namenode, true, conf);
  Whitebox.setInternalState(keyManager, "timer", fakeTimer);
  Whitebox.setInternalState(
      Whitebox.getInternalState(keyManager, "blockTokenSecretManager"),
      "timer", fakeTimer);
  final DataEncryptionKey dek = keyManager.newDataEncryptionKey();
  final long remainingTime = dek.expiryDate - fakeTimer.now();
  assertEquals("KeyManager dataEncryptionKey should expire in 2 seconds",
      keyUpdateInterval, remainingTime);
  // advance the timer to expire the block key and data encryption key
  fakeTimer.advance(keyUpdateInterval + 1);
  // After the initial data encryption key expires, KeyManager should
  // regenerate a valid data encryption key using the current block key.
  final DataEncryptionKey dekAfterExpiration = keyManager.newDataEncryptionKey();
  assertNotEquals("KeyManager should generate a new data encryption key",
      dek, dekAfterExpiration);
  assertTrue("KeyManager has an expired DataEncryptionKey!",
      dekAfterExpiration.expiryDate > fakeTimer.now());
}
@Override public boolean revokeToken(String clientId, String accessToken) { // 先查询,保证 clientId 时匹配的 OAuth2AccessTokenDO accessTokenDO = oauth2TokenService.getAccessToken(accessToken); if (accessTokenDO == null || ObjectUtil.notEqual(clientId, accessTokenDO.getClientId())) { return false; } // 再删除 return oauth2TokenService.removeAccessToken(accessToken) != null; }
// Happy path: the token exists, belongs to the requesting client, and removal succeeds.
@Test
public void testRevokeToken_success() {
  // Prepare arguments.
  String clientId = randomString();
  String accessToken = randomString();
  // Mock: the access token exists and is owned by this client.
  OAuth2AccessTokenDO accessTokenDO = randomPojo(OAuth2AccessTokenDO.class).setClientId(clientId);
  when(oauth2TokenService.getAccessToken(eq(accessToken))).thenReturn(accessTokenDO);
  // Mock: removal returns the removed record (non-null means success).
  when(oauth2TokenService.removeAccessToken(eq(accessToken))).thenReturn(accessTokenDO);
  // Call and assert.
  assertTrue(oauth2GrantService.revokeToken(clientId, accessToken));
}
@Override @Transactional(rollbackFor = Exception.class) public void syncCodegenFromDB(Long tableId) { // 校验是否已经存在 CodegenTableDO table = codegenTableMapper.selectById(tableId); if (table == null) { throw exception(CODEGEN_TABLE_NOT_EXISTS); } // 从数据库中,获得数据库表结构 TableInfo tableInfo = databaseTableService.getTable(table.getDataSourceConfigId(), table.getTableName()); // 执行同步 syncCodegen0(tableId, tableInfo); }
@Test @Disabled // TODO @芋艿:这个单测会随机性失败,需要定位下; public void testSyncCodegenFromDB() { // mock 数据(CodegenTableDO) CodegenTableDO table = randomPojo(CodegenTableDO.class, o -> o.setTableName("t_yunai") .setDataSourceConfigId(1L).setScene(CodegenSceneEnum.ADMIN.getScene())); codegenTableMapper.insert(table); CodegenColumnDO column01 = randomPojo(CodegenColumnDO.class, o -> o.setTableId(table.getId()) .setColumnName("id")); codegenColumnMapper.insert(column01); CodegenColumnDO column02 = randomPojo(CodegenColumnDO.class, o -> o.setTableId(table.getId()) .setColumnName("name")); codegenColumnMapper.insert(column02); // 准备参数 Long tableId = table.getId(); // mock 方法(TableInfo) TableInfo tableInfo = mock(TableInfo.class); when(databaseTableService.getTable(eq(1L), eq("t_yunai"))) .thenReturn(tableInfo); when(tableInfo.getComment()).thenReturn("芋艿"); // mock 方法(TableInfo fields) TableField field01 = mock(TableField.class); when(field01.getComment()).thenReturn("主键"); TableField field03 = mock(TableField.class); when(field03.getComment()).thenReturn("分类"); List<TableField> fields = Arrays.asList(field01, field03); when(tableInfo.getFields()).thenReturn(fields); when(databaseTableService.getTable(eq(1L), eq("t_yunai"))) .thenReturn(tableInfo); // mock 方法(CodegenTableDO) List<CodegenColumnDO> newColumns = randomPojoList(CodegenColumnDO.class); when(codegenBuilder.buildColumns(eq(table.getId()), argThat(tableFields -> { assertEquals(2, tableFields.size()); assertSame(tableInfo.getFields(), tableFields); return true; }))).thenReturn(newColumns); // 调用 codegenService.syncCodegenFromDB(tableId); // 断言 List<CodegenColumnDO> dbColumns = codegenColumnMapper.selectList(); assertEquals(newColumns.size(), dbColumns.size()); assertPojoEquals(newColumns.get(0), dbColumns.get(0)); assertPojoEquals(newColumns.get(1), dbColumns.get(1)); }
/**
 * Emits a high-priority ("yell") log entry when the underlying logger supports it;
 * otherwise the call is a no-op.
 *
 * @param tag log tag
 * @param text printf-style message template
 * @param args template arguments
 */
public static synchronized void yell(final String tag, String text, Object... args) {
  if (!msLogger.supportsYell()) {
    return;
  }
  final String message = getFormattedString(text, args);
  msLogger.yell(tag, message);
  // Mirror the message into the in-memory log buffer as well.
  addLog(LVL_YELL, tag, message);
}
// Verifies formatting with arguments and the plain no-argument path both reach the
// underlying logger.
@Test
public void testYell() throws Exception {
  Logger.yell("mTag", "Text with %d digits", 2);
  Mockito.verify(mMockLog).yell("mTag", "Text with 2 digits");
  Logger.yell("mTag", "Text with no digits");
  Mockito.verify(mMockLog).yell("mTag", "Text with no digits");
}
/**
 * Wraps the delegate's own {@code toString()} — see the
 * doesNotStackOverflowOnToString test, which pins this exact format.
 */
@Override
public String toString() {
  return "SpanCustomizer(" + delegate + ")";
}
// The shield wraps a span; its toString must render the delegate's representation
// (trace/span IDs) without recursing back into itself.
@Test
void doesNotStackOverflowOnToString() {
  Span span = tracing.tracer().newTrace();
  SpanCustomizerShield shield = new SpanCustomizerShield(span);
  assertThat(shield.toString())
      .isNotEmpty()
      .isEqualTo("SpanCustomizer(RealSpan(" + span.context().traceIdString() + "/" + span.context()
          .spanIdString() + "))");
}
@Override public TempFileSpace newSpace(final String subdirectoryPrefix) { // TODO: Accept only ISO 8601-style timestamp in the v0.10 series. if (!ISO8601_BASIC_PATTERN.matcher(subdirectoryPrefix).matches()) { logger.warn("TempFileSpaceAllocator#newSpace should be called with ISO 8601 basic format: {}", subdirectoryPrefix); } // It is originally intended to support multiple files/directories, but the reasons are missing. // https://github.com/embulk/embulk/commit/a7643573ecb39e6dd71a08edce77c8e64dc70a77 // https://github.com/embulk/embulk/commit/5a78270a4fc20e3c113c68e4c0f6c66c1bd45886 // UNIX/Linux cannot include '/' as file name. // Windows cannot include ':' as file name. try { return TempFileSpaceImpl.with( this.tempDirectoryBase, "embulk" + subdirectoryPrefix.replace('/', '-').replace(':', '-')); } catch (final IOException ex) { throw new UncheckedIOException(ex); } }
// A non-ISO-8601-basic prefix (with spaces, colons, hyphens) must still be accepted
// for now — it only triggers a warning, not an error.
// TODO: Make it fail from the v0.10 series.
@Test
public void testNewSpaceWithNonIso8601Basic() {
  final TempFileSpaceAllocator allocator = new SimpleTempFileSpaceAllocator();
  allocator.newSpace("2019-10-31 12:34:56 UTC");
}
/**
 * Issues a new access token from a refresh token.
 *
 * <p>Validation order matters here (each step has side effects on the token store):
 * value present → token exists → ownership (a mismatch revokes the refresh token) →
 * client allows refresh → optionally clear old access tokens → expiry (expired tokens
 * are removed). Requested scopes may only narrow the refresh token's scopes
 * (up-scoping is rejected). Depending on client settings, the refresh token is either
 * reused or rotated.
 *
 * @throws InvalidTokenException  for missing, unknown, or expired refresh tokens
 * @throws InvalidClientException when the requesting client does not own the token or
 *                                does not allow refreshing
 * @throws InvalidScopeException  when the request asks for scopes beyond the token's
 */
@Override
@Transactional(value="defaultTransactionManager")
public OAuth2AccessTokenEntity refreshAccessToken(String refreshTokenValue, TokenRequest authRequest) throws AuthenticationException {
  if (Strings.isNullOrEmpty(refreshTokenValue)) {
    // throw an invalid token exception if there's no refresh token value at all
    throw new InvalidTokenException("Invalid refresh token: " + refreshTokenValue);
  }
  OAuth2RefreshTokenEntity refreshToken = clearExpiredRefreshToken(tokenRepository.getRefreshTokenByValue(refreshTokenValue));
  if (refreshToken == null) {
    // throw an invalid token exception if we couldn't find the token
    throw new InvalidTokenException("Invalid refresh token: " + refreshTokenValue);
  }
  ClientDetailsEntity client = refreshToken.getClient();
  AuthenticationHolderEntity authHolder = refreshToken.getAuthenticationHolder();
  // make sure that the client requesting the token is the one who owns the refresh token
  ClientDetailsEntity requestingClient = clientDetailsService.loadClientByClientId(authRequest.getClientId());
  if (!client.getClientId().equals(requestingClient.getClientId())) {
    // ownership mismatch is treated as a compromise: the token is revoked outright
    tokenRepository.removeRefreshToken(refreshToken);
    throw new InvalidClientException("Client does not own the presented refresh token");
  }
  // Make sure this client allows access token refreshing
  if (!client.isAllowRefresh()) {
    throw new InvalidClientException("Client does not allow refreshing access token!");
  }
  // clear out any access tokens, if so configured on the client
  if (client.isClearAccessTokensOnRefresh()) {
    tokenRepository.clearAccessTokensForRefreshToken(refreshToken);
  }
  if (refreshToken.isExpired()) {
    // expired tokens are removed from the store before rejecting
    tokenRepository.removeRefreshToken(refreshToken);
    throw new InvalidTokenException("Expired refresh token: " + refreshTokenValue);
  }
  OAuth2AccessTokenEntity token = new OAuth2AccessTokenEntity();
  // get the stored scopes from the authentication holder's authorization request; these are the scopes associated with the refresh token
  Set<String> refreshScopesRequested = new HashSet<>(refreshToken.getAuthenticationHolder().getAuthentication().getOAuth2Request().getScope());
  Set<SystemScope> refreshScopes = scopeService.fromStrings(refreshScopesRequested);
  // remove any of the special system scopes
  refreshScopes = scopeService.removeReservedScopes(refreshScopes);
  Set<String> scopeRequested = authRequest.getScope() == null ? new HashSet<String>() : new HashSet<>(authRequest.getScope());
  Set<SystemScope> scope = scopeService.fromStrings(scopeRequested);
  // remove any of the special system scopes
  scope = scopeService.removeReservedScopes(scope);
  if (scope != null && !scope.isEmpty()) {
    // ensure a proper subset of scopes
    if (refreshScopes != null && refreshScopes.containsAll(scope)) {
      // set the scope of the new access token if requested
      token.setScope(scopeService.toStrings(scope));
    } else {
      String errorMsg = "Up-scoping is not allowed.";
      logger.error(errorMsg);
      throw new InvalidScopeException(errorMsg);
    }
  } else {
    // otherwise inherit the scope of the refresh token (if it's there -- this can return a null scope set)
    token.setScope(scopeService.toStrings(refreshScopes));
  }
  token.setClient(client);
  if (client.getAccessTokenValiditySeconds() != null) {
    Date expiration = new Date(System.currentTimeMillis() + (client.getAccessTokenValiditySeconds() * 1000L));
    token.setExpiration(expiration);
  }
  if (client.isReuseRefreshToken()) {
    // if the client re-uses refresh tokens, do that
    token.setRefreshToken(refreshToken);
  } else {
    // otherwise, make a new refresh token
    OAuth2RefreshTokenEntity newRefresh = createRefreshToken(client, authHolder);
    token.setRefreshToken(newRefresh);
    // clean up the old refresh token
    tokenRepository.removeRefreshToken(refreshToken);
  }
  token.setAuthenticationHolder(authHolder);
  tokenEnhancer.enhance(token, authHolder.getAuthentication());
  tokenRepository.saveAccessToken(token);
  return token;
}
// An unknown refresh token value must be rejected with InvalidTokenException.
@Test(expected = InvalidTokenException.class)
public void refreshAccessToken_noRefreshToken() {
  when(tokenRepository.getRefreshTokenByValue(anyString())).thenReturn(null);
  service.refreshAccessToken(refreshTokenValue, tokenRequest);
}
/**
 * Encodes the message with the configured charset and appends the line separator,
 * emitting a single buffer per message.
 */
@Override
protected void encode(ChannelHandlerContext ctx, CharSequence msg, List<Object> out) throws Exception {
  // Reserve trailing capacity for the separator up front so no reallocation is needed.
  ByteBuf encoded =
      ByteBufUtil.encodeString(ctx.alloc(), CharBuffer.wrap(msg), charset, lineSeparator.length);
  encoded.writeBytes(lineSeparator);
  out.add(encoded);
}
// Runs the shared line-encode check with each supported separator style.
@Test
public void testEncode() {
  testLineEncode(LineSeparator.DEFAULT, "abc");
  testLineEncode(LineSeparator.WINDOWS, "abc");
  testLineEncode(LineSeparator.UNIX, "abc");
}
/**
 * Parses the provider's delivery-report callback payload (a JSON array) into
 * {@link SmsReceiveRespDTO}s, one per reported message.
 */
@Override
public List<SmsReceiveRespDTO> parseSmsReceiveStatus(String text) {
  List<SmsReceiveStatus> callback = JsonUtils.parseArray(text, SmsReceiveStatus.class);
  return convertList(callback, status -> new SmsReceiveRespDTO()
      // Case-insensitive compare tolerates provider-side casing of the status code.
      .setSuccess(SmsReceiveStatus.SUCCESS_CODE.equalsIgnoreCase(status.getStatus()))
      .setErrorCode(status.getErrCode()).setErrorMsg(status.getDescription())
      .setMobile(status.getMobile()).setReceiveTime(status.getReceiveTime())
      // NOTE(review): getSessionContext() is dereferenced without a null check — this
      // assumes the provider always echoes the session context back; confirm.
      .setSerialNo(status.getSerialNo()).setLogId(status.getSessionContext().getLogId()));
}
// Parses a sample provider callback payload and checks every mapped field, including
// the logId extracted from the echoed "ext" session context.
@Test
public void testParseSmsReceiveStatus() {
  // Prepare the raw callback payload.
  String text = "[\n"
      + "  {\n"
      + "    \"user_receive_time\": \"2015-10-17 08:03:04\",\n"
      + "    \"nationcode\": \"86\",\n"
      + "    \"mobile\": \"13900000001\",\n"
      + "    \"report_status\": \"SUCCESS\",\n"
      + "    \"errmsg\": \"DELIVRD\",\n"
      + "    \"description\": \"用户短信送达成功\",\n"
      + "    \"sid\": \"12345\",\n"
      + "    \"ext\": {\"logId\":\"67890\"}\n"
      + "  }\n"
      + "]";
  // Execute.
  List<SmsReceiveRespDTO> statuses = smsClient.parseSmsReceiveStatus(text);
  // Assert every mapped field.
  assertEquals(1, statuses.size());
  assertTrue(statuses.get(0).getSuccess());
  assertEquals("DELIVRD", statuses.get(0).getErrorCode());
  assertEquals("用户短信送达成功", statuses.get(0).getErrorMsg());
  assertEquals("13900000001", statuses.get(0).getMobile());
  assertEquals(LocalDateTime.of(2015, 10, 17, 8, 3, 4), statuses.get(0).getReceiveTime());
  assertEquals("12345", statuses.get(0).getSerialNo());
  assertEquals(67890L, statuses.get(0).getLogId());
}
/**
 * Deletes integrations: either all of them ({@code --all}) or the named ones, printing
 * a per-integration status line. Missing names are reported as skipped.
 *
 * @return 0 on completion
 * @throws RuntimeCamelException when neither names nor {@code --all} were supplied
 */
public Integer doCall() throws Exception {
  if (all) {
    client(Integration.class).delete();
    printer().println("Integrations deleted");
    return 0;
  }
  if (names == null) {
    throw new RuntimeCamelException("Missing integration name as argument or --all option.");
  }
  // Sanitize each requested name before resolving it against the cluster.
  for (String name : Arrays.stream(names).map(KubernetesHelper::sanitize).toList()) {
    List<StatusDetails> status = client(Integration.class).withName(name).delete();
    // An empty status list means nothing matched the name.
    if (status.isEmpty()) {
      printer().printf("Integration %s deletion skipped - not found%n", name);
    } else {
      printer().printf("Integration %s deleted%n", name);
    }
  }
  return 0;
}
// Creates an integration, deletes it by name via the command, and verifies the printed
// confirmation plus the now-empty resource list.
// NOTE(review): the expected output "Integration routes deleted" implies
// createIntegration() names the resource "routes" — confirm against that helper.
@Test
public void shouldDeleteIntegration() throws Exception {
  Integration integration = createIntegration();
  kubernetesClient.resources(Integration.class).resource(integration).create();

  IntegrationDelete command = createCommand();
  command.names = new String[] { integration.getMetadata().getName() };
  command.doCall();

  Assertions.assertEquals("Integration routes deleted", printer.getOutput());
  Assertions.assertEquals(0, kubernetesClient.resources(Integration.class).list().getItems().size());
}
/**
 * Runs a DMN scenario: configures the executable builder with the active model path
 * and all background/given input values, executes it, and returns the decision
 * outputs.
 *
 * @throws ScenarioException if the settings describe a non-DMN simulation
 */
@Override
protected Map<String, Object> executeScenario(KieContainer kieContainer,
                                              ScenarioRunnerData scenarioRunnerData,
                                              ExpressionEvaluatorFactory expressionEvaluatorFactory,
                                              ScesimModelDescriptor scesimModelDescriptor,
                                              Settings settings) {
  if (!ScenarioSimulationModel.Type.DMN.equals(settings.getType())) {
    throw new ScenarioException("Impossible to run a not-DMN simulation with DMN runner");
  }
  DMNScenarioExecutableBuilder builder = createBuilderWrapper(kieContainer);
  builder.setActiveModel(settings.getDmnFilePath());
  // Background data and given facts together form the DMN input set.
  defineInputValues(scenarioRunnerData.getBackgrounds(), scenarioRunnerData.getGivens())
      .forEach(builder::setValue);
  return builder.run().getOutputs();
}
// A fact identifier whose declared import prefix does not match its name's prefix must
// be rejected with a descriptive IllegalArgumentException during execution.
@Test
public void validateScenario_wrongImportPrefix() {
  String wrongPrefix = "WrongPrefix";
  FactIdentifier importedPersonFactIdentifier =
      FactIdentifier.create(IMPORTED_PREFIX + ".Person", IMPORTED_PREFIX + ".Person", wrongPrefix);
  ScenarioRunnerData scenarioRunnerData = new ScenarioRunnerData();
  AbstractMap.SimpleEntry<String, Object> givenImportedPersonFactData =
      new AbstractMap.SimpleEntry<>("surname", "White");
  AbstractMap.SimpleEntry<String, Object> givenImportedPersonFactData2 =
      new AbstractMap.SimpleEntry<>("age", 67);
  scenarioRunnerData.addGiven(new InstanceGiven(importedPersonFactIdentifier,
      Map.ofEntries(givenImportedPersonFactData, givenImportedPersonFactData2)));

  assertThatThrownBy(() -> runnerHelper.executeScenario(kieContainerMock,
      scenarioRunnerData,
      expressionEvaluatorFactory,
      simulation.getScesimModelDescriptor(),
      settings))
      .isInstanceOf(IllegalArgumentException.class)
      .hasMessage("Fact name: " + IMPORTED_PREFIX + ".Person has defined an invalid import prefix: " + wrongPrefix);
}
/**
 * Single-message convenience overload: delegates to the batch variant with a batch
 * size of 1.
 */
public boolean fetchFromCurrentStore(String topic, int queueId, long offset) {
  return fetchFromCurrentStore(topic, queueId, offset, 1);
}
// Exercises every tieredStorageLevel setting (0=DISABLE, 1=NOT_IN_DISK, 2=NOT_IN_MEM, 3=FORCE)
// and asserts whether a fetch should be served from the tiered (current) store depending on
// whether the default store claims the offset is on disk and/or in memory.
@Test public void testViaTieredStorage() { Properties properties = new Properties(); // TieredStorageLevel.DISABLE properties.setProperty("tieredStorageLevel", "0"); configuration.update(properties); Assert.assertFalse(currentStore.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); // TieredStorageLevel.NOT_IN_DISK properties.setProperty("tieredStorageLevel", "1"); configuration.update(properties); when(defaultStore.checkInStoreByConsumeOffset(anyString(), anyInt(), anyLong())).thenReturn(false); Assert.assertTrue(currentStore.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); when(defaultStore.checkInStoreByConsumeOffset(anyString(), anyInt(), anyLong())).thenReturn(true); Assert.assertFalse(currentStore.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); // TieredStorageLevel.NOT_IN_MEM properties.setProperty("tieredStorageLevel", "2"); configuration.update(properties); Mockito.when(defaultStore.checkInStoreByConsumeOffset(anyString(), anyInt(), anyLong())).thenReturn(false); Mockito.when(defaultStore.checkInMemByConsumeOffset(anyString(), anyInt(), anyLong(), anyInt())).thenReturn(true); Assert.assertTrue(currentStore.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); Mockito.when(defaultStore.checkInStoreByConsumeOffset(anyString(), anyInt(), anyLong())).thenReturn(true); Mockito.when(defaultStore.checkInMemByConsumeOffset(anyString(), anyInt(), anyLong(), anyInt())).thenReturn(false); Assert.assertTrue(currentStore.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); Mockito.when(defaultStore.checkInStoreByConsumeOffset(anyString(), anyInt(), anyLong())).thenReturn(true); Mockito.when(defaultStore.checkInMemByConsumeOffset(anyString(), anyInt(), anyLong(), anyInt())).thenReturn(true); Assert.assertFalse(currentStore.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); // TieredStorageLevel.FORCE properties.setProperty("tieredStorageLevel", "3"); configuration.update(properties); 
// FORCE always reads from the tiered store, regardless of what the default store contains.
Assert.assertTrue(currentStore.fetchFromCurrentStore(mq.getTopic(), mq.getQueueId(), 0)); }
/**
 * Resolves the status of a MijnDigiD session by id.
 * Unknown ids and sessions that are not (yet) authenticated are both reported as INVALID;
 * only an existing, authenticated session is VALID.
 */
public MijnDigidSessionStatus sessionStatus(String mijnDigiDSessionId) {
    return mijnDigiDSessionRepository.findById(mijnDigiDSessionId)
            .filter(MijnDigidSession::isAuthenticated)
            .map(session -> MijnDigidSessionStatus.VALID)
            .orElse(MijnDigidSessionStatus.INVALID);
}
// Verifies that a session id absent from the repository is reported as INVALID.
@Test
void testStatusNonExistingSession() {
    MijnDigidSession session = new MijnDigidSession(1L);
    when(mijnDigiDSessionRepository.findById(eq(session.getId()))).thenReturn(Optional.empty());

    MijnDigidSessionStatus status = mijnDigiDSessionService.sessionStatus(session.getId());

    verify(mijnDigiDSessionRepository, times(1)).findById(eq(session.getId()));
    // Fix: JUnit's assertEquals signature is (expected, actual); the original call had the
    // arguments swapped, which yields a misleading message when the assertion fails.
    assertEquals(MijnDigidSessionStatus.INVALID, status);
}
// Static factory: builds a BadRequestException whose message embeds the offending
// notifications format string.
public static BadRequestException invalidNotificationsFormat(String format) { return new BadRequestException("invalid notifications format:%s", format); }
// Checks that the factory interpolates the format argument into the exception message.
@Test public void testInvalidNotificationsFormat() { BadRequestException invalidNotificationsFormat = BadRequestException.invalidNotificationsFormat("format"); assertEquals("invalid notifications format:format", invalidNotificationsFormat.getMessage()); }
public static String bytesToHex(byte[] digest) { StringBuilder hexString = new StringBuilder(2 * digest.length); final int mask = 0xFF; for (byte b : digest) { // byte type in Java is signed. Mask away the sign bit String hex = Integer.toHexString(mask & b); if (hex.length() == 1) { hexString.append('0'); } hexString.append(hex); } return hexString.toString(); }
// Encodes a 32-byte SHA-256 digest and checks both the even length (every byte is zero-padded
// to two hex chars) and the exact expected lowercase hex string.
@Test public void testBytesToHex() { byte[] data = {3, -61, -37, -66, 125, -120, 21, -109, 126, 53, 75, -115, 44, 76, -17, -53, 2, 6, 61, -45, 32, -19, 35, -15, 109, -114, 92, -13, 109, -44, -7, 42}; String result = Sha256Util.bytesToHex(data); assertEquals("The result must has even length", 0, result.length() % 2); assertEquals("03c3dbbe7d8815937e354b8d2c4cefcb02063dd320ed23f16d8e5cf36dd4f92a", result); }
// Starts the next span for a JMS message: extracts (and clears) any propagated trace context
// from message properties, then asks the tracer for the next span. Queue/topic tags are only
// added when there was no upstream context and the span is actually sampled.
public Span nextSpan(Message message) { TraceContextOrSamplingFlags extracted = extractAndClearTraceIdProperties(processorExtractor, message, message); Span result = tracer.nextSpan(extracted); // Processor spans use the normal sampler. // When an upstream context was not present, lookup keys are unlikely added if (extracted.context() == null && !result.isNoop()) { // simplify code by re-using an existing MessagingRequest impl tagQueueOrTopic(new MessageConsumerRequest(message, destination(message)), result); } return result; }
// When the message carries an incoming B3 context, the consumer span must NOT be tagged
// with the destination queue (tags only happen for root extractions).
@Test void nextSpan_shouldnt_tag_queue_when_incoming_context() { setStringProperty(message, "b3", "0000000000000001-0000000000000002-1"); message.setDestination(createDestination("foo", QUEUE_TYPE)); jmsTracing.nextSpan(message).start().finish(); assertThat(testSpanHandler.takeLocalSpan().tags()).isEmpty(); }
// Returns the set of broker ids that currently host at least one partition replica.
// Creates an Admin client against the cluster's replication listener, lists and describes all
// topics, and collects the replica node ids. The admin client is closed on both the success
// and the failure path before the returned Future completes. A KafkaException thrown while
// creating the client is converted into a failed Future.
public Future<Set<Integer>> brokersInUse(Reconciliation reconciliation, Vertx vertx, TlsPemIdentity coTlsPemIdentity, AdminClientProvider adminClientProvider) { try { String bootstrapHostname = KafkaResources.bootstrapServiceName(reconciliation.name()) + "." + reconciliation.namespace() + ".svc:" + KafkaCluster.REPLICATION_PORT; LOGGER.debugCr(reconciliation, "Creating AdminClient for Kafka cluster in namespace {}", reconciliation.namespace()); Admin kafkaAdmin = adminClientProvider.createAdminClient(bootstrapHostname, coTlsPemIdentity.pemTrustSet(), coTlsPemIdentity.pemAuthIdentity()); return topicNames(reconciliation, vertx, kafkaAdmin) .compose(names -> describeTopics(reconciliation, vertx, kafkaAdmin, names)) .compose(topicDescriptions -> { Set<Integer> brokersWithPartitionReplicas = new HashSet<>(); for (TopicDescription td : topicDescriptions.values()) { for (TopicPartitionInfo pd : td.partitions()) { for (org.apache.kafka.common.Node broker : pd.replicas()) { brokersWithPartitionReplicas.add(broker.id()); } } } kafkaAdmin.close(); return Future.succeededFuture(brokersWithPartitionReplicas); }).recover(error -> { LOGGER.warnCr(reconciliation, "Failed to get list of brokers in use", error); kafkaAdmin.close(); return Future.failedFuture(error); }); } catch (KafkaException e) { LOGGER.warnCr(reconciliation, "Failed to check if broker contains any partition replicas", e); return Future.failedFuture(e); } }
// Mocks an Admin client with three single-replica topics on brokers 0, 1 and 2 and verifies
// that brokersInUse describes exactly those topics and reports all three broker ids.
@Test public void testBrokersInUse(VertxTestContext context) { Admin admin = mock(Admin.class); AdminClientProvider mock = mock(AdminClientProvider.class); when(mock.createAdminClient(anyString(), any(), any())).thenReturn(admin); // Mock topic description TopicDescription t1 = new TopicDescription("my-topic", false, List.of(new TopicPartitionInfo(0, NODE.apply(0), List.of(NODE.apply(0)), List.of(NODE.apply(0))))); TopicDescription t2 = new TopicDescription("my-topic2", false, List.of(new TopicPartitionInfo(0, NODE.apply(1), List.of(NODE.apply(1)), List.of(NODE.apply(1))))); TopicDescription t3 = new TopicDescription("my-topic3", false, List.of(new TopicPartitionInfo(0, NODE.apply(2), List.of(NODE.apply(2)), List.of(NODE.apply(2))))); DescribeTopicsResult dtr = mock(DescribeTopicsResult.class); when(dtr.allTopicNames()).thenReturn(KafkaFuture.completedFuture(Map.of(t1.name(), t1, t2.name(), t2, t3.name(), t3))); @SuppressWarnings(value = "unchecked") ArgumentCaptor<Collection<String>> topicListCaptor = ArgumentCaptor.forClass(Collection.class); when(admin.describeTopics(topicListCaptor.capture())).thenReturn(dtr); // Mock list topics ListTopicsResult ltr = mock(ListTopicsResult.class); when(ltr.names()).thenReturn(KafkaFuture.completedFuture(Set.of("my-topic", "my-topic2", "my-topic3"))); when(admin.listTopics(any())).thenReturn(ltr); // Get brokers in use Checkpoint checkpoint = context.checkpoint(); BrokersInUseCheck operations = new BrokersInUseCheck(); operations.brokersInUse(RECONCILIATION, vertx, DUMMY_IDENTITY, mock) .onComplete(context.succeeding(brokersInUse -> { Collection<String> topicList = topicListCaptor.getValue(); assertThat(topicList.size(), is(3)); assertThat(topicList, hasItems("my-topic", "my-topic2", "my-topic3")); assertThat(brokersInUse.size(), is(3)); assertThat(brokersInUse, is(Set.of(0, 1, 2))); checkpoint.flag(); })); }
@Override public void onMsg(TbContext ctx, TbMsg msg) throws ExecutionException, InterruptedException, TbNodeException { var metaDataCopy = msg.getMetaData().copy(); var msgDataStr = msg.getData(); boolean hasNoChanges = false; switch (deleteFrom) { case METADATA: var metaDataMap = metaDataCopy.getData(); var mdKeysToDelete = metaDataMap.keySet() .stream() .filter(this::matches) .collect(Collectors.toList()); mdKeysToDelete.forEach(metaDataMap::remove); metaDataCopy = new TbMsgMetaData(metaDataMap); hasNoChanges = mdKeysToDelete.isEmpty(); break; case DATA: JsonNode dataNode = JacksonUtil.toJsonNode(msgDataStr); if (dataNode.isObject()) { var msgDataObject = (ObjectNode) dataNode; var msgKeysToDelete = new ArrayList<String>(); dataNode.fieldNames().forEachRemaining(key -> { if (matches(key)) { msgKeysToDelete.add(key); } }); msgDataObject.remove(msgKeysToDelete); msgDataStr = JacksonUtil.toString(msgDataObject); hasNoChanges = msgKeysToDelete.isEmpty(); } break; default: log.debug("Unexpected DeleteFrom value: {}. Allowed values: {}", deleteFrom, TbMsgSource.values()); } ctx.tellSuccess(hasNoChanges ? msg : TbMsg.transformMsg(msg, metaDataCopy, msgDataStr)); }
// With the node configured to delete from metadata, the forwarded message must no longer
// contain the keys matched by the node's pattern; failure path must not be taken.
@Test void givenDeleteFromMetadata_whenOnMsg_thenVerifyOutput() throws Exception { node.onMsg(ctx, getTbMsg(deviceId, TbMsg.EMPTY_JSON_OBJECT)); ArgumentCaptor<TbMsg> newMsgCaptor = ArgumentCaptor.forClass(TbMsg.class); verify(ctx).tellSuccess(newMsgCaptor.capture()); verify(ctx, never()).tellFailure(any(), any()); TbMsg newMsg = newMsgCaptor.getValue(); assertThat(newMsg).isNotNull(); Map<String, String> metaDataMap = newMsg.getMetaData().getData(); assertThat(metaDataMap.containsKey("TestKey_1")).isEqualTo(false); assertThat(metaDataMap.containsKey("voltageDataValue")).isEqualTo(false); }
// Delegates to the JdbcTemplate-based overload using this operator's own template.
@Override public <R> List<R> queryMany(String sql, Object[] args, RowMapper<R> mapper) { return queryMany(jdbcTemplate, sql, args, mapper); }
// Stubs JdbcTemplate.query to return two config rows and checks that queryMany passes the
// SQL/args/mapper through unchanged and returns the template's result as-is.
@Test void testQueryMany1() { final String sql = "SELECT * FROM config_info WHERE id >= ? AND id <= ?"; final Object[] args = new Object[] {1, 2}; MockConfigInfo configInfo1 = new MockConfigInfo(); configInfo1.setId(1); MockConfigInfo configInfo2 = new MockConfigInfo(); configInfo2.setId(2); List<MockConfigInfo> configInfos = new ArrayList<>(); configInfos.add(configInfo1); configInfos.add(configInfo2); when(jdbcTemplate.query(eq(sql), eq(args), any(RowMapper.class))).thenReturn(configInfos); assertEquals(configInfos, operate.queryMany(sql, args, rowMapper)); }
// Wraps the downstream plugin chain in a Hystrix command. Group/command keys fall back to the
// Shenyu context's module/method when not configured on the rule handle. Errors mark the
// exchange result as ERROR and continue the chain; on completion a metrics consumer (if any)
// is fed the response status.
@Override protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain, final SelectorData selector, final RuleData rule) { final ShenyuContext shenyuContext = exchange.getAttribute(Constants.CONTEXT); assert shenyuContext != null; final HystrixHandle hystrixHandle = HystrixPluginDataHandler.CACHED_HANDLE.get().obtainHandle(CacheKeyUtils.INST.getKey(rule)); String groupKey = hystrixHandle.getGroupKey(); if (StringUtils.isBlank(hystrixHandle.getGroupKey())) { groupKey = Objects.requireNonNull(shenyuContext).getModule(); } String commandKey = hystrixHandle.getCommandKey(); if (StringUtils.isBlank(hystrixHandle.getCommandKey())) { commandKey = Objects.requireNonNull(shenyuContext).getMethod(); } Command command = fetchCommand(hystrixHandle, exchange, chain, commandKey, groupKey); return Mono.create(s -> { Subscription sub = command.fetchObservable().subscribe(s::success, s::error, s::success); s.onCancel(sub::unsubscribe); if (command.isCircuitBreakerOpen()) { LOG.error("hystrix execute have circuitBreaker is Open! groupKey:{},commandKey:{}", hystrixHandle.getGroupKey(), hystrixHandle.getCommandKey()); } }).doOnError(throwable -> { LOG.error("hystrix execute exception:", throwable); exchange.getAttributes().put(Constants.CLIENT_RESPONSE_RESULT_TYPE, ResultEnum.ERROR.getName()); chain.execute(exchange); }).then().doFinally(monoV -> { final Consumer<HttpStatusCode> consumer = exchange.getAttribute(Constants.METRICS_HYSTRIX); Optional.ofNullable(consumer).ifPresent(c -> c.accept(exchange.getResponse().getStatusCode())); }); }
// Runs the hystrix plugin in both the default and THREAD_POOL isolation modes against a mocked
// chain and verifies the returned Mono completes cleanly in each case.
@Test public void testDoExecute() { final ServerWebExchange exchange = MockServerWebExchange.from(MockServerHttpRequest.get("localhost").build()); ShenyuContext shenyuContext = mock(ShenyuContext.class); exchange.getAttributes().put(Constants.CONTEXT, shenyuContext); ShenyuPluginChain chain = mock(ShenyuPluginChain.class); when(chain.execute(exchange)).thenReturn(Mono.empty()); HystrixHandle hystrixHandle = new HystrixHandle(); hystrixHandle.setGroupKey("groupKey"); hystrixHandle.setCommandKey(" commandKey"); hystrixHandle.setMaxConcurrentRequests(0); hystrixHandle.setErrorThresholdPercentage(0); hystrixHandle.setRequestVolumeThreshold(0); hystrixHandle.setSleepWindowInMilliseconds(0); RuleData rule = new RuleData(); HystrixPluginDataHandler.CACHED_HANDLE.get().cachedHandle(CacheKeyUtils.INST.getKey(rule), hystrixHandle); rule.setHandle(GsonUtils.getInstance().toJson(hystrixHandle)); SelectorData selectorData = mock(SelectorData.class); Mono<Void> mono = hystrixPlugin.doExecute(exchange, chain, selectorData, rule); StepVerifier.create(mono).expectSubscription().verifyComplete(); hystrixHandle.setExecutionIsolationStrategy(HystrixIsolationModeEnum.THREAD_POOL.getCode()); rule.setHandle(GsonUtils.getInstance().toJson(hystrixHandle)); Mono<Void> threadMono = hystrixPlugin.doExecute(exchange, chain, selectorData, rule); StepVerifier.create(threadMono).expectSubscription().verifyComplete(); }
// Boxed-type bridge: unboxes and delegates to the primitive add(long) overload.
@Override public boolean add(final Long value) { return add(value.longValue()); }
// Set semantics: the first add of a value returns true, a duplicate add returns false.
@Test public void addingAnElementTwiceDoesNothing() { assertTrue(set.add(1)); assertFalse(set.add(1)); }
// Converts an Input (plus its extractors) into a content-pack EntityV1: static fields and the
// configuration become value references, extractors are encoded individually, and version
// constraints are attached. The entity id is resolved through the descriptor-id mapping.
@VisibleForTesting Entity exportNativeEntity(InputWithExtractors inputWithExtractors, EntityDescriptorIds entityDescriptorIds) { final Input input = inputWithExtractors.input(); // TODO: Create independent representation of entity? final Map<String, ValueReference> staticFields = input.getStaticFields().entrySet().stream() .collect(Collectors.toMap(Map.Entry::getKey, kv -> ValueReference.of(kv.getValue()))); final ReferenceMap configuration = toReferenceMap(input.getConfiguration()); final List<ExtractorEntity> extractors = inputWithExtractors.extractors().stream() .map(this::encodeExtractor) .collect(Collectors.toList()); final InputEntity inputEntity = InputEntity.create( ValueReference.of(input.getTitle()), configuration, staticFields, ValueReference.of(input.getType()), ValueReference.of(input.isGlobal()), extractors); final JsonNode data = objectMapper.convertValue(inputEntity, JsonNode.class); final Set<Constraint> constraints = versionConstraints(input); return EntityV1.builder() .id(ModelId.of(entityDescriptorIds.getOrThrow(input.getId(), ModelTypes.INPUT_V1))) .type(ModelTypes.INPUT_V1) .data(data) .constraints(ImmutableSet.copyOf(constraints)) .build(); }
// Ensures that encrypted configuration values are not exported verbatim: the resulting entity
// must contain a string placeholder ("<Encrypted value was replaced...") instead of the secret.
@Test public void exportNativeEntityWithEncryptedValues() { final ImmutableMap<String, Object> fields = ImmutableMap.of( MessageInput.FIELD_TITLE, "Input Title", MessageInput.FIELD_TYPE, "org.graylog2.inputs.misc.jsonpath.JsonPathInput", MessageInput.FIELD_CONFIGURATION, Map.of("encrypted_value", new EncryptedValueService(UUID.randomUUID().toString()).encrypt("secret"))); final InputImpl input = new InputImpl(fields); final ImmutableList<Extractor> extractors = ImmutableList.of(); final InputWithExtractors inputWithExtractors = InputWithExtractors.create(input, extractors); final EntityDescriptor descriptor = EntityDescriptor.create(input.getId(), ModelTypes.INPUT_V1); final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor); final Entity entity = facade.exportNativeEntity(inputWithExtractors, entityDescriptorIds); assertThat(entity).isInstanceOf(EntityV1.class); assertThat(entity.id()).isEqualTo(ModelId.of(entityDescriptorIds.get(descriptor).orElse(null))); assertThat(entity.type()).isEqualTo(ModelTypes.INPUT_V1); final EntityV1 entityV1 = (EntityV1) entity; final InputEntity inputEntity = objectMapper.convertValue(entityV1.data(), InputEntity.class); assertThat(inputEntity.title()).isEqualTo(ValueReference.of("Input Title")); assertThat(inputEntity.type()).isEqualTo(ValueReference.of("org.graylog2.inputs.misc.jsonpath.JsonPathInput")); assertThat(inputEntity.configuration()).hasSize(1).hasEntrySatisfying("encrypted_value", ref -> { assertThat(ref).isInstanceOf(ValueReference.class); assertThat(((ValueReference) ref).valueType()).isEqualTo(ValueType.STRING); assertThat(((ValueReference) ref).asString()).startsWith("<Encrypted value was replaced"); }); }
/**
 * Initializes the extractor with the fields to read and the CSV-specific config.
 * When no explicit fields are requested, falls back to all column names declared
 * in the CSV config; otherwise takes an immutable snapshot of the requested set.
 */
@Override
public void init(Set<String> fields, RecordExtractorConfig recordExtractorConfig) {
    CSVRecordExtractorConfig config = (CSVRecordExtractorConfig) recordExtractorConfig;
    boolean extractAllColumns = (fields == null || fields.isEmpty());
    _fields = extractAllColumns ? config.getColumnNames() : ImmutableSet.copyOf(fields);
    _multiValueDelimiter = config.getMultiValueDelimiter();
}
// Writes a tiny CSV whose header and values carry surrounding spaces and verifies the reader
// trims them from both column names and cell values.
@Test public void testRemovingSurroundingSpaces() throws IOException { CSVRecordReaderConfig csvRecordReaderConfig = new CSVRecordReaderConfig(); // Create a CSV file where records have two values and the second value contains an extra space. File spaceFile = new File(_tempDir, "space.csv"); BufferedWriter writer = new BufferedWriter(new FileWriter(spaceFile)); writer.write("col1 ,col2\n"); writer.write(" value11, value12"); writer.close(); CSVRecordReader csvRecordReader = new CSVRecordReader(); HashSet<String> fieldsToRead = new HashSet<>(); fieldsToRead.add("col1"); fieldsToRead.add("col2"); csvRecordReader.init(spaceFile, fieldsToRead, csvRecordReaderConfig); GenericRow genericRow = new GenericRow(); csvRecordReader.rewind(); // check if parsing succeeded. Assert.assertTrue(csvRecordReader.hasNext()); csvRecordReader.next(genericRow); Assert.assertEquals(genericRow.getValue("col1"), "value11"); Assert.assertEquals(genericRow.getValue("col2"), "value12"); }
/**
 * Returns a copy of the element list of the first array property with the given name,
 * or {@code null} when no property with that name exists.
 *
 * @param name unqualified property name to look up
 * @throws BadFieldValueException if a property with that name exists but is not an array
 */
public List<AbstractField> getUnqualifiedArrayList(String name) throws BadFieldValueException {
    for (AbstractField candidate : getAllProperties()) {
        if (!candidate.getPropertyName().equals(name)) {
            continue;
        }
        if (!(candidate instanceof ArrayProperty)) {
            // Name matched, but the property has the wrong shape.
            throw new BadFieldValueException("Property asked is not an array");
        }
        // First matching array wins; return a defensive copy of its elements.
        return new ArrayList<>(((ArrayProperty) candidate).getContainer().getAllProperties());
    }
    return null;
}
// Builds a Seq array with two rdf:li text entries and verifies both entries come back from
// getUnqualifiedArrayList.
@Test void testArrayList() throws Exception { XMPMetadata meta = XMPMetadata.createXMPMetadata(); ArrayProperty newSeq = meta.getTypeMapping().createArrayProperty(null, "nsSchem", "seqType", Cardinality.Seq); TypeMapping tm = meta.getTypeMapping(); TextType li1 = tm.createText(null, "rdf", "li", "valeur1"); TextType li2 = tm.createText(null, "rdf", "li", "valeur2"); newSeq.getContainer().addProperty(li1); newSeq.getContainer().addProperty(li2); schem.addProperty(newSeq); List<AbstractField> list = schem.getUnqualifiedArrayList("seqType"); assertTrue(list.contains(li1)); assertTrue(list.contains(li2)); }
/**
 * Returns the distinct values of {@code left}, in first-occurrence order, excluding any value
 * that also appears in {@code right}. Returns {@code null} when either input is {@code null}.
 */
@Udf
public <T> List<T> except(
    @UdfParameter(description = "Array of values") final List<T> left,
    @UdfParameter(description = "Array of exceptions") final List<T> right) {
  if (left == null || right == null) {
    return null;
  }
  // HashSet gives O(1) membership checks for the exclusions.
  final Set<T> exclusions = new HashSet<>(right);
  // LinkedHashSet both de-duplicates and preserves first-occurrence order.
  final Set<T> ordered = new LinkedHashSet<>(left);
  ordered.removeAll(exclusions);
  return ordered.stream().collect(Collectors.toList());
}
// Duplicate "foo" is collapsed, "bar" is excluded, and first-occurrence order is preserved.
@Test public void shouldRetainOnlyDistinctValues() { final List<String> input1 = Arrays.asList("foo", " ", "foo", "bar"); final List<String> input2 = Arrays.asList("bar"); final List<String> result = udf.except(input1, input2); assertThat(result, contains("foo", " ")); }
/**
 * Adds the given properties to this SCM's configuration. Keys known to the plugin's metadata
 * are rebuilt so their secure flag matches the plugin's declaration; unknown keys (or a
 * missing plugin) are added as-is.
 */
public void addConfigurations(List<ConfigurationProperty> configurations) {
    ConfigurationPropertyBuilder builder = new ConfigurationPropertyBuilder();
    // Fix: the metadata lookup depends only on the plugin id, not on the individual property,
    // so fetch it once instead of once per loop iteration.
    SCMConfigurations scmConfigurations = SCMMetadataStore.getInstance().getConfigurationMetadata(getPluginId());
    for (ConfigurationProperty property : configurations) {
        if (isValidPluginConfiguration(property.getConfigKeyName(), scmConfigurations)) {
            // Known key: rebuild so the SECURE option from the plugin metadata is honoured.
            configuration.add(builder.create(property.getConfigKeyName(), property.getConfigValue(),
                    property.getEncryptedValue(),
                    scmConfigurationFor(property.getConfigKeyName(), scmConfigurations).getOption(SCMConfiguration.SECURE)));
        } else {
            // Unknown key: keep the property exactly as supplied.
            configuration.add(property);
        }
    }
}
// With a plugin id that has no registered metadata, properties must still be added verbatim.
@Test void shouldAddConfigurationPropertiesForAnyPlugin() { List<ConfigurationProperty> configurationProperties = List.of(create("key", "value", "encValue")); Configuration configuration = new Configuration(); SCM scm = SCMMother.create("id", "name", "does_not_exist", "1.1", configuration); assertThat(configuration.size()).isEqualTo(0); scm.addConfigurations(configurationProperties); assertThat(configuration.size()).isEqualTo(1); }
// On container recovery, re-registers the container's NUMA resources with the allocator.
// Any failure (including Errors) is wrapped in a ResourceHandlerException. Always returns
// null: no privileged operations are needed for reacquisition.
@Override public List<PrivilegedOperation> reacquireContainer(ContainerId containerId) throws ResourceHandlerException { try { numaResourceAllocator.recoverNumaResource(containerId); } catch (Throwable e) { throw new ResourceHandlerException( "Failed to recover numa resource for " + containerId, e); } return null; }
// Recovers a container's NUMA assignment (node 0: 70000 mem, 4 cpus) and then verifies that a
// fresh allocation lands on the remaining node 1, proving recovered resources are marked used.
@Test public void testReacquireContainer() throws Exception { @SuppressWarnings("unchecked") ConcurrentHashMap<ContainerId, Container> mockContainers = mock( ConcurrentHashMap.class); Context mockContext = mock(Context.class); NMStateStoreService mock = mock(NMStateStoreService.class); when(mockContext.getNMStateStore()).thenReturn(mock); ResourceMappings resourceMappings = new ResourceMappings(); AssignedResources assignedRscs = new AssignedResources(); NumaResourceAllocation numaResourceAllocation = new NumaResourceAllocation( "0", 70000, "0", 4); assignedRscs.updateAssignedResources(Arrays.asList(numaResourceAllocation)); resourceMappings.addAssignedResources("numa", assignedRscs); when(mockContainer.getResourceMappings()).thenReturn(resourceMappings); when(mockContainers.get(any())).thenReturn(mockContainer); when(mockContext.getContainers()).thenReturn(mockContainers); numaResourceHandler = new NumaResourceHandlerImpl(conf, mockContext); numaResourceHandler.bootstrap(conf); // recovered numa resources should be added to the used resources and // remaining will be available for further allocation. numaResourceHandler.reacquireContainer( ContainerId.fromString("container_1481156246874_0001_01_000001")); testAllocateNumaResource("container_1481156246874_0001_01_000005", Resource.newInstance(2048, 1), "1", "1"); when(mockContainer.getContainerId()).thenReturn( ContainerId.fromString("container_1481156246874_0001_01_000005")); when(mockContainer.getResource()).thenReturn(Resource.newInstance(2048, 4)); List<PrivilegedOperation> preStart = numaResourceHandler .preStart(mockContainer); assertNull(preStart); }
// Materializes a new RegistryConfig from every field accumulated on this builder. The common
// fields are applied first via super.build(registry); each call returns a fresh instance.
@Override public RegistryConfig build() { RegistryConfig registry = new RegistryConfig(); super.build(registry); registry.setCheck(check); registry.setClient(client); registry.setCluster(cluster); registry.setDefault(isDefault); registry.setDynamic(dynamic); registry.setExtraKeys(extraKeys); registry.setFile(file); registry.setGroup(group); registry.setParameters(parameters); registry.setPassword(password); registry.setPort(port); registry.setProtocol(protocol); registry.setRegister(register); registry.setServer(server); registry.setSession(session); registry.setSimplified(simplified); registry.setSubscribe(subscribe); registry.setTimeout(timeout); registry.setTransporter(transporter); registry.setUsername(username); registry.setVersion(version); registry.setWait(wait); registry.setUseAsConfigCenter(useAsConfigCenter); registry.setUseAsMetadataCenter(useAsMetadataCenter); registry.setAccepts(accepts); registry.setPreferred(preferred); registry.setWeight(weight); registry.setAddress(address); return registry; }
// Sets every builder field, checks each one is copied onto the built RegistryConfig, and
// verifies that two build() calls produce distinct instances.
@Test void build() { RegistryBuilder builder = new RegistryBuilder(); builder.address("address") .username("username") .password("password") .port(8080) .protocol("protocol") .transporter("transporter") .server("server") .client("client") .cluster("cluster") .group("group") .version("version") .timeout(1000) .session(2000) .file("file") .wait(Integer.valueOf(10)) .isCheck(true) .isDynamic(false) .register(true) .subscribe(false) .isDefault(true) .simplified(false) .extraKeys("A") .parameter("default.num", "one") .id("id"); RegistryConfig config = builder.build(); RegistryConfig config2 = builder.build(); Assertions.assertEquals(8080, config.getPort()); Assertions.assertEquals(1000, config.getTimeout()); Assertions.assertEquals(2000, config.getSession()); Assertions.assertEquals(10, config.getWait()); Assertions.assertTrue(config.isCheck()); Assertions.assertFalse(config.isDynamic()); Assertions.assertTrue(config.isRegister()); Assertions.assertFalse(config.isSubscribe()); Assertions.assertTrue(config.isDefault()); Assertions.assertFalse(config.getSimplified()); Assertions.assertEquals("address", config.getAddress()); Assertions.assertEquals("username", config.getUsername()); Assertions.assertEquals("password", config.getPassword()); Assertions.assertEquals("protocol", config.getProtocol()); Assertions.assertEquals("transporter", config.getTransporter()); Assertions.assertEquals("server", config.getServer()); Assertions.assertEquals("client", config.getClient()); Assertions.assertEquals("cluster", config.getCluster()); Assertions.assertEquals("group", config.getGroup()); Assertions.assertEquals("version", config.getVersion()); Assertions.assertEquals("file", config.getFile()); Assertions.assertEquals("A", config.getExtraKeys()); Assertions.assertTrue(config.getParameters().containsKey("default.num")); Assertions.assertEquals("one", config.getParameters().get("default.num")); Assertions.assertEquals("id", config.getId()); Assertions.assertNotSame(config, config2); }
// Accessor for the router's current invoker list.
public BitList<Invoker<T>> getInvokers() { return invokers; }
// Parses a v3.0 tag-router YAML rule and verifies key/priority, the parsed tag names, and the
// tag-name-to-address mapping (including tags matched by condition rather than address list).
@Test void tagRouterRuleParseTestV3() { String tagRouterRuleConfig = "---\n" + "configVersion: v3.0\n" + "force: false\n" + "runtime: true\n" + "enabled: true\n" + "priority: 1\n" + "key: demo-provider\n" + "tags:\n" + " - name: tag1\n" + " match:\n" + " - key: match_key1\n" + " value:\n" + " exact: value1\n" + " - name: tag2\n" + " addresses:\n" + " - \"10.20.3.3:20880\"\n" + " - \"10.20.3.4:20880\"\n" + " match:\n" + " - key: match_key2\n" + " value:\n" + " exact: value2\n" + " - name: tag3\n" + " match:\n" + " - key: match_key2\n" + " value:\n" + " exact: value2\n" + " - name: tag4\n" + " match:\n" + " - key: not_exist\n" + " value:\n" + " exact: not_exist\n" + " - name: tag5\n" + " match:\n" + " - key: match_key2\n" + " value:\n" + " wildcard: \"*\"\n" + "..."; TagRouterRule tagRouterRule = TagRuleParser.parse(tagRouterRuleConfig); TagStateRouter<String> router = Mockito.mock(TagStateRouter.class); Mockito.when(router.getInvokers()).thenReturn(getInvokers()); tagRouterRule.init(router); // assert tags assert tagRouterRule.getKey().equals("demo-provider"); assert tagRouterRule.getPriority() == 1; assert tagRouterRule.getTagNames().contains("tag1"); assert tagRouterRule.getTagNames().contains("tag2"); assert tagRouterRule.getTagNames().contains("tag3"); assert tagRouterRule.getTagNames().contains("tag4"); // assert addresses assert tagRouterRule.getAddresses().size() == 2; assert tagRouterRule.getAddresses().contains("10.20.3.3:20880"); assert tagRouterRule.getTagnameToAddresses().get("tag1").size() == 2; assert tagRouterRule.getTagnameToAddresses().get("tag2").size() == 2; assert tagRouterRule.getTagnameToAddresses().get("tag3").size() == 1; assert tagRouterRule.getTagnameToAddresses().get("tag5").size() == 1; assert tagRouterRule.getTagnameToAddresses().get("tag4") == null; }
// SQL INDETERMINATE operator for IPADDRESS: an IP address value is indeterminate exactly when
// it is NULL, so this simply returns the null flag supplied by the engine.
@ScalarOperator(INDETERMINATE) @SqlType(StandardTypes.BOOLEAN) public static boolean indeterminate(@SqlType(StandardTypes.IPADDRESS) Slice value, @IsNull boolean isNull) { return isNull; }
// NULL IPADDRESS is indeterminate (true); a concrete address is not (false).
@Test public void testIndeterminate() { assertOperator(INDETERMINATE, "CAST(null AS IPADDRESS)", BOOLEAN, true); assertOperator(INDETERMINATE, "IPADDRESS '::2222'", BOOLEAN, false); }
// Serves both "/" and "/index": exposes the configured HTTP domain path to the view model and
// renders the "index" template.
@RequestMapping(value = {"/index", "/"}) public String index(final Model model) { model.addAttribute("domain", ShenyuDomain.getInstance().getHttpPath()); return "index"; }
// GET /index must return 200 and populate the "domain" model attribute with the configured path.
@Test public void testIndex() throws Exception { this.mockMvc.perform(get("/index")) .andExpect(status().isOk()) .andExpect(model().attribute("domain", ShenyuDomain.getInstance().getHttpPath())) .andReturn(); }
// Compiling phase of the intent state machine. On success, advances to Installing with the
// compiled installables. On compilation failure: if a previously stored version has
// installables, transitions to Withdrawing to clean up orphaned flows/resources; otherwise
// the intent goes straight to Failed.
@Override public Optional<IntentProcessPhase> execute() { try { List<Intent> compiled = processor.compile(data.intent(), //TODO consider passing an optional here in the future stored.map(IntentData::installables).orElse(null)); return Optional.of(new Installing(processor, IntentData.compiled(data, compiled), stored)); } catch (IntentException e) { log.warn("Unable to compile intent {} due to:", data.intent(), e); if (stored.filter(x -> !x.installables().isEmpty()).isPresent()) { // removing orphaned flows and deallocating resources return Optional.of(new Withdrawing(processor, IntentData.compiled(data, stored.get().installables()))); } else { return Optional.of(new Failed(data)); } } }
// Successful compilation (no stored data) must advance the phase machine to Installing.
@Test public void testMoveToNextPhaseWithoutError() { IntentData pending = new IntentData(input, INSTALL_REQ, version); expect(processor.compile(input, null)).andReturn(Collections.singletonList(compiled)); replay(processor); Compiling sut = new Compiling(processor, pending, Optional.empty()); Optional<IntentProcessPhase> output = sut.execute(); verify(processor); assertThat(output.get(), is(instanceOf(Installing.class))); }
// Parses the "local" shell subcommand's arguments (args[0] is the subcommand itself and is
// dropped). Prints help and exits the JVM when --help is present; otherwise returns the
// command option list, which for local mode is just ["local"].
static List<String> parseLocal(String[] args) { String[] params = new String[args.length - 1]; System.arraycopy(args, 1, params, 0, params.length); CommandLine commandLine = parse(LOCAL_OPTIONS, params); if (commandLine.hasOption(OPTION_HELP.getOpt())) { printLocalHelp(); System.exit(0); } List<String> options = new ArrayList<>(); options.add("local"); return options; }
// "local" with no extra flags must yield exactly ["local"].
@Test void testParseLocalWithoutOptions() { String[] args = {"local"}; List<String> commandOptions = PythonShellParser.parseLocal(args); String[] expectedCommandOptions = {"local"}; assertThat(commandOptions.toArray()).isEqualTo(expectedCommandOptions); }
// Evaluates the strict metrics expression against a single content file via the visitor.
public boolean eval(ContentFile<?> file) { // TODO: detect the case where a column is missing from the file using file's max field id. return new MetricsEvalVisitor().eval(file); }
// Strict evaluation of not(equal(id, v)): matches only when v lies strictly outside the
// file's [min, max] range for "id"; any v inside the range may be present, so no strict match.
@Test public void testIntegerNotEqRewritten() { boolean shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", INT_MIN_VALUE - 25))).eval(FILE); assertThat(shouldRead).as("Should match: no values == 5").isTrue(); shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", INT_MIN_VALUE - 1))).eval(FILE); assertThat(shouldRead).as("Should match: no values == 39").isTrue(); shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", INT_MIN_VALUE))).eval(FILE); assertThat(shouldRead).as("Should not match: some value may be == 30").isFalse(); shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", INT_MAX_VALUE - 4))).eval(FILE); assertThat(shouldRead).as("Should not match: some value may be == 75").isFalse(); shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", INT_MAX_VALUE))).eval(FILE); assertThat(shouldRead).as("Should not match: some value may be == 79").isFalse(); shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", INT_MAX_VALUE + 1))).eval(FILE); assertThat(shouldRead).as("Should match: no values == 80").isTrue(); shouldRead = new StrictMetricsEvaluator(SCHEMA, not(equal("id", INT_MAX_VALUE + 6))).eval(FILE); assertThat(shouldRead).as("Should read: no values == 85").isTrue(); }
/**
 * Parses a CGM Specific Ops Control Point response packet.
 *
 * <p>Packet layout: Op Code (UINT8) + operand (size depends on the op code)
 * + optional CRC-16 (MCRF4XX, little-endian) over everything before it.
 * Malformed packets are routed to {@code onInvalidDataReceived}; packets with
 * a bad CRC to {@code onCGMSpecificOpsResponseReceivedWithCrcError}.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);

    // Minimum packet: op code + at least one operand byte.
    if (data.size() < 2) {
        onInvalidDataReceived(device, data);
        return;
    }

    // Read the Op Code
    final int opCode = data.getIntValue(Data.FORMAT_UINT8, 0);

    // Estimate the expected operand size based on the Op Code
    int expectedOperandSize;
    switch (opCode) {
        case OP_CODE_COMMUNICATION_INTERVAL_RESPONSE ->
                // UINT8
                expectedOperandSize = 1;
        case OP_CODE_CALIBRATION_VALUE_RESPONSE ->
                // Calibration Value
                expectedOperandSize = 10;
        case OP_CODE_PATIENT_HIGH_ALERT_LEVEL_RESPONSE,
             OP_CODE_PATIENT_LOW_ALERT_LEVEL_RESPONSE,
             OP_CODE_HYPO_ALERT_LEVEL_RESPONSE,
             OP_CODE_HYPER_ALERT_LEVEL_RESPONSE,
             OP_CODE_RATE_OF_DECREASE_ALERT_LEVEL_RESPONSE,
             OP_CODE_RATE_OF_INCREASE_ALERT_LEVEL_RESPONSE ->
                // SFLOAT
                expectedOperandSize = 2;
        case OP_CODE_RESPONSE_CODE ->
                // Request Op Code (UINT8), Response Code Value (UINT8)
                expectedOperandSize = 2;
        default -> {
            // Unknown op code — reject the whole packet.
            onInvalidDataReceived(device, data);
            return;
        }
    }

    // Verify packet length: either exactly op code + operand, or that plus a 2-byte CRC.
    if (data.size() != 1 + expectedOperandSize && data.size() != 1 + expectedOperandSize + 2) {
        onInvalidDataReceived(device, data);
        return;
    }

    // Verify CRC if present
    final boolean crcPresent = data.size() == 1 + expectedOperandSize + 2; // opCode + expected operand + CRC
    if (crcPresent) {
        // The CRC trails the operand and covers all preceding bytes.
        final int expectedCrc = data.getIntValue(Data.FORMAT_UINT16_LE, 1 + expectedOperandSize);
        final int actualCrc = CRC16.MCRF4XX(data.getValue(), 0, 1 + expectedOperandSize);
        if (expectedCrc != actualCrc) {
            onCGMSpecificOpsResponseReceivedWithCrcError(device, data);
            return;
        }
    }

    // Op codes with a non-SFLOAT (or compound) operand are handled here and return early.
    switch (opCode) {
        case OP_CODE_COMMUNICATION_INTERVAL_RESPONSE -> {
            final int interval = data.getIntValue(Data.FORMAT_UINT8, 1);
            onContinuousGlucoseCommunicationIntervalReceived(device, interval, crcPresent);
            return;
        }
        case OP_CODE_CALIBRATION_VALUE_RESPONSE -> {
            // Operand layout (offsets from packet start):
            //   1-2: glucose concentration (SFLOAT), 3-4: calibration time (UINT16),
            //   5: type (low nibble) + sample location (high nibble),
            //   6-7: next calibration time, 8-9: record number, 10: status.
            final float glucoseConcentrationOfCalibration = data.getFloatValue(Data.FORMAT_SFLOAT, 1);
            final int calibrationTime = data.getIntValue(Data.FORMAT_UINT16_LE, 3);
            final int calibrationTypeAndSampleLocation = data.getIntValue(Data.FORMAT_UINT8, 5);
            @SuppressLint("WrongConstant")
            final int calibrationType = calibrationTypeAndSampleLocation & 0x0F;
            final int calibrationSampleLocation = calibrationTypeAndSampleLocation >> 4;
            final int nextCalibrationTime = data.getIntValue(Data.FORMAT_UINT16_LE, 6);
            final int calibrationDataRecordNumber = data.getIntValue(Data.FORMAT_UINT16_LE, 8);
            final int calibrationStatus = data.getIntValue(Data.FORMAT_UINT8, 10);
            onContinuousGlucoseCalibrationValueReceived(device, glucoseConcentrationOfCalibration,
                    calibrationTime, nextCalibrationTime, calibrationType, calibrationSampleLocation,
                    calibrationDataRecordNumber, new CGMCalibrationStatus(calibrationStatus), crcPresent);
            return;
        }
        case OP_CODE_RESPONSE_CODE -> {
            final int requestCode = data.getIntValue(Data.FORMAT_UINT8, 1); // ignore
            final int responseCode = data.getIntValue(Data.FORMAT_UINT8, 2);
            if (responseCode == CGM_RESPONSE_SUCCESS) {
                onCGMSpecificOpsOperationCompleted(device, requestCode, crcPresent);
            } else {
                onCGMSpecificOpsOperationError(device, requestCode, responseCode, crcPresent);
            }
            return;
        }
    }

    // Read SFLOAT value — all remaining op codes carry a single SFLOAT alert level.
    final float value = data.getFloatValue(Data.FORMAT_SFLOAT, 1);
    switch (opCode) {
        case OP_CODE_PATIENT_HIGH_ALERT_LEVEL_RESPONSE ->
                onContinuousGlucosePatientHighAlertReceived(device, value, crcPresent);
        case OP_CODE_PATIENT_LOW_ALERT_LEVEL_RESPONSE ->
                onContinuousGlucosePatientLowAlertReceived(device, value, crcPresent);
        case OP_CODE_HYPO_ALERT_LEVEL_RESPONSE ->
                onContinuousGlucoseHypoAlertReceived(device, value, crcPresent);
        case OP_CODE_HYPER_ALERT_LEVEL_RESPONSE ->
                onContinuousGlucoseHyperAlertReceived(device, value, crcPresent);
        case OP_CODE_RATE_OF_DECREASE_ALERT_LEVEL_RESPONSE ->
                onContinuousGlucoseRateOfDecreaseAlertReceived(device, value, crcPresent);
        case OP_CODE_RATE_OF_INCREASE_ALERT_LEVEL_RESPONSE ->
                onContinuousGlucoseRateOfIncreaseAlertReceived(device, value, crcPresent);
    }
}
@Test
public void onContinuousGlucoseRateOfIncreaseAlertReceived() {
    // Op code 24 with SFLOAT operand 0x400A (mantissa 10, exponent 4) = 100000, no CRC.
    final byte[] packet = {24, 10, 64};
    callback.onDataReceived(null, new Data(packet));
    assertEquals("Level", 100000f, rateOfIncreaseAlertLevel, 0.01);
    assertFalse(secured);
}
/**
 * Left-pads {@code input} with {@code padding} up to {@code targetLen} characters.
 *
 * <p>If the input is already longer than the target length it is truncated to
 * {@code targetLen}. Returns {@code null} for a null input, a null/empty
 * padding, or a null/negative target length.
 *
 * @param input the string to pad
 * @param targetLen the exact length of the result
 * @param padding the string repeated on the left as filler
 * @return the padded (or truncated) string, or {@code null} on invalid arguments
 */
@Udf
public String lpad(
    @UdfParameter(description = "String to be padded") final String input,
    @UdfParameter(description = "Target length") final Integer targetLen,
    @UdfParameter(description = "Padding string") final String padding) {
  // Any unusable argument yields null, mirroring SQL semantics.
  if (input == null || padding == null || padding.isEmpty()
      || targetLen == null || targetLen < 0) {
    return null;
  }

  final int padLen = Math.max(targetLen - input.length(), 0);
  final StringBuilder padded = new StringBuilder(targetLen + padding.length());
  // Repeat the padding until it covers the required prefix length...
  while (padded.length() < padLen) {
    padded.append(padding);
  }
  // ...then trim any overshoot from a partially-fitting repetition.
  padded.setLength(padLen);
  padded.append(input);
  // Truncate inputs longer than the target length.
  padded.setLength(targetLen);
  return padded.toString();
}
@Test
public void shouldPadInputBytes() {
    // Left-pad bytes {1,2,3} with {4,5} up to 7 bytes: 4 5 4 5 1 2 3.
    final byte[] expected = {4, 5, 4, 5, 1, 2, 3};
    final ByteBuffer padded = udf.lpad(BYTES_123, 7, BYTES_45);
    assertThat(padded, is(ByteBuffer.wrap(expected)));
}