focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Constructor: initializes the view's state (data map exposed read-only via immutableData, listeners
// with their mutex, compaction strategy, refresh/position bookkeeping), then asynchronously creates a
// reader from MessageId.earliest with auto partition updates. readCompacted is enabled only for
// persistent topics; crypto key reader and failure action are applied when configured.
TableViewImpl(PulsarClientImpl client, Schema<T> schema, TableViewConfigurationData conf) { this.conf = conf; this.isPersistentTopic = conf.getTopicName().startsWith(TopicDomain.persistent.toString()); this.data = new ConcurrentHashMap<>(); this.immutableData = Collections.unmodifiableMap(data); this.listeners = new ArrayList<>(); this.listenersMutex = new ReentrantLock(); this.compactionStrategy = TopicCompactionStrategy.load(TABLE_VIEW_TAG, conf.getTopicCompactionStrategyClassName()); this.pendingRefreshRequests = new ConcurrentHashMap<>(); this.lastReadPositions = new ConcurrentHashMap<>(); ReaderBuilder<T> readerBuilder = client.newReader(schema) .topic(conf.getTopicName()) .startMessageId(MessageId.earliest) .autoUpdatePartitions(true) .autoUpdatePartitionsInterval((int) conf.getAutoUpdatePartitionsSeconds(), TimeUnit.SECONDS) .poolMessages(true) .subscriptionName(conf.getSubscriptionName()); if (isPersistentTopic) { readerBuilder.readCompacted(true); } CryptoKeyReader cryptoKeyReader = conf.getCryptoKeyReader(); if (cryptoKeyReader != null) { readerBuilder.cryptoKeyReader(cryptoKeyReader); } readerBuilder.cryptoFailureAction(conf.getCryptoFailureAction()); this.reader = readerBuilder.createAsync(); }
// Verifies the TableViewImpl constructor succeeds when a CryptoKeyReader is configured.
@Test public void testTableViewImpl() { data.setCryptoKeyReader(mock(CryptoKeyReader.class)); TableView tableView = new TableViewImpl(client, Schema.BYTES, data); assertNotNull(tableView); }
// Returns (creating and caching on first use) a dynamic proxy implementing the requested
// PipelineOptions sub-interface. Uses double-checked locking around the interfaceToProxyCache:
// the second getInstance() inside the synchronized block guards against a racing creator.
// The registration step validates the interface against the already-known interfaces before
// building the proxy, and computedProperties is swapped atomically via updated(...).
<T extends PipelineOptions> T as(Class<T> iface) { checkNotNull(iface); checkArgument(iface.isInterface(), "Not an interface: %s", iface); T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface); if (existingOption == null) { synchronized (this) { // double check existingOption = computedProperties.interfaceToProxyCache.getInstance(iface); if (existingOption == null) { Registration<T> registration = PipelineOptionsFactory.CACHE .get() .validateWellFormed(iface, computedProperties.knownInterfaces); List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors(); Class<T> proxyClass = registration.getProxyClass(); existingOption = InstanceBuilder.ofType(proxyClass) .fromClass(proxyClass) .withArg(InvocationHandler.class, this) .build(); computedProperties = computedProperties.updated(iface, existingOption, propertyDescriptors); } } } return existingOption; }
// Checks that every @Default-annotated getter on the DefaultAnnotations proxy returns its declared default.
@Test public void testAnnotationDefaults() throws Exception { ProxyInvocationHandler handler = new ProxyInvocationHandler(Maps.newHashMap()); DefaultAnnotations proxy = handler.as(DefaultAnnotations.class); assertTrue(proxy.getBoolean()); assertEquals('a', proxy.getChar()); assertEquals((byte) 4, proxy.getByte()); assertEquals((short) 5, proxy.getShort()); assertEquals(6, proxy.getInt()); assertEquals(7, proxy.getLong()); assertEquals(8f, proxy.getFloat(), 0f); assertEquals(9d, proxy.getDouble(), 0d); assertEquals("testString", proxy.getString()); assertEquals(DefaultAnnotations.class, proxy.getClassOption()); assertEquals(EnumType.MyEnum, proxy.getEnum()); assertEquals("testOptionFactory", proxy.getComplex()); }
/**
 * Renders the given AST into SQL text.
 *
 * @param root the AST node to format
 * @return the formatted SQL with any trailing newlines removed
 */
public static String formatSql(final AstNode root) {
    final StringBuilder buffer = new StringBuilder();
    final Formatter formatter = new Formatter(buffer);
    formatter.process(root, 0);
    final String rendered = buffer.toString();
    // Formatter output may end with newlines; strip them for a clean result.
    return StringUtils.stripEnd(rendered, "\n");
}
// INSERT VALUES without a column list should round-trip through the formatter minus the trailing semicolon.
@Test public void shouldFormatInsertValuesNoSchema() { final String statementString = "INSERT INTO ADDRESS VALUES (2);"; final Statement statement = parseSingle(statementString); final String result = SqlFormatter.formatSql(statement); assertThat(result, is("INSERT INTO ADDRESS VALUES (2)")); }
@Override
public void subscribe(URL url, NotifyListener listener) {
    // Delegate to the wrapped registry when present; the finally block guarantees
    // service listeners are notified even if the delegate subscription throws.
    try {
        Registry delegate = registry;
        if (delegate != null) {
            delegate.subscribe(url, listener);
        }
    } finally {
        listenerEvent(serviceListener -> serviceListener.onSubscribe(url, registry));
    }
}
// subscribe() must both delegate to the underlying registry and fire the registered
// RegistryServiceListener's onSubscribe callback exactly once.
@Test void testSubscribe() { Map<String, String> parameters = new HashMap<>(); parameters.put(INTERFACE_KEY, DemoService.class.getName()); parameters.put("registry", "zookeeper"); parameters.put("register", "true"); parameters.put(REGISTER_IP_KEY, "172.23.236.180"); parameters.put("registry.listeners", "listener-one"); Map<String, Object> attributes = new HashMap<>(); ServiceConfigURL serviceConfigURL = new ServiceConfigURL( "registry", "127.0.0.1", 2181, "org.apache.dubbo.registry.RegistryService", parameters); Map<String, String> refer = new HashMap<>(); attributes.put(REFER_KEY, refer); attributes.put("key1", "value1"); URL url = serviceConfigURL.addAttributes(attributes); RegistryFactory registryFactory = mock(RegistryFactory.class); Registry registry = mock(Registry.class); NotifyListener notifyListener = mock(NotifyListener.class); when(registryFactory.getRegistry(url)).thenReturn(registry); RegistryFactoryWrapper registryFactoryWrapper = new RegistryFactoryWrapper(registryFactory); Registry registryWrapper = registryFactoryWrapper.getRegistry(url); Assertions.assertTrue(registryWrapper instanceof ListenerRegistryWrapper); URL subscribeUrl = new ServiceConfigURL("dubbo", "127.0.0.1", 20881, DemoService.class.getName(), parameters); RegistryServiceListener listener = Mockito.mock(RegistryServiceListener.class); RegistryServiceListener1.delegate = listener; registryWrapper.subscribe(subscribeUrl, notifyListener); verify(listener, times(1)).onSubscribe(subscribeUrl, registry); }
// Registers a source node for the given topics. Validates up front: at least one topic, non-null
// unused node name, each topic non-null and not already registered. Each topic is added to the
// appropriate offset-reset list and the raw source-topic set before the node factory is created.
// nodeGroups is invalidated (set to null) so groupings are recomputed lazily.
public final void addSource(final Topology.AutoOffsetReset offsetReset, final String name, final TimestampExtractor timestampExtractor, final Deserializer<?> keyDeserializer, final Deserializer<?> valDeserializer, final String... topics) { if (topics.length == 0) { throw new TopologyException("You must provide at least one topic"); } Objects.requireNonNull(name, "name must not be null"); if (nodeFactories.containsKey(name)) { throw new TopologyException("Processor " + name + " is already added."); } for (final String topic : topics) { Objects.requireNonNull(topic, "topic names cannot be null"); validateTopicNotAlreadyRegistered(topic); maybeAddToResetList(earliestResetTopics, latestResetTopics, offsetReset, topic); rawSourceTopicNames.add(topic); } nodeFactories.put(name, new SourceNodeFactory<>(name, topics, null, timestampExtractor, keyDeserializer, valDeserializer)); nodeToSourceTopics.put(name, Arrays.asList(topics)); nodeGrouper.add(name); nodeGroups = null; }
// Adding a second source node for an already-registered topic must throw TopologyException.
@Test public void testAddSourceWithSameTopic() { builder.addSource(null, "source", null, null, null, "topic-1"); try { builder.addSource(null, "source-2", null, null, null, "topic-1"); fail("Should throw TopologyException with topic conflict"); } catch (final TopologyException expected) { /* ok */ } }
// Builds a schema tree from the parser's token stream. State machine per token:
//  - struct start: push a new struct node (attached to the current parent with any pending name),
//  - FIELD_NAME: remember the name's token offset for the next node,
//  - struct end: pop back to the parent struct,
//  - anything else: attach a terminal (scalar) node recording its value offset.
// A dummy root collects the top-level node; returns that node detached (parent nulled),
// or null when the input produced no tokens / no top-level child.
public static JsonSchemaNode createSchema(JsonParser parser) throws IOException { JsonSchemaNode dummy = new JsonSchemaStructNode(null); JsonSchemaStructNode parent = (JsonSchemaStructNode) dummy; JsonToken currentToken = parser.nextToken(); int nameLocation = -1; if (currentToken == null) { return null; } while (currentToken != null) { if (currentToken.isStructStart()) { JsonSchemaStructNode structNode = new JsonSchemaStructNode(parent); JsonSchemaNameValue nameValue = new JsonSchemaNameValue(nameLocation, structNode); parent.addChild(nameValue); parent = structNode; nameLocation = -1; } else if (currentToken == JsonToken.FIELD_NAME) { nameLocation = (int) getTokenLocation(parser); } else if (currentToken.isStructEnd()) { parent = parent.getParent(); nameLocation = -1; } else { JsonSchemaTerminalNode terminalNode = new JsonSchemaTerminalNode(parent); terminalNode.setValueStartLocation((int) getTokenLocation(parser)); JsonSchemaNameValue nameValue = new JsonSchemaNameValue(nameLocation, terminalNode); parent.addChild(nameValue); nameLocation = -1; } currentToken = parser.nextToken(); } JsonSchemaNameValue nameValue = ((JsonSchemaStructNode) dummy).getChild(0); if (nameValue == null) { return null; } dummy = nameValue.getValue(); dummy.setParent(null); return dummy; }
// An empty JSON document must produce a null schema.
@Test public void testEmptyStringReturnsNullSchema() throws IOException { NavigableJsonInputAdapter input = toAdapter(new HazelcastJsonValue("")); JsonSchemaNode description = JsonSchemaHelper.createSchema(createParserFromInput(input)); assertNull(description); }
@Override
public List<Column> getPartitionColumns(Map<ColumnId, Column> idToColumn) {
    // Resolve the partition columns, then override a column's type whenever the
    // matching partition expression yields a different, valid primitive type.
    List<Column> resolved = MetaUtils.getColumnsByColumnIds(idToColumn, partitionColumnIds);
    for (int idx = 0; idx < resolved.size(); idx++) {
        Expr nameExpr = partitionExprs.get(idx).convertToColumnNameExpr(idToColumn);
        PrimitiveType exprType = nameExpr.getType().getPrimitiveType();
        Column original = resolved.get(idx);
        boolean typeDiffers = exprType != PrimitiveType.INVALID_TYPE
                && exprType != original.getType().getPrimitiveType();
        if (typeDiffers) {
            // Replace with a copy so the shared column metadata is not mutated.
            Column retyped = new Column(original);
            retyped.setType(nameExpr.getType());
            resolved.set(idx, retyped);
        }
    }
    return resolved;
}
// A function-call partition expression whose type matches the column must leave the column unchanged.
@Test public void testInitUseFunction() { partitionExprs.add(ColumnIdExpr.create(functionCallExpr)); List<Column> schema = Collections.singletonList(k2); ExpressionRangePartitionInfo expressionRangePartitionInfo = new ExpressionRangePartitionInfo(partitionExprs, schema, PartitionType.RANGE); List<Column> partitionColumns = expressionRangePartitionInfo.getPartitionColumns( MetaUtils.buildIdToColumn(schema)); Assert.assertEquals(partitionColumns.size(), 1); Assert.assertEquals(partitionColumns.get(0), k2); }
/**
 * Convenience overload of {@link #getPropertiesWithPrefix(String, boolean)}
 * with the boolean flag defaulted to {@code false}.
 *
 * @param prefix the property-name prefix to filter by
 * @return the matching properties
 */
public Map<String, String> getPropertiesWithPrefix(String prefix) {
    final boolean fullyQualified = false;
    return getPropertiesWithPrefix(prefix, fullyQualified);
}
// Exercises prefix extraction: nested prefixes, the trailing-dot variant (dot disregarded),
// and the bare "root" prefix returning all descendants keyed by their remaining path.
@Test public void testGetPropertiesWithPrefix() { ConfigurationProperties configurationProperties = new ConfigurationProperties(PROPERTIES); Map<String, String> props = configurationProperties .getPropertiesWithPrefix("root.1.2"); Assert.assertEquals(4, props.size()); Assert.assertTrue(props.containsKey("")); Assert.assertEquals("TEST_VALUE_3", props.get("")); Assert.assertTrue(props.containsKey("4")); Assert.assertEquals("TEST_VALUE_3_1", props.get("4")); Assert.assertTrue(props.containsKey("3")); Assert.assertEquals("TEST_VALUE_1", props.get("3")); Assert.assertTrue(props.containsKey("4.5")); Assert.assertEquals("TEST_VALUE_3_2", props.get("4.5")); // Test the scenario where the prefix has a dot appended to it // (see CapacitySchedulerConfiguration.getQueuePrefix(String queue)). // The dot is disregarded. props = configurationProperties .getPropertiesWithPrefix("root.1.2.4."); Assert.assertEquals(2, props.size()); Assert.assertTrue(props.containsKey("")); Assert.assertEquals("TEST_VALUE_3_1", props.get("")); Assert.assertTrue(props.containsKey("5")); Assert.assertEquals("TEST_VALUE_3_2", props.get("5")); Map<String, String> propsWithRootPrefix = configurationProperties .getPropertiesWithPrefix("root"); Assert.assertEquals(6, propsWithRootPrefix.size()); Assert.assertTrue(propsWithRootPrefix.containsKey("")); Assert.assertEquals("TEST_VALUE_4", propsWithRootPrefix.get("")); Assert.assertTrue(propsWithRootPrefix.containsKey("1.2.3")); Assert.assertEquals("TEST_VALUE_1", propsWithRootPrefix.get("1.2.3")); Assert.assertTrue(propsWithRootPrefix.containsKey("1")); Assert.assertEquals("TEST_VALUE_2", propsWithRootPrefix.get("1")); Assert.assertTrue(propsWithRootPrefix.containsKey("1.2")); Assert.assertEquals("TEST_VALUE_3", propsWithRootPrefix.get("1.2")); Assert.assertTrue(propsWithRootPrefix.containsKey("1.2.4")); Assert.assertEquals("TEST_VALUE_3_1", propsWithRootPrefix.get("1.2.4")); Assert.assertTrue(propsWithRootPrefix.containsKey("1.2.4.5")); 
Assert.assertEquals("TEST_VALUE_3_2", propsWithRootPrefix.get("1.2.4.5")); }
/**
 * Looks up the value-and-timestamp for a key from whichever backing store is set.
 *
 * @param key the key to look up
 * @return the value and timestamp, or {@code null} if absent
 * @throws IllegalStateException if neither backing store was initialized
 */
public ValueAndTimestamp<V> get(final K key) {
    if (timestampedStore != null) {
        return timestampedStore.get(key);
    }
    if (versionedStore != null) {
        final VersionedRecord<V> record = versionedStore.get(key);
        if (record == null) {
            return null;
        }
        return ValueAndTimestamp.make(record.value(), record.timestamp());
    }
    throw new IllegalStateException("KeyValueStoreWrapper must be initialized with either timestamped or versioned store");
}
// A missing key in the versioned store must surface as null, not a NullPointerException.
@Test public void shouldGetNullFromVersionedStore() { givenWrapperWithVersionedStore(); when(versionedStore.get(KEY)).thenReturn(null); assertThat(wrapper.get(KEY), nullValue()); }
@Override
public void write(JsonWriter json) throws InterruptedException {
    // Snapshot cluster health once, then emit each section from that snapshot.
    final ClusterHealth clusterHealth = healthChecker.checkCluster();
    final Health overallHealth = clusterHealth.getHealth();
    writeHealth(overallHealth, json);
    writeGlobalSections(json);
    writeApplicationNodes(json, clusterHealth);
    writeSearchNodes(json, clusterHealth);
}
// Pins the exact JSON layout produced for the cluster info sections (health, global, app and search nodes).
@Test public void writeInfo() throws InterruptedException { StringWriter writer = new StringWriter(); JsonWriter jsonWriter = JsonWriter.of(writer); jsonWriter.beginObject(); underTest.write(jsonWriter); jsonWriter.endObject(); assertThat(writer).hasToString("{\"Health\":\"GREEN\"," + "\"Health Causes\":[],\"\":{\"name\":\"globalInfo\"}," + "\"Application Nodes\":[{\"Name\":\"appNodes\",\"\":{\"name\":\"appNodes\"}}]," + "\"Search Nodes\":[{\"Name\":\"searchNodes\",\"\":{\"name\":\"searchNodes\"}}]}"); }
/**
 * Synchronously closes by waiting on {@code closeAsync()}.
 *
 * @throws IOException if the asynchronous close failed with an I/O error
 */
public void close() throws IOException {
    try {
        closeAsync().get();
    } catch (InterruptedException ie) {
        // Preserve the interrupt status for callers higher up the stack.
        Thread.currentThread().interrupt();
    } catch (ExecutionException ee) {
        final Throwable cause = ee.getCause();
        if (cause instanceof IOException) {
            throw (IOException) cause;
        }
        throw new PulsarServerException(cause);
    }
}
@SuppressWarnings("deprecation") @Test public void testTlsAuthAllowInsecure() throws Exception { final String topicName = "persistent://prop/ns-abc/newTopic"; final String subName = "newSub"; Authentication auth; Set<String> providers = new HashSet<>(); providers.add("org.apache.pulsar.broker.authentication.AuthenticationProviderTls"); conf.setAuthenticationEnabled(true); conf.setAuthenticationProviders(providers); conf.setBrokerServicePortTls(Optional.of(0)); conf.setWebServicePortTls(Optional.of(0)); conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH); conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH); conf.setTlsAllowInsecureConnection(true); conf.setNumExecutorThreadPoolSize(5); restartBroker(); Map<String, String> authParams = new HashMap<>(); authParams.put("tlsCertFile", getTlsFileForClient("admin.cert")); authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8")); PulsarClient pulsarClient = null; // Case 1: Access without client certificate try { pulsarClient = PulsarClient.builder().serviceUrl(brokerUrlTls.toString()).enableTls(true) .allowTlsInsecureConnection(true).statsInterval(0, TimeUnit.SECONDS) .operationTimeout(1000, TimeUnit.MILLISECONDS).build(); @Cleanup Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subName) .subscribe(); fail("should fail"); } catch (Exception e) { assertTrue(e.getMessage().contains("Unauthorized")); } finally { pulsarClient.close(); } // Case 2: Access with client certificate try { auth = new AuthenticationTls(); auth.configure(authParams); pulsarClient = PulsarClient.builder().authentication(auth).serviceUrl(brokerUrlTls.toString()) .enableTls(true).allowTlsInsecureConnection(true).statsInterval(0, TimeUnit.SECONDS) .operationTimeout(1000, TimeUnit.MILLISECONDS).build(); @Cleanup Consumer<byte[]> consumer = pulsarClient.newConsumer().topic(topicName).subscriptionName(subName) .subscribe(); } catch (Exception e) { fail("should not fail"); } finally { pulsarClient.close(); } }
// Downloads all requested state handles in parallel on the transfer executor and waits for
// completion. An internal CloseableRegistry (registered with the external one so outside close
// signals propagate) allows fine-grained shutdown of in-flight downloads. On any failure every
// download destination directory is deleted; IOExceptions are unwrapped and rethrown, everything
// else is wrapped in FlinkRuntimeException. The internal closer is unregistered and closed in
// the finally block regardless of outcome.
public void transferAllStateDataToDirectory( Collection<StateHandleDownloadSpec> downloadRequests, CloseableRegistry closeableRegistry) throws Exception { // We use this closer for fine-grained shutdown of all parallel downloading. CloseableRegistry internalCloser = new CloseableRegistry(); // Make sure we also react to external close signals. closeableRegistry.registerCloseable(internalCloser); try { // We have to wait for all futures to be completed, to make sure in // case of failure that we will clean up all the files FutureUtils.completeAll( createDownloadRunnables(downloadRequests, internalCloser).stream() .map( runnable -> CompletableFuture.runAsync( runnable, transfer.getExecutorService())) .collect(Collectors.toList())) .get(); } catch (Exception e) { downloadRequests.stream() .map(StateHandleDownloadSpec::getDownloadDestination) .map(Path::toFile) .forEach(FileUtils::deleteDirectoryQuietly); // Error reporting Throwable throwable = ExceptionUtils.stripExecutionException(e); throwable = ExceptionUtils.stripException(throwable, RuntimeException.class); if (throwable instanceof IOException) { throw (IOException) throwable; } else { throw new FlinkRuntimeException("Failed to download data for state handles.", e); } } finally { // Unregister and close the internal closer. if (closeableRegistry.unregisterCloseable(internalCloser)) { IOUtils.closeQuietly(internalCloser); } } }
// Downloads three remote handles concurrently (4 threads) and verifies every sub-handle's bytes
// landed in the expected destination file.
@Test public void testMultiThreadRestoreCorrectly() throws Exception { int numRemoteHandles = 3; int numSubHandles = 6; byte[][][] contents = createContents(numRemoteHandles, numSubHandles); List<StateHandleDownloadSpec> downloadRequests = new ArrayList<>(numRemoteHandles); for (int i = 0; i < numRemoteHandles; ++i) { downloadRequests.add( createDownloadRequestForContent( temporaryFolder.newFolder().toPath(), contents[i], i)); } try (RocksDBStateDownloader rocksDBStateDownloader = new RocksDBStateDownloader(4)) { rocksDBStateDownloader.transferAllStateDataToDirectory( downloadRequests, new CloseableRegistry()); } for (int i = 0; i < numRemoteHandles; ++i) { StateHandleDownloadSpec downloadRequest = downloadRequests.get(i); Path dstPath = downloadRequest.getDownloadDestination(); Assert.assertTrue(dstPath.toFile().exists()); for (int j = 0; j < numSubHandles; ++j) { assertStateContentEqual( contents[i][j], dstPath.resolve(String.format("sharedState-%d-%d", i, j))); } } }
/**
 * Renders this event type as a single string by joining its three parts
 * with the class-level {@code JOINER} separator.
 */
public String toTypeString() {
    final String ns = namespace();
    final String obj = object();
    final String act = action();
    return JOINER.join(ns, obj, act);
}
// toTypeString must reproduce the colon-separated string the event type was created from.
@Test public void testToTypeString() throws Exception { final AuditEventType type = AuditEventType.create("namespace:object:action"); assertThat(type.toTypeString()).isEqualTo("namespace:object:action"); }
/**
 * Strict-by-default overload: merges {@code toMerge} into the accumulated global
 * metadata, delegating to the three-argument variant with the flag set to {@code true}.
 */
static GlobalMetaData mergeInto(FileMetaData toMerge, GlobalMetaData mergedMetadata) {
    final boolean strict = true;
    return mergeInto(toMerge, mergedMetadata, strict);
}
// Conflicting key-values must fail under the strict merge strategy but be concatenated
// (order unspecified) under the concatenating strategy.
@Test public void testMergeMetadataWithConflictingKeyValues() { Map<String, String> keyValues1 = new HashMap<String, String>() { { put("a", "b"); } }; Map<String, String> keyValues2 = new HashMap<String, String>() { { put("a", "c"); } }; FileMetaData md1 = new FileMetaData( new MessageType( "root1", new PrimitiveType(REPEATED, BINARY, "a"), new PrimitiveType(OPTIONAL, BINARY, "b")), keyValues1, "test"); FileMetaData md2 = new FileMetaData( new MessageType( "root1", new PrimitiveType(REPEATED, BINARY, "a"), new PrimitiveType(OPTIONAL, BINARY, "b")), keyValues2, "test"); GlobalMetaData merged = ParquetFileWriter.mergeInto(md2, ParquetFileWriter.mergeInto(md1, null)); try { merged.merge(new StrictKeyValueMetadataMergeStrategy()); fail("Merge metadata is expected to fail because of conflicting key values"); } catch (RuntimeException e) { // expected because of conflicting values assertTrue(e.getMessage().contains("could not merge metadata")); } Map<String, String> mergedKeyValues = merged.merge(new ConcatenatingKeyValueMetadataMergeStrategy()).getKeyValueMetaData(); assertEquals(1, mergedKeyValues.size()); String mergedValue = mergedKeyValues.get("a"); assertTrue(mergedValue.equals("b,c") || mergedValue.equals("c,b")); }
// HTTP deregister endpoint: builds the instance from request parameters (ephemeral flag
// defaulted from switchDomain), validates the service-name format, removes the instance,
// then publishes a DeregisterInstanceTraceEvent. Returns the literal "ok" on success.
@CanDistro @DeleteMapping @TpsControl(pointName = "NamingInstanceDeregister", name = "HttpNamingInstanceDeregister") @Secured(action = ActionTypes.WRITE) public String deregister(HttpServletRequest request) throws Exception { Instance instance = HttpRequestInstanceBuilder.newBuilder() .setDefaultInstanceEphemeral(switchDomain.isDefaultInstanceEphemeral()).setRequest(request).build(); String namespaceId = WebUtils.optional(request, CommonParams.NAMESPACE_ID, Constants.DEFAULT_NAMESPACE_ID); String serviceName = WebUtils.required(request, CommonParams.SERVICE_NAME); NamingUtils.checkServiceNameFormat(serviceName); getInstanceOperator().removeInstance(namespaceId, serviceName, instance); NotifyCenter.publishEvent(new DeregisterInstanceTraceEvent(System.currentTimeMillis(), NamingRequestUtil.getSourceIpForHttpRequest(request), false, DeregisterInstanceReason.REQUEST, namespaceId, NamingUtils.getGroupName(serviceName), NamingUtils.getServiceName(serviceName), instance.getIp(), instance.getPort())); return "ok"; }
// Verifies the deregister endpoint removes the instance and publishes a trace event.
// NOTE(review): waits a fixed 1s for the async event — potentially flaky on slow CI; consider polling.
@Test void testDeregister() throws Exception { assertEquals("ok", instanceController.deregister(request)); verify(instanceServiceV2).removeInstance(eq(Constants.DEFAULT_NAMESPACE_ID), eq(TEST_GROUP_NAME + "@@" + TEST_SERVICE_NAME), any(Instance.class)); TimeUnit.SECONDS.sleep(1); assertEquals(DeregisterInstanceTraceEvent.class, eventReceivedClass); }
/**
 * A class is considered a "bean" when it exposes at least one setter
 * or at least one public field.
 *
 * @param clazz the class to inspect
 * @return {@code true} if the class qualifies as a bean
 */
public static boolean isBean(Class<?> clazz) {
    if (hasSetter(clazz)) {
        return true;
    }
    return hasPublicField(clazz);
}
// Regression test for issue I9VTZG: Dict must not be classified as a bean.
@Test public void issueI9VTZGTest() { final boolean bean = BeanUtil.isBean(Dict.class); assertFalse(bean); }
@Udf(description = "Splits a string into an array of substrings based on a delimiter.") public List<String> split( @UdfParameter( description = "The string to be split. If NULL, then function returns NULL.") final String string, @UdfParameter( description = "The delimiter to split a string by. If NULL, then function returns NULL.") final String delimiter) { if (string == null || delimiter == null) { return null; } // Java split() accepts regular expressions as a delimiter, but the behavior of this UDF split() // is to accept only literal strings. This method uses Guava Splitter instead, which does not // accept any regex pattern. This is to avoid a confusion to users when splitting by regex // special characters, such as '.' and '|'. try { // Guava Splitter does not accept empty delimiters. Use the Java split() method instead. if (delimiter.isEmpty()) { return Arrays.asList(EMPTY_DELIMITER.split(string)); } else { return Splitter.on(delimiter).splitToList(string); } } catch (final Exception e) { throw new KsqlFunctionException( String.format("Invalid delimiter '%s' in the split() function.", delimiter), e); } }
// NULL string, NULL delimiter, or both must yield NULL.
@Test public void shouldReturnNullOnAnyNullParametersOnSplitString() { assertThat(splitUdf.split(null, ""), is(nullValue())); assertThat(splitUdf.split("", null), is(nullValue())); assertThat(splitUdf.split((String) null, null), is(nullValue())); }
@Override
public <V> MultiLabel generateOutput(V label) {
    // Non-collection inputs are parsed from their string form directly.
    if (!(label instanceof Collection)) {
        return MultiLabel.parseString(label.toString());
    }
    // Collections are converted element-wise into (name, value) pairs.
    List<Pair<String, Boolean>> dimensions = new ArrayList<>();
    for (Object element : (Collection<?>) label) {
        dimensions.add(MultiLabel.parseElement(element.toString()));
    }
    return MultiLabel.createFromPairList(dimensions);
}
// An empty string parses to an empty MultiLabel (no labels, empty label string).
@Test public void testGenerateOutput_emptyStr() { MultiLabelFactory factory = new MultiLabelFactory(); MultiLabel output = factory.generateOutput(""); assertEquals(0, output.getLabelSet().size()); assertEquals("", output.getLabelString()); }
// Parses a single "field<sep>value" filter expression. Both field and value parts must be
// non-empty or an IllegalArgumentException is thrown. The field is resolved against the
// entity-attribute metadata to obtain its id and type. Range expressions (detected via
// isRangeValueExpression) support open-ended forms: a leading separator means "up to",
// a trailing separator means "from", otherwise both bounds are parsed. Non-range values
// become a SingleValueFilter.
public Filter parseSingleExpression(final String filterExpression, final List<EntityAttribute> attributes) { if (!filterExpression.contains(FIELD_AND_VALUE_SEPARATOR)) { throw new IllegalArgumentException(WRONG_FILTER_EXPR_FORMAT_ERROR_MSG); } final String[] split = filterExpression.split(FIELD_AND_VALUE_SEPARATOR, 2); final String fieldPart = split[0]; if (fieldPart == null || fieldPart.isEmpty()) { throw new IllegalArgumentException(WRONG_FILTER_EXPR_FORMAT_ERROR_MSG); } final String valuePart = split[1]; if (valuePart == null || valuePart.isEmpty()) { throw new IllegalArgumentException(WRONG_FILTER_EXPR_FORMAT_ERROR_MSG); } final EntityAttribute attributeMetaData = getAttributeMetaData(attributes, fieldPart); final SearchQueryField.Type fieldType = attributeMetaData.type(); if (isRangeValueExpression(valuePart, fieldType)) { if (valuePart.startsWith(RANGE_VALUES_SEPARATOR)) { return new RangeFilter(attributeMetaData.id(), null, extractValue(fieldType, valuePart.substring(RANGE_VALUES_SEPARATOR.length())) ); } else if (valuePart.endsWith(RANGE_VALUES_SEPARATOR)) { return new RangeFilter(attributeMetaData.id(), extractValue(fieldType, valuePart.substring(0, valuePart.length() - RANGE_VALUES_SEPARATOR.length())), null ); } else { final String[] ranges = valuePart.split(RANGE_VALUES_SEPARATOR); return new RangeFilter(attributeMetaData.id(), extractValue(fieldType, ranges[0]), extractValue(fieldType, ranges[1]) ); } } else { return new SingleValueFilter(attributeMetaData.id(), extractValue(fieldType, valuePart)); } }
// A "from<sep>to" date expression must parse into a RangeFilter with UTC date bounds.
@Test void parsesFilterExpressionCorrectlyForDateRanges() { final String fromString = "2012-12-12 12:12:12"; final String toString = "2022-12-12 12:12:12"; final List<EntityAttribute> entityAttributes = List.of(EntityAttribute.builder() .id("created_at") .title("Creation Date") .type(SearchQueryField.Type.DATE) .filterable(true) .build()); assertEquals( new RangeFilter("created_at", new DateTime(2012, 12, 12, 12, 12, 12, DateTimeZone.UTC).toDate(), new DateTime(2022, 12, 12, 12, 12, 12, DateTimeZone.UTC).toDate() ), toTest.parseSingleExpression("created_at:" + fromString + RANGE_VALUES_SEPARATOR + toString, entityAttributes )); }
// Reactive RENAME with cross-slot support. When the old and new keys hash to the same cluster
// slot, the native (super) implementation is used. Otherwise the value is copied via
// DUMP + RESTORE (carrying over the remaining TTL, clamped at >= 0, defaulting to 0 when absent)
// and the source key is deleted after a successful response.
// NOTE(review): del(...) is invoked inside doOnSuccess without an explicit subscription —
// confirm the helper subscribes internally, otherwise the source key may never be removed.
@Override public Flux<BooleanResponse<RenameCommand>> rename(Publisher<RenameCommand> commands) { return execute(commands, command -> { Assert.notNull(command.getKey(), "Key must not be null!"); Assert.notNull(command.getNewName(), "New name must not be null!"); byte[] keyBuf = toByteArray(command.getKey()); byte[] newKeyBuf = toByteArray(command.getNewName()); if (executorService.getConnectionManager().calcSlot(keyBuf) == executorService.getConnectionManager().calcSlot(newKeyBuf)) { return super.rename(commands); } return read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, keyBuf) .filter(Objects::nonNull) .zipWith( Mono.defer(() -> pTtl(command.getKey()) .filter(Objects::nonNull) .map(ttl -> Math.max(0, ttl)) .switchIfEmpty(Mono.just(0L)) ) ) .flatMap(valueAndTtl -> { return write(newKeyBuf, StringCodec.INSTANCE, RedisCommands.RESTORE, newKeyBuf, valueAndTtl.getT2(), valueAndTtl.getT1()); }) .thenReturn(new BooleanResponse<>(command, true)) .doOnSuccess((ignored) -> del(command.getKey())); }); }
// Renaming a missing key: same-slot falls through to the native RENAME (Redis error surfaces
// as RedisSystemException); the cross-slot DUMP/RESTORE path reports success but copies no value.
@Test public void testRename_keyNotExist() { Integer originalSlot = getSlotForKey(originalKey); newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot)); if (sameSlot) { // This is a quirk of the implementation - since same-slot renames use the non-cluster version, // the result is a Redis error. This behavior matches other spring-data-redis implementations assertThatThrownBy(() -> connection.keyCommands().rename(originalKey, newKey).block()) .isInstanceOf(RedisSystemException.class); } else { Boolean response = connection.keyCommands().rename(originalKey, newKey).block(); assertThat(response).isTrue(); final ByteBuffer newKeyValue = connection.stringCommands().get(newKey).block(); assertThat(newKeyValue).isEqualTo(null); } }
// Validator for hosted tenant applications only: for each container cluster with a configured
// memory percentage and a known absolute heap size, when ONNX models contribute a nonzero JVM
// cost, flags (via context.illegal) clusters whose heap percentage or absolute heap GB falls
// below the configured limits. Clusters with unknown host resources or an explicit
// 'allocated-memory' override are skipped with a FINE log entry.
@Override public void validate(Context context) { if (!context.deployState().isHostedTenantApplication(context.model().getAdmin().getApplicationType())) return; context.model().getContainerClusters().forEach((clusterId, appCluster) -> { var mp = appCluster.getMemoryPercentage().orElse(null); if (mp == null) return; if (mp.asAbsoluteGb().isEmpty()) { context.deployState().getDeployLogger().log(Level.FINE, "Host resources unknown or percentage overridden with 'allocated-memory'"); return; } long jvmModelCost = appCluster.onnxModelCostCalculator().aggregatedModelCostInBytes(); if (jvmModelCost > 0) { double availableMemoryGb = mp.asAbsoluteGb().getAsDouble(); int percentageOfTotal = mp.ofContainerTotal().getAsInt(); double modelCostGb = jvmModelCost / (1024D * 1024 * 1024); context.deployState().getDeployLogger().log(Level.FINE, () -> Text.format("JVM: %d%% (limit: %d%%), %.2fGB (limit: %.2fGB), ONNX: %.2fGB", percentageOfTotal, percentLimit, availableMemoryGb, gbLimit, modelCostGb)); if (percentageOfTotal < percentLimit) { context.illegal(Text.format("Allocated percentage of memory of JVM in cluster '%s' is too low (%d%% < %d%%). " + "Estimated cost of ONNX models is %.2fGB. Either use a node flavor with more memory or use less expensive models. " + "You may override this validation by specifying 'allocated-memory' (https://docs.vespa.ai/en/performance/container-tuning.html#jvm-heap-size).", clusterId, percentageOfTotal, percentLimit, modelCostGb)); } if (availableMemoryGb < gbLimit) { context.illegal( Text.format("Allocated memory to JVM in cluster '%s' is too low (%.2fGB < %.2fGB). " + "Estimated cost of ONNX models is %.2fGB. Either use a node flavor with more memory or use less expensive models. " + "You may override this validation by specifying 'allocated-memory' (https://docs.vespa.ai/en/performance/container-tuning.html#jvm-heap-size).", clusterId, availableMemoryGb, gbLimit, modelCostGb)); } } }); }
// An explicit <jvm allocated-memory='...'/> override must bypass the heap-size validation.
@Test void accepts_services_with_explicit_jvm_size() throws IOException, SAXException { String servicesXml = """ <?xml version="1.0" encoding="utf-8" ?> <services version='1.0'> <container version='1.0'> <nodes count="2"> <jvm allocated-memory='5%'/> <resources vcpu="4" memory="2Gb" disk="125Gb"/> </nodes> <component id="hf-embedder" type="hugging-face-embedder"> <transformer-model url="https://my/url/model.onnx"/> <tokenizer-model path="app/tokenizer.json"/> </component> </container> </services>"""; var deployState = createDeployState(servicesXml, 2, 1024L * 1024 * 1024); var model = new VespaModel(new NullConfigModelRegistry(), deployState); assertDoesNotThrow(() -> ValidationTester.validate(new JvmHeapSizeValidator(), model, deployState)); }
@Udf(description = "Subtracts a duration from a timestamp")
public Timestamp timestampSub(
    @UdfParameter(description = "A unit of time, for example DAY or HOUR") final TimeUnit unit,
    @UdfParameter(
        description = "An integer number of intervals to subtract")final Integer interval,
    @UdfParameter(description = "A TIMESTAMP value.") final Timestamp timestamp
) {
  // SQL NULL propagation: any NULL argument yields NULL.
  if (unit == null || interval == null || timestamp == null) {
    return null;
  }
  final long deltaMillis = unit.toMillis(interval);
  return new Timestamp(timestamp.getTime() - deltaMillis);
}
// A NULL timestamp must propagate to a NULL result (even with a negative interval).
@Test public void handleNullTimestamp() { // When: final Timestamp result = udf.timestampSub(TimeUnit.MILLISECONDS, -300, null); // Then: assertNull(result); }
/**
 * Writes a 16-bit value in big-endian byte order regardless of the buffer's
 * declared {@code order()}: little-endian buffers get the value byte-swapped first.
 */
@SuppressWarnings("deprecation")
public static ByteBuf setShortBE(ByteBuf buf, int index, int shortValue) {
    if (buf.order() == ByteOrder.BIG_ENDIAN) {
        return buf.setShort(index, shortValue);
    }
    return buf.setShort(index, swapShort((short) shortValue));
}
// setShortBE must produce big-endian bytes for both BIG_ENDIAN and LITTLE_ENDIAN buffers;
// verified by reading back with both readShort() and readShortLE().
@SuppressWarnings("deprecation") @Test public void setShortBE() { int shortValue = 0x1234; ByteBuf buf = Unpooled.wrappedBuffer(new byte[2]).order(ByteOrder.BIG_ENDIAN); ByteBufUtil.setShortBE(buf, 0, shortValue); assertEquals(shortValue, buf.readShort()); buf.resetReaderIndex(); assertEquals(ByteBufUtil.swapShort((short) shortValue), buf.readShortLE()); buf.release(); buf = Unpooled.wrappedBuffer(new byte[2]).order(ByteOrder.LITTLE_ENDIAN); ByteBufUtil.setShortBE(buf, 0, shortValue); assertEquals(ByteBufUtil.swapShort((short) shortValue), buf.readShortLE()); buf.resetReaderIndex(); assertEquals(ByteBufUtil.swapShort((short) shortValue), buf.readShort()); buf.release(); }
/**
 * Convenience overload: wraps the raw class in a {@code TypeDescriptor}
 * and delegates to the descriptor-based variant.
 *
 * @throws NoSuchSchemaException if no schema is registered for the class
 */
public <T> SchemaCoder<T> getSchemaCoder(Class<T> clazz) throws NoSuchSchemaException {
    TypeDescriptor<T> descriptor = TypeDescriptor.of(clazz);
    return getSchemaCoder(descriptor);
}
// The registry must hand back a SchemaCoder wired with the same schema and to/from-Row
// functions it registered; unregistered types must raise NoSuchSchemaException.
@Test public void testGetSchemaCoder() throws NoSuchSchemaException { SchemaRegistry registry = SchemaRegistry.createDefault(); registry.registerJavaBean(SimpleBean.class); Schema schema = registry.getSchema(SimpleBean.class); SerializableFunction<SimpleBean, Row> toRowFunction = registry.getToRowFunction(SimpleBean.class); SerializableFunction<Row, SimpleBean> fromRowFunction = registry.getFromRowFunction(SimpleBean.class); SchemaCoder schemaCoder = registry.getSchemaCoder(SimpleBean.class); assertTrue(schema.equivalent(schemaCoder.getSchema())); assertTrue(toRowFunction.equals(schemaCoder.getToRowFunction())); assertTrue(fromRowFunction.equals(schemaCoder.getFromRowFunction())); thrown.expect(NoSuchSchemaException.class); registry.getSchemaCoder(Double.class); }
/** Serializes a MetadataUpdate to compact (non-pretty-printed) JSON. */
public static String toJson(MetadataUpdate metadataUpdate) {
    return toJson(metadataUpdate, false);
}
/** An UpgradeFormatVersion update must serialize with the expected action and version fields. */
@Test
public void testUpgradeFormatVersionFromJson() {
    int formatVersion = 2;
    String expected = "{\"action\":\"upgrade-format-version\",\"format-version\":2}";
    MetadataUpdate.UpgradeFormatVersion actual = new MetadataUpdate.UpgradeFormatVersion(formatVersion);
    assertThat(MetadataUpdateParser.toJson(actual))
        .as("Upgrade format version should convert to the correct JSON value")
        .isEqualTo(expected);
}
/** Returns an immutable snapshot of the per-PCollection field-access map. */
ImmutableMap<PCollection<?>, FieldAccessDescriptor> getPCollectionFieldAccess() {
    return ImmutableMap.copyOf(pCollectionFieldAccess);
}
/** Traversal must record only the fields a downstream transform actually accesses. */
@Test
public void testFieldAccessKnownMainInput() {
    Pipeline p = Pipeline.create();
    FieldAccessVisitor fieldAccessVisitor = new FieldAccessVisitor();
    Schema schema = Schema.of(Field.of("field1", FieldType.STRING), Field.of("field2", FieldType.STRING));
    PCollection<Row> source = p.apply(Create.of(Row.withSchema(schema).addValues("foo", "bar").build()))
        .setRowSchema(schema);
    source.apply(new FieldAccessTransform(FieldAccessDescriptor.withFieldNames("field1")));
    p.traverseTopologically(fieldAccessVisitor);
    FieldAccessDescriptor fieldAccess = fieldAccessVisitor.getPCollectionFieldAccess().get(source);
    assertFalse(fieldAccess.getAllFields());
    assertThat(fieldAccess.fieldNamesAccessed(), containsInAnyOrder("field1"));
}
/**
 * Builds the on-disk location of a tagged config entry:
 * {nacos.home}/{TAG_DIR or TENANT_TAG_DIR}[/tenant]/group/dataId/tag.
 * Each segment is percent-style encoded so special characters stay filesystem-safe.
 */
private static File targetTagFile(String dataId, String group, String tenant, String tag) {
    // fix https://github.com/alibaba/nacos/issues/10067
    dataId = PathEncoderManager.getInstance().encode(dataId);
    group = PathEncoderManager.getInstance().encode(group);
    tenant = PathEncoderManager.getInstance().encode(tenant);
    File file = null;
    if (StringUtils.isBlank(tenant)) {
        file = new File(EnvUtil.getNacosHome(), TAG_DIR);
    } else {
        file = new File(EnvUtil.getNacosHome(), TENANT_TAG_DIR);
        file = new File(file, tenant);
    }
    file = new File(file, group);
    file = new File(file, dataId);
    file = new File(file, tag);
    return file;
}
/** targetTagFile must encode special characters in each segment (platform-dependent on Windows). */
@Test
void testTargetTagFile() throws NoSuchMethodException, IllegalAccessException, InvocationTargetException {
    Method method = ConfigRawDiskService.class.getDeclaredMethod("targetTagFile", String.class, String.class, String.class, String.class);
    method.setAccessible(true);
    File result = (File) method.invoke(null, "aaaa?dsaknkf", "aaaa*dsaknkf", "aaaa:dsaknkf", "aaaadsaknkf");
    // Decompose the path
    Path path = Paths.get(result.getPath());
    Path parent = path.getParent();
    Path grandParent = parent.getParent();
    Path greatGrandParent = grandParent.getParent();
    // Get the last four path segments
    String secondLastSegment = parent.getFileName().toString();
    String thirdLastSegment = grandParent.getFileName().toString();
    String fourthLastSegment = greatGrandParent.getFileName().toString();
    assertEquals(isWindows() ? "aaaa%A3%dsaknkf" : fourthLastSegment, fourthLastSegment);
    assertEquals(isWindows() ? "aaaa%A4%dsaknkf" : thirdLastSegment, thirdLastSegment);
    assertEquals(isWindows() ? "aaaa%A5%dsaknkf" : secondLastSegment, secondLastSegment);
    String lastSegment = path.getFileName().toString();
    assertEquals("aaaadsaknkf", lastSegment);
}
/**
 * Retrieves a saved search by id, scoped to the calling user's permissions.
 *
 * @throws NotFoundException if the search does not exist or is not visible to the user
 */
@GET
@ApiOperation(value = "Retrieve a search query")
@Path("{id}")
@Produces({MediaType.APPLICATION_JSON, SEARCH_FORMAT_V1})
public SearchDTO getSearch(@ApiParam(name = "id") @PathParam("id") String searchId, @Context SearchUser searchUser) {
    final Search search = searchDomain.getForUser(searchId, searchUser)
            .orElseThrow(() -> new NotFoundException("Search with id " + searchId + " does not exist"));
    return SearchDTO.fromSearch(search);
}
/** getSearch must return the stored search's DTO when the domain yields it. */
@Test
public void getSearchLoadsSearch() {
    final Query query = Query.builder()
            .id("queryId")
            .searchTypes(Collections.emptySet())
            .filter(StreamFilter.anyIdOf("streamId"))
            .build();
    final Search search = Search.builder()
            .id("deadbeef")
            .parameters(ImmutableSet.of())
            .queries(ImmutableSet.of(query))
            .build();
    final SearchDomain searchDomain = mockSearchDomain(Optional.of(search));
    final SearchResource resource = new SearchResource(searchDomain, searchExecutor, searchJobService, eventBus, clusterConfigService);
    final SearchDTO returnedSearch = resource.getSearch(search.id(), searchUser);
    assertThat(returnedSearch.id()).isEqualTo(search.id());
}
/** Updates a social client after validating that it exists and stays unique. */
@Override
public void updateSocialClient(SocialClientSaveReqVO updateReqVO) {
    // Validate existence
    validateSocialClientExists(updateReqVO.getId());
    // Validate uniqueness
    validateSocialClientUnique(updateReqVO.getId(), updateReqVO.getUserType(), updateReqVO.getSocialType());
    // Update
    SocialClientDO updateObj = BeanUtils.toBean(updateReqVO, SocialClientDO.class);
    socialClientMapper.updateById(updateObj);
}
/** A valid update request must be persisted field-for-field. */
@Test
public void testUpdateSocialClient_success() {
    // mock data
    SocialClientDO dbSocialClient = randomPojo(SocialClientDO.class);
    socialClientMapper.insert(dbSocialClient); // @Sql: insert an existing row first
    // prepare parameters
    SocialClientSaveReqVO reqVO = randomPojo(SocialClientSaveReqVO.class, o -> {
        o.setId(dbSocialClient.getId()); // set the ID being updated
        o.setSocialType(randomEle(SocialTypeEnum.values()).getType())
            .setUserType(randomEle(UserTypeEnum.values()).getValue())
            .setStatus(randomCommonStatus());
    });
    // invoke
    socialClientService.updateSocialClient(reqVO);
    // verify the update was applied correctly
    SocialClientDO socialClient = socialClientMapper.selectById(reqVO.getId()); // fetch the latest row
    assertPojoEquals(reqVO, socialClient);
}
/**
 * Disables the telemetry config with the given name.
 * Responds 304 (Not Modified) when the config does not exist, 200 on success.
 */
@PUT
@Path("disable/{name}")
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response disableConfig(@PathParam("name") String configName) {
    log.trace(String.format(MESSAGE_CONFIG, UPDATE));
    TelemetryConfig config = configService.getConfig(
            nullIsIllegal(configName, CONFIG_NAME + NOT_NULL_MESSAGE));
    if (config == null) {
        log.warn("There is no config found to disable for {}", configName);
        return Response.notModified().build();
    } else {
        TelemetryConfig updatedConfig = config.updateStatus(DISABLED);
        configService.updateTelemetryConfig(updatedConfig);
        return Response.ok().build();
    }
}
/** Disabling an existing config must update it and return HTTP 200. */
@Test
public void testDisableConfig() {
    expect(mockConfigAdminService.getConfig(anyString()))
            .andReturn(telemetryConfig).once();
    mockConfigAdminService.updateTelemetryConfig(telemetryConfig);
    replay(mockConfigAdminService);
    final WebTarget wt = target();
    Response response = wt.path(PATH + "/disable/test1")
            .request(MediaType.APPLICATION_JSON_TYPE)
            .put(Entity.json(""));
    final int status = response.getStatus();
    assertEquals(200, status);
    verify(mockConfigAdminService);
}
/**
 * Returns the (cached) FederationPolicyManager for a queue, lazily building it from the
 * FederationStateStore configuration on first access.
 *
 * <p>Returns {@code null} when no policy configuration exists for the queue, or when the
 * manager requires WeightedPolicyInfo but the stored parameters are empty.
 *
 * <p>NOTE(review): the check-then-put on {@code policyManagerMap}/{@code policyConfMap} is
 * not synchronized — presumably callers are single-threaded or duplicate construction is
 * benign; confirm before relying on this from multiple threads.
 *
 * @param queueName queue whose policy manager is requested
 * @throws YarnException if reading the policy configuration from the state store fails
 */
public FederationPolicyManager getPolicyManager(String queueName) throws YarnException {
    FederationPolicyManager policyManager = policyManagerMap.get(queueName);
    // If we don't have the policy manager cached, pull configuration
    // from the FederationStateStore to create and cache it
    if (policyManager == null) {
        try {
            // If we don't have the configuration cached, pull it
            // from the stateStore
            SubClusterPolicyConfiguration conf = policyConfMap.get(queueName);
            if (conf == null) {
                conf = stateStore.getPolicyConfiguration(queueName);
            }
            // If configuration is still null, it does not exist in the
            // FederationStateStore
            if (conf == null) {
                LOG.info("Read null policy for queue {}.", queueName);
                return null;
            }
            // Generate PolicyManager based on PolicyManagerType.
            String policyManagerType = conf.getType();
            policyManager = FederationPolicyUtils.instantiatePolicyManager(policyManagerType);
            policyManager.setQueue(queueName);
            // If PolicyManager supports Weighted PolicyInfo, it means that
            // we need to use this parameter to determine which sub-cluster the router goes to
            // or which sub-cluster the container goes to.
            if (policyManager.isSupportWeightedPolicyInfo()) {
                ByteBuffer weightedPolicyInfoParams = conf.getParams();
                if (weightedPolicyInfoParams == null) {
                    LOG.warn("Warning: Queue = {}, FederationPolicyManager {} WeightedPolicyInfo is empty.",
                            queueName, policyManagerType);
                    return null;
                }
                WeightedPolicyInfo weightedPolicyInfo = WeightedPolicyInfo.fromByteBuffer(conf.getParams());
                policyManager.setWeightedPolicyInfo(weightedPolicyInfo);
            } else {
                LOG.warn("Warning: FederationPolicyManager of unsupported WeightedPolicyInfo type {}, "
                        + "initialization may be incomplete.", policyManager.getClass());
            }
            policyManagerMap.put(queueName, policyManager);
            policyConfMap.put(queueName, conf);
        } catch (YarnException e) {
            LOG.error("Error reading SubClusterPolicyConfiguration from state "
                    + "store for queue: {}", queueName);
            throw e;
        }
    }
    return policyManager;
}
/** Managers that don't support WeightedPolicyInfo must still initialize, but reject getWeightedPolicyInfo. */
@Test
public void testGetUniformBroadcastPolicyManager() throws Exception {
    stateStore = new MemoryFederationStateStore();
    stateStore.init(new Configuration());
    List<String> notSupportWeightedPolicyInfos = new ArrayList<>();
    notSupportWeightedPolicyInfos.add(HashBroadcastPolicyManager.class.getName());
    notSupportWeightedPolicyInfos.add(UniformBroadcastPolicyManager.class.getName());
    notSupportWeightedPolicyInfos.add(HomePolicyManager.class.getName());
    notSupportWeightedPolicyInfos.add(RejectAllPolicyManager.class.getName());
    String prefix = "org.apache.hadoop.yarn.server.federation.policies.manager.";
    for (String policyManagerType : notSupportWeightedPolicyInfos) {
        // root.c uses UniformBroadcastPolicyManager.
        // Step1. Prepare routerPolicyWeights.
        WeightedPolicyInfo weightedPolicyInfo = new WeightedPolicyInfo();
        weightedPolicyInfo.setHeadroomAlpha(1);
        // Step2. Set PolicyConfiguration.
        SubClusterPolicyConfiguration config = SubClusterPolicyConfiguration.newInstance("root.c",
                policyManagerType, weightedPolicyInfo.toByteBuffer());
        SetSubClusterPolicyConfigurationRequest request = SetSubClusterPolicyConfigurationRequest.newInstance(config);
        stateStore.setPolicyConfiguration(request);
        // Step3. Get FederationPolicyManager using policyFacade.
        facade.reinitialize(stateStore, conf);
        policyFacade = new GPGPolicyFacade(facade, conf);
        FederationPolicyManager policyManager = policyFacade.getPolicyManager("root.c");
        Assert.assertNotNull(policyManager);
        Assert.assertFalse(policyManager.isSupportWeightedPolicyInfo());
        String policyManagerTypeSimple = policyManagerType.replace(prefix, "");
        // Verify that PolicyManager is initialized successfully,
        // but getWeightedPolicyInfo is not supported.
        LambdaTestUtils.intercept(NotImplementedException.class,
                policyManagerTypeSimple + " does not implement getWeightedPolicyInfo.",
                () -> policyManager.getWeightedPolicyInfo());
    }
}
/**
 * Encodes the given segments into a cloud id: segments are joined with '$' and the
 * result is URL-safe Base64 encoded.
 *
 * @param args cloud-id segments (e.g. host, elasticsearch id, kibana id)
 * @return the URL-safe Base64 encoding of the '$'-joined segments
 */
public static String cloudIdEncode(String... args) {
    final String joinedArgs = String.join("$", args);
    // BUG FIX: use an explicit charset. The no-arg getBytes() depends on the platform
    // default encoding and could yield different cloud ids on different JVMs.
    return Base64.getUrlEncoder()
            .encodeToString(joinedArgs.getBytes(java.nio.charset.StandardCharsets.UTF_8));
}
/** Decoding a cloud id with a literal "undefined" elasticsearch segment must raise an ArgumentError. */
@Test
public void testThrowExceptionWhenElasticSegmentSegmentIsUndefined() {
    String[] raw = new String[] {"us-east-1.aws.found.io", "undefined", "my-kibana"};
    String encoded = CloudSettingId.cloudIdEncode(raw);
    Exception thrownException = assertThrows(org.jruby.exceptions.ArgumentError.class, () -> {
        new CloudSettingId(encoded);
    });
    assertThat(thrownException.getMessage(), containsString("Cloud Id, after decoding, elasticsearch segment is 'undefined', literally."));
}
/**
 * Renders an AST node back into SQL text.
 *
 * @param root the statement/expression node to format
 * @return the formatted SQL with trailing newlines removed
 */
public static String formatSql(final AstNode root) {
  final StringBuilder buffer = new StringBuilder();
  final Formatter formatter = new Formatter(buffer);
  formatter.process(root, 0);
  final String rendered = buffer.toString();
  // Strip only trailing newlines emitted by the formatter; all other text is kept verbatim.
  return StringUtils.stripEnd(rendered, "\n");
}
/** formatSql must round-trip a struct-star projection into canonical upper-case SQL. */
@Test
public void shouldFormatSelectStructAllCorrectly() {
    final String statementString = "CREATE STREAM S AS SELECT a.address->* FROM address a;";
    final Statement statement = parseSingle(statementString);
    assertThat(SqlFormatter.formatSql(statement), equalTo("CREATE STREAM S AS SELECT A.ADDRESS->*\n"
        + "FROM ADDRESS A\nEMIT CHANGES"));
}
/** Pages combination activities according to the request's filter fields. */
@Override
public PageResult<CombinationActivityDO> getCombinationActivityPage(CombinationActivityPageReqVO pageReqVO) {
    return combinationActivityMapper.selectPage(pageReqVO);
}
/** Paging must return only the row matching every filter field of the request. */
@Test
@Disabled // TODO replace null with the required values, then remove the @Disabled annotation
public void testGetCombinationActivityPage() {
    // mock data
    CombinationActivityDO dbCombinationActivity = randomPojo(CombinationActivityDO.class, o -> { // row the query should find
        o.setName(null);
        //o.setSpuId(null);
        o.setTotalLimitCount(null);
        o.setSingleLimitCount(null);
        o.setStartTime(null);
        o.setEndTime(null);
        o.setUserSize(null);
        o.setVirtualGroup(null);
        o.setStatus(null);
        o.setLimitDuration(null);
        o.setCreateTime(null);
    });
    combinationActivityMapper.insert(dbCombinationActivity);
    // test: name does not match
    combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setName(null)));
    // test: spuId does not match
    //combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setSpuId(null)));
    // test: totalLimitCount does not match
    combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setTotalLimitCount(null)));
    // test: singleLimitCount does not match
    combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setSingleLimitCount(null)));
    // test: startTime does not match
    combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setStartTime(null)));
    // test: endTime does not match
    combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setEndTime(null)));
    // test: userSize does not match
    combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setUserSize(null)));
    // test: virtualGroup does not match
    combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setVirtualGroup(null)));
    // test: status does not match
    combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setStatus(null)));
    // test: limitDuration does not match
    combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setLimitDuration(null)));
    // test: createTime does not match
    combinationActivityMapper.insert(cloneIgnoreId(dbCombinationActivity, o -> o.setCreateTime(null)));
    // prepare parameters
    CombinationActivityPageReqVO reqVO = new CombinationActivityPageReqVO();
    reqVO.setName(null);
    reqVO.setStatus(null);
    // invoke
    PageResult<CombinationActivityDO> pageResult = combinationActivityService.getCombinationActivityPage(reqVO);
    // assert
    assertEquals(1, pageResult.getTotal());
    assertEquals(1, pageResult.getList().size());
    assertPojoEquals(dbCombinationActivity, pageResult.getList().get(0));
}
/** Returns the engine that manages the ShardingSphere transaction managers. */
public ShardingSphereTransactionManagerEngine getResource() {
    return resource.get();
}
/** A transaction rule built over a database must expose the fixture XA transaction manager. */
@Test
void assertInitTransactionRuleWithMultiDatabaseType() {
    try (TransactionRule actual = new TransactionRule(createTransactionRuleConfiguration(), Collections.singletonMap(SHARDING_DB_1, createDatabase()))) {
        assertThat(actual.getResource().getTransactionManager(TransactionType.XA), instanceOf(ShardingSphereTransactionManagerFixture.class));
    }
}
/**
 * Lists all Iceberg tables in the given namespace, paging through Glue with the
 * continuation token and filtering out non-Iceberg Glue tables.
 *
 * @param namespace the Glue database to list; must exist
 * @return identifiers of all Iceberg tables in the namespace
 */
@Override
public List<TableIdentifier> listTables(Namespace namespace) {
    namespaceExists(namespace);
    // should be safe to list all before returning the list, instead of dynamically load the list.
    String nextToken = null;
    List<TableIdentifier> results = Lists.newArrayList();
    do {
        GetTablesResponse response = glue.getTables(
                GetTablesRequest.builder()
                        .catalogId(awsProperties.glueCatalogId())
                        .databaseName(
                                IcebergToGlueConverter.toDatabaseName(
                                        namespace, awsProperties.glueCatalogSkipNameValidation()))
                        .nextToken(nextToken)
                        .build());
        nextToken = response.nextToken();
        if (response.hasTableList()) {
            results.addAll(
                    response.tableList().stream()
                            .filter(this::isGlueIcebergTable)
                            .map(GlueToIcebergConverter::toTableId)
                            .collect(Collectors.toList()));
        }
    } while (nextToken != null);
    LOG.debug("Listing of namespace: {} resulted in the following tables: {}", namespace, results);
    return results;
}
/** listTables must keep only tables whose parameters mark them as Iceberg tables. */
@Test
public void testListTables() {
    Mockito.doReturn(
            GetDatabaseResponse.builder().database(Database.builder().name("db1").build()).build())
        .when(glue)
        .getDatabase(Mockito.any(GetDatabaseRequest.class));
    Mockito.doReturn(
            GetTablesResponse.builder()
                .tableList(
                    Table.builder()
                        .databaseName("db1")
                        .name("t1")
                        .parameters(
                            ImmutableMap.of(
                                BaseMetastoreTableOperations.TABLE_TYPE_PROP,
                                BaseMetastoreTableOperations.ICEBERG_TABLE_TYPE_VALUE))
                        .build(),
                    Table.builder()
                        .databaseName("db1")
                        .name("t2")
                        .parameters(
                            ImmutableMap.of(
                                "key", "val",
                                BaseMetastoreTableOperations.TABLE_TYPE_PROP,
                                BaseMetastoreTableOperations.ICEBERG_TABLE_TYPE_VALUE))
                        .build(),
                    Table.builder()
                        .databaseName("db1")
                        .name("t3")
                        .parameters(
                            ImmutableMap.of(
                                "key", "val",
                                BaseMetastoreTableOperations.TABLE_TYPE_PROP, "wrongVal"))
                        .build(),
                    Table.builder()
                        .databaseName("db1")
                        .name("t4")
                        .parameters(ImmutableMap.of("key", "val"))
                        .build(),
                    Table.builder().databaseName("db1").name("t5").parameters(null).build())
                .build())
        .when(glue)
        .getTables(Mockito.any(GetTablesRequest.class));
    assertThat(glueCatalog.listTables(Namespace.of("db1")))
        .isEqualTo(
            Lists.newArrayList(TableIdentifier.of("db1", "t1"), TableIdentifier.of("db1", "t2")));
}
/**
 * Blocks until all currently pending dispatch futures complete, logging (not rethrowing)
 * any failure, then clears the pending list.
 *
 * <p>NOTE(review): the list obtained from {@code pending.get()} is cleared wholesale in
 * the finally block — presumably no new futures are added concurrently while waiting;
 * confirm, otherwise those additions would be dropped without being awaited.
 */
public void awaitSynchronous() {
    List<CompletableFuture<Void>> futures = pending.get();
    if (futures.isEmpty()) {
        return;
    }
    try {
        CompletableFuture.allOf(futures.toArray(CompletableFuture[]::new)).join();
    } catch (CompletionException e) {
        // Failures are logged and swallowed; awaiting is best-effort.
        logger.log(Level.WARNING, "", e);
    } finally {
        futures.clear();
    }
}
/** awaitSynchronous must drain the pending-futures list even for already-completed futures. */
@Test
public void awaitSynchronous() {
    var dispatcher = new EventDispatcher<Integer, Integer>(Runnable::run);
    dispatcher.pending.get().add(CompletableFuture.completedFuture(null));
    dispatcher.awaitSynchronous();
    assertThat(dispatcher.pending.get()).isEmpty();
}
/**
 * Completes the sectoral IdP callback: exchanges the authorization code for an id token,
 * removes the one-time session, issues our own code, and builds the relying party's
 * redirect URI with {@code code} and {@code state} query parameters.
 *
 * @throws ValidationException if the session has already been removed (replay/invalid)
 */
@NonNull
public URI callback(@NonNull CallbackRequest request) {
    var session = mustFindSession(request.sessionId());
    var idToken = session
            .trustedSectoralIdpStep()
            .exchangeSectoralIdpCode(request.code(), session.codeVerifier());
    // Removing the session makes the callback single-use.
    session = removeSession(request.sessionId());
    if (session == null) {
        throw new ValidationException(new Message("error.invalidSession"));
    }
    var issued = tokenIssuer.issueCode(session, idToken);
    return UriBuilder.fromUri(session.redirectUri())
            .queryParam("code", issued.code())
            .queryParam("state", session.state())
            .build();
}
/** callback must redirect with the issued code and session state, and consume the session. */
@Test
void callback() {
    var config = new RelyingPartyConfig(List.of("code"), List.of(REDIRECT_URI));
    var sessionRepo = mock(SessionRepo.class);
    var tokenIssuer = mock(TokenIssuer.class);
    var sut = new AuthService(BASE_URI, config, null, sessionRepo, tokenIssuer, null);
    var sessionId = UUID.randomUUID().toString();
    var state = "mySuperDuperState";
    var nonce = "20e5ed8b-f96b-48de-ae73-4460bcfc35a1";
    var clientId = "myapp";
    var trustedIdpStep = mock(TrustedSectoralIdpStep.class);
    var session = Session.create()
            .id(sessionId)
            .state(state)
            .nonce(nonce)
            .redirectUri(REDIRECT_URI)
            .clientId(clientId)
            .trustedSectoralIdpStep(trustedIdpStep)
            .build();
    when(sessionRepo.load(sessionId)).thenReturn(session);
    when(sessionRepo.remove(sessionId)).thenReturn(session);
    var code = "6238e4504332468aa0c12e300787fded";
    when(trustedIdpStep.exchangeSectoralIdpCode(code, null)).thenReturn(null);
    var issued = new Code(code, null, null, REDIRECT_URI, nonce, clientId, null);
    when(tokenIssuer.issueCode(session, null)).thenReturn(issued);
    var req = new CallbackRequest(sessionId, null);
    // when
    var res = sut.callback(req);
    // then
    assertEquals(
        "https://myapp.example.com?code=6238e4504332468aa0c12e300787fded&state=mySuperDuperState",
        res.toString());
    verify(sessionRepo).remove(sessionId);
}
/**
 * Reads the stream to exhaustion and decodes it as text. When {@code charset} is null,
 * the charset is auto-detected from the first four bytes (UTF BOM/width sniffing) without
 * consuming them (mark/reset).
 *
 * @param input   source stream; wrapped to support mark if needed
 * @param charset explicit charset, or {@code null} to auto-detect
 * @throws IOException on read failure or when detection finds an unsupported UCS-4 variant
 */
public static String readAllBytes(InputStream input, Charset charset) throws IOException {
    if (charset == null) {
        input = ensureMarkSupport(input);
        input.mark(4);
        byte[] buffer = new byte[4];
        int bytesRead = fillBuffer(input, buffer);
        // Rewind so the sniffed bytes are decoded as content too.
        input.reset();
        charset = detectUtfCharset0(buffer, bytesRead);
        if (charset == null) {
            throw new IOException("Unsupported UCS-4 variant (neither UTF-32BE nor UTF32-LE)");
        }
    }
    Reader reader = new InputStreamReader(input, charset);
    return readAllChars(reader);
}
/** Charset auto-detection must work even when the underlying stream does not support mark/reset. */
@Test
void validateSupportForUnmarkableStreams() throws IOException {
    assertEquals("ABCD", UtfTextUtils.readAllBytes(new UnmarkableInputStream(new ByteArrayInputStream(hexBytes("41424344"))), null));
}
/**
 * Runs the interactive REPL loop: validates the client against the remote server,
 * then reads and executes lines until EOF. Individual command failures are reported
 * to the terminal and the loop continues.
 *
 * @return {@code NO_ERROR} (EOF is the normal exit path, not an error)
 */
public int runInteractively() {
    displayWelcomeMessage();
    RemoteServerSpecificCommand.validateClient(terminal.writer(), restClient);
    boolean eof = false;
    while (!eof) {
        try {
            handleLine(nextNonCliCommand());
        } catch (final EndOfFileException exception) {
            // EOF is fine, just terminate the REPL
            terminal.writer().println("Exiting ksqlDB.");
            eof = true;
        } catch (final Exception exception) {
            LOGGER.error("An error occurred while running a command. Error = "
                    + exception.getMessage(), exception);
            terminal.printError(ErrorMessageUtil.buildErrorMessage(exception), exception.toString());
        }
        terminal.flush();
    }
    return NO_ERROR;
}
/** Connecting to an unsupported CP server version must abort the interactive session. */
@Test
public void shouldFailOnUnsupportedCpServerVersion() throws Exception {
    givenRunInteractivelyWillExit();
    final KsqlRestClient mockRestClient = givenMockRestClient("5.5.0-0");
    assertThrows(
        KsqlUnsupportedServerException.class,
        () -> new Cli(1L, 1L, mockRestClient, console)
            .runInteractively()
    );
}
/** Validates a GitHub ALM setting by delegating to the field-level overload. */
public GithubAppConfiguration validate(AlmSettingDto almSettingDto) {
    return validate(almSettingDto.getAppId(), almSettingDto.getClientId(), almSettingDto.getClientSecret(), almSettingDto.getPrivateKey(), almSettingDto.getUrl());
}
/** Validation must reject a setting with no client id. */
@Test
public void github_validation_checks_missing_clientId() {
    AlmSettingDto almSettingDto = createNewGithubDto(null, null, EXAMPLE_APP_ID, null);
    assertThatThrownBy(() -> underTest.validate(almSettingDto))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Missing Client Id");
}
@VisibleForTesting static Map<String, ExternalResourceDriver> externalResourceDriversFromConfig( Configuration config, PluginManager pluginManager) { final Set<String> resourceSet = getExternalResourceSet(config); if (resourceSet.isEmpty()) { return Collections.emptyMap(); } final Iterator<ExternalResourceDriverFactory> factoryIterator = pluginManager.load(ExternalResourceDriverFactory.class); final Map<String, ExternalResourceDriverFactory> externalResourceFactories = new HashMap<>(); factoryIterator.forEachRemaining( externalResourceDriverFactory -> externalResourceFactories.put( externalResourceDriverFactory.getClass().getName(), externalResourceDriverFactory)); final Map<String, ExternalResourceDriver> externalResourceDrivers = new HashMap<>(); for (String resourceName : resourceSet) { final ConfigOption<String> driverClassOption = key(ExternalResourceOptions .getExternalResourceDriverFactoryConfigOptionForResource( resourceName)) .stringType() .noDefaultValue(); final String driverFactoryClassName = config.get(driverClassOption); if (StringUtils.isNullOrWhitespaceOnly(driverFactoryClassName)) { LOG.warn( "Could not find driver class name for {}. Please make sure {} is configured.", resourceName, driverClassOption.key()); continue; } ExternalResourceDriverFactory externalResourceDriverFactory = externalResourceFactories.get(driverFactoryClassName); if (externalResourceDriverFactory != null) { DelegatingConfiguration delegatingConfiguration = new DelegatingConfiguration( config, ExternalResourceOptions .getExternalResourceParamConfigPrefixForResource( resourceName)); try { externalResourceDrivers.put( resourceName, externalResourceDriverFactory.createExternalResourceDriver( delegatingConfiguration)); LOG.info("Add external resources driver for {}.", resourceName); } catch (Exception e) { LOG.warn( "Could not instantiate driver with factory {} for {}. 
{}", driverFactoryClassName, resourceName, e); } } else { LOG.warn( "Could not find factory class {} for {}.", driverFactoryClassName, resourceName); } } return externalResourceDrivers; }
/** A resource without a configured factory class must be skipped, yielding no drivers. */
@Test
public void testNotConfiguredFactoryClass() {
    final Configuration config = new Configuration();
    final Map<Class<?>, Iterator<?>> plugins = new HashMap<>();
    plugins.put(
        ExternalResourceDriverFactory.class,
        IteratorUtils.singletonIterator(new TestingExternalResourceDriverFactory()));
    final PluginManager testingPluginManager = new TestingPluginManager(plugins);
    config.set(
        ExternalResourceOptions.EXTERNAL_RESOURCE_LIST,
        Collections.singletonList(RESOURCE_NAME_1));
    final Map<String, ExternalResourceDriver> externalResourceDrivers =
        ExternalResourceUtils.externalResourceDriversFromConfig(config, testingPluginManager);
    assertThat(externalResourceDrivers.entrySet(), is(empty()));
}
/**
 * FEEL any(): returns true if any element of the list is true.
 *
 * <p>A null list yields {@code false}; null elements are skipped; a non-Boolean element
 * produces an invalid-parameters error. Note that the whole list is validated even after
 * a true value has been seen.
 */
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list) {
    if (list == null) {
        return FEELFnResult.ofResult(false);
    }
    boolean anyTrue = false;
    for (final Object element : list) {
        if (element == null) {
            continue; // nulls are ignored, but later elements are still type-checked
        }
        if (!(element instanceof Boolean)) {
            return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR,
                    "an element in the list is not a Boolean"));
        }
        anyTrue |= (Boolean) element;
    }
    return FEELFnResult.ofResult(anyTrue);
}
/** any() must report an error for non-Boolean elements even when a true value precedes them. */
@Test
void invokeArrayParamTypeHeterogenousArray() {
    FunctionTestUtil.assertResultError(anyFunction.invoke(new Object[]{Boolean.FALSE, 1}), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(anyFunction.invoke(new Object[]{Boolean.TRUE, 1}), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(anyFunction.invoke(new Object[]{Boolean.TRUE, null, 1}), InvalidParametersEvent.class);
}
/** Returns all cluster nodes by issuing CLUSTER NODES against any node. */
@Override
public Iterable<RedisClusterNode> clusterGetNodes() {
    return read(null, StringCodec.INSTANCE, CLUSTER_NODES);
}
/** Every reported node must carry its core attributes; masters have slots, replicas a master id. */
@Test
public void testClusterGetNodes() {
    Iterable<RedisClusterNode> nodes = connection.clusterGetNodes();
    assertThat(nodes).hasSize(6);
    for (RedisClusterNode redisClusterNode : nodes) {
        assertThat(redisClusterNode.getLinkState()).isNotNull();
        assertThat(redisClusterNode.getFlags()).isNotEmpty();
        assertThat(redisClusterNode.getHost()).isNotNull();
        assertThat(redisClusterNode.getPort()).isNotNull();
        assertThat(redisClusterNode.getId()).isNotNull();
        assertThat(redisClusterNode.getType()).isNotNull();
        if (redisClusterNode.getType() == NodeType.MASTER) {
            assertThat(redisClusterNode.getSlotRange().getSlots()).isNotEmpty();
        } else {
            assertThat(redisClusterNode.getMasterId()).isNotNull();
        }
    }
}
/** Serializes a header value like regular record data; the header key is not used. */
@Override
public byte[] fromConnectHeader(String topic, String headerKey, Schema schema, Object value) {
    return fromConnectData(topic, schema, value);
}
/** A string header must serialize into the standard JSON envelope with schema and payload. */
@Test
public void testStringHeaderToJson() {
    JsonNode converted = parse(converter.fromConnectHeader(TOPIC, "headerName", Schema.STRING_SCHEMA, "test-string"));
    validateEnvelope(converted);
    assertEquals(parse("{ \"type\": \"string\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME));
    assertEquals("test-string", converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).textValue());
}
/** Returns the lazily-initialized shared Gson instance (initialization-on-demand holder). */
public static Gson instance() {
    return SingletonHolder.INSTANCE;
}
/** The shared Gson must refuse to serialize AESEncrypter to avoid leaking key material. */
@Test
void rejectsSerializationOfAESEncrypter() {
    final IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> Serialization.instance().toJson(new AESEncrypter(mock(AESCipherProvider.class))));
    assertEquals(format("Refusing to serialize a %s instance and leak security details!", AESEncrypter.class.getName()), e.getMessage());
}
/**
 * Instantiates a scenario object for the given class name: map-like classes become a raw
 * {@link HashMap}; anything else is loaded via the scenario class loader and constructed
 * with its no-arg constructor.
 *
 * @param genericClasses declared generic parameters; not needed for instantiation
 * @throws IllegalArgumentException if the class cannot be loaded or instantiated
 */
@Override
protected Object createObject(String className, List<String> genericClasses) {
    if (ScenarioSimulationSharedUtils.isMap(className)) {
        return new HashMap();
    }
    try {
        // Use getDeclaredConstructor().newInstance() instead of the deprecated
        // Class.newInstance(), which propagates undeclared checked exceptions.
        // Any failure mode still lands in the catch-all below, so behavior for
        // callers is unchanged.
        return classLoader.loadClass(className).getDeclaredConstructor().newInstance();
    } catch (Exception e) {
        throw new IllegalArgumentException("Impossible to instantiate " + className, e);
    }
}
/** createObject must build plain classes and maps, and wrap load failures in IllegalArgumentException. */
@Test
public void createObject() {
    assertThat(expressionEvaluator.createObject(String.class.getCanonicalName(), List.of())).isNotNull();
    assertThat(expressionEvaluator.createObject(Map.class.getCanonicalName(), List.of(String.class.getCanonicalName(), String.class.getCanonicalName()))).isInstanceOf(Map.class);
    assertThatThrownBy(() -> expressionEvaluator.createObject("com.invalid.class.Name", List.of())).isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Impossible to instantiate com.invalid.class.Name");
}
/**
 * Collects the files under {@code path} whose names match {@code pattern}.
 *
 * @return the matching files, or an empty set when {@code path} is not a directory
 */
static Collection<File> internalGetFileResources(String path, Pattern pattern) {
    final File directory = new File(path);
    // Missing paths and non-directories contribute no resources.
    return directory.isDirectory()
            ? getFileResourcesFromDirectory(directory, pattern)
            : Collections.emptySet();
}
/** Scanning the test-classes directory for *.txt must find the known test file. */
@Test
public void internalGetResourcesExisting() {
    String path = "." + File.separator + "target" + File.separator + "test-classes";
    Pattern pattern = Pattern.compile(".*txt");
    final Collection<File> retrieved = ResourceHelper.internalGetFileResources(path, pattern);
    commonVerifyCollectionWithExpectedFile(retrieved, TEST_FILE);
}
public static List<String> splitFilter( final String input ) { final List<String> result = new ArrayList<>(); if ( input.length() >= 5 && input.startsWith("(") && input.endsWith("))") && (input.charAt(1) == '&' || input.charAt(1) == '|') && input.charAt(2) == '(' ) { // Strip off the outer parenthesis and search operator. String stripped = input.substring(2, input.length() - 1); // The remainder should consist only of ()-surrounded parts. // We'll remove the leading '(' and trailing ')' character, then split on ")(" to get all parts. stripped = stripped.substring(1, stripped.length() - 1); final String[] split = stripped.split("\\)\\("); result.addAll(Arrays.asList(split)); } else { result.add(input); } return result; }
/** A simple (non-aggregate) filter must be returned unchanged as a single-element list. */
@Test
public void testSplitFilterSimpleSingleValue() throws Exception {
    // Setup fixture.
    final String input = "test";

    // Execute system under test.
    final List<String> result = LdapManager.splitFilter( input );

    // Verify result.
    assertNotNull( result );
    assertEquals( 1, result.size() );
    assertTrue( result.contains( input ) );
}
public static TFileFormatType getFormatType(String fileFormat, String path) { if (fileFormat != null) { if (fileFormat.toLowerCase().equals("parquet")) { return TFileFormatType.FORMAT_PARQUET; } else if (fileFormat.toLowerCase().equals("orc")) { return TFileFormatType.FORMAT_ORC; } else if (fileFormat.toLowerCase().equals("json")) { return TFileFormatType.FORMAT_JSON; } // Attention: The compression type of csv format is from the suffix of filename. } String lowerCasePath = path.toLowerCase(); if (lowerCasePath.endsWith(".parquet") || lowerCasePath.endsWith(".parq")) { return TFileFormatType.FORMAT_PARQUET; } else if (lowerCasePath.endsWith(".orc")) { return TFileFormatType.FORMAT_ORC; } else if (lowerCasePath.endsWith(".gz")) { return TFileFormatType.FORMAT_CSV_GZ; } else if (lowerCasePath.endsWith(".bz2")) { return TFileFormatType.FORMAT_CSV_BZ2; } else if (lowerCasePath.endsWith(".lz4")) { return TFileFormatType.FORMAT_CSV_LZ4_FRAME; } else if (lowerCasePath.endsWith(".deflate")) { return TFileFormatType.FORMAT_CSV_DEFLATE; } else if (lowerCasePath.endsWith(".zst")) { return TFileFormatType.FORMAT_CSV_ZSTD; } else { return TFileFormatType.FORMAT_CSV_PLAIN; } }
/** Explicit names and every supported suffix must map to the expected format constant. */
@Test
public void testGetFormatType() {
    Assert.assertEquals(TFileFormatType.FORMAT_PARQUET, Load.getFormatType("parquet", "hdfs://127.0.0.1:9000/some_file"));
    Assert.assertEquals(TFileFormatType.FORMAT_ORC, Load.getFormatType("orc", "hdfs://127.0.0.1:9000/some_file"));
    Assert.assertEquals(TFileFormatType.FORMAT_JSON, Load.getFormatType("json", "hdfs://127.0.0.1:9000/some_file"));
    Assert.assertEquals(TFileFormatType.FORMAT_PARQUET, Load.getFormatType("", "hdfs://127.0.0.1:9000/some_file.parq"));
    Assert.assertEquals(TFileFormatType.FORMAT_PARQUET, Load.getFormatType("", "hdfs://127.0.0.1:9000/some_file.parquet"));
    Assert.assertEquals(TFileFormatType.FORMAT_ORC, Load.getFormatType("", "hdfs://127.0.0.1:9000/some_file.orc"));
    Assert.assertEquals(TFileFormatType.FORMAT_CSV_GZ, Load.getFormatType("csv", "hdfs://127.0.0.1:9000/some_file.gz"));
    Assert.assertEquals(TFileFormatType.FORMAT_CSV_BZ2, Load.getFormatType("csv", "hdfs://127.0.0.1:9000/some_file.bz2"));
    Assert.assertEquals(TFileFormatType.FORMAT_CSV_LZ4_FRAME, Load.getFormatType("csv", "hdfs://127.0.0.1:9000/some_file.lz4"));
    Assert.assertEquals(TFileFormatType.FORMAT_CSV_DEFLATE, Load.getFormatType("csv", "hdfs://127.0.0.1:9000/some_file.deflate"));
    Assert.assertEquals(TFileFormatType.FORMAT_CSV_ZSTD, Load.getFormatType("csv", "hdfs://127.0.0.1:9000/some_file.zst"));
    Assert.assertEquals(TFileFormatType.FORMAT_CSV_PLAIN, Load.getFormatType("csv", "hdfs://127.0.0.1:9000/some_file"));
}
/**
 * Builds the webhook JSON payload for a project analysis: server, CE task, analysis,
 * project, optional branch and quality gate sections, plus analysis properties.
 */
@Override
public WebhookPayload create(ProjectAnalysis analysis) {
    Writer string = new StringWriter();
    try (JsonWriter writer = JsonWriter.of(string)) {
        writer.beginObject();
        writeServer(writer);
        writeTask(writer, analysis.getCeTask());
        writeAnalysis(writer, analysis, system2);
        writeProject(analysis, writer, analysis.getProject());
        analysis.getBranch().ifPresent(b -> writeBranch(writer, analysis.getProject(), b));
        analysis.getQualityGate().ifPresent(qualityGate -> writeQualityGate(writer, qualityGate));
        writeAnalysisProperties(writer, analysis.getProperties());
        writer.endObject().close();
        return new WebhookPayload(analysis.getProject().getKey(), string.toString());
    }
}
// A pull-request analysis payload must carry a branch section with type
// PULL_REQUEST, isMain=false, and a dashboard URL whose pullRequest query
// parameter URL-encodes the PR name ("pr/foo" -> "pr%2Ffoo").
@Test
public void create_payload_on_pull_request() {
    CeTask task = new CeTask("#1", CeTask.Status.SUCCESS);
    ProjectAnalysis analysis = newAnalysis(task, null, new Branch(false, "pr/foo", Branch.Type.PULL_REQUEST), 1_500_000_000_000L, emptyMap());
    WebhookPayload payload = underTest.create(analysis);
    // Partial JSON comparison: only the branch section is asserted.
    assertJson(payload.getJson())
        .isSimilarTo("{"
            + "\"branch\": {"
            + " \"name\": \"pr/foo\","
            + " \"type\": \"PULL_REQUEST\","
            + " \"isMain\": false,"
            + " \"url\": \"http://foo/dashboard?id=P1&pullRequest=pr%2Ffoo\""
            + "}"
            + "}");
}
/**
 * Writes {@code value} to {@code buffer} as an unsigned variable-length
 * integer (base-128 / LEB128-style encoding): seven payload bits per byte,
 * least-significant group first, with the high bit set on every byte except
 * the last. An int encodes to at most five bytes; the ladder is kept
 * unrolled rather than written as a loop.
 *
 * @param value  value to encode (interpreted as unsigned)
 * @param buffer destination buffer; its position advances by 1 to 5 bytes
 */
public static void writeUnsignedVarint(int value, ByteBuffer buffer) {
    int remaining = value;
    // Byte 1.
    if ((remaining & ~0x7F) == 0) {
        buffer.put((byte) remaining);
        return;
    }
    buffer.put((byte) ((remaining & 0x7F) | 0x80));
    remaining >>>= 7;
    // Byte 2.
    if ((remaining & ~0x7F) == 0) {
        buffer.put((byte) remaining);
        return;
    }
    buffer.put((byte) ((remaining & 0x7F) | 0x80));
    remaining >>>= 7;
    // Byte 3.
    if ((remaining & ~0x7F) == 0) {
        buffer.put((byte) remaining);
        return;
    }
    buffer.put((byte) ((remaining & 0x7F) | 0x80));
    remaining >>>= 7;
    // Byte 4.
    if ((remaining & ~0x7F) == 0) {
        buffer.put((byte) remaining);
        return;
    }
    buffer.put((byte) ((remaining & 0x7F) | 0x80));
    remaining >>>= 7;
    // Byte 5: at most four bits remain (value >>> 28), no continuation bit.
    buffer.put((byte) remaining);
}
// Cross-checks the unrolled writeUnsignedVarint against a straightforward
// loop-based reference implementation, asserting byte-for-byte identical
// output over a sample of the non-negative int range (step 13).
@Test
public void testCorrectnessWriteUnsignedVarint() {
    // The old well-known implementation for writeUnsignedVarint.
    IntFunction<ByteBuffer> simpleImplementation = (int value) -> {
        ByteBuffer buffer = ByteBuffer.allocate(MAX_LENGTH_VARINT);
        while (true) {
            if ((value & ~0x7F) == 0) {
                buffer.put((byte) value);
                break;
            } else {
                buffer.put((byte) ((value & 0x7F) | 0x80));
                value >>>= 7;
            }
        }
        return buffer;
    };
    // compare the full range of values
    final ByteBuffer actual = ByteBuffer.allocate(MAX_LENGTH_VARINT);
    // The "i >= 0" guard stops the loop once i overflows past Integer.MAX_VALUE.
    for (int i = 0; i < Integer.MAX_VALUE && i >= 0; i += 13) {
        ByteUtils.writeUnsignedVarint(i, actual);
        final ByteBuffer expected = simpleImplementation.apply(i);
        assertArrayEquals(expected.array(), actual.array(), "Implementations do not match for integer=" + i);
        actual.clear();
    }
}
/**
 * Writes the changed config groups back to the long-polling client as a JSON
 * success payload, with HTTP caching explicitly disabled so intermediaries
 * never serve a stale change notification.
 */
private void generateResponse(final HttpServletResponse response, final List<ConfigGroupEnum> changedGroups) {
    try {
        // Disable caching for both HTTP/1.0 (Pragma, Expires) and HTTP/1.1.
        response.setHeader("Pragma", "no-cache");
        response.setDateHeader("Expires", 0);
        response.setHeader("Cache-Control", "no-cache,no-store");
        response.setContentType(MediaType.APPLICATION_JSON_VALUE);
        response.setStatus(HttpServletResponse.SC_OK);
        response.getWriter().println(GsonUtils.getInstance().toJson(ShenyuAdminResult.success(ShenyuResultMessage.SUCCESS, changedGroups)));
    } catch (IOException ex) {
        // Best effort: a failed write is logged, not propagated to the caller.
        LOG.error("Sending response failed.", ex);
    }
}
// Mirrors the header/status/body mutations performed by generateResponse on a
// mock response.
// NOTE(review): this test re-applies the mutations itself and never invokes
// generateResponse; consider calling the production method and asserting the
// resulting response state instead.
@Test
public void testGenerateResponse() throws UnsupportedEncodingException {
    List<ConfigGroupEnum> changedGroups = new ArrayList<>();
    changedGroups.add(ConfigGroupEnum.PLUGIN);
    this.httpServletResponse.setHeader("Pragma", "no-cache");
    this.httpServletResponse.setDateHeader("Expires", 0);
    this.httpServletResponse.setHeader("Cache-Control", "no-cache,no-store");
    this.httpServletResponse.setContentType(MediaType.APPLICATION_JSON_VALUE);
    this.httpServletResponse.setStatus(MockHttpServletResponse.SC_OK);
    this.httpServletResponse.getWriter().println(GsonUtils.getInstance().toJson(ShenyuAdminResult.success(ShenyuResultMessage.SUCCESS, changedGroups)));
}
/**
 * Creates a Flink batch ExecutionEnvironment from the pipeline options,
 * defaulting files-to-stage to an empty list when unset and passing the
 * configured Flink conf directory through to the full overload.
 */
public static ExecutionEnvironment createBatchExecutionEnvironment(FlinkPipelineOptions options) {
    return createBatchExecutionEnvironment(
        options,
        MoreObjects.firstNonNull(options.getFilesToStage(), Collections.emptyList()),
        options.getFlinkConfDir());
}
// IPv6 Flink master addresses must be parsed correctly in batch mode: a
// bracketed literal with an explicit port keeps that port, while a bare IPv6
// literal falls back to the default REST port.
@Test
public void shouldSupportIPv6Batch() {
    FlinkPipelineOptions options = getDefaultPipelineOptions();
    options.setRunner(FlinkRunner.class);
    // Bracketed IPv6 literal with explicit port.
    options.setFlinkMaster("[FE80:CD00:0000:0CDE:1257:0000:211E:729C]:1234");
    ExecutionEnvironment bev = FlinkExecutionEnvironments.createBatchExecutionEnvironment(options);
    checkHostAndPort(bev, "FE80:CD00:0000:0CDE:1257:0000:211E:729C", 1234);
    // Bare IPv6 literal: default REST port is used.
    options.setFlinkMaster("FE80:CD00:0000:0CDE:1257:0000:211E:729C");
    bev = FlinkExecutionEnvironments.createBatchExecutionEnvironment(options);
    checkHostAndPort(
        bev, "FE80:CD00:0000:0CDE:1257:0000:211E:729C", RestOptions.PORT.defaultValue());
}
/**
 * Shows a key-press preview popup for the given key: obtains a popup for the
 * key, asks the position calculator where to anchor it relative to the parent
 * view's window location, and displays the icon there.
 */
@Override
public void showPreviewForKey(
    Keyboard.Key key, Drawable icon, View parentView, PreviewPopupTheme previewPopupTheme) {
    KeyPreview popup = getPopupForKey(key, parentView, previewPopupTheme);
    Point previewPosition =
        mPositionCalculator.calculatePositionForPreview(
            key, previewPopupTheme, getLocationInWindow(parentView));
    popup.showPreviewForKey(key, icon, previewPosition);
}
// Modifier keys must never get a preview popup: after marking the key as a
// modifier and requesting a preview, no popup window should have been created.
// NOTE(review): the "y" argument hits a String-label overload of
// showPreviewForKey, not the Drawable one shown elsewhere -- confirm.
@Test
public void testNoPopupForModifier() {
    KeyPreviewsManager underTest = new KeyPreviewsManager(getApplicationContext(), mPositionCalculator, 3);
    // Sanity: nothing created before any preview request.
    PopupWindow createdPopupWindow = getLatestCreatedPopupWindow();
    Assert.assertNull(createdPopupWindow);
    mTestKeys[0].modifier = true;
    underTest.showPreviewForKey(mTestKeys[0], "y", mKeyboardView, mTheme);
    createdPopupWindow = getLatestCreatedPopupWindow();
    Assert.assertNull(createdPopupWindow);
}
/**
 * Returns the number of current alarms per severity level for the given
 * device, grouping the device's alarms by severity.
 */
@Override
public Map<Alarm.SeverityLevel, Long> getAlarmCounts(DeviceId deviceId) {
    return getAlarms(deviceId).stream().collect(
        Collectors.groupingBy(Alarm::severity, Collectors.counting()));
}
// Removing a device must also remove its alarms: after two CRITICAL alarms
// are registered, a DEVICE_REMOVED event leaves the per-device alarm counts
// empty.
@Test
public void testRemoveWhenDeviceRemoved() {
    providerService.updateAlarmList(DEVICE_ID, ImmutableSet.of(ALARM_B, ALARM_A));
    verifyGettingSetsOfAlarms(manager, 2, 2);
    validateEvents(AlarmEvent.Type.CREATED, AlarmEvent.Type.CREATED);
    Map<Alarm.SeverityLevel, Long> critical2 = new CountsMapBuilder().with(CRITICAL, 2L).create();
    assertEquals("A critical should be present", critical2, manager.getAlarmCounts());
    assertEquals("A critical should be present", critical2, manager.getAlarmCounts(DEVICE_ID));
    // Simulate the device being removed from the inventory.
    deviceService.deviceListener.event(new DeviceEvent(DeviceEvent.Type.DEVICE_REMOVED, device));
    Map<Alarm.SeverityLevel, Long> zeroAlarms = new CountsMapBuilder().create();
    assertEquals("The counts should be empty for removed device", zeroAlarms, manager.getAlarmCounts(DEVICE_ID));
}
/**
 * Deprecated pass-through retained for compatibility; delegates unchanged to
 * the superclass implementation.
 *
 * @deprecated use the superclass method directly
 */
@Deprecated
@Override
public void toXML(Object obj, OutputStream out) {
    super.toXML(obj, out);
}
// JENKINS-5768: field names containing underscores and dollar signs (leading,
// trailing, doubled) must survive an XStream2 serialize/deserialize round
// trip unchanged.
@Issue("JENKINS-5768")
@Test
public void xmlRoundTrip() {
    XStream2 xs = new XStream2();
    __Foo_Bar$Class b = new __Foo_Bar$Class();
    String xml = xs.toXML(b);
    __Foo_Bar$Class b2 = (__Foo_Bar$Class) xs.fromXML(xml);
    // The generated XML is passed as the assertion message for easier debugging.
    assertEquals(xml, b.under_1, b2.under_1);
    assertEquals(xml, b.under__2, b2.under__2);
    assertEquals(xml, b._leadUnder1, b2._leadUnder1);
    assertEquals(xml, b.__leadUnder2, b2.__leadUnder2);
    assertEquals(xml, b.$dollar, b2.$dollar);
    assertEquals(xml, b.dollar$2, b2.dollar$2);
}
/**
 * Issues CONFIG RESETSTAT against the given cluster node, resetting its
 * statistics counters, and blocks until the command completes.
 */
@Override
public void resetConfigStats(RedisClusterNode node) {
    RedisClient entry = getEntry(node);
    RFuture<Void> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CONFIG_RESETSTAT);
    // Synchronous API: wait for the async command to finish.
    syncFuture(f);
}
// Smoke test: resetting config stats on the first master must complete
// without throwing.
@Test
public void testResetConfigStats() {
    RedisClusterNode master = getFirstMaster();
    connection.resetConfigStats(master);
}
/**
 * Infers the unknown combat rooms of a raid layout from the known ones by
 * matching against the fixed rotation orders in {@code ROTATIONS}.
 *
 * <p>Returns {@code false} when nothing can be solved (null array, fewer than
 * two known combat rooms, or no/ambiguous rotation match); returns
 * {@code true} and fills the unknown slots in place when a unique rotation
 * matches, or when every slot is already known.</p>
 *
 * <p>Fix: the rotation-comparison loop dereferenced {@code rooms[j]} without
 * a null check, although both the counting pass and the fill pass tolerate
 * null entries; a sparse array would have thrown a NullPointerException.</p>
 */
static boolean solve(RaidRoom[] rooms) {
    if (rooms == null) {
        return false;
    }
    List<RaidRoom> match = null;
    Integer start = null;
    Integer index = null;
    int known = 0;
    // Count the known combat rooms and remember the first known position.
    for (int i = 0; i < rooms.length; i++) {
        if (rooms[i] == null || rooms[i].getType() != RoomType.COMBAT || rooms[i] == UNKNOWN_COMBAT) {
            continue;
        }
        if (start == null) {
            start = i;
        }
        known++;
    }
    // Need at least two known rooms to disambiguate a rotation.
    if (known < 2) {
        return false;
    }
    // Everything already known: nothing to solve.
    if (known == rooms.length) {
        return true;
    }
    // Try every rotation at every offset that aligns with the first known room.
    // NOTE: ROTATIONS is iterated with a raw List; element type is assumed to
    // be RaidRoom (identity comparison against rooms[] entries).
    for (List rotation : ROTATIONS) {
        COMPARE:
        for (int i = 0; i < rotation.size(); i++) {
            if (rooms[start] == rotation.get(i)) {
                for (int j = start + 1; j < rooms.length; j++) {
                    // Fix: skip null entries as well, matching the other passes.
                    if (rooms[j] == null || rooms[j].getType() != RoomType.COMBAT || rooms[j] == UNKNOWN_COMBAT) {
                        continue;
                    }
                    if (rooms[j] != rotation.get(floorMod(i + j - start, rotation.size()))) {
                        break COMPARE;
                    }
                }
                // Two different rotations both match: ambiguous, give up.
                if (match != null && match != rotation) {
                    return false;
                }
                index = i - start;
                match = rotation;
            }
        }
    }
    if (match == null) {
        return false;
    }
    // Fill in the unknown combat slots from the matched rotation.
    for (int i = 0; i < rooms.length; i++) {
        if (rooms[i] == null) {
            continue;
        }
        if (rooms[i].getType() != RoomType.COMBAT || rooms[i] == UNKNOWN_COMBAT) {
            rooms[i] = match.get(floorMod(index + i, match.size()));
        }
    }
    return true;
}
// With VESPULA and VANGUARDS known at positions 0 and 3, the solver must
// resolve the two unknown combat rooms to SHAMANS and VASA.
@Test
public void testSolve1() {
    RaidRoom[] rooms = new RaidRoom[]{VESPULA, UNKNOWN_COMBAT, UNKNOWN_COMBAT, VANGUARDS};
    RotationSolver.solve(rooms);
    assertArrayEquals(new RaidRoom[]{VESPULA, SHAMANS, VASA, VANGUARDS}, rooms);
}
/**
 * Builds a routing table with the given generation from the config server's
 * lb-services config. Skips the hosted-Vespa infrastructure tenant, creates
 * one Target per configured endpoint, and merges application-scoped endpoints
 * sharing a DNS name by concatenating their real servers.
 */
public static RoutingTable from(LbServicesConfig config, long generation) {
    Map<Endpoint, Target> entries = new TreeMap<>();
    for (var tenants : config.tenants().entrySet()) {
        TenantName tenantName = TenantName.from(tenants.getKey());
        // Never route to the hosted-Vespa infrastructure tenant itself.
        if (tenantName.value().equals(HOSTED_VESPA_TENANT_NAME)) continue;
        for (var applications : tenants.getValue().applications().entrySet()) {
            // Deployment id has four ':'-separated parts; judging by the
            // usage below: application, environment, region, instance.
            String[] parts = applications.getKey().split(":");
            if (parts.length != 4) throw new IllegalArgumentException("Invalid deployment ID '" + applications.getKey() + "'");
            ApplicationName application = ApplicationName.from(parts[0]);
            ZoneId zone = ZoneId.from(parts[1], parts[2]);
            InstanceName instance = InstanceName.from(parts[3]);
            for (var configuredEndpoint : applications.getValue().endpoints()) {
                // Every real serves on port 4443.
                List<Real> reals = configuredEndpoint.hosts().stream()
                        .map(hostname -> new Real(hostname, 4443,
                                                  configuredEndpoint.weight(),
                                                  applications.getValue().activeRotation()))
                        .toList();
                Endpoint endpoint = new Endpoint(configuredEndpoint.dnsName(), routingMethodFrom(configuredEndpoint));
                ClusterSpec.Id cluster = ClusterSpec.Id.from(configuredEndpoint.clusterId());
                Target target;
                boolean applicationEndpoint = configuredEndpoint.scope() == LbServicesConfig.Tenants.Applications.Endpoints.Scope.Enum.application;
                if (applicationEndpoint) {
                    // Application-scoped endpoints span instances, so they are
                    // keyed by DNS name rather than a single application id.
                    target = Target.create(endpoint.dnsName, tenantName, application, cluster, zone, reals);
                } else {
                    target = Target.create(ApplicationId.from(tenantName, application, instance), cluster, zone, reals);
                }
                entries.merge(endpoint, target, (oldValue, value) -> {
                    // Application endpoints seen again accumulate their reals;
                    // any other duplicate keeps the first target.
                    if (applicationEndpoint) {
                        List<Real> merged = new ArrayList<>(oldValue.reals());
                        merged.addAll(value.reals());
                        return value.withReals(merged);
                    }
                    return oldValue;
                });
            }
        }
    }
    return new RoutingTable(entries, generation);
}
// End-to-end translation check: the routing table parsed from the
// "lbservices-config" fixture must contain the expected endpoints, including
// the application-scoped weighted endpoint whose reals from both instances
// are merged.
@Test
public void translate_from_lb_services_config() {
    RoutingTable expected = new RoutingTable(Map.of(
        new Endpoint("beta.music.vespa.us-north-1.vespa.oath.cloud", RoutingMethod.sharedLayer4),
        Target.create(ApplicationId.from("vespa", "music", "beta"), ClusterSpec.Id.from("default"), ZoneId.from("prod.us-north-1"),
                      List.of(new Real("host3-beta", 4443, 1, true), new Real("host4-beta", 4443, 1, true))),
        new Endpoint("music.vespa.global.vespa.oath.cloud", RoutingMethod.sharedLayer4),
        Target.create(ApplicationId.from("vespa", "music", "default"), ClusterSpec.Id.from("default"), ZoneId.from("prod.us-north-1"),
                      List.of(new Real("host1-default", 4443, 1, true), new Real("host2-default", 4443, 1, true))),
        new Endpoint("music.vespa.us-north-1.vespa.oath.cloud", RoutingMethod.sharedLayer4),
        Target.create(ApplicationId.from("vespa", "music", "default"), ClusterSpec.Id.from("default"), ZoneId.from("prod.us-north-1"),
                      List.of(new Real("host1-default", 4443, 1, true), new Real("host2-default", 4443, 1, true))),
        new Endpoint("rotation-02.vespa.global.routing", RoutingMethod.sharedLayer4),
        Target.create(ApplicationId.from("vespa", "music", "default"), ClusterSpec.Id.from("default"), ZoneId.from("prod.us-north-1"),
                      List.of(new Real("host1-default", 4443, 1, true), new Real("host2-default", 4443, 1, true))),
        // Application endpoint: reals of both "beta" and "default" instances merged.
        new Endpoint("use-weighted.music.vespa.us-north-1-r.vespa.oath.cloud", RoutingMethod.sharedLayer4),
        Target.create("use-weighted.music.vespa.us-north-1-r.vespa.oath.cloud", TenantName.from("vespa"), ApplicationName.from("music"),
                      ClusterSpec.Id.from("default"), ZoneId.from("prod.us-north-1"),
                      List.of(new Real("host3-beta", 4443, 1, true), new Real("host4-beta", 4443, 1, true),
                              new Real("host1-default", 4443, 0, true), new Real("host2-default", 4443, 0, true)))
    ), 42);
    RoutingTable actual = TestUtil.readRoutingTable("lbservices-config");
    assertEquals(expected, actual);
}
/**
 * Closes the stream: runs the superclass close first, then releases the
 * underlying socket.
 *
 * @throws IOException if the superclass close fails
 */
@Override
public void close() throws IOException {
    super.close();
    closeSocket();
}
// Round-trip through a real TCP socket: bytes written to the resilient output
// stream must arrive at a local server as the original message.
@Test
void testWriteMessage() throws Exception {
    try (ServerSocket ss = new ServerSocket(0); // port 0: pick any free port
         TcpServer tcpServer = new TcpServer(ss);
         ResilientSocketOutputStream resilientSocketOutputStream = new ResilientSocketOutputStream("localhost", ss.getLocalPort(), 1024, 500, SocketFactory.getDefault())) {
        Future<List<String>> receivedMessages = tcpServer.receive();
        resilientSocketOutputStream.write("Test message".getBytes(StandardCharsets.UTF_8));
        resilientSocketOutputStream.close();
        assertThat(receivedMessages.get(5, TimeUnit.SECONDS))
            .singleElement()
            .isEqualTo("Test message");
    }
}
/**
 * Executes the scenario: runs each step in order, recording step results, and
 * converts an unexpected engine failure into a fake step result. The finally
 * block guarantees afterRun() and cleanup even on crashes so the parallel
 * runner's countdown latches are never left hanging.
 */
@Override
public void run() {
    try {
        // make sure we call afterRun() even on crashes
        // and operate countdown latches, else we may hang the parallel runner
        if (steps == null) {
            beforeRun();
        }
        if (skipped) {
            return;
        }
        int count = steps.size();
        int index = 0;
        // nextStepIndex() drives iteration so debug step-back can rewind.
        while ((index = nextStepIndex()) < count) {
            currentStep = steps.get(index);
            execute(currentStep);
            if (currentStepResult != null) { // can be null if debug step-back or hook skip
                result.addStepResult(currentStepResult);
            }
        }
    } catch (Exception e) {
        // Engine-level failure (not a step failure): record what we have and
        // synthesize a failing step so the scenario result reflects the crash.
        if (currentStepResult != null) {
            result.addStepResult(currentStepResult);
        }
        logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
        currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
    } finally {
        if (!skipped) {
            afterRun();
            // Honor abort-on-failure for the whole suite.
            if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
                featureRuntime.suite.abort();
            }
        }
        if (caller.isNone()) {
            logAppender.close(); // reclaim memory
        }
    }
}
// The match expression may reference the current JSON root via '$': the
// expected fahrenheit value is computed from the actual celsius field.
@Test
void testMatchMagicVariables() {
    run(
        "def temperature = { celsius: 100, fahrenheit: 212 }",
        "match temperature contains { fahrenheit: '#($.celsius * 1.8 + 32)' }"
    );
}
/**
 * Authenticates a request: extracts the token from the auth data, validates
 * it, and derives the principal role. A failed token extraction increments
 * the failure metric before rethrowing; successful authentication records a
 * success metric.
 *
 * @return the authenticated principal (role)
 * @throws AuthenticationException if no token is present or validation fails
 */
@Override
public String authenticate(AuthenticationDataSource authData) throws AuthenticationException {
    String token;
    try {
        // Get Token
        token = getToken(authData);
    } catch (AuthenticationException exception) {
        incrementFailureMetric(ErrorCode.INVALID_AUTH_DATA);
        throw exception;
    }
    // Parse Token by validating
    String role = getPrincipal(authenticateToken(token));
    AuthenticationMetrics.authenticateSuccess(getClass().getSimpleName(), getAuthMethodName());
    return role;
}
// When the auth data carries no token at all (neither command nor HTTP),
// authenticate must fail with AuthenticationException.
@Test(expectedExceptions = AuthenticationException.class)
public void testAuthenticateWhenNoJwtPassed() throws AuthenticationException {
    AuthenticationProviderToken provider = new AuthenticationProviderToken();
    provider.authenticate(new AuthenticationDataSource() {
        @Override
        public boolean hasDataFromCommand() {
            return false;
        }

        @Override
        public boolean hasDataFromHttp() {
            return false;
        }
    });
}
/**
 * Returns true when the filter predicate proves, from the given column chunk
 * statistics, that no record in the chunk can match -- i.e. the chunk may be
 * dropped without being read.
 *
 * @throws NullPointerException if pred or columns is null
 */
public static boolean canDrop(FilterPredicate pred, List<ColumnChunkMetaData> columns) {
    Objects.requireNonNull(pred, "pred cannot be null");
    Objects.requireNonNull(columns, "columns cannot be null");
    return pred.accept(new StatisticsFilter(columns));
}
// Exercises statistics-based pruning for in()/notIn() predicates: value sets
// overlapping the column's min/max range, sets fully outside it, missing
// min/max stats, a column absent from the schema, null members, and the
// interaction of a null member with the chunk's null count.
@Test
public void testInNotIn() {
    // Values inside the stats range: neither in nor notIn can be dropped.
    Set<Integer> values1 = new HashSet<>();
    values1.add(10);
    values1.add(12);
    values1.add(15);
    values1.add(17);
    values1.add(19);
    assertFalse(canDrop(in(intColumn, values1), columnMetas));
    assertFalse(canDrop(notIn(intColumn, values1), columnMetas));
    Set<Integer> values2 = new HashSet<>();
    values2.add(109);
    values2.add(2);
    values2.add(5);
    values2.add(117);
    values2.add(101);
    assertFalse(canDrop(in(intColumn, values2), columnMetas));
    assertFalse(canDrop(notIn(intColumn, values2), columnMetas));
    Set<Integer> values3 = new HashSet<>();
    values3.add(1);
    values3.add(2);
    values3.add(5);
    values3.add(7);
    values3.add(10);
    assertFalse(canDrop(in(intColumn, values3), columnMetas));
    assertFalse(canDrop(notIn(intColumn, values3), columnMetas));
    // Without min/max statistics nothing can be proven, so nothing is dropped.
    Set<Integer> values4 = new HashSet<>();
    values4.add(50);
    values4.add(60);
    assertFalse(canDrop(in(intColumn, values4), missingMinMaxColumnMetas));
    assertFalse(canDrop(notIn(intColumn, values4), missingMinMaxColumnMetas));
    Set<Double> values5 = new HashSet<>();
    values5.add(1.0);
    values5.add(2.0);
    values5.add(95.0);
    values5.add(107.0);
    values5.add(99.0);
    assertFalse(canDrop(in(doubleColumn, values5), columnMetas));
    assertFalse(canDrop(notIn(doubleColumn, values5), columnMetas));
    // A column missing from the schema: in() is always droppable, notIn() never.
    Set<Binary> values6 = new HashSet<>();
    values6.add(Binary.fromString("test1"));
    values6.add(Binary.fromString("test2"));
    assertTrue(canDrop(in(missingColumn, values6), columnMetas));
    assertFalse(canDrop(notIn(missingColumn, values6), columnMetas));
    // A null member against an all-null column cannot be pruned either way.
    Set<Integer> values7 = new HashSet<>();
    values7.add(null);
    assertFalse(canDrop(in(intColumn, values7), nullColumnMetas));
    assertFalse(canDrop(notIn(intColumn, values7), nullColumnMetas));
    Set<Binary> values8 = new HashSet<>();
    values8.add(null);
    assertFalse(canDrop(in(missingColumn, values8), columnMetas));
    assertFalse(canDrop(notIn(missingColumn, values8), columnMetas));
    // in({null}) is droppable only when the chunk provably contains no nulls.
    IntStatistics statsNoNulls = new IntStatistics();
    statsNoNulls.setMinMax(10, 100);
    statsNoNulls.setNumNulls(0);
    IntStatistics statsSomeNulls = new IntStatistics();
    statsSomeNulls.setMinMax(10, 100);
    statsSomeNulls.setNumNulls(3);
    Set<Integer> values9 = new HashSet<>();
    values9.add(null);
    assertTrue(canDrop(
        in(intColumn, values9),
        Arrays.asList(getIntColumnMeta(statsNoNulls, 177L), getDoubleColumnMeta(doubleStats, 177L))));
    assertFalse(canDrop(
        notIn(intColumn, values9),
        Arrays.asList(getIntColumnMeta(statsNoNulls, 177L), getDoubleColumnMeta(doubleStats, 177L))));
    assertFalse(canDrop(
        in(intColumn, values9),
        Arrays.asList(getIntColumnMeta(statsSomeNulls, 177L), getDoubleColumnMeta(doubleStats, 177L))));
    assertFalse(canDrop(
        notIn(intColumn, values9),
        Arrays.asList(getIntColumnMeta(statsSomeNulls, 177L), getDoubleColumnMeta(doubleStats, 177L))));
}
/**
 * Writes the ZooKeeper config to disk using the TLS context from the Vespa
 * transport-security environment when one is available, falling back to a
 * TLS-disabled config otherwise.
 */
void writeConfigToDisk() {
    VespaTlsConfig config =
        VespaZookeeperTlsContextUtils.tlsContext()
            .map(ctx -> new VespaTlsConfig(ctx, TransportSecurityUtils.getInsecureMixedMode()))
            .orElse(VespaTlsConfig.tlsDisabled());
    writeConfigToDisk(config);
}
// Single-host, TLS-disabled configuration must produce the expected config
// file and an id file containing "0\n".
@Test
public void config_is_written_correctly_with_one_server() {
    ZookeeperServerConfig.Builder builder = createConfigBuilderForSingleHost(cfgFile, idFile);
    new Configurator(builder.build()).writeConfigToDisk(VespaTlsConfig.tlsDisabled());
    validateConfigFileSingleHost(cfgFile, false);
    validateIdFile(idFile, "0\n");
}
/**
 * Reads the value of a public static field via reflection.
 *
 * <p>Fix: the {@code clazz} parameter was a raw {@link Class}; it is now the
 * wildcard-parameterized {@code Class<?>}, which accepts exactly the same
 * arguments from existing callers.</p>
 *
 * @param clazz     the class declaring the field
 * @param fieldName the name of a public static field
 * @param <E>       expected value type (unchecked cast chosen by the caller)
 * @return the field's value, or {@code null} if the field does not exist, is
 *         not accessible, or reflection fails for any other reason
 */
@SuppressWarnings("unchecked")
public static <E> E findStaticFieldValue(Class<?> clazz, String fieldName) {
    try {
        Field field = clazz.getField(fieldName);
        // Static field: the instance argument is ignored, so null is fine.
        return (E) field.get(null);
    } catch (Exception ignore) {
        // Deliberate best-effort lookup: absence is signalled with null.
        return null;
    }
}
// An existing public static field must be read back with its current value.
@Test
public void test_whenFieldExist() {
    Integer value = findStaticFieldValue(ClassWithStaticField.class, "staticField");
    assertEquals(ClassWithStaticField.staticField, value);
}
/**
 * SQL scalar function returning the Jaccard index (set-similarity measure)
 * of two serialized KHyperLogLog sketches: both slices are deserialized and
 * compared via KHyperLogLog.jaccardIndex.
 */
@ScalarFunction
@SqlType(StandardTypes.DOUBLE)
public static double jaccardIndex(@SqlType(KHyperLogLogType.NAME) Slice slice1, @SqlType(KHyperLogLogType.NAME) Slice slice2) {
    KHyperLogLog khll1 = KHyperLogLog.newInstance(slice1);
    KHyperLogLog khll2 = KHyperLogLog.newInstance(slice2);
    return KHyperLogLog.jaccardIndex(khll1, khll2);
}
// Two sketch sets built so that their true Jaccard index is 2/3; the SQL
// function result must match within the sketch's approximation error.
@Test
public void testJaccardIndex() {
    int blockSize = 10;
    long uniqueElements = 10000 * blockSize;
    // 5% relative tolerance around the expected 2/3.
    double error = ((double) 2 / 3) * 0.05;
    List<KHyperLogLog> list1 = buildKHyperLogLogs(blockSize, uniqueElements, threshold, potential);
    List<KHyperLogLog> list2 = buildKHyperLogLogs((int) (blockSize * 1.5), (int) (uniqueElements * 1.5), threshold, potential);
    String projection = getJaccardIndexProjection(list1, list2);
    functionAssertions.assertFunctionWithError(projection, DOUBLE, (double) 2 / 3, error);
}
/**
 * Marks the given token ids as invalid by persisting one InvalidTokenEntity
 * per id in a single batch save.
 */
@Override
public void invalidateTokens(Set<String> tokenIds) {
    final Set<InvalidTokenEntity> enocaInvalidTokenEntities =
        tokenIds.stream()
            .map(tokenId -> InvalidTokenEntity.builder()
                .tokenId(tokenId)
                .build()
            )
            .collect(Collectors.toSet());
    // One repository round trip for the whole batch.
    invalidTokenRepository.saveAll(enocaInvalidTokenEntities);
}
// invalidateTokens must persist exactly one entity per token id via a single
// saveAll call; the captured batch is asserted by tokenId, order-independent.
@Test
void givenTokenIds_whenInvalidateTokens_thenSaveAllTokens() {
    // Given
    Set<String> tokenIds = Set.of("token1", "token2");
    // When
    invalidTokenService.invalidateTokens(tokenIds);
    // Then
    ArgumentCaptor<Set<InvalidTokenEntity>> captor = ArgumentCaptor.forClass(Set.class);
    verify(invalidTokenRepository).saveAll(captor.capture());
    Set<InvalidTokenEntity> capturedTokens = captor.getValue();
    assertThat(capturedTokens)
        .hasSize(2)
        .extracting("tokenId")
        .containsExactlyInAnyOrder("token1", "token2");
}
/**
 * Determines the hashing algorithm of a pre-hashed password string:
 * "$2y"-prefixed values are BCrypt (cost must be at least BCRYPT_MIN_COST);
 * values containing ':' are PBKDF2 (iterations must be at least
 * PBKDF2_MIN_ITERATIONS); anything else is rejected.
 *
 * @throws HashedPasswordException if the algorithm cannot be determined or
 *         its cost/iteration count is below the enforced minimum
 */
public static HashingAlgorithm getHashingAlgorithm(String password) {
    if (password.startsWith("$2y")) {
        if (getBCryptCost(password) < BCRYPT_MIN_COST) {
            throw new HashedPasswordException("Minimum cost of BCrypt password must be " + BCRYPT_MIN_COST);
        }
        return HashingAlgorithm.BCRYPT;
    }
    if (password.contains(":")) {
        if (getPBKDF2Iterations(password) < PBKDF2_MIN_ITERATIONS) {
            throw new HashedPasswordException("Minimum iterations of PBKDF2 password must be " + PBKDF2_MIN_ITERATIONS);
        }
        return HashingAlgorithm.PBKDF2;
    }
    throw new HashedPasswordException("Password hashing algorithm cannot be determined");
}
// A "$2y"-prefixed hash with sufficient cost (10) must be detected as BCrypt.
@Test
public void testHashingAlgorithmBCrypt() {
    String password = "$2y$10$BqTb8hScP5DfcpmHo5PeyugxHz5Ky/qf3wrpD7SNm8sWuA3VlGqsa";
    assertEquals(getHashingAlgorithm(password), BCRYPT);
}
/**
 * Writes a length-encoded string to the payload.
 * NOTE(review): not yet implemented -- the body is an open TODO, so calling
 * this currently writes nothing.
 */
public void writeStringVar(final String value) {
    // TODO
}
// Only asserts the call does not throw; with the current empty implementation
// of writeStringVar this is trivially satisfied.
@Test
void assertWriteStringVar() {
    assertDoesNotThrow(() -> new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8).writeStringVar(""));
}
/**
 * Pads {@code input} on the left with {@code padding} until it is exactly
 * {@code targetLen} characters long; inputs already longer than the target
 * are truncated to the target length.
 *
 * <p>Returns {@code null} when the input is null, the padding is null or
 * empty, or the target length is null or negative.</p>
 */
@Udf
public String lpad(
    @UdfParameter(description = "String to be padded") final String input,
    @UdfParameter(description = "Target length") final Integer targetLen,
    @UdfParameter(description = "Padding string") final String padding) {
  if (input == null) {
    return null;
  }
  if (padding == null || padding.isEmpty() || targetLen == null || targetLen < 0) {
    return null;
  }
  // Number of pad characters needed in front of the input (never negative).
  final int padChars = Math.max(targetLen - input.length(), 0);
  final StringBuilder result = new StringBuilder(targetLen + padding.length());
  while (result.length() < padChars) {
    result.append(padding);
  }
  // Trim the final padding repetition to the exact boundary, append the
  // input, then clamp the whole string to the target length.
  result.setLength(padChars);
  result.append(input);
  result.setLength(targetLen);
  return result.toString();
}
// An empty input padded to length 4 with "foo" yields "foof": one full
// repetition plus the first character of the next, truncated to the target.
@Test
public void shouldPadEmptyInputString() {
    final String result = udf.lpad("", 4, "foo");
    assertThat(result, is("foof"));
}
/**
 * Validates that every id in the collection refers to an existing, enabled
 * user; a null or empty collection is accepted without any lookup.
 * Throws USER_NOT_EXISTS for an unknown id and USER_IS_DISABLE (with the
 * user's nickname) for a user that is not enabled.
 */
@Override
public void validateUserList(Collection<Long> ids) {
    if (CollUtil.isEmpty(ids)) {
        return;
    }
    // Load the user records in one batch and index them by id.
    List<AdminUserDO> users = userMapper.selectBatchIds(ids);
    Map<Long, AdminUserDO> userMap = CollectionUtils.convertMap(users, AdminUserDO::getId);
    // Validate each requested id: it must exist and be enabled.
    ids.forEach(id -> {
        AdminUserDO user = userMap.get(id);
        if (user == null) {
            throw exception(USER_NOT_EXISTS);
        }
        if (!CommonStatusEnum.ENABLE.getStatus().equals(user.getStatus())) {
            throw exception(USER_IS_DISABLE, user.getNickname());
        }
    });
}
// Happy path: a single enabled user validates without throwing.
@Test
public void testValidateUserList_success() {
    // Mock data: insert an enabled user.
    AdminUserDO userDO = randomAdminUserDO().setStatus(CommonStatusEnum.ENABLE.getStatus());
    userMapper.insert(userDO);
    // Prepare arguments.
    List<Long> ids = singletonList(userDO.getId());
    // Call; no assertion needed -- success means no exception.
    userService.validateUserList(ids);
}
/**
 * REST endpoint listing all clusters, wrapped in the standard success
 * response envelope.
 */
@Operation(summary = "list", description = "List clusters")
@GetMapping
public ResponseEntity<List<ClusterVO>> list() {
    return ResponseEntity.success(clusterService.list());
}
// The controller must return a success envelope carrying exactly what the
// service returned.
@Test
void listReturnsAllClusters() {
    List<ClusterVO> clusters = Arrays.asList(new ClusterVO(), new ClusterVO());
    when(clusterService.list()).thenReturn(clusters);
    ResponseEntity<List<ClusterVO>> response = clusterController.list();
    assertTrue(response.isSuccess());
    assertEquals(clusters, response.getData());
}
/**
 * Builds the properties for the transactional ("fencable") producer used to
 * write to the config topic when exactly-once source support is enabled (or
 * being prepared). Starts from the base producer props and then forces
 * durable, idempotent, transactional settings; user overrides of idempotence
 * or the transactional id are rejected/overridden via ensureProperty.
 */
Map<String, Object> fencableProducerProps(DistributedConfig workerConfig) {
    Map<String, Object> result = new HashMap<>(baseProducerProps(workerConfig));
    result.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-leader");
    // Always require producer acks to all to ensure durable writes
    result.put(ProducerConfig.ACKS_CONFIG, "all");
    // We can set this to 5 instead of 1 without risking reordering because we are using an idempotent producer
    result.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 5);
    ConnectUtils.ensureProperty(
        result, ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "true",
        "for the worker's config topic producer when exactly-once source support is enabled or in preparation to be enabled", false
    );
    ConnectUtils.ensureProperty(
        result, ProducerConfig.TRANSACTIONAL_ID_CONFIG, workerConfig.transactionalProducerId(),
        "for the worker's config topic producer when exactly-once source support is enabled or in preparation to be enabled", true
    );
    return result;
}
// When the user supplies neither a transactional id nor idempotence, the
// store must default the transactional id from the group id and force
// idempotence on.
@Test
public void testFencableProducerPropertiesInsertedByDefault() {
    props.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "preparing");
    String groupId = "my-connect-cluster";
    props.put(GROUP_ID_CONFIG, groupId);
    props.remove(TRANSACTIONAL_ID_CONFIG);
    props.remove(ENABLE_IDEMPOTENCE_CONFIG);
    createStore();
    Map<String, Object> fencableProducerProperties = configStorage.fencableProducerProps(config);
    assertEquals("connect-cluster-" + groupId, fencableProducerProperties.get(TRANSACTIONAL_ID_CONFIG));
    assertEquals("true", fencableProducerProperties.get(ENABLE_IDEMPOTENCE_CONFIG));
}
/**
 * Converts one change-stream result-set row into change-stream records.
 * PostgreSQL dialects return the record as a single JSONB column; GoogleSQL
 * returns an array of structs, each of which may expand into multiple
 * records.
 */
public List<ChangeStreamRecord> toChangeStreamRecords(
    PartitionMetadata partition,
    ChangeStreamResultSet resultSet,
    ChangeStreamResultSetMetadata resultSetMetadata) {
    if (this.isPostgres()) {
        // In PostgresQL, change stream records are returned as JsonB.
        return Collections.singletonList(
            toChangeStreamRecordJson(partition, resultSet.getPgJsonb(0), resultSetMetadata));
    }
    // In GoogleSQL, change stream records are returned as an array of structs.
    return resultSet.getCurrentRowAsStruct().getStructList(0).stream()
        .flatMap(struct -> toChangeStreamRecord(partition, struct, resultSetMetadata))
        .collect(Collectors.toList());
}
// A heartbeat record serialized to JSON and fed through the Postgres mapper
// must deserialize back to an equal HeartbeatRecord.
@Test
public void testMappingJsonRowToHeartbeatRecord() {
    final HeartbeatRecord heartbeatRecord = new HeartbeatRecord(Timestamp.ofTimeSecondsAndNanos(10L, 20), null);
    final String jsonString = recordToJson(heartbeatRecord, false, false);
    assertNotNull(jsonString);
    ChangeStreamResultSet resultSet = mock(ChangeStreamResultSet.class);
    when(resultSet.getPgJsonb(0)).thenReturn(jsonString);
    assertEquals(
        Collections.singletonList(heartbeatRecord),
        mapperPostgres.toChangeStreamRecords(partition, resultSet, resultSetMetadata));
}
/**
 * Mines association rules from an FP-tree with the given minimum confidence,
 * returning them as a lazily evaluated sequential stream backed by the
 * miner's spliterator.
 */
public static Stream<AssociationRule> apply(double confidence, FPTree tree) {
    TotalSupportTree ttree = new TotalSupportTree(tree);
    ARM arm = new ARM(confidence, ttree);
    return StreamSupport.stream(arm.spliterator(), false);
}
// Mines rules from the fixture itemsets at confidence 0.5 and spot-checks the
// first, middle, and last of the nine expected rules (support, confidence,
// antecedent, consequent).
@Test
public void test() {
    System.out.println("ARM");
    FPTree tree = FPTree.of(3, itemsets);
    List<AssociationRule> rules = ARM.apply(0.5, tree).collect(Collectors.toList());
    assertEquals(9, rules.size());
    // Rule 0: {3} -> {2}
    assertEquals(0.6, rules.get(0).support, 1E-2);
    assertEquals(0.75, rules.get(0).confidence, 1E-2);
    assertEquals(1, rules.get(0).antecedent.length);
    assertEquals(3, rules.get(0).antecedent[0]);
    assertEquals(1, rules.get(0).consequent.length);
    assertEquals(2, rules.get(0).consequent[0]);
    // Rule 4: {1} -> {2}
    assertEquals(0.3, rules.get(4).support, 1E-2);
    assertEquals(0.6, rules.get(4).confidence, 1E-2);
    assertEquals(1, rules.get(4).antecedent.length);
    assertEquals(1, rules.get(4).antecedent[0]);
    assertEquals(1, rules.get(4).consequent.length);
    assertEquals(2, rules.get(4).consequent[0]);
    // Rule 8: {1} -> {3, 2}
    assertEquals(0.3, rules.get(8).support, 1E-2);
    assertEquals(0.6, rules.get(8).confidence, 1E-2);
    assertEquals(1, rules.get(8).antecedent.length);
    assertEquals(1, rules.get(8).antecedent[0]);
    assertEquals(2, rules.get(8).consequent.length);
    assertEquals(3, rules.get(8).consequent[0]);
    assertEquals(2, rules.get(8).consequent[1]);
}
/**
 * Copies each source file to the corresponding destination (pairwise by
 * index) on the same Hadoop FileSystem, failing fast with IOException on the
 * first copy that does not succeed.
 */
@Override
protected void copy(List<HadoopResourceId> srcResourceIds, List<HadoopResourceId> destResourceIds)
    throws IOException {
    for (int i = 0; i < srcResourceIds.size(); ++i) {
        // this enforces src and dest file systems to match
        final org.apache.hadoop.fs.FileSystem fs = srcResourceIds.get(i).toPath().getFileSystem(configuration);
        // Unfortunately HDFS FileSystems don't support a native copy operation so we are forced
        // to use the inefficient implementation found in FileUtil which copies all the bytes through
        // the local machine.
        //
        // HDFS FileSystem does define a concat method but could only find the DFSFileSystem
        // implementing it. The DFSFileSystem implemented concat by deleting the srcs after which
        // is not what we want. Also, all the other FileSystem implementations I saw threw
        // UnsupportedOperationException within concat.
        final boolean success =
            FileUtil.copy(
                fs,
                srcResourceIds.get(i).toPath(),
                fs,
                destResourceIds.get(i).toPath(),
                false,
                true,
                fs.getConf());
        if (!success) {
            // Defensive coding as this should not happen in practice
            throw new IOException(
                String.format(
                    "Unable to copy resource %s to %s. No further information provided by underlying filesystem.",
                    srcResourceIds.get(i).toPath(), destResourceIds.get(i).toPath()));
        }
    }
}
// Copying a non-existent source must surface the underlying
// FileNotFoundException.
@Test(expected = FileNotFoundException.class)
public void testCopySourceMissing() throws Exception {
    fileSystem.copy(
        ImmutableList.of(testPath("missingFile")), ImmutableList.of(testPath("copyTestFile")));
}
/**
 * Returns the union of this domain with {@code other}: the merged value sets,
 * with null allowed when either operand allows it. Both domains must be over
 * the same type (enforced by checkCompatibility).
 */
public Domain union(Domain other) {
    checkCompatibility(other);
    return new Domain(values.union(other.getValues()), this.isNullAllowed() || other.isNullAllowed());
}
// Union identities and combinations: all/none absorb as expected,
// notNull ∪ onlyNull = all, single values widen against all/notNull/onlyNull,
// and overlapping ranged domains merge their ranges and null flags.
@Test
public void testUnion() {
    assertUnion(Domain.all(BIGINT), Domain.all(BIGINT), Domain.all(BIGINT));
    assertUnion(Domain.none(BIGINT), Domain.none(BIGINT), Domain.none(BIGINT));
    assertUnion(Domain.all(BIGINT), Domain.none(BIGINT), Domain.all(BIGINT));
    assertUnion(Domain.notNull(BIGINT), Domain.onlyNull(BIGINT), Domain.all(BIGINT));
    assertUnion(Domain.singleValue(BIGINT, 0L), Domain.all(BIGINT), Domain.all(BIGINT));
    assertUnion(Domain.singleValue(BIGINT, 0L), Domain.notNull(BIGINT), Domain.notNull(BIGINT));
    assertUnion(Domain.singleValue(BIGINT, 0L), Domain.onlyNull(BIGINT), Domain.create(ValueSet.ofRanges(Range.equal(BIGINT, 0L)), true));
    assertUnion(Domain.create(ValueSet.ofRanges(Range.equal(BIGINT, 1L)), true),
                Domain.create(ValueSet.ofRanges(Range.equal(BIGINT, 2L)), true),
                Domain.create(ValueSet.ofRanges(Range.equal(BIGINT, 1L), Range.equal(BIGINT, 2L)), true));
    assertUnion(Domain.create(ValueSet.ofRanges(Range.equal(BIGINT, 1L)), true),
                Domain.create(ValueSet.ofRanges(Range.equal(BIGINT, 1L), Range.equal(BIGINT, 2L)), false),
                Domain.create(ValueSet.ofRanges(Range.equal(BIGINT, 1L), Range.equal(BIGINT, 2L)), true));
}
/**
 * Drops every database except 'default' (CASCADE) and then every table inside
 * 'default'. Prompts for confirmation unless running as a dry run or with
 * -yes; in dry-run mode only prints what would be dropped.
 *
 * @throws HiveMetaException if any metastore SQL operation fails
 */
@Override
void execute() throws HiveMetaException {
    // Need to confirm unless it's a dry run or specified -yes
    if (!schemaTool.isDryRun() && !this.yes) {
        boolean confirmed = promptToConfirm();
        if (!confirmed) {
            System.out.println("Operation cancelled, exiting.");
            return;
        }
    }
    Connection conn = schemaTool.getConnectionToMetastore(true);
    try {
        try (Statement stmt = conn.createStatement()) {
            final String def = Warehouse.DEFAULT_DATABASE_NAME;
            // List databases
            List<String> databases = new ArrayList<>();
            try (ResultSet rs = stmt.executeQuery("SHOW DATABASES")) {
                while (rs.next()) {
                    databases.add(rs.getString(1));
                }
            }
            // Drop databases
            for (String database : databases) {
                // Don't try to drop 'default' database as it's not allowed
                if (!def.equalsIgnoreCase(database)) {
                    if (schemaTool.isDryRun()) {
                        System.out.println("would drop database " + database);
                    } else {
                        logIfVerbose("dropping database " + database);
                        stmt.execute(String.format("DROP DATABASE `%s` CASCADE", database));
                    }
                }
            }
            // List tables in 'default' database
            List<String> tables = new ArrayList<>();
            try (ResultSet rs = stmt.executeQuery(String.format("SHOW TABLES IN `%s`", def))) {
                while (rs.next()) {
                    tables.add(rs.getString(1));
                }
            }
            // Drop tables in 'default' database
            for (String table : tables) {
                if (schemaTool.isDryRun()) {
                    System.out.println("would drop table " + table);
                } else {
                    logIfVerbose("dropping table " + table);
                    stmt.execute(String.format("DROP TABLE `%s`.`%s`", def, table));
                }
            }
        }
    } catch (SQLException se) {
        throw new HiveMetaException("Failed to drop databases.", se);
    }
    // NOTE(review): conn is not closed here -- presumably the schema tool or
    // caller owns its lifecycle; confirm, otherwise this leaks a connection.
}
// Answering "n" at the confirmation prompt must abort the operation before
// any SQL statement is executed.
@Test
public void testExecutePromptNo() throws Exception {
    setUpTwoDatabases();
    mockPromptWith("n");
    uut.execute();
    Mockito.verify(stmtMock, times(0)).execute(anyString());
}
/**
 * Creates a configured FEEL 1.1 parser for the given source text: builds the
 * lexer/token stream, installs an events-aware helper (registering any
 * additional functions in the built-in scope), replaces the default console
 * error listener with a FEEL event listener, pre-defines the input variables,
 * and optionally wires a type registry.
 * NOTE(review): the 'profiles' parameter is accepted but unused in this body
 * -- presumably consumed by overloads or kept for API symmetry; confirm.
 */
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) {
    CharStream input = CharStreams.fromString(source);
    FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input );
    CommonTokenStream tokens = new CommonTokenStream( lexer );
    FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens );
    ParserHelper parserHelper = new ParserHelper(eventsManager);
    // Make the extra functions resolvable from the built-in scope.
    additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol()));
    parser.setHelper(parserHelper);
    parser.setErrorHandler( new FEELErrorHandler() );
    parser.removeErrorListeners(); // removes the error listener that prints to the console
    parser.addErrorListener( new FEELParserErrorListener( eventsManager ) );
    // pre-loads the parser with symbols
    defineVariables( inputVariableTypes, inputVariables, parser );
    if (typeRegistry != null) {
        parserHelper.setTypeRegistry(typeRegistry);
    }
    return parser;
}
@Test
void instanceOfExpressionFunction() {
    // "x instance of function" must parse to an InstanceOfNode of BOOLEAN type,
    // with the left side a name reference and the right side a type node.
    String inputExpression = "duration instance of function";
    BaseNode instanceOfBase = parse( inputExpression );

    assertThat( instanceOfBase).isInstanceOf(InstanceOfNode.class);
    assertThat( instanceOfBase.getText()).isEqualTo(inputExpression);
    assertThat( instanceOfBase.getResultType()).isEqualTo(BuiltInType.BOOLEAN);

    InstanceOfNode ioExpr = (InstanceOfNode) instanceOfBase;
    assertThat( ioExpr.getExpression()).isInstanceOf(NameRefNode.class);
    assertThat( ioExpr.getExpression().getText()).isEqualTo("duration");
    assertThat( ioExpr.getType()).isInstanceOf(TypeNode.class);
    assertThat( ioExpr.getType().getText()).isEqualTo("function");
}
@Nonnull @Override public Optional<? extends PseudorandomNumberGenerator> parse( @Nullable String str, @Nonnull DetectionLocation detectionLocation) { if (str == null) { return Optional.empty(); } if (str.toUpperCase().contains("SHA1")) { return Optional.of( new SHA(PseudorandomNumberGenerator.class, new SHA(detectionLocation))); } return switch (str.toUpperCase().trim()) { case "NATIVEPRNG", "DRBG", "NATIVEPRNGBLOCKING", "NATIVEPRNGNONBLOCKING", "WINDOWS-PRNG" -> Optional.empty(); // todo default -> Optional.empty(); }; }
@Test
void base() {
    // NOTE(review): this expects "NativePRNGBlocking" to map to a present PRNG
    // named "PRNG", yet the mapper visible in this file returns Optional.empty()
    // for NATIVEPRNGBLOCKING — confirm which side is the intended behavior.
    DetectionLocation testDetectionLocation =
        new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "SSL");

    JcaPRNGMapper jcaPRNGMapper = new JcaPRNGMapper();
    Optional<? extends PseudorandomNumberGenerator> prngOptional =
        jcaPRNGMapper.parse("NativePRNGBlocking", testDetectionLocation);

    assertThat(prngOptional).isPresent();
    assertThat(prngOptional.get()).isInstanceOf(PseudorandomNumberGenerator.class);
    assertThat(prngOptional.get().getName()).isEqualTo("PRNG");
    assertThat(prngOptional.get().hasChildren()).isFalse();
}
@Override
public <T> void storeObject(
    String accountName,
    ObjectType objectType,
    String objectKey,
    T obj,
    String filename,
    boolean isAnUpdate) {
  // Persist the object into the table matching its type. Every branch follows
  // the same recipe: id <- objectKey, content <- JSON-serialized obj,
  // createdAt/updatedAt <- now, then save via the matching repository.
  switch (objectType) {
    case CANARY_RESULT_ARCHIVE: {
      var archive = new SqlCanaryArchive();
      archive.setId(objectKey);
      archive.setContent(mapToJson(obj, objectType));
      archive.setCreatedAt(Instant.now());
      archive.setUpdatedAt(Instant.now());
      sqlCanaryArchiveRepo.save(archive);
      return;
    }
    case CANARY_CONFIG: {
      var config = new SqlCanaryConfig();
      config.setId(objectKey);
      config.setContent(mapToJson(obj, objectType));
      config.setCreatedAt(Instant.now());
      config.setUpdatedAt(Instant.now());
      sqlCanaryConfigRepo.save(config);
      return;
    }
    case METRIC_SET_PAIR_LIST: {
      var metricPairs = new SqlMetricSetPairs();
      metricPairs.setId(objectKey);
      metricPairs.setContent(mapToJson(obj, objectType));
      metricPairs.setCreatedAt(Instant.now());
      metricPairs.setUpdatedAt(Instant.now());
      sqlMetricSetPairsRepo.save(metricPairs);
      return;
    }
    case METRIC_SET_LIST: {
      var metricSets = new SqlMetricSets();
      metricSets.setId(objectKey);
      metricSets.setContent(mapToJson(obj, objectType));
      metricSets.setCreatedAt(Instant.now());
      metricSets.setUpdatedAt(Instant.now());
      sqlMetricSetsRepo.save(metricSets);
      return;
    }
    default:
      throw new IllegalArgumentException("Unsupported object type: " + objectType);
  }
}
@Test
public void testStoreObjectWhenCanaryConfig() {
    // Storing a CANARY_CONFIG object must route to the canary-config repository.
    var testAccountName = UUID.randomUUID().toString();
    var testObjectType = ObjectType.CANARY_CONFIG;
    var testObjectKey = UUID.randomUUID().toString();
    var testCanaryConfig = createTestCanaryConfig();

    sqlStorageService.storeObject(testAccountName, testObjectType, testObjectKey, testCanaryConfig);

    verify(sqlCanaryConfigRepo).save(any(SqlCanaryConfig.class));
}
public static org.apache.hadoop.mapred.JobID fromYarn(JobId id) {
    // Converts a YARN JobId to the classic mapred JobID: the cluster timestamp
    // becomes the identifier string, the numeric job id is carried over.
    String identifier = fromClusterTimeStamp(id.getAppId().getClusterTimestamp());
    return new org.apache.hadoop.mapred.JobID(identifier, id.getId());
}
@Test
public void testFromYarn() throws Exception {
    // Builds a RUNNING ApplicationReport with resource-usage data and verifies
    // the converted JobStatus carries over times, state, and priority (3 -> NORMAL).
    int appStartTime = 612354;
    int appFinishTime = 612355;
    YarnApplicationState state = YarnApplicationState.RUNNING;
    ApplicationId applicationId = ApplicationId.newInstance(0, 0);
    ApplicationReport applicationReport = Records
        .newRecord(ApplicationReport.class);
    applicationReport.setApplicationId(applicationId);
    applicationReport.setYarnApplicationState(state);
    applicationReport.setStartTime(appStartTime);
    applicationReport.setFinishTime(appFinishTime);
    applicationReport.setUser("TestTypeConverter-user");
    applicationReport.setPriority(Priority.newInstance(3));
    ApplicationResourceUsageReport appUsageRpt = Records
        .newRecord(ApplicationResourceUsageReport.class);
    Resource r = Records.newRecord(Resource.class);
    r.setMemorySize(2048);
    appUsageRpt.setNeededResources(r);
    appUsageRpt.setNumReservedContainers(1);
    appUsageRpt.setNumUsedContainers(3);
    appUsageRpt.setReservedResources(r);
    appUsageRpt.setUsedResources(r);
    applicationReport.setApplicationResourceUsageReport(appUsageRpt);
    JobStatus jobStatus = TypeConverter.fromYarn(applicationReport, "dummy-jobfile");
    assertEquals(appStartTime, jobStatus.getStartTime());
    assertEquals(appFinishTime, jobStatus.getFinishTime());
    assertEquals(state.toString(), jobStatus.getState().toString());
    // YARN priority 3 maps to the mapred NORMAL priority.
    assertEquals(JobPriority.NORMAL, jobStatus.getPriority());
}
@Override
public void close() {
    // Shut down OkHttp's background machinery so the JVM can exit promptly:
    // stop the dispatcher's executor and evict all idle pooled connections.
    httpClient.dispatcher().executorService().shutdown();
    httpClient.connectionPool().evictAll();
}
@Test
public void testStatementReuse() throws Exception {
    // A single Statement must be reusable across updates and queries; update
    // counts must read -1 while a query result is active and recover afterwards.
    try (Connection connection = createConnection("blackhole", "blackhole")) {
        try (Statement statement = connection.createStatement()) {
            // update statement
            assertFalse(statement.execute("INSERT INTO test_table VALUES (1), (2)"));
            assertNull(statement.getResultSet());
            assertEquals(statement.getUpdateCount(), 2);
            assertEquals(statement.getLargeUpdateCount(), 2);

            // query statement
            assertTrue(statement.execute("SELECT 123 x, 'foo' y, CAST(NULL AS bigint) z"));
            ResultSet resultSet = statement.getResultSet();
            assertNotNull(resultSet);
            assertEquals(statement.getUpdateCount(), -1);
            assertEquals(statement.getLargeUpdateCount(), -1);
            resultSet.close();

            // update statement
            assertFalse(statement.execute("INSERT INTO test_table VALUES (1), (2), (3)"));
            assertNull(statement.getResultSet());
            assertEquals(statement.getUpdateCount(), 3);
            assertEquals(statement.getLargeUpdateCount(), 3);
        }
    }
}
public List<SourceAndTarget> clusterPairs() { List<SourceAndTarget> pairs = new ArrayList<>(); Set<String> clusters = clusters(); Map<String, String> originalStrings = originalsStrings(); boolean globalHeartbeatsEnabled = MirrorHeartbeatConfig.EMIT_HEARTBEATS_ENABLED_DEFAULT; if (originalStrings.containsKey(MirrorHeartbeatConfig.EMIT_HEARTBEATS_ENABLED)) { globalHeartbeatsEnabled = Boolean.parseBoolean(originalStrings.get(MirrorHeartbeatConfig.EMIT_HEARTBEATS_ENABLED)); } for (String source : clusters) { for (String target : clusters) { if (!source.equals(target)) { String clusterPairConfigPrefix = source + "->" + target + "."; boolean clusterPairEnabled = Boolean.parseBoolean(originalStrings.get(clusterPairConfigPrefix + "enabled")); boolean clusterPairHeartbeatsEnabled = globalHeartbeatsEnabled; if (originalStrings.containsKey(clusterPairConfigPrefix + MirrorHeartbeatConfig.EMIT_HEARTBEATS_ENABLED)) { clusterPairHeartbeatsEnabled = Boolean.parseBoolean(originalStrings.get(clusterPairConfigPrefix + MirrorHeartbeatConfig.EMIT_HEARTBEATS_ENABLED)); } // By default, all source->target Herder combinations are created even if `x->y.enabled=false` // Unless `emit.heartbeats.enabled=false` or `x->y.emit.heartbeats.enabled=false` // Reason for this behavior: for a given replication flow A->B with heartbeats, 2 herders are required : // B->A for the MirrorHeartbeatConnector (emits heartbeats into A for monitoring replication health) // A->B for the MirrorSourceConnector (actual replication flow) if (clusterPairEnabled || clusterPairHeartbeatsEnabled) { pairs.add(new SourceAndTarget(source, target)); } } } } return pairs; }
@Test
public void testEmptyClusterPairsWithGloballyDisabledHeartbeats() {
    // With heartbeats globally off and no flows enabled, no herder pairs exist.
    MirrorMakerConfig mirrorConfig = new MirrorMakerConfig(makeProps(
        "clusters", "a, b, c",
        "emit.heartbeats.enabled", "false"));
    assertEquals(0, mirrorConfig.clusterPairs().size(), "clusterPairs count should be 0");
}
@Override
public TimeValue getRetryInterval(HttpResponse response, int execCount, HttpContext context) {
    // a server may send a 429 / 503 with a Retry-After header
    // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
    Header header = response.getFirstHeader(HttpHeaders.RETRY_AFTER);
    TimeValue retryAfter = null;
    if (header != null) {
        String value = header.getValue();
        try {
            // Retry-After as delay-seconds.
            retryAfter = TimeValue.ofSeconds(Long.parseLong(value));
        } catch (NumberFormatException ignore) {
            // Retry-After as an HTTP-date; the delta may be non-positive when the
            // date is in the past, in which case we fall through to backoff below.
            Instant retryAfterDate = DateUtils.parseStandardDate(value);
            if (retryAfterDate != null) {
                retryAfter = TimeValue.ofMilliseconds(retryAfterDate.toEpochMilli() - System.currentTimeMillis());
            }
        }
        if (TimeValue.isPositive(retryAfter)) {
            return retryAfter;
        }
    }
    // Exponential backoff: 2^(execCount-1) seconds capped at 64s (execCount 0
    // yields 0 because the int cast truncates 0.5), plus up to ~10% jitter.
    int delayMillis = 1000 * (int) Math.min(Math.pow(2.0, (long) execCount - 1.0), 64.0);
    int jitter = ThreadLocalRandom.current().nextInt(Math.max(1, (int) (delayMillis * 0.1)));
    return TimeValue.ofMilliseconds(delayMillis + jitter);
}
@Test
public void exponentialRetry() {
    // Delay doubles per attempt (1s, 2s, 4s, ...) and is capped at 64s; each
    // value may carry up to ~10% jitter, hence the interval assertions.
    HttpRequestRetryStrategy strategy = new ExponentialHttpRequestRetryStrategy(10);
    BasicHttpResponse response = new BasicHttpResponse(503, "Oopsie");
    // note that the upper limit includes ~10% variability
    assertThat(strategy.getRetryInterval(response, 0, null).toMilliseconds()).isEqualTo(0);
    assertThat(strategy.getRetryInterval(response, 1, null).toMilliseconds())
        .isBetween(1000L, 2000L);
    assertThat(strategy.getRetryInterval(response, 2, null).toMilliseconds())
        .isBetween(2000L, 3000L);
    assertThat(strategy.getRetryInterval(response, 3, null).toMilliseconds())
        .isBetween(4000L, 5000L);
    assertThat(strategy.getRetryInterval(response, 4, null).toMilliseconds())
        .isBetween(8000L, 9000L);
    assertThat(strategy.getRetryInterval(response, 5, null).toMilliseconds())
        .isBetween(16000L, 18000L);
    assertThat(strategy.getRetryInterval(response, 6, null).toMilliseconds())
        .isBetween(32000L, 36000L);
    assertThat(strategy.getRetryInterval(response, 7, null).toMilliseconds())
        .isBetween(64000L, 72000L);
    // Past the cap, the delay stays in the 64s (+jitter) band.
    assertThat(strategy.getRetryInterval(response, 10, null).toMilliseconds())
        .isBetween(64000L, 72000L);
}
@Override
public boolean isReleased() {
    // Plain flag read; visibility across threads is assumed to be handled by
    // the field's declaration (e.g. volatile) — confirm in the enclosing class.
    return isReleased;
}
@TestTemplate
void testIsReleasedChecksParent() {
    // The view must delegate isReleased() to its parent subpartition on every call.
    PipelinedSubpartition subpartition = mock(PipelinedSubpartition.class);

    PipelinedSubpartitionView reader =
        new PipelinedSubpartitionView(subpartition, mock(BufferAvailabilityListener.class));

    assertThat(reader.isReleased()).isFalse();
    verify(subpartition, times(1)).isReleased();

    when(subpartition.isReleased()).thenReturn(true);
    assertThat(reader.isReleased()).isTrue();
    // Second call must hit the parent again (no caching).
    verify(subpartition, times(2)).isReleased();
}
@Override
public MergedResult decorate(final QueryResult queryResult, final SQLStatementContext sqlStatementContext, final EncryptRule rule) {
    // Wraps the raw query result for decrypt-on-read. NOTE(review): the
    // sqlStatementContext and rule parameters are ignored in favor of the
    // instance fields captured at construction time — confirm this is intentional.
    return new EncryptMergedResult(database, encryptRule, selectStatementContext, new TransparentMergedResult(queryResult));
}
@Test
void assertDecorateQueryResult() throws SQLException {
    // The decorated result must forward row iteration to the wrapped QueryResult.
    QueryResult queryResult = mock(QueryResult.class);
    when(queryResult.next()).thenReturn(true);
    EncryptDQLResultDecorator decorator = new EncryptDQLResultDecorator(
        mock(ShardingSphereDatabase.class), mock(EncryptRule.class), mock(SelectStatementContext.class));
    MergedResult actual = decorator.decorate(queryResult, mock(SQLStatementContext.class), mock(EncryptRule.class));
    assertTrue(actual.next());
}
public static int validateValidHeaderValue(CharSequence value) {
    // Empty values are trivially valid; -1 means "no offending index found".
    if (value.length() == 0) {
        return -1;
    }
    // AsciiString gets the optimized byte-wise scan; any other CharSequence
    // falls back to the generic char-by-char verification.
    return value instanceof AsciiString
            ? verifyValidHeaderValueAsciiString((AsciiString) value)
            : verifyValidHeaderValueCharSequence(value);
}
@Test
void headerValuesCannotEndWithNewlinesAsciiString() {
    // A trailing LF or CR is invalid; the validator reports its index (1).
    assertEquals(1, validateValidHeaderValue(AsciiString.of("a\n")));
    assertEquals(1, validateValidHeaderValue(AsciiString.of("a\r")));
}
public int getMaxSamples() {
    // Capacity of the reservoir, fixed at construction (length of the backing array).
    return samples.length;
}
@Test
public void testGetMaxSamples() {
    // The capacity passed at construction is reported back; the population
    // count starts at zero before any values are added.
    UnweightedDoubleReservoirSample reservoir = new UnweightedDoubleReservoirSample(200);
    assertEquals(reservoir.getMaxSamples(), 200);
    assertEquals(reservoir.getTotalPopulationCount(), 0);
}
public JdbcUrl parse(final String jdbcUrl) {
    // Splits a JDBC URL into authority (host[:port]), path (database) and query
    // properties using the shared regex. '%' in the pattern is escaped before it
    // is embedded in the exception's format string.
    Matcher matcher = CONNECTION_URL_PATTERN.matcher(jdbcUrl);
    ShardingSpherePreconditions.checkState(matcher.matches(),
            () -> new UnrecognizedDatabaseURLException(jdbcUrl, CONNECTION_URL_PATTERN.pattern().replaceAll("%", "%%")));
    String authority = matcher.group(AUTHORITY_GROUP_KEY);
    // A URL without an authority section is rejected as unrecognized.
    ShardingSpherePreconditions.checkNotNull(authority,
            () -> new UnrecognizedDatabaseURLException(jdbcUrl, CONNECTION_URL_PATTERN.pattern().replaceAll("%", "%%")));
    return new JdbcUrl(parseHostname(authority), parsePort(authority),
            matcher.group(PATH_GROUP_KEY), parseQueryProperties(matcher.group(QUERY_GROUP_KEY)));
}
@Test
void assertParseSimpleJdbcUrl() {
    // A host-only URL yields port -1 (unspecified), empty database, no query props.
    JdbcUrl actual = new StandardJdbcUrlParser().parse("jdbc:mock://127.0.0.1/");
    assertThat(actual.getHostname(), is("127.0.0.1"));
    assertThat(actual.getPort(), is(-1));
    assertThat(actual.getDatabase(), is(""));
    assertTrue(actual.getQueryProperties().isEmpty());
}
public String characterEncoding() {
    // Character encoding declared for this type in the schema, if any.
    return characterEncoding;
}
@Test
void shouldReturnCharacterEncodingWhenSpecifiedNonCharType() throws Exception {
    // characterEncoding values are trimmed of surrounding whitespace/newlines,
    // even when declared on a non-char primitive type.
    final String testXmlString = "<types>" +
        "    <type name=\"testTypeCharacterEncodingNonChar\" primitiveType=\"uint8\" " +
        "characterEncoding=\" windows-1251\n\r\"/>" +
        "</types>";

    final Map<String, Type> map = parseTestXmlWithMap("/types/type", testXmlString);
    assertThat(
        (((EncodedDataType)map.get("testTypeCharacterEncodingNonChar")).characterEncoding()),
        is("windows-1251"));
}
@Override
protected Result[] run(String value) {
    // Flattens the JSON extracted from 'value' into one Result per top-level entry.
    final Map<String, Object> extractedJson;
    try {
        extractedJson = extractJson(value);
    } catch (IOException e) {
        // Preserve the cause so callers can see the underlying parse failure.
        throw new ExtractorException(e);
    }

    final List<Result> results = new ArrayList<>(extractedJson.size());
    for (Map.Entry<String, Object> entry : extractedJson.entrySet()) {
        // -1/-1: JSON extraction has no meaningful begin/end offsets in the input.
        results.add(new Result(entry.getValue(), entry.getKey(), -1, -1));
    }

    // Zero-length array is the recommended toArray idiom (lets the JVM
    // allocate the correctly-sized array in one step).
    return results.toArray(new Result[0]);
}
@Test
public void testRunWithEmptyInput() throws Exception {
    // An empty input string must yield an empty result array, not an error.
    assertThat(jsonExtractor.run("")).isEmpty();
}
@VisibleForTesting
static void checkProviderAndReceiverConsistency(
        Map<String, DelegationTokenProvider> providers,
        Map<String, DelegationTokenReceiver> receivers) {
    LOG.info("Checking provider and receiver instances consistency");
    // Compare key sets unconditionally: equal map sizes do NOT imply equal
    // service names (e.g. providers={a}, receivers={b} previously slipped
    // through the size-based guard).
    Set<String> missingReceiverServiceNames = new HashSet<>(providers.keySet());
    missingReceiverServiceNames.removeAll(receivers.keySet());
    if (!missingReceiverServiceNames.isEmpty()) {
        throw new IllegalStateException(
                PROVIDER_RECEIVER_INCONSISTENCY_ERROR
                        + " Missing receivers: "
                        + String.join(",", missingReceiverServiceNames));
    }
    Set<String> missingProviderServiceNames = new HashSet<>(receivers.keySet());
    missingProviderServiceNames.removeAll(providers.keySet());
    if (!missingProviderServiceNames.isEmpty()) {
        throw new IllegalStateException(
                PROVIDER_RECEIVER_INCONSISTENCY_ERROR
                        + " Missing providers: "
                        + String.join(",", missingProviderServiceNames));
    }
    LOG.info("Provider and receiver instances are consistent");
}
@Test
public void checkProviderAndReceiverConsistencyShouldNotThrowWhenNothingLoaded() {
    // Two empty maps are trivially consistent.
    DefaultDelegationTokenManager.checkProviderAndReceiverConsistency(
        Collections.emptyMap(), Collections.emptyMap());
}
@GET
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public HistoryInfo get() {
    // REST entry point: returns history-server info as JSON or XML (UTF-8).
    return getHistoryInfo();
}
@Test
public void testInfoDefault() throws JSONException, Exception {
    // With no Accept header the endpoint defaults to JSON (UTF-8) and the
    // response carries exactly one 'historyInfo' object.
    WebResource r = resource();
    ClientResponse response = r.path("ws").path("v1").path("history")
        .path("info/").get(ClientResponse.class);
    assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8,
        response.getType().toString());
    JSONObject json = response.getEntity(JSONObject.class);
    assertEquals("incorrect number of elements", 1, json.length());
    verifyHSInfo(json.getJSONObject("historyInfo"), appContext);
}
@Override
protected void route(List<SendingMailbox> destinations, TransferableBlock block) throws Exception {
    // Broadcast semantics: every destination mailbox receives the same block.
    for (SendingMailbox mailbox : destinations) {
        sendBlock(mailbox, block);
    }
}
@Test
public void shouldBroadcast() throws Exception {
    // Given: two destination mailboxes.
    ImmutableList<SendingMailbox> destinations = ImmutableList.of(_mailbox1, _mailbox2);

    // When: routing a single block through the broadcast exchange.
    new BroadcastExchange(destinations, TransferableBlockUtils::splitBlock).route(destinations, _block);

    // Then: each mailbox received exactly that block once.
    ArgumentCaptor<TransferableBlock> captor = ArgumentCaptor.forClass(TransferableBlock.class);
    Mockito.verify(_mailbox1, Mockito.times(1)).send(captor.capture());
    Assert.assertEquals(captor.getValue(), _block);
    Mockito.verify(_mailbox2, Mockito.times(1)).send(captor.capture());
    Assert.assertEquals(captor.getValue(), _block);
}
@Override
public HashSlotCursor16byteKey cursor() {
    // Each call returns a fresh cursor positioned before the first slot.
    return new CursorLongKey2();
}
@Test
public void testCursor_key2() {
    // After inserting one (key1, key2) slot, advancing the cursor once must
    // expose the second half of the composite key via key2().
    final long key1 = randomKey();
    final long key2 = randomKey();
    insert(key1, key2);
    HashSlotCursor16byteKey cursor = hsa.cursor();
    cursor.advance();
    assertEquals(key2, cursor.key2());
}
@Nonnull
public static <T> Traverser<T> traverseEnumeration(@Nonnull Enumeration<T> enumeration) {
    // Adapts a legacy Enumeration to a Traverser. Exhaustion is signalled by
    // returning null, which is why null elements inside the enumeration are
    // rejected with an NPE.
    return () -> {
        if (!enumeration.hasMoreElements()) {
            return null;
        }
        return requireNonNull(enumeration.nextElement(), "Enumeration contains a null element");
    };
}
@Test(expected = NullPointerException.class)
public void when_traverseEnumerationWithNull_then_failure() {
    // The first next() succeeds (value 1); the second hits the null element
    // and must throw, since null is reserved as the exhaustion signal.
    Traverser<Integer> trav = traverseEnumeration(new Vector<>(asList(1, null)).elements());
    trav.next();
    trav.next();
}
@VisibleForTesting
void setTransMetaFileNaming( RepositoryDirectoryInterface repdir, String directory,
    DatabaseMeta sourceDbInfo, DatabaseMeta targetDbInfo, String[] tables, int i, TransMeta transMeta ) {
  // Names the generated transformation from the localized message parts as
  // "copy [source].[table] to [target]". With a repository directory present,
  // the name is set on the meta; otherwise a filename is derived under
  // 'directory' using the default transformation extension.
  String transname =
      BaseMessages.getString( PKG, "Spoon.RipDB.Monitor.Transname1" ) + sourceDbInfo
          + "].[" + tables[i]
          + BaseMessages.getString( PKG, "Spoon.RipDB.Monitor.Transname2" ) + targetDbInfo + "]";
  if ( repdir != null ) {
    transMeta.setRepositoryDirectory( repdir );
    transMeta.setName( transname );
  } else {
    transMeta.setFilename(
        Const.createFilename( directory, transname, "." + Const.STRING_TRANS_DEFAULT_EXT ) );
  }
}
@Test
public void setTransMetaFileNamingWithRepTest() {
    // With a repository directory present, the trans gets a name (not a
    // filename) built from the source/target DBs and the selected table.
    RepositoryDirectoryInterface repDirMock = mock( RepositoryDirectoryInterface.class );
    String directory = "directory";
    DatabaseMeta sourceDataBaseMetaMock = mock( DatabaseMeta.class );
    DatabaseMeta targetDataBaseMetaMock = mock( DatabaseMeta.class );
    String[] tables = { "table1", "table2", "table3" };
    int index = 1;
    TransMeta transMeta = new TransMeta();

    doCallRealMethod().when( delegate ).setTransMetaFileNaming(
        repDirMock, directory, sourceDataBaseMetaMock, targetDataBaseMetaMock, tables, index, transMeta );
    delegate.setTransMetaFileNaming(
        repDirMock, directory, sourceDataBaseMetaMock, targetDataBaseMetaMock, tables, index, transMeta );

    // index 1 selects "table2".
    String transname = "copy [" + sourceDataBaseMetaMock + "].[" + "table2" + "] to [" + targetDataBaseMetaMock + "]";
    assertEquals( repDirMock, transMeta.getRepositoryDirectory() );
    assertEquals( transname, transMeta.getName() );
    assertNull( transMeta.getFilename() );
}
@Override
public int compareTo(Uuid other) {
    // Order by the most significant half first, then by the least significant
    // half (both compared as signed longs). Returns exactly -1/0/1 to match
    // the original contract.
    if (mostSignificantBits != other.mostSignificantBits) {
        return mostSignificantBits > other.mostSignificantBits ? 1 : -1;
    }
    if (leastSignificantBits != other.leastSignificantBits) {
        return leastSignificantBits > other.leastSignificantBits ? 1 : -1;
    }
    return 0;
}
@Test
public void testCompareUuids() {
    // Ordering compares the most significant bits first, then the least
    // significant bits; equal UUIDs compare as 0.
    Uuid id00 = new Uuid(0L, 0L);
    Uuid id01 = new Uuid(0L, 1L);
    Uuid id10 = new Uuid(1L, 0L);
    assertEquals(0, id00.compareTo(id00));
    assertEquals(0, id01.compareTo(id01));
    assertEquals(0, id10.compareTo(id10));
    assertEquals(-1, id00.compareTo(id01));
    assertEquals(-1, id00.compareTo(id10));
    assertEquals(1, id01.compareTo(id00));
    assertEquals(1, id10.compareTo(id00));
    // Most significant half dominates the least significant half.
    assertEquals(-1, id01.compareTo(id10));
    assertEquals(1, id10.compareTo(id01));
}