focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Validates that the given column name is a legal DB identifier.
 *
 * @param columnName the candidate column name; must not be null
 * @return the validated column name (same reference as {@code columnName})
 * @throws NullPointerException if {@code columnName} is null
 * @throws IllegalArgumentException if the name contains characters rejected by
 *         {@code checkDbIdentifierCharacters}
 */
public static String validateColumnName(@Nullable String columnName) {
  // requireNonNull returns its argument, so 'name' is the proven-non-null name.
  String name = requireNonNull(columnName, "Column name cannot be null");
  // Use the validated reference for consistency (fix: previously passed the raw
  // nullable parameter; same object, but clearer data flow).
  checkDbIdentifierCharacters(name, "Column name");
  return name;
}
// Verifies that a column name with '-' and '/' is rejected with the exact documented message.
@Test public void fail_when_column_name_contains_invalid_character() { assertThatThrownBy(() -> validateColumnName("date-in/ms")) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Column name must be lower case and contain only alphanumeric chars or '_', got 'date-in/ms'"); }
// Builds a TLS protocol negotiator from the SDK RPC TLS configuration read out of the
// environment properties. Returns null when TLS is disabled, signalling plaintext transport.
@Override public NacosGrpcProtocolNegotiator build() { Properties properties = EnvUtil.getProperties(); RpcServerTlsConfig config = RpcServerTlsConfigFactory.getInstance().createSdkConfig(properties); if (config.getEnableTls()) { SslContext sslContext = DefaultTlsContextBuilder.getSslContext(config); return new OptionalTlsProtocolNegotiator(sslContext, config); } return null; }
// With TLS disabled the builder must return null (no negotiator).
@Test void testBuildDisabled() { assertNull(builder.build()); }
/**
 * Resolves the cluster node that owns the given key.
 * Maps the key to its hash slot first, then delegates to the slot-based lookup.
 */
@Override
public RedisClusterNode clusterGetNodeForKey(byte[] key) {
    final int keySlot = executorService.getConnectionManager().calcSlot(key);
    return clusterGetNodeForSlot(keySlot);
}
// Any key must map to some owning cluster node (never null).
@Test public void testClusterGetNodeForKey() { RedisClusterNode node = connection.clusterGetNodeForKey("123".getBytes()); assertThat(node).isNotNull(); }
// Convenience overload: creates/returns the named circuit breaker using the registry's default config.
@Override public CircuitBreaker circuitBreaker(String name) { return circuitBreaker(name, getDefaultConfig()); }
// Requesting a breaker with an unregistered config name must fail with ConfigurationNotFoundException.
@Test public void testCreateCircuitBreakerWithConfigNameNotFound() { CircuitBreakerRegistry circuitBreakerRegistry = CircuitBreakerRegistry.ofDefaults(); assertThatThrownBy(() -> circuitBreakerRegistry.circuitBreaker("circuitBreaker", "testConfig")).isInstanceOf(ConfigurationNotFoundException.class); }
/**
 * Returns an array containing only the members accepted by the selector,
 * preserving the iteration order of the backing collection.
 */
@Override
public Object[] toArray() {
    final List<Object> selected = new ArrayList<>();
    for (final M candidate : members) {
        if (!selector.select(candidate)) {
            continue;
        }
        selected.add(candidate);
    }
    return selected.toArray(new Object[0]);
}
// toArray() must reflect only members matching the composed lite-and-non-local selector.
@Test public void testToArrayWhenLiteMembersSelectedAndNoLocalMember() { Collection<MemberImpl> collection = new MemberSelectingCollection<>(members, and(LITE_MEMBER_SELECTOR, NON_LOCAL_MEMBER_SELECTOR)); Object[] array = collection.toArray(); assertArray(collection, array); }
// Materializes this scan as a Spark Batch, passing the planned task groups and expected
// schema; hashCode() is forwarded so equal scans produce equal batches.
@Override public Batch toBatch() { return new SparkBatch( sparkContext, table, readConf, groupingKeyType(), taskGroups(), expectedSchema, hashCode()); }
// On an unpartitioned table, pushing an AND of two system-function predicates (and its
// negation) must not prune any input partitions: all 10 splits remain planned.
@Test public void testUnpartitionedAnd() throws Exception { createUnpartitionedTable(spark, tableName); SparkScanBuilder builder = scanBuilder(); YearsFunction.TimestampToYearsFunction tsToYears = new YearsFunction.TimestampToYearsFunction(); UserDefinedScalarFunc udf1 = toUDF(tsToYears, expressions(fieldRef("ts"))); Predicate predicate1 = new Predicate("=", expressions(udf1, intLit(2017 - 1970))); BucketFunction.BucketLong bucketLong = new BucketFunction.BucketLong(DataTypes.LongType); UserDefinedScalarFunc udf = toUDF(bucketLong, expressions(intLit(5), fieldRef("id"))); Predicate predicate2 = new Predicate(">=", expressions(udf, intLit(2))); Predicate predicate = new And(predicate1, predicate2); pushFilters(builder, predicate); Batch scan = builder.build().toBatch(); assertThat(scan.planInputPartitions().length).isEqualTo(10); // NOT (years(ts) = 47 AND bucket(id, 5) >= 2) builder = scanBuilder(); predicate = new Not(predicate); pushFilters(builder, predicate); scan = builder.build().toBatch(); assertThat(scan.planInputPartitions().length).isEqualTo(10); }
// Two DirtyState instances are equal when their current dirty flags match.
// NOTE(review): equality depends on mutable state (dirty.get()), so equal objects can
// diverge later — presumably hashCode() is kept consistent elsewhere; verify.
@Override public boolean equals(@Nullable Object object) { if (object instanceof DirtyState) { DirtyState dirtyState = (DirtyState) object; return Objects.equals(dirty.get(), dirtyState.dirty.get()); } return false; }
// Freshly constructed DirtyState instances are equal and share a hash code.
@Test public void testEquals() { DirtyState dirtyState = new DirtyState(); DirtyState equal = new DirtyState(); Assert.assertEquals(dirtyState, equal); Assert.assertEquals(dirtyState.hashCode(), equal.hashCode()); }
// Blocks direct Java deserialization of this class: serialization goes through
// writeReplace(), so a stream containing this class directly is invalid by construction.
private void readObject(ObjectInputStream in) throws IOException { // Don't permit directly deserializing this class; writeReplace() should have written a // replacement throw new InvalidObjectException("Deserialization is unsupported"); }
// Round-trips a LazilyParsedNumber through Java serialization; writeReplace() substitutes
// a BigDecimal, so the deserialized value compares equal to BigDecimal("123").
@Test public void testJavaSerialization() throws IOException, ClassNotFoundException { ByteArrayOutputStream out = new ByteArrayOutputStream(); ObjectOutputStream objOut = new ObjectOutputStream(out); objOut.writeObject(new LazilyParsedNumber("123")); objOut.close(); ObjectInputStream objIn = new ObjectInputStream(new ByteArrayInputStream(out.toByteArray())); Number deserialized = (Number) objIn.readObject(); assertThat(deserialized).isEqualTo(new BigDecimal("123")); }
// Instantiates the AwsClientFactory implementation named by the CLIENT_FACTORY property,
// defaulting to DefaultAwsClientFactory when the property is absent.
public static AwsClientFactory from(Map<String, String> properties) { String factoryImpl = PropertyUtil.propertyAsString( properties, AwsProperties.CLIENT_FACTORY, DefaultAwsClientFactory.class.getName()); return loadClientFactory(factoryImpl, properties); }
// The default factory must survive both Kryo and plain Java serialization round trips,
// retaining its concrete DefaultAwsClientFactory type.
@Test public void testDefaultAwsClientFactorySerializable() throws IOException { Map<String, String> properties = Maps.newHashMap(); AwsClientFactory defaultAwsClientFactory = AwsClientFactories.from(properties); AwsClientFactory roundTripResult = TestHelpers.KryoHelpers.roundTripSerialize(defaultAwsClientFactory); assertThat(roundTripResult).isInstanceOf(AwsClientFactories.DefaultAwsClientFactory.class); byte[] serializedFactoryBytes = SerializationUtil.serializeToBytes(defaultAwsClientFactory); AwsClientFactory deserializedClientFactory = SerializationUtil.deserializeFromBytes(serializedFactoryBytes); assertThat(deserializedClientFactory) .isInstanceOf(AwsClientFactories.DefaultAwsClientFactory.class); }
/**
 * Creates the query optimizer selected by the QUERY_OPTIMIZER_TYPE property.
 * RULES yields a rule-based optimizer; any other recognized type yields the
 * no-op EmptyOptimizer. An unrecognized value is rejected via
 * onInvalidOptimizerType.
 */
public static QueryOptimizer newOptimizer(HazelcastProperties properties) {
    HazelcastProperty property = ClusterProperty.QUERY_OPTIMIZER_TYPE;
    String configured = properties.getString(property);
    Type optimizerType;
    try {
        optimizerType = Type.valueOf(configured);
    } catch (IllegalArgumentException e) {
        throw onInvalidOptimizerType(configured);
    }
    if (optimizerType == Type.RULES) {
        return new RuleBasedQueryOptimizer();
    }
    return new EmptyOptimizer();
}
// The NONE optimizer type must produce the no-op EmptyOptimizer.
@Test public void newOptimizer_whenPropertyContainsNone_thenCreateEmptyOptimizer() { HazelcastProperties hazelcastProperties = createMockHazelcastProperties(QUERY_OPTIMIZER_TYPE, "NONE"); QueryOptimizer queryOptimizer = QueryOptimizerFactory.newOptimizer(hazelcastProperties); assertThat(queryOptimizer).isInstanceOf(EmptyOptimizer.class); }
/**
 * Checks that {@code n} is zero or positive.
 *
 * @param n    the value to validate
 * @param name the argument name used in the error message
 * @return {@code n} unchanged when it is non-negative
 * @throws IllegalArgumentException if {@code n} is negative
 */
public static int checkPositiveOrZero(int n, String name) {
    if (n >= 0) {
        return n;
    }
    throw new IllegalArgumentException(name + ": " + n + " (expected: >= 0)");
}
// A positive argument passes validation and is returned unchanged.
@Test public void checkPositiveOrZeroMustPassIfArgumentIsGreaterThanZero() { final int n = 1; final int actual = RangeUtil.checkPositiveOrZero(n, "var"); assertThat(actual, is(equalTo(n))); }
/**
 * Instantiates and configures one plugin per class name.
 * A null class-name list yields an empty list rather than null.
 *
 * @return the instantiated plugins, in the order of {@code klassNames}
 */
public <T> List<T> newPlugins(List<String> klassNames, AbstractConfig config, Class<T> pluginKlass) {
    final List<T> plugins = new ArrayList<>();
    if (klassNames == null) {
        return plugins;
    }
    for (final String klassName : klassNames) {
        plugins.add(newPlugin(klassName, config, pluginKlass));
    }
    return plugins;
}
// Configuring one REST extension class must yield exactly one configured instance of
// that class, with the server's original config passed through to it.
@Test public void shouldInstantiateAndConfigureConnectRestExtension() { props.clear(); props.put(RestServerConfig.REST_EXTENSION_CLASSES_CONFIG, TestConnectRestExtension.class.getName()); config = RestServerConfig.forPublic(null, props); List<ConnectRestExtension> connectRestExtensions = plugins.newPlugins(config.getList(RestServerConfig.REST_EXTENSION_CLASSES_CONFIG), config, ConnectRestExtension.class); assertNotNull(connectRestExtensions); assertEquals(1, connectRestExtensions.size(), "One Rest Extension expected"); assertNotNull(connectRestExtensions.get(0)); assertTrue(connectRestExtensions.get(0) instanceof TestConnectRestExtension, "Should be instance of TestConnectRestExtension"); assertNotNull(((TestConnectRestExtension) connectRestExtensions.get(0)).configs); assertEquals(config.originals(), ((TestConnectRestExtension) connectRestExtensions.get(0)).configs); }
// Canonicalizes a subfield tuple domain for plan comparison. Under DEFAULT strategy the
// domain is canonicalized as-is; otherwise top-level predicate columns that are partition
// keys (or, under IGNORE_SCAN_CONSTANTS, any predicate column) are dropped before
// canonicalization so plans differing only in those constants compare equal.
@VisibleForTesting static TupleDomain<Subfield> canonicalizeDomainPredicate(TupleDomain<Subfield> domainPredicate, Map<String, HiveColumnHandle> predicateColumns, PlanCanonicalizationStrategy strategy) { if (strategy == PlanCanonicalizationStrategy.DEFAULT) { return domainPredicate.canonicalize(ignored -> false); } return domainPredicate .transform(subfield -> { if (!subfield.getPath().isEmpty() || !predicateColumns.containsKey(subfield.getRootName())) { return subfield; } return isPartitionKey(predicateColumns.get(subfield.getRootName())) || strategy.equals(PlanCanonicalizationStrategy.IGNORE_SCAN_CONSTANTS) ? null : subfield; }) .canonicalize(ignored -> false); }
// Under CONNECTOR strategy the partition-key column ("ds") is dropped from the domain
// while the regular column ("col") is retained.
@Test public void testCanonicalizeDomain() { Map<String, HiveColumnHandle> predicateColumns = ImmutableMap.of( "ds", getColumnHandle("ds", true), "col", getColumnHandle("col", false)); TupleDomain<Subfield> domain = TupleDomain.withColumnDomains(ImmutableMap.of( new Subfield("ds"), singleValue(VARCHAR, utf8Slice("2022-01-01")), new Subfield("col"), singleValue(VARCHAR, utf8Slice("id")))); TupleDomain<Subfield> newDomain = canonicalizeDomainPredicate(domain, predicateColumns, CONNECTOR); assertTrue(newDomain.getDomains().isPresent()); assertEquals(newDomain.getDomains().get().size(), 1); assertEquals(newDomain.getDomains().get().get(new Subfield("col")), singleValue(VARCHAR, utf8Slice("id"))); }
// Drains and processes all queued background events, registering completable events with
// the reaper before processing. The first error encountered is captured (compareAndSet
// succeeds only once) and rethrown after the drain; subsequent errors are logged only,
// so every event still gets processed. Returns whether any event was drained.
private boolean processBackgroundEvents() { AtomicReference<KafkaException> firstError = new AtomicReference<>(); LinkedList<BackgroundEvent> events = new LinkedList<>(); backgroundEventQueue.drainTo(events); for (BackgroundEvent event : events) { try { if (event instanceof CompletableEvent) backgroundEventReaper.add((CompletableEvent<?>) event); backgroundEventProcessor.process(event); } catch (Throwable t) { KafkaException e = ConsumerUtils.maybeWrapAsKafkaException(t); if (!firstError.compareAndSet(null, e)) log.warn("An error occurred when processing the background event: {}", e.getMessage(), e); } } backgroundEventReaper.reap(time.milliseconds()); if (firstError.get() != null) throw firstError.get(); return !events.isEmpty(); }
// A future that always times out must exhaust the timer and surface TimeoutException.
@Test public void testProcessBackgroundEventsTimesOut() throws Exception { consumer = newConsumer(); Timer timer = time.timer(1000); CompletableFuture<?> future = mock(CompletableFuture.class); doAnswer(invocation -> { long timeout = invocation.getArgument(0, Long.class); timer.sleep(timeout); throw new java.util.concurrent.TimeoutException("Intentional timeout"); }).when(future).get(any(Long.class), any(TimeUnit.class)); assertThrows(TimeoutException.class, () -> consumer.processBackgroundEvents(future, timer)); // Because we forced our mocked future to continuously time out, we should have no time remaining. assertEquals(0, timer.remainingMs()); }
// Constant-folding implementation of quarters_sub(datetime, n): subtracts n ISO quarters
// (3-month units) from the datetime, yielding NULL on out-of-range results.
@ConstantFunction(name = "quarters_sub", argTypes = {DATETIME, INT}, returnType = DATETIME, isMonotonic = true) public static ConstantOperator quartersSub(ConstantOperator date, ConstantOperator quarter) { return ConstantOperator.createDatetimeOrNull( date.getDatetime().minus(quarter.getInt(), IsoFields.QUARTER_YEARS)); }
// Subtracting one quarter from 2015-03-23 yields 2014-12-23 (same time of day).
@Test public void quartersSub() { assertEquals("2014-12-23T09:23:55", ScalarOperatorFunctions.quartersSub(O_DT_20150323_092355, O_INT_1).getDatetime().toString()); }
/**
 * Creates a bundle for the given output collection, wrapping it in an
 * immutability-enforcing decorator when the IMMUTABILITY enforcement applies.
 */
@Override
public <T> UncommittedBundle<T> createBundle(PCollection<T> output) {
    return Enforcement.IMMUTABILITY.appliesTo(output, graph)
        ? new ImmutabilityEnforcingBundle<>(underlying.createBundle(output))
        : underlying.createBundle(output);
}
// Mutating an element array BEFORE adding it to the bundle is permitted; enforcement only
// guards against mutation after add, so commit succeeds and the element is present.
@Test public void mutationBeforeAddCreateBundleSucceeds() { UncommittedBundle<byte[]> intermediate = factory.createBundle(transformed); byte[] array = new byte[] {4, 8, 12}; WindowedValue<byte[]> windowedArray = WindowedValue.of( array, new Instant(891L), new IntervalWindow(new Instant(0), new Instant(1000)), PaneInfo.ON_TIME_AND_ONLY_FIRING); array[2] = -3; intermediate.add(windowedArray); CommittedBundle<byte[]> committed = intermediate.commit(Instant.now()); assertThat(committed.getElements(), containsInAnyOrder(windowedArray)); }
// Converts a SearchVersion to its encoded string form (delegates to SearchVersion#encode).
@Override public String convertTo(SearchVersion value) { return value.encode(); }
// An OpenSearch 1.2.0 version encodes as "OPENSEARCH:1.2.0".
@Test void testConvertToString() { final String converted = converter.convertTo(SearchVersion.create(SearchVersion.Distribution.OPENSEARCH, Version.parse("1.2.0"))); assertThat(converted).isEqualTo("OPENSEARCH:1.2.0"); }
// Lazily fetches a delegation token for the filesystem when none has been obtained yet,
// or when the previous renew action is no longer valid. A null token (security disabled)
// still marks initialization done so we don't retry on every call.
// NOTE(review): hasInitedToken is set even when getDelegationToken throws is NOT the case
// here — an exception propagates before the flag is set; confirm callers rely on that.
synchronized void ensureTokenInitialized() throws IOException { // we haven't inited yet, or we used to have a token but it expired if (!hasInitedToken || (action != null && !action.isValid())) { //since we don't already have a token, go get one Token<?> token = fs.getDelegationToken(null); // security might be disabled if (token != null) { fs.setDelegationToken(token); addRenewAction(fs); LOG.debug("Created new DT for {}", token.getService()); } hasInitedToken = true; } }
// When fetching the delegation token fails, the original IOException must propagate
// unchanged from ensureTokenInitialized.
@Test public void testGetRemoteTokenFailure() throws IOException, URISyntaxException { Configuration conf = new Configuration(); DummyFs fs = spy(new DummyFs()); IOException e = new IOException(); doThrow(e).when(fs).getDelegationToken(anyString()); fs.emulateSecurityEnabled = true; fs.initialize(new URI("dummyfs://127.0.0.1:1234"), conf); try { fs.tokenAspect.ensureTokenInitialized(); } catch (IOException exc) { assertEquals(e, exc); } }
// Resolves the glob against the working directory (unless absolute), strips '.' and '..'
// components, and walks the metadata tree from the root. When nothing matches, the
// handler is invoked once with Optional.empty() so callers observe "no match".
@Override public void accept(MetadataShellState state) { String fullGlob = glob.startsWith("/") ? glob : state.workingDirectory() + "/" + glob; List<String> globComponents = CommandUtils.stripDotPathComponents(CommandUtils.splitPath(fullGlob)); MetadataNode root = state.root(); if (root == null) { throw new RuntimeException("Invalid null root"); } if (!accept(globComponents, 0, root, new String[0])) { handler.accept(Optional.empty()); } }
// A relative glob with '..' ("../z*") resolves against the parent and matches both
// z-prefixed root children.
@Test public void testZGlob() { InfoConsumer consumer = new InfoConsumer(); GlobVisitor visitor = new GlobVisitor("../z*", consumer); visitor.accept(DATA); assertEquals(Optional.of(Arrays.asList( new MetadataNodeInfo(new String[] {"zeta"}, DATA.root().child("zeta")), new MetadataNodeInfo(new String[] {"zzz"}, DATA.root().child("zzz")))), consumer.infos); }
/**
 * Boxed-key bridge for the primitive-int merge overload.
 * Auto-unboxing throws NullPointerException when {@code key} is null, matching
 * the Map#merge null-key contract for this cache.
 */
public V merge(
    final Integer key,
    final V value,
    final BiFunction<? super V, ? super V, ? extends V> remappingFunction)
{
    final int primitiveKey = key;
    return merge(primitiveKey, value, remappingFunction);
}
// merge must reject a null value with NullPointerException, per the Map contract.
@Test void mergeThrowsNullPointerExceptionIfValueIsNull() { final int key = -9; final BiFunction<String, String, String> remappingFunction = (v1, v2) -> "NEW"; assertThrowsExactly(NullPointerException.class, () -> cache.merge(key, null, remappingFunction)); }
// Applies the log-level configuration to the root logback context: property-driven levels
// first, then hardcoded levels, then loggers that stay OFF unless global TRACE is set.
// Rejects configs whose root logger name doesn't match this process's ROOT_LOGGER_NAME.
public LoggerContext apply(LogLevelConfig logLevelConfig, Props props) { if (!ROOT_LOGGER_NAME.equals(logLevelConfig.getRootLoggerName())) { throw new IllegalArgumentException("Value of LogLevelConfig#rootLoggerName must be \"" + ROOT_LOGGER_NAME + "\""); } LoggerContext rootContext = getRootContext(); logLevelConfig.getConfiguredByProperties().forEach((key, value) -> applyLevelByProperty(props, rootContext.getLogger(key), value)); logLevelConfig.getConfiguredByHardcodedLevel().forEach((key, value) -> applyHardcodedLevel(rootContext, key, value)); Level propertyValueAsLevel = getPropertyValueAsLevel(props, LOG_LEVEL.getKey()); boolean traceGloballyEnabled = propertyValueAsLevel == Level.TRACE; logLevelConfig.getOffUnlessTrace().forEach(logger -> applyHardUnlessTrace(rootContext, logger, traceGloballyEnabled)); return rootContext; }
// Setting a process log-level property to a level outside {TRACE, DEBUG, INFO} must fail
// with an IllegalArgumentException naming the offending property and value.
@Test public void apply_fails_with_IAE_if_process_property_has_unsupported_level() { LogLevelConfig config = newLogLevelConfig().rootLevelFor(WEB_SERVER).build(); props.set("sonar.log.level.web", "ERROR"); assertThatThrownBy(() -> underTest.apply(config, props)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("log level ERROR in property sonar.log.level.web is not a supported value (allowed levels are [TRACE, DEBUG, INFO])"); }
/**
 * Flushes the current output message, if any. A missing current output
 * (e.g. nothing written yet) is a no-op rather than an error.
 */
public void flush() throws IOException {
    final InterpreterResultMessageOutput current = getCurrentOutput();
    if (current == null) {
        return;
    }
    current.flush();
}
// Verifies flush semantics: buffered text after the last newline is only emitted on
// flush(), append/update event counts track emissions, and a type directive (%html)
// switches the output type with its payload appearing after flush.
@Test void testFlush() throws IOException { out.write("hello\nworld"); assertEquals("hello\n", new String(out.getOutputAt(0).toByteArray())); assertEquals(1, numAppendEvent); assertEquals(1, numUpdateEvent); out.flush(); assertEquals("hello\nworld", new String(out.getOutputAt(0).toByteArray())); assertEquals(2, numAppendEvent); assertEquals(1, numUpdateEvent); out.clear(); out.write("%html div"); assertEquals("", new String(out.getOutputAt(0).toByteArray())); assertEquals(InterpreterResult.Type.HTML, out.getOutputAt(0).getType()); out.flush(); assertEquals("div", new String(out.getOutputAt(0).toByteArray())); }
// Static factory for a fresh, empty Builder instance.
public static Builder builder() { return new Builder(); }
// Setting the unicast priority twice on the same builder is illegal and must fail at
// build-configuration time with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class) public void testIllegalUnicastTypeConstruction() { IpPrefix ip = IpPrefix.valueOf(IP_ADDRESS_1); MappingAddress address = MappingAddresses.ipv4MappingAddress(ip); DefaultMappingTreatment.builder() .withAddress(address) .setUnicastPriority(10) .setUnicastWeight(10) .setUnicastPriority(20) .build(); }
// Sends a VERIFY distro request to the target server. Non-existent targets are treated
// as success (nothing to verify); the verify key's target is rewritten to this node's
// address so the remote side can call back. Unhealthy targets and RPC failures return
// false after logging.
@Override public boolean syncVerifyData(DistroData verifyData, String targetServer) { if (isNoExistTarget(targetServer)) { return true; } // replace target server as self server so that can callback. verifyData.getDistroKey().setTargetServer(memberManager.getSelf().getAddress()); DistroDataRequest request = new DistroDataRequest(verifyData, DataOperation.VERIFY); Member member = memberManager.find(targetServer); if (checkTargetServerStatusUnhealthy(member)) { Loggers.DISTRO .warn("[DISTRO] Cancel distro verify caused by target server {} unhealthy, key: {}", targetServer, verifyData.getDistroKey()); return false; } try { Response response = clusterRpcClientProxy.sendRequest(member, request); return checkResponse(response); } catch (NacosException e) { Loggers.DISTRO.error("[DISTRO-FAILED] Verify distro data failed! key: {} ", verifyData.getDistroKey(), e); } return false; }
// Verifying against a non-existent member short-circuits to success: the callback fires
// onSuccess and no member lookup or RPC is attempted.
@Test void testSyncVerifyDataWithCallbackForMemberNonExist() throws NacosException { DistroData verifyData = new DistroData(); verifyData.setDistroKey(new DistroKey()); transportAgent.syncVerifyData(verifyData, member.getAddress(), distroCallback); verify(distroCallback).onSuccess(); verify(memberManager, never()).find(member.getAddress()); verify(clusterRpcClientProxy, never()).asyncRequest(any(Member.class), any(), any()); }
// Derives the QueryId for a statement, in priority order: an explicit WITH queryId wins;
// source CREATE TABLE gets a CST_-prefixed id; sink-less (transient) queries get a random
// transient id; non-CREATE-INTO sinks get an INSERT-prefixed id; a sink with exactly one
// existing writer is reused only when create-or-replace is enabled (multiple writers is
// always an error); otherwise a fresh CTAS/CSAS id is generated from the output node.
static QueryId buildId( final Statement statement, final EngineContext engineContext, final QueryIdGenerator idGenerator, final OutputNode outputNode, final boolean createOrReplaceEnabled, final Optional<String> withQueryId) { if (withQueryId.isPresent()) { final String queryId = withQueryId.get().toUpperCase(); validateWithQueryId(queryId); return new QueryId(queryId); } if (statement instanceof CreateTable && ((CreateTable) statement).isSource()) { // Use the CST name as part of the QueryID final String suffix = ((CreateTable) statement).getName().text().toUpperCase() + "_" + idGenerator.getNext().toUpperCase(); return new QueryId(ReservedQueryIdsPrefixes.CST + suffix); } if (!outputNode.getSinkName().isPresent()) { final String prefix = "transient_" + outputNode.getSource().getLeftmostSourceNode().getAlias().text() + "_"; return new QueryId(prefix + Math.abs(ThreadLocalRandom.current().nextLong())); } final KsqlStructuredDataOutputNode structured = (KsqlStructuredDataOutputNode) outputNode; if (!structured.createInto()) { return new QueryId(ReservedQueryIdsPrefixes.INSERT + idGenerator.getNext()); } final SourceName sink = outputNode.getSinkName().get(); final Set<QueryId> queriesForSink = engineContext.getQueryRegistry().getQueriesWithSink(sink); if (queriesForSink.size() > 1) { throw new KsqlException("REPLACE for sink " + sink + " is not supported because there are " + "multiple queries writing into it: " + queriesForSink); } else if (!queriesForSink.isEmpty()) { if (!createOrReplaceEnabled) { final String type = outputNode.getNodeOutputType().getKsqlType().toLowerCase(); throw new UnsupportedOperationException( String.format( "Cannot add %s '%s': A %s with the same name already exists", type, sink.text(), type)); } return Iterables.getOnlyElement(queriesForSink); } final String suffix = outputNode.getId().toString().toUpperCase() + "_" + idGenerator.getNext().toUpperCase(); return new QueryId( outputNode.getNodeOutputType() == DataSourceType.KTABLE ? 
ReservedQueryIdsPrefixes.CTAS + suffix : ReservedQueryIdsPrefixes.CSAS + suffix ); }
// Reusing an existing sink's query id while create-or-replace is disabled must throw
// UnsupportedOperationException.
@Test(expected = UnsupportedOperationException.class) public void shouldThrowOnReuseIfCreateOrReplacedIsDisabled() { // Given: when(plan.getSinkName()).thenReturn(Optional.of(SINK)); when(plan.createInto()).thenReturn(true); when(plan.getNodeOutputType()).thenReturn(DataSourceType.KSTREAM); when(queryRegistry.getQueriesWithSink(SINK)) .thenReturn(ImmutableSet.of(new QueryId("CTAS_FOO_10"))); // When: QueryIdUtil.buildId(statement, engineContext, idGenerator, plan, false, Optional.empty()); }
/**
 * Loads the Iceberg catalog configured for {@code catalogName}, or
 * {@link Optional#empty()} when the configured type explicitly means
 * "no catalog" (HadoopTables-style location-based tables).
 */
static Optional<Catalog> loadCatalog(Configuration conf, String catalogName) {
    String catalogType = getCatalogType(conf, catalogName);
    if (!NO_CATALOG_TYPE.equalsIgnoreCase(catalogType)) {
        String name = catalogName != null ? catalogName : ICEBERG_DEFAULT_CATALOG_NAME;
        return Optional.of(
            CatalogUtil.buildIcebergCatalog(name, getCatalogProperties(conf, name, catalogType), conf));
    }
    return Optional.empty();
}
// The HadoopTables "location" pseudo-catalog must resolve to no catalog (empty Optional).
@Test public void testLoadCatalogLocation() { Assert.assertFalse(Catalogs.loadCatalog(conf, Catalogs.ICEBERG_HADOOP_TABLE_NAME).isPresent()); }
// Collects every parameter name referenced anywhere in this config and returns them as a
// ParamsConfig of name-only entries. Resolution runs against a deep clone so this object
// is never mutated by the resolver.
public ParamsConfig referredParams() { ParamReferenceCollectorFactory paramHandlerFactory = new ParamReferenceCollectorFactory(); new ParamResolver(paramHandlerFactory, FIELD_CACHE).resolve(CLONER.deepClone(this)); ParamsConfig paramsConfig = new ParamsConfig(); for (String param : paramHandlerFactory.referredParams()) { paramsConfig.add(new ParamConfig(param, null)); } return paramsConfig; }
// All three params referenced by the template are collected, and a repeat call returns
// the same result (referredParams must not mutate the template).
@Test public void shouldUnderstandUsedParams() { PipelineTemplateConfig template = PipelineTemplateConfigMother.createTemplateWithParams("template-name", "foo", "bar", "baz"); ParamsConfig params = template.referredParams(); assertThat(params.size(), is(3)); assertThat(params, hasItem(new ParamConfig("foo", null))); assertThat(params, hasItem(new ParamConfig("bar", null))); assertThat(params, hasItem(new ParamConfig("baz", null))); params = template.referredParams();//should not mutate assertThat(params.size(), is(3)); }
/**
 * Splits a "/rack/.../host" path into its rack portion and host name.
 *
 * @param hostname a network-topology path such as "/rack1/node1"
 * @return a two-element array: [rack path without the leading '/', host name]
 */
public static String[] getRackHostName(String hostname) {
    final NodeBase node = new NodeBase(hostname);
    // Drop the leading '/' from the network location to get the bare rack path.
    final String rack = node.getNetworkLocation().substring(1);
    final String host = node.getName();
    return new String[] {rack, host};
}
// Single- and multi-level rack paths split into (rack-without-leading-slash, host).
@Test public void testGetRackHostname() { String str = "/rack1/node1"; String[] rackHostname = SLSUtils.getRackHostName(str); Assert.assertEquals("rack1", rackHostname[0]); Assert.assertEquals("node1", rackHostname[1]); str = "/rackA/rackB/node1"; rackHostname = SLSUtils.getRackHostName(str); Assert.assertEquals("rackA/rackB", rackHostname[0]); Assert.assertEquals("node1", rackHostname[1]); }
// Fast path: when this stream owns its buffer (not in fallback mode) and the buffer is
// exactly full, return the internal array WITHOUT copying — callers must treat it as
// read-only. Otherwise fall back to the copying superclass implementation.
@Override // Exposes internal mutable reference by design - Spotbugs is right to warn that this is dangerous public synchronized byte[] toByteArray() { // Note: count == buf.length is not a correct criteria to "return buf;", because the internal // buf may be reused after reset(). if (!isFallback && count > 0) { return buf; } else { return super.toByteArray(); } }
// Contents must match the reference stream, and the exposed array must not alias the
// caller's input array.
@Test public void testWriteSingleArrayWithLength() { writeToBoth(TEST_DATA, 0, TEST_DATA.length); assertStreamContentsEquals(stream, exposedStream); assertNotSame(TEST_DATA, exposedStream.toByteArray()); }
/**
 * Resolves the literal type for {@code literal} as interpreted for the given
 * class name, or {@code null} when no analyzer is registered for that name.
 * The registered analyzer first validates the literal, so malformed literals
 * fail before any type is returned.
 */
public static Class<?> getLiteral(String className, String literal) {
    LiteralAnalyzer analyzer = ANALYZERS.get( className );
    // Fix: use Class<?> instead of the raw Class type to avoid an unchecked/raw-type warning.
    Class<?> result = null;
    if ( analyzer != null ) {
        analyzer.validate( literal );
        result = analyzer.getLiteral();
    }
    return result;
}
// A long-suffixed literal ('l'/'L') is not a valid int literal, so resolution yields null.
@Test public void testIntPrimitiveWithLongSuffix() { assertThat( getLiteral( int.class.getCanonicalName(), "156l" ) ).isNull(); assertThat( getLiteral( int.class.getCanonicalName(), "156L" ) ).isNull(); }
/**
 * Returns an iterator over this result set: an empty iterator when no entries
 * were captured, otherwise a ResultIterator over the backing entries.
 */
@Override
public Iterator iterator() {
    return entries == null ? Collections.emptyIterator() : new ResultIterator();
}
// With IterationType.VALUE the iterator yields entry values, not keys or entries.
@Test public void testIterator_whenNotEmpty_IterationType_Value() { List<Map.Entry> entries = new ArrayList<>(); MapEntrySimple entry = new MapEntrySimple("key", "value"); entries.add(entry); ResultSet resultSet = new ResultSet(entries, IterationType.VALUE); Iterator iterator = resultSet.iterator(); assertTrue(iterator.hasNext()); assertEquals("value", iterator.next()); }
// A publication is connected only while it is open AND the log buffer metadata reports
// at least one connected subscriber.
public boolean isConnected() { return !isClosed && LogBufferDescriptor.isConnected(logMetaDataBuffer); }
// Marking the log metadata as connected makes the open publication report connected.
@Test void shouldReportThatPublicationHasBeenConnectedYet() { isConnected(logMetaDataBuffer, true); assertTrue(publication.isConnected()); }
/**
 * Lists the files under {@code fileUri} with their metadata.
 * Non-recursive mode lists direct children only; recursive mode walks the whole
 * tree, excluding the root directory itself.
 *
 * @throws IOException if the path cannot be listed (not a directory, or an I/O error)
 */
@Override
public List<FileMetadata> listFilesWithMetadata(URI fileUri, boolean recursive) throws IOException {
    File file = toFile(fileUri);
    if (!recursive) {
        // Fix: File.list() returns null when the path is not a directory or an I/O error
        // occurs; surface a descriptive IOException instead of an NPE.
        String[] children = file.list();
        if (children == null) {
            throw new IOException("Failed to list files under: " + file + " (not a directory or I/O error)");
        }
        return Arrays.stream(children).map(s -> getFileMetadata(new File(file, s))).collect(Collectors.toList());
    } else {
        // Files.walk must be closed to release the underlying directory streams.
        try (Stream<Path> pathStream = Files.walk(Paths.get(fileUri))) {
            return pathStream.filter(s -> !s.equals(file.toPath())).map(p -> getFileMetadata(p.toFile()))
                .collect(Collectors.toList());
        }
    }
}
// Builds a directory tree with nested dirs, nested files, an empty dir, and a root file,
// then verifies both listFiles and listFilesWithMetadata in recursive and non-recursive
// modes: counts, directory/file classification, and path membership.
@Test public void testListFilesWithMetadata() throws IOException { LocalPinotFS localPinotFS = new LocalPinotFS(); File tempDirPath = new File(_absoluteTmpDirPath, "test-list-files-with-md"); Assert.assertTrue(tempDirPath.mkdir()); // Create a testDir and file underneath directory int count = 5; List<String> expectedNonRecursive = new ArrayList<>(); List<String> expectedRecursive = new ArrayList<>(); for (int i = 0; i < count; i++) { File testDir = new File(tempDirPath, "testDir" + i); Assert.assertTrue(testDir.mkdir()); expectedNonRecursive.add(testDir.getAbsolutePath()); File testFile = new File(testDir, "testFile" + i); Assert.assertTrue(testFile.createNewFile()); expectedRecursive.add(testDir.getAbsolutePath()); expectedRecursive.add(testFile.getAbsolutePath()); } File testDirEmpty = new File(tempDirPath, "testDirEmpty"); Assert.assertTrue(testDirEmpty.mkdir()); expectedNonRecursive.add(testDirEmpty.getAbsolutePath()); expectedRecursive.add(testDirEmpty.getAbsolutePath()); File testRootFile = new File(tempDirPath, "testRootFile"); Assert.assertTrue(testRootFile.createNewFile()); expectedNonRecursive.add(testRootFile.getAbsolutePath()); expectedRecursive.add(testRootFile.getAbsolutePath()); // Assert that recursive list files and nonrecursive list files are as expected String[] files = localPinotFS.listFiles(tempDirPath.toURI(), false); Assert.assertEquals(files.length, count + 2); Assert.assertTrue(expectedNonRecursive.containsAll(Arrays.asList(files)), Arrays.toString(files)); files = localPinotFS.listFiles(tempDirPath.toURI(), true); Assert.assertEquals(files.length, count * 2 + 2); Assert.assertTrue(expectedRecursive.containsAll(Arrays.asList(files)), Arrays.toString(files)); // Assert that recursive list files and nonrecursive list files with file info are as expected List<FileMetadata> fileMetadata = localPinotFS.listFilesWithMetadata(tempDirPath.toURI(), false); Assert.assertEquals(fileMetadata.size(), count + 2); 
Assert.assertEquals(fileMetadata.stream().filter(FileMetadata::isDirectory).count(), count + 1); Assert.assertEquals(fileMetadata.stream().filter(f -> !f.isDirectory()).count(), 1); Assert.assertTrue(expectedNonRecursive .containsAll(fileMetadata.stream().map(FileMetadata::getFilePath).collect(Collectors.toSet())), fileMetadata.toString()); fileMetadata = localPinotFS.listFilesWithMetadata(tempDirPath.toURI(), true); Assert.assertEquals(fileMetadata.size(), count * 2 + 2); Assert.assertEquals(fileMetadata.stream().filter(FileMetadata::isDirectory).count(), count + 1); Assert.assertEquals(fileMetadata.stream().filter(f -> !f.isDirectory()).count(), count + 1); Assert.assertTrue( expectedRecursive.containsAll(fileMetadata.stream().map(FileMetadata::getFilePath).collect(Collectors.toSet())), fileMetadata.toString()); }
// Deserializes a LeaveGroupResponse from the wire format for the given protocol version.
public static LeaveGroupResponse parse(ByteBuffer buffer, short version) { return new LeaveGroupResponse(new LeaveGroupResponseData(new ByteBufferAccessor(buffer), version)); }
// Round-trips the response through every supported protocol version; throttle time is
// only carried from version 1 onward (older versions report the default).
@Test public void testParse() { Map<Errors, Integer> expectedErrorCounts = Collections.singletonMap(Errors.NOT_COORDINATOR, 1); LeaveGroupResponseData data = new LeaveGroupResponseData() .setErrorCode(Errors.NOT_COORDINATOR.code()) .setThrottleTimeMs(throttleTimeMs); for (short version : ApiKeys.LEAVE_GROUP.allVersions()) { ByteBuffer buffer = MessageUtil.toByteBuffer(data, version); LeaveGroupResponse leaveGroupResponse = LeaveGroupResponse.parse(buffer, version); assertEquals(expectedErrorCounts, leaveGroupResponse.errorCounts()); if (version >= 1) { assertEquals(throttleTimeMs, leaveGroupResponse.throttleTimeMs()); } else { assertEquals(DEFAULT_THROTTLE_TIME, leaveGroupResponse.throttleTimeMs()); } assertEquals(Errors.NOT_COORDINATOR, leaveGroupResponse.error()); } }
/**
 * Removes the write permission from all queues of the given broker, under the
 * route-table write lock. Returns the number of affected topics, or 0 when the
 * operation fails (the failure is logged, not rethrown).
 */
public int wipeWritePermOfBrokerByLock(final String brokerName) {
    try {
        // Fix: acquire the lock BEFORE entering the try/finally that unlocks it.
        // Previously lockInterruptibly() sat inside that try, so a failed/interrupted
        // acquisition still ran unlock() and raised IllegalMonitorStateException.
        this.lock.writeLock().lockInterruptibly();
        try {
            return operateWritePermOfBroker(brokerName, RequestCode.WIPE_WRITE_PERM_OF_BROKER);
        } finally {
            this.lock.writeLock().unlock();
        }
    } catch (Exception e) {
        log.error("wipeWritePermOfBrokerByLock Exception", e);
    }
    return 0;
}
// Wiping write permission for "broker-a" must report one affected topic and leave the
// queue with only the READ bit set.
@Test public void testWipeWritePermOfBrokerByLock() throws Exception { Map<String, QueueData> qdMap = new HashMap<>(); QueueData qd = new QueueData(); qd.setPerm(PermName.PERM_READ | PermName.PERM_WRITE); qd.setBrokerName("broker-a"); qdMap.put("broker-a",qd); HashMap<String, Map<String, QueueData>> topicQueueTable = new HashMap<>(); topicQueueTable.put("topic-a", qdMap); Field filed = RouteInfoManager.class.getDeclaredField("topicQueueTable"); filed.setAccessible(true); filed.set(routeInfoManager, topicQueueTable); int addTopicCnt = routeInfoManager.wipeWritePermOfBrokerByLock("broker-a"); assertThat(addTopicCnt).isEqualTo(1); assertThat(qd.getPerm()).isEqualTo(PermName.PERM_READ); }
// When the statement class requires synchronization, block until the command queue has
// consumed past the LAST command-status entity in the previous results (the list is
// scanned in reverse for the most recent sequence number). Interruption maps to a
// server-shutting-down error; timeout maps to a catch-up-timeout error.
@Override public void waitFor( final KsqlEntityList previousCommands, final Class<? extends Statement> statementClass) { if (mustSync.test(statementClass)) { final ArrayList<KsqlEntity> reversed = new ArrayList<>(previousCommands); Collections.reverse(reversed); reversed.stream() .filter(e -> e instanceof CommandStatusEntity) .map(CommandStatusEntity.class::cast) .map(CommandStatusEntity::getCommandSequenceNumber) .findFirst() .ifPresent(seqNum -> { try { commandQueue.ensureConsumedPast(seqNum, timeout); } catch (final InterruptedException e) { throw new KsqlRestException(Errors.serverShuttingDown()); } catch (final TimeoutException e) { throw new KsqlRestException(Errors.commandQueueCatchUpTimeout(seqNum)); } }); } }
// When the sync predicate rejects the statement class, the queue is never consulted.
@Test public void shouldNotWaitIfNotMustSync() throws Exception { // Given: givenSyncWithPredicate(clazz -> false); givenEntities(commandStatusEntity1); // When: commandQueueSync.waitFor(entities, CreateStreamAsSelect.class); // Then: verify(commandQueue, never()).ensureConsumedPast(anyLong(), any()); }
// Returns the fixed set of base permissions granted to every reader role.
@Override public Set<Permission> readerBasePermissions() { return READER_BASE_PERMISSIONS; }
// The reader base permission set must match the declared selection size exactly.
@Test public void testReaderBasePermissions() throws Exception { final RestPermissions restPermissions = new RestPermissions(); Assertions.assertThat(restPermissions.readerBasePermissions()) .hasSize(RestPermissions.READER_BASE_PERMISSION_SELECTION.size()); }
// Parses a decimal BTC string into a Coin of satoshis. An ArithmeticException from the
// satoshi conversion (e.g. sub-satoshi fractions) is repackaged as
// IllegalArgumentException to honor the method contract; a malformed number already
// throws NumberFormatException, itself an IllegalArgumentException subclass.
public static Coin parseCoin(final String str) { try { long satoshis = btcToSatoshi(new BigDecimal(str)); return Coin.valueOf(satoshis); } catch (ArithmeticException e) { throw new IllegalArgumentException(e); // Repackage exception to honor method contract } }
// Covers decimal and scientific notation, negatives, rejection of sub-satoshi fractions
// (IllegalArgumentException), and trailing-zero tolerance at the satoshi boundary.
@Test public void testParseCoin() { // String version assertEquals(CENT, parseCoin("0.01")); assertEquals(CENT, parseCoin("1E-2")); assertEquals(COIN.add(CENT), parseCoin("1.01")); assertEquals(COIN.negate(), parseCoin("-1")); try { parseCoin("2E-20"); org.junit.Assert.fail("should not have accepted fractional satoshis"); } catch (IllegalArgumentException expected) { } catch (Exception e) { org.junit.Assert.fail("should throw IllegalArgumentException"); } assertEquals(1, parseCoin("0.00000001").value); assertEquals(1, parseCoin("0.000000010").value); }
/**
 * Registers a consumer instance under the given group name.
 *
 * @param group    consumer group name; {@code null} is rejected
 * @param consumer consumer to register; {@code null} is rejected
 * @return {@code true} if the group was newly registered, {@code false} if the
 *         arguments were null or the group already had a registered consumer
 */
public synchronized boolean registerConsumer(final String group, final MQConsumerInner consumer) {
    if (group == null || consumer == null) {
        return false;
    }
    // putIfAbsent makes the check-and-insert a single atomic operation on the table.
    final MQConsumerInner existing = this.consumerTable.putIfAbsent(group, consumer);
    if (existing == null) {
        return true;
    }
    log.warn("the consumer group[" + group + "] exist already.");
    return false;
}
// Register, duplicate-register (rejected), unregister, then register again (accepted).
@Test
public void testRegisterConsumer() {
    boolean flag = mqClientInstance.registerConsumer(group, mock(MQConsumerInner.class));
    assertThat(flag).isTrue();
    // A second registration under the same group must be refused.
    flag = mqClientInstance.registerConsumer(group, mock(MQConsumerInner.class));
    assertThat(flag).isFalse();
    // After unregistering, the group name becomes available again.
    mqClientInstance.unregisterConsumer(group);
    flag = mqClientInstance.registerConsumer(group, mock(MQConsumerInner.class));
    assertThat(flag).isTrue();
}
/**
 * Deletes files by moving them to the trash, then clears their cached resource
 * ids so stale ids are not reused by subsequent lookups.
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    this.trash(files, prompt, callback);
    for(Path f : files.keySet()) {
        // Invalidate the cached resource id for every trashed path.
        fileid.cache(f, null);
    }
}
// Regression test: a "~$name.docx" lock-owner file can be created and trashed twice
// under the same name (stale cached ids must not break the second delete).
@Test
public void testDeleteLockOwnerFile() throws Exception {
    final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
    final Path folder = new EueDirectoryFeature(session, fileid).mkdir(
            new Path(new AlphanumericRandomStringService().random(), EnumSet.of(AbstractPath.Type.directory)), new TransferStatus());
    final String filename = String.format("~$%s.docx", new AlphanumericRandomStringService().random());
    // First create/delete cycle.
    {
        final Path file1 = new Path(folder, filename, EnumSet.of(Path.Type.file));
        createFile(fileid, file1, RandomUtils.nextBytes(511));
        assertTrue(new EueFindFeature(session, fileid).find(file1));
        new EueTrashFeature(session, fileid).delete(Collections.singletonList(file1), new DisabledLoginCallback(), new Delete.DisabledCallback());
        assertFalse((new EueFindFeature(session, fileid).find(file1, new DisabledListProgressListener())));
    }
    // Second cycle with the exact same filename must also succeed.
    {
        final Path file1 = new Path(folder, filename, EnumSet.of(Path.Type.file));
        createFile(fileid, file1, RandomUtils.nextBytes(511));
        assertTrue(new EueFindFeature(session, fileid).find(file1));
        new EueTrashFeature(session, fileid).delete(Collections.singletonList(file1), new DisabledLoginCallback(), new Delete.DisabledCallback());
        assertFalse((new EueFindFeature(session, fileid).find(file1, new DisabledListProgressListener())));
    }
    // Clean up the containing folder.
    new EueTrashFeature(session, fileid).delete(Collections.singletonList(folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse((new EueFindFeature(session, fileid).find(folder, new DisabledListProgressListener())));
}
/**
 * Marks this buffer as spilling and keeps it alive (via an extra reference)
 * until the given future completes.
 *
 * @param spilledFuture completes when the spill has finished
 * @return {@code false} if the buffer was already released or spilling had
 *         already started; {@code true} if spilling was started by this call
 */
public boolean startSpilling(CompletableFuture<Void> spilledFuture) {
    if (isReleased() || isSpillStarted()) {
        return false;
    }
    spillStarted = true;
    this.spilledFuture = spilledFuture;
    // increase ref count when buffer is decided to spill.
    buffer.retainBuffer();
    // decrease ref count when buffer spilling is finished.
    spilledFuture.thenRun(buffer::recycleBuffer);
    return true;
}
// startSpilling must succeed only on the first call; repeats are rejected.
@Test
void testBufferStartSpillingRepeatedly() {
    assertThat(bufferContext.startSpilling(new CompletableFuture<>())).isTrue();
    assertThat(bufferContext.startSpilling(new CompletableFuture<>())).isFalse();
}
/**
 * Validates that the department with the given id exists.
 * A {@code null} id is treated as "no department referenced" and passes.
 *
 * @throws the DEPT_NOT_FOUND service exception if the id is set but unknown
 */
@VisibleForTesting
void validateDeptExists(Long id) {
    // Only a non-null id that resolves to no row is an error.
    if (id != null && deptMapper.selectById(id) == null) {
        throw exception(DEPT_NOT_FOUND);
    }
}
// A random (nonexistent) id must raise the DEPT_NOT_FOUND service exception.
@Test
public void testValidateDeptExists_notFound() {
    // Prepare parameters
    Long id = randomLongId();
    // Invoke and assert the expected exception
    assertServiceException(() -> deptService.validateDeptExists(id), DEPT_NOT_FOUND);
}
static String convertEnvVars(String input){ // check for any non-alphanumeric chars and convert to underscore // convert to upper case if (input == null) { return null; } return input.replaceAll("[^A-Za-z0-9]", "_").toUpperCase(); }
// Dots become underscores and mixed case is normalized to upper case.
@Test
public void testConvertEnvVarsUsingDotInValueWithMixCases() {
    String testInput = ConfigInjection.convertEnvVars("serVER.ENVironment");
    Assert.assertEquals("SERVER_ENVIRONMENT", testInput);
}
/**
 * Normalises an ENS name according to ENSIP-15.
 *
 * @throws EnsResolutionException if the name contains an invalid label
 *         NOTE(review): the underlying InvalidLabelException is dropped here;
 *         consider attaching it as the cause if EnsResolutionException supports one.
 */
public static String normalise(String ensName) {
    try {
        return ENSNormalize.ENSIP15.normalize(ensName);
    } catch (InvalidLabelException e) {
        throw new EnsResolutionException("Invalid ENS name provided: " + ensName);
    }
}
// ENSIP-15 normalisation: case folding, zero-width char removal, hyphen mapping.
@Test
public void testNormalise() {
    assertEquals(normalise("foo"), ("foo"));
    assertEquals(normalise("foo.bar.baz.eth"), ("foo.bar.baz.eth"));
    assertEquals(normalise("fOo.eth"), ("foo.eth"));
    assertEquals(normalise("foo-bar.eth"), ("foo-bar.eth"));
    assertEquals(normalise("Obb.at"), ("obb.at"));
    assertEquals(normalise("TESTER.eth"), ("tester.eth"));
    // Zero-width space (U+200B) must be stripped.
    assertEquals(normalise("test\u200btest.com"), ("testtest.com"));
    // All Unicode hyphen variants map to ASCII '-'.
    assertEquals(normalise("hyph-‐‑‒–—―⁃−⎯⏤﹘e⸺n⸻s.eth"), ("hyph------------e--n---s.eth"));
}
/**
 * Creates a JobGraph from the given StreamGraph using the current thread's
 * context class loader and a synchronous (same-thread) executor.
 */
@VisibleForTesting
public static JobGraph createJobGraph(StreamGraph streamGraph) {
    return new StreamingJobGraphGenerator(
                    Thread.currentThread().getContextClassLoader(),
                    streamGraph,
                    null,
                    Runnable::run)
            .createJobGraph();
}
// One chained vertex holding one input format (the source) and two output
// formats (the sinks) must expose all three through the format container,
// keyed by the operator ids recorded in the stream configs.
@Test
void testInputOutputFormat() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStream<Long> source =
            env.addSource(
                            new InputFormatSourceFunction<>(
                                    new TypeSerializerInputFormat<>(
                                            TypeInformation.of(Long.class)),
                                    TypeInformation.of(Long.class)),
                            TypeInformation.of(Long.class))
                    .name("source");
    source.writeUsingOutputFormat(new DiscardingOutputFormat<>()).name("sink1");
    source.writeUsingOutputFormat(new DiscardingOutputFormat<>()).name("sink2");
    StreamGraph streamGraph = env.getStreamGraph();
    JobGraph jobGraph = StreamingJobGraphGenerator.createJobGraph(streamGraph);
    // Everything should have chained into a single format-carrying vertex.
    assertThat(jobGraph.getNumberOfVertices()).isEqualTo(1);
    JobVertex jobVertex = jobGraph.getVertices().iterator().next();
    assertThat(jobVertex).isInstanceOf(InputOutputFormatVertex.class);
    InputOutputFormatContainer formatContainer =
            new InputOutputFormatContainer(
                    new TaskConfig(jobVertex.getConfiguration()),
                    Thread.currentThread().getContextClassLoader());
    Map<OperatorID, UserCodeWrapper<? extends InputFormat<?, ?>>> inputFormats =
            formatContainer.getInputFormats();
    Map<OperatorID, UserCodeWrapper<? extends OutputFormat<?>>> outputFormats =
            formatContainer.getOutputFormats();
    assertThat(inputFormats).hasSize(1);
    assertThat(outputFormats).hasSize(2);
    // Map operator names to ids by walking the head config plus its chain.
    Map<String, OperatorID> nameToOperatorIds = new HashMap<>();
    StreamConfig headConfig = new StreamConfig(jobVertex.getConfiguration());
    nameToOperatorIds.put(headConfig.getOperatorName(), headConfig.getOperatorID());
    Map<Integer, StreamConfig> chainedConfigs =
            headConfig.getTransitiveChainedTaskConfigs(
                    Thread.currentThread().getContextClassLoader());
    for (StreamConfig config : chainedConfigs.values()) {
        nameToOperatorIds.put(config.getOperatorName(), config.getOperatorID());
    }
    InputFormat<?, ?> sourceFormat =
            inputFormats.get(nameToOperatorIds.get("Source: source")).getUserCodeObject();
    assertThat(sourceFormat).isInstanceOf(TypeSerializerInputFormat.class);
    OutputFormat<?> sinkFormat1 =
            outputFormats.get(nameToOperatorIds.get("Sink: sink1")).getUserCodeObject();
    assertThat(sinkFormat1).isInstanceOf(DiscardingOutputFormat.class);
    OutputFormat<?> sinkFormat2 =
            outputFormats.get(nameToOperatorIds.get("Sink: sink2")).getUserCodeObject();
    assertThat(sinkFormat2).isInstanceOf(DiscardingOutputFormat.class);
}
/**
 * Dispatches a prepared statement: Query statements are executed via
 * handleQuery; every other statement type is reported as unhandled so the
 * caller can route it elsewhere.
 */
@SuppressWarnings("unchecked")
public QueryMetadataHolder handleStatement(
    final ServiceContext serviceContext,
    final Map<String, Object> configOverrides,
    final Map<String, Object> requestProperties,
    final PreparedStatement<?> statement,
    final Optional<Boolean> isInternalRequest,
    final MetricsCallbackHolder metricsCallbackHolder,
    final Context context,
    final boolean excludeTombstones
) {
  if (statement.getStatement() instanceof Query) {
    // Safe: the instanceof check above guarantees the cast.
    return handleQuery(
        serviceContext,
        (PreparedStatement<Query>) statement,
        isInternalRequest,
        metricsCallbackHolder,
        configOverrides,
        requestProperties,
        context,
        excludeTombstones
    );
  } else {
    return QueryMetadataHolder.unhandled();
  }
}
// A push-v2 query should produce scalable-push metadata and report its metrics
// through the scalable-push recorder, not the pull-query recorder.
@Test
public void shouldRunScalablePushQuery_success() {
    // Given:
    when(ksqlEngine.executeScalablePushQuery(any(), any(), any(), any(), any(), any(), any(), any()))
        .thenReturn(scalablePushQueryMetadata);

    // When:
    final QueryMetadataHolder queryMetadataHolder = queryExecutor.handleStatement(
        serviceContext,
        ImmutableMap.of(
            KsqlConfig.KSQL_QUERY_PUSH_V2_ENABLED, true,
            ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest"),
        ImmutableMap.of(), pushQuery, Optional.empty(), metricsCallbackHolder, context, false);
    // Should be no metrics reported for push queries
    metricsCallbackHolder.reportMetrics(200, 1000L, 5000L, 20000L);

    // Then: pull-query metrics stay untouched; scalable-push metrics record everything.
    verifyNoMoreInteractions(pullQueryExecutorMetrics);
    assertThat(queryMetadataHolder.getScalablePushQueryMetadata().isPresent(), is(true));
    verify(scalablePushQueryMetrics).recordStatusCode(200);
    verify(scalablePushQueryMetrics).recordRequestSize(1000L);
    verify(scalablePushQueryMetrics).recordResponseSize(5000d, QuerySourceType.NON_WINDOWED, RoutingNodeType.SOURCE_NODE);
    verify(scalablePushQueryMetrics).recordConnectionDuration(20000L, QuerySourceType.NON_WINDOWED, RoutingNodeType.SOURCE_NODE);
    verify(scalablePushQueryMetrics).recordRowsProcessed(ROWS_PROCESSED, QuerySourceType.NON_WINDOWED, RoutingNodeType.SOURCE_NODE);
    verify(scalablePushQueryMetrics).recordRowsReturned(ROWS_RETURNED, QuerySourceType.NON_WINDOWED, RoutingNodeType.SOURCE_NODE);
}
/**
 * Creates a meeting and responds 201 Created with a Location header pointing
 * at the new meeting page and a host-token cookie scoped to the meeting path.
 */
@PostMapping("/api/v1/meetings")
public ResponseEntity<MomoApiResponse<MeetingCreateResponse>> create(
        @RequestBody @Valid MeetingCreateRequest request
) {
    MeetingCreateResponse response = meetingService.create(request);
    // The cookie is scoped to this meeting's path so it is not sent elsewhere.
    String path = cookieManager.pathOf(response.uuid());
    String cookie = cookieManager.createNewCookie(response.token(), path);
    return ResponseEntity.created(URI.create("/meeting/" + response.uuid()))
            .header(HttpHeaders.SET_COOKIE, cookie)
            .body(new MomoApiResponse<>(response));
}
// A non-host guest attempting to confirm a schedule must receive 403 Forbidden.
@DisplayName("주최자가 아닌 참가자가 약속 일정을 확정하면 403 상태 코드를 응답한다.")
@Test
void confirmScheduleNotHost() {
    Meeting meeting = createLockedMovieMeeting();
    AvailableDate tomorrow = availableDateRepository.save(new AvailableDate(LocalDate.now().plusDays(1), meeting));
    // The authenticated attendee is a guest, not the host.
    Attendee guest = attendeeRepository.save(AttendeeFixture.GUEST_MARK.create(meeting));
    String token = getToken(guest, meeting);
    MeetingConfirmRequest request = getValidFindRequest(tomorrow);

    RestAssured.given().log().all()
            .cookie("ACCESS_TOKEN", token)
            .pathParam("uuid", meeting.getUuid())
            .contentType(ContentType.JSON)
            .body(request)
            .when().post("/api/v1/meetings/{uuid}/confirm")
            .then().log().all()
            .statusCode(HttpStatus.FORBIDDEN.value());
}
/**
 * Evaluates the SASL EXTERNAL response for a server-to-server connection:
 * determines the requested authorization id (from the response or the stream's
 * 'from' attribute), checks it against the stream identity, and verifies the
 * peer's TLS certificates before declaring success.
 *
 * @return an empty challenge when an initial response is required but absent,
 *         or {@code null} on successful authentication
 * @throws SaslFailureException on any authentication/authorization failure
 */
@Override
public byte[] evaluateResponse( @Nonnull final byte[] response ) throws SaslException
{
    if ( isComplete() )
    {
        throw new IllegalStateException( "Authentication exchange already completed." );
    }

    // The value as sent to us in the 'from' attribute of the stream element sent by the remote server.
    final String defaultIdentity = session.getDefaultIdentity();

    // RFC 6120 Section 4.7.1:
    // "Because a server is a "public entity" on the XMPP network, it MUST include the 'from' attribute after the
    // confidentiality and integrity of the stream are protected via TLS or an equivalent security layer."
    //
    // When doing SASL EXTERNAL, TLS must already have been negotiated, which means that the 'from' attribute must have been set.
    if (defaultIdentity == null || defaultIdentity.isEmpty()) {
        throw new SaslFailureException(Failure.NOT_AUTHORIZED, "Peer does not provide 'from' attribute value on stream.");
    }

    final String requestedId;
    if (response.length == 0 && session.getSessionData(SASLAuthentication.SASL_LAST_RESPONSE_WAS_PROVIDED_BUT_EMPTY) == null)
    {
        if (PROPERTY_SASL_EXTERNAL_SERVER_REQUIRE_AUTHZID.getValue()) {
            // No initial response. Send a challenge to get one, per RFC 4422 appendix-A.
            return new byte[0];
        } else {
            // No authzid required: fall back to the stream's own identity.
            requestedId = defaultIdentity;
        }
    }
    else
    {
        requestedId = new String( response, StandardCharsets.UTF_8 );
    }

    complete = true;

    Log.trace("Completing handshake with '{}' using authzid value: '{}'", defaultIdentity, requestedId);

    // Added for backwards compatibility. Not required by XMPP, but versions of Openfire prior to 4.8.0 did require the authzid to be present.
    if (SASLAuthentication.EXTERNAL_S2S_REQUIRE_AUTHZID.getValue() && requestedId.isEmpty()) {
        throw new SaslFailureException(Failure.INVALID_AUTHZID, "Peer does not provide authzid, which is required by configuration.");
    }

    // When an authorization identity is provided, make sure that it matches the 'from' value from the session stream.
    if (!requestedId.isEmpty() && !requestedId.equals(defaultIdentity)) {
        throw new SaslFailureException(Failure.INVALID_AUTHZID, "Stream 'from' attribute value '" + defaultIdentity + "' does not equal SASL authzid '" + requestedId + "'");
    }

    // Certificate verification is the actual EXTERNAL authentication step.
    if (!SASLAuthentication.verifyCertificates(session.getConnection().getPeerCertificates(), defaultIdentity, true)) {
        throw new SaslFailureException(Failure.NOT_AUTHORIZED, "Server-to-Server certificate verification failed.");
    }

    authorizationID = defaultIdentity;
    Log.trace("Successfully authenticated '{}'", authorizationID);
    return null; // Success!
}
// An initial response equal to the stream's 'from' identity, with valid
// certificates, must authenticate successfully (null return).
@Test
public void testInitialResponseMatchingStreamID() throws Exception
{
    // Setup test fixture.
    final String streamID = "example.org";
    when(session.getDefaultIdentity()).thenReturn(streamID);
    when(session.getConnection()).thenReturn(connection);
    saslAuthentication.when(() -> SASLAuthentication.verifyCertificates(any(), eq(streamID), anyBoolean())).thenReturn(true);
    final ExternalServerSaslServer server = new ExternalServerSaslServer(session);
    final byte[] input = streamID.getBytes(StandardCharsets.UTF_8);

    // Execute system under test.
    final byte[] response = server.evaluateResponse(input);

    // Verify results.
    assertNull(response); // This asserts for successful authentication
}
/**
 * Sends an SMPP ReplaceSm command for the exchange's message and writes the
 * replaced message id back onto the result message.
 *
 * @throws SmppException wrapping any failure from the SMPP session
 */
@Override
public void execute(Exchange exchange) throws SmppException {
    byte[] message = getShortMessage(exchange.getIn());
    ReplaceSm replaceSm = createReplaceSmTempate(exchange);
    replaceSm.setShortMessage(message);
    if (log.isDebugEnabled()) {
        log.debug("Sending replacement command for a short message for exchange id '{}' and message id '{}'",
                exchange.getExchangeId(), replaceSm.getMessageId());
    }

    try {
        session.replaceShortMessage(
                replaceSm.getMessageId(),
                TypeOfNumber.valueOf(replaceSm.getSourceAddrTon()),
                NumberingPlanIndicator.valueOf(replaceSm.getSourceAddrNpi()),
                replaceSm.getSourceAddr(),
                replaceSm.getScheduleDeliveryTime(),
                replaceSm.getValidityPeriod(),
                new RegisteredDelivery(replaceSm.getRegisteredDelivery()),
                replaceSm.getSmDefaultMsgId(),
                replaceSm.getShortMessage());
    } catch (Exception e) {
        // Repackage any session failure as the component's checked exception.
        throw new SmppException(e);
    }

    if (log.isDebugEnabled()) {
        log.debug("Sent replacement command for a short message for exchange id '{}' and message id '{}'",
                exchange.getExchangeId(), replaceSm.getMessageId());
    }
    Message rspMsg = ExchangeHelper.getResultMessage(exchange);
    rspMsg.setHeader(SmppConstants.ID, replaceSm.getMessageId());
}
// With 8-bit data coding the binary payload must be passed through unmodified
// (no charset conversion may touch the raw bytes).
@Test
public void bodyWithSMPP8bitDataCodingNotModified() throws Exception {
    final int dataCoding = 0x04; /* SMPP 8-bit */
    // Deliberately includes bytes outside any text encoding's safe range.
    byte[] body = { (byte) 0xFF, 'A', 'B', (byte) 0x00, (byte) 0xFF, (byte) 0x7F, 'C', (byte) 0xFF };
    Exchange exchange = new DefaultExchange(new DefaultCamelContext(), ExchangePattern.InOut);
    exchange.getIn().setHeader(SmppConstants.COMMAND, "ReplaceSm");
    exchange.getIn().setHeader(SmppConstants.DATA_CODING, dataCoding);
    exchange.getIn().setBody(body);

    command.execute(exchange);

    verify(session).replaceShortMessage((String) isNull(),
            eq(TypeOfNumber.UNKNOWN),
            eq(NumberingPlanIndicator.UNKNOWN),
            eq("1616"),
            (String) isNull(),
            (String) isNull(),
            eq(new RegisteredDelivery(SMSCDeliveryReceipt.SUCCESS_FAILURE)),
            eq((byte) 0),
            eq(body));
}
/**
 * Delegates to the underlying JDBC metadata, mapping the logical catalog and
 * schema names to their actual names and wrapping the result set for the
 * sharding layer.
 */
@Override
public ResultSet getAttributes(final String catalog, final String schemaPattern,
                               final String typeNamePattern, final String attributeNamePattern) throws SQLException {
    return createDatabaseMetaDataResultSet(
            getDatabaseMetaData().getAttributes(getActualCatalog(catalog), getActualSchema(schemaPattern), typeNamePattern, attributeNamePattern));
}
// getAttributes must wrap the delegate's result in a DatabaseMetaDataResultSet.
@Test
void assertGetAttributes() throws SQLException {
    when(databaseMetaData.getAttributes("test", null, null, null)).thenReturn(resultSet);
    assertThat(shardingSphereDatabaseMetaData.getAttributes("test", null, null, null), instanceOf(DatabaseMetaDataResultSet.class));
}
/**
 * Reads a MySQL binlog TIME2 value: a 3-byte big-endian packed field followed
 * by optional fractional seconds whose width depends on the column metadata.
 *
 * @return {@code MySQLTimeValueUtils.ZERO_OF_TIME} for the special 0x800000
 *         marker, otherwise a {@link LocalTime} with nanosecond precision
 */
@Override
public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) {
    int time = payload.getByteBuf().readUnsignedMedium();
    if (0x800000 == time) {
        // 0x800000 is MySQL's encoding of the zero time value.
        return MySQLTimeValueUtils.ZERO_OF_TIME;
    }
    // Fractional seconds are read AFTER the packed field, sized by column meta.
    MySQLFractionalSeconds fractionalSeconds = new MySQLFractionalSeconds(columnDef.getColumnMeta(), payload);
    // Packed layout (low 22 bits): 10 bits hour, 6 bits minute, 6 bits second.
    int hour = (time >> 12) % (1 << 10);
    int minute = (time >> 6) % (1 << 6);
    int second = time % (1 << 6);
    return LocalTime.of(hour, minute, second).withNano(fractionalSeconds.getNanos());
}
// With 6 fractional digits, 16:08:04 plus microsecond value 10123 must decode
// to the equivalent nanosecond-precision LocalTime.
@Test
void assertReadWithFraction6() {
    columnDef.setColumnMeta(6);
    when(payload.getByteBuf()).thenReturn(byteBuf);
    // First read: packed hour/minute/second with the sign bit set; second read: fraction.
    when(byteBuf.readUnsignedMedium()).thenReturn(0x800000 | (0x10 << 12) | (0x08 << 6) | 0x04, 10123);
    assertThat(new MySQLTime2BinlogProtocolValue().read(columnDef, payload), is(LocalTime.of(16, 8, 4).withNano(10123000)));
}
/**
 * Loads a JDBC driver by class name using this class' own class loader.
 *
 * @param className fully qualified driver class name
 * @return the loaded driver
 * @throws DriverLoadException if the driver cannot be loaded
 */
public static Driver load(String className) throws DriverLoadException {
    // Delegate to the two-argument overload with our defining class loader.
    return load(className, DriverLoader.class.getClassLoader());
}
// Loading a nonexistent driver class must raise DriverLoadException.
@Test(expected = DriverLoadException.class)
public void testLoad_String_ex() throws Exception {
    final String className = "bad.Driver";
    DriverLoader.load(className);
}
/**
 * Returns the total bytes produced across all partitions' subpartitions.
 * All partitions must have recorded their byte counts before this is called.
 */
@Override
public long getNumBytesProduced() {
    checkState(
            subpartitionBytesByPartitionIndex.size() == numOfPartitions,
            "Not all partition infos are ready");
    // Sum every subpartition's byte count over every recorded partition.
    return subpartitionBytesByPartitionIndex.values().stream()
            .flatMapToLong(Arrays::stream)
            .reduce(0L, Long::sum);
}
// A partition may be reset and re-recorded; totals must reflect the latest info.
@Test
void testPartitionFinishedMultiTimes() {
    PointwiseBlockingResultInfo resultInfo =
            new PointwiseBlockingResultInfo(new IntermediateDataSetID(), 2, 2);
    resultInfo.recordPartitionInfo(0, new ResultPartitionBytes(new long[] {32L, 64L}));
    resultInfo.recordPartitionInfo(1, new ResultPartitionBytes(new long[] {64L, 128L}));
    assertThat(resultInfo.getNumOfRecordedPartitions()).isEqualTo(2);
    assertThat(resultInfo.getNumBytesProduced()).isEqualTo(288L);

    // reset partition info
    resultInfo.resetPartitionInfo(0);
    assertThat(resultInfo.getNumOfRecordedPartitions()).isOne();

    // record partition info again
    resultInfo.recordPartitionInfo(0, new ResultPartitionBytes(new long[] {64L, 128L}));
    assertThat(resultInfo.getNumBytesProduced()).isEqualTo(384L);
}
/**
 * Returns whether the given role name occurs at most once among the
 * configured roles (i.e. it is not duplicated).
 */
public boolean isUniqueRoleName(final CaseInsensitiveString roleName) {
    final int occurrences = Collections.frequency(roleNames(), roleName);
    return occurrences <= 1;
}
// Two roles named "view" make that name non-unique.
@Test
public void isUniqueRoleName_shouldBeFalseWithMultipleRolesWithSameName() throws Exception {
    RolesConfig rolesConfig = new RolesConfig(
            new RoleConfig(new CaseInsensitiveString("admin")),
            new RoleConfig(new CaseInsensitiveString("view")),
            new RoleConfig(new CaseInsensitiveString("view")));
    assertFalse(rolesConfig.isUniqueRoleName(new CaseInsensitiveString("view")));
}
/** Creates a fresh cursor over the assigned slots of this 12-byte-key array. */
@Override
public HashSlotCursor12byteKey cursor() {
    return new CursorIntKey2();
}
// Cursor must visit every inserted key exactly once, with matching key2 and value.
@Test
public void testCursor_withManyValues() {
    final int factor = 123456;
    final int k = 1000;
    // Insert keys 1..k, each with key2 derived deterministically from key1.
    for (int i = 1; i <= k; i++) {
        insert(i, factor * i);
    }
    boolean[] verifiedKeys = new boolean[k];
    HashSlotCursor12byteKey cursor = hsa.cursor();
    while (cursor.advance()) {
        long key1 = cursor.key1();
        int key2 = cursor.key2();
        long valueAddress = cursor.valueAddress();
        // key2 must match the deterministic mapping used at insert time.
        assertEquals(key1 * factor, key2);
        verifyValue(key1, key2, valueAddress);
        verifiedKeys[((int) key1) - 1] = true;
    }
    // Every inserted key must have been seen by the cursor.
    for (int i = 0; i < k; i++) {
        assertTrue("Failed to encounter key " + i, verifiedKeys[i]);
    }
}
/**
 * Updates a diy page after validating that it exists and that its name is
 * unique within its template.
 */
@Override
public void updateDiyPage(DiyPageUpdateReqVO updateReqVO) {
    // Validate the page exists
    validateDiyPageExists(updateReqVO.getId());
    // Validate the name is unique within the template
    validateNameUnique(updateReqVO.getId(), updateReqVO.getTemplateId(), updateReqVO.getName());
    // Perform the update
    DiyPageDO updateObj = DiyPageConvert.INSTANCE.convert(updateReqVO);
    diyPageMapper.updateById(updateObj);
}
// Updating a page whose id does not exist must raise DIY_PAGE_NOT_EXISTS.
@Test
public void testUpdateDiyPage_notExists() {
    // Prepare parameters
    DiyPageUpdateReqVO reqVO = randomPojo(DiyPageUpdateReqVO.class);
    // Invoke and assert the expected exception
    assertServiceException(() -> diyPageService.updateDiyPage(reqVO), DIY_PAGE_NOT_EXISTS);
}
/**
 * Parses a Battery Level value: exactly one byte in the range [0, 100].
 * Valid values are forwarded to onBatteryLevelChanged; anything else is
 * reported via onInvalidDataReceived.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);

    if (data.size() == 1) {
        final int batteryLevel = data.getIntValue(Data.FORMAT_UINT8, 0);
        // Battery level is defined as a percentage, so only 0..100 is valid.
        if (batteryLevel >= 0 && batteryLevel <= 100) {
            onBatteryLevelChanged(device, batteryLevel);
            return;
        }
    }
    onInvalidDataReceived(device, data);
}
// A single 0x64 byte must be parsed as a 100% battery level.
@Test
public void onBatteryLevelChanged_fullBattery() {
    final DataReceivedCallback callback = new BatteryLevelDataCallback() {
        @Override
        public void onBatteryLevelChanged(@NonNull final BluetoothDevice device, final int batteryLevel) {
            assertEquals("Correct data", batteryLevel, 100);
        }

        @Override
        public void onInvalidDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
            // Deliberately failing assertion: reaching this branch is a test failure.
            assertEquals("Correct data reported as invalid", 1, 2);
        }
    };
    final Data data = new Data(new byte[] { 0x64 });
    callback.onDataReceived(null, data);
}
/** Returns the plugin name this handler serves: the divide plugin. */
@Override
public String pluginNamed() {
    return PluginEnum.DIVIDE.getName();
}
// Handler must report the divide plugin's name.
@Test
public void pluginNamedTest() {
    assertEquals(dividePluginDataHandler.pluginNamed(), PluginEnum.DIVIDE.getName());
}
/**
 * Computes this broker's local share of a resource-group quota by distributing
 * the residual (configured minus total observed usage) proportionally to the
 * local usage fraction.
 *
 * @param confUsage configured limit; negative means the limit is not set, in
 *                  which case a sentinel of -1 is returned
 * @param myUsage   this broker's observed usage (must be non-negative)
 * @param allUsages observed usage from all brokers (entries must be non-negative)
 * @return the new local quota (at least 1 once usage exists), the configured
 *         value when total usage is zero, or -1 when unconfigured
 * @throws PulsarAdminException if local or total usage is negative
 */
@Override
public long computeLocalQuota(long confUsage, long myUsage, long[] allUsages) throws PulsarAdminException {
    // ToDo: work out the initial conditions: we may allow a small number of "first few iterations" to go
    // unchecked as we get some history of usage, or follow some other "TBD" method.

    if (confUsage < 0) {
        // This can happen if the RG is not configured with this particular limit (message or byte count) yet.
        val retVal = -1;
        if (log.isDebugEnabled()) {
            log.debug("Configured usage ({}) is not set; returning a special value ({}) for calculated quota",
                    confUsage, retVal);
        }
        return retVal;
    }

    long totalUsage = 0;
    for (long usage : allUsages) {
        totalUsage += usage;
    }

    if (myUsage < 0 || totalUsage < 0) {
        String errMesg = String.format("Local usage (%d) or total usage (%d) is negative", myUsage, totalUsage);
        log.error(errMesg);
        throw new PulsarAdminException(errMesg);
    }

    // If the total usage is zero (which may happen during initial transients), just return the configured value.
    // The caller is expected to check the value returned, or not call here with a zero global usage.
    // [This avoids a division by zero when calculating the local share.]
    if (totalUsage == 0) {
        if (log.isDebugEnabled()) {
            log.debug("computeLocalQuota: totalUsage is zero; "
                    + "returning the configured usage ({}) as new local quota", confUsage);
        }
        return confUsage;
    }

    if (myUsage > totalUsage) {
        String errMesg = String.format("Local usage (%d) is greater than total usage (%d)", myUsage, totalUsage);
        // Log as a warning [in case this can happen transiently (?)].
        log.warn(errMesg);
    }

    // How much unused capacity is left over?
    float residual = confUsage - totalUsage;

    // New quota is the old usage incremented by any residual as a ratio of the local usage to the total usage.
    // This should result in the calculatedQuota increasing proportionately if total usage is less than the
    // configured usage, and reducing proportionately if the total usage is greater than the configured usage.
    // Capped to 1, to prevent negative or zero setting of quota.
    // the rate limiter code assumes that rate value of 0 or less to mean that no rate limit should be applied
    float myUsageFraction = (float) myUsage / totalUsage;
    float calculatedQuota = max(myUsage + residual * myUsageFraction, 1);

    val longCalculatedQuota = (long) calculatedQuota;
    if (log.isDebugEnabled()) {
        log.debug("computeLocalQuota: myUsage={}, totalUsage={}, myFraction={}; newQuota returned={} [long: {}]",
                myUsage, totalUsage, myUsageFraction, calculatedQuota, longCalculatedQuota);
    }

    return longCalculatedQuota;
}
// A negative local usage must be rejected with PulsarAdminException.
@Test
public void testRQCalcNegativeLocalUsageTest() {
    final long[] allUsage = { 0 };
    Assert.assertThrows(PulsarAdminException.class,
            () -> this.rqCalc.computeLocalQuota(0, -1, allUsage));
}
/**
 * Installs the gate's input channels, indexing them by partition id and
 * channel info, and counts unknown (not-yet-resolved) channels so later
 * updates can detect when all channels are initialized.
 *
 * @throws IllegalArgumentException if the number of channels does not match
 *         the gate's expected channel count
 */
public void setInputChannels(InputChannel... channels) {
    if (channels.length != numberOfInputChannels) {
        throw new IllegalArgumentException(
                "Expected "
                        + numberOfInputChannels
                        + " channels, "
                        + "but got "
                        + channels.length);
    }
    // All channel-map mutations happen under the request lock.
    synchronized (requestLock) {
        System.arraycopy(channels, 0, this.channels, 0, numberOfInputChannels);
        for (InputChannel inputChannel : channels) {
            // Count a channel as uninitialized only if it is newly inserted
            // (put returned null) AND it is still an UnknownInputChannel.
            if (inputChannels
                                    .computeIfAbsent(
                                            inputChannel.getPartitionId().getPartitionId(),
                                            ignored -> new HashMap<>())
                                    .put(inputChannel.getChannelInfo(), inputChannel)
                            == null
                    && inputChannel instanceof UnknownInputChannel) {
                numberOfUninitializedChannels++;
            }
        }
    }
}
// A checkpoint barrier on a gate whose only channel is still unknown must be declined.
@Test
void testCheckpointsDeclinedUnlessAllChannelsAreKnown() {
    SingleInputGate gate =
            createInputGate(createNettyShuffleEnvironment(), 1, ResultPartitionType.PIPELINED);
    gate.setInputChannels(
            new InputChannelBuilder().setChannelIndex(0).buildUnknownChannel(gate));
    assertThatThrownBy(
                    () ->
                            gate.checkpointStarted(
                                    new CheckpointBarrier(
                                            1L, 1L, alignedNoTimeout(CHECKPOINT, getDefault()))))
            .isInstanceOf(CheckpointException.class);
}
/**
 * Returns per-partition statistics from the table's {@code partitions}
 * metadata table. Unpartitioned tables have no "partition" column, which is
 * detected via {@code schema.apply} throwing; in that case a null partition
 * value is selected instead.
 */
@Override
public Dataset<PartitionStat> get() {
    StructType partitionSchema =
            spark.sql(String.format("SELECT * FROM %s.partitions", tableName)).schema();
    try {
        // Throws IllegalArgumentException when the "partition" column is absent.
        partitionSchema.apply("partition");
        return spark
                .sql(String.format("SELECT partition, file_count FROM %s.partitions", tableName))
                .map(new TablePartitionStats.PartitionStatMapper(), Encoders.bean(PartitionStat.class));
    } catch (IllegalArgumentException e) {
        // Unpartitioned table: emit a null partition with the file count.
        return spark
                .sql(String.format("SELECT null, file_count FROM %s.partitions", tableName))
                .map(new TablePartitionStats.PartitionStatMapper(), Encoders.bean(PartitionStat.class));
    }
}
// A two-column-partitioned table must report one stat row per partition with
// the correct partition values and file counts.
@Test
public void testPartitionedTablePartitionStats() throws Exception {
    final String testTable = "db.test_table_partition_stats_partitioned";
    try (SparkSession spark = getSparkSession()) {
        spark.sql("USE openhouse");
        spark.sql(
                String.format(
                        "CREATE TABLE %s (id INT, data STRING, dt STRING) PARTITIONED BY (dt, id)",
                        testTable));
        // Two partitions: (2024-01-01, 0) with one file, (2024-01-02, 1) with two files.
        spark.sql(String.format("INSERT INTO %s VALUES (0, '0', '2024-01-01')", testTable));
        spark.sql(String.format("INSERT INTO %s VALUES (1, '1', '2024-01-02')", testTable));
        spark.sql(String.format("INSERT INTO %s VALUES (1, '2', '2024-01-02')", testTable));
        TablePartitionStats tablePartitionStats =
                TablePartitionStats.builder().spark(spark).tableName(testTable).build();
        List<PartitionStat> stats = tablePartitionStats.get().collectAsList();
        Assertions.assertEquals(2, stats.size());
        // Sort by the first partition value for deterministic assertions.
        stats.sort(Comparator.comparing(a -> a.getValues().get(0)));
        Assertions.assertEquals(Arrays.asList("2024-01-01", "0"), stats.get(0).getValues());
        Assertions.assertEquals(1, stats.get(0).getFileCount());
        Assertions.assertEquals(Arrays.asList("2024-01-02", "1"), stats.get(1).getValues());
        Assertions.assertEquals(2, stats.get(1).getFileCount());
    }
}
/**
 * Derives a stable request id for a media insertion from the target editor:
 * mixes the editor's field id with its package-name hash. A missing editor
 * yields the fixed sentinel id 0.
 */
@VisibleForTesting
static int getIdForInsertionRequest(EditorInfo info) {
    if (info == null) {
        return 0;
    }
    final int[] components = {info.fieldId, info.packageName.hashCode()};
    return Arrays.hashCode(components);
}
// When the media insertion request is cancelled, nothing may be committed
// to the input connection.
@Test
public void testDoesNotCommitIfRequestCancelled() {
    simulateFinishInputFlow();
    EditorInfo info = createEditorInfoTextWithSuggestionsForSetUp();
    EditorInfoCompat.setContentMimeTypes(info, new String[] {"image/gif"});
    simulateOnStartInputFlow(false, info);

    mAnySoftKeyboardUnderTest.simulateKeyPress(KeyCodes.IMAGE_MEDIA_POPUP);

    // Capture the callback passed to the remote-insertion request...
    ArgumentCaptor<InsertionRequestCallback> argumentCaptor =
            ArgumentCaptor.forClass(InsertionRequestCallback.class);
    Mockito.verify(mRemoteInsertion)
            .startMediaRequest(Mockito.any(), Mockito.anyInt(), argumentCaptor.capture());

    // ...and cancel it using the id derived from the same editor info.
    argumentCaptor
            .getValue()
            .onMediaRequestCancelled(AnySoftKeyboardMediaInsertion.getIdForInsertionRequest(info));

    Assert.assertNull(mAnySoftKeyboardUnderTest.getCommitedInputContentInfo());
}
/**
 * Fills missing max_speed values for every edge using country-dependent
 * defaults, treating any non-RURAL urban-density edge as "urban".
 */
public void fillMaxSpeed(Graph graph, EncodingManager em) {
    // In DefaultMaxSpeedParser and in OSMMaxSpeedParser we don't have the rural/urban info,
    // but now we have and can fill the country-dependent max_speed value where missing.
    EnumEncodedValue<UrbanDensity> udEnc = em.getEnumEncodedValue(UrbanDensity.KEY, UrbanDensity.class);
    fillMaxSpeed(graph, em, edge -> edge.get(udEnc) != UrbanDensity.RURAL);
}
// In Lithuania a rural primary road defaults to 90 km/h, but a compacted
// surface lowers the legal default to 70 km/h.
@Test
public void testSurface() {
    ReaderWay way = new ReaderWay(0L);
    way.setTag("country", Country.LTU);
    way.setTag("highway", "primary");
    EdgeIteratorState edge = createEdge(way).set(urbanDensity, RURAL);
    calc.fillMaxSpeed(graph, em);
    assertEquals(90, edge.get(maxSpeedEnc), 1);

    way = new ReaderWay(0L);
    way.setTag("country", Country.LTU);
    way.setTag("highway", "primary");
    way.setTag("surface", "compacted");
    edge = createEdge(way).set(urbanDensity, RURAL);
    calc.fillMaxSpeed(graph, em);
    assertEquals(70, edge.get(maxSpeedEnc), 1);
}
/** Returns the radix of this codec: the number of characters in its alphabet. */
public int base() {
    return this.alphabet.alphabetChars.length;
}
// Base58 codec round-trips the IETF draft test vectors plus boundary cases
// (empty input, leading zero bytes, 0xFF patterns).
@Test
void base58_codec_test_cases_pass() {
    var b58 = Base58.codec();
    assertEquals(58, b58.base());

    // https://datatracker.ietf.org/doc/html/draft-msporny-base58-03 test vectors:
    verifyRoundtrip(b58, "Hello World!", "2NEpo7TZRRrLZSi2U");
    verifyRoundtrip(b58, "The quick brown fox jumps over the lazy dog.",
            "USm3fpXnKG5EUBx2ndxBDMPVciP5hGey2Jh4NDv6gmeo1LkMeiKrLJUUBk6Z");
    verifyRoundtrip(b58, unhex("0000287fb4cd"), "11233QC4");

    // Values that have been cross-referenced with other encoder implementations:
    verifyRoundtrip(b58, "", "");
    verifyRoundtrip(b58, unhex("00"), "1");
    verifyRoundtrip(b58, unhex("0000"), "11");
    verifyRoundtrip(b58, unhex("ff"), "5Q");
    verifyRoundtrip(b58, unhex("00ff"), "15Q");
    verifyRoundtrip(b58, unhex("ff00"), "LQX");
    verifyRoundtrip(b58, unhex("ffffff"), "2UzHL");
    verifyRoundtrip(b58, unhex("287fb4cd"), "233QC4");
}
/**
 * Downloads the given file from the server over HTTP and streams the response
 * body to the file's local path.
 *
 * @return {@code true} on success, {@code false} if the server response
 *         carried no entity
 * @throws IOException on network or write failure
 * @throws GeneralSecurityException if the HTTP client cannot be built securely
 */
protected synchronized boolean download(final DownloadableFile downloadableFile) throws IOException, GeneralSecurityException {
    File toDownload = downloadableFile.getLocalFile();
    LOG.info("Downloading {}", toDownload);
    String url = downloadableFile.url(urlGenerator);
    final HttpRequestBase request = new HttpGet(url);
    request.setConfig(RequestConfig.custom().setConnectTimeout(HTTP_TIMEOUT_IN_MILLISECONDS).build());
    // Client and response are closed together; the output stream is closed
    // before the response so the body is fully flushed to disk.
    try (CloseableHttpClient httpClient = httpClientBuilder.build();
         CloseableHttpResponse response = httpClient.execute(request)) {
        LOG.info("Got server response");
        if (response.getEntity() == null) {
            LOG.error("Unable to read file from the server response");
            return false;
        }
        handleInvalidResponse(response, url);
        try (BufferedOutputStream outStream = new BufferedOutputStream(new FileOutputStream(downloadableFile.getLocalFile()))) {
            response.getEntity().writeTo(outStream);
            LOG.info("Piped the stream to {}", downloadableFile);
        }
    }
    return true;
}
// A server response without an entity must make download() return false.
@Test
public void shouldReturnFalseIfTheServerDoesNotRespondWithEntity() throws Exception {
    GoAgentServerHttpClientBuilder builder = mock(GoAgentServerHttpClientBuilder.class);
    CloseableHttpClient closeableHttpClient = mock(CloseableHttpClient.class);
    when(builder.build()).thenReturn(closeableHttpClient);
    // The mocked response carries no entity by default.
    CloseableHttpResponse httpResponse = mock(CloseableHttpResponse.class);
    when(closeableHttpClient.execute(any(HttpRequestBase.class))).thenReturn(httpResponse);

    ServerBinaryDownloader downloader = new ServerBinaryDownloader(builder, ServerUrlGeneratorMother.generatorFor("localhost", server.getPort()));
    assertThat(downloader.download(DownloadableFile.AGENT), is(false));
}
/**
 * Creates a MongoDB read transform with conservative defaults: 60s idle
 * timeout, auto split count, SSL disabled, and a plain find() query.
 */
public static Read read() {
    return new AutoValue_MongoDbIO_Read.Builder()
            .setMaxConnectionIdleTime(60000)
            // 0 lets the runner decide the number of splits.
            .setNumSplits(0)
            .setBucketAuto(false)
            .setSslEnabled(false)
            .setIgnoreSSLCertificate(false)
            .setSslInvalidHostNameAllowed(false)
            .setQueryFn(FindQuery.create())
            .build();
}
// An aggregation pipeline with a $match plus $limit 10 must yield exactly 10 documents.
@Test
public void testReadWithAggregateWithLimit() throws Exception {
    List<BsonDocument> aggregates = new ArrayList<BsonDocument>();
    aggregates.add(
            new BsonDocument(
                    "$match",
                    new BsonDocument("country", new BsonDocument("$eq", new BsonString("England")))));
    aggregates.add(new BsonDocument("$limit", new BsonInt32(10)));

    PCollection<Document> output =
            pipeline.apply(
                    MongoDbIO.read()
                            .withUri("mongodb://localhost:" + port)
                            .withDatabase(DATABASE_NAME)
                            .withCollection(COLLECTION_NAME)
                            .withQueryFn(AggregationQuery.create().withMongoDbPipeline(aggregates)));

    PAssert.thatSingleton(output.apply("Count", Count.globally())).isEqualTo(10L);

    pipeline.run();
}
/**
 * Executes the given {@link Pipeline} on the direct runner.
 *
 * <p>Round-trips the options through JSON, rewrites the pipeline, builds the
 * execution graph and evaluation context, starts the parallel executor, and
 * optionally blocks until the pipeline finishes.
 *
 * @param pipeline the pipeline to execute
 * @return the result handle for the running (or finished) pipeline
 * @throws IllegalArgumentException if the options cannot be serialized to JSON
 */
@Override
public DirectPipelineResult run(Pipeline pipeline) {
  try {
    // Serialize/deserialize the options to validate they are JSON round-trippable.
    options =
        MAPPER
            .readValue(MAPPER.writeValueAsBytes(options), PipelineOptions.class)
            .as(DirectOptions.class);
  } catch (IOException e) {
    throw new IllegalArgumentException(
        "PipelineOptions specified failed to serialize to JSON.", e);
  }
  performRewrites(pipeline);
  MetricsEnvironment.setMetricsSupported(true);
  try {
    // Walk the pipeline to build the direct-runner graph and track keyed values.
    DirectGraphVisitor graphVisitor = new DirectGraphVisitor();
    pipeline.traverseTopologically(graphVisitor);
    @SuppressWarnings("rawtypes")
    KeyedPValueTrackingVisitor keyedPValueVisitor = KeyedPValueTrackingVisitor.create();
    pipeline.traverseTopologically(keyedPValueVisitor);
    DisplayDataValidator.validatePipeline(pipeline);
    DisplayDataValidator.validateOptions(options);
    // Dedicated pool for committing metrics counters.
    ExecutorService metricsPool =
        Executors.newCachedThreadPool(
            new ThreadFactoryBuilder()
                .setThreadFactory(MoreExecutors.platformThreadFactory())
                .setDaemon(false) // otherwise you say you want to leak, please don't!
                .setNameFormat("direct-metrics-counter-committer")
                .build());
    DirectGraph graph = graphVisitor.getGraph();
    EvaluationContext context =
        EvaluationContext.create(
            clockSupplier.get(),
            Enforcement.bundleFactoryFor(enabledEnforcements, graph),
            graph,
            keyedPValueVisitor.getKeyedPValues(),
            metricsPool);
    TransformEvaluatorRegistry registry =
        TransformEvaluatorRegistry.javaSdkNativeRegistry(context, options);
    PipelineExecutor executor =
        ExecutorServiceParallelExecutor.create(
            options.getTargetParallelism(),
            registry,
            Enforcement.defaultModelEnforcements(enabledEnforcements),
            context,
            metricsPool);
    executor.start(graph, RootProviderRegistry.javaNativeRegistry(context, options));
    DirectPipelineResult result = new DirectPipelineResult(executor, context);
    if (options.isBlockOnRun()) {
      try {
        result.waitUntilFinish();
      } catch (UserCodeException userException) {
        // Surface user-code failures as pipeline execution failures.
        throw new PipelineExecutionException(userException.getCause());
      } catch (Throwable t) {
        if (t instanceof RuntimeException) {
          throw (RuntimeException) t;
        }
        throw new RuntimeException(t);
      }
    }
    return result;
  } finally {
    // Always restore the metrics flag, even on failure.
    MetricsEnvironment.setMetricsSupported(false);
  }
}
@Test
public void testUnencodableOutputElement() throws Exception {
  Pipeline p = getPipeline();
  // Produce a null Long, which VarLongCoder cannot encode.
  PCollection<Long> pcollection =
      p.apply(Create.of((Void) null))
          .apply(
              ParDo.of(
                  new DoFn<Void, Long>() {
                    @ProcessElement
                    public void processElement(ProcessContext c) {
                      c.output(null);
                    }
                  }))
          .setCoder(VarLongCoder.of());
  // Downstream transform must never run: encoding fails before delivery.
  pcollection.apply(
      ParDo.of(
          new DoFn<Long, Long>() {
            @ProcessElement
            public void unreachable(ProcessContext c) {
              fail("Pipeline should fail to encode a null Long in VarLongCoder");
            }
          }));
  // Expect the coder failure to surface as the pipeline failure cause.
  thrown.expectCause(isA(CoderException.class));
  thrown.expectMessage("cannot encode a null Long");
  p.run();
}
/**
 * Returns the number of milliseconds from {@code rightnow} until the start of
 * the next minute (a value in the range (0, 60000] for non-negative input).
 */
public static int getDelayToNextMinute(long rightnow) {
    // Milliseconds already elapsed within the current minute.
    final long elapsedInMinute = rightnow % MILLISECONDS_PER_MINUTE;
    return (int) (MILLISECONDS_PER_MINUTE - elapsedInMinute);
}
@Test
public void getDelayToNextMinute() throws Exception {
    long now = System.currentTimeMillis();
    int delay = DateUtils.getDelayToNextMinute(now);
    // The delay is in the half-open-from-above range (0, 60000]: when `now`
    // lands exactly on a minute boundary the method returns a full minute, so
    // the old `delay < 60000` assertion was flaky. Also pin the lower bound.
    Assert.assertTrue("delay must be positive, got " + delay, delay > 0);
    Assert.assertTrue("delay must not exceed one minute, got " + delay, delay <= 60000);
}
/**
 * Parses a {@link PrivateKey} from the given (optionally encrypted) key file.
 *
 * <p>Convenience overload that delegates to the three-argument variant with
 * fallback parsing enabled ({@code true}).
 *
 * @param keyFile     the PEM key file to read
 * @param keyPassword password for an encrypted key, or {@code null} if unencrypted
 */
protected static PrivateKey toPrivateKey(File keyFile, String keyPassword) throws NoSuchAlgorithmException,
        NoSuchPaddingException, InvalidKeySpecException,
        InvalidAlgorithmParameterException,
        KeyException, IOException {
    return toPrivateKey(keyFile, keyPassword, true);
}
@Test
public void testPkcs1AesEncryptedDsaWrongPassword() throws Exception {
    // Decrypting an AES-encrypted PKCS#1 DSA key with a bad passphrase must fail with IOException.
    final File encryptedKey =
            new File(getClass().getResource("dsa_pkcs1_aes_encrypted.key").getFile());
    assertThrows(IOException.class, () -> SslContext.toPrivateKey(encryptedKey, "wrong"));
}
/**
 * Casts the given node to a {@link YamlSequence}.
 *
 * @param node the node to cast; {@code null} is passed through as {@code null}
 * @throws YamlException if the node exists but is not a sequence
 */
public static YamlSequence asSequence(YamlNode node) {
    if (node == null || node instanceof YamlSequence) {
        return (YamlSequence) node;
    }
    throw new YamlException(String.format("Child %s is not a sequence, it's actual type is %s",
            node.nodeName(), node.getClass()));
}
@Test
public void asSequenceReturnsIfSequencePassed() {
    // A node that already is a sequence must pass through without throwing.
    YamlNode sequenceNode = new YamlSequenceImpl(null, "sequence");
    YamlUtil.asSequence(sequenceNode);
}
/**
 * Routes a precise (equality) sharding value to a target via modulo hashing.
 *
 * <p>The (possibly truncated) sharding value is taken modulo {@code shardingCount}
 * and the remainder is padded/normalized into the target-name suffix.
 *
 * @return the matched target name, or {@code null} when no target matches
 * @throws NullShardingValueException if the sharding value is null
 */
@Override
public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) {
    ShardingSpherePreconditions.checkNotNull(shardingValue.getValue(), NullShardingValueException::new);
    // cutShardingValue may truncate the value (e.g. keep trailing digits) before the modulo.
    String shardingResultSuffix = getShardingResultSuffix(cutShardingValue(shardingValue.getValue()).mod(new BigInteger(String.valueOf(shardingCount))).toString());
    return ShardingAutoTableAlgorithmUtils.findMatchedTargetName(availableTargetNames, shardingResultSuffix, shardingValue.getDataNodeInfo()).orElse(null);
}
@Test
void assertPreciseDoShardingWithValueIsBigIntegerAndZeroPadding() {
    // A value larger than Long.MAX_VALUE must still shard correctly with zero-padded suffixes.
    ModShardingAlgorithm modAlgorithm =
            (ModShardingAlgorithm) TypedSPILoader.getService(ShardingAlgorithm.class, "MOD", createZeroPaddingProperties());
    PreciseShardingValue<Comparable<?>> bigIntegerValue =
            new PreciseShardingValue<>("t_order", "order_id", DATA_NODE_INFO, "12345678910111213141516");
    assertThat(modAlgorithm.doSharding(createAvailableIncludeZeroTargetNames(), bigIntegerValue), is("t_order_07"));
}
/**
 * Generates a random secret key suitable for the given ZUC algorithm variant.
 *
 * @param algorithm the ZUC variant (e.g. ZUC-128 or ZUC-256) whose key length to use
 * @return the raw encoded key bytes
 */
public static byte[] generateKey(ZUCAlgorithm algorithm) {
	return KeyUtil.generateKey(algorithm.value).getEncoded();
}
@Test
public void zuc256Test() {
	// Round-trip a random message through ZUC-256 encrypt/decrypt.
	final byte[] key = ZUC.generateKey(ZUC.ZUCAlgorithm.ZUC_256);
	final byte[] iv = RandomUtil.randomBytes(25);
	final ZUC cipher = new ZUC(ZUC.ZUCAlgorithm.ZUC_256, key, iv);

	final String plainText = RandomUtil.randomString(500);
	final byte[] cipherText = cipher.encrypt(plainText);
	final String decrypted = cipher.decryptStr(cipherText, CharsetUtil.CHARSET_UTF_8);
	assertEquals(plainText, decrypted);
}
/**
 * Renders the given AST back into SQL text, without any trailing newlines.
 */
public static String formatSql(final AstNode root) {
  final StringBuilder sql = new StringBuilder();
  new Formatter(sql).process(root, 0);
  // The formatter may leave trailing newlines; strip them for a clean result.
  return StringUtils.stripEnd(sql.toString(), "\n");
}
@Test
public void shouldFormatCsasPartitionBy() {
  // Given a CSAS with PARTITION BY:
  final Statement statement =
      parseSingle("CREATE STREAM S AS SELECT * FROM ADDRESS PARTITION BY ADDRESS;");

  // When formatted back to SQL:
  final String formatted = SqlFormatter.formatSql(statement);

  // Then the clause order and implicit EMIT CHANGES are preserved:
  assertThat(formatted, is("CREATE STREAM S AS SELECT *\n"
      + "FROM ADDRESS ADDRESS\n"
      + "PARTITION BY ADDRESS\n"
      + "EMIT CHANGES"
  ));
}
public FEELFnResult<Boolean> invoke(@ParameterName( "list" ) List list) { if ( list == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null")); } boolean result = false; boolean containsNull = false; // Spec. definition: return true if any item is true, else false if all items are false, else null for ( final Object element : list ) { if (element != null && !(element instanceof Boolean)) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not a Boolean")); } else { if (element != null) { result |= (Boolean) element; } else if (!containsNull) { containsNull = true; } } } if (containsNull && !result) { return FEELFnResult.ofResult( null ); } else { return FEELFnResult.ofResult( result ); } }
@Test
void invokeArrayParamReturnFalse() {
    // All-false input must yield false (not null).
    final Object[] allFalse = {Boolean.FALSE, Boolean.FALSE};
    FunctionTestUtil.assertResult(anyFunction.invoke(allFalse), false);
}
/**
 * Returns whether this range and {@code other} share at least one point:
 * each range must start no later than the other ends.
 */
public boolean overlaps(Range other) {
    checkTypeCompatibility(other);
    // Disjoint when this range begins after the other ends...
    if (this.getLow().compareTo(other.getHigh()) > 0) {
        return false;
    }
    // ...otherwise they overlap iff the other begins no later than this ends.
    return other.getLow().compareTo(this.getHigh()) <= 0;
}
@Test
public void testOverlaps() {
    // Open/closed endpoint combinations around a shared boundary.
    assertTrue(Range.greaterThan(BIGINT, 1L).overlaps(Range.lessThanOrEqual(BIGINT, 2L)));
    // (2, inf) vs (-inf, 2): boundary excluded on both sides -> disjoint.
    assertFalse(Range.greaterThan(BIGINT, 2L).overlaps(Range.lessThan(BIGINT, 2L)));
    assertTrue(Range.range(BIGINT, 1L, true, 3L, false).overlaps(Range.equal(BIGINT, 2L)));
    assertTrue(Range.range(BIGINT, 1L, true, 3L, false).overlaps(Range.range(BIGINT, 2L, false, 10L, false)));
    // [1, 3) vs [3, 10): 3 excluded from the first range -> disjoint.
    assertFalse(Range.range(BIGINT, 1L, true, 3L, false).overlaps(Range.range(BIGINT, 3L, true, 10L, false)));
    // [1, 3] vs [3, 10): both include 3 -> overlap.
    assertTrue(Range.range(BIGINT, 1L, true, 3L, true).overlaps(Range.range(BIGINT, 3L, true, 10L, false)));
    // The unbounded range overlaps everything.
    assertTrue(Range.all(BIGINT).overlaps(Range.equal(BIGINT, Long.MAX_VALUE)));
}
/** Returns this instance's name. */
public String getName() {
    return this.name;
}
@Test
public void getName() {
    // Registering an environment must make it resolvable by (upper-cased) name.
    final String envName = "getName";
    Env.addEnvironment(envName);
    assertEquals(envName.trim().toUpperCase(), Env.valueOf(envName).toString());
}
/**
 * Handles all WebHDFS-style GET operations for HttpFS.
 *
 * <p>Dispatches on the {@code op} query parameter to the matching
 * {@code FSOperations} command, executing it as the authenticated (proxy)
 * user. In write-only access mode only GETFILESTATUS and LISTSTATUS are
 * permitted; everything else is rejected with 403.
 *
 * @param path    the file-system path the operation targets
 * @param uriInfo request URI info, used to build redirect locations
 * @param op      the GET operation to perform
 * @param params  parsed operation parameters
 * @param request the raw servlet request (for audit logging)
 * @return the HTTP response for the operation
 * @throws IOException               on file-system errors or an unknown op
 * @throws FileSystemAccessException on file-system access errors
 */
@GET
@Path("{path:.*}")
@Produces({MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
public Response get(@PathParam("path") String path,
    @Context UriInfo uriInfo,
    @QueryParam(OperationParam.NAME) OperationParam op,
    @Context Parameters params,
    @Context HttpServletRequest request)
    throws IOException, FileSystemAccessException {
  // Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode
  if((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) &&
      (op.value() != HttpFSFileSystem.Operation.LISTSTATUS) &&
      accessMode == AccessMode.WRITEONLY) {
    return Response.status(Response.Status.FORBIDDEN).build();
  }
  UserGroupInformation user = HttpUserGroupInformation.get();
  Response response;
  path = makeAbsolute(path);
  // Record the operation and caller for audit logging.
  MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
  MDC.put("hostname", request.getRemoteAddr());
  switch (op.value()) {
  case OPEN: {
    Boolean noRedirect = params.get(
        NoRedirectParam.NAME, NoRedirectParam.class);
    if (noRedirect) {
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      //Invoking the command directly using an unmanaged FileSystem that is
      // released by the FileSystemReleaseFilter
      final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
      final FileSystem fs = createFileSystem(user);
      InputStream is = null;
      UserGroupInformation ugi = UserGroupInformation
          .createProxyUser(user.getShortUserName(),
              UserGroupInformation.getLoginUser());
      try {
        is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
          @Override
          public InputStream run() throws Exception {
            return command.execute(fs);
          }
        });
      } catch (InterruptedException ie) {
        LOG.warn("Open interrupted.", ie);
        Thread.currentThread().interrupt();
      }
      Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
      Long len = params.get(LenParam.NAME, LenParam.class);
      AUDIT_LOG.info("[{}] offset [{}] len [{}]",
          new Object[] { path, offset, len });
      InputStreamEntity entity = new InputStreamEntity(is, offset, len);
      response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM)
          .build();
    }
    break;
  }
  case GETFILESTATUS: {
    FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS: {
    String filter = params.get(FilterParam.NAME, FilterParam.class);
    FSOperations.FSListStatus command =
        new FSOperations.FSListStatus(path, filter);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] filter [{}]", path, (filter != null) ? filter : "-");
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETHOMEDIRECTORY: {
    enforceRootPath(op.value(), path);
    FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("Home Directory for [{}]", user);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case INSTRUMENTATION: {
    // Instrumentation snapshots are admin-only.
    enforceRootPath(op.value(), path);
    Groups groups = HttpFSServerWebApp.get().get(Groups.class);
    Set<String> userGroups = groups.getGroupsSet(user.getShortUserName());
    if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
      throw new AccessControlException(
          "User not in HttpFSServer admin group");
    }
    Instrumentation instrumentation =
        HttpFSServerWebApp.get().get(Instrumentation.class);
    Map snapshot = instrumentation.getSnapshot();
    response = Response.ok(snapshot).build();
    break;
  }
  case GETCONTENTSUMMARY: {
    FSOperations.FSContentSummary command =
        new FSOperations.FSContentSummary(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Content summary for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETQUOTAUSAGE: {
    FSOperations.FSQuotaUsage command = new FSOperations.FSQuotaUsage(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Quota Usage for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILECHECKSUM: {
    FSOperations.FSFileChecksum command =
        new FSOperations.FSFileChecksum(path);
    Boolean noRedirect = params.get(
        NoRedirectParam.NAME, NoRedirectParam.class);
    AUDIT_LOG.info("[{}]", path);
    if (noRedirect) {
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      Map json = fsExecute(user, command);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    }
    break;
  }
  case GETFILEBLOCKLOCATIONS: {
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    // Negative or missing offset/len fall back to "whole file".
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocations command =
        new FSOperations.FSFileBlockLocations(path, offset, len);
    @SuppressWarnings("rawtypes")
    Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("BlockLocations", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETACLSTATUS: {
    FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("ACL status for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETXATTRS: {
    List<String> xattrNames =
        params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
    XAttrCodec encoding =
        params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class);
    FSOperations.FSGetXAttrs command =
        new FSOperations.FSGetXAttrs(path, xattrNames, encoding);
    @SuppressWarnings("rawtypes")
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttrs for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTXATTRS: {
    FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
    @SuppressWarnings("rawtypes")
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttr names for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS_BATCH: {
    String startAfter = params.get(
        HttpFSParametersProvider.StartAfterParam.NAME,
        HttpFSParametersProvider.StartAfterParam.class);
    byte[] token = HttpFSUtils.EMPTY_BYTES;
    if (startAfter != null) {
      token = startAfter.getBytes(StandardCharsets.UTF_8);
    }
    FSOperations.FSListStatusBatch command = new FSOperations
        .FSListStatusBatch(path, token);
    @SuppressWarnings("rawtypes")
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] token [{}]", path, token);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOT: {
    FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETALLSTORAGEPOLICY: {
    FSOperations.FSGetAllStoragePolicies command =
        new FSOperations.FSGetAllStoragePolicies();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTORAGEPOLICY: {
    FSOperations.FSGetStoragePolicy command =
        new FSOperations.FSGetStoragePolicy(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFF: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    FSOperations.FSGetSnapshotDiff command =
        new FSOperations.FSGetSnapshotDiff(path, oldSnapshotName,
            snapshotName);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFFLISTING: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    String snapshotDiffStartPath = params
        .get(HttpFSParametersProvider.SnapshotDiffStartPathParam.NAME,
            HttpFSParametersProvider.SnapshotDiffStartPathParam.class);
    Integer snapshotDiffIndex =
        params.get(HttpFSParametersProvider.SnapshotDiffIndexParam.NAME,
            HttpFSParametersProvider.SnapshotDiffIndexParam.class);
    FSOperations.FSGetSnapshotDiffListing command =
        new FSOperations.FSGetSnapshotDiffListing(path, oldSnapshotName,
            snapshotName, snapshotDiffStartPath, snapshotDiffIndex);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTTABLEDIRECTORYLIST: {
    FSOperations.FSGetSnapshottableDirListing command =
        new FSOperations.FSGetSnapshottableDirListing();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTLIST: {
    FSOperations.FSGetSnapshotListing command =
        new FSOperations.FSGetSnapshotListing(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSERVERDEFAULTS: {
    FSOperations.FSGetServerDefaults command =
        new FSOperations.FSGetServerDefaults();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case CHECKACCESS: {
    String mode = params.get(FsActionParam.NAME, FsActionParam.class);
    FsActionParam fsparam = new FsActionParam(mode);
    FSOperations.FSAccess command = new FSOperations.FSAccess(path,
        FsAction.getFsAction(fsparam.value()));
    fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok().build();
    break;
  }
  case GETECPOLICY: {
    FSOperations.FSGetErasureCodingPolicy command =
        new FSOperations.FSGetErasureCodingPolicy(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECPOLICIES: {
    FSOperations.FSGetErasureCodingPolicies command =
        new FSOperations.FSGetErasureCodingPolicies();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECCODECS: {
    FSOperations.FSGetErasureCodingCodecs command =
        new FSOperations.FSGetErasureCodingCodecs();
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GET_BLOCK_LOCATIONS: {
    // Legacy variant of GETFILEBLOCKLOCATIONS returning LocatedBlocks JSON.
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocationsLegacy command =
        new FSOperations.FSFileBlockLocationsLegacy(path, offset, len);
    @SuppressWarnings("rawtypes")
    Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("LocatedBlocks", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILELINKSTATUS: {
    FSOperations.FSFileLinkStatus command =
        new FSOperations.FSFileLinkStatus(path);
    @SuppressWarnings("rawtypes")
    Map js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTATUS: {
    FSOperations.FSStatus command = new FSOperations.FSStatus(path);
    @SuppressWarnings("rawtypes")
    Map js = fsExecute(user, command);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOTS: {
    Boolean allUsers = params.get(AllUsersParam.NAME, AllUsersParam.class);
    FSOperations.FSGetTrashRoots command =
        new FSOperations.FSGetTrashRoots(allUsers);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("allUsers [{}]", allUsers);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  default: {
    throw new IOException(
        MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value()));
  }
  }
  return response;
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testCustomizedUserAndGroupNames() throws Exception {
  // Start server with default configuration
  Server server = createHttpFSServer(false, false);
  final Configuration conf = HttpFSServerWebApp.get()
      .get(FileSystemAccess.class).getFileSystemConfiguration();
  // Change pattern config: allow digits-only user names and '@' in group names.
  conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
      "^[A-Za-z0-9_][A-Za-z0-9._-]*[$]?$");
  conf.set(HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_KEY,
      "^(default:)?(user|group|mask|other):" +
          "[[0-9A-Za-z_][@A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?" +
          "(user|group|mask|other):[[0-9A-Za-z_][@A-Za-z0-9._-]]*:" +
          "([rwx-]{3})?)*$");
  // Save configuration to site file
  writeConf(conf, "hdfs-site.xml");
  // Restart the HttpFS server to apply new config
  server.stop();
  server.start();

  // ACL entries exercising the relaxed patterns: a numeric user and an '@' group.
  final String aclUser = "user:123:rw-";
  final String aclGroup = "group:foo@bar:r--";
  final String aclSpec = "aclspec=user::rwx," + aclUser + ",group::rwx," +
      aclGroup + ",other::---";
  final String dir = "/aclFileTestCustom";
  final String path = dir + "/test";
  // Create test dir
  FileSystem fs = FileSystem.get(conf);
  fs.mkdirs(new Path(dir));
  createWithHttp(path, null);
  // Set ACL
  putCmd(path, "SETACL", aclSpec);
  // Verify ACL: both customized entries must round-trip through SETACL/GETACLSTATUS.
  String statusJson = getStatus(path, "GETACLSTATUS");
  List<String> aclEntries = getAclEntries(statusJson);
  Assert.assertTrue(aclEntries.contains(aclUser));
  Assert.assertTrue(aclEntries.contains(aclGroup));
}
/**
 * Resolves the table name from the request parameters, with path parameters
 * taking precedence over query parameters.
 *
 * @return the table name, or {@code null} if neither map contains one
 */
@VisibleForTesting
static String extractTableName(MultivaluedMap<String, String> pathParameters,
    MultivaluedMap<String, String> queryParameters) {
  String fromPath = extractTableName(pathParameters);
  return (fromPath != null) ? fromPath : extractTableName(queryParameters);
}
@Test
public void testExtractTableNameWithTableNameInPathParams() {
  // Populate both maps with competing candidates; "tableName" in the path must win.
  MultivaluedMap<String, String> pathParams = new MultivaluedHashMap<>();
  pathParams.putSingle("tableName", "A");
  pathParams.putSingle("tableNameWithType", "B");
  pathParams.putSingle("schemaName", "C");

  MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<>();
  queryParams.putSingle("tableName", "D");
  queryParams.putSingle("tableNameWithType", "E");
  queryParams.putSingle("schemaName", "F");

  assertEquals(AuthenticationFilter.extractTableName(pathParams, queryParams), "A");
}
/**
 * Updates a user's password. Only an admin or the user themselves may do this.
 *
 * @param username    the user whose password to change
 * @param newPassword the new plain-text password (encoded before storage)
 * @return a success result, or {@code null} after an error status was written
 * @throws IllegalArgumentException if the user does not exist
 */
@PutMapping
@Secured(resource = AuthConstants.UPDATE_PASSWORD_ENTRY_POINT, action = ActionTypes.WRITE)
public Object updateUser(@RequestParam String username, @RequestParam String newPassword,
        HttpServletResponse response, HttpServletRequest request) throws IOException {
    // admin or same user
    try {
        if (!hasPermission(username, request)) {
            response.sendError(HttpServletResponse.SC_FORBIDDEN, "authorization failed!");
            return null;
        }
    } catch (HttpSessionRequiredException e) {
        // No valid session: the caller must re-authenticate.
        response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "session expired!");
        return null;
    } catch (AccessException exception) {
        response.sendError(HttpServletResponse.SC_FORBIDDEN, "authorization failed!");
        return null;
    }

    User user = userDetailsService.getUserFromDatabase(username);
    if (user == null) {
        throw new IllegalArgumentException("user " + username + " not exist!");
    }

    // Store only the encoded form of the new password.
    userDetailsService.updateUserPassword(username, PasswordEncoderUtil.encode(newPassword));

    return RestResultUtils.success("update user ok!");
}
@Test
void testUpdateUser5() throws IOException, AccessException {
    // No identity in the request context, but auth succeeds via the authentication manager.
    RequestContextHolder.getContext().getAuthContext().getIdentityContext()
            .setParameter(AuthConstants.NACOS_USER_KEY, null);
    when(authConfigs.isAuthEnabled()).thenReturn(true);
    when(userDetailsService.getUserFromDatabase(anyString())).thenReturn(new User());
    when(authenticationManager.authenticate(any(MockHttpServletRequest.class))).thenReturn(user);

    MockHttpServletRequest servletRequest = new MockHttpServletRequest();
    MockHttpServletResponse servletResponse = new MockHttpServletResponse();
    RestResult<String> updateResult =
            (RestResult<String>) userController.updateUser("nacos", "test", servletResponse, servletRequest);
    assertEquals(200, updateResult.getCode());
}
/**
 * Wraps long string literals in the given source using the formatter's
 * default maximum line length.
 *
 * @param input     the Java source to rewrap
 * @param formatter the formatter used to reflow the result
 * @return the source with long string literals wrapped
 * @throws FormatterException if the source cannot be parsed/formatted
 */
public static String wrap(String input, Formatter formatter) throws FormatterException {
  return StringWrapper.wrap(Formatter.MAX_LINE_LENGTH, input, formatter);
}
@Test
public void textBlockTrailingWhitespaceUnicodeEscape() throws Exception {
  // Text blocks require JDK 15+.
  assumeTrue(Runtime.version().feature() >= 15);
  // We want a unicode escape in the Java source being formatted, so it needs to be escaped
  // in the string literal in this test.
  String input =
      lines(
          "public class T {",
          "  String s =",
          "      \"\"\"",
          "      lorem\\u0020",
          "      ipsum",
          "      \"\"\";",
          "}");
  // The \u0020 escape encodes trailing whitespace and must be left untouched.
  String expected =
      lines(
          "public class T {",
          "  String s =",
          "      \"\"\"",
          "      lorem\\u0020",
          "      ipsum",
          "      \"\"\";",
          "}");
  String actual = StringWrapper.wrap(100, input, new Formatter());
  assertThat(actual).isEqualTo(expected);
}
/**
 * Returns whether this package's {@code package-info} is annotated with the
 * given annotation type; packages without a package-info are never annotated.
 */
@Override
@PublicAPI(usage = ACCESS)
public boolean isAnnotatedWith(Class<? extends Annotation> annotationType) {
    return packageInfo.isPresent() && packageInfo.get().isAnnotatedWith(annotationType);
}
@Test
public void test_isAnnotatedWith_predicate() {
    JavaPackage packageWithAnnotation = importPackage("packageexamples.annotated");
    JavaPackage packageWithoutAnnotation = importPackage("packageexamples");

    // An annotated package follows the predicate's verdict...
    assertThat(packageWithAnnotation.isAnnotatedWith(alwaysTrue())).isTrue();
    assertThat(packageWithAnnotation.isAnnotatedWith(alwaysFalse())).isFalse();

    // ...while a package without annotations is never considered annotated.
    assertThat(packageWithoutAnnotation.isAnnotatedWith(alwaysTrue())).isFalse();
    assertThat(packageWithoutAnnotation.isAnnotatedWith(alwaysFalse())).isFalse();
}
/**
 * Loads a {@link KeyPair} from a PEM file, decrypting with the given password.
 *
 * @param f      the PEM file to read
 * @param passwd the key password, or {@code null} for an unencrypted key
 * @throws IOException              if the file cannot be read
 * @throws GeneralSecurityException if the key material is invalid
 */
public static KeyPair loadKey(File f, String passwd) throws IOException, GeneralSecurityException {
    return loadKey(readPemFile(f), passwd);
}
@Test
public void loadBlankKey() throws IOException, GeneralSecurityException {
    // An empty key file must be rejected as an invalid key.
    final File blankKeyFile = new File(this.getClass().getResource("blank").getFile());
    assertThrows(InvalidKeyException.class,
            () -> PrivateKeyProvider.loadKey(blankKeyFile, "password"));
}
/**
 * Reads the raw value stored under {@code key} via the Redis GET command.
 *
 * @param key the binary key
 * @return the stored bytes, or {@code null} if the key does not exist
 */
@Override
public byte[] get(byte[] key) {
    return read(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key);
}
@Test
public void testGeo() {
    RedisTemplate<String, String> redisTemplate = new RedisTemplate<>();
    redisTemplate.setConnectionFactory(new RedissonConnectionFactory(redisson));
    redisTemplate.afterPropertiesSet();

    String key = "test_geo_key";
    // Add three members; only "a" lies within the search radius below.
    Point point = new Point(116.401001, 40.119499);
    redisTemplate.opsForGeo().add(key, point, "a");

    point = new Point(111.545998, 36.133499);
    redisTemplate.opsForGeo().add(key, point, "b");

    point = new Point(111.483002, 36.030998);
    redisTemplate.opsForGeo().add(key, point, "c");

    // 80km circle centered on "a"'s coordinates.
    Circle within = new Circle(116.401001, 40.119499, 80000);
    RedisGeoCommands.GeoRadiusCommandArgs args =
            RedisGeoCommands.GeoRadiusCommandArgs.newGeoRadiusArgs().includeCoordinates();
    GeoResults<RedisGeoCommands.GeoLocation<String>> res =
            redisTemplate.opsForGeo().radius(key, within, args);
    assertThat(res.getContent().get(0).getContent().getName()).isEqualTo("a");
}
/**
 * Two {@link ByteKey}s are equal when they have the same length and compare
 * as equal byte-for-byte.
 */
@Override
public boolean equals(@Nullable Object o) {
  if (o == this) {
    return true;
  }
  if (!(o instanceof ByteKey)) {
    return false;
  }
  ByteKey that = (ByteKey) o;
  // Cheap length check first, then the full lexicographic comparison.
  return value.size() == that.value.size() && compareTo(that) == 0;
}
@Test
public void testEquals() {
  // Verify that the comparison gives the correct result for all values in both directions.
  for (int i = 0; i < TEST_KEYS.length; ++i) {
    for (int j = 0; j < TEST_KEYS.length; ++j) {
      ByteKey left = TEST_KEYS[i];
      ByteKey right = TEST_KEYS[j];
      boolean eq = left.equals(right);
      if (i == j) {
        // Same index: reflexive equality, plus equality with a deep copy.
        assertTrue(String.format("Expected that %s is equal to itself.", left), eq);
        assertTrue(
            String.format("Expected that %s is equal to a copy of itself.", left),
            left.equals(ByteKey.copyFrom(right.getValue())));
      } else {
        // Distinct test keys are assumed pairwise distinct.
        assertFalse(String.format("Expected that %s is not equal to %s", left, right), eq);
      }
    }
  }
}
/**
 * Entry point for the bare "namespace kv" command: prints the sub-command's
 * usage/help text and exits successfully.
 *
 * @return always {@code 0}
 */
@SneakyThrows
@Override
public Integer call() throws Exception {
    super.call();

    // Delegate to picocli to render the help for the "namespace kv" group.
    PicocliRunner.call(App.class, "namespace", "kv", "--help");

    return 0;
}
@Test
void runWithNoParam() {
    // Capture stdout to inspect the help text, but restore the original stream
    // afterwards — the old version leaked the redirect into every later test.
    PrintStream originalOut = System.out;
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    System.setOut(new PrintStream(out));

    try (ApplicationContext ctx = ApplicationContext.builder().deduceEnvironment(false).start()) {
        String[] args = {};
        Integer call = PicocliRunner.call(KvCommand.class, ctx, args);

        // Running with no arguments prints usage and exits successfully.
        assertThat(call, is(0));
        assertThat(out.toString(), containsString("Usage: kestra namespace kv"));
    } finally {
        System.setOut(originalOut);
    }
}
/**
 * Associates {@code value} with {@code key}, returning the previously cached
 * value (or {@code null}); may also schedule a memory-pressure refresh.
 */
@Override
public V put(final K key, final V value) {
    checkAndScheduleRefresh(this);
    // Capture the old mapping before overwriting it, mirroring Map.put semantics.
    final V oldValue = cache.getIfPresent(key);
    cache.put(key, value);
    return oldValue;
}
@Test
public void testWindowTinyLFUOutOufMemoryException() {
    final int mb = 1024 * 1024;
    // Allocate 1000 maps each holding a 1MB entry; under memory pressure the
    // weakly-referenced instances must be collectible instead of causing OOM.
    for (int i = 0; i < 1000; i++) {
        MemorySafeWindowTinyLFUMap<String, Byte[]> instance = new MemorySafeWindowTinyLFUMap<>(1, 1024);
        instance.put(String.valueOf(1), new Byte[mb]);
    }
    // The static ALL registry holds WeakReferences; some must have been reclaimed.
    Set<WeakReference<MemorySafeWindowTinyLFUMap<?, ?>>> all =
            (Set<WeakReference<MemorySafeWindowTinyLFUMap<?, ?>>>) ReflectUtils.getFieldValue(
                    new MemorySafeWindowTinyLFUMap(1, 1024), "ALL");
    Assert.assertNotEquals(1000, all.size());
}
/**
 * Returns the {@link RequestContext} bound to the current thread.
 *
 * @return the thread-local request context
 */
public static RequestContext getContext() {
    return CONTEXT_HOLDER.get();
}
@Test
void testGetContext() {
    // Take a timestamp first so the context's creation time can be bounded below.
    long before = System.currentTimeMillis();
    RequestContext context = RequestContextHolder.getContext();

    assertNotNull(context);
    assertNotNull(context.getRequestId());
    assertTrue(context.getRequestTimestamp() >= before);
    // All sub-contexts must be eagerly initialized.
    assertNotNull(context.getBasicContext());
    assertNotNull(context.getEngineContext());
    assertNotNull(context.getAuthContext());
}
/**
 * Sets this logger's level, recomputing its effective level and propagating
 * the change to all child loggers and registered level-change listeners.
 *
 * @param newLevel the new level, or {@code null} to inherit from the parent
 *                 (not permitted on the root logger)
 * @throws IllegalArgumentException if {@code null} is set on the root logger
 */
public synchronized void setLevel(Level newLevel) {
    if (level == newLevel) {
        // nothing to do;
        return;
    }
    if (newLevel == null && isRootLogger()) {
        // The root has no parent to inherit from, so it must always have a level.
        throw new IllegalArgumentException(
                "The level of the root logger cannot be set to null");
    }

    level = newLevel;
    if (newLevel == null) {
        // Inherit the effective level from the parent.
        effectiveLevelInt = parent.effectiveLevelInt;
        newLevel = parent.getEffectiveLevel();
    } else {
        effectiveLevelInt = newLevel.levelInt;
    }

    if (childrenList != null) {
        int len = childrenList.size();
        for (int i = 0; i < len; i++) {
            Logger child = (Logger) childrenList.get(i);
            // tell child to handle parent levelInt change
            child.handleParentLevelChange(effectiveLevelInt);
        }
    }
    // inform listeners
    loggerContext.fireOnLevelChange(this, newLevel);
}
@Test
public void testEnabledX_Off() throws Exception {
    // Setting the root to OFF must propagate OFF as the child's effective threshold.
    root.setLevel(Level.OFF);
    checkLevelThreshold(loggerTest, Level.OFF);
}
/**
 * Validates the formatter's configuration and formats the storage directories.
 *
 * <p>All required settings (node id, cluster id, directories, controller
 * listener name, metadata log directory) are checked up front; then the
 * effective release version, feature levels and bootstrap metadata are
 * computed and the directories are written.
 *
 * @throws RuntimeException   if the node id is negative
 * @throws FormatterException if any other required setting is missing/invalid
 */
public void run() throws Exception {
    if (nodeId < 0) {
        throw new RuntimeException("You must specify a valid non-negative node ID.");
    }
    if (clusterId == null) {
        throw new FormatterException("You must specify the cluster id.");
    }
    if (directories.isEmpty()) {
        throw new FormatterException("You must specify at least one directory to format");
    }
    if (controllerListenerName == null) {
        throw new FormatterException("You must specify the name of the initial controller listener.");
    }
    if (metadataLogDirectory == null) {
        throw new FormatterException("You must specify the metadata log directory.");
    }
    // The metadata log directory must itself be one of the formatted directories.
    if (!directories.contains(metadataLogDirectory)) {
        throw new FormatterException("The specified metadata log directory, " +
                metadataLogDirectory + " was not one of the given directories: " + directories);
    }
    releaseVersion = calculateEffectiveReleaseVersion();
    featureLevels = calculateEffectiveFeatureLevels();
    this.bootstrapMetadata = calculateBootstrapMetadata();
    doFormat(bootstrapMetadata);
}
@Test
public void testFormattingCreatesLogDirId() throws Exception {
    try (TestEnv testEnv = new TestEnv(1)) {
        // Format the single-directory environment.
        FormatterContext context = testEnv.newFormatter();
        context.formatter.run();
        // Reload the metadata that formatting wrote to disk.
        MetaPropertiesEnsemble ensemble = new MetaPropertiesEnsemble.Loader()
            .addLogDirs(testEnv.directories)
            .load();
        MetaProperties properties = ensemble.logDirProps().get(testEnv.directory(0));
        assertNotNull(properties);
        // Formatting must have assigned a directory id.
        assertTrue(properties.directoryId().isPresent());
    }
}
/**
 * Executes the given query, applying the connection's query options, and
 * returns the first result set (or an empty one if no results came back).
 */
@Override
public ResultSet executeQuery(String sql) throws SQLException {
    validateState();
    try {
        // Prepend any connection-level query options before submitting.
        final String statement = DriverUtils.enableQueryOptions(sql, _connection.getQueryOptions());
        _resultSetGroup = _session.execute(statement);
        _resultSet = _resultSetGroup.getResultSetCount() == 0
            ? PinotResultSet.empty()
            : new PinotResultSet(_resultSetGroup.getResultSet(0));
        return _resultSet;
    } catch (PinotClientException e) {
        // Surface client failures through the JDBC exception type, preserving the cause.
        throw new SQLException(String.format("Failed to execute query : %s", sql), e);
    }
}
@Test
public void testSetEnableNullHandling2() throws Exception {
    // Connection created with null handling enabled via connection properties.
    Properties properties = new Properties();
    properties.put(QueryOptionKey.ENABLE_NULL_HANDLING, "true");
    PinotConnection connection = new PinotConnection(properties, "dummy", _dummyPinotClientTransport, "dummy", _dummyPinotControllerTransport);
    connection.prepareStatement("").executeQuery(BASIC_TEST_QUERY);
    // The transmitted query must start with the generated SET-option clause.
    String expectedPrefix = DriverUtils.createSetQueryOptionString(QueryOptionKey.ENABLE_NULL_HANDLING, true) + BASIC_TEST_QUERY;
    String transmitted = _dummyPinotClientTransport.getLastQuery();
    Assert.assertEquals(transmitted.substring(0, expectedPrefix.length()), expectedPrefix);
}
/**
 * Builds the configuration key used to declare a client tag, by prefixing the
 * given tag key with {@code CLIENT_TAG_PREFIX}.
 *
 * @param clientTagKey the client tag key to prefix
 * @return the full configuration property name for the tag
 */
public static String clientTagPrefix(final String clientTagKey) {
    return CLIENT_TAG_PREFIX + clientTagKey;
}
@Test
public void shouldSetRackAwareAssignmentTags() {
    // Reference two tags in the rack-aware assignment config and declare both of them.
    props.put(StreamsConfig.RACK_AWARE_ASSIGNMENT_TAGS_CONFIG, "cluster,zone");
    props.put(StreamsConfig.clientTagPrefix("cluster"), "cluster-1");
    props.put(StreamsConfig.clientTagPrefix("zone"), "eu-central-1a");
    final StreamsConfig config = new StreamsConfig(props);
    // Order is irrelevant, so compare as a set.
    assertEquals(new HashSet<>(config.getList(StreamsConfig.RACK_AWARE_ASSIGNMENT_TAGS_CONFIG)), mkSet("cluster", "zone"));
}
/**
 * Checks whether every given topic is (probabilistically) contained in the
 * bloom filter encoded by {@code bloomBytes}.
 *
 * @param bloomBytes the serialized bloom filter
 * @param topics     the topics to test; must not be {@code null}
 * @return {@code true} if the filter reports membership for all topics
 *         (vacuously {@code true} for zero topics)
 * @throws IllegalArgumentException if {@code topics} is {@code null}
 */
public static boolean test(byte[] bloomBytes, byte[]... topics) {
    // Fail fast on invalid arguments before doing any work.
    if (topics == null) {
        throw new IllegalArgumentException("topics can not be null");
    }
    Bloom bloom = new Bloom(bloomBytes);
    for (byte[] topic : topics) {
        if (!bloom.test(topic)) {
            return false;
        }
    }
    return true;
}
@Test
public void testEthereumSampleLogsWithItsLogsBloomToReturnTrueForAllTopics() {
    // The block-level bloom must report membership for every log topic it was built from.
    Bloom bloom = new Bloom(ethereumSampleLogsBloom);
    for (String topic : ethereumSampleLogs) {
        assertTrue(bloom.test(topic), "must return true");
    }
}
/**
 * Delegates the leak report to the superclass, then records it in
 * {@code leakCounter} and calls {@code resetReportedLeaks()} — presumably so
 * that identical leaks are reported (and counted) again rather than
 * de-duplicated; confirm against the superclass's reporting semantics.
 */
@Override
protected void reportTracedLeak(String resourceType, String records) {
    super.reportTracedLeak(resourceType, records);
    leakCounter.incrementAndGet();
    resetReportedLeaks();
}
@Test
void test() {
    // Every reported leak bumps the counter by exactly one.
    for (int expected = 1; expected <= 3; expected++) {
        leakDetector.reportTracedLeak("test", "test");
        assertEquals(expected, leakDetector.leakCounter.get());
    }
}
/**
 * Returns {@code true} when this range covers everything: no upper bound, and
 * either no lower bound or an inclusive lower bound of {@code NULL}.
 */
boolean isAll() {
    // Any upper bound rules out "all".
    if (to != null) {
        return false;
    }
    // No lower bound at all.
    if (from == null) {
        return true;
    }
    // An inclusive lower bound of NULL still covers the full range.
    return from == AbstractIndex.NULL && isFromInclusive();
}
@Test
void isAll() {
    // Unbounded pointers cover everything.
    assertTrue(ALL.isAll());
    assertTrue(ALL_ALT.isAll());
    // Bounded or null-filtering pointers do not.
    assertFalse(pointer(singleton(5)).isAll());
    assertFalse(IS_NULL.isAll());
    assertFalse(IS_NOT_NULL.isAll());
}
/**
 * Extracts the right-hand-side values of every SET item in the UPDATE
 * statement, mapping each AST expression to the marker object used downstream:
 * NULL literal, literal value, bind-variable name ('?'), method-invocation
 * marker, DEFAULT marker, or a sequence expression (SQL Server 2012+
 * {@code NEXT VALUE FOR}). Any other expression type raises a parsing error.
 * Note: the instanceof chain order matters — Druid AST types overlap, so do
 * not reorder the branches.
 *
 * @return one entry per SET item, in statement order
 */
@Override
public List<Object> getUpdateValues() {
    if (ast.getTop() != null) {
        //deal with top sql
        dealTop(ast);
    }
    List<SQLUpdateSetItem> updateSetItems = ast.getItems();
    List<Object> list = new ArrayList<>(updateSetItems.size());
    for (SQLUpdateSetItem updateSetItem : updateSetItems) {
        SQLExpr expr = updateSetItem.getValue();
        if (expr instanceof SQLNullExpr) {
            list.add(Null.get());
        } else if (expr instanceof SQLValuableExpr) {
            // Literal value (number, string, ...).
            list.add(((SQLValuableExpr) expr).getValue());
        } else if (expr instanceof SQLVariantRefExpr) {
            //add '?'
            list.add(((SQLVariantRefExpr) expr).getName());
        } else if (expr instanceof SQLMethodInvokeExpr) {
            list.add(SqlMethodExpr.get());
        } else if (expr instanceof SQLDefaultExpr) {
            list.add(SqlDefaultExpr.get());
        } else if (expr instanceof SQLSequenceExpr) {
            //Supported only since 2012 version of SQL Server,use next value for
            SQLSequenceExpr sequenceExpr = (SQLSequenceExpr) expr;
            String sequence = sequenceExpr.getSequence().getSimpleName();
            String function = sequenceExpr.getFunction().name;
            list.add(new SqlSequenceExpr(sequence, function));
        } else {
            // Unsupported expression type: raise SQLParsingException.
            wrapSQLParsingException(expr);
        }
    }
    return list;
}
@Test
public void testGetUpdateValues() {
    // Bind variables: each '?' yields one entry.
    String sql = "update t set a = ?, b = ?, c = ?";
    SQLStatement sqlStatement = getSQLStatement(sql);
    SqlServerUpdateRecognizer recognizer = new SqlServerUpdateRecognizer(sql, sqlStatement);
    List<Object> updateValues = recognizer.getUpdateValues();
    Assertions.assertEquals(updateValues.size(), 3);
    // Literal values.
    sql = "update t set a = 1, b = 2, c = 3";
    sqlStatement = getSQLStatement(sql);
    recognizer = new SqlServerUpdateRecognizer(sql, sqlStatement);
    updateValues = recognizer.getUpdateValues();
    Assertions.assertEquals(updateValues.size(), 3);
    // DEFAULT keyword, method invocation, and NULL literal.
    sql = "update t set a = default, b = now(), c = null";
    sqlStatement = getSQLStatement(sql);
    recognizer = new SqlServerUpdateRecognizer(sql, sqlStatement);
    updateValues = recognizer.getUpdateValues();
    Assertions.assertEquals(updateValues.size(), 3);
    // Sequence expression (SQL Server 2012+ NEXT VALUE FOR).
    sql = "update t set a = next value for t1.id";
    sqlStatement = getSQLStatement(sql);
    recognizer = new SqlServerUpdateRecognizer(sql, sqlStatement);
    updateValues = recognizer.getUpdateValues();
    Assertions.assertEquals(updateValues.size(), 1);
    // UPDATE TOP(n) is not supported yet and must be rejected.
    Assertions.assertThrows(NotSupportYetException.class, () -> {
        String s = "update top(1) t set a = ?";
        SQLStatement sqlStatement1 = getSQLStatement(s);
        SqlServerUpdateRecognizer sqlServerUpdateRecognizer = new SqlServerUpdateRecognizer(s, sqlStatement1);
        sqlServerUpdateRecognizer.getUpdateValues();
    });
    // An unsupported expression type in a SET item raises a parsing error.
    Assertions.assertThrows(SQLParsingException.class, () -> {
        String s = "update t set a = ?";
        SQLStatement sqlStatement1 = getSQLStatement(s);
        SQLUpdateStatement sqlUpdateStatement = (SQLUpdateStatement) sqlStatement1;
        List<SQLUpdateSetItem> updateSetItems = sqlUpdateStatement.getItems();
        for (SQLUpdateSetItem updateSetItem : updateSetItems) {
            // Replace each value with an expression type the recognizer cannot handle.
            updateSetItem.setValue(new MySqlOrderingExpr());
        }
        SqlServerUpdateRecognizer sqlServerUpdateRecognizer = new SqlServerUpdateRecognizer(s, sqlUpdateStatement);
        sqlServerUpdateRecognizer.getUpdateValues();
    });
}
/**
 * Two option sets are equal when every flag and the icon match. Uses a strict
 * class check, so subclasses never compare equal to the base class.
 */
@Override
public boolean equals(final Object o) {
    if(o == this) {
        return true;
    }
    if(null == o || !getClass().equals(o.getClass())) {
        return false;
    }
    final LoginOptions other = (LoginOptions) o;
    return anonymous == other.anonymous
        && user == other.user
        && password == other.password
        && keychain == other.keychain
        && publickey == other.publickey
        && Objects.equals(icon, other.icon);
}
@Test
public void testEquals() {
    // Two default option sets are equal.
    assertEquals(new LoginOptions(), new LoginOptions());
    // A single differing flag breaks equality.
    final LoginOptions withoutKeychain = new LoginOptions();
    withoutKeychain.keychain = false;
    final LoginOptions withKeychain = new LoginOptions();
    withKeychain.keychain = true;
    assertNotEquals(withoutKeychain, withKeychain);
}
/**
 * Tries to build a fast-path executor for a SELECT whose projections are all
 * {@code @@variable} expressions referencing known MySQL system variables.
 * Returns empty as soon as any projection is not such an expression or names
 * an unknown variable.
 */
public static Optional<DatabaseAdminExecutor> tryGetSystemVariableQueryExecutor(final SelectStatement selectStatement) {
    final Collection<ProjectionSegment> projections = selectStatement.getProjections().getProjections();
    final List<ExpressionProjectionSegment> expressions = new ArrayList<>(projections.size());
    final List<MySQLSystemVariable> systemVariables = new ArrayList<>(projections.size());
    for (final ProjectionSegment projection : projections) {
        // Only expression projections wrapping a variable segment qualify.
        if (!(projection instanceof ExpressionProjectionSegment)) {
            return Optional.empty();
        }
        final ExpressionProjectionSegment expressionProjection = (ExpressionProjectionSegment) projection;
        if (!(expressionProjection.getExpr() instanceof VariableSegment)) {
            return Optional.empty();
        }
        final String variableName = ((VariableSegment) expressionProjection.getExpr()).getVariable();
        final Optional<MySQLSystemVariable> systemVariable = MySQLSystemVariable.findSystemVariable(variableName);
        if (!systemVariable.isPresent()) {
            // Unknown variable: fall back to the generic path.
            return Optional.empty();
        }
        expressions.add(expressionProjection);
        systemVariables.add(systemVariable.get());
    }
    return Optional.of(new MySQLSystemVariableQueryExecutor(expressions, systemVariables));
}
@Test
void assertTryGetSystemVariableQueryExecutorWithOtherExpressionProjection() {
    // Mix a valid system-variable projection with a plain column projection.
    MySQLSelectStatement selectStatement = new MySQLSelectStatement();
    selectStatement.setProjections(new ProjectionsSegment(0, 0));
    VariableSegment variableSegment = new VariableSegment(0, 0, "max_connections");
    variableSegment.setScope("session");
    selectStatement.getProjections().getProjections().add(new ExpressionProjectionSegment(0, 0, "@@session.max_connections", variableSegment));
    selectStatement.getProjections().getProjections().add(new ColumnProjectionSegment(new ColumnSegment(0, 0, new IdentifierValue("some_column"))));
    // Any non-variable projection disqualifies the fast-path executor.
    assertFalse(MySQLSystemVariableQueryExecutor.tryGetSystemVariableQueryExecutor(selectStatement).isPresent());
}
/**
 * Filters out records matching the predicate (inverse of {@code filter}),
 * with no explicit name and no materialized store.
 *
 * @throws NullPointerException if {@code predicate} is {@code null}
 */
@Override
public KTable<K, V> filterNot(final Predicate<? super K, ? super V> predicate) {
    // requireNonNull returns its argument, so validate and capture in one step.
    final Predicate<? super K, ? super V> checked = Objects.requireNonNull(predicate, "predicate can't be null");
    return doFilter(checked, NamedInternal.empty(), null, true);
}
@Test
public void shouldThrowNullPointerOnFilterNotWhenMaterializedIsNull() {
    // filterNot must reject a null Materialized argument eagerly.
    assertThrows(
        NullPointerException.class,
        () -> table.filterNot((k, v) -> false, (Materialized) null));
}
/**
 * Collects every placeholder occurrence in {@code sql} that matches the
 * class-level {@code pattern}, in order of appearance.
 *
 * @param sql the SQL text to scan
 * @return the matched placeholder strings (possibly empty, never null)
 */
public static List<String> findPlaceholder(String sql) {
    List<String> placeholders = new ArrayList<>();
    for (Matcher matcher = pattern.matcher(sql); matcher.find(); ) {
        placeholders.add(matcher.group());
    }
    return placeholders;
}
@Test
void m1() {
    // Placeholders with zero, one and two option segments are all captured.
    String sql = "select {@table},{@table:t},{@table:t:r} from table";
    List<String> placeholders = SqlUtils.findPlaceholder(sql);
    assertThat(placeholders).contains("{@table}", "{@table:t}", "{@table:t:r}");
}
/**
 * Aggregates with the supplied initializer, delegating to the two-argument
 * overload with {@code Materialized.with(null, null)} (i.e. no explicit key
 * or value serde; defaults apply). Argument validation — including the null
 * check on {@code initializer} — happens in the delegate.
 */
@Override
public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer) {
    return aggregate(initializer, Materialized.with(null, null));
}
@Test
public void shouldNotHaveNullMaterializedOnTwoOptionAggregate() {
    // The two-argument overload must reject a null Materialized.
    final Materialized<String, String, WindowStore<Bytes, byte[]>> materialized = null;
    assertThrows(
        NullPointerException.class,
        () -> windowedCogroupedStream.aggregate(MockInitializer.STRING_INIT, materialized));
}