focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Returns the live value mapped to {@code key}, or {@code null} when the key is
 * absent or has been logically removed (tombstoned).
 *
 * @throws IllegalStateException if this map has been destroyed
 * @throws NullPointerException  if {@code key} is null
 */
@Override
public V get(K key) {
    checkState(!destroyed, destroyedMessage);
    checkNotNull(key, ERROR_NULL_KEY);
    final MapValue<V> entry = items.get(key);
    // A tombstone marks a removed entry that has not been purged yet; treat it as absent.
    if (entry == null || entry.isTombstone()) {
        return null;
    }
    return entry.get();
}
// End-to-end check of get(): a local put/remove pair plus remote put/remove updates
// delivered through the cluster-communicator update handler, with latches to wait
// for asynchronous listener notification.
@Test
public void testGet() throws Exception {
    expectPeerMessage(clusterCommunicator);
    CountDownLatch latch;
    // Local put
    assertNull(ecMap.get(KEY1));
    ecMap.put(KEY1, VALUE1);
    assertEquals(VALUE1, ecMap.get(KEY1));
    // Remote put
    List<UpdateEntry<String, String>> message
            = ImmutableList.of(generatePutMessage(KEY2, VALUE2, clockService.getTimestamp(KEY2, VALUE2)));
    // Create a latch so we know when the put operation has finished
    latch = new CountDownLatch(1);
    ecMap.addListener(new TestListener(latch));
    assertNull(ecMap.get(KEY2));
    updateHandler.accept(message);
    assertTrue("External listener never got notified of internal event",
               latch.await(100, TimeUnit.MILLISECONDS));
    assertEquals(VALUE2, ecMap.get(KEY2));
    // Local remove
    ecMap.remove(KEY2);
    assertNull(ecMap.get(KEY2));
    // Remote remove
    message = ImmutableList.of(generateRemoveMessage(KEY1, clockService.getTimestamp(KEY1, VALUE1)));
    // Create a latch so we know when the remove operation has finished
    latch = new CountDownLatch(1);
    ecMap.addListener(new TestListener(latch));
    updateHandler.accept(message);
    assertTrue("External listener never got notified of internal event",
               latch.await(100, TimeUnit.MILLISECONDS));
    assertNull(ecMap.get(KEY1));
}
/**
 * Callback invoked with a received {@link Properties} payload.
 * Implementations define how the properties are consumed.
 *
 * @param properties the received properties
 */
public abstract void innerReceive(Properties properties);
// Verifies that innerReceive delivers the exact Properties instance the caller passed in.
@Test
void testInnerReceive() {
    // Queue up everything the listener receives so it can be inspected afterwards.
    final Deque<Properties> received = new ArrayDeque<Properties>();
    PropertiesListener listener = new PropertiesListener() {
        @Override
        public void innerReceive(Properties properties) {
            received.offer(properties);
        }
    };
    Properties input = new Properties();
    input.put("foo", "bar");
    listener.innerReceive(input);
    final Properties actual = received.poll();
    assertEquals(1, actual.size());
    assertEquals("bar", actual.getProperty("foo"));
}
// Test-only accessor for the configured call purge interval, in nanoseconds.
@VisibleForTesting
public long getPurgeIntervalNanos() {
    return this.purgeIntervalNanos;
}
// Verifies that the purge interval configured in minutes is converted to and
// exposed by the server in nanoseconds.
@Test (timeout=300000)
public void testPurgeIntervalNanosConf() throws Exception {
    Configuration conf = new Configuration();
    conf.setInt(CommonConfigurationKeysPublic.
            IPC_SERVER_PURGE_INTERVAL_MINUTES_KEY, 3);
    Server server = new Server("0.0.0.0", 0, LongWritable.class, 1, conf) {
        @Override
        public Writable call(
                RPC.RpcKind rpcKind, String protocol, Writable param,
                long receiveTime) throws Exception {
            // No-op handler; this test never dispatches an RPC call.
            return null;
        }
    };
    long purgeInterval = TimeUnit.NANOSECONDS.convert(3, TimeUnit.MINUTES);
    assertEquals(server.getPurgeIntervalNanos(), purgeInterval);
}
/**
 * Builds a {@link DataflowRunner} from the given options after validating every
 * service-facing setting: app name, region, temp/staging GCS locations, files to
 * stage, job name, project id, worker settings and debug options.
 *
 * @param options pipeline options; validated as {@code DataflowPipelineOptions}
 * @return a configured runner
 * @throws IllegalArgumentException if any required option is missing or invalid
 */
public static DataflowRunner fromOptions(PipelineOptions options) {
  DataflowPipelineOptions dataflowOptions =
      PipelineOptionsValidator.validate(DataflowPipelineOptions.class, options);
  ArrayList<String> missing = new ArrayList<>();
  if (dataflowOptions.getAppName() == null) {
    missing.add("appName");
  }
  // Region is only mandatory when targeting the real Dataflow service endpoint.
  if (Strings.isNullOrEmpty(dataflowOptions.getRegion())
      && isServiceEndpoint(dataflowOptions.getDataflowEndpoint())) {
    missing.add("region");
  }
  if (missing.size() > 0) {
    throw new IllegalArgumentException(
        "Missing required pipeline options: " + Joiner.on(',').join(missing));
  }
  validateWorkerSettings(
      PipelineOptionsValidator.validate(DataflowPipelineWorkerPoolOptions.class, options));
  PathValidator validator = dataflowOptions.getPathValidator();
  String gcpTempLocation;
  try {
    gcpTempLocation = dataflowOptions.getGcpTempLocation();
  } catch (Exception e) {
    throw new IllegalArgumentException(
        "DataflowRunner requires gcpTempLocation, "
            + "but failed to retrieve a value from PipelineOptions",
        e);
  }
  validator.validateOutputFilePrefixSupported(gcpTempLocation);
  String stagingLocation;
  try {
    stagingLocation = dataflowOptions.getStagingLocation();
  } catch (Exception e) {
    throw new IllegalArgumentException(
        "DataflowRunner requires stagingLocation, "
            + "but failed to retrieve a value from PipelineOptions",
        e);
  }
  validator.validateOutputFilePrefixSupported(stagingLocation);
  if (!isNullOrEmpty(dataflowOptions.getSaveProfilesToGcs())) {
    validator.validateOutputFilePrefixSupported(dataflowOptions.getSaveProfilesToGcs());
  }
  if (dataflowOptions.getFilesToStage() != null) {
    // The user specifically requested these files, so fail now if they do not exist.
    // (automatically detected classpath elements are permitted to not exist, so later
    // staging will not fail on nonexistent files)
    dataflowOptions.getFilesToStage().stream()
        .forEach(
            stagedFileSpec -> {
              File localFile;
              // "dest=src" specs name the local file after the '='.
              if (stagedFileSpec.contains("=")) {
                String[] components = stagedFileSpec.split("=", 2);
                localFile = new File(components[1]);
              } else {
                localFile = new File(stagedFileSpec);
              }
              if (!localFile.exists()) {
                // should be FileNotFoundException, but for build-time backwards compatibility
                // cannot add checked exception
                throw new RuntimeException(
                    String.format("Non-existent files specified in filesToStage: %s", localFile));
              }
            });
  } else {
    dataflowOptions.setFilesToStage(
        detectClassPathResourcesToStage(DataflowRunner.class.getClassLoader(), options));
    if (dataflowOptions.getFilesToStage().isEmpty()) {
      throw new IllegalArgumentException("No files to stage has been found.");
    } else {
      LOG.info(
          "PipelineOptions.filesToStage was not specified. "
              + "Defaulting to files from the classpath: will stage {} files. "
              + "Enable logging at DEBUG level to see which files will be staged.",
          dataflowOptions.getFilesToStage().size());
      LOG.debug("Classpath elements: {}", dataflowOptions.getFilesToStage());
    }
  }
  // Verify jobName according to service requirements, truncating converting to lowercase if
  // necessary.
  String jobName = dataflowOptions.getJobName().toLowerCase();
  checkArgument(
      jobName.matches("[a-z]([-a-z0-9]*[a-z0-9])?"),
      "JobName invalid; the name must consist of only the characters "
          + "[-a-z0-9], starting with a letter and ending with a letter "
          + "or number");
  if (!jobName.equals(dataflowOptions.getJobName())) {
    LOG.info(
        "PipelineOptions.jobName did not match the service requirements. "
            + "Using {} instead of {}.",
        jobName,
        dataflowOptions.getJobName());
  }
  dataflowOptions.setJobName(jobName);
  // Verify project
  String project = dataflowOptions.getProject();
  // An all-digit value is almost certainly a project *number*, not a project ID.
  if (project.matches("[0-9]*")) {
    throw new IllegalArgumentException(
        "Project ID '"
            + project
            + "' invalid. Please make sure you specified the Project ID, not project number.");
  } else if (!project.matches(PROJECT_ID_REGEXP)) {
    throw new IllegalArgumentException(
        "Project ID '"
            + project
            + "' invalid. Please make sure you specified the Project ID, not project"
            + " description.");
  }
  DataflowPipelineDebugOptions debugOptions =
      dataflowOptions.as(DataflowPipelineDebugOptions.class);
  // Verify the number of worker threads is a valid value
  if (debugOptions.getNumberOfWorkerHarnessThreads() < 0) {
    throw new IllegalArgumentException(
        "Number of worker harness threads '"
            + debugOptions.getNumberOfWorkerHarnessThreads()
            + "' invalid. Please make sure the value is non-negative.");
  }
  // Verify that if recordJfrOnGcThrashing is set, the pipeline is at least on java 11
  if (dataflowOptions.getRecordJfrOnGcThrashing()
      && Environments.getJavaVersion() == Environments.JavaVersion.java8) {
    throw new IllegalArgumentException(
        "recordJfrOnGcThrashing is only supported on java 9 and up.");
  }
  if (dataflowOptions.isStreaming() && dataflowOptions.getGcsUploadBufferSizeBytes() == null) {
    dataflowOptions.setGcsUploadBufferSizeBytes(GCS_UPLOAD_BUFFER_SIZE_BYTES_DEFAULT);
  }
  // Adding the Java version to the SDK name for user's and support convenience.
  String agentJavaVer = "(JRE 8 environment)";
  if (Environments.getJavaVersion() != Environments.JavaVersion.java8) {
    agentJavaVer =
        String.format("(JRE %s environment)", Environments.getJavaVersion().specification());
  }
  DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
  String userAgentName = dataflowRunnerInfo.getName();
  Preconditions.checkArgument(
      !userAgentName.equals(""), "Dataflow runner's `name` property cannot be empty.");
  String userAgentVersion = dataflowRunnerInfo.getVersion();
  Preconditions.checkArgument(
      !userAgentVersion.equals(""), "Dataflow runner's `version` property cannot be empty.");
  // Spaces are replaced so the user agent survives HTTP header encoding.
  String userAgent =
      String.format("%s/%s%s", userAgentName, userAgentVersion, agentJavaVer).replace(" ", "_");
  dataflowOptions.setUserAgent(userAgent);
  return new DataflowRunner(dataflowOptions);
}
// Verifies that supplying a Noop PathValidator lets a non-GCS tempLocation pass
// validation when constructing a pipeline.
@Test
public void testPathValidatorOverride() {
  String[] args =
      new String[] {
        "--runner=DataflowRunner",
        "--region=some-region-1",
        "--tempLocation=/tmp/testing",
        "--project=test-project",
        "--credentialFactoryClass=" + NoopCredentialFactory.class.getName(),
        "--pathValidatorClass=" + NoopPathValidator.class.getName(),
      };
  // Should not crash, because gcpTempLocation should get set from tempLocation
  TestPipeline.fromOptions(PipelineOptionsFactory.fromArgs(args).create());
}
// Stable identifier of this implementation; delegates to the class constant ID.
@NonNull
@Override
public String getId() {
    return ID;
}
// Fetches the TESTP organization's repositories through the Bitbucket Server SCM
// endpoint using an explicit credentialId, and checks names, descriptions,
// visibility and default branches of the two expected repositories.
@Test
public void getRepositoriesWithCredentialId() throws IOException, UnirestException {
    String credentialId = createCredential(BitbucketServerScm.ID);
    Map repoResp = new RequestBuilder(baseUrl)
            .crumb(crumb)
            .status(200)
            .jwtToken(getJwtToken(j.jenkins, authenticatedUser.getId(), authenticatedUser.getId()))
            .post("/organizations/jenkins/scm/"+BitbucketServerScm.ID+"/organizations/TESTP/repositories/?apiUrl="+apiUrl+"&credentialId="+credentialId)
            .build(Map.class);
    List repos = (List) ((Map)repoResp.get("repositories")).get("items");
    assertEquals(2, repos.size());
    // First repo is empty, so it has no default branch.
    assertEquals("empty-repo-test", ((Map)repos.get(0)).get("name"));
    assertEquals("empty-repo-test", ((Map)repos.get(0)).get("description"));
    assertTrue((Boolean) ((Map)repos.get(0)).get("private"));
    assertNull(((Map)repos.get(0)).get("defaultBranch"));
    assertEquals("pipeline-demo-test", ((Map)repos.get(1)).get("name"));
    assertEquals("pipeline-demo-test", ((Map)repos.get(1)).get("description"));
    assertTrue((Boolean) ((Map)repos.get(1)).get("private"));
    assertEquals("master",((Map)repos.get(1)).get("defaultBranch"));
}
/**
 * Runs the risk analysis. May only be invoked once per instance; the finality
 * check runs first and short-circuits any non-OK verdict.
 */
@Override
public Result analyze() {
    checkState(!analyzed);
    analyzed = true;
    final Result finality = analyzeIsFinal();
    // Only fall through to the standardness check when finality raised no issue.
    if (finality == null || finality == Result.OK) {
        return analyzeIsStandard();
    }
    return finality;
}
// Builds a transaction containing one of every standard output type and asserts
// the default risk analysis classifies the whole transaction as OK.
@Test
public void standardOutputs() {
    Transaction tx = new Transaction();
    tx.addInput(MAINNET.getGenesisBlock().getTransactions().get(0).getOutput(0));
    // A pay to address output
    tx.addOutput(Coin.CENT, ScriptBuilder.createP2PKHOutputScript(key1));
    // A P2PK output
    tx.addOutput(Coin.CENT, ScriptBuilder.createP2PKOutputScript(key1));
    tx.addOutput(Coin.CENT, ScriptBuilder.createP2PKOutputScript(key1));
    // 1-of-2 multisig output.
    List<ECKey> keys = Arrays.asList(key1, new ECKey());
    tx.addOutput(Coin.CENT, ScriptBuilder.createMultiSigOutputScript(1, keys));
    // 2-of-2 multisig output.
    tx.addOutput(Coin.CENT, ScriptBuilder.createMultiSigOutputScript(2, keys));
    // P2SH
    tx.addOutput(Coin.CENT, ScriptBuilder.createP2SHOutputScript(1, keys));
    // OP_RETURN
    tx.addOutput(Coin.CENT, ScriptBuilder.createOpReturnScript("hi there".getBytes()));
    assertEquals(RiskAnalysis.Result.OK, DefaultRiskAnalysis.FACTORY.create(wallet, tx, NO_DEPS).analyze());
}
/**
 * Returns the registered migration steps starting at the step closest to
 * {@code migrationNumber}, or an empty list when no step is at or after it.
 */
@Override
public List<RegisteredMigrationStep> readFrom(long migrationNumber) {
    validate(migrationNumber);
    final int start = lookupIndexOfClosestTo(migrationNumber);
    // A negative index means no registered step matches the requested number.
    return start < 0
            ? Collections.emptyList()
            : steps.subList(start, steps.size());
}
// Negative migration numbers must be rejected up front with an IllegalArgumentException.
@Test
public void readFrom_throws_IAE_if_number_is_less_than_0() {
    assertThatThrownBy(() -> underTest.readFrom(-1))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Migration number must be >= 0");
}
/**
 * Reads the configured heartbeat interval in milliseconds.
 *
 * @return the parsed interval, or {@code null} when the property is unset or
 *         cannot be parsed as a long
 */
public static Long getHeartbeatIntervalMs() {
    String interval = SentinelConfig.getConfig(HEARTBEAT_INTERVAL_MS);
    if (interval == null) {
        return null;
    }
    try {
        return Long.parseLong(interval);
    } catch (NumberFormatException ex) {
        // Only parseLong can throw here, so catch the narrowest type instead of Exception.
        RecordLog.warn("[TransportConfig] Failed to parse heartbeat interval: " + interval);
        return null;
    }
}
@Test public void testGetHeartbeatInterval() { long interval = 20000; assertNull(TransportConfig.getHeartbeatIntervalMs()); // Set valid interval. SentinelConfig.setConfig(TransportConfig.HEARTBEAT_INTERVAL_MS, String.valueOf(interval)); assertEquals(new Long(interval), TransportConfig.getHeartbeatIntervalMs()); // Set invalid interval. SentinelConfig.setConfig(TransportConfig.HEARTBEAT_INTERVAL_MS, "Sentinel"); assertNull(TransportConfig.getHeartbeatIntervalMs()); }
@Override public int run(String[] args) throws Exception { if (args.length != 2) { return usage(args); } String action = args[0]; String name = args[1]; int result; if (A_LOAD.equals(action)) { result = loadClass(name); } else if (A_CREATE.equals(action)) { //first load to separate load errors from create result = loadClass(name); if (result == SUCCESS) { //class loads, so instantiate it result = createClassInstance(name); } } else if (A_RESOURCE.equals(action)) { result = loadResource(name); } else if (A_PRINTRESOURCE.equals(action)) { result = dumpResource(name); } else { result = usage(args); } return result; }
// A private nested class should still be loadable by name via the load action.
@Test
public void testLoadPrivateClass() throws Throwable {
    run(FindClass.SUCCESS, FindClass.A_LOAD, "org.apache.hadoop.util.TestFindClass$PrivateClass");
}
/**
 * Creates a {@link LinkedHashSet} pre-sized so that {@code expectedMapSize}
 * elements can be added without triggering a rehash.
 */
public static <E> Set<E> createLinkedHashSet(int expectedMapSize) {
    // Capacity chosen so expectedMapSize stays strictly below capacity * loadFactor.
    final int capacity = (int) (expectedMapSize / HASHSET_DEFAULT_LOAD_FACTOR) + 1;
    return new LinkedHashSet<>(capacity, HASHSET_DEFAULT_LOAD_FACTOR);
}
// Verifies the factory returns a LinkedHashSet. Uses a parameterized Set
// instead of the original raw type to avoid unchecked-usage warnings.
@Test
public void testCreatedLinkedHashSet() {
    Set<String> set = createLinkedHashSet(5);
    assertInstanceOf(LinkedHashSet.class, set);
}
/**
 * Builds the AWS credentials provider for the S3A under-filesystem: static
 * credentials when both access and secret key are configured, otherwise the
 * AWS default provider chain.
 */
public static AWSCredentialsProvider createAwsCredentialsProvider(
    UnderFileSystemConfiguration conf) {
  // Set the aws credential system properties based on Alluxio properties, if they are set;
  // otherwise, use the default credential provider.
  if (conf.isSet(PropertyKey.S3A_ACCESS_KEY) && conf.isSet(PropertyKey.S3A_SECRET_KEY)) {
    return new AWSStaticCredentialsProvider(new BasicAWSCredentials(
        conf.getString(PropertyKey.S3A_ACCESS_KEY), conf.getString(PropertyKey.S3A_SECRET_KEY)));
  }
  // Checks, in order, env variables, system properties, profile file, and instance profile.
  return new DefaultAWSCredentialsProviderChain();
}
// With both S3A keys unset, the factory must fall back to the AWS default
// credentials provider chain.
@Test
public void createCredentialsFromDefault() throws Exception {
    // Unset AWS properties if present
    Map<PropertyKey, Object> conf = new HashMap<>();
    conf.put(PropertyKey.S3A_ACCESS_KEY, null);
    conf.put(PropertyKey.S3A_SECRET_KEY, null);
    try (Closeable c = new ConfigurationRule(conf, CONF).toResource()) {
        UnderFileSystemConfiguration ufsConf = UnderFileSystemConfiguration.defaults(CONF);
        AWSCredentialsProvider credentialsProvider =
                S3AUnderFileSystem.createAwsCredentialsProvider(ufsConf);
        Assert.assertTrue(credentialsProvider instanceof DefaultAWSCredentialsProviderChain);
    }
}
/**
 * Compiles (or fetches from cache) the helper class generated from the custom
 * model and wraps a fresh instance of it into CustomWeighting.Parameters.
 *
 * Two caches are consulted: INTERNAL_CACHE for server-side (internal) models and
 * the bounded CACHE for client-supplied ones.
 */
public static CustomWeighting.Parameters createWeightingParameters(CustomModel customModel, EncodedValueLookup lookup) {
    // The model's string form is the cache key; identical models share a compiled class.
    String key = customModel.toString();
    Class<?> clazz = customModel.isInternal() ? INTERNAL_CACHE.get(key) : null;
    if (CACHE_SIZE > 0 && clazz == null)
        clazz = CACHE.get(key);
    if (clazz == null) {
        clazz = createClazz(customModel, lookup);
        if (customModel.isInternal()) {
            INTERNAL_CACHE.put(key, clazz);
            // Guard against unbounded growth if callers mark many distinct models internal.
            if (INTERNAL_CACHE.size() > 100) {
                CACHE.putAll(INTERNAL_CACHE);
                INTERNAL_CACHE.clear();
                LoggerFactory.getLogger(CustomModelParser.class).warn("Internal cache must stay small but was "
                        + INTERNAL_CACHE.size() + ". Cleared it. Misuse of CustomModel::internal?");
            }
        } else if (CACHE_SIZE > 0) {
            CACHE.put(key, clazz);
        }
    }
    try {
        // The class does not need to be thread-safe as we create an instance per request
        CustomWeightingHelper prio = (CustomWeightingHelper) clazz.getDeclaredConstructor().newInstance();
        prio.init(customModel, lookup, CustomModel.getAreasAsMap(customModel.getAreas()));
        return new CustomWeighting.Parameters(
                prio::getSpeed, prio::calcMaxSpeed, prio::getPriority, prio::calcMaxPriority,
                customModel.getDistanceInfluence() == null ? 0 : customModel.getDistanceInfluence(),
                customModel.getHeadingPenalty() == null
                        ? Parameters.Routing.DEFAULT_HEADING_PENALTY : customModel.getHeadingPenalty());
    } catch (ReflectiveOperationException ex) {
        throw new IllegalArgumentException("Cannot compile expression " + ex.getMessage(), ex);
    }
}
// Checks that a nested speed block ("if max_speed > 65 then limit 65") only
// caps edges whose max_speed exceeds the threshold; others keep the outer limit.
@Test
public void parseBlock() {
    DecimalEncodedValue maxSpeedEnc = encodingManager.getDecimalEncodedValue(MaxSpeed.KEY);
    EdgeIteratorState edge60 = graph.edge(0, 1).setDistance(10).
            set(maxSpeedEnc, 60).set(avgSpeedEnc, 70).set(accessEnc, true, true);
    EdgeIteratorState edge70 = graph.edge(1, 2).setDistance(10).
            set(maxSpeedEnc, 70).set(avgSpeedEnc, 70).set(accessEnc, true, true);
    CustomModel customModel = new CustomModel();
    customModel.addToSpeed(If("true", LIMIT, "200"));
    customModel.addToSpeed(If("max_speed > 65", List.of(If("true", LIMIT, "65"))));
    CustomWeighting.EdgeToDoubleMapping speedMapping =
            CustomModelParser.createWeightingParameters(customModel, encodingManager).
                    getEdgeToSpeedMapping();
    assertEquals(65.0, speedMapping.get(edge70, false), 0.01);
    assertEquals(200.0, speedMapping.get(edge60, false), 0.01);
}
// Production entry point: delegates to the testable overload, supplying real
// RocksDB cache and write-buffer-manager factories.
@Override
public void configure(final Map<String, ?> config) {
    configure(
        config,
        new Options(),
        org.rocksdb.LRUCache::new,
        org.rocksdb.WriteBufferManager::new
    );
}
// When the strict-cache-limit flag is set, the LRU cache factory must be invoked
// with strictCapacityLimit == true.
@Test
public void shouldUseStrictCacheIfConfigured() {
    // When:
    CONFIG_PROPS.put(KsqlBoundedMemoryRocksDBConfig.STRICT_CACHE_LIMIT, true);
    KsqlBoundedMemoryRocksDBConfigSetter.configure(
        CONFIG_PROPS,
        rocksOptions,
        cacheFactory,
        bufferManagerFactory
    );
    // Then:
    verify(cacheFactory).create(anyLong(), anyInt(), eq(true), anyDouble());
}
/**
 * Populates the ZooKeeper server config builder: one server entry per config
 * server (with its zookeeper id), this host's own id, optional client port,
 * TLS file for hosted Vespa, and reconfiguration/snapshot/buffer settings.
 *
 * @throws IllegalArgumentException if host and id lists differ in length or an id is negative
 */
@Override
public void getConfig(ZookeeperServerConfig.Builder builder) {
    ConfigServer[] configServers = getConfigServers();
    int[] zookeeperIds = getConfigServerZookeeperIds();
    if (configServers.length != zookeeperIds.length) {
        throw new IllegalArgumentException(String.format("Number of provided config server hosts (%d) must be the " +
                "same as number of provided config server zookeeper ids (%d)",
                configServers.length, zookeeperIds.length));
    }
    String myhostname = HostName.getLocalhost();
    // TODO: Server index should be in interval [1, 254] according to doc,
    // however, we cannot change this id for an existing server
    for (int i = 0; i < configServers.length; i++) {
        if (zookeeperIds[i] < 0) {
            throw new IllegalArgumentException(String.format("Zookeeper ids cannot be negative, was %d for %s",
                    zookeeperIds[i], configServers[i].hostName));
        }
        // The entry matching this host's name determines our own server id.
        if (configServers[i].hostName.equals(myhostname)) {
            builder.myid(zookeeperIds[i]);
        }
        builder.server(getZkServer(configServers[i], zookeeperIds[i]));
    }
    if (options.zookeeperClientPort().isPresent()) {
        builder.clientPort(options.zookeeperClientPort().get());
    }
    if (options.hostedVespa().orElse(false)) {
        builder.vespaTlsConfigFile(Defaults.getDefaults().underVespaHome("var/zookeeper/conf/tls.conf.json"));
    }
    // Hosted Vespa uses dynamic reconfiguration; self-hosted reconfigures the whole ensemble.
    boolean isHostedVespa = options.hostedVespa().orElse(false);
    builder.dynamicReconfiguration(isHostedVespa);
    builder.reconfigureEnsemble(!isHostedVespa);
    builder.snapshotMethod(options.zooKeeperSnapshotMethod());
    builder.juteMaxBuffer(options.zookeeperJuteMaxBuffer());
}
// Hosted setup with explicit zookeeper ids: hostnames and ids must be passed
// through verbatim, myid is the id of the local host ("localhost"), and the
// hosted TLS config file must be set.
@Test
void zookeeperConfig_with_config_servers_and_zk_ids_hosted() {
    TestOptions testOptions = createTestOptions(List.of("cfg1", "localhost", "cfg3"), List.of(4, 2, 3));
    ZookeeperServerConfig config = getConfig(ZookeeperServerConfig.class, testOptions);
    assertZookeeperServerProperty(config.server(), ZookeeperServerConfig.Server::hostname, "cfg1", "localhost", "cfg3");
    assertZookeeperServerProperty(config.server(), ZookeeperServerConfig.Server::id, 4, 2, 3);
    assertEquals(2, config.myid());
    assertEquals("/opt/vespa/var/zookeeper/conf/tls.conf.json", config.vespaTlsConfigFile());
}
/**
 * Parses a comma-separated list of event codes into an {@link EnumSet}.
 * Each token is resolved, in order, as a special alias, then an enum name,
 * then a numeric id; unresolvable tokens are reported and skipped.
 */
static <E extends Enum<E>> EnumSet<E> parseEventCodes(
    final Class<E> eventCodeType,
    final String eventCodes,
    final Map<String, EnumSet<E>> specialEvents,
    final IntFunction<E> eventCodeById,
    final Function<String, E> eventCodeByName)
{
    final EnumSet<E> result = EnumSet.noneOf(eventCodeType);
    if (Strings.isEmpty(eventCodes))
    {
        return result;
    }

    for (final String token : eventCodes.split(","))
    {
        // Special aliases expand to a whole set of codes.
        final EnumSet<E> expansion = specialEvents.get(token);
        if (null != expansion)
        {
            result.addAll(expansion);
            continue;
        }

        E resolved = null;
        try
        {
            resolved = eventCodeByName.apply(token);
        }
        catch (final IllegalArgumentException ignore)
        {
            // Not a valid enum name; fall through to numeric lookup.
        }

        if (null == resolved)
        {
            try
            {
                // NumberFormatException is an IllegalArgumentException, so both
                // a bad number and a bad id are swallowed here.
                resolved = eventCodeById.apply(Integer.parseInt(token));
            }
            catch (final IllegalArgumentException ignore)
            {
            }
        }

        if (null != resolved)
        {
            result.add(resolved);
        }
        else
        {
            err.println("unknown event code: " + token);
        }
    }

    return result;
}
// A null event-codes string must parse to the empty set.
@Test
void nullValueMeansNoEventsEnabled() {
    final EnumSet<TestEvent> parsedEvents = parseEventCodes(
        TestEvent.class, null, Collections.emptyMap(),
        (i) -> TestEvent.values()[i], TestEvent::valueOf);
    assertEquals(EnumSet.noneOf(TestEvent.class), parsedEvents);
}
public static ApiVersionCollection filterApis( RecordVersion minRecordVersion, ApiMessageType.ListenerType listenerType, boolean enableUnstableLastVersion, boolean clientTelemetryEnabled ) { ApiVersionCollection apiKeys = new ApiVersionCollection(); for (ApiKeys apiKey : ApiKeys.apisForListener(listenerType)) { // Skip telemetry APIs if client telemetry is disabled. if ((apiKey == ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS || apiKey == ApiKeys.PUSH_TELEMETRY) && !clientTelemetryEnabled) continue; if (apiKey.minRequiredInterBrokerMagic <= minRecordVersion.value) { apiKey.toApiVersion(enableUnstableLastVersion).ifPresent(apiKeys::add); } } return apiKeys; }
// With client telemetry enabled, the filtered API response must include both
// telemetry APIs (GET_TELEMETRY_SUBSCRIPTIONS and PUSH_TELEMETRY).
@Test
public void shouldCreateApiResponseWithTelemetryWhenEnabled() {
    ApiVersionsResponse response = new ApiVersionsResponse.Builder().
        setThrottleTimeMs(10).
        setApiVersions(ApiVersionsResponse.filterApis(
            RecordVersion.V1,
            ListenerType.BROKER,
            true,
            true)).
        setSupportedFeatures(Features.emptySupportedFeatures()).
        setFinalizedFeatures(Collections.emptyMap()).
        setFinalizedFeaturesEpoch(ApiVersionsResponse.UNKNOWN_FINALIZED_FEATURES_EPOCH).
        build();
    verifyApiKeysForTelemetry(response, 2);
}
// Returns the current boxed value. NOTE(review): may be null before a successful
// setValue — depends on field initialization not visible here; confirm.
public Long value() {
    return value;
}
// setValue must reject non-numeric input and accept a decimal string, which is
// then readable back through value().
@Test
void testSetValue() {
    LongNode n = new LongNode();
    assertFalse(n.setValue("invalid"));
    assertTrue(n.setValue("10"));
    assertEquals(10L, n.value().longValue());
}
/**
 * Renames {@code file} to {@code renamed} over SFTP using native rename
 * semantics, adding the OVERWRITE flag when the target is known to exist.
 *
 * @return the renamed path carrying the original file's attributes
 * @throws BackgroundException wrapping any SFTP I/O failure
 */
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status,
                 final Delete.Callback callback,
                 final ConnectionCallback connectionCallback) throws BackgroundException {
    try {
        session.sftp().rename(file.getAbsolute(), renamed.getAbsolute(), status.isExists()
                ? new HashSet<>(Arrays.asList(RenameFlags.OVERWRITE, RenameFlags.NATIVE))
                : Collections.singleton(RenameFlags.NATIVE));
        // Copy original file attributes
        return renamed.withAttributes(file.attributes());
    }
    catch(IOException e) {
        throw new SFTPExceptionMappingService().map("Cannot rename {0}", e, file);
    }
}
// Moving onto an existing file must fail with ConflictException unless the
// transfer status declares exists(true), in which case the target is overwritten.
@Test
public void testMoveOverride() throws Exception {
    final Path workdir = new SFTPHomeDirectoryService(session).find();
    final Path test = new Path(workdir, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    new SFTPTouchFeature(session).touch(test, new TransferStatus());
    final Path target = new Path(workdir, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    new SFTPTouchFeature(session).touch(target, new TransferStatus());
    // exists(false) on an occupied target -> conflict.
    assertThrows(ConflictException.class, () -> new SFTPMoveFeature(session).move(test, target,
            new TransferStatus().exists(false), new Delete.DisabledCallback(), new DisabledConnectionCallback()));
    new SFTPMoveFeature(session).move(test, target,
            new TransferStatus().exists(true), new Delete.DisabledCallback(), new DisabledConnectionCallback());
    assertFalse(new SFTPFindFeature(session).find(test));
    assertTrue(new SFTPFindFeature(session).find(target));
    new SFTPDeleteFeature(session).delete(Collections.<Path>singletonList(target),
            new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Detects whether the connected MySQL-protocol database exposes
 * information_schema.partitions, caching the verdict in
 * {@code supportPartitionInformation}.
 *
 * @return true iff the partitions table was found
 */
@Override
public boolean checkAndSetSupportPartitionInformation(Connection connection) {
    String catalogSchema = "information_schema";
    String partitionInfoTable = "partitions";
    // Different types of MySQL protocol databases have different case names for schema and table names,
    // which need to be converted to lowercase for comparison
    try (ResultSet catalogSet = connection.getMetaData().getCatalogs()) {
        while (catalogSet.next()) {
            String schemaName = catalogSet.getString("TABLE_CAT");
            if (schemaName.equalsIgnoreCase(catalogSchema)) {
                try (ResultSet tableSet = connection.getMetaData().getTables(catalogSchema, null, null, null)) {
                    while (tableSet.next()) {
                        String tableName = tableSet.getString("TABLE_NAME");
                        if (tableName.equalsIgnoreCase(partitionInfoTable)) {
                            return this.supportPartitionInformation = true;
                        }
                    }
                }
            }
        }
    } catch (SQLException e) {
        // NOTE(review): wrapping only e.getMessage() drops the original stack trace;
        // pass the cause if StarRocksConnectorException has such a constructor — confirm.
        throw new StarRocksConnectorException(e.getMessage());
    }
    return this.supportPartitionInformation = false;
}
// Mocks JDBC metadata so that information_schema contains a "partitions" table,
// and verifies the resolver reports partition-information support.
@Test
public void testCheckPartitionWithPartitionsTable() throws SQLException {
    new Expectations() {
        {
            String catalogSchema = "information_schema";
            dbResult = new MockResultSet("catalog");
            dbResult.addColumn("TABLE_CAT", Arrays.asList(catalogSchema));
            connection.getMetaData().getCatalogs();
            result = dbResult;
            minTimes = 0;
            MockResultSet piResult = new MockResultSet("partitions");
            piResult.addColumn("TABLE_NAME", Arrays.asList("partitions"));
            connection.getMetaData().getTables(anyString, null, null, null);
            result = piResult;
            minTimes = 0;
        }
    };
    try {
        JDBCSchemaResolver schemaResolver = new MysqlSchemaResolver();
        Assert.assertTrue(schemaResolver.checkAndSetSupportPartitionInformation(connection));
    } catch (Exception e) {
        System.out.println(e.getMessage());
        Assert.fail();
    }
}
/**
 * Reacts to Kafka metric updates, forwarding only total-sst-files-size metrics
 * (keyed by task id and query id) to the SST-size handler.
 */
@Override
public void metricChange(final KafkaMetric metric) {
    final boolean isSstFilesSize = metric.metricName().name().equals("total-sst-files-size");
    if (!isSstFilesSize) {
        return;
    }
    // Missing task-id tag degrades to the empty string rather than null.
    final String taskId = metric.metricName().tags().getOrDefault(TASK_ID_TAG, "");
    handleNewSstFilesSizeMetric(metric, taskId, getQueryId(metric));
}
// Two per-store SST-size metrics for the same task (2 + 5) must be aggregated
// into a single task-level gauge reporting 7.
@Test
public void shouldCombineStorageMetricsToTaskMetric() {
    // When:
    listener.metricChange(mockMetric(
        KAFKA_METRIC_GROUP, KAFKA_METRIC_NAME, BigInteger.valueOf(2),
        ImmutableMap.of("store-id", "s1", "task-id", "t1", "thread-id", TRANSIENT_THREAD_ID))
    );
    listener.metricChange(mockMetric(
        KAFKA_METRIC_GROUP, KAFKA_METRIC_NAME, BigInteger.valueOf(5),
        ImmutableMap.of("store-id", "s2", "task-id", "t1", "thread-id", TRANSIENT_THREAD_ID))
    );
    // Then:
    final Gauge<?> taskGauge = verifyAndGetRegisteredMetric(TASK_STORAGE_METRIC,
        ImmutableMap.of("task-id", "t1", "query-id", "blahblah_4", "logical_cluster_id", "logical-id"));
    final Object taskValue = taskGauge.value(null, 0);
    assertThat(taskValue, equalTo(BigInteger.valueOf(7)));
}
public String getURI() { COSBase base = action.getDictionaryObject(COSName.URI); if (base instanceof COSString) { byte[] bytes = ((COSString) base).getBytes(); if (bytes.length >= 2) { // UTF-16 (BE) if ((bytes[0] & 0xFF) == 0xFE && (bytes[1] & 0xFF) == 0xFF) { return action.getString(COSName.URI); } // UTF-16 (LE) if ((bytes[0] & 0xFF) == 0xFF && (bytes[1] & 0xFF) == 0xFE) { return action.getString(COSName.URI); } } return new String(bytes, StandardCharsets.UTF_8); } return null; }
// A URI stored as a UTF-16 (big-endian, BOM-prefixed) COSString must decode to
// the expected plain URL.
@Test
void testUTF16BEURI() throws IOException {
    PDActionURI actionURI = new PDActionURI();
    // found in govdocs file 534948.pdf
    COSString utf16URI = COSString.parseHex("FEFF0068007400740070003A002F002F00770077" +
            "0077002E006E00610070002E006500640075002F0063006100740061006C006F006700" +
            "2F00310031003100340030002E00680074006D006C");
    actionURI.getCOSObject().setItem(COSName.URI, utf16URI);
    assertEquals("http://www.nap.edu/catalog/11140.html", actionURI.getURI());
}
/**
 * Creates a Hudi table in this catalog: validates database/table existence,
 * rejects views, derives the Avro schema and record/partition key options from
 * the resolved Flink schema, initializes the table path on storage and persists
 * the catalog-side table properties.
 *
 * @throws TableAlreadyExistException if the table exists and ignoreIfExists is false
 * @throws DatabaseNotExistException  if the target database is missing
 * @throws CatalogException           on missing primary key or storage init failure
 */
@Override
public void createTable(ObjectPath tablePath, CatalogBaseTable catalogTable, boolean ignoreIfExists)
        throws TableAlreadyExistException, DatabaseNotExistException, CatalogException {
    if (!databaseExists(tablePath.getDatabaseName())) {
        throw new DatabaseNotExistException(getName(), tablePath.getDatabaseName());
    }
    if (tableExists(tablePath)) {
        if (ignoreIfExists) {
            return;
        } else {
            throw new TableAlreadyExistException(getName(), tablePath);
        }
    }
    if (catalogTable instanceof CatalogView) {
        throw new UnsupportedOperationException(
                "Hudi catalog doesn't support to CREATE VIEW.");
    }
    ResolvedCatalogTable resolvedTable = (ResolvedCatalogTable) catalogTable;
    final String tablePathStr = inferTablePath(catalogPathStr, tablePath);
    Map<String, String> options = applyOptionsHook(tablePathStr, catalogTable.getOptions());
    Configuration conf = Configuration.fromMap(options);
    conf.setString(FlinkOptions.PATH, tablePathStr);
    ResolvedSchema resolvedSchema = resolvedTable.getResolvedSchema();
    // A record key must come from either the schema's primary key or an explicit option.
    if (!resolvedSchema.getPrimaryKey().isPresent() && !conf.containsKey(RECORD_KEY_FIELD.key())) {
        throw new CatalogException("Primary key definition is missing");
    }
    final String avroSchema = AvroSchemaConverter.convertToSchema(
            resolvedSchema.toPhysicalRowDataType().getLogicalType(),
            AvroSchemaUtils.getAvroRecordQualifiedName(tablePath.getObjectName())).toString();
    conf.setString(FlinkOptions.SOURCE_AVRO_SCHEMA, avroSchema);
    // stores two copies of options:
    // - partition keys
    // - primary keys
    // because the HoodieTableMetaClient is a heavy impl, we try to avoid initializing it
    // when calling #getTable.
    //set pk
    if (resolvedSchema.getPrimaryKey().isPresent() && !conf.containsKey(FlinkOptions.RECORD_KEY_FIELD.key())) {
        final String pkColumns = String.join(",", resolvedSchema.getPrimaryKey().get().getColumns());
        conf.setString(RECORD_KEY_FIELD, pkColumns);
    }
    if (resolvedSchema.getPrimaryKey().isPresent()) {
        options.put(TableOptionProperties.PK_CONSTRAINT_NAME, resolvedSchema.getPrimaryKey().get().getName());
    }
    if (conf.containsKey(RECORD_KEY_FIELD.key())) {
        options.put(TableOptionProperties.PK_COLUMNS, conf.getString(RECORD_KEY_FIELD));
    }
    // check preCombine
    StreamerUtil.checkPreCombineKey(conf, resolvedSchema.getColumnNames());
    if (resolvedTable.isPartitioned()) {
        final String partitions = String.join(",", resolvedTable.getPartitionKeys());
        conf.setString(FlinkOptions.PARTITION_PATH_FIELD, partitions);
        options.put(TableOptionProperties.PARTITION_COLUMNS, partitions);
        // Multiple record keys or multiple partition fields require a complex key generator.
        final String[] pks = conf.getString(FlinkOptions.RECORD_KEY_FIELD).split(",");
        boolean complexHoodieKey = pks.length > 1 || resolvedTable.getPartitionKeys().size() > 1;
        StreamerUtil.checkKeygenGenerator(complexHoodieKey, conf);
    } else {
        conf.setString(FlinkOptions.KEYGEN_CLASS_NAME.key(), NonpartitionedAvroKeyGenerator.class.getName());
    }
    conf.setString(FlinkOptions.TABLE_NAME, tablePath.getObjectName());
    try {
        StreamerUtil.initTableIfNotExists(conf);
        // prepare the non-table-options properties
        if (!StringUtils.isNullOrEmpty(resolvedTable.getComment())) {
            options.put(TableOptionProperties.COMMENT, resolvedTable.getComment());
        }
        TableOptionProperties.createProperties(tablePathStr, hadoopConf, options);
    } catch (IOException e) {
        throw new CatalogException(String.format("Initialize table path %s exception.", tablePathStr), e);
    }
}
@Test public void testCreateTable() throws Exception { ObjectPath tablePath = new ObjectPath(TEST_DEFAULT_DATABASE, "tb1"); // test create table catalog.createTable(tablePath, EXPECTED_CATALOG_TABLE, true); // test table exist assertTrue(catalog.tableExists(tablePath)); // validate the full name of table create schema HoodieTableConfig tableConfig = StreamerUtil.getTableConfig( catalog.getTable(tablePath).getOptions().get(FlinkOptions.PATH.key()), HadoopConfigurations.getHadoopConf(new Configuration())).get(); Option<org.apache.avro.Schema> tableCreateSchema = tableConfig.getTableCreateSchema(); assertTrue(tableCreateSchema.isPresent(), "Table should have been created"); assertThat(tableCreateSchema.get().getFullName(), is("hoodie.tb1.tb1_record")); // test create exist table assertThrows(TableAlreadyExistException.class, () -> catalog.createTable(tablePath, EXPECTED_CATALOG_TABLE, false)); // validate key generator for partitioned table HoodieTableMetaClient metaClient = createMetaClient( new HadoopStorageConfiguration(HadoopConfigurations.getHadoopConf(new Configuration())), catalog.inferTablePath(catalogPathStr, tablePath)); String keyGeneratorClassName = metaClient.getTableConfig().getKeyGeneratorClassName(); assertEquals(keyGeneratorClassName, SimpleAvroKeyGenerator.class.getName()); // validate single key and multiple partition for partitioned table ObjectPath singleKeyMultiplePartitionPath = new ObjectPath(TEST_DEFAULT_DATABASE, "tb_skmp" + System.currentTimeMillis()); final ResolvedCatalogTable singleKeyMultiplePartitionTable = new ResolvedCatalogTable( CatalogTable.of( Schema.newBuilder().fromResolvedSchema(CREATE_TABLE_SCHEMA).build(), "test", Lists.newArrayList("par1", "par2"), EXPECTED_OPTIONS), CREATE_TABLE_SCHEMA ); catalog.createTable(singleKeyMultiplePartitionPath, singleKeyMultiplePartitionTable, false); metaClient = createMetaClient( new HadoopStorageConfiguration(HadoopConfigurations.getHadoopConf(new Configuration())), 
catalog.inferTablePath(catalogPathStr, singleKeyMultiplePartitionPath)); keyGeneratorClassName = metaClient.getTableConfig().getKeyGeneratorClassName(); assertThat(keyGeneratorClassName, is(ComplexAvroKeyGenerator.class.getName())); // validate multiple key and single partition for partitioned table ObjectPath multipleKeySinglePartitionPath = new ObjectPath(TEST_DEFAULT_DATABASE, "tb_mksp" + System.currentTimeMillis()); final ResolvedCatalogTable multipleKeySinglePartitionTable = new ResolvedCatalogTable( CatalogTable.of( Schema.newBuilder().fromResolvedSchema(CREATE_MULTI_KEY_TABLE_SCHEMA).build(), "test", Lists.newArrayList("par1"), EXPECTED_OPTIONS), CREATE_TABLE_SCHEMA ); catalog.createTable(multipleKeySinglePartitionPath, multipleKeySinglePartitionTable, false); metaClient = createMetaClient( new HadoopStorageConfiguration(HadoopConfigurations.getHadoopConf(new Configuration())), catalog.inferTablePath(catalogPathStr, singleKeyMultiplePartitionPath)); keyGeneratorClassName = metaClient.getTableConfig().getKeyGeneratorClassName(); assertThat(keyGeneratorClassName, is(ComplexAvroKeyGenerator.class.getName())); // validate key generator for non partitioned table ObjectPath nonPartitionPath = new ObjectPath(TEST_DEFAULT_DATABASE, "tb"); final ResolvedCatalogTable nonPartitionCatalogTable = new ResolvedCatalogTable( CatalogTable.of( Schema.newBuilder().fromResolvedSchema(CREATE_TABLE_SCHEMA).build(), "test", new ArrayList<>(), EXPECTED_OPTIONS), CREATE_TABLE_SCHEMA ); catalog.createTable(nonPartitionPath, nonPartitionCatalogTable, false); metaClient = createMetaClient( new HadoopStorageConfiguration(HadoopConfigurations.getHadoopConf(new Configuration())), catalog.inferTablePath(catalogPathStr, nonPartitionPath)); keyGeneratorClassName = metaClient.getTableConfig().getKeyGeneratorClassName(); assertEquals(keyGeneratorClassName, NonpartitionedAvroKeyGenerator.class.getName()); }
@Override
public Service service(String uid) {
    // Reject a null/empty identifier up front, then delegate the lookup
    // entirely to the backing Kubernetes service store.
    final boolean hasUid = !Strings.isNullOrEmpty(uid);
    checkArgument(hasUid, ERR_NULL_SERVICE_UID);
    return k8sServiceStore.service(uid);
}
// A stored service is retrievable by its UID; an unknown UID yields null.
@Test
public void testGetServiceByUid() {
    createBasicServices();
    assertNotNull("Service did not match", target.service(SERVICE_UID));
    assertNull("Service did not match", target.service(UNKNOWN_UID));
}
@Override
public void writeTo(Integer entity, ByteBuf target) {
    // Silently skip the write when either the value or the destination
    // buffer is absent; otherwise encode the boxed int as 4 bytes.
    if (entity != null && target != null) {
        target.writeInt(entity);
    }
}
// Round-trips a small value and Integer.MAX_VALUE through the ping-response
// writer and reads each back as a 4-byte int.
@Test
public void testWritePingResponseAndParse() {
    ByteBuf buf = Unpooled.buffer();
    PingResponseDataWriter writer = new PingResponseDataWriter();
    int small = 120;
    writer.writeTo(small, buf);
    assertThat(buf.readableBytes()).isGreaterThanOrEqualTo(4);
    assertThat(buf.readInt()).isEqualTo(small);
    int big = Integer.MAX_VALUE;
    writer.writeTo(big, buf);
    assertThat(buf.readableBytes()).isGreaterThanOrEqualTo(4);
    assertThat(buf.readInt()).isEqualTo(big);
    buf.release();
}
/**
 * Connects the given source steps of the parent transformation to this
 * mapping-input step by creating one blocking row set per non-mapping source.
 *
 * @param sourceSteps steps feeding rows into the mapping; must not be null
 * @param valueRenames field renames applied inside the mapping; must not be null
 * @param mappingStepname name of this mapping step; required when sourceSteps is non-empty
 */
public void setConnectorSteps( StepInterface[] sourceSteps, List<MappingValueRename> valueRenames, String mappingStepname ) {
  if ( sourceSteps == null ) {
    throw new IllegalArgumentException( BaseMessages
      .getString( PKG, "MappingInput.Exception.IllegalArgumentSourceStep" ) );
  }
  if ( valueRenames == null ) {
    throw new IllegalArgumentException( BaseMessages
      .getString( PKG, "MappingInput.Exception.IllegalArgumentValueRename" ) );
  }
  if ( sourceSteps.length != 0 ) {
    if ( mappingStepname == null ) {
      throw new IllegalArgumentException( BaseMessages
        .getString( PKG, "MappingInput.Exception.IllegalArgumentStepName" ) );
    }
  }
  for ( StepInterface sourceStep : sourceSteps ) {
    // We don't want to add the mapping-to-mapping rowset
    //
    if ( !sourceStep.isMapping() ) {
      // OK, before we leave, make sure there is a rowset that covers the path to this target step.
      // We need to create a new RowSet and add it to the Input RowSets of the target step
      //
      BlockingRowSet rowSet = new BlockingRowSet( getTransMeta().getSizeRowset() );
      // This is always a single copy, both for source and target...
      //
      rowSet.setThreadNameFromToCopy( sourceStep.getStepname(), 0, mappingStepname, 0 );
      // Make sure to connect it to both sides...
      //
      sourceStep.addRowSetToOutputRowSets( rowSet );
      // NOTE(review): identifyErrorOutput() is invoked between wiring the two
      // sides — presumably to re-route error rows first; confirm the ordering
      // is intentional.
      sourceStep.identifyErrorOutput();
      addRowSetToInputRowSets( rowSet );
    }
  }
  data.valueRenames = valueRenames;
  data.sourceSteps = sourceSteps;
}
// Wires a Validator source step (with error handling configured) into
// MappingInput via setConnectorSteps and asserts the source's output row set
// count afterwards.
@Test
public void testSetConnectorSteps() {
    when( stepMockHelper.transMeta.getSizeRowset() ).thenReturn( 1 );
    MappingInputData mappingInputData = new MappingInputData();
    MappingInput mappingInput =
        new MappingInput( stepMockHelper.stepMeta, mappingInputData, 0, stepMockHelper.transMeta, stepMockHelper.trans );
    mappingInput.init( stepMockHelper.initStepMetaInterface, mappingInputData );
    ValidatorData validatorData = new ValidatorData();
    Validator previousStep =
        new Validator( stepMockHelper.stepMeta, validatorData, 0, stepMockHelper.transMeta, stepMockHelper.trans );
    when( stepMockHelper.stepMeta.isDoingErrorHandling() ).thenReturn( true );
    StepErrorMeta stepErrorMeta = mock( StepErrorMeta.class );
    when( stepErrorMeta.getTargetStep() ).thenReturn( stepMockHelper.stepMeta );
    when( stepMockHelper.stepMeta.getName() ).thenReturn( stepName );
    when( stepMockHelper.stepMeta.getStepErrorMeta() ).thenReturn( stepErrorMeta );
    StepInterface[] si = new StepInterface[] { previousStep };
    mappingInput.setConnectorSteps( si, Collections.emptyList(), stepName );
    assertEquals( previousStep.getOutputRowSets().size(), 0 );
}
/**
 * Executes the plugin via the WASM "execute" export. When the export is
 * missing, responds with HTTP 500 and a WASM_FUNC_NOT_FOUND error payload
 * instead of invoking the guest code.
 */
@Override
public Mono<Void> execute(final ServerWebExchange exchange, final ShenyuPluginChain chain) {
    return super.getWasmExtern(EXECUTE_METHOD_NAME).map(execute -> {
        // Marshal the request into the WASM guest and run the exported logic.
        final Long argumentId = callWASI(exchange, chain, execute);
        return doExecute(exchange, chain, argumentId);
    }).orElseGet(() -> {
        LOG.error("{} function not found in {}", EXECUTE_METHOD_NAME, super.getWasmName());
        exchange.getResponse().setStatusCode(HttpStatus.INTERNAL_SERVER_ERROR);
        Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.WASM_FUNC_NOT_FOUND);
        return WebFluxResultUtils.result(exchange, error);
    });
}
// The plugin's execute completes and delegates to the plugin chain exactly once.
@Test
public void executePluginTest() {
    StepVerifier.create(rustWasmPlugin.execute(exchange, shenyuPluginChain)).expectSubscription().verifyComplete();
    verify(shenyuPluginChain).execute(exchange);
}
/**
 * Returns the next byte as an unsigned value in [0, 255], or END_OF_STREAM
 * when the underlying source is exhausted.
 */
@Override
public int read() {
    // Refill when the buffer has never been filled (UNSET) or is consumed.
    if (nextChar == UNSET || nextChar >= buf.length) {
        fill();
        if (nextChar == UNSET) {
            // fill() found no more data.
            return END_OF_STREAM;
        }
    }
    byte signedByte = buf[nextChar];
    nextChar++;
    // Mask so the byte is returned unsigned, per InputStream.read() convention.
    return signedByte & 0xFF;
}
// Multiple iterator lines are concatenated with '\n' separators.
@Test
void read_from_ClosableIterator_with_several_lines() throws IOException {
    assertThat(read(create("line1", "line2", "line3"))).isEqualTo("line1" + '\n' + "line2" + '\n' + "line3");
}
/**
 * Picks the next worker in round-robin order — from compute nodes when
 * usedComputeNode is set, otherwise from backends — marks it selected, and
 * returns its id.
 *
 * @throws NonRecoverableException when no available worker exists
 */
@Override
public long selectNextWorker() throws NonRecoverableException {
    ComputeNode worker;
    if (usedComputeNode) {
        worker = getNextWorker(availableID2ComputeNode, DefaultWorkerProvider::getNextComputeNodeIndex);
    } else {
        worker = getNextWorker(availableID2Backend, DefaultWorkerProvider::getNextBackendIndex);
    }
    if (worker == null) {
        // NOTE(review): presumably always throws; the checkNotNull below then
        // acts as a safety net for static analysis — confirm.
        reportWorkerNotFoundException();
    }
    Preconditions.checkNotNull(worker);
    selectWorkerUnchecked(worker.getId());
    return worker.getId();
}
// selectNextWorker round-robins over compute nodes when preferred, falls back
// to backends otherwise, and throws SchedulerException when nothing is available.
@Test
public void testSelectNextWorker() throws UserException {
    DefaultWorkerProvider workerProvider;
    workerProvider =
        new DefaultWorkerProvider(id2Backend, id2ComputeNode, availableId2Backend, availableId2ComputeNode, true);
    testSelectNextWorkerHelper(workerProvider, availableId2ComputeNode);
    workerProvider =
        new DefaultWorkerProvider(id2Backend, id2ComputeNode, availableId2Backend, ImmutableMap.of(), true);
    testSelectNextWorkerHelper(workerProvider, availableId2Backend);
    workerProvider =
        new DefaultWorkerProvider(id2Backend, id2ComputeNode, availableId2Backend, availableId2ComputeNode, false);
    testSelectNextWorkerHelper(workerProvider, availableId2Backend);
    workerProvider =
        new DefaultWorkerProvider(id2Backend, id2ComputeNode, ImmutableMap.of(), ImmutableMap.of(), false);
    DefaultWorkerProvider finalWorkerProvider = workerProvider;
    Assert.assertThrows(SchedulerException.class, finalWorkerProvider::selectNextWorker);
}
public final synchronized List<E> getAllAddOns() {
    // Cache the owner name used in both log statements.
    final String owner = getClass().getName();
    Logger.d(mTag, "getAllAddOns has %d add on for %s", mAddOns.size(), owner);
    // Lazily populate the add-on cache on first access.
    if (mAddOns.size() == 0) {
        loadAddOns();
    }
    Logger.d(mTag, "getAllAddOns will return %d add on for %s", mAddOns.size(), owner);
    // Hand out a read-only view so callers cannot mutate the cache.
    return unmodifiableList(mAddOns);
}
// On release (non-debug) builds the factory must expose only stable add-ons.
@Test
public void testFiltersDebugAddOnOnReleaseBuilds() throws Exception {
    TestableAddOnsFactory factory = new TestableAddOnsFactory(false);
    List<TestAddOn> list = factory.getAllAddOns();
    Assert.assertEquals(STABLE_THEMES_COUNT, list.size());
}
/**
 * Returns the arithmetic mean of the array.
 * Note: an empty array yields NaN (0.0 / 0), not an exception.
 */
public static double mean(int[] array) {
    return (double) sum(array) / array.length;
}
// mean over 1..9 must be exactly 5 (within floating tolerance).
@Test
public void testMean_doubleArr() {
    System.out.println("mean");
    double[] data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
    assertEquals(5, MathEx.mean(data), 1E-6);
}
/**
 * Reads an integer of the requested format from the underlying byte array at
 * the given offset.
 *
 * @param formatType one of the FORMAT_* int formats (signed/unsigned, LE/BE)
 * @param offset index of the first byte of the value
 * @return the decoded value, or null when the value does not fit in the data
 *         or the format is unknown
 */
@Nullable
public Integer getIntValue(@IntFormat final int formatType, @IntRange(from = 0) final int offset) {
    // Bounds check: the whole value must lie inside the data.
    if ((offset + getTypeLen(formatType)) > size()) return null;

    return switch (formatType) {
        case FORMAT_UINT8 -> unsignedByteToInt(mValue[offset]);
        case FORMAT_UINT16_LE -> unsignedBytesToInt(mValue[offset], mValue[offset + 1]);
        case FORMAT_UINT16_BE -> unsignedBytesToInt(mValue[offset + 1], mValue[offset]);
        case FORMAT_UINT24_LE -> unsignedBytesToInt(
                mValue[offset], mValue[offset + 1], mValue[offset + 2], (byte) 0
        );
        case FORMAT_UINT24_BE -> unsignedBytesToInt(
                mValue[offset + 2], mValue[offset + 1], mValue[offset], (byte) 0
        );
        case FORMAT_UINT32_LE -> unsignedBytesToInt(
                mValue[offset], mValue[offset + 1], mValue[offset + 2], mValue[offset + 3]
        );
        case FORMAT_UINT32_BE -> unsignedBytesToInt(
                mValue[offset + 3], mValue[offset + 2], mValue[offset + 1], mValue[offset]
        );
        case FORMAT_SINT8 -> unsignedToSigned(unsignedByteToInt(mValue[offset]), 8);
        case FORMAT_SINT16_LE -> unsignedToSigned(unsignedBytesToInt(mValue[offset], mValue[offset + 1]), 16);
        case FORMAT_SINT16_BE -> unsignedToSigned(unsignedBytesToInt(mValue[offset + 1], mValue[offset]), 16);
        case FORMAT_SINT24_LE -> unsignedToSigned(unsignedBytesToInt(
                mValue[offset], mValue[offset + 1], mValue[offset + 2], (byte) 0
        ), 24);
        // FIX: assemble the 24-bit BE value exactly like FORMAT_UINT24_BE
        // (low byte first, zero high byte). The previous ordering passed the
        // zero byte first, which left-shifted the value by 8 bits so the
        // 24-bit sign conversion inspected the wrong bit and produced wrong
        // magnitudes (compare with the UINT24_BE case and its test).
        case FORMAT_SINT24_BE -> unsignedToSigned(unsignedBytesToInt(
                mValue[offset + 2], mValue[offset + 1], mValue[offset], (byte) 0
        ), 24);
        case FORMAT_SINT32_LE -> unsignedToSigned(unsignedBytesToInt(
                mValue[offset], mValue[offset + 1], mValue[offset + 2], mValue[offset + 3]
        ), 32);
        case FORMAT_SINT32_BE -> unsignedToSigned(unsignedBytesToInt(
                mValue[offset + 3], mValue[offset + 2], mValue[offset + 1], mValue[offset]
        ), 32);
        default -> null;
    };
}
// Big-endian 24-bit read: bytes {01,02,03} decode to 0x010203.
@Test
public void getValue_UINT24_BE() {
    final Data data = new Data(new byte[] { 0x01, 0x02, 0x03 });
    final int value = data.getIntValue(Data.FORMAT_UINT24_BE, 0);
    assertEquals(0x010203, value);
}
// Records a product view by delegating to the statistic counter; the flag
// presumably controls whether the visit count is incremented — see
// StatisticCount#view for the exact semantics.
public void view(final boolean canAddViewCount) {
    this.statisticCount.view(canAddViewCount);
}
// Viewing a product with counting enabled increments visitedCount by one.
@Test
void 상품_조회수를_올린다() {
    // given
    Product product = 상품_생성();
    Integer beforeViewCount = product.getStatisticCount().getVisitedCount();
    // when
    product.view(true);
    // then
    assertThat(product.getStatisticCount().getVisitedCount())
        .isEqualTo(beforeViewCount + 1);
}
/**
 * Tokenizes a string like a POSIX shell: unquoted whitespace separates tokens,
 * single and double quotes group characters, and backslash escapes the next
 * character except inside single quotes (where it is literal). A null input
 * yields an empty list; a trailing unmatched backslash is dropped.
 */
public static List<String> shellSplit(CharSequence string) {
    final List<String> result = new ArrayList<>();
    if (string == null) {
        return result;
    }
    StringBuilder token = new StringBuilder();
    boolean pendingEscape = false;
    boolean inQuotes = false;
    char activeQuote = ' ';
    final int len = string.length();
    for (int idx = 0; idx < len; idx++) {
        final char ch = string.charAt(idx);
        if (pendingEscape) {
            // The preceding backslash makes this character literal.
            token.append(ch);
            pendingEscape = false;
            continue;
        }
        if (ch == '\\' && !(inQuotes && activeQuote == '\'')) {
            // Start an escape, except inside single quotes.
            pendingEscape = true;
        } else if (inQuotes && ch == activeQuote) {
            // Closing quote of the active kind ends the quoted region.
            inQuotes = false;
        } else if (!inQuotes && (ch == '\'' || ch == '"')) {
            inQuotes = true;
            activeQuote = ch;
        } else if (!inQuotes && Character.isWhitespace(ch)) {
            // Unquoted whitespace terminates the current token, if non-empty.
            if (token.length() > 0) {
                result.add(token.toString());
                token = new StringBuilder();
            }
        } else {
            token.append(ch);
        }
    }
    if (token.length() > 0) {
        result.add(token.toString());
    }
    return result;
}
// A double-quoted string with an embedded space is a single token.
@Test
public void doubleQuotes() {
    assertEquals(List.of("hello world"), StringUtils.shellSplit("\"hello world\""));
}
/**
 * Resolves the effective client address for a request. When an
 * X-Forwarded-For header is present and the direct peer address lies inside
 * one of the trusted subnets, the header value is returned verbatim;
 * otherwise the direct peer address is used.
 */
public static String getRemoteAddrFromRequest(Request request, Set<IpSubnet> trustedSubnets) {
    final String remoteAddr = request.getRemoteAddr();
    final String XForwardedFor = request.getHeader("X-Forwarded-For");
    if (XForwardedFor != null) {
        for (IpSubnet s : trustedSubnets) {
            try {
                if (s.contains(remoteAddr)) {
                    // Request came from trusted source, trust X-Forwarded-For and return it
                    return XForwardedFor;
                }
            } catch (UnknownHostException e) {
                // ignore silently, probably not worth logging
            }
        }
    }
    // Request did not come from a trusted source, or the X-Forwarded-For header was not set
    return remoteAddr;
}
// X-Forwarded-For must be ignored when the direct peer is outside the
// trusted subnets; the peer address wins.
@Test
public void getRemoteAddrFromRequestReturnsClientAddressWithXForwardedForHeaderFromUntrustedNetwork() throws Exception {
    final Request request = mock(Request.class);
    when(request.getRemoteAddr()).thenReturn("192.168.0.1");
    when(request.getHeader("X-Forwarded-For")).thenReturn("192.168.100.42");
    final String s = RestTools.getRemoteAddrFromRequest(request, Collections.singleton(new IpSubnet("127.0.0.0/8")));
    assertThat(s).isEqualTo("192.168.0.1");
}
// Builds a TARGET-usage MiningField that mirrors the given data field's name.
static MiningField getTargetMiningField(final DataField dataField) {
    final MiningField miningField = new MiningField();
    miningField.setUsageType(MiningField.UsageType.TARGET);
    miningField.setName(dataField.getName());
    return miningField;
}
// The produced mining field copies the data field's name and is marked TARGET.
@Test
void getTargetMiningField() {
    final DataField dataField = new DataField();
    dataField.setName("FIELD_NAME");
    final MiningField retrieved = KiePMMLUtil.getTargetMiningField(dataField);
    assertThat(retrieved.getName()).isEqualTo(dataField.getName());
    assertThat(retrieved.getUsageType()).isEqualTo(MiningField.UsageType.TARGET);
}
@Override
public void updateService(String serviceName, String groupName, float protectThreshold) throws NacosException {
    // Convenience overload: wrap the scalar arguments in a Service and
    // delegate with the default (none) selector.
    final Service service = new Service();
    service.setGroupName(groupName);
    service.setName(serviceName);
    service.setProtectThreshold(protectThreshold);
    updateService(service, new NoneSelector());
}
// updateService(Service, selector) delegates exactly once to the server proxy.
@Test
void testUpdateService3() throws NacosException {
    //given
    Service service = new Service();
    AbstractSelector selector = new NoneSelector();
    //when
    nacosNamingMaintainService.updateService(service, selector);
    //then
    verify(serverProxy, times(1)).updateService(service, selector);
}
// Resolves this field path for the record at the given ordinal, starting from
// the root type with a zero position offset.
Object[] findValues(int ordinal) {
    return getAllValues(ordinal, type, 0);
}
// Field paths that traverse object references resolve to the leaf value, both
// with path auto-expansion and with the fully spelled-out path.
@Test
public void testMultiFieldReference() throws Exception {
    IntegerReference simpleValue = new IntegerReference();
    simpleValue.id = 3;
    MultiValue multiValue = new MultiValue();
    multiValue.id = 2;
    multiValue.intRef = simpleValue;
    ObjectReferenceToMultiValue referenceToMultiValue = new ObjectReferenceToMultiValue();
    referenceToMultiValue.multiValue = multiValue;
    referenceToMultiValue.refId = 1;
    objectMapper.add(referenceToMultiValue);
    StateEngineRoundTripper.roundTripSnapshot(writeStateEngine, readStateEngine);
    // with auto-expand
    FieldPath fieldPath = new FieldPath(readStateEngine, "ObjectReferenceToMultiValue", "multiValue.intRef");
    Object[] values = fieldPath.findValues(0);
    Assert.assertEquals(3, ((int) values[0]));
    fieldPath = new FieldPath(readStateEngine, "ObjectReferenceToMultiValue", "multiValue.intRef.id.value");
    values = fieldPath.findValues(0);
    Assert.assertEquals(3, ((int) values[0]));
    //without auto-complete but full path given
    fieldPath = new FieldPath(readStateEngine, "ObjectReferenceToMultiValue", "multiValue.intRef.id.value", false);
    values = fieldPath.findValues(0);
    Assert.assertEquals(3, ((int) values[0]));
}
// Identifier under which this table provider is registered.
@Override
public String getTableType() {
    return "pubsub";
}
// The provider registers itself under the "pubsub" table type.
@Test
public void testTableTypePubsub() {
    PubsubTableProvider provider = new PubsubTableProvider();
    assertEquals("pubsub", provider.getTableType());
}
/**
 * CLI entry point: translates schema files between formats (default
 * pdsc → pdl). Expects three positional arguments: resolverPath, sourceRoot,
 * destinationPath. Exits non-zero on argument errors or unreadable/unwritable
 * directories.
 */
public static void main(String[] args) throws Exception {
  try {
    final CommandLineParser parser = new GnuParser();
    CommandLine cl = parser.parse(OPTIONS, args);
    if (cl.hasOption('h')) {
      help();
      System.exit(0);
    }
    // Option defaults: source/dest formats fall back to pdsc/pdl file types.
    String sourceFormat = cl.getOptionValue('s', SchemaParser.FILETYPE).trim();
    String destFormat = cl.getOptionValue('d', PdlSchemaParser.FILETYPE).trim();
    boolean keepOriginal = cl.hasOption('o');
    String preserveSourceCmd = cl.getOptionValue('p');
    boolean skipVerification = cl.hasOption('k');
    boolean forcePdscFullyQualifiedNames = cl.hasOption('q');
    String[] cliArgs = cl.getArgs();
    if (cliArgs.length != 3) {
      LOGGER.error("Missing arguments, expected 3 ([resolverPath] [sourceRoot] [destinationPath]), got " + cliArgs.length);
      help();
      System.exit(1);
    }
    int i = 0;
    // The resolver path may be given inline or indirectly via an @argfile.
    String resolverPaths = RestLiToolsUtils.readArgFromFileIfNeeded(cliArgs[i++]);
    String sourcePath = cliArgs[i++];
    String destPath = cliArgs[i++];
    File sourceDir = new File(sourcePath);
    File destDir = new File(destPath);
    if (!sourceDir.exists() || !sourceDir.canRead()) {
      LOGGER.error("Source directory does not exist or cannot be read: " + sourceDir.getAbsolutePath());
      System.exit(1);
    }
    destDir.mkdirs();
    if (!destDir.exists() || !destDir.canWrite()) {
      LOGGER.error("Destination directory does not exist or cannot be written to: " + destDir.getAbsolutePath());
      System.exit(1);
    }
    SchemaFormatTranslator translator = new SchemaFormatTranslator(
        resolverPaths, sourceDir, destDir, sourceFormat, destFormat, keepOriginal, preserveSourceCmd,
        skipVerification, forcePdscFullyQualifiedNames);
    translator.translateFiles();
  } catch (ParseException e) {
    LOGGER.error("Invalid arguments: " + e.getMessage());
    help();
    System.exit(1);
  }
}
// The translator accepts the resolver path via an @argfile and produces
// schemas equivalent to the originals.
@Test(dataProvider = "fullClassName")
public void testTranslatorWorksWithArgFile(String packageName, String className) throws Exception {
    File tempDir = Files.createTempDirectory("restli").toFile();
    File argFile = new File(tempDir, "resolverPath");
    Files.write(argFile.toPath(), Collections.singletonList(RESOLVER_DIR));
    SchemaFormatTranslator.main(
        new String[]{"-o", String.format("@%s", argFile.toPath()), SOURCE_ROOT, tempDir.getAbsolutePath()});
    MultiFormatDataSchemaResolver sourceResolver = MultiFormatDataSchemaResolver.withBuiltinFormats(RESOLVER_DIR);
    MultiFormatDataSchemaResolver translatedResolver =
        MultiFormatDataSchemaResolver.withBuiltinFormats(tempDir.getAbsolutePath() + File.pathSeparator + EXTERNAL_RESOURCES);
    assertSameSchemas(packageName + "." + className, sourceResolver, translatedResolver);
}
// Looks up the jar's entry class from its manifest, preferring the
// assembler-class attribute over the main-class attribute.
@VisibleForTesting
static Optional<String> findEntryClass(File jarFile) throws IOException {
    return findFirstManifestAttribute(
        jarFile,
        PackagedProgram.MANIFEST_ATTRIBUTE_ASSEMBLER_CLASS,
        PackagedProgram.MANIFEST_ATTRIBUTE_MAIN_CLASS);
}
// The test job jar's manifest resolves to the TestJob class name.
@Test
void testFindEntryClassWithTestJobJar() throws IOException {
    File jarFile = TestJob.getTestJobJar();
    Optional<String> entryClass = JarManifestParser.findEntryClass(jarFile);
    assertThat(entryClass).get().isEqualTo(TestJob.class.getCanonicalName());
}
// Returns the currently wrapped Writable (presumably populated by the
// matching set() call).
public Writable get() {
    return instance;
}
// get() returns the exact instance previously passed to set().
@Test
public void testGet() throws Exception {
    Foo foo = new Foo();
    FooGenericWritable generic = new FooGenericWritable();
    generic.set(foo);
    assertEquals(foo, generic.get());
}
/**
 * Resolves the frontend's local address. Priority: an explicitly configured
 * frontend_address; then the "-host_type" CLI flag (IP or FQDN); then, for
 * existing clusters, the hostType recorded in the ROLE file. New clusters
 * (no ROLE file) default to IP.
 */
public static void init(String[] args) throws UnknownHostException {
  localAddr = null;
  if (!"0.0.0.0".equals(Config.frontend_address)) {
    if (!InetAddressValidator.getInstance().isValid(Config.frontend_address)) {
      throw new UnknownHostException("invalid frontend_address: " + Config.frontend_address);
    }
    localAddr = InetAddress.getByName(Config.frontend_address);
    LOG.info("use configured address. {}", localAddr);
    return;
  }
  List<InetAddress> hosts = NetUtils.getHosts();
  if (hosts.isEmpty()) {
    LOG.error("fail to get localhost");
    System.exit(-1);
  }
  // Parse the optional "-host_type FQDN|IP" command-line flag.
  HostType specifiedHostType = HostType.NOT_SPECIFIED;
  for (int i = 0; i < args.length; i++) {
    if (args[i].equalsIgnoreCase("-host_type")) {
      if (i + 1 >= args.length) {
        System.out.println("-host_type need parameter FQDN or IP");
        System.exit(-1);
      }
      String inputHostType = args[i + 1];
      try {
        inputHostType = inputHostType.toUpperCase();
        specifiedHostType = HostType.valueOf(inputHostType);
      } catch (Exception e) {
        System.out.println("-host_type need parameter FQDN or IP");
        System.exit(-1);
      }
    }
  }
  if (specifiedHostType == HostType.FQDN) {
    initAddrUseFqdn(hosts);
    return;
  }
  if (specifiedHostType == HostType.IP) {
    initAddrUseIp(hosts);
    return;
  }
  // Check if it is a new cluster, new clusters start with IP by default
  String roleFilePath = Config.meta_dir + ROLE_FILE_PATH;
  File roleFile = new File(roleFilePath);
  if (!roleFile.exists()) {
    initAddrUseIp(hosts);
    return;
  }
  Properties prop = new Properties();
  String fileStoredHostType;
  try (FileInputStream in = new FileInputStream(roleFile)) {
    prop.load(in);
  } catch (IOException e) {
    LOG.error("failed to read role file");
    System.exit(-1);
  }
  fileStoredHostType = prop.getProperty(HOST_TYPE, null);
  // Check if the ROLE file has property 'hostType'
  // If it not has property 'hostType', start with IP
  // If it has property 'hostType' & hostType = IP, start with IP
  if (Strings.isNullOrEmpty(fileStoredHostType) || fileStoredHostType.equals(HostType.IP.toString())) {
    initAddrUseIp(hosts);
    return;
  }
  // If it has property 'hostType' & hostType = FQDN, start with FQDN
  initAddrUseFqdn(hosts);
}
// "-host_type ip" forces IP, "-host_type fqdn" forces FQDN, and no flag
// defaults to IP (new-cluster behavior under the mocked network).
@Test
public void testChooseHostType() throws UnknownHostException {
    mockNet();
    useFqdn = true;
    FrontendOptions.init(new String[] {"-host_type", "ip"});
    Assert.assertTrue(!useFqdn);
    useFqdn = false;
    FrontendOptions.init(new String[] {"-host_type", "fqdn"});
    Assert.assertTrue(useFqdn);
    useFqdn = false;
    FrontendOptions.init(new String[] {});
    Assert.assertTrue(!useFqdn);
}
/**
 * Deletes the managed Spanner resources: drops only the database when a
 * shared static instance is in use, otherwise deletes the whole instance.
 * Quota errors are retried via Failsafe; the Spanner client is always closed.
 *
 * @throws SpannerResourceManagerException when deletion ultimately fails
 */
@Override
public synchronized void cleanupAll() {
  try {
    if (usingStaticInstance) {
      // Static instances are shared with other runs: only drop our database.
      if (databaseAdminClient != null) {
        Failsafe.with(retryOnQuotaException())
            .run(() -> databaseAdminClient.dropDatabase(instanceId, databaseId));
      }
    } else {
      LOG.info("Deleting instance {}...", instanceId);
      if (instanceAdminClient != null) {
        Failsafe.with(retryOnQuotaException())
            .run(() -> instanceAdminClient.deleteInstance(instanceId));
      }
      hasInstance = false;
    }
    hasDatabase = false;
  } catch (SpannerException e) {
    throw new SpannerResourceManagerException("Failed to delete instance.", e);
  } finally {
    // Release the client even when deletion failed.
    if (!spanner.isClosed()) {
      spanner.close();
    }
  }
  LOG.info("Manager successfully cleaned up.");
}
// A SpannerException from deleteInstance is wrapped in
// SpannerResourceManagerException.
@Test
public void testCleanupAllShouldThrowExceptionWhenSpannerDeleteInstanceFails() {
    // arrange
    doThrow(SpannerException.class).when(instanceAdminClient).deleteInstance(any());
    when(spanner.getInstanceAdminClient()).thenReturn(instanceAdminClient);
    testManager =
        new SpannerResourceManager(
            spanner, TEST_ID, PROJECT_ID, REGION, DIALECT, false, null, NODE_COUNT);
    // act & assert
    assertThrows(SpannerResourceManagerException.class, () -> testManager.cleanupAll());
}
// Graph node: searches the web (Tavily) for the current question and returns
// the joined result texts under the "documents" state key.
private Map<String,Object> webSearch( State state ) {
    log.debug("---WEB SEARCH---");
    String question = state.question();
    var result = WebSearchTool.of( tavilyApiKey ).apply(question);
    // Concatenate all result snippets into a single pseudo-document.
    var webResult = result.stream()
            .map( content -> content.textSegment().text() )
            .collect(Collectors.joining("\n"));
    return mapOf( "documents", listOf( webResult ) );
}
// Live Tavily search returns non-null joined text for a sample query.
@Test
public void WebSearchTest() {
    WebSearchTool webSearchTool = WebSearchTool.of(getTavilyApiKey());
    List<Content> webSearchResults = webSearchTool.apply("agent memory");
    String webSearchResultsText = webSearchResults.stream().map( content -> content.textSegment().text() )
            .collect(Collectors.joining("\n"));
    assertNotNull( webSearchResultsText );
    System.out.println( webSearchResultsText );
}
/**
 * Rounds the corners of the source image and stores the result as the target
 * image.
 *
 * @param arc corner radius expressed as a fraction of min(width, height)
 * @return this, for call chaining
 */
public Img round(double arc) {
    final Image srcImage = getValidSrcImg();
    final int width = srcImage.getWidth(null);
    final int height = srcImage.getHeight(null);
    // Convert the ratio into an absolute arc size in pixels.
    arc = NumberUtil.mul(arc, Math.min(width, height));
    final BufferedImage targetImage = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB);
    final Graphics2D g2 = targetImage.createGraphics();
    g2.setComposite(AlphaComposite.Src);
    // Anti-aliasing for smooth corner edges.
    g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);
    g2.fill(new RoundRectangle2D.Double(0, 0, width, height, arc, arc));
    g2.setComposite(AlphaComposite.SrcAtop);
    g2.drawImage(srcImage, 0, 0, null);
    g2.dispose();
    this.targetImage = targetImage;
    return this;
}
// Manual smoke test (disabled): writes a rounded-corner copy to local disk.
@Test
@Disabled
public void roundTest() {
    Img.from(FileUtil.file("e:/pic/face.jpg")).round(0.5).write(FileUtil.file("e:/pic/face_round.png"));
}
@SuppressWarnings("removal") // Since JDK 22 public static void acquireFence() { UnsafeAccess.UNSAFE.loadFence(); }
// Smoke test: the fence call must not throw.
@Test
void acquireFence() {
    MemoryAccess.acquireFence();
}
// Reads the OpenAPI definition from the given class using the resolved
// application path and fresh, empty context collections.
public OpenAPI read(Class<?> cls) {
    return read(cls, resolveApplicationPath(), null, false, null, null,
        new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>());
}
// Reading a resource annotated with extensions yields the expected YAML.
@Test(description = "Extensions Tests")
public void testExtensions() {
    Reader reader = new Reader(new OpenAPI());
    OpenAPI openAPI = reader.read(ExtensionsResource.class);
    assertNotNull(openAPI);
    SerializationMatchers.assertEqualsToYaml(openAPI, ExtensionsResource.YAML);
}
/**
 * Creates an output stream at the given path after applying entropy injection
 * (rewriting the file system's entropy key in the path, when present).
 *
 * @return the created stream together with the possibly rewritten path
 */
public static OutputStreamAndPath createEntropyAware(
        FileSystem fs, Path path, WriteMode writeMode) throws IOException {
    final Path processedPath = addEntropy(fs, path);
    // create the stream on the original file system to let the safety net
    // take its effect
    final FSDataOutputStream out = fs.create(processedPath, writeMode);
    return new OutputStreamAndPath(out, processedPath);
}
// Entropy injection works through a safety-net-wrapped, plugin-loaded file
// system: the entropy key in the path is replaced by the entropy value.
@Test
void testClassLoaderFixingFsWithSafeyNet() throws Exception {
    final String entropyKey = "__ekey__";
    final String entropyValue = "abc";
    final File folder = TempDirUtils.newFolder(tempFolder);
    final Path path = new Path(Path.fromLocalFile(folder), entropyKey + "/path/");
    final Path pathWithEntropy = new Path(Path.fromLocalFile(folder), entropyValue + "/path/");
    PluginFileSystemFactory pluginFsFactory =
        PluginFileSystemFactory.of(new TestFileSystemFactory(entropyKey, entropyValue));
    FileSystem testFs = pluginFsFactory.create(URI.create("test"));
    FileSystemSafetyNet.initializeSafetyNetForThread();
    FileSystem fs = FileSystemSafetyNet.wrapWithSafetyNetWhenActivated(testFs);
    try {
        OutputStreamAndPath streamAndPath =
            EntropyInjector.createEntropyAware(fs, path, WriteMode.NO_OVERWRITE);
        assertThat(streamAndPath.path()).isEqualTo(pathWithEntropy);
    } finally {
        FileSystemSafetyNet.closeSafetyNetAndGuardedResourcesForThread();
    }
}
/**
 * Value-based equality for JSON primitives: integral numbers compare via
 * long/BigInteger, BigDecimals compare scale-insensitively, other numbers
 * compare as double (with NaN == NaN treated as equal here), and everything
 * else falls back to Object equality.
 */
@Override
public boolean equals(Object obj) {
  if (this == obj) {
    return true;
  }
  if (obj == null || getClass() != obj.getClass()) {
    return false;
  }
  JsonPrimitive other = (JsonPrimitive) obj;
  if (value == null) {
    return other.value == null;
  }
  if (isIntegral(this) && isIntegral(other)) {
    // Promote to BigInteger only when one side requires it; otherwise the
    // cheap long comparison suffices.
    return (this.value instanceof BigInteger || other.value instanceof BigInteger)
        ? this.getAsBigInteger().equals(other.getAsBigInteger())
        : this.getAsNumber().longValue() == other.getAsNumber().longValue();
  }
  if (value instanceof Number && other.value instanceof Number) {
    if (value instanceof BigDecimal && other.value instanceof BigDecimal) {
      // Uses compareTo to ignore scale of values, e.g. `0` and `0.00` should be considered equal
      return this.getAsBigDecimal().compareTo(other.getAsBigDecimal()) == 0;
    }
    double thisAsDouble = this.getAsDouble();
    double otherAsDouble = other.getAsDouble();
    // Don't use Double.compare(double, double) because that considers -0.0 and +0.0 not equal
    return (thisAsDouble == otherAsDouble)
        || (Double.isNaN(thisAsDouble) && Double.isNaN(otherAsDouble));
  }
  return value.equals(other.value);
}
// Zero BigDecimals with different scales compare equal, and a BigDecimal zero
// equals a Double zero numerically.
@Test
public void testBigDecimalEqualsZero() {
    assertThat(
        new JsonPrimitive(new BigDecimal("0.0"))
            .equals(new JsonPrimitive(new BigDecimal("0.00"))))
        .isTrue();
    assertThat(
        new JsonPrimitive(new BigDecimal("0.00"))
            .equals(new JsonPrimitive(Double.valueOf("0.00"))))
        .isTrue();
}
/**
 * Parses a persisted money string back into a Money value. Tries the
 * canonical Money.parse form first, then the custom FORMAT as fallback;
 * when both fail, the original parse exception is rethrown.
 */
@Override
public Money convertToEntityAttribute(String source) {
    if (source == null) {
        return null;
    }
    try {
        return Money.parse(source);
    } catch (RuntimeException e) {
        try {
            // Fallback for formatted representations such as "EUR 123,456.78".
            return Money.parse(source, FORMAT);
        } catch (RuntimeException inner) {
            // Propagate the original exception in case the fallback fails
            throw e;
        }
    }
}
// A grouped, formatted string deserializes via the FORMAT fallback path.
@Test
void deserializesFormattedValues() {
    assertThat(converter.convertToEntityAttribute("EUR 123,456.78")).isEqualTo(Money.of(123456.78, "EUR"));
}
/**
 * Derives a Kubernetes-safe object name from a base name: lower-cases, strips
 * disallowed characters, collapses dot runs, and pre-/appends "zeppelin" so
 * the name starts and ends with an alphanumeric character. Truncation reserves
 * room for both the optional random suffix and a possible trailing "zeppelin",
 * keeping the final name within the 253-character limit.
 *
 * @param baseName raw name; may be null or blank (yields just "zeppelin")
 * @param randomSuffix when true, appends "-" plus six random lowercase letters
 */
public static String generateK8sName(String baseName, boolean randomSuffix) {
    String result = ZEPPELIN;
    if (StringUtils.isNotBlank(baseName)) {
        // all to lowerCase
        result = baseName.toLowerCase();
        // Remove all disallowed values
        result = result.replaceAll("[^a-z0-9\\.-]", "");
        // Remove all multiple dots
        result = result.replaceAll("\\.+", ".");
        if (result.isEmpty() || !Character.isLetterOrDigit(result.charAt(0))) {
            result = ZEPPELIN + result;
        }
        // 253 - 7 (suffix) = 246
        // Also reserve ZEPPELIN.length() so the trailing "zeppelin" appended
        // below can never push the final name over the limit.
        if (result.length() > 246 - ZEPPELIN.length()) {
            result = result.substring(0, 246 - ZEPPELIN.length());
        }
        if (!Character.isLetterOrDigit(result.charAt(result.length() - 1))) {
            result = result + ZEPPELIN;
        }
    }
    return randomSuffix ? result + "-" + RandomStringUtils.randomAlphabetic(6).toLowerCase() : result;
}
// Covers blank/null fallback, character stripping, dot collapsing,
// prefix/suffix padding, lower-casing, and length budgeting with suffix.
@Test
void testGenerateK8sName() {
    assertEquals("zeppelin", K8sUtils.generateK8sName("", false));
    assertEquals("zeppelin", K8sUtils.generateK8sName(null, false));
    assertEquals("test", K8sUtils.generateK8sName("test", false));
    assertEquals("test", K8sUtils.generateK8sName("!test", false));
    assertEquals("zeppelin", K8sUtils.generateK8sName("!", false));
    assertEquals("zeppelin.test", K8sUtils.generateK8sName(".test", false));
    assertEquals("zeppelin.test", K8sUtils.generateK8sName("...test", false));
    assertEquals("zeppelin.test.zeppelin", K8sUtils.generateK8sName(".test.", false));
    assertEquals("zeppelin.test.zeppelin", K8sUtils.generateK8sName("...test....", false));
    assertEquals("test", K8sUtils.generateK8sName("Test", false));
    assertEquals(253 - "zeppelin".length(),
        K8sUtils.generateK8sName(RandomStringUtils.randomAlphabetic(260), true).length());
}
/**
 * Returns one page of account logs for the authenticated app session.
 * Validates the page id and the session before delegating to the service.
 */
@PostMapping("/get_logs")
@Operation(summary = "Get paginated account logs")
public DAccountLogsResult getAccountLogs(@RequestBody DAccountLogsRequest deprecatedRequest){
    validatePageId(deprecatedRequest);
    AppSession appSession = validate(deprecatedRequest);
    var request = deprecatedRequest.getRequest();
    var result = accountService.getAccountLogs(appSession.getAccountId(),
        appSession.getDeviceName(), appSession.getAppCode(), request);
    return DAccountLogsResult.copyFrom(result);
}
// Happy-path test: stubs accountService for account id 1 and asserts the controller
// copies status, error and log list through to the deprecated result DTO.
@Test public void testValidRequest() { DAccountLogsRequest request = new DAccountLogsRequest(); request.setPageId(1); request.setAppSessionId("id"); AccountLogsResult result = new AccountLogsResult(); result.setTotalItems(10); result.setTotalPages(1); List<AccountLog> results = new ArrayList<>(); result.setResults(results); result.setStatus(Status.OK); result.setError("error"); when(accountService.getAccountLogs(eq(1L), any(), any(), any())).thenReturn(result); DAccountLogsResult accountLogs = accountLogsController.getAccountLogs(request); assertEquals(Status.OK, accountLogs.getStatus()); assertEquals("error", accountLogs.getError()); assertEquals(results, accountLogs.getLogs()); }
/**
 * Copies the value under {@code inspector} into {@code inserter}.
 * An invalid inspector is silently ignored (nothing is inserted).
 */
public void inject(Inspector inspector, Inserter inserter) {
    if (!inspector.valid()) {
        return;
    }
    injectValue(inserter, inspector, null);
}
// Injects an array into itself and asserts the result equals the array with a copy
// of itself appended — i.e. inject snapshots the source rather than looping forever.
@Test public void recursiveArrayInject() { Slime expect = new Slime(); { Cursor arr = expect.setArray(); arr.addLong(1); arr.addLong(2); arr.addLong(3); { Cursor arrCpy = arr.addArray(); arrCpy.addLong(1); arrCpy.addLong(2); arrCpy.addLong(3); } } Slime data = new Slime(); { Cursor arr = data.setArray(); arr.addLong(1); arr.addLong(2); arr.addLong(3); } inject(data.get(), new ArrayInserter(data.get())); assertEquals(expect.toString(), data.toString()); }
// Creates the next processor span for a JMS message: extracts (and clears) any
// propagated trace-id properties from the message, derives a span from them, and —
// only when no upstream context was present and the span is recorded — tags the
// queue/topic via a reused MessageConsumerRequest wrapper.
public Span nextSpan(Message message) { TraceContextOrSamplingFlags extracted = extractAndClearTraceIdProperties(processorExtractor, message, message); Span result = tracer.nextSpan(extracted); // Processor spans use the normal sampler. // When an upstream context was not present, lookup keys are unlikely added if (extracted.context() == null && !result.isNoop()) { // simplify code by re-using an existing MessagingRequest impl tagQueueOrTopic(new MessageConsumerRequest(message, destination(message)), result); } return result; }
// Injects B3 headers into the message, then asserts nextSpan strips them so they
// are not re-propagated downstream.
@Test void nextSpan_should_clear_propagation_headers() { Propagation.B3_STRING.injector(SETTER).inject(parent, message); jmsTracing.nextSpan(message); assertThat(ITJms.propertiesToMap(message)).isEmpty(); }
// Decodes a coordinator record: the key buffer's leading short selects both the key
// schema and its version; a null value buffer yields a tombstone (null value),
// otherwise the value's own leading short selects the value version.
@Override public CoordinatorRecord deserialize( ByteBuffer keyBuffer, ByteBuffer valueBuffer ) throws RuntimeException { final short recordType = readVersion(keyBuffer, "key"); final ApiMessage keyMessage = apiMessageKeyFor(recordType); readMessage(keyMessage, keyBuffer, recordType, "key"); if (valueBuffer == null) { return new CoordinatorRecord(new ApiMessageAndVersion(keyMessage, recordType), null); } final ApiMessage valueMessage = apiMessageValueFor(recordType); final short valueVersion = readVersion(valueBuffer, "value"); readMessage(valueMessage, valueBuffer, valueVersion, "value"); return new CoordinatorRecord( new ApiMessageAndVersion(keyMessage, recordType), new ApiMessageAndVersion(valueMessage, valueVersion) ); }
// An empty key buffer cannot supply the version short; deserialize must fail with a
// descriptive RuntimeException rather than an opaque BufferUnderflowException.
@Test public void testDeserializeWithKeyEmptyBuffer() { GroupCoordinatorRecordSerde serde = new GroupCoordinatorRecordSerde(); ByteBuffer keyBuffer = ByteBuffer.allocate(0); ByteBuffer valueBuffer = ByteBuffer.allocate(64); RuntimeException ex = assertThrows(RuntimeException.class, () -> serde.deserialize(keyBuffer, valueBuffer)); assertEquals("Could not read version from key's buffer.", ex.getMessage()); }
/**
 * Appends {@code value} to {@code sb} in decimal, left-padded with '0' so that at
 * least {@code minimumDigits} digits appear (the '-' sign is not counted).
 * Allocation-free on the common path.
 *
 * @param sb destination builder (also returned, for chaining)
 * @param value value to format
 * @param minimumDigits minimum number of digits to emit
 * @return {@code sb}
 */
public static StringBuilder format(StringBuilder sb, long value, int minimumDigits) {
    if (value < 0) {
        sb.append('-');
        if (value == Long.MIN_VALUE) {
            // -Long.MIN_VALUE overflows back to itself; emit its magnitude digits directly.
            String digits = Long.toString(value).substring(1);
            for (int i = minimumDigits - digits.length(); i > 0; --i) {
                sb.append('0');
            }
            return sb.append(digits);
        }
        value = -value;
    }
    // Burn off one required digit per decimal digit the value itself supplies.
    long tmp = value;
    do {
        tmp /= 10;
    } while (--minimumDigits > 0 && tmp > 0);
    // Whatever requirement remains must be satisfied with zero padding.
    for (int i = minimumDigits; i > 0; --i) {
        sb.append('0');
    }
    sb.append(value);
    return sb;
}
// Cross-checks FastNumberFormat.format against java.text.NumberFormat with a fixed
// minimum digit count, covering positives, negatives, zero and values longer than
// the minimum width.
@Test(timeout = 1000) public void testLongWithPadding() throws Exception { NumberFormat numberFormat = NumberFormat.getInstance(); numberFormat.setGroupingUsed(false); numberFormat.setMinimumIntegerDigits(6); long[] testLongs = {1, 23, 456, 7890, 12345, 678901, 2345689, 0, -0, -1, -23, -456, -7890, -12345, -678901, -2345689}; for (long l: testLongs) { StringBuilder sb = new StringBuilder(); FastNumberFormat.format(sb, l, MIN_DIGITS); String fastNumberStr = sb.toString(); Assert.assertEquals("Number formats should be equal", numberFormat.format(l), fastNumberStr); } }
// Truth assertion: fails unless the subject's actual value is contained (by the
// iterable's own contains semantics, so null elements are honored) in the iterable.
public void isIn(@Nullable Iterable<?> iterable) { checkNotNull(iterable); if (!contains(iterable, actual)) { failWithActual("expected any of", iterable); } }
// A non-null subject must match inside an iterable that also contains null.
@Test public void isInNonnullInListWithNull() { assertThat("b").isIn(oneShotIterable("a", "b", (String) null)); }
// Iterative (stack-based, no recursion) visitor over an Avro schema graph.
// The deque holds two kinds of entries: Schema nodes still to visit, and Supplier
// entries representing the deferred "after visit" callback of a non-terminal node.
// An IdentityHashMap guards against revisiting shared/recursive schemas — a schema
// seen twice is treated as a terminal. Visitor actions: CONTINUE proceeds,
// SKIP_SIBLINGS drops the remaining Schema entries above the next Supplier,
// TERMINATE returns visitor.get() immediately; SKIP_SUBTREE is invalid here
// (the subtree has already been visited by the time the Supplier runs).
public static <T> T visit(final Schema start, final SchemaVisitor<T> visitor) { // Set of Visited Schemas IdentityHashMap<Schema, Schema> visited = new IdentityHashMap<>(); // Stack that contains the Schemas to process and afterVisitNonTerminal // functions. // Deque<Either<Schema, Supplier<SchemaVisitorAction>>> // Using Either<...> has a cost we want to avoid... Deque<Object> dq = new ArrayDeque<>(); dq.push(start); Object current; while ((current = dq.poll()) != null) { if (current instanceof Supplier) { // We are executing a non-terminal post visit. SchemaVisitor.SchemaVisitorAction action = ((Supplier<SchemaVisitor.SchemaVisitorAction>) current).get(); switch (action) { case CONTINUE: break; case SKIP_SIBLINGS: while (dq.peek() instanceof Schema) { dq.remove(); } break; case TERMINATE: return visitor.get(); case SKIP_SUBTREE: default: throw new UnsupportedOperationException("Invalid action " + action); } } else { Schema schema = (Schema) current; boolean terminate; if (visited.containsKey(schema)) { terminate = visitTerminal(visitor, schema, dq); } else { Schema.Type type = schema.getType(); switch (type) { case ARRAY: terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getElementType())); visited.put(schema, schema); break; case RECORD: terminate = visitNonTerminal(visitor, schema, dq, () -> schema.getFields().stream().map(Field::schema) .collect(Collectors.toCollection(ArrayDeque::new)).descendingIterator()); visited.put(schema, schema); break; case UNION: terminate = visitNonTerminal(visitor, schema, dq, schema.getTypes()); visited.put(schema, schema); break; case MAP: terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getValueType())); visited.put(schema, schema); break; default: terminate = visitTerminal(visitor, schema, dq); break; } } if (terminate) { return visitor.get(); } } } return visitor.get(); }
// A visitor returning an unsupported action (e.g. SKIP_SUBTREE in a post-visit)
// must surface as UnsupportedOperationException.
@Test(expected = UnsupportedOperationException.class) public void testVisit8() { String s8 = "{\"type\": \"record\", \"name\": \"c1\", \"fields\": [" + "{\"name\": \"f1\", \"type\": {\"type\": \"record\", \"name\": \"cst2\", \"fields\": " + "[{\"name\": \"f11\", \"type\": \"int\"}]}}," + "{\"name\": \"f2\", \"type\": \"int\"}" + "]}"; Schemas.visit(new Schema.Parser().parse(s8), new TestVisitor()); }
// Snapshots the multipart upload into an OSSRecoverable: first uploads the current
// small/incomplete part, then drains all in-flight part uploads in order (blocking on
// each future), and finally records either a recoverable with (file == null) or one
// that includes the incomplete-part object name and the file position.
// Re-interrupts the thread if waiting is interrupted; upload failures become IOException.
public OSSRecoverable getRecoverable(RefCountedFSOutputStream file) throws IOException { String incompletePartObjectName = uploadSmallPart(file); checkState(numberOfRegisteredParts - completeParts.size() == uploadsInProgress.size()); while (numberOfRegisteredParts - completeParts.size() > 0) { CompletableFuture<PartETag> next = uploadsInProgress.peekFirst(); PartETag nextPart; try { nextPart = next.get(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IOException("Interrupted while waiting for part uploads to complete"); } catch (ExecutionException e) { throw new IOException("Uploading parts failed ", e.getCause()); } completeParts.add(nextPart); uploadsInProgress.removeFirst(); } if (file == null) { return new OSSRecoverable( uploadId, objectName, completeParts, null, expectedSizeInBytes, 0); } else { return new OSSRecoverable( uploadId, objectName, completeParts, incompletePartObjectName, expectedSizeInBytes, file.getPos()); } }
// Two successive snapshots with different partial data must record different
// last-part object names, proving each recoverable reflects its own partial upload.
@Test public void testRecoverableReflectsTheLatestPartialObject() throws IOException { final byte[] incompletePartOne = OSSTestUtils.bytesOf("AB", 1024); final byte[] incompletePartTwo = OSSTestUtils.bytesOf("ABC", 1024); RefCountedBufferingFileStream partFile = OSSTestUtils.writeData(temporaryFolder, incompletePartOne); partFile.close(); OSSRecoverable recoverableOne = uploader.getRecoverable(partFile); partFile = OSSTestUtils.writeData(temporaryFolder, incompletePartTwo); partFile.close(); OSSRecoverable recoverableTwo = uploader.getRecoverable(partFile); assertFalse(recoverableOne.getLastPartObject().equals(recoverableTwo.getLastPartObject())); }
// CLI entry point: verifies the config file exists, loads MigrationConfig (logging
// and returning exit code 1 on failure), then delegates to the injectable overload
// with the real ksql-client factory and the system clock.
@Override protected int command() { if (!validateConfigFilePresent()) { return 1; } final MigrationConfig config; try { config = MigrationConfig.load(getConfigFile()); } catch (KsqlException | MigrationException e) { LOGGER.error(e.getMessage()); return 1; } return command( config, MigrationsUtil::getKsqlClient, getMigrationsDir(getConfigFile(), config), Clock.systemDefaultZone() ); }
// Applies a migration containing SET/UNSET directives and verifies, in order, that
// the SET property ("auto.offset.reset") is attached only to the statement between
// SET and UNSET, while surrounding statements run with empty properties.
@Test public void shouldApplySetUnsetCommands() throws Exception { // Given: command = PARSER.parse("-n"); createMigrationFile(1, NAME, migrationsDir, SET_COMMANDS); // extra migration to ensure only the first is applied createMigrationFile(3, NAME, migrationsDir, COMMAND); when(versionQueryResult.get()).thenReturn(ImmutableList.of()); // When: final int result = command.command(config, (cfg, headers) -> ksqlClient, migrationsDir, Clock.fixed( Instant.ofEpochMilli(1000), ZoneId.systemDefault())); // Then: assertThat(result, is(0)); final InOrder inOrder = inOrder(ksqlClient); verifyMigratedVersion(inOrder, 1, "<none>", MigrationState.MIGRATED, () -> { inOrder.verify(ksqlClient).executeStatement(COMMAND, new HashMap<>()); inOrder.verify(ksqlClient).executeStatement(eq("CREATE TABLE BAR AS SELECT * FROM FOO GROUP BY A;"), propCaptor.capture()); assertThat(propCaptor.getValue().size(), is(1)); assertThat(propCaptor.getValue().get("auto.offset.reset"), is("earliest")); inOrder.verify(ksqlClient).executeStatement("CREATE STREAM MOO (A STRING) WITH (KAFKA_TOPIC='MOO', PARTITIONS=1, VALUE_FORMAT='DELIMITED');", new HashMap<>()); }); inOrder.verify(ksqlClient).close(); inOrder.verifyNoMoreInteractions(); }
// Enqueues every unsent request from the poll result and returns the poll result's
// suggested delay (ms) until the next poll.
public long addAll(PollResult pollResult) { Objects.requireNonNull(pollResult); addAll(pollResult.unsentRequests); return pollResult.timeUntilNextPollMs; }
// addAll must echo the PollResult's timeUntilNextPollMs whether or not the result
// carries unsent requests.
@Test void testPollResultTimer() throws Exception { try (NetworkClientDelegate ncd = newNetworkClientDelegate()) { NetworkClientDelegate.UnsentRequest req = new NetworkClientDelegate.UnsentRequest( new FindCoordinatorRequest.Builder( new FindCoordinatorRequestData() .setKeyType(FindCoordinatorRequest.CoordinatorType.TRANSACTION.id()) .setKey("foobar")), Optional.empty()); req.setTimer(time, DEFAULT_REQUEST_TIMEOUT_MS); // purposely setting a non-MAX time to ensure it is returning Long.MAX_VALUE upon success NetworkClientDelegate.PollResult success = new NetworkClientDelegate.PollResult( 10, Collections.singletonList(req)); assertEquals(10, ncd.addAll(success)); NetworkClientDelegate.PollResult failure = new NetworkClientDelegate.PollResult( 10, new ArrayList<>()); assertEquals(10, ncd.addAll(failure)); } }
// Parses a JSON string by delegating to parseReader over an in-memory StringReader;
// throws JsonSyntaxException on malformed input.
public static JsonElement parseString(String json) throws JsonSyntaxException { return parseReader(new StringReader(json)); }
// Documents lenient parsing: unquoted array elements are accepted as strings
// (despite the method name suggesting failure).
@Test public void testParseUnquotedStringArrayFails() { JsonElement element = JsonParser.parseString("[a,b,c]"); assertThat(element.getAsJsonArray().get(0).getAsString()).isEqualTo("a"); assertThat(element.getAsJsonArray().get(1).getAsString()).isEqualTo("b"); assertThat(element.getAsJsonArray().get(2).getAsString()).isEqualTo("c"); assertThat(element.getAsJsonArray()).hasSize(3); }
// Thin delegate to the admin impl: fetches the producer table from the given broker.
@Override public ProducerTableInfo getAllProducerInfo( final String brokerAddr) throws RemotingException, MQClientException, InterruptedException, MQBrokerException { return defaultMQAdminExtImpl.getAllProducerInfo(brokerAddr); }
// Verifies the delegate returns the stubbed producer table (one entry).
@Test public void testGetAllProducerInfo() throws InterruptedException, RemotingException, MQClientException, MQBrokerException { ProducerTableInfo producerTableInfo = defaultMQAdminExt.getAllProducerInfo("127.0.0.1:10911"); assertThat(producerTableInfo.getData().size()).isEqualTo(1); }
// Parses a (possibly partial) ISO-ish timestamp into epoch millis. Splits the text
// at 'T' into date / time / timezone, completes missing components (completeDate /
// completeTime), strips a trailing 'Z' from the time portion, and delegates to
// PARSER with the resolved ZoneId. Any parse failure is rethrown as KsqlException
// with a help message.
@SuppressWarnings("MethodMayBeStatic") // Non-static to support DI. public long parse(final String text) { final String date; final String time; final String timezone; if (text.contains("T")) { date = text.substring(0, text.indexOf('T')); final String withTimezone = text.substring(text.indexOf('T') + 1); timezone = getTimezone(withTimezone); time = completeTime(withTimezone.substring(0, withTimezone.length() - timezone.length()) .replaceAll("Z$","")); } else { date = completeDate(text); time = completeTime(""); timezone = ""; } try { final ZoneId zoneId = parseTimezone(timezone); return PARSER.parse(date + "T" + time, zoneId); } catch (final RuntimeException e) { throw new KsqlException("Failed to parse timestamp '" + text + "': " + e.getMessage() + HELP_MESSAGE, e ); } }
// Date-only inputs (with/without trailing 'T' or 'TZ') resolve to midnight UTC.
@Test public void shouldParseFullDate() { // When: assertThat(parser.parse("2020-01-02"), is(fullParse("2020-01-02T00:00:00.000+0000"))); assertThat(parser.parse("2020-01-02T"), is(fullParse("2020-01-02T00:00:00.000+0000"))); assertThat(parser.parse("2020-01-02TZ"), is(fullParse("2020-01-02T00:00:00.000+0000"))); }
// UDAF factory: returns a fresh generic collect-list aggregator instance.
@UdafFactory(description = "collect values of a field into a single Array") public static <T> TableUdaf<T, List<T>, List<T>> createCollectListT() { return new Collect<>(); }
// Once the configured limit is reached, further aggregates are dropped, and undoing
// a value that was never collected must not corrupt the list.
@Test public void shouldUndoAfterHittingLimit() { final int limit = 10; final TableUdaf<Integer, List<Integer>, List<Integer>> udaf = CollectListUdaf.createCollectListT(); ((Configurable) udaf).configure(ImmutableMap.of(CollectListUdaf.LIMIT_CONFIG, limit)); List<Integer> runningList = udaf.initialize(); for (int i = 0; i < limit ; i++) { runningList = udaf.aggregate(i, runningList); } runningList = udaf.aggregate(limit + 1, runningList); assertThat(limit + 1, not(isIn(runningList))); runningList = udaf.undo(limit + 1, runningList); assertThat(limit + 1, not(isIn(runningList))); }
/**
 * Returns true if {@code str} is null, empty, or consists solely of characters that
 * {@link String#trim()} would strip (code points {@code <= U+0020}).
 *
 * <p>Behaviorally identical to {@code str == null || str.trim().isEmpty()} but scans
 * in place instead of allocating an intermediate trimmed String.
 */
public static boolean isBlank(String str) {
    if (str == null) {
        return true;
    }
    for (int i = 0, n = str.length(); i < n; i++) {
        // trim() removes every char <= ' '; any char above it makes the string non-blank.
        if (str.charAt(i) > ' ') {
            return false;
        }
    }
    return true;
}
// Covers null, empty, whitespace-only, and padded non-blank inputs.
@Test void testIsBlank() { assertTrue(Utils.isBlank(null)); assertTrue(Utils.isBlank("")); assertTrue(Utils.isBlank(" ")); assertFalse(Utils.isBlank("bob")); assertFalse(Utils.isBlank(" bob ")); }
// Returns an immutable snapshot of the per-PCollection field-access map gathered
// during pipeline traversal.
ImmutableMap<PCollection<?>, FieldAccessDescriptor> getPCollectionFieldAccess() { return ImmutableMap.copyOf(pCollectionFieldAccess); }
// When a collection feeds an unknown DoFn (directly or as a side input), the visitor
// must conservatively mark all fields as accessed for both sources.
@Test public void testFieldAccessKnownMainAndUnknownSideInputs() { Pipeline p = Pipeline.create(); FieldAccessVisitor fieldAccessVisitor = new FieldAccessVisitor(); Schema schema = Schema.of(Field.of("field1", FieldType.STRING), Field.of("field2", FieldType.STRING)); PCollection<Row> source1 = p.apply(Create.of(Row.withSchema(schema).addValues("foo", "bar").build())) .setRowSchema(schema); source1.apply(new FieldAccessTransform(FieldAccessDescriptor.withFieldNames("field1"))); PCollectionView<Row> source1View = source1.apply(View.asSingleton()); PCollection<Row> source2 = p.apply(Create.of(Row.withSchema(schema).addValues("baz", "qux").build())) .setRowSchema(schema); source2 .apply(ParDo.of(new UnknownDoFn()).withSideInput("source1View", source1View)) .setRowSchema(schema); p.traverseTopologically(fieldAccessVisitor); assertTrue(fieldAccessVisitor.getPCollectionFieldAccess().get(source1).getAllFields()); assertTrue(fieldAccessVisitor.getPCollectionFieldAccess().get(source2).getAllFields()); }
// Maps a "magic" commit path to its final destination: non-magic paths pass through
// unchanged; magic paths keep everything before the magic prefix and then either the
// children below a __base marker (directory-tree commit) or just the final filename.
// Rejects empty child lists with IllegalArgumentException via checkArgument.
public static List<String> finalDestination(List<String> elements) { if (isMagicPath(elements)) { List<String> destDir = magicPathParents(elements); List<String> children = magicPathChildren(elements); checkArgument(!children.isEmpty(), "No path found under the prefix " + MAGIC_PATH_PREFIX); ArrayList<String> dest = new ArrayList<>(destDir); if (containsBasePath(children)) { // there's a base marker in the path List<String> baseChildren = basePathChildren(children); checkArgument(!baseChildren.isEmpty(), "No path found under " + BASE); dest.addAll(baseChildren); } else { dest.add(filename(children)); } return dest; } else { return elements; } }
// A magic path with an intermediate task-attempt segment resolves to parent + filename.
@Test public void testFinalDestinationMagic2() { assertEquals(l("first", "3.txt"), finalDestination(l("first", MAGIC_PATH_PREFIX, "2", "3.txt"))); }
// Accessor for the shared cluster-server gRPC executor metric singleton.
public static GrpcServerExecutorMetric getClusterServerExecutorMetric() { return clusterServerExecutorMetric; }
// Sets every gauge on the cluster-server metric and reads each back, plus the type tag.
@Test void testClusterServerExecutorMetric() { MetricsMonitor.getClusterServerExecutorMetric().getPoolSize().set(1); MetricsMonitor.getClusterServerExecutorMetric().getMaximumPoolSize().set(1); MetricsMonitor.getClusterServerExecutorMetric().getCorePoolSize().set(1); MetricsMonitor.getClusterServerExecutorMetric().getActiveCount().set(1); MetricsMonitor.getClusterServerExecutorMetric().getInQueueTaskCount().set(1); MetricsMonitor.getClusterServerExecutorMetric().getTaskCount().set(1); MetricsMonitor.getClusterServerExecutorMetric().getCompletedTaskCount().set(1); assertEquals("grpcClusterServer", MetricsMonitor.getClusterServerExecutorMetric().getType()); assertEquals(1, MetricsMonitor.getClusterServerExecutorMetric().getPoolSize().get()); assertEquals(1, MetricsMonitor.getClusterServerExecutorMetric().getMaximumPoolSize().get()); assertEquals(1, MetricsMonitor.getClusterServerExecutorMetric().getCorePoolSize().get()); assertEquals(1, MetricsMonitor.getClusterServerExecutorMetric().getActiveCount().get()); assertEquals(1, MetricsMonitor.getClusterServerExecutorMetric().getInQueueTaskCount().get()); assertEquals(1, MetricsMonitor.getClusterServerExecutorMetric().getTaskCount().get()); assertEquals(1, MetricsMonitor.getClusterServerExecutorMetric().getCompletedTaskCount().get()); }
/**
 * Builds the lightweight catalog excerpt (id, type, title) for a lookup table.
 * All other DTO fields are intentionally omitted from the excerpt.
 */
@Override
public EntityExcerpt createExcerpt(LookupTableDto lookupTableDto) {
    final ModelId id = ModelId.of(lookupTableDto.id());
    final String title = lookupTableDto.title();
    return EntityExcerpt.builder()
        .id(id)
        .type(ModelTypes.LOOKUP_TABLE_V1)
        .title(title)
        .build();
}
// Builds a fully-populated DTO and asserts only id, type and title appear in the excerpt.
@Test public void createExcerpt() { final LookupTableDto lookupTableDto = LookupTableDto.builder() .id("1234567890") .name("lookup-table-name") .title("Lookup Table Title") .description("Lookup Table Description") .dataAdapterId("data-adapter-1234") .cacheId("cache-1234") .defaultSingleValue("default-single") .defaultSingleValueType(LookupDefaultValue.Type.STRING) .defaultMultiValue("default-multi") .defaultMultiValueType(LookupDefaultValue.Type.STRING) .build(); final EntityExcerpt excerpt = facade.createExcerpt(lookupTableDto); assertThat(excerpt.id()).isEqualTo(ModelId.of("1234567890")); assertThat(excerpt.type()).isEqualTo(ModelTypes.LOOKUP_TABLE_V1); assertThat(excerpt.title()).isEqualTo("Lookup Table Title"); }
/**
 * Returns {@code source} with every leading occurrence of {@code c} removed.
 * A null source yields null; a source made up entirely of {@code c} yields "".
 */
public static String trimStart( final String source, char c ) {
    if ( source == null ) {
      return null;
    }
    for ( int i = 0; i < source.length(); i++ ) {
      if ( source.charAt( i ) != c ) {
        // First non-matching character: keep the rest of the string from here on.
        return source.substring( i );
      }
    }
    // Every character matched c.
    return "";
}
// Only the single leading slash is stripped; interior and trailing slashes remain.
@Test public void testTrimStart_Single() { assertEquals( "file/path/", StringUtil.trimStart( "/file/path/", '/' ) ); }
// Deletes the object at {@code key} from the configured COS bucket.
public void deleteObject(String key) { cosClient.deleteObject(cosClientConfig.getBucket(), key); }
// Smoke test: delete must complete without throwing for a test key.
@Test void deleteObject() { cosManager.deleteObject("/test/1.jpg"); }
// Converts an OpenSSL signature-algorithm name to the JCA "<DIGEST>with<KEYALG>" form
// using the shared PATTERN regex; the three alternation groups cover the different
// digest/key orderings OpenSSL emits (e.g. "dsa_with_SHA256" -> "SHA256withDSA").
// Returns null for null input or when no alternative matches.
static String toJavaName(String opensslName) { if (opensslName == null) { return null; } Matcher matcher = PATTERN.matcher(opensslName); if (matcher.matches()) { String group1 = matcher.group(1); if (group1 != null) { return group1.toUpperCase(Locale.ROOT) + "with" + matcher.group(2).toUpperCase(Locale.ROOT); } if (matcher.group(3) != null) { return matcher.group(4).toUpperCase(Locale.ROOT) + "with" + matcher.group(3).toUpperCase(Locale.ROOT); } if (matcher.group(5) != null) { return matcher.group(6).toUpperCase(Locale.ROOT) + "with" + matcher.group(5).toUpperCase(Locale.ROOT); } } return null; }
// Underscore-separated OpenSSL form maps to JCA form with key algorithm last.
@Test public void testWithUnderscore() { assertEquals("SHA256withDSA", SignatureAlgorithmConverter.toJavaName("dsa_with_SHA256")); }
// Matcher constructor: stores the JsonPath expected to be absent from the input.
public WithoutJsonPath(JsonPath jsonPath) { this.jsonPath = jsonPath; }
// The matcher must accept both compiled-JsonPath and raw-string forms for a missing path.
@Test public void shouldMatchNonExistingJsonPath() { assertThat(JSON, withoutJsonPath(compile("$.not_there"))); assertThat(JSON, withoutJsonPath("$.not_there")); }
// Trains a linear-kernel SVR and wraps the resulting kernel machine in an explicit
// LinearKernelMachine so prediction is a direct dot product instead of a kernel sum.
public static Regression<double[]> fit(double[][] x, double[] y, double eps, double C, double tol) { smile.base.svm.SVR<double[]> svr = new smile.base.svm.SVR<>(new LinearKernel(), eps, C, tol); KernelMachine<double[]> svm = svr.fit(x, y); return new Regression<>() { final LinearKernelMachine model = LinearKernelMachine.of(svm); @Override public double predict(double[] x) { return model.f(x); } }; }
// Regression check on the Abalone dataset with a Gaussian-kernel SVM; pins the RMSE.
@Test public void tesAbalone() { System.out.println("Abalone"); GaussianKernel kernel = new GaussianKernel(5.0); RegressionValidation<Regression<double[]>> result = RegressionValidation.of(Abalone.x, Abalone.y, Abalone.testx, Abalone.testy, (x, y) -> SVM.fit(x, y, kernel, 1.5, 100, 1E-3)); System.out.println(result); assertEquals(2.1092, result.metrics.rmse, 1E-4); }
// Reads the on-disk failover switch file. Missing file => failover off. The file is
// re-read only when its mtime advances (lastModifiedMillis caches it); the file's
// first recognized line ("1" vs "0" style markers) flips the cached switchParams
// entry, and turning failover ON also kicks off a FailoverFileReader to load the
// cached instance data. Any error is logged and treated as failover off.
@Override public FailoverSwitch getSwitch() { try { File switchFile = Paths.get(failoverDir, UtilAndComs.FAILOVER_SWITCH).toFile(); if (!switchFile.exists()) { NAMING_LOGGER.debug("failover switch is not found, {}", switchFile.getName()); switchParams.put(FAILOVER_MODE_PARAM, Boolean.FALSE.toString()); return FAILOVER_SWITCH_FALSE; } long modified = switchFile.lastModified(); if (lastModifiedMillis < modified) { lastModifiedMillis = modified; String failover = ConcurrentDiskUtil.getFileContent(switchFile.getPath(), Charset.defaultCharset().toString()); if (!StringUtils.isEmpty(failover)) { String[] lines = failover.split(DiskCache.getLineSeparator()); for (String line : lines) { String line1 = line.trim(); if (IS_FAILOVER_MODE.equals(line1)) { switchParams.put(FAILOVER_MODE_PARAM, Boolean.TRUE.toString()); NAMING_LOGGER.info("failover-mode is on"); new FailoverFileReader().run(); return FAILOVER_SWITCH_TRUE; } else if (NO_FAILOVER_MODE.equals(line1)) { switchParams.put(FAILOVER_MODE_PARAM, Boolean.FALSE.toString()); NAMING_LOGGER.info("failover-mode is off"); return FAILOVER_SWITCH_FALSE; } } } } return switchParams.get(FAILOVER_MODE_PARAM).equals(Boolean.TRUE.toString()) ? FAILOVER_SWITCH_TRUE : FAILOVER_SWITCH_FALSE; } catch (Throwable e) { NAMING_LOGGER.error("[NA] failed to read failover switch.", e); switchParams.put(FAILOVER_MODE_PARAM, Boolean.FALSE.toString()); return FAILOVER_SWITCH_FALSE; } }
// Second call must keep returning enabled from the cached state (file unchanged,
// so the mtime short-circuit path is exercised).
@Test void testGetSwitchForFailoverEnabledKeep() throws NoSuchFieldException, IllegalAccessException { String dir = DiskFailoverDataSourceTest.class.getResource("/").getPath() + "/failover_test/enabled"; injectFailOverDir(dir); assertTrue(dataSource.getSwitch().getEnabled()); assertTrue(dataSource.getSwitch().getEnabled()); }
// Executes this JSON-RPC request synchronously through the configured service.
public T send() throws IOException { return web3jService.send(this, responseType); }
// Asserts the exact JSON-RPC payload emitted for eth_accounts.
@Test public void testEthAccounts() throws Exception { web3j.ethAccounts().send(); verifyResult("{\"jsonrpc\":\"2.0\",\"method\":\"eth_accounts\",\"params\":[],\"id\":1}"); }
// Adapter method: forwards the trace message to the wrapped SLF4J logger.
@Override public void trace(String msg) { logger.trace(msg); }
// The message+throwable overload must forward both arguments to the SLF4J logger.
@Test public void testTraceWithException() { Logger mockLogger = mock(Logger.class); when(mockLogger.getName()).thenReturn("foo"); InternalLogger logger = new Slf4JLogger(mockLogger); logger.trace("a", e); verify(mockLogger).getName(); verify(mockLogger).trace("a", e); }
// Flushes the output: prints an empty final batch (so headers are emitted even with
// zero rows) and flushes the underlying writer.
@Override public void finish() throws IOException { printRows(ImmutableList.of(), true); writer.flush(); }
// With zero rows, finish() must still emit the tab-separated header line.
@Test public void testTsvPrintingNoRows() throws Exception { StringWriter writer = new StringWriter(); List<String> fieldNames = ImmutableList.of("first", "last"); OutputPrinter printer = new TsvPrinter(fieldNames, writer, true); printer.finish(); assertEquals(writer.getBuffer().toString(), "first\tlast\n"); }
// Calls the EC2 DescribeInstances API: builds request attributes, signs them into
// headers with the given credentials, performs the HTTP call, and parses the XML
// response into an address map.
Map<String, String> describeInstances(AwsCredentials credentials) { Map<String, String> attributes = createAttributesDescribeInstances(); Map<String, String> headers = createHeaders(attributes, credentials); String response = callAwsService(attributes, headers); return parseDescribeInstances(response); }
// A non-2xx AWS response must throw, with both the status code and the AWS error
// body present in the exception message.
@Test public void awsError() { // given int errorCode = HttpURLConnection.HTTP_UNAUTHORIZED; String errorMessage = "Error message retrieved from AWS"; stubFor(get(urlMatching("/.*")) .willReturn(aResponse().withStatus(errorCode).withBody(errorMessage))); // when Exception exception = assertThrows(Exception.class, () -> awsEc2Api.describeInstances(CREDENTIALS)); // then assertTrue(exception.getMessage().contains(Integer.toString(errorCode))); assertTrue(exception.getMessage().contains(errorMessage)); }
/**
 * Expands {@code paths} into the set of regular files they denote: a regular file is
 * kept as-is, a directory contributes every regular file beneath it (recursively),
 * and anything else (e.g. a nonexistent path) is silently skipped.
 *
 * @param paths candidate files and directories
 * @return all regular files found
 * @throws IOException if walking a directory fails
 */
static Set<Path> getAllFiles(Set<Path> paths) throws IOException {
    Set<Path> result = new HashSet<>();
    for (Path candidate : paths) {
        if (Files.isDirectory(candidate)) {
            // Streams from Files.walk must be closed to release directory handles.
            try (Stream<Path> walked = Files.walk(candidate)) {
                walked.filter(Files::isRegularFile).forEach(result::add);
            }
        } else if (Files.isRegularFile(candidate)) {
            result.add(candidate);
        }
    }
    return result;
}
// A nonexistent input path must be skipped silently, yielding an empty result.
@Test public void getAllFiles_doesntBreakForNonExistentFiles() throws IOException { Path testPath = Paths.get("/a/file/that/doesnt/exist"); assertThat(Files.exists(testPath)).isFalse(); assertThat(PluginConfigurationProcessor.getAllFiles(ImmutableSet.of(testPath))).isEmpty(); }
// Wraps the changelog rows in a RemoveNetCarryoverIterator and filters out the nulls
// that iterator emits for rows it elides.
public static Iterator<Row> removeNetCarryovers(Iterator<Row> rowIterator, StructType rowType) { ChangelogIterator changelogIterator = new RemoveNetCarryoverIterator(rowIterator, rowType); return Iterators.filter(changelogIterator, Objects::nonNull); }
// Feeds delete/insert pairs (within and across snapshots) and asserts only the net
// changes survive: unmatched deletes/inserts are kept, matched pairs are dropped.
@Test public void testRemoveNetCarryovers() { List<Row> rowsWithDuplication = Lists.newArrayList( // this row are different from other rows, it is a net change, should be kept new GenericRowWithSchema(new Object[] {0, "d", "data", DELETE, 0, 0}, null), // a pair of delete and insert rows, should be removed new GenericRowWithSchema(new Object[] {1, "d", "data", DELETE, 0, 0}, null), new GenericRowWithSchema(new Object[] {1, "d", "data", INSERT, 0, 0}, null), // 2 delete rows and 2 insert rows, should be removed new GenericRowWithSchema(new Object[] {1, "d", "data", DELETE, 1, 1}, null), new GenericRowWithSchema(new Object[] {1, "d", "data", DELETE, 1, 1}, null), new GenericRowWithSchema(new Object[] {1, "d", "data", INSERT, 1, 1}, null), new GenericRowWithSchema(new Object[] {1, "d", "data", INSERT, 1, 1}, null), // a pair of insert and delete rows across snapshots, should be removed new GenericRowWithSchema(new Object[] {1, "d", "data", INSERT, 2, 2}, null), new GenericRowWithSchema(new Object[] {1, "d", "data", DELETE, 3, 3}, null), // extra insert rows, they are net changes, should be kept new GenericRowWithSchema(new Object[] {1, "d", "data", INSERT, 4, 4}, null), new GenericRowWithSchema(new Object[] {1, "d", "data", INSERT, 4, 4}, null), // different key, net changes, should be kept new GenericRowWithSchema(new Object[] {2, "d", "data", DELETE, 4, 4}, null)); List<Object[]> expectedRows = Lists.newArrayList( new Object[] {0, "d", "data", DELETE, 0, 0}, new Object[] {1, "d", "data", INSERT, 4, 4}, new Object[] {1, "d", "data", INSERT, 4, 4}, new Object[] {2, "d", "data", DELETE, 4, 4}); Iterator<Row> iterator = ChangelogIterator.removeNetCarryovers(rowsWithDuplication.iterator(), SCHEMA); List<Row> result = Lists.newArrayList(iterator); assertEquals("Rows should match.", expectedRows, rowsToJava(result)); }
// Builds an IPv4 address from a 32-bit int in network (big-endian) byte order.
public static IpAddress valueOf(int value) { byte[] bytes = ByteBuffer.allocate(INET_BYTE_LENGTH).putInt(value).array(); return new IpAddress(Version.INET, bytes); }
// A null byte array for an IPv6 address must raise NullPointerException.
@Test(expected = NullPointerException.class) public void testInvalidValueOfNullArrayIPv6() { IpAddress ipAddress; byte[] value; value = null; ipAddress = IpAddress.valueOf(IpAddress.Version.INET6, value); }
// Latching operational check: once nodeOperational is true it stays true. Otherwise
// polls checkOperational(); on interrupt the flag is re-set on the thread and the
// check treated as not-yet-operational. The finally block deliberately stops the ES
// connector the first time the node reports operational, then latches the flag.
@Override public boolean isOperational() { if (nodeOperational) { return true; } boolean flag = false; try { flag = checkOperational(); } catch (InterruptedException e) { LOG.trace("Interrupted while checking ES node is operational", e); Thread.currentThread().interrupt(); } finally { if (flag) { esConnector.stop(); nodeOperational = true; } } return nodeOperational; }
// RED cluster health must not be considered operational.
@Test public void isOperational_should_return_false_if_Elasticsearch_is_RED() { EsConnector esConnector = mock(EsConnector.class); when(esConnector.getClusterHealthStatus()).thenReturn(Optional.of(ClusterHealthStatus.RED)); EsManagedProcess underTest = new EsManagedProcess(mock(Process.class), ProcessId.ELASTICSEARCH, esConnector, WAIT_FOR_UP_TIMEOUT); assertThat(underTest.isOperational()).isFalse(); }
// Builds an IPv6 prefix from a raw address byte array and a prefix length;
// validation of both is delegated to Ip6Address.valueOf and the Ip6Prefix ctor.
public static Ip6Prefix valueOf(byte[] address, int prefixLength) { return new Ip6Prefix(Ip6Address.valueOf(address), prefixLength); }
// A prefix length > 128 must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class) public void testInvalidValueOfAddressTooLongPrefixLengthIPv6() { Ip6Address ipAddress; Ip6Prefix ipPrefix; ipAddress = Ip6Address.valueOf("1111:2222:3333:4444:5555:6666:7777:8888"); ipPrefix = Ip6Prefix.valueOf(ipAddress, 129); }
// Lazily serializes the event value: converts and caches it on first access, and
// only when a serialization service is available.
public Data getValueData() { if (valueData == null && serializationService != null) { valueData = serializationService.toData(value); } return valueData; }
// The lazily-serialized value must equal a direct toData() conversion.
@Test public void testGetValueData_withDataValue() { assertEquals(toData("value"), dataEvent.getValueData()); }
// Deep-copies a JDK dynamic proxy: creates a stub proxy first (so cyclic references
// through the handler resolve via fury.reference), copies the original invocation
// handler, then swaps it into the stub via direct field write at the known offset.
@Override public Object copy(Object value) { Class<?>[] interfaces = value.getClass().getInterfaces(); InvocationHandler invocationHandler = Proxy.getInvocationHandler(value); Preconditions.checkNotNull(interfaces); Preconditions.checkNotNull(invocationHandler); Object proxy = Proxy.newProxyInstance(fury.getClassLoader(), interfaces, STUB_HANDLER); if (needToCopyRef) { fury.reference(value, proxy); } Platform.putObject(proxy, PROXY_HANDLER_FIELD_OFFSET, fury.copyObject(invocationHandler)); return proxy; }
// Copying a proxy whose handler points back at the proxy must preserve the cycle:
// the copied handler's proxy reference is the copied proxy, not the original.
@Test(dataProvider = "furyCopyConfig") public void testJdkProxyRef(Fury fury) { RefTestInvocationHandler hdlr = new RefTestInvocationHandler(); Function function = (Function) Proxy.newProxyInstance(fury.getClassLoader(), new Class[] {Function.class}, hdlr); hdlr.setProxy(function); assertEquals(hdlr.getProxy(), function); Function copy = fury.copy(function); RefTestInvocationHandler copyHandler = (RefTestInvocationHandler) Proxy.getInvocationHandler(copy); assertEquals(copyHandler.getProxy(), copy); }
// Re-applies registry state after reconnect: replays every previously registered URL
// and every (URL, listener) subscription. Works on defensive copies of the current
// registered/subscribed collections to avoid concurrent modification while replaying.
protected void recover() throws Exception { // register Set<URL> recoverRegistered = new HashSet<>(getRegistered()); if (!recoverRegistered.isEmpty()) { if (logger.isInfoEnabled()) { logger.info("Recover register url " + recoverRegistered); } for (URL url : recoverRegistered) { register(url); } } // subscribe Map<URL, Set<NotifyListener>> recoverSubscribed = new HashMap<>(getSubscribed()); if (!recoverSubscribed.isEmpty()) { if (logger.isInfoEnabled()) { logger.info("Recover subscribe url " + recoverSubscribed.keySet()); } for (Map.Entry<URL, Set<NotifyListener>> entry : recoverSubscribed.entrySet()) { URL url = entry.getKey(); for (NotifyListener listener : entry.getValue()) { subscribe(url, listener); } } } }
// recover() on an empty registry is a no-op; after register+subscribe, recover must
// preserve both the registered URL and the subscription listener.
@Test void testRecover() throws Exception { // test recover nothing abstractRegistry.recover(); Assertions.assertFalse(abstractRegistry.getRegistered().contains(testUrl)); Assertions.assertNull(abstractRegistry.getSubscribed().get(testUrl)); // test recover abstractRegistry.register(testUrl); abstractRegistry.subscribe(testUrl, listener); abstractRegistry.recover(); // check if recover successfully Assertions.assertTrue(abstractRegistry.getRegistered().contains(testUrl)); Assertions.assertNotNull(abstractRegistry.getSubscribed().get(testUrl)); Assertions.assertTrue(abstractRegistry.getSubscribed().get(testUrl).contains(listener)); }
// Launches a batch of foreach-iteration workflow instances. Empty request list is a
// no-op; request and instance-id lists must be the same length. Fresh runs create
// brand-new start instances, restarts create restart instances from the foreach
// artifact's history; either path may legitimately produce no instances (returns
// empty). Otherwise the instances are persisted/run via instanceDao in batches.
public Optional<Details> runForeachBatch( Workflow workflow, Long internalId, long workflowVersionId, RunProperties runProperties, String foreachStepId, ForeachArtifact artifact, List<RunRequest> requests, List<Long> instanceIds, int batchSize) { if (ObjectHelper.isCollectionEmptyOrNull(requests)) { return Optional.empty(); } Checks.checkTrue( requests.size() == instanceIds.size(), "Run request list size [%s] must match instance id list size [%s]", requests.size(), instanceIds.size()); List<WorkflowInstance> instances; if (artifact.isFreshRun()) { instances = createStartForeachInstances( workflow, internalId, workflowVersionId, artifact.getForeachRunId(), runProperties, requests, instanceIds); } else { instances = createRestartForeachInstances( workflow, internalId, workflowVersionId, runProperties, foreachStepId, artifact, requests, instanceIds); } if (ObjectHelper.isCollectionEmptyOrNull(instances)) { return Optional.empty(); } return instanceDao.runWorkflowInstances(workflow.getId(), instances, batchSize); }
// Verifies that runForeachBatch(), when restarting a foreach iteration from an
// upstream RESTART_FROM_INCOMPLETE initiator, launches the batch via the DAO and
// updates the existing workflow instance with a run request that keeps the
// RESTART_FROM_INCOMPLETE policy and the original restart config (including
// step-level restart params).
@Test
public void testCreateRestartForeachInstancesUpstreamModeFromIncomplete() {
  doNothing().when(workflowHelper).updateWorkflowInstance(any(), any());
  when(instanceDao.getLatestWorkflowInstanceRun(anyString(), anyLong()))
      .thenReturn(new WorkflowInstance());
  Map<String, Map<String, ParamDefinition>> stepRunParams =
      Collections.singletonMap(
          "job1", Collections.singletonMap("p1", ParamDefinition.buildParamDefinition("p1", "d1")));
  // Artifact describing a non-fresh run: iteration 2 previously FAILED.
  ForeachArtifact artifact = new ForeachArtifact();
  artifact.setRunPolicy(RunPolicy.RESTART_FROM_INCOMPLETE);
  artifact.setTotalLoopCount(10);
  artifact.setForeachWorkflowId("maestro_foreach_x");
  artifact.setAncestorIterationCount(3L);
  artifact.setForeachRunId(3L);
  artifact.setForeachOverview(new ForeachStepOverview());
  artifact.getForeachOverview().addOne(2, WorkflowInstance.Status.FAILED, null);
  RestartConfig restartConfig =
      RestartConfig.builder()
          .restartPolicy(RunPolicy.RESTART_FROM_INCOMPLETE)
          .downstreamPolicy(RunPolicy.RESTART_FROM_INCOMPLETE)
          .stepRestartParams(stepRunParams)
          .addRestartNode("maestro_foreach_x", 1, null)
          .build();
  // Initiator chain: the restart request originates from an upstream foreach step.
  ForeachInitiator initiator = new ForeachInitiator();
  UpstreamInitiator.Info parent = new UpstreamInitiator.Info();
  parent.setWorkflowId("maestro_foreach_x");
  parent.setInstanceId(1);
  parent.setRunId(1);
  parent.setStepId("foreach-step");
  parent.setStepAttemptId(1);
  initiator.setAncestors(Collections.singletonList(parent));
  RunRequest runRequest =
      RunRequest.builder()
          .initiator(initiator)
          .currentPolicy(RunPolicy.RESTART_FROM_INCOMPLETE)
          .restartConfig(restartConfig)
          .build();
  Optional<Details> errors =
      actionHandler.runForeachBatch(
          definition.getWorkflow(),
          123L,
          10L,
          new RunProperties(),
          "foreach-step",
          artifact,
          Collections.singletonList(runRequest),
          Collections.singletonList(2L),
          3);
  assertFalse(errors.isPresent());
  verify(instanceDao, times(1)).runWorkflowInstances(any(), any(), anyInt());
  // Capture the request passed to updateWorkflowInstance and check policy/config.
  ArgumentCaptor<RunRequest> captor = ArgumentCaptor.forClass(RunRequest.class);
  verify(workflowHelper, times(1)).updateWorkflowInstance(any(), captor.capture());
  RunRequest res = captor.getValue();
  assertEquals(RunPolicy.RESTART_FROM_INCOMPLETE, res.getCurrentPolicy());
  // it will keep the restart config with step restart params
  assertEquals(restartConfig, res.getRestartConfig());
}
/**
 * Returns the value at the given column from the merged result set, without a
 * target-type conversion hint.
 *
 * @param columnIndex 1-based column index
 * @return the column value as an {@link Object}
 * @throws SQLException if the underlying merged result set access fails
 */
@Override
public Object getObject(final int columnIndex) throws SQLException {
    final Object columnValue = mergeResultSet.getValue(columnIndex, Object.class);
    return columnValue;
}
// getObject(int, Class) must pass the requested target type through to the
// merged result set and return its value unchanged (BigDecimal case).
@Test
void assertGetObjectWithBigDecimal() throws SQLException {
    BigDecimal result = new BigDecimal("0");
    when(mergeResultSet.getValue(1, BigDecimal.class)).thenReturn(result);
    assertThat(shardingSphereResultSet.getObject(1, BigDecimal.class), is(result));
}
/**
 * Creates a {@link DocumentBuilderFactory} hardened against XML external entity
 * (XXE) and entity-expansion attacks: secure processing is enabled, DOCTYPE
 * declarations and external general/parameter entities are disallowed, and
 * XInclude / entity-reference expansion are switched off, following OWASP XXE
 * prevention guidance.
 *
 * @return a securely configured document builder factory
 * @throws ParserConfigurationException if the underlying parser does not
 *     support one of the required security features
 */
public static DocumentBuilderFactory newSecureDocumentBuilderFactory()
    throws ParserConfigurationException {
  DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
  dbf.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
  dbf.setFeature(DISALLOW_DOCTYPE_DECL, true);
  dbf.setFeature(LOAD_EXTERNAL_DECL, false);
  dbf.setFeature(EXTERNAL_GENERAL_ENTITIES, false);
  dbf.setFeature(EXTERNAL_PARAMETER_ENTITIES, false);
  dbf.setFeature(CREATE_ENTITY_REF_NODES, false);
  // Defense in depth: disable XInclude processing and entity-reference
  // expansion as well, so a parser that ignores one of the features above
  // still cannot be abused for XXE.
  dbf.setXIncludeAware(false);
  dbf.setExpandEntityReferences(false);
  return dbf;
}
// A document declaring a DTD/entity must be rejected by the secured factory
// (doctype declarations are disallowed), surfacing as a SAXException on parse.
@Test(expected = SAXException.class)
public void testEntityDtdWithSecureDocumentBuilderFactory() throws Exception {
    DocumentBuilder db = XMLUtils.newSecureDocumentBuilderFactory().newDocumentBuilder();
    try (InputStream stream = getResourceStream("/xml/entity-dtd.xml")) {
        Document doc = db.parse(stream);
    }
}
/**
 * Authenticates a user and returns the user's role/group names.
 *
 * <p>NOTE(review): this is currently a stub that unconditionally returns
 * {@code null}, ignoring both credentials — callers must not rely on it for
 * real authentication. TODO: implement the actual directory lookup.
 *
 * @param username login name to authenticate
 * @param password plaintext password (unused by this stub)
 * @return always {@code null} in the current implementation
 */
public static Set<String> auth(String username, String password) {
    return null;
}
// Pins the current stub behavior of LdapUtil.auth(): it returns null for any
// credentials. This test must be updated once real authentication is implemented.
@Test
public void testAuth() throws Exception {
    String user = "jduke";
    String password = "theduke";
    // function returns null always
    Assert.assertEquals(null, LdapUtil.auth(user, password));
}
/**
 * Parses the token stream into a node tree.
 *
 * @return the root {@link Node} produced by the top-level grammar production
 * @throws ScanException if the input cannot be parsed
 */
public Node parse() throws ScanException {
    // Entry point simply delegates to the top-level production E().
    return E();
}
// Parsing an empty pattern must fail with a ScanException. Using assertThrows
// instead of try/fail/catch also makes the test fail (not error) if a different
// exception type escapes.
@Test
public void empty() {
    Assertions.assertThrows(ScanException.class, () -> new Parser<Object>("").parse());
}
/**
 * Returns whether this version is strictly lower than the given one.
 * An unknown version never compares as less than anything.
 *
 * @param version version to compare against
 * @return {@code true} if this version is known and orders before {@code version}
 */
public boolean isLessThan(Version version) {
    if (isUnknown()) {
        return false;
    }
    return compareTo(version) < 0;
}
// isLessThan() is a strict ordering: false for lower and equal versions,
// true for any higher major/minor combination.
@Test
public void isLessThan() throws Exception {
    assertFalse(V3_0.isLessThan(of(2, 0)));
    assertFalse(V3_0.isLessThan(of(3, 0)));
    assertTrue(V3_0.isLessThan(of(3, 1)));
    assertTrue(V3_0.isLessThan(of(4, 0)));
    assertTrue(V3_0.isLessThan(of(100, 0)));
}
/**
 * Associates {@code value} with {@code key} if absent, otherwise replaces the
 * existing value with {@code remappingFunction.apply(oldValue, value)} —
 * mirroring {@link java.util.Map#merge} for an object-to-int map. If the
 * computed result equals the map's missing-value sentinel, the entry is
 * removed instead.
 *
 * @param key key to merge on; must not be null
 * @param value value to insert or combine; must not equal missingValue
 * @param remappingFunction combiner applied as (oldValue, value) when the key exists
 * @return the value now associated with the key, or missingValue if the entry was removed
 * @throws NullPointerException if key or remappingFunction is null
 * @throws IllegalArgumentException if value equals the missingValue sentinel
 */
public int merge(final K key, final int value, final IntIntFunction remappingFunction)
{
    requireNonNull(key);
    requireNonNull(remappingFunction);
    final int missingValue = this.missingValue;
    // missingValue marks empty slots, so it can never be stored as a real value.
    if (missingValue == value)
    {
        throw new IllegalArgumentException("cannot accept missingValue");
    }

    final K[] keys = this.keys;
    final int[] values = this.values;
    // Capacity is a power of two, so (length - 1) acts as a bit mask for wrapping.
    @DoNotSub final int mask = values.length - 1;
    @DoNotSub int index = Hashing.hash(key, mask);
    int oldValue;

    // Linear-probe until an empty slot (value == missingValue) or the matching key.
    while (missingValue != (oldValue = values[index]))
    {
        if (Objects.equals(keys[index], key))
        {
            break;
        }

        index = ++index & mask;
    }

    // Absent key -> take the given value; present key -> combine old and new.
    final int newValue = missingValue == oldValue ? value : remappingFunction.apply(oldValue, value);

    if (missingValue != newValue)
    {
        keys[index] = key;
        values[index] = newValue;

        if (++size > resizeThreshold)
        {
            increaseCapacity();
        }
    }
    else
    {
        // Remapping collapsed to the sentinel: delete the entry and re-pack the
        // probe chain so later lookups don't stop early at the vacated slot.
        keys[index] = null;
        values[index] = missingValue;

        --size;
        compactChain(index);
    }

    return newValue;
}
// merge() must fail fast with exactly NullPointerException when the remapping
// function is null, before touching any map state.
@Test
void mergeThrowsNullPointerExceptionIfRemappingFunctionIsNull()
{
    assertThrowsExactly(NullPointerException.class, () -> objectToIntMap.merge("key", 42, null));
}
/**
 * Moves or renames a file/folder on the EUE backend.
 *
 * <p>The operation is split into up to three backend calls: (1) if the target
 * already exists under a different (case-insensitive) path, it is trashed
 * first; (2) if the parent directory changes, the resource is moved under the
 * new parent via the children-move API (alias variant for ROOT/TRASH parents);
 * (3) if the display name changes, the resource is patched with the new name.
 * On success the cached file id for the source path is invalidated.
 *
 * @param file source file to move
 * @param target destination path (parent and/or name may differ)
 * @param status transfer status; {@code isExists()} triggers trashing the target
 * @param delete callback invoked when the existing target is trashed
 * @param callback connection prompt callback for the trash operation
 * @return the target path
 * @throws BackgroundException mapped from any backend API failure
 */
@Override
public Path move(final Path file, final Path target, final TransferStatus status, final Delete.Callback delete, final ConnectionCallback callback) throws BackgroundException {
    try {
        final EueApiClient client = new EueApiClient(session);
        if(status.isExists()) {
            // Only trash the target when it is a genuinely different resource;
            // a case-only rename of the same path must not delete the file.
            if(!new CaseInsensitivePathPredicate(file).test(target)) {
                if(log.isWarnEnabled()) {
                    log.warn(String.format("Trash file %s to be replaced with %s", target, file));
                }
                new EueTrashFeature(session, fileid).delete(Collections.singletonMap(target, status), callback, delete);
            }
        }
        final String resourceId = fileid.getFileId(file);
        // Step 1: parent changed -> move the resource under the new parent.
        if(!new SimplePathPredicate(file.getParent()).test(target.getParent())) {
            final ResourceMoveResponseEntries resourceMoveResponseEntries;
            final String parentResourceId = fileid.getFileId(target.getParent());
            // ROOT and TRASH parents are addressed through the alias-based move API.
            switch(parentResourceId) {
                case EueResourceIdProvider.ROOT:
                case EueResourceIdProvider.TRASH:
                    resourceMoveResponseEntries = new MoveChildrenForAliasApiApi(client)
                            .resourceAliasAliasChildrenMovePost(parentResourceId, Collections.singletonList(String.format("%s/resource/%s",
                                    session.getBasePath(), resourceId)), null, null, null, "rename", null);
                    break;
                default:
                    resourceMoveResponseEntries = new MoveChildrenApi(client)
                            .resourceResourceIdChildrenMovePost(parentResourceId, Collections.singletonList(String.format("%s/resource/%s",
                                    session.getBasePath(), resourceId)), null, null, null, "rename", null);
            }
            if(null == resourceMoveResponseEntries) {
                // Move of single file will return 200 status code with empty response body
            }
            else {
                // Batch response: map any non-OK entry to a BackgroundException.
                for(ResourceMoveResponseEntry resourceMoveResponseEntry : resourceMoveResponseEntries.values()) {
                    switch(resourceMoveResponseEntry.getStatusCode()) {
                        case HttpStatus.SC_OK:
                            break;
                        default:
                            log.warn(String.format("Failure %s moving file %s", resourceMoveResponseEntries, file));
                            final ResourceCreationResponseEntryEntity entity = resourceMoveResponseEntry.getEntity();
                            if(null == entity) {
                                // No entity body: fall back to the HTTP reason phrase.
                                throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getReason(),
                                        null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders()));
                            }
                            throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getEntity().getError(),
                                    null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders()));
                    }
                }
            }
        }
        // Step 2: name changed -> patch the resource metadata with the new name.
        if(!StringUtils.equals(file.getName(), target.getName())) {
            final ResourceUpdateModel resourceUpdateModel = new ResourceUpdateModel();
            final ResourceUpdateModelUpdate resourceUpdateModelUpdate = new ResourceUpdateModelUpdate();
            final Uifs uifs = new Uifs();
            uifs.setName(target.getName());
            resourceUpdateModelUpdate.setUifs(uifs);
            resourceUpdateModel.setUpdate(resourceUpdateModelUpdate);
            final ResourceMoveResponseEntries resourceMoveResponseEntries = new UpdateResourceApi(client).resourceResourceIdPatch(
                    resourceId, resourceUpdateModel, null, null, null);
            if(null == resourceMoveResponseEntries) {
                // Move of single file will return 200 status code with empty response body
            }
            else {
                // Rename responses signal success with 201 Created.
                for(ResourceMoveResponseEntry resourceMoveResponseEntry : resourceMoveResponseEntries.values()) {
                    switch(resourceMoveResponseEntry.getStatusCode()) {
                        case HttpStatus.SC_CREATED:
                            break;
                        default:
                            log.warn(String.format("Failure %s renaming file %s", resourceMoveResponseEntry, file));
                            throw new EueExceptionMappingService().map(new ApiException(resourceMoveResponseEntry.getReason(),
                                    null, resourceMoveResponseEntry.getStatusCode(), client.getResponseHeaders()));
                    }
                }
            }
        }
        // Drop the stale id mapping for the source path.
        fileid.cache(file, null);
        return target;
    }
    catch(ApiException e) {
        throw new EueExceptionMappingService().map("Cannot rename {0}", e, file);
    }
}
// End-to-end move of a non-empty folder: the folder and its child must exist
// under the target path afterwards, the source paths must be gone, and the
// feature must report itself as recursive for folder moves.
@Test
public void testMoveRecursive() throws Exception {
    final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
    final Path sourceFolder = new EueDirectoryFeature(session, fileid).mkdir(
            new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final Path sourceFile = new Path(sourceFolder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    createFile(fileid, sourceFile, RandomUtils.nextBytes(541));
    final Path targetFolder = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory));
    final EueMoveFeature feature = new EueMoveFeature(session, fileid);
    assertTrue(feature.isRecursive(sourceFolder, targetFolder));
    feature.move(sourceFolder, targetFolder, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
    // Target folder and the moved child must be visible via both find features.
    assertTrue(new EueFindFeature(session, fileid).find(targetFolder));
    assertTrue(new EueFindFeature(session, fileid).find(new Path(targetFolder, sourceFile.getName(), sourceFile.getType())));
    assertTrue(new DefaultFindFeature(session).find(new Path(targetFolder, sourceFile.getName(), sourceFile.getType())));
    // Source folder and file must no longer exist (attributes cleared to avoid cached ids).
    assertFalse(new EueFindFeature(session, fileid).find(sourceFolder));
    assertFalse(new EueFindFeature(session, fileid).find(new Path(sourceFile).withAttributes(PathAttributes.EMPTY)));
    assertFalse(new DefaultFindFeature(session).find(new Path(sourceFile).withAttributes(PathAttributes.EMPTY)));
    new EueDeleteFeature(session, fileid).delete(Collections.singletonList(targetFolder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Executes the LOCK CLUSTER statement: acquires the global cluster lock and,
 * while holding it, applies the lock strategy named in the statement.
 * If the lock cannot be acquired within the timeout, the statement is a no-op.
 *
 * @param sqlStatement statement carrying the lock strategy algorithm
 * @param contextManager context manager providing cluster state and lock context
 */
@Override
@SuppressWarnings({"unchecked", "rawtypes"})
public void executeUpdate(final LockClusterStatement sqlStatement, final ContextManager contextManager) {
    checkState(contextManager);
    checkAlgorithm(sqlStatement);
    LockContext lockContext = contextManager.getComputeNodeInstanceContext().getLockContext();
    GlobalLockDefinition lockDefinition = new GlobalLockDefinition(GlobalLockNames.CLUSTER_LOCK.getLockName());
    final long lockTimeoutMillis = 3000L;
    if (!lockContext.tryLock(lockDefinition, lockTimeoutMillis)) {
        return;
    }
    try {
        // Re-validate state after acquiring the lock, since it may have changed while waiting.
        checkState(contextManager);
        TypedSPILoader.getService(ClusterLockStrategy.class, sqlStatement.getLockStrategy().getName()).lock();
    } finally {
        lockContext.unlock(lockDefinition);
    }
}
// An unknown lock-strategy algorithm name ("FOO") must be rejected by the SPI
// lookup with ServiceProviderNotFoundException before any lock is taken.
@Test
void assertExecuteUpdateWithWrongAlgorithm() {
    ContextManager contextManager = mock(ContextManager.class, RETURNS_DEEP_STUBS);
    when(contextManager.getStateContext().getClusterState()).thenReturn(ClusterState.OK);
    assertThrows(ServiceProviderNotFoundException.class,
            () -> executor.executeUpdate(new LockClusterStatement(new AlgorithmSegment("FOO", new Properties())), contextManager));
}
/**
 * Builds a {@link BootstrapMetadata} from a list of bootstrap records, deriving
 * the metadata version from the last metadata.version FeatureLevelRecord found
 * in the list (later records override earlier ones).
 *
 * @param records bootstrap records to wrap
 * @param source human-readable description of where the records came from
 * @return the bootstrap metadata
 * @throws RuntimeException if no metadata.version FeatureLevelRecord is present
 */
public static BootstrapMetadata fromRecords(List<ApiMessageAndVersion> records, String source) {
    Optional<MetadataVersion> latestVersion = Optional.empty();
    for (ApiMessageAndVersion record : records) {
        Optional<MetadataVersion> candidate = recordToMetadataVersion(record.message());
        if (candidate.isPresent()) {
            latestVersion = candidate;
        }
    }
    MetadataVersion metadataVersion = latestVersion.orElseThrow(() ->
        new RuntimeException("No FeatureLevelRecord for " + MetadataVersion.FEATURE_NAME +
            " was found in the bootstrap metadata from " + source));
    return new BootstrapMetadata(records, metadataVersion, source);
}
// fromRecords() must extract the metadata version from the record list and
// produce a BootstrapMetadata equal to one constructed directly.
@Test
public void testFromRecordsList() {
    assertEquals(new BootstrapMetadata(SAMPLE_RECORDS1, IBP_3_3_IV2, "bar"),
        BootstrapMetadata.fromRecords(SAMPLE_RECORDS1, "bar"));
}
/**
 * Decides whether this plugin should be skipped for the current exchange:
 * it runs only for requests whose rpc type is TARS.
 *
 * @param exchange current server web exchange
 * @return {@code true} to skip this plugin for non-TARS requests
 */
@Override
public boolean skip(final ServerWebExchange exchange) {
    return skipExcept(exchange, RpcTypeEnum.TARS);
}
// With a TARS-typed ShenyuContext on the exchange, skip() must return false
// so the Tars plugin handles the request.
@Test
public void testSkip() {
    ShenyuContext context = mock(ShenyuContext.class);
    when(context.getRpcType()).thenReturn(RpcTypeEnum.TARS.getName());
    exchange.getAttributes().put(Constants.CONTEXT, context);
    boolean result = tarsPluginUnderTest.skip(exchange);
    assertFalse(result);
}
/**
 * Sets the nak-delay channel parameter from a duration string, or clears it
 * when {@code null} is passed.
 *
 * @param nakDelay duration string to parse, or {@code null} to unset the value
 * @return this builder for fluent chaining
 * @throws IllegalArgumentException if the string cannot be parsed as a duration
 */
public ChannelUriStringBuilder nakDelay(final String nakDelay) {
    if (null == nakDelay) {
        this.nakDelay = null;
    } else {
        this.nakDelay = parseDuration(NAK_DELAY_PARAM_NAME, nakDelay);
    }
    return this;
}
// A non-numeric nak-delay value must be rejected by duration parsing with
// IllegalArgumentException.
@Test
void shouldRejectInvalidNakDelay() {
    assertThrows(IllegalArgumentException.class, () -> new ChannelUriStringBuilder().nakDelay("foo"));
}