focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Entry point for executing one feature: either delegate to a configured
// processor, or run the scenarios locally guarded by the before-feature hook.
@Override
public void run() {
    if (processor != null) {
        // An external processor owns execution entirely.
        processor.execute();
        return;
    }
    if (beforeHook()) {
        scenarios.forEachRemaining(this::processScenario);
    } else {
        // Hook vetoed the feature; skip all scenarios but still run teardown.
        logger.info("before-feature hook returned [false], aborting: {}", this);
    }
    afterFeature();
}
// Smoke test: executes fork.feature through the shared run(...) harness;
// passes iff the feature itself passes.
@Test void testKarateFork() { run("fork.feature"); }
// Converts a DataTable into a Map: column 0 supplies keys, remaining columns
// supply values. A blank top-left cell marks the first row as a header for the
// value side only. When key count equals height-1, the keys imply that a
// TableEntry/TableRow transformer consumed the header row for values too.
// Throws when keys and values end up with different cardinalities.
@Override public <K, V> Map<K, V> toMap(DataTable dataTable, Type keyType, Type valueType) { requireNonNull(dataTable, "dataTable may not be null"); requireNonNull(keyType, "keyType may not be null"); requireNonNull(valueType, "valueType may not be null"); if (dataTable.isEmpty()) { return emptyMap(); } DataTable keyColumn = dataTable.columns(0, 1); DataTable valueColumns = dataTable.columns(1); String firstHeaderCell = keyColumn.cell(0, 0); boolean firstHeaderCellIsBlank = firstHeaderCell == null || firstHeaderCell.isEmpty(); List<K> keys = convertEntryKeys(keyType, keyColumn, valueType, firstHeaderCellIsBlank); if (valueColumns.isEmpty()) { return createMap(keyType, keys, valueType, nCopies(keys.size(), null)); } boolean keysImplyTableRowTransformer = keys.size() == dataTable.height() - 1; List<V> values = convertEntryValues(valueColumns, keyType, valueType, keysImplyTableRowTransformer); if (keys.size() != values.size()) { throw keyValueMismatchException(firstHeaderCellIsBlank, keys.size(), keyType, values.size(), valueType); } return createMap(keyType, keys, valueType, values); }
// Verifies the key/value cardinality mismatch path of toMap: an entry
// transformer for the value consumes the header row while the keys do not,
// leaving more keys than values, and the exception message must explain it.
@Test void to_map_of_primitive_to_entry__throws_exception__more_keys_then_values() { DataTable table = parse("", "| code | lat | lon |", "| KMSY | 29.993333 | -90.258056 |", "| KSFO | 37.618889 | -122.375 |", "| KSEA | 47.448889 | -122.309444 |", "| KJFK | 40.639722 | -73.778889 |"); registry.defineDataTableType(new DataTableType(Coordinate.class, COORDINATE_TABLE_ENTRY_TRANSFORMER)); CucumberDataTableException exception = assertThrows( CucumberDataTableException.class, () -> converter.toMap(table, String.class, Coordinate.class)); assertThat(exception.getMessage(), is(format("" + "Can't convert DataTable to Map<%s, %s>.\n" + "There are more keys than values. " + "Did you use a TableEntryTransformer for the value " + "while using a TableRow or TableCellTransformer for the keys?", typeName(String.class), typeName(Coordinate.class)))); }
// Synchronous facade: blocks on the async XPENDING range query for the given
// consumer group, idle-time filter and max count.
@Override public Map<StreamMessageId, Map<K, V>> pendingRange(String groupName, StreamMessageId startId, StreamMessageId endId, long idleTime, TimeUnit idleTimeUnit, int count) { return get(pendingRangeAsync(groupName, startId, endId, idleTime, idleTimeUnit, count)); }
// Exercises pendingRange at group scope, per-consumer scope (consumer1 owns
// both pending entries) and for a consumer with no deliveries (empty result).
@Test public void testPendingRange() { RStream<String, String> stream = redisson.getStream("test"); stream.add(StreamAddArgs.entry("0", "0")); stream.createGroup(StreamCreateGroupArgs.name("testGroup")); StreamMessageId id1 = stream.add(StreamAddArgs.entry("11", "12")); StreamMessageId id2 = stream.add(StreamAddArgs.entry("21", "22")); Map<StreamMessageId, Map<String, String>> s = stream.readGroup("testGroup", "consumer1", StreamReadGroupArgs.neverDelivered()); assertThat(s.size()).isEqualTo(2); Map<StreamMessageId, Map<String, String>> pres = stream.pendingRange("testGroup", StreamMessageId.MIN, StreamMessageId.MAX, 10); assertThat(pres.keySet()).containsExactly(id1, id2); assertThat(pres.get(id1)).isEqualTo(Collections.singletonMap("11", "12")); assertThat(pres.get(id2)).isEqualTo(Collections.singletonMap("21", "22")); Map<StreamMessageId, Map<String, String>> pres2 = stream.pendingRange("testGroup", "consumer1", StreamMessageId.MIN, StreamMessageId.MAX, 10); assertThat(pres2.keySet()).containsExactly(id1, id2); assertThat(pres2.get(id1)).isEqualTo(Collections.singletonMap("11", "12")); assertThat(pres2.get(id2)).isEqualTo(Collections.singletonMap("21", "22")); Map<StreamMessageId, Map<String, String>> pres3 = stream.pendingRange("testGroup", "consumer2", StreamMessageId.MIN, StreamMessageId.MAX, 10); assertThat(pres3).isEmpty(); }
/**
 * Locates the YAML sharding rule configuration (if present) among the given
 * YAML rule configurations and swaps it into its object representation.
 *
 * @param yamlRuleConfigs YAML rule configurations to search
 * @return the converted sharding rule configuration, or empty when absent
 */
public static Optional<ShardingRuleConfiguration> findAndConvertShardingRuleConfiguration(final Collection<YamlRuleConfiguration> yamlRuleConfigs) {
    return findYamlShardingRuleConfiguration(yamlRuleConfigs)
            .map(yamlRule -> new YamlShardingRuleConfigurationSwapper().swapToObject(yamlRule));
}
// Happy path: the fixture YAML config yields exactly one table rule whose
// logic table is LOGIC_TABLE.
@Test void assertFindAndConvertShardingRuleConfiguration() { Optional<ShardingRuleConfiguration> actual = ShardingRuleConfigurationConverter.findAndConvertShardingRuleConfiguration(yamlRuleConfig); assertTrue(actual.isPresent()); assertThat(actual.get().getTables().size(), is(1)); assertThat(actual.get().getTables().iterator().next().getLogicTable(), is("LOGIC_TABLE")); }
/**
 * Creates a persistent, timestamped window store supplier.
 *
 * @param name             store name
 * @param retentionPeriod  how long windows are retained
 * @param windowSize       size of each window
 * @param retainDuplicates whether duplicate keys within a window are kept
 * @return supplier for a timestamped persistent window store
 * @throws IllegalArgumentException if the arguments are invalid
 */
public static WindowBytesStoreSupplier persistentTimestampedWindowStore(final String name, final Duration retentionPeriod, final Duration windowSize, final boolean retainDuplicates) throws IllegalArgumentException {
    // A timestamped store is the plain persistent window store with the
    // "timestamped" flag enabled.
    final boolean timestampedStore = true;
    return persistentWindowStore(name, retentionPeriod, windowSize, retainDuplicates, timestampedStore);
}
// Verifies the supplier builds a RocksDB-backed window store whose wrapped
// inner store is the timestamped segmented bytes store.
@Test public void shouldCreateRocksDbTimestampedWindowStore() { final WindowStore store = Stores.persistentTimestampedWindowStore("store", ofMillis(1L), ofMillis(1L), false).get(); final StateStore wrapped = ((WrappedStateStore) store).wrapped(); assertThat(store, instanceOf(RocksDBWindowStore.class)); assertThat(wrapped, instanceOf(RocksDBTimestampedSegmentedBytesStore.class)); }
/**
 * Static factory wrapping the given {@code Retry} in a transformer.
 *
 * @param retry the retry policy to apply
 * @param <T>   emitted element type
 * @return a transformer applying the retry policy
 */
public static <T> RetryTransformer<T> of(Retry retry) {
    RetryTransformer<T> transformer = new RetryTransformer<>(retry);
    return transformer;
}
// Errors (vs Exceptions) must propagate without consuming retry attempts:
// a StackOverflowError escapes the pipeline and no retry metrics are recorded.
@Test(expected = StackOverflowError.class) public void shouldNotRetryUsingSingleStackOverFlow() { RetryConfig config = retryConfig(); Retry retry = Retry.of("testName", config); given(helloWorldService.returnHelloWorld()) .willThrow(new StackOverflowError("BAM!")); Single.fromCallable(helloWorldService::returnHelloWorld) .compose(RetryTransformer.of(retry)) .test(); then(helloWorldService).should().returnHelloWorld(); Retry.Metrics metrics = retry.getMetrics(); assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isZero(); assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isZero(); }
// Hashes array *contents* (not identities) so that equal credential data
// produces equal hash codes, consistent with an equals() based on
// Arrays.equals of the same fields.
@Override
public int hashCode() {
    int saltHash = Arrays.hashCode(salt);
    int storedKeyHash = Arrays.hashCode(storedKey);
    int serverKeyHash = Arrays.hashCode(serverKey);
    return Objects.hash(saltHash, storedKeyHash, serverKeyHash, iterations);
}
// Different field contents must not compare equal.
// NOTE(review): the hashCode inequality assertion is stronger than the
// hashCode contract guarantees (distinct objects MAY collide); it holds for
// these fixed inputs but would be invalid for arbitrary data.
@Test public void testNotEqualsDifferentContent() { byte[] salt1 = {1, 2, 3}; byte[] storedKey1 = {4, 5, 6}; byte[] serverKey1 = {7, 8, 9}; int iterations1 = 1000; byte[] salt2 = {9, 8, 7}; byte[] storedKey2 = {6, 5, 4}; byte[] serverKey2 = {3, 2, 1}; int iterations2 = 2000; ScramCredentialData data1 = new ScramCredentialData(salt1, storedKey1, serverKey1, iterations1); ScramCredentialData data2 = new ScramCredentialData(salt2, storedKey2, serverKey2, iterations2); assertNotEquals(data1, data2); assertNotEquals(data1.hashCode(), data2.hashCode()); }
// Scores the prediction against the ground truth using this instance's
// configured averaging strategy, delegating to the static scorer.
@Override
public double score(int[] truth, int[] prediction) {
    double averagedPrecision = of(truth, prediction, strategy);
    return averagedPrecision;
}
// Weighted-average precision over a 6-class confusion with imbalanced class
// sizes; expected value precomputed to 4 decimal places.
@Test public void testWeighted() { System.out.println("Weighted-Precision"); int[] truth = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5 }; int[] prediction = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 2, 2, 2, 3, 1, 3, 3, 3, 4, 5, 4, 4, 4, 4, 1, 5, 5 }; Precision instance = new Precision(Averaging.Weighted); double expResult = 0.8914; double result = instance.score(truth, prediction); assertEquals(expResult, result, 1E-4); }
/**
 * Returns a key-range iterator that lazily spans every underlying store
 * registered for this store name. Each store's iterator is obtained on
 * demand; results are surfaced through a peeking composite iterator.
 *
 * @param from range start (inclusive)
 * @param to   range end (inclusive)
 * @return composite iterator over all matching stores
 * @throws InvalidStateStoreException if a store has been migrated away
 */
@Override
public KeyValueIterator<K, V> range(final K from, final K to) {
    final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = new NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>>() {
        @Override
        public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) {
            try {
                return store.range(from, to);
            } catch (final InvalidStateStoreException e) {
                // Fix: preserve the original exception as the cause instead of
                // discarding it, so the underlying failure remains diagnosable.
                throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.", e);
            }
        }
    };
    final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
    return new DelegatingPeekingKeyValueIterator<>(
            storeName,
            new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction));
}
// Range query must merge results from both underlying stores, include only
// keys within ["a","e"], and exclude out-of-range keys ("x","z").
@Test public void shouldSupportRangeAcrossMultipleKVStores() { final KeyValueStore<String, String> cache = newStoreInstance(); stubProviderTwo.addStore(storeName, cache); stubOneUnderlying.put("a", "a"); stubOneUnderlying.put("b", "b"); stubOneUnderlying.put("z", "z"); cache.put("c", "c"); cache.put("d", "d"); cache.put("x", "x"); final List<KeyValue<String, String>> results = toList(theStore.range("a", "e")); assertArrayEquals( asList( new KeyValue<>("a", "a"), new KeyValue<>("b", "b"), new KeyValue<>("c", "c"), new KeyValue<>("d", "d") ).toArray(), results.toArray()); }
// A customer fulfils a role iff the role is present as a key in the role map.
@Override
public boolean hasRole(Role role) {
    return roles.keySet().contains(role);
}
// A role added to the core is reported; an unrelated role is not.
@Test void hasRole() { var core = new CustomerCore(); core.addRole(Role.BORROWER); assertTrue(core.hasRole(Role.BORROWER)); assertFalse(core.hasRole(Role.INVESTOR)); }
/**
 * Builds a {@code GroupConfig} from default values merged with explicit
 * overrides; on key collision the override wins because it is applied last.
 *
 * @param defaults  default configuration entries
 * @param overrides entries that take precedence over the defaults
 * @return the merged group configuration
 */
public static GroupConfig fromProps(Map<?, ?> defaults, Properties overrides) {
    Properties merged = new Properties();
    merged.putAll(defaults);
    merged.putAll(overrides);
    return new GroupConfig(merged);
}
// A key present only in the defaults keeps its default value (10); a key also
// present in the overrides takes the override value (20).
@Test public void testFromPropsWithDefaultValue() { Map<String, String> defaultValue = new HashMap<>(); defaultValue.put(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "10"); defaultValue.put(GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, "10"); Properties props = new Properties(); props.put(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "20"); GroupConfig config = GroupConfig.fromProps(defaultValue, props); assertEquals(10, config.getInt(GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG)); assertEquals(20, config.getInt(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG)); }
/**
 * Returns the metric-category discriminator byte for this dump
 * (e.g. counter / gauge / histogram / meter — see the METRIC_CATEGORY_*
 * constants used by callers).
 */
public abstract byte getCategory();
// Constructor argument order check: every histogram statistic must land in
// its corresponding field, and the category byte must identify a histogram.
@Test void testDumpedHistogram() { QueryScopeInfo info = new QueryScopeInfo.JobManagerQueryScopeInfo(); MetricDump.HistogramDump hd = new MetricDump.HistogramDump(info, "hist", 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11); assertThat(hd.name).isEqualTo("hist"); assertThat(hd.min).isOne(); assertThat(hd.max).isEqualTo(2); assertThat(hd.mean).isCloseTo(3, within(0.1)); assertThat(hd.median).isCloseTo(4, within(0.1)); assertThat(hd.stddev).isCloseTo(5, within(0.1)); assertThat(hd.p75).isCloseTo(6, within(0.1)); assertThat(hd.p90).isCloseTo(7, within(0.1)); assertThat(hd.p95).isCloseTo(8, within(0.1)); assertThat(hd.p98).isCloseTo(9, within(0.1)); assertThat(hd.p99).isCloseTo(10, within(0.1)); assertThat(hd.p999).isCloseTo(11, within(0.1)); assertThat(hd.scopeInfo).isEqualTo(info); assertThat(hd.getCategory()).isEqualTo(METRIC_CATEGORY_HISTOGRAM); }
// Returns the current reference count.
// NOTE(review): plain field read — visibility across threads depends on how
// referenceCount is declared/updated elsewhere (volatile/synchronized); not
// visible from this snippet, confirm before relying on it concurrently.
public int getReferenceCount() { return referenceCount; }
// Five concurrent retains and five concurrent releases must net to zero.
// NOTE(review): releases may run before their matching retains; this assumes
// TestReferenceCounted tolerates transient negative/zero counts.
@Test void testConcurrency() throws InterruptedException { TestReferenceCounted referenceCounted = new TestReferenceCounted(); List<Thread> threads = new ArrayList<>(); for (int i = 0; i < 5; i++) { Thread thread = new Thread(referenceCounted::retain); thread.start(); threads.add(thread); } for (int i = 0; i < 5; i++) { Thread thread = new Thread(referenceCounted::release); thread.start(); threads.add(thread); } for (Thread thread : threads) { thread.join(); } assertThat(referenceCounted.getReferenceCount()).isEqualTo(0); }
// A fetcher is idle when it has no assigned splits, no queued tasks and no
// task currently running. All three checks happen under the lock.
boolean isIdle() {
    lock.lock();
    try {
        if (!assignedSplits.isEmpty()) {
            return false;
        }
        if (!taskQueue.isEmpty()) {
            return false;
        }
        return runningTask == null;
    } finally {
        lock.unlock();
    }
}
// A freshly created fetcher with no splits assigned must report idle.
@Test public void testNewFetcherIsIdle() { final SplitFetcher<Object, TestingSourceSplit> fetcher = createFetcher(new TestingSplitReader<>()); assertThat(fetcher.isIdle()).isTrue(); }
@GET public Response getContainers(@PathParam("version") String version, @HeaderParam(HEADER_ACCEPT) String acceptHeader, @HeaderParam(HEADER_ACCEPT_ENCODING) String acceptEncoding, @HeaderParam(EurekaAccept.HTTP_X_EUREKA_ACCEPT) String eurekaAccept, @Context UriInfo uriInfo, @Nullable @QueryParam("regions") String regionsStr) { boolean isRemoteRegionRequested = null != regionsStr && !regionsStr.isEmpty(); String[] regions = null; if (!isRemoteRegionRequested) { EurekaMonitors.GET_ALL.increment(); } else { regions = regionsStr.toLowerCase().split(","); Arrays.sort(regions); // So we don't have different caches for same regions queried in different order. EurekaMonitors.GET_ALL_WITH_REMOTE_REGIONS.increment(); } // Check if the server allows the access to the registry. The server can // restrict access if it is not // ready to serve traffic depending on various reasons. if (!registry.shouldAllowAccess(isRemoteRegionRequested)) { return Response.status(Status.FORBIDDEN).build(); } CurrentRequestVersion.set(Version.toEnum(version)); KeyType keyType = Key.KeyType.JSON; String returnMediaType = MediaType.APPLICATION_JSON; if (acceptHeader == null || !acceptHeader.contains(HEADER_JSON_VALUE)) { keyType = Key.KeyType.XML; returnMediaType = MediaType.APPLICATION_XML; } Key cacheKey = new Key(Key.EntityType.Application, ResponseCacheImpl.ALL_APPS, keyType, CurrentRequestVersion.get(), EurekaAccept.fromString(eurekaAccept), regions ); Response response; if (acceptEncoding != null && acceptEncoding.contains(HEADER_GZIP_VALUE)) { response = Response.ok(responseCache.getGZIP(cacheKey)) .header(HEADER_CONTENT_ENCODING, HEADER_GZIP_VALUE) .header(HEADER_CONTENT_TYPE, returnMediaType) .build(); } else { response = Response.ok(responseCache.get(cacheKey)) .build(); } CurrentRequestVersion.remove(); logger.debug("Sent registry information to client."); return response; }
// Requesting JSON with gzip encoding must yield gzip Content-Encoding and a
// JSON Content-Type on the response.
@Test public void testFullAppsGetGzipJsonHeaderType() throws Exception { Response response = applicationsResource.getContainers( Version.V2.name(), MediaType.APPLICATION_JSON, "gzip", // encoding EurekaAccept.full.name(), null, // uriInfo null // remote regions ); assertThat(response.getMetadata().getFirst("Content-Encoding").toString(), is("gzip")); assertThat(response.getMetadata().getFirst("Content-Type").toString(), is(MediaType.APPLICATION_JSON)); }
// Creates a Kubernetes network from the JSON request body and answers
// 201 Created with a Location of the new network id. Decode/URI failures are
// rewrapped as IllegalArgumentException (cause preserved), which the JAX-RS
// layer maps to a client error.
@POST @Consumes(MediaType.APPLICATION_JSON) @Produces(MediaType.APPLICATION_JSON) public Response createNetwork(InputStream input) { log.trace(String.format(MESSAGE, "CREATE")); URI location; try { ObjectNode jsonTree = readTreeFromStream(mapper(), input); final K8sNetwork network = codec(K8sNetwork.class).decode(jsonTree, this); adminService.createNetwork(network); location = new URI(network.networkId()); } catch (IOException | URISyntaxException e) { throw new IllegalArgumentException(e); } return Response.created(location).build(); }
// POSTing the fixture network JSON must return 201 and invoke the admin
// service's createNetwork exactly as mocked.
@Test public void testCreateNetworkWithCreateOperation() { mockAdminService.createNetwork(anyObject()); replay(mockAdminService); final WebTarget wt = target(); InputStream jsonStream = K8sNetworkWebResourceTest.class .getResourceAsStream("k8s-network.json"); Response response = wt.path(PATH).request(MediaType.APPLICATION_JSON_TYPE) .post(Entity.json(jsonStream)); final int status = response.getStatus(); assertThat(status, is(201)); verify(mockAdminService); }
/**
 * Validates a comma-separated list of beta-publish target IPs.
 *
 * @param betaIps comma-separated IP list; must be non-blank and every entry
 *                must be a syntactically valid IP address
 * @throws NacosException with CLIENT_INVALID_PARAM when blank or any entry
 *                        is not a valid IP
 */
public static void checkBetaIps(String betaIps) throws NacosException {
    if (StringUtils.isBlank(betaIps)) {
        throw new NacosException(NacosException.CLIENT_INVALID_PARAM, BETAIPS_INVALID_MSG);
    }
    for (String candidate : betaIps.split(",")) {
        if (!InternetAddressUtil.isIP(candidate)) {
            throw new NacosException(NacosException.CLIENT_INVALID_PARAM, BETAIPS_INVALID_MSG);
        }
    }
}
// Blank input must be rejected with the "betaIps invalid" message.
@Test void testCheckBetaIpsFail1() throws NacosException { Throwable exception = assertThrows(NacosException.class, () -> { ParamUtils.checkBetaIps(""); }); assertTrue(exception.getMessage().contains("betaIps invalid")); }
/**
 * Parses raw command-line arguments against the shared options definition.
 *
 * @param rawArgs arguments as received from {@code main}
 * @return the parsed CLI arguments
 * @throws CliArgumentsException when parsing fails (cause preserved)
 */
static CliArguments fromRawArgs(String[] rawArgs) throws CliArgumentsException {
    try {
        return new CliArguments(new DefaultParser().parse(optionsDefinition, rawArgs));
    } catch (ParseException e) {
        throw new CliArgumentsException(e);
    }
}
// Supplying --file/--stdin without an endpoint must fail with the exact
// "Endpoint must be specified" message.
@Test void fails_on_missing_parameters() { CliArguments.CliArgumentsException exception = assertThrows( CliArguments.CliArgumentsException.class, () -> CliArguments.fromRawArgs(new String[] {"--file", "/path/to/file", "--stdin"})); assertEquals("Endpoint must be specified", exception.getMessage()); }
// Reports how many one-time EC and PQ prekeys remain for the authenticated
// device on the requested identity (aci by default). Both counts are fetched
// concurrently and combined into a single PreKeyCount payload.
@GET @Produces(MediaType.APPLICATION_JSON) @Operation(summary = "Get prekey count", description = "Gets the number of one-time prekeys uploaded for this device and still available") @ApiResponse(responseCode = "200", description = "Body contains the number of available one-time prekeys for the device.", useReturnTypeSchema = true) @ApiResponse(responseCode = "401", description = "Account authentication check failed.") public CompletableFuture<PreKeyCount> getStatus(@ReadOnly @Auth final AuthenticatedDevice auth, @QueryParam("identity") @DefaultValue("aci") final IdentityType identityType) { final CompletableFuture<Integer> ecCountFuture = keysManager.getEcCount(auth.getAccount().getIdentifier(identityType), auth.getAuthenticatedDevice().getId()); final CompletableFuture<Integer> pqCountFuture = keysManager.getPqCount(auth.getAccount().getIdentifier(identityType), auth.getAuthenticatedDevice().getId()); return ecCountFuture.thenCombine(pqCountFuture, PreKeyCount::new); }
// Uploading one EC prekey plus a signed EC prekey must return 204 and store
// exactly those keys via the keys manager.
@Test void putKeysTestV2() { final ECPreKey preKey = KeysHelper.ecPreKey(31337); final ECSignedPreKey signedPreKey = KeysHelper.signedECPreKey(31338, AuthHelper.VALID_IDENTITY_KEY_PAIR); final SetKeysRequest setKeysRequest = new SetKeysRequest(List.of(preKey), signedPreKey, null, null); Response response = resources.getJerseyTest() .target("/v2/keys") .request() .header("Authorization", AuthHelper.getAuthHeader(AuthHelper.VALID_UUID, AuthHelper.VALID_PASSWORD)) .put(Entity.entity(setKeysRequest, MediaType.APPLICATION_JSON_TYPE)); assertThat(response.getStatus()).isEqualTo(204); ArgumentCaptor<List<ECPreKey>> listCaptor = ArgumentCaptor.forClass(List.class); verify(KEYS).storeEcOneTimePreKeys(eq(AuthHelper.VALID_UUID), eq(SAMPLE_DEVICE_ID), listCaptor.capture()); assertThat(listCaptor.getValue()).containsExactly(preKey); verify(KEYS).storeEcSignedPreKeys(AuthHelper.VALID_UUID, AuthHelper.VALID_DEVICE.getId(), signedPreKey); }
// Renders the config as the same single-line "ScheduledExecutorConfig{...}"
// form as before, built with a StringBuilder instead of chained concatenation.
@Override
public String toString() {
    StringBuilder sb = new StringBuilder("ScheduledExecutorConfig{");
    sb.append("name='").append(name).append('\'');
    sb.append(", durability=").append(durability);
    sb.append(", poolSize=").append(poolSize);
    sb.append(", capacity=").append(capacity);
    sb.append(", capacityPolicy=").append(capacityPolicy);
    sb.append(", statisticsEnabled=").append(statisticsEnabled);
    sb.append(", splitBrainProtectionName=").append(splitBrainProtectionName);
    sb.append(", mergePolicyConfig=").append(mergePolicyConfig);
    sb.append(", userCodeNamespace=").append(userCodeNamespace);
    return sb.append('}').toString();
}
// Sanity check only: toString must at least name the class.
@Test public void testToString() { assertContains(config.toString(), "ScheduledExecutorConfig"); }
// Plans the sequence of partition replica migrations that transforms
// oldReplicas into newReplicas, emitting one callback.migrate(...) per step.
// Handles, per replica index: removal (new owner null), fresh COPY from the
// partition owner, SHIFT UP (owner moves to a more senior index), no-op,
// MOVE (plain ownership change), and SHIFT DOWN (current owner demoted while
// a new owner is copied in); cyclic movements are broken up-front by
// fixCycle, and a recursive overload finishes the remaining complex cases.
// The statement order and the mutation of the shared `state` array are load-
// bearing — do not reorder. NOTE(review): the string literal near "SHIFT
// DOWN!" is split across lines by the dataset extraction; kept verbatim.
@SuppressWarnings({"checkstyle:npathcomplexity", "checkstyle:cyclomaticcomplexity", "checkstyle:methodlength"}) void planMigrations(int partitionId, PartitionReplica[] oldReplicas, PartitionReplica[] newReplicas, MigrationDecisionCallback callback) { assert oldReplicas.length == newReplicas.length : "Replica addresses with different lengths! Old: " + Arrays.toString(oldReplicas) + ", New: " + Arrays.toString(newReplicas); if (logger.isFinestEnabled()) { logger.finest("partitionId=%d, Initial state: %s", partitionId, Arrays.toString(oldReplicas)); logger.finest("partitionId=%d, Final state: %s", partitionId, Arrays.toString(newReplicas)); } initState(oldReplicas); assertNoDuplicate(partitionId, oldReplicas, newReplicas); // fix cyclic partition replica movements if (fixCycle(oldReplicas, newReplicas)) { if (logger.isFinestEnabled()) { logger.finest("partitionId=%d, Final state (after cycle fix): %s", partitionId, Arrays.toString(newReplicas)); } } int currentIndex = 0; while (currentIndex < oldReplicas.length) { if (logger.isFinestEnabled()) { logger.finest("partitionId=%d, Current index: %d, state: %s", partitionId, currentIndex, Arrays.toString(state)); } assertNoDuplicate(partitionId, oldReplicas, newReplicas); if (newReplicas[currentIndex] == null) { if (state[currentIndex] != null) { // replica owner is removed and no one will own this replica logger.finest("partitionId=%d, New address is null at index: %d", partitionId, currentIndex); callback.migrate(state[currentIndex], currentIndex, -1, null, -1, -1); state[currentIndex] = null; } currentIndex++; continue; } if (state[currentIndex] == null) { int i = getReplicaIndex(state, newReplicas[currentIndex]); if (i == -1) { // fresh replica copy is needed, so COPY replica to newReplicas[currentIndex] from partition owner logger.finest("partitionId=%d, COPY %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex); callback.migrate(null, -1, -1, newReplicas[currentIndex], -1, currentIndex); 
state[currentIndex] = newReplicas[currentIndex]; currentIndex++; continue; } if (i > currentIndex) { // SHIFT UP replica from i to currentIndex, copy data from partition owner logger.finest("partitionId=%d, SHIFT UP-2 %s from old addresses index: %d to index: %d", partitionId, state[i], i, currentIndex); callback.migrate(null, -1, -1, state[i], i, currentIndex); state[currentIndex] = state[i]; state[i] = null; continue; } throw new AssertionError("partitionId=" + partitionId + "Migration decision algorithm failed during SHIFT UP! INITIAL: " + Arrays.toString(oldReplicas) + ", CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas)); } if (newReplicas[currentIndex].equals(state[currentIndex])) { // no change, no action needed currentIndex++; continue; } if (getReplicaIndex(newReplicas, state[currentIndex]) == -1 && getReplicaIndex(state, newReplicas[currentIndex]) == -1) { // MOVE partition replica from its old owner to new owner logger.finest("partitionId=%d, MOVE %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex); callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex); state[currentIndex] = newReplicas[currentIndex]; currentIndex++; continue; } if (getReplicaIndex(state, newReplicas[currentIndex]) == -1) { int newIndex = getReplicaIndex(newReplicas, state[currentIndex]); assert newIndex > currentIndex : "partitionId=" + partitionId + ", Migration decision algorithm failed during SHIFT DOWN! 
INITIAL: " + Arrays.toString(oldReplicas) + ", CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas); if (state[newIndex] == null) { // it is a SHIFT DOWN logger.finest("partitionId=%d, SHIFT DOWN %s to index: %d, COPY %s to index: %d", partitionId, state[currentIndex], newIndex, newReplicas[currentIndex], currentIndex); callback.migrate(state[currentIndex], currentIndex, newIndex, newReplicas[currentIndex], -1, currentIndex); state[newIndex] = state[currentIndex]; } else { logger.finest("partitionId=%d, MOVE-3 %s to index: %d", partitionId, newReplicas[currentIndex], currentIndex); callback.migrate(state[currentIndex], currentIndex, -1, newReplicas[currentIndex], -1, currentIndex); } state[currentIndex] = newReplicas[currentIndex]; currentIndex++; continue; } planMigrations(partitionId, oldReplicas, newReplicas, callback, currentIndex); } assert Arrays.equals(state, newReplicas) : "partitionId=" + partitionId + ", Migration decisions failed! INITIAL: " + Arrays.toString(oldReplicas) + " CURRENT: " + Arrays.toString(state) + ", FINAL: " + Arrays.toString(newReplicas); }
// Planner must emit a SHIFT UP of 5704 (index 3 -> 1) before the MOVE that
// replaces 5701 with 5702 at index 0.
@Test public void test_MOVE_performedAfter_SHIFT_UP_toReplicaIndexWithExistingOwnerKicksItOutOfCluster() throws UnknownHostException { final PartitionReplica[] oldReplicas = { new PartitionReplica(new Address("localhost", 5701), uuids[0]), new PartitionReplica(new Address("localhost", 5702), uuids[1]), new PartitionReplica(new Address("localhost", 5703), uuids[2]), new PartitionReplica(new Address("localhost", 5704), uuids[3]), null, null, null, }; final PartitionReplica[] newReplicas = { new PartitionReplica(new Address("localhost", 5702), uuids[1]), new PartitionReplica(new Address("localhost", 5704), uuids[3]), new PartitionReplica(new Address("localhost", 5703), uuids[2]), null, null, null, null, }; migrationPlanner.planMigrations(0, oldReplicas, newReplicas, callback); verify(callback).migrate(new PartitionReplica(new Address("localhost", 5702), uuids[1]), 1, -1, new PartitionReplica(new Address("localhost", 5704), uuids[3]), 3, 1); verify(callback).migrate(new PartitionReplica(new Address("localhost", 5701), uuids[0]), 0, -1, new PartitionReplica(new Address("localhost", 5702), uuids[1]), -1, 0); }
// CLI entry point for concatenating Avro data files: parses --offset/--limit/
// --samplerate options, opens all inputs (last non-option arg is the output,
// "-" meaning stdout), copies the first input's codec and non-reserved
// metadata onto the writer, validates option ranges (returning 1 on bad
// values, 0 with help when fewer than 2 file args), then skips `offset`
// records and writes up to `limit` sampled records before flushing/closing.
// NOTE(review): option-range validation happens after the writer is created;
// an early-exit on bad options leaves `output` open — verify against caller.
@Override public int run(InputStream in, PrintStream out, PrintStream err, List<String> args) throws Exception { OptionParser optParser = new OptionParser(); OptionSpec<Long> offsetOpt = optParser.accepts("offset", "offset for reading input").withRequiredArg() .ofType(Long.class).defaultsTo(Long.valueOf(0)); OptionSpec<Long> limitOpt = optParser.accepts("limit", "maximum number of records in the outputfile") .withRequiredArg().ofType(Long.class).defaultsTo(Long.MAX_VALUE); OptionSpec<Double> fracOpt = optParser.accepts("samplerate", "rate at which records will be collected") .withRequiredArg().ofType(Double.class).defaultsTo(Double.valueOf(1)); OptionSet opts = optParser.parse(args.toArray(new String[0])); List<String> nargs = (List<String>) opts.nonOptionArguments(); if (nargs.size() < 2) { printHelp(out); return 0; } inFiles = Util.getFiles(nargs.subList(0, nargs.size() - 1)); System.out.println("List of input files:"); for (Path p : inFiles) { System.out.println(p); } currentInput = -1; nextInput(); OutputStream output = out; String lastArg = nargs.get(nargs.size() - 1); if (nargs.size() > 1 && !lastArg.equals("-")) { output = Util.createFromFS(lastArg); } writer = new DataFileWriter<>(new GenericDatumWriter<>()); String codecName = reader.getMetaString(DataFileConstants.CODEC); CodecFactory codec = (codecName == null) ? 
CodecFactory.fromString(DataFileConstants.NULL_CODEC) : CodecFactory.fromString(codecName); writer.setCodec(codec); for (String key : reader.getMetaKeys()) { if (!DataFileWriter.isReservedMeta(key)) { writer.setMeta(key, reader.getMeta(key)); } } writer.create(schema, output); long offset = opts.valueOf(offsetOpt); long limit = opts.valueOf(limitOpt); double samplerate = opts.valueOf(fracOpt); sampleCounter = 1; totalCopied = 0; reuse = null; if (limit < 0) { System.out.println("limit has to be non-negative"); this.printHelp(out); return 1; } if (offset < 0) { System.out.println("offset has to be non-negative"); this.printHelp(out); return 1; } if (samplerate < 0 || samplerate > 1) { System.out.println("samplerate has to be a number between 0 and 1"); this.printHelp(out); return 1; } skip(offset); writeRecords(limit, samplerate); System.out.println(totalCopied + " records written."); writer.flush(); writer.close(); Util.close(out); return 0; }
// Concatenating inputs with incompatible schemas (STRING vs INT) must fail
// with an IOException.
@Test void differentSchemasFail() throws Exception { assertThrows(IOException.class, () -> { Map<String, String> metadata = new HashMap<>(); metadata.put("myMetaKey", "myMetaValue"); File input1 = generateData("input1.avro", Type.STRING, metadata, DEFLATE); File input2 = generateData("input2.avro", Type.INT, metadata, DEFLATE); File output = new File(DIR, name.getMethodName() + ".avro"); output.deleteOnExit(); List<String> args = asList(input1.getAbsolutePath(), input2.getAbsolutePath(), output.getAbsolutePath()); new CatTool().run(System.in, System.out, System.err, args); }); }
// Right-shifts a 256-bit little-endian value (eight 32-bit words in `values`)
// by `rightShifts` bits with round-half-up, packing the low 128 bits into
// `result`; throws on overflow of the 128-bit result. The round-carry bit is
// read from the original words BEFORE any result word is written because
// `result` may alias the input. Word/bit shift split, the r4..r7 guards and
// the carry-propagation chain are all order-sensitive — kept verbatim.
public static void shiftRightArray8(int[] values, int rightShifts, Slice result) { if (values.length != NUMBER_OF_INTS * 2) { throw new IllegalArgumentException("Incorrect values length"); } if (rightShifts == 0) { for (int i = NUMBER_OF_INTS; i < 2 * NUMBER_OF_INTS; i++) { if (values[i] != 0) { throwOverflowException(); } } for (int i = 0; i < NUMBER_OF_INTS; i++) { setRawInt(result, i, values[i]); } return; } int wordShifts = rightShifts / 32; int bitShiftsInWord = rightShifts % 32; int shiftRestore = 32 - bitShiftsInWord; // check round-ups before settings values to result. // be aware that result could be the same object as decimal. boolean roundCarry; if (bitShiftsInWord == 0) { roundCarry = values[wordShifts - 1] < 0; } else { roundCarry = (values[wordShifts] & (1 << (bitShiftsInWord - 1))) != 0; } int r0 = values[0 + wordShifts]; int r1 = values[1 + wordShifts]; int r2 = values[2 + wordShifts]; int r3 = values[3 + wordShifts]; int r4 = wordShifts >= 4 ? 0 : values[4 + wordShifts]; int r5 = wordShifts >= 3 ? 0 : values[5 + wordShifts]; int r6 = wordShifts >= 2 ? 0 : values[6 + wordShifts]; int r7 = wordShifts >= 1 ? 0 : values[7 + wordShifts]; if (bitShiftsInWord > 0) { r0 = (r0 >>> bitShiftsInWord) | (r1 << shiftRestore); r1 = (r1 >>> bitShiftsInWord) | (r2 << shiftRestore); r2 = (r2 >>> bitShiftsInWord) | (r3 << shiftRestore); r3 = (r3 >>> bitShiftsInWord) | (r4 << shiftRestore); } if ((r4 >>> bitShiftsInWord) != 0 || r5 != 0 || r6 != 0 || r7 != 0) { throwOverflowException(); } if (r3 < 0) { throwOverflowException(); } // increment if (roundCarry) { r0++; if (r0 == 0) { r1++; if (r1 == 0) { r2++; if (r2 == 0) { r3++; if (r3 < 0) { throwOverflowException(); } } } } } pack(result, r0, r1, r2, r3, false); }
// Covers zero shift, sub-word and multi-word shifts, rounding cases (the
// boolean flag marks expected round-up), the max unscaled decimal, and
// overflow detection.
@Test public void testShiftRightArray8() { assertShiftRightArray8(TWO.pow(1), 0); assertShiftRightArray8(TWO.pow(1), 1); assertShiftRightArray8(TWO.pow(1), 10); assertShiftRightArray8(TWO.pow(15).add(TWO.pow(3)), 2); assertShiftRightArray8(TWO.pow(15).add(TWO.pow(3)), 10); assertShiftRightArray8(TWO.pow(15).add(TWO.pow(3)), 20); assertShiftRightArray8(TWO.pow(70), 30); assertShiftRightArray8(TWO.pow(70).subtract(TWO.pow(1)), 30, true); assertShiftRightArray8(TWO.pow(70), 32); assertShiftRightArray8(TWO.pow(70).subtract(TWO.pow(1)), 32, true); assertShiftRightArray8(TWO.pow(120), 70); assertShiftRightArray8(TWO.pow(120).subtract(TWO.pow(1)), 70, true); assertShiftRightArray8(TWO.pow(120), 96); assertShiftRightArray8(TWO.pow(120).subtract(TWO.pow(1)), 96, true); assertShiftRightArray8(MAX_DECIMAL_UNSCALED_VALUE, 20, true); assertShiftRightArray8(MAX_DECIMAL_UNSCALED_VALUE.multiply(MAX_DECIMAL_UNSCALED_VALUE), 130); assertShiftRightArray8(TWO.pow(256).subtract(BigInteger.ONE), 130, true); assertShiftRightArray8Overflow(TWO.pow(156), 1); assertShiftRightArray8Overflow(MAX_DECIMAL_UNSCALED_VALUE.multiply(MAX_DECIMAL_UNSCALED_VALUE), 20); assertShiftRightArray8Overflow(TWO.pow(256).subtract(BigInteger.ONE), 129); }
// Looks up the "reden opschorting" (suspension reason) element from the
// old-value registration category in the given category list.
public static String findRedenOpschortingOudeWaarde(List<Container> categorieList){ return findValue(categorieList, CATEGORIE_INSCHRIJVING_OUDE_WAARDE, ELEMENT_REDEN_OPSCHORTING); }
// The full fixture categories must yield the old-value suspension reason.
@Test public void testFindRedenOpschortingOudeWaarde() { assertThat(CategorieUtil.findRedenOpschortingOudeWaarde(createFullCategories()), is("redenopschorting_oud")); }
private static FileStatus getFileStatus(final FileSystem fs, final Path path, LoadingCache<Path,Future<FileStatus>> statCache) throws IOException { // if the stat cache does not exist, simply query the filesystem if (statCache == null) { return fs.getFileStatus(path); } try { // get or load it from the cache return statCache.get(path).get(); } catch (ExecutionException e) { Throwable cause = e.getCause(); // the underlying exception should normally be IOException if (cause instanceof IOException) { throw (IOException)cause; } else { throw new IOException(cause); } } catch (InterruptedException e) { // should not happen Thread.currentThread().interrupt(); throw new IOException(e); } }
/**
 * End-to-end FSDownload test: creates ten random local files (alternating PRIVATE
 * and APPLICATION visibility) under a umask-restricted base directory, localizes
 * each through a single-thread executor, then asserts every download completed,
 * each localized file's size matches its resource, the cache directory has 0755
 * permissions, and every localized file carries the private-file permission bits.
 */
@Test @Timeout(10000) void testDownload() throws IOException, URISyntaxException, InterruptedException { conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077"); FileContext files = FileContext.getLocalFSFileContext(conf); final Path basedir = files.makeQualified(new Path("target", TestFSDownload.class.getSimpleName())); files.mkdir(basedir, null, true); conf.setStrings(TestFSDownload.class.getName(), basedir.toString()); Map<LocalResource, LocalResourceVisibility> rsrcVis = new HashMap<LocalResource, LocalResourceVisibility>(); Random rand = new Random(); long sharedSeed = rand.nextLong(); rand.setSeed(sharedSeed); System.out.println("SEED: " + sharedSeed); Map<LocalResource, Future<Path>> pending = new HashMap<LocalResource, Future<Path>>(); ExecutorService exec = HadoopExecutors.newSingleThreadExecutor(); LocalDirAllocator dirs = new LocalDirAllocator(TestFSDownload.class.getName()); int[] sizes = new int[10]; for (int i = 0; i < 10; ++i) { sizes[i] = rand.nextInt(512) + 512; LocalResourceVisibility vis = LocalResourceVisibility.PRIVATE; if (i % 2 == 1) { vis = LocalResourceVisibility.APPLICATION; } Path p = new Path(basedir, "" + i); LocalResource rsrc = createFile(files, p, sizes[i], rand, vis); rsrcVis.put(rsrc, vis); Path destPath = dirs.getLocalPathForWrite( basedir.toString(), sizes[i], conf); destPath = new Path(destPath, Long.toString(uniqueNumberGenerator.incrementAndGet())); FSDownload fsd = new FSDownload(files, UserGroupInformation.getCurrentUser(), conf, destPath, rsrc); pending.put(rsrc, exec.submit(fsd)); } exec.shutdown(); while (!exec.awaitTermination(1000, TimeUnit.MILLISECONDS)) ; for (Future<Path> path : pending.values()) { assertTrue(path.isDone()); } try { for (Map.Entry<LocalResource, Future<Path>> p : pending.entrySet()) { Path localized = p.getValue().get(); assertEquals(sizes[Integer.parseInt(localized.getName())], p.getKey() .getSize()); FileStatus status = files.getFileStatus(localized.getParent()); FsPermission perm = 
status.getPermission(); assertEquals(new FsPermission((short) 0755), perm, "Cache directory permissions are incorrect"); status = files.getFileStatus(localized); perm = status.getPermission(); System.out.println("File permission " + perm + " for rsrc vis " + p.getKey().getVisibility().name()); assert(rsrcVis.containsKey(p.getKey())); assertTrue(perm.toShort() == FSDownload.PRIVATE_FILE_PERMS.toShort(), "Private file should be 500"); } } catch (ExecutionException e) { throw new IOException("Failed exec", e); } }
/**
 * Renames every field of {@code record} whose name starts with a configured prefix,
 * replacing that prefix with its configured substitute.
 *
 * <p>Fix: the map-entry accessors {@code entry.getKey()}/{@code entry.getValue()}
 * were evaluated on every inner-loop iteration although they are loop-invariant;
 * they are now hoisted out of the inner loop.
 *
 * @param record the row whose fields are renamed in place
 * @throws RuntimeException if a rename would produce an empty field name or collide
 *     with a field already present on the record
 */
@VisibleForTesting protected void renamePrefixes(GenericRow record) {
  // Nothing to do when no prefix mappings are configured.
  if (_prefixesToRename.isEmpty()) {
    return;
  }
  // Snapshot the field names up front so the renames below do not affect iteration.
  List<String> fields = new ArrayList<>(record.getFieldToValueMap().keySet());
  for (Map.Entry<String, String> entry : _prefixesToRename.entrySet()) {
    // Loop-invariant for the inner loop: read the mapping once per prefix.
    String prefix = entry.getKey();
    String replacementPrefix = entry.getValue();
    for (String field : fields) {
      if (field.startsWith(prefix)) {
        Object value = record.removeValue(field);
        String remainingColumnName = field.substring(prefix.length());
        String newName = replacementPrefix + remainingColumnName;
        // Reject renames that would erase the name or clobber an existing field.
        if (newName.isEmpty() || record.getValue(newName) != null) {
          throw new RuntimeException(
              String.format("Name conflict after attempting to rename field %s to %s", field, newName));
        }
        record.putValue(newName, value);
      }
    }
  }
}
/**
 * Covers renamePrefixes end to end: a prefix stripped to empty plus a prefix
 * replaced by another ("map1." -> "", "map2" -> "test"); a rename that collides
 * with an existing field (expects RuntimeException); a rename that produces an
 * empty field name (expects RuntimeException); and an empty mapping, which must
 * leave the row untouched.
 */
@Test public void testRenamePrefixes() { HashMap<String, String> prefixesToRename = new HashMap<>(); prefixesToRename.put("map1.", ""); prefixesToRename.put("map2", "test"); ComplexTypeTransformer transformer = new ComplexTypeTransformer(new ArrayList<>(), ".", DEFAULT_COLLECTION_TO_JSON_MODE, prefixesToRename, null); GenericRow genericRow = new GenericRow(); genericRow.putValue("a", 1L); genericRow.putValue("map1.b", 2L); genericRow.putValue("map2.c", "u"); transformer.renamePrefixes(genericRow); Assert.assertEquals(genericRow.getValue("a"), 1L); Assert.assertEquals(genericRow.getValue("b"), 2L); Assert.assertEquals(genericRow.getValue("test.c"), "u"); // name conflict where there becomes duplicate field names after renaming prefixesToRename = new HashMap<>(); prefixesToRename.put("test.", ""); transformer = new ComplexTypeTransformer(new ArrayList<>(), ".", DEFAULT_COLLECTION_TO_JSON_MODE, prefixesToRename, null); genericRow = new GenericRow(); genericRow.putValue("a", 1L); genericRow.putValue("test.a", 2L); try { transformer.renamePrefixes(genericRow); Assert.fail("Should fail due to name conflict after renaming"); } catch (RuntimeException e) { // expected } // name conflict where there becomes an empty field name after renaming prefixesToRename = new HashMap<>(); prefixesToRename.put("test", ""); transformer = new ComplexTypeTransformer(new ArrayList<>(), ".", DEFAULT_COLLECTION_TO_JSON_MODE, prefixesToRename, null); genericRow = new GenericRow(); genericRow.putValue("a", 1L); genericRow.putValue("test", 2L); try { transformer.renamePrefixes(genericRow); Assert.fail("Should fail due to empty name after renaming"); } catch (RuntimeException e) { // expected } // case where nothing gets renamed prefixesToRename = new HashMap<>(); transformer = new ComplexTypeTransformer(new ArrayList<>(), ".", DEFAULT_COLLECTION_TO_JSON_MODE, prefixesToRename, null); genericRow = new GenericRow(); genericRow.putValue("a", 1L); genericRow.putValue("test", 2L); 
transformer.renamePrefixes(genericRow); Assert.assertEquals(genericRow.getValue("a"), 1L); Assert.assertEquals(genericRow.getValue("test"), 2L); }
/**
 * Evaluates the condition against the given real data. Returns false for a missing
 * condition or blank operator, and for blank real data unless the resolved judge is
 * the blank-predicate judge; otherwise delegates to the operator's judge.
 */
public static Boolean judge(final ConditionData conditionData, final String realData) {
    // Guard: no condition or no operator means nothing can match.
    if (Objects.isNull(conditionData) || StringUtils.isBlank(conditionData.getOperator())) {
        return false;
    }
    final PredicateJudge judge = newInstance(conditionData.getOperator());
    // Blank real data only ever matches for the blank-predicate judge.
    if (StringUtils.isBlank(realData) && !(judge instanceof BlankPredicateJudge)) {
        return false;
    }
    return judge.judge(conditionData, realData);
}
@Test
public void testContainsJudge() {
    // All checks below run with the CONTAINS operator.
    conditionData.setOperator(OperatorEnum.CONTAINS.getAlias());
    // Matches whether the condition value sits in the middle or at the start.
    assertTrue(PredicateJudgeFactory.judge(conditionData, "/http/**/test"));
    assertTrue(PredicateJudgeFactory.judge(conditionData, "/test/http/**"));
    // "/http1/**" does not contain the condition value, so it must not match.
    assertFalse(PredicateJudgeFactory.judge(conditionData, "/http1/**"));
}
@Override
public int compareTo(DateTimeStamp dateTimeStamp) {
    // Ordering is delegated entirely to the shared comparator instance.
    return comparator.compare(this, dateTimeStamp);
}
@Test
void compareWithNullDates() {
    // With no date string, ordering falls back to the numeric timestamp alone.
    final DateTimeStamp earlier = new DateTimeStamp((String) null, 100);
    final DateTimeStamp later = new DateTimeStamp((String) null, 200);
    final DateTimeStamp sameAsEarlier = new DateTimeStamp((String) null, 100);
    assertTrue(earlier.compareTo(later) < 0);
    assertTrue(earlier.compareTo(sameAsEarlier) == 0);
    assertTrue(later.compareTo(sameAsEarlier) > 0);
}
/**
 * Coerces {@code value} into a BigDecimal[]: a BigDecimal[] passes through
 * unchanged; double[] and List elements are converted via their String
 * representation (note: a null list element becomes the string "null" and so fails
 * number parsing); any other type is rejected. NumberFormatException is rethrown as
 * MaestroInternalError naming the offending value and parameter.
 */
public static BigDecimal[] toDecimalArray(String name, Object value) { try { if (value instanceof BigDecimal[]) { return (BigDecimal[]) value; } else if (value instanceof double[]) { return Arrays.stream((double[]) value) .mapToObj(val -> new BigDecimal(String.valueOf(val))) .toArray(BigDecimal[]::new); } else if (value instanceof List) { return ((List<?>) value) .stream().map(d -> new BigDecimal(String.valueOf(d))).toArray(BigDecimal[]::new); } else { throw new MaestroInternalError( "Cannot cast value [%s] into a BigDecimal array for param [%s]", toTruncateString(value), name); } } catch (NumberFormatException nfe) { throw new MaestroInternalError( nfe, "Invalid number format for value: %s for param [%s]", toTruncateString(value), name); } }
/**
 * Asserts the failure modes of toDecimalArray: non-numeric list elements (boolean,
 * malformed string, null) surface as "Invalid number format" errors, and a null
 * input value is rejected outright as uncastable.
 */
@Test public void testInvalidToDecimalArray() { AssertHelper.assertThrows( "Invalid number format", MaestroInternalError.class, "Invalid number format for value: [true, 5.6]", () -> ParamHelper.toDecimalArray("foo", Arrays.asList(true, 5.6))); AssertHelper.assertThrows( "Invalid number format", MaestroInternalError.class, "Invalid number format for value: [3.4abc, 5.6]", () -> ParamHelper.toDecimalArray("foo", Arrays.asList("3.4abc", 5.6))); AssertHelper.assertThrows( "Invalid number format", MaestroInternalError.class, "Invalid number format for value: [null, 5.6]", () -> ParamHelper.toDecimalArray("foo", Arrays.asList(null, 5.6))); AssertHelper.assertThrows( "Invalid number format", MaestroInternalError.class, "Cannot cast value [null] into a BigDecimal array", () -> ParamHelper.toDecimalArray("foo", null)); }
@Override
public List<StorageEntity> filter() {
    // Copy the parent entries into a fresh, mutable list for the caller.
    final List<StorageEntity> parents = new ArrayList<>(listAllParent());
    return parents;
}
/**
 * Builds four storage entities (two .txt, two .jar names), filters with suffix
 * ".jar", and asserts a non-null result, logging each surviving entry.
 * NOTE(review): the test never asserts the filtered contents — consider checking
 * that exactly the two .jar entries remain.
 */
@Test public void filterTest() { List<StorageEntity> allList = new ArrayList<>(); StorageEntity resource1 = new StorageEntity(); resource1.setFullName("a1.txt"); StorageEntity resource2 = new StorageEntity(); resource2.setFullName("b1.txt"); StorageEntity resource3 = new StorageEntity(); resource3.setFullName("b2.jar"); StorageEntity resource4 = new StorageEntity(); resource4.setFullName("c2.jar"); allList.add(resource1); allList.add(resource2); allList.add(resource3); allList.add(resource4); ResourceFilter resourceFilter = new ResourceFilter(".jar", allList); List<StorageEntity> resourceList = resourceFilter.filter(); Assertions.assertNotNull(resourceList); resourceList.forEach(t -> logger.info(t.toString())); }
/**
 * Renders the configured template into a field value. The data model exposes a
 * "source" entry taken from the message context's fields when present, otherwise
 * from the event context's DTO fields. Returns an error FieldValue when the
 * template fails validation against the model or rendering throws; otherwise the
 * rendered string.
 */
@Override protected FieldValue doGet(String fieldName, EventWithContext eventWithContext) { final ImmutableMap.Builder<String, Object> dataModelBuilder = ImmutableMap.builder(); if (eventWithContext.messageContext().isPresent()) { dataModelBuilder.put("source", eventWithContext.messageContext().get().getFields()); } else if (eventWithContext.eventContext().isPresent()) { dataModelBuilder.put("source", eventWithContext.eventContext().get().toDto().fields()); } final ImmutableMap<String, Object> dataModel = dataModelBuilder.build(); if (!isValidTemplate(config.template(), dataModel)) { return FieldValue.error(); } try { return FieldValue.string(templateEngine.transform(config.template(), dataModel)); } catch (Exception e) { LOG.error("Couldn't render field template \"{}\"", config.template(), e); return FieldValue.error(); } }
/**
 * Verifies that a DateTime message field referenced as ${source.timestamp} renders
 * with its full ISO-8601 representation, including milliseconds and zone.
 */
@Test public void templateDateFormatting() { final TestEvent event = new TestEvent(); final EventWithContext eventWithContext = EventWithContext.create(event, newMessage(ImmutableMap.of("timestamp", DateTime.parse("2019-07-02T12:21:00.123Z")))); final FieldValue fieldValue = newTemplate("timestamp: ${source.timestamp}").doGet("test", eventWithContext); assertThat(fieldValue.value()).isEqualTo("timestamp: 2019-07-02T12:21:00.123Z"); }
@Override public void collect(MetricsEmitter metricsEmitter) { for (Map.Entry<MetricKey, KafkaMetric> entry : ledger.getMetrics()) { MetricKey metricKey = entry.getKey(); KafkaMetric metric = entry.getValue(); try { collectMetric(metricsEmitter, metricKey, metric); } catch (Exception e) { // catch and log to continue processing remaining metrics log.error("Error processing Kafka metric {}", metricKey, e); } } }
/**
 * Registers four Gauge metrics (float, double, int, long) and collects them.
 * Expects five emitted points (Metrics always contributes a count measurable), with
 * the float/double gauges reporting 99 and the int/long gauges reporting 100, each
 * carrying the configured tags.
 */
@Test public void testNonMeasurable() { metrics.addMetric(metrics.metricName("float", "group1", tags), (Gauge<Float>) (config, now) -> 99f); metrics.addMetric(metrics.metricName("double", "group1", tags), (Gauge<Double>) (config, now) -> 99d); metrics.addMetric(metrics.metricName("int", "group1", tags), (Gauge<Integer>) (config, now) -> 100); metrics.addMetric(metrics.metricName("long", "group1", tags), (Gauge<Long>) (config, now) -> 100L); collector.collect(testEmitter); List<SinglePointMetric> result = testEmitter.emittedMetrics(); // Should get exactly 5 Kafka measurables since Metrics always includes a count measurable. assertEquals(5, result.size()); result.stream() .flatMap(metrics -> Stream.of(metrics.builder().build())) .filter(metric -> metric.getName().equals("test.domain.group1.(float|double)")).forEach( doubleGauge -> { assertTrue(doubleGauge.hasGauge()); assertEquals(tags, getTags(doubleGauge.getGauge().getDataPoints(0).getAttributesList())); assertEquals(99d, doubleGauge.getGauge().getDataPoints(0).getAsDouble(), 0.0); }); result.stream() .flatMap(metrics -> Stream.of(metrics.builder().build())) .filter(metric -> metric.getName().equals("test.domain.group1.(int|long)")).forEach( intGauge -> { assertTrue(intGauge.hasGauge()); assertEquals(tags, getTags(intGauge.getGauge().getDataPoints(0).getAttributesList())); assertEquals(100, intGauge.getGauge().getDataPoints(0).getAsDouble(), 0.0); }); }
/**
 * Records one more file under the given relative path, registering the directory on
 * first sight and retiring it from the non-full set once it reaches the
 * per-directory file limit.
 */
public synchronized void incrementFileCountForPath(String relPath) {
    // Normalize: null becomes the root ("") and surrounding whitespace is dropped.
    final String path = relPath == null ? "" : relPath.trim();
    Directory dir = knownDirectories.get(path);
    if (dir == null) {
        // First time we see this path: register it as a known, non-full directory.
        final int dirnum = Directory.getDirectoryNumber(path);
        totalSubDirectories = Math.max(dirnum, totalSubDirectories);
        dir = new Directory(dirnum);
        nonFullDirectories.add(dir);
        knownDirectories.put(dir.getRelativePath(), dir);
    }
    // Once the directory hits its limit it stops receiving new files.
    if (dir.incrementAndGetCount() >= perDirectoryFileLimit) {
        nonFullDirectories.remove(dir);
    }
}
/**
 * Drives LocalCacheDirectoryManager through fill/free cycles: fills the root dir
 * via incrementFileCountForPath, frees slots with decrementFileCountForPath, and
 * checks that getRelativePathForLocalization hands out the root while it has space,
 * then other directories, and that pre-incrementing deep paths (d/e/e/p/0..2)
 * shifts allocation to the next free deep path.
 */
@Test public void testIncrementFileCountForPath() { YarnConfiguration conf = new YarnConfiguration(); conf.setInt(YarnConfiguration.NM_LOCAL_CACHE_MAX_FILES_PER_DIRECTORY, LocalCacheDirectoryManager.DIRECTORIES_PER_LEVEL + 2); LocalCacheDirectoryManager mgr = new LocalCacheDirectoryManager(conf); final String rootPath = ""; mgr.incrementFileCountForPath(rootPath); Assert.assertEquals(rootPath, mgr.getRelativePathForLocalization()); Assert.assertFalse("root dir should be full", rootPath.equals(mgr.getRelativePathForLocalization())); // finish filling the other directory mgr.getRelativePathForLocalization(); // free up space in the root dir mgr.decrementFileCountForPath(rootPath); mgr.decrementFileCountForPath(rootPath); Assert.assertEquals(rootPath, mgr.getRelativePathForLocalization()); Assert.assertEquals(rootPath, mgr.getRelativePathForLocalization()); String otherDir = mgr.getRelativePathForLocalization(); Assert.assertFalse("root dir should be full", otherDir.equals(rootPath)); final String deepDir0 = "d/e/e/p/0"; final String deepDir1 = "d/e/e/p/1"; final String deepDir2 = "d/e/e/p/2"; final String deepDir3 = "d/e/e/p/3"; mgr.incrementFileCountForPath(deepDir0); Assert.assertEquals(otherDir, mgr.getRelativePathForLocalization()); Assert.assertEquals(deepDir0, mgr.getRelativePathForLocalization()); Assert.assertEquals("total dir count incorrect after increment", deepDir1, mgr.getRelativePathForLocalization()); mgr.incrementFileCountForPath(deepDir2); mgr.incrementFileCountForPath(deepDir1); mgr.incrementFileCountForPath(deepDir2); Assert.assertEquals(deepDir3, mgr.getRelativePathForLocalization()); }
/**
 * Computes the full event diff for the given params: per-node differences first,
 * then the cluster-wide diff, then derived bucket-space state diffs.
 */
public static List<Event> computeEventDiff(final Params params) {
    final List<Event> emitted = new ArrayList<>();
    emitPerNodeDiffEvents(createBaselineParams(params), emitted);
    emitWholeClusterDiffEvent(createBaselineParams(params), emitted);
    emitDerivedBucketSpaceStatesDiffEvents(params, emitted);
    return emitted;
}
@Test
void no_emitted_node_state_event_when_node_state_not_changed() {
    // Identical before/after cluster states must produce no events at all.
    final EventFixture fixture = EventFixture.createForNodes(3)
            .clusterStateBefore("distributor:3 storage:3")
            .clusterStateAfter("distributor:3 storage:3");
    final List<Event> diff = fixture.computeEventDiff();
    assertThat(diff.size(), equalTo(0));
}
@Override
public Optional<Rule> findByUuid(String uuid) {
    // Lazily load the rules before the first lookup.
    ensureInitialized();
    final Rule rule = rulesByUuid.get(uuid);
    return Optional.ofNullable(rule);
}
/**
 * The first findByUuid must load all rules from the database exactly once; every
 * subsequent get/find call is then served from the in-memory cache without another
 * DB round trip.
 */
@Test public void first_call_to_findById_triggers_call_to_db_and_any_subsequent_get_or_find_call_does_not() { underTest.findByUuid(AB_RULE.getUuid()); verify(ruleDao, times(1)).selectAll(any(DbSession.class)); verifyNoMethodCallTriggersCallToDB(); }
/**
 * Decrypts a persisted secret string, returning null for anything unparseable.
 * Data wrapped in '{...}' is treated as a base64 payload laid out as
 * [version byte][4-byte big-endian IV length][4-byte big-endian data length]
 * [IV][ciphertext]; only PAYLOAD_V1 is accepted, length mismatches and decryption
 * failures yield null. Unwrapped data falls back to the historical format via
 * HistoricalSecrets (null on crypto/IO failure; UnsupportedEncodingException is
 * impossible and escalated to Error).
 */
@CheckForNull public static Secret decrypt(@CheckForNull String data) { if (!isValidData(data)) return null; if (data.startsWith("{") && data.endsWith("}")) { //likely CBC encrypted/containing metadata but could be plain text byte[] payload; try { payload = Base64.getDecoder().decode(data.substring(1, data.length() - 1)); } catch (IllegalArgumentException e) { return null; } switch (payload[0]) { case PAYLOAD_V1: // For PAYLOAD_V1 we use this byte shifting model, V2 probably will need DataOutput int ivLength = ((payload[1] & 0xff) << 24) | ((payload[2] & 0xff) << 16) | ((payload[3] & 0xff) << 8) | (payload[4] & 0xff); int dataLength = ((payload[5] & 0xff) << 24) | ((payload[6] & 0xff) << 16) | ((payload[7] & 0xff) << 8) | (payload[8] & 0xff); if (payload.length != 1 + 8 + ivLength + dataLength) { // not valid v1 return null; } byte[] iv = Arrays.copyOfRange(payload, 9, 9 + ivLength); byte[] code = Arrays.copyOfRange(payload, 9 + ivLength, payload.length); String text; try { text = new String(KEY.decrypt(iv).doFinal(code), UTF_8); } catch (GeneralSecurityException e) { // it's v1 which cannot be historical, but not decrypting return null; } return new Secret(text, iv); default: return null; } } else { try { return HistoricalSecrets.decrypt(data, KEY); } catch (UnsupportedEncodingException e) { throw new Error(e); // impossible } catch (GeneralSecurityException | IOException e) { return null; } } }
@Test
public void decrypt() {
    // A freshly encrypted secret must decrypt back to its original plain text.
    final String decrypted = Secret.toString(Secret.fromString("abc"));
    assertEquals("abc", decrypted);
}
/**
 * Loads the value for {@code key} via the configured SQL load query. Returns null
 * when no row matches; throws IllegalStateException when more than one row matches.
 * When the result has exactly two columns and single-column-as-value is enabled,
 * the second column is returned directly; otherwise the row is converted to a
 * GenericRecord. Blocks until initialization has completed successfully.
 */
@Override public V load(K key) { awaitSuccessfulInit(); try (SqlResult queryResult = sqlService.execute(queries.load(), key)) { Iterator<SqlRow> it = queryResult.iterator(); V value = null; if (it.hasNext()) { SqlRow sqlRow = it.next(); if (it.hasNext()) { throw new IllegalStateException("multiple matching rows for a key " + key); } // If there is a single column as the value, return that column as the value if (queryResult.getRowMetadata().getColumnCount() == 2 && genericMapStoreProperties.singleColumnAsValue) { value = sqlRow.getObject(1); } else { //noinspection unchecked value = (V) toGenericRecord(sqlRow, genericMapStoreProperties); } } return value; } }
/**
 * With a valid mapping and one inserted row, a freshly created map loader must
 * return a non-null record for key 0.
 */
@Test public void givenValidMappingExists_whenMapLoaderInit_thenInitAndLoadRecord() { ObjectSpec spec = objectProvider.createObject(mapName, false); objectProvider.insertItems(spec, 1); createMapping(mapName, MAPPING_PREFIX + mapName); mapLoader = createMapLoader(); GenericRecord loaded = mapLoader.load(0); assertThat(loaded).isNotNull(); }
List<String> liveKeysAsOrderedList() {
    // Snapshot the live keys, preserving the map's iteration order.
    final List<String> keys = new ArrayList<>(liveMap.keySet());
    return keys;
}
/**
 * After endOfLife plus the lingering timeout, removeStaleComponents must drop the
 * tracked buffer: no live keys, zero components, and the buffer itself cleared.
 */
@Test public void destroy() { long now = 3000; CyclicBuffer<Object> cb = tracker.getOrCreate(key, now); cb.add(new Object()); assertEquals(1, cb.length()); tracker.endOfLife(key); now += CyclicBufferTracker.LINGERING_TIMEOUT + 10; tracker.removeStaleComponents(now); assertEquals(0, tracker.liveKeysAsOrderedList().size()); assertEquals(0, tracker.getComponentCount()); assertEquals(0, cb.length()); }
/**
 * Edit distance with affine gap costs (Needleman-Wunsch style): substitutions cost
 * changeCost, starting an insertion/deletion run costs openGapCost +
 * continueGapCost, and extending it costs continueGapCost. Three DP matrices track
 * the cheapest alignment of prefixes ending in a match (m), a deletion (d) or an
 * insertion (i); the result is the minimum of the three full-length entries.
 * Case-insensitive comparison lowercases both inputs first (ASCII only).
 */
public static int getEditDistance( String source, String target, boolean caseSensitive, int changeCost, int openGapCost, int continueGapCost) { if (!caseSensitive) { source = Ascii.toLowerCase(source); target = Ascii.toLowerCase(target); } int sourceLength = source.length(); int targetLength = target.length(); if (sourceLength == 0) { return scriptCost(openGapCost, continueGapCost, targetLength); } if (targetLength == 0) { return scriptCost(openGapCost, continueGapCost, sourceLength); } // mMatrix[i][j] = Cost of aligning source.substring(0,i) with // target.substring(0,j), using an edit script ending with // matched characters. int[][] mMatrix = new int[sourceLength + 1][targetLength + 1]; // Cost of an alignment that ends with a bunch of deletions. // dMatrix[i][j] = best found cost of changing the first i chars // of source into the first j chars of target, ending with one // or more deletes of source characters. int[][] dMatrix = new int[sourceLength + 1][targetLength + 1]; // Cost of an alignment that ends with one or more insertions. int[][] iMatrix = new int[sourceLength + 1][targetLength + 1]; mMatrix[0][0] = dMatrix[0][0] = iMatrix[0][0] = 0; // Any edit script that changes i chars of source into zero // chars of target will only involve deletions. So only the // d&m Matrix entries are relevant, because dMatrix[i][0] gives // the cost of changing an i-length string into a 0-length string, // using an edit script ending in deletions. for (int i = 1; i <= sourceLength; i++) { mMatrix[i][0] = dMatrix[i][0] = scriptCost(openGapCost, continueGapCost, i); // Make the iMatrix entries impossibly expensive, so they'll be // ignored as inputs to min(). Use a big cost but not // max int because that will overflow if anything's added to it. 
iMatrix[i][0] = Integer.MAX_VALUE / 2; } for (int j = 1; j <= targetLength; j++) { // Only the i&m Matrix entries are relevant here, because they represent // the cost of changing a 0-length string into a j-length string, using // an edit script ending in insertions. mMatrix[0][j] = iMatrix[0][j] = scriptCost(openGapCost, continueGapCost, j); // Make the dMatrix entries impossibly expensive, so they'll be // ignored as inputs to min(). Use a big cost but not // max int because that will overflow if anything's added to it. dMatrix[0][j] = Integer.MAX_VALUE / 2; } for (int i = 1; i <= sourceLength; i++) { char sourceI = source.charAt(i - 1); for (int j = 1; j <= targetLength; j++) { char targetJ = target.charAt(j - 1); int cost = (sourceI == targetJ) ? 0 : changeCost; // Cost of changing i chars of source into j chars of target, // using an edit script ending in matched characters. mMatrix[i][j] = cost + Ints.min(mMatrix[i - 1][j - 1], iMatrix[i - 1][j - 1], dMatrix[i - 1][j - 1]); // Cost of an edit script ending in a deletion. dMatrix[i][j] = Math.min( mMatrix[i - 1][j] + openGapCost + continueGapCost, dMatrix[i - 1][j] + continueGapCost); // Cost of an edit script ending in an insertion. iMatrix[i][j] = Math.min( mMatrix[i][j - 1] + openGapCost + continueGapCost, iMatrix[i][j - 1] + continueGapCost); } } // Return the minimum cost. int costOfEditScriptEndingWithMatch = mMatrix[sourceLength][targetLength]; int costOfEditScriptEndingWithDelete = dMatrix[sourceLength][targetLength]; int costOfEditScriptEndingWithInsert = iMatrix[sourceLength][targetLength]; return Ints.min( costOfEditScriptEndingWithMatch, costOfEditScriptEndingWithDelete, costOfEditScriptEndingWithInsert); }
@Test
public void needlemanWunschEditDistance_returnsZero_withIdenticalNames() {
    // Comparing a string against itself costs nothing, whatever the gap penalties.
    final String name = "foo";
    final double distance =
        NeedlemanWunschEditDistance.getEditDistance(name, name, /* caseSensitive= */ false, 1, 1, 10);
    assertThat(distance).isEqualTo(0.0);
}
/**
 * Inserts {@code newChild} as the first child of this outline node. The child must
 * be a single node (requireSingleNode rejects one with attached siblings); after
 * prepending, the parent's open-count is updated for the addition.
 */
public void addFirst(PDOutlineItem newChild) { requireSingleNode(newChild); prepend(newChild); updateParentOpenCountForAddedChild(newChild); }
@Test
void cannotAddFirstAList() {
    // Build a chain of siblings; addFirst must reject anything but a single node.
    final PDOutlineItem head = new PDOutlineItem();
    head.insertSiblingAfter(new PDOutlineItem());
    head.insertSiblingAfter(new PDOutlineItem());
    assertThrows(IllegalArgumentException.class, () -> root.addFirst(head));
}
/** Not supported by this adapter; always throws MethodNotAvailableException. */
@Override @MethodNotAvailable public CompletionStage<Boolean> putIfAbsentAsync(K key, V value) { throw new MethodNotAvailableException(); }
/** putIfAbsentAsync is unavailable on this adapter and must throw. */
@Test(expected = MethodNotAvailableException.class) public void testPutIfAbsentAsync() { adapter.putIfAbsentAsync(23, "value"); }
/** Returns the schema registered for the given path, or null when none is known. */
public String getSchemaEntry(String schemaPath) { return schemaEntries.get(schemaPath); }
@Test
void getSchemaEntry() {
    final SchemaMap map = new SchemaMap();
    map.putSchemaEntry("path1", "schema1");
    // A stored path resolves to its schema; an unknown path resolves to null.
    assertEquals("schema1", map.getSchemaEntry("path1"));
    assertNull(map.getSchemaEntry("path2"));
}
/**
 * Returns a copy of this helper with the grand-total flag replaced; all other
 * settings (collection, filter, sort, page size, grand-total filter, collation)
 * are carried over unchanged.
 */
@Override public MongoPaginationHelper<T> includeGrandTotal(boolean includeGrandTotal) { return new DefaultMongoPaginationHelper<>(collection, filter, sort, perPage, includeGrandTotal, grandTotalFilter, collation); }
/**
 * Without the grand-total flag (default or explicitly false) a page carries no
 * grand total; with the flag enabled the grand total is present (16) regardless of
 * whether a filter predicate is supplied.
 */
@Test void testIncludeGrandTotal() { assertThat(paginationHelper.page(1).grandTotal()) .isEqualTo(paginationHelper.includeGrandTotal(false).page(1).grandTotal()) .isEqualTo(paginationHelper.includeGrandTotal(false).page(1, alwaysTrue()).grandTotal()) .isEmpty(); assertThat(paginationHelper.includeGrandTotal(true).page(1).grandTotal()) .isEqualTo(paginationHelper.includeGrandTotal(true).page(1, alwaysTrue()).grandTotal()) .contains(16L); }
/** Static factory: wraps the given Retry in a new RetryTransformer. */
public static <T> RetryTransformer<T> of(Retry retry) { return new RetryTransformer<>(retry); }
/**
 * A retry configured to retry only IOExceptions must not retry a
 * HelloWorldException: the service is called exactly once, the error propagates,
 * and the metrics record one failed call without a retry attempt.
 */
@Test public void doNotRetryFromPredicateUsingObservable() { RetryConfig config = RetryConfig.custom() .retryOnException(t -> t instanceof IOException) .waitDuration(Duration.ofMillis(50)) .maxAttempts(3).build(); Retry retry = Retry.of("testName", config); given(helloWorldService.returnHelloWorld()) .willThrow(new HelloWorldException()); Observable.fromCallable(helloWorldService::returnHelloWorld) .compose(RetryTransformer.of(retry)) .test() .assertError(HelloWorldException.class) .assertNotComplete() .assertSubscribed(); then(helloWorldService).should().returnHelloWorld(); Retry.Metrics metrics = retry.getMetrics(); assertThat(metrics.getNumberOfFailedCallsWithoutRetryAttempt()).isEqualTo(1); assertThat(metrics.getNumberOfFailedCallsWithRetryAttempt()).isZero(); }
/**
 * Formats a human-readable description of the search request, appending the target
 * indices only when the request names any.
 */
static String computeDetailsAsString(SearchRequest searchRequest) {
    // Base description always includes the request itself.
    String details = String.format("ES search request '%s'", searchRequest);
    if (searchRequest.indices().length > 0) {
        details += String.format(ON_INDICES_MESSAGE, Arrays.toString(searchRequest.indices()));
    }
    return details;
}
/**
 * A scroll request formats as a scroll-specific message that embeds the scroll's
 * keep-alive, not the generic search-request message.
 */
@Test public void should_format_search_SearchScrollRequest() { SearchScrollRequest scrollRequest = Requests.searchScrollRequest("scroll-id") .scroll(TimeValue.ZERO); assertThat(EsRequestDetails.computeDetailsAsString(scrollRequest)) .isEqualTo("ES search scroll request for scroll id 'Scroll{keepAlive=0s}'"); }
/**
 * Appends a manifest entry for the given blob, annotated with the image reference
 * name under the standard OCI "org.opencontainers.image.ref.name" key.
 */
public void addManifest(BlobDescriptor descriptor, String imageReferenceName) {
    // Describe the referenced manifest blob by media type, size and digest.
    final ManifestDescriptorTemplate manifestDescriptor =
        new ManifestDescriptorTemplate(
            OciManifestTemplate.MANIFEST_MEDIA_TYPE, descriptor.getSize(), descriptor.getDigest());
    manifestDescriptor.setAnnotations(
        ImmutableMap.of("org.opencontainers.image.ref.name", imageReferenceName));
    manifests.add(manifestDescriptor);
}
/**
 * Serializes an OCI index containing two platform-tagged manifests (linux/ppc64le
 * and linux/amd64) and compares it, whitespace-stripped, against the expected JSON
 * fixture.
 */
@Test public void testToJsonWithPlatform() throws DigestException, IOException, URISyntaxException { // Loads the expected JSON string. Path jsonFile = Paths.get(Resources.getResource("core/json/ociindex_platforms.json").toURI()); String expectedJson = new String(Files.readAllBytes(jsonFile), StandardCharsets.UTF_8); // Creates the JSON object to serialize. OciIndexTemplate ociIndexJson = new OciIndexTemplate(); OciIndexTemplate.ManifestDescriptorTemplate ppc64leManifest = new OciIndexTemplate.ManifestDescriptorTemplate( OciManifestTemplate.MANIFEST_MEDIA_TYPE, 7143, DescriptorDigest.fromDigest( "sha256:e692418e4cbaf90ca69d05a66403747baa33ee08806650b51fab815ad7fc331f")); ppc64leManifest.setPlatform("ppc64le", "linux"); ociIndexJson.addManifest(ppc64leManifest); OciIndexTemplate.ManifestDescriptorTemplate amd64Manifest = new OciIndexTemplate.ManifestDescriptorTemplate( OciManifestTemplate.MANIFEST_MEDIA_TYPE, 7682, DescriptorDigest.fromDigest( "sha256:5b0bcabd1ed22e9fb1310cf6c2dec7cdef19f0ad69efa1f392e94a4333501270")); amd64Manifest.setPlatform("amd64", "linux"); ociIndexJson.addManifest(amd64Manifest); // Serializes the JSON object. Assert.assertEquals( expectedJson.replaceAll("[\r\n\t ]", ""), JsonTemplateMapper.toUtf8String(ociIndexJson)); }
/**
 * UDF: concatenates JSON documents. Returns null when the input array, any element,
 * or any parsed node is null/missing. If every input parses to a JSON object, the
 * objects are merged pairwise left to right; otherwise every input is coerced to an
 * array and the arrays are concatenated. The combined node is serialized back to a
 * JSON string.
 */
@Udf public String concat(@UdfParameter final String... jsonStrings) { if (jsonStrings == null) { return null; } final List<JsonNode> nodes = new ArrayList<>(jsonStrings.length); boolean allObjects = true; for (final String jsonString : jsonStrings) { if (jsonString == null) { return null; } final JsonNode node = UdfJsonMapper.parseJson(jsonString); if (node.isMissingNode()) { return null; } if (allObjects && !node.isObject()) { allObjects = false; } nodes.add(node); } JsonNode result = nodes.get(0); if (allObjects) { for (int i = 1; i < nodes.size(); i++) { result = concatObjects((ObjectNode) result, (ObjectNode) nodes.get(i)); } } else { for (int i = 1; i < nodes.size(); i++) { result = concatArrays(toArrayNode(result), toArrayNode(nodes.get(i))); } } return UdfJsonMapper.writeValueAsJson(result); }
@Test public void shouldMerge2Objects() { // When: final String result = udf.concat("{\"a\": 1}", "{\"b\": 2}"); // Then: assertEquals("{\"a\":1,\"b\":2}", result); }
/**
 * Validates that the given JsonMapper serializes jobs acceptably by round-tripping
 * a reference job through every serialization requirement. Returns the mapper on
 * success; wraps any failure in IllegalArgumentException.
 */
public static JsonMapper validateJsonMapper(JsonMapper jsonMapper) {
    try {
        final String json = jsonMapper.serialize(getJobForTesting());
        // Each check inspects one requirement of the serialized form.
        testTimeFields(json);
        testUseFieldsNotMethods(json);
        testUsePolymorphism(json);
        testCanConvertBackToJob(jsonMapper, json);
        return jsonMapper;
    } catch (Exception e) {
        throw new IllegalArgumentException("The JsonMapper you provided cannot be used as it deserializes jobs in an incorrect way.", e);
    }
}
@Test
void testValidGsonJsonMapper() {
    // A Gson-backed mapper satisfies every serialization requirement.
    final GsonJsonMapper mapper = new GsonJsonMapper();
    assertThatCode(() -> validateJsonMapper(mapper)).doesNotThrowAnyException();
}
/**
 * Splits a SCRAM configuration component of the form {@code name=value} into an
 * immutable (key, value) entry, stripping one pair of surrounding double quotes
 * from the value if present.
 *
 * <p>Improvement: the manual character scan duplicated {@code String.indexOf('=')};
 * it is replaced by the library call, and the nested quote check is flattened.
 *
 * @param input a trimmed {@code name=value} component
 * @return entry of the name (text before the first '=') and the unquoted value
 * @throws FormatterException if the input contains no '='
 */
static Entry<String, String> splitTrimmedConfigStringComponent(String input) {
    // Everything before the first '=' is the key; the rest is the value.
    int i = input.indexOf('=');
    if (i < 0) {
        throw new FormatterException("No equals sign found in SCRAM component: " + input);
    }
    String value = input.substring(i + 1);
    // Strip exactly one pair of surrounding double quotes (e.g. name="foo").
    if (value.length() >= 2 && value.startsWith("\"") && value.endsWith("\"")) {
        value = value.substring(1, value.length() - 1);
    }
    return new AbstractMap.SimpleImmutableEntry<>(input.substring(0, i), value);
}
@Test
public void testSplitTrimmedConfigStringComponentOnNameEqualsFoo() {
    // "name=foo" splits into key "name" and unquoted value "foo".
    final AbstractMap.SimpleImmutableEntry<String, String> expected =
        new AbstractMap.SimpleImmutableEntry<>("name", "foo");
    assertEquals(expected, ScramParser.splitTrimmedConfigStringComponent("name=foo"));
}
/**
 * Builds the configuration fragment to drop: one placeholder data-source group per
 * name in the statement (each dropped from the live rule as it is collected), plus
 * every load balancer that is left unused afterwards, resolved from the current
 * rule configuration.
 */
@Override public ReadwriteSplittingRuleConfiguration buildToBeDroppedRuleConfiguration(final DropReadwriteSplittingRuleStatement sqlStatement) { Collection<ReadwriteSplittingDataSourceGroupRuleConfiguration> toBeDroppedDataSourceGroups = new LinkedList<>(); Map<String, AlgorithmConfiguration> toBeDroppedLoadBalancers = new HashMap<>(); for (String each : sqlStatement.getNames()) { toBeDroppedDataSourceGroups.add(new ReadwriteSplittingDataSourceGroupRuleConfiguration(each, null, null, null)); dropRule(each); } findUnusedLoadBalancers().forEach(each -> toBeDroppedLoadBalancers.put(each, rule.getConfiguration().getLoadBalancers().get(each))); return new ReadwriteSplittingRuleConfiguration(toBeDroppedDataSourceGroups, toBeDroppedLoadBalancers); }
/**
 * Dropping via the SQL statement against a mocked rule must yield exactly one
 * data-source group and one load balancer in the to-be-dropped configuration.
 */
@Test void assertBuildToBeDroppedRuleConfiguration() { ReadwriteSplittingRuleConfiguration ruleConfig = createCurrentRuleConfiguration(); ReadwriteSplittingRule rule = mock(ReadwriteSplittingRule.class); when(rule.getConfiguration()).thenReturn(ruleConfig); executor.setRule(rule); ReadwriteSplittingRuleConfiguration actual = executor.buildToBeDroppedRuleConfiguration(createSQLStatement()); assertThat(actual.getDataSourceGroups().size(), is(1)); assertThat(actual.getLoadBalancers().size(), is(1)); }
/**
 * Convenience overload: analyzes the statement with the second argument defaulted
 * to false (its semantics are defined by the two-argument overload).
 */
public Analysis analyze(Statement statement) { return analyze(statement, false); }
/**
 * ORDER BY subqueries under GROUP BY: a correlated reference to a grouped column is
 * allowed; a reference to a non-grouped column fails with
 * MUST_BE_AGGREGATE_OR_GROUP_BY; referencing an output alias from inside an ORDER
 * BY aggregation fails with the output-attribute error; uncorrelated subqueries and
 * row-field dereferences of grouped expressions remain valid.
 */
@Test public void testOrderByWithGroupByAndSubquerySelectExpression() { analyze("SELECT a FROM t1 GROUP BY a ORDER BY (SELECT a)"); assertFails( MUST_BE_AGGREGATE_OR_GROUP_BY, "line 1:46: Subquery uses 'b' which must appear in GROUP BY clause", "SELECT a FROM t1 GROUP BY a ORDER BY (SELECT b)"); analyze("SELECT a AS b FROM t1 GROUP BY t1.a ORDER BY (SELECT b)"); assertFails( REFERENCE_TO_OUTPUT_ATTRIBUTE_WITHIN_ORDER_BY_AGGREGATION, "line 2:22: Invalid reference to output projection attribute from ORDER BY aggregation", "SELECT a AS b FROM t1 GROUP BY t1.a \n" + "ORDER BY MAX((SELECT b))"); analyze("SELECT a FROM t1 GROUP BY a ORDER BY MAX((SELECT x FROM (VALUES 4) t(x)))"); analyze("SELECT CAST(ROW(1) AS ROW(someField BIGINT)) AS x\n" + "FROM (VALUES (1, 2)) t(a, b)\n" + "GROUP BY b\n" + "ORDER BY (SELECT x.someField)"); assertFails( REFERENCE_TO_OUTPUT_ATTRIBUTE_WITHIN_ORDER_BY_AGGREGATION, "line 4:22: Invalid reference to output projection attribute from ORDER BY aggregation", "SELECT CAST(ROW(1) AS ROW(someField BIGINT)) AS x\n" + "FROM (VALUES (1, 2)) t(a, b)\n" + "GROUP BY b\n" + "ORDER BY MAX((SELECT x.someField))"); }
/**
 * Verifies a CMS signed message against the given date: validates the embedded
 * certificate via the certificate verifier, then checks every CMS signature,
 * requiring each signer's issuer and serial number to match the certificate.
 * Returns the encapsulated content on success; wraps CMS failures in
 * VerificationException.
 */
public ContentInfo verify(ContentInfo signedMessage, Date date) { final SignedData signedData = SignedData.getInstance(signedMessage.getContent()); final X509Certificate cert = certificate(signedData); certificateVerifier.verify(cert, date); final X500Name name = X500Name.getInstance(cert.getIssuerX500Principal().getEncoded()); try { final CMSSignedData cms = new CMSSignedData(signedMessage); cms.verifySignatures(signerId -> { if (!name.equals(signerId.getIssuer())) { throw new VerificationException("Issuer does not match certificate"); } if (!cert.getSerialNumber().equals(signerId.getSerialNumber())) { throw new VerificationException("Serial number does not match certificate"); } return new JcaSignerInfoVerifierBuilder(digestProvider).setProvider(bcProvider).build(cert); }); } catch (CMSException e) { throw new VerificationException("Could not verify CMS", e); } return signedData.getEncapContentInfo(); }
// Verifies a known-good RViG 2011 CMS fixture: with certificate checking disabled
// the verifier must accept the signature, the content type must be the LDS
// security object OID, and the SHA-1 digest of the content must match the
// recorded hex value.
@Test
public void verifyValidRvig2011Cms() throws Exception {
    final ContentInfo signedMessage = ContentInfo.getInstance(fixture("rvig2011"));
    final ContentInfo message = new CmsVerifier(new CertificateVerifier.None()).verify(signedMessage);
    assertEquals(LdsSecurityObject.OID, message.getContentType().getId());
    assertEquals("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS",
            Hex.toHexString(
                    DigestUtils.digest("SHA1").digest(((ASN1OctetString) message.getContent()).getOctets())
            ));
}
/**
 * Creates a {@code Getter} that extracts the given field's value, chained after
 * the optional parent getter. The modifier (e.g. {@code "[any]"}) selects
 * elements when the extracted value is multi-valued.
 *
 * @param object   sample object used to infer the concrete return type
 * @param parent   getter producing the target object, or {@code null} for the root
 * @param field    the field to read
 * @param modifier collection/array modifier, may be {@code null}
 */
public static Getter newFieldGetter(Object object, Getter parent, Field field, String modifier) throws Exception {
    return newGetter(object, parent, modifier, field.getType(), field::get,
            (t, et) -> new FieldGetter(parent, field, modifier, t, et));
}
// When extracting through two levels of "[any]" collection access, the getter
// chain must infer the element type of the innermost collection (Integer here)
// as its return type.
@Test
public void newFieldGetter_whenExtractingFromNonEmpty_Collection_FieldAndParentIsNonEmptyMultiResult_thenInferReturnType() throws Exception {
    OuterObject object = new OuterObject("name", new InnerObject("inner", 0, 1, 2, 3));
    Getter parentGetter = GetterFactory.newFieldGetter(object, null, innersCollectionField, "[any]");
    Getter innerObjectNameGetter = GetterFactory.newFieldGetter(object, parentGetter, innerAttributesCollectionField, "[any]");
    Class<?> returnType = innerObjectNameGetter.getReturnType();
    assertEquals(Integer.class, returnType);
}
// evictAll is not supported by this adapter; the @MethodNotAvailable marker lets
// the shared test suite expect MethodNotAvailableException instead of behavior.
@Override
@MethodNotAvailable
public void evictAll() {
    throw new MethodNotAvailableException();
}
// The adapter does not support evictAll; invoking it must fail fast with
// MethodNotAvailableException.
@Test(expected = MethodNotAvailableException.class)
public void testEvictAll() {
    adapter.evictAll();
}
/**
 * Builds a styleable attribute-id array usable against a remote package's resources.
 * <p>
 * If the local and remote contexts share a package name, the local array is returned
 * unchanged (ids map to themselves). Otherwise each local attribute is resolved by
 * entry name in the remote package; "android"-namespace attributes keep their ids.
 * Attributes the remote package does not define are dropped from the result.
 *
 * @param localStyleableArray local R.styleable attribute ids
 * @param attributeIdMap      out-param, filled with remote-id -> local-id mappings
 * @return attribute ids valid in the remote package's resources
 */
public static int[] createBackwardCompatibleStyleable(
    @NonNull int[] localStyleableArray,
    @NonNull Context localContext,
    @NonNull Context remoteContext,
    @NonNull SparseIntArray attributeIdMap) {
  final String remotePackageName = remoteContext.getPackageName();
  if (localContext.getPackageName().equals(remotePackageName)) {
    Logger.d(
        TAG, "This is a local context (" + remotePackageName + "), optimization will be done.");
    // optimization
    for (int attrId : localStyleableArray) {
      attributeIdMap.put(attrId, attrId);
    }
    return localStyleableArray;
  }

  final Resources localRes = localContext.getResources();
  final Resources remoteRes = remoteContext.getResources();
  List<Integer> styleableIdList = new ArrayList<>(localStyleableArray.length);
  for (int attrId : localStyleableArray) {
    final boolean isAndroidAttribute = localRes.getResourcePackageName(attrId).equals("android");
    final int remoteAttrId;
    if (isAndroidAttribute) {
      // android attribute IDs are the same always. So, I can optimize.
      remoteAttrId = attrId;
    } else {
      final String attributeName = localRes.getResourceEntryName(attrId);
      remoteAttrId = remoteRes.getIdentifier(attributeName, "attr", remotePackageName);
      Logger.d(
          TAG, "attr " + attributeName + ", local id " + attrId + ", remote id " + remoteAttrId);
    }
    // getIdentifier returns 0 when the remote package lacks the attribute; skip it
    if (remoteAttrId != 0) {
      attributeIdMap.put(remoteAttrId, attrId);
      styleableIdList.add(remoteAttrId);
    }
  }
  final int[] remoteMappedStyleable = new int[styleableIdList.size()];
  for (int i = 0; i < remoteMappedStyleable.length; i++) {
    remoteMappedStyleable[i] = styleableIdList.get(i);
  }
  return remoteMappedStyleable;
}
// Cross-package mapping path: attribute names are looked up in the remote
// package, the returned styleable carries the remote ids (123/124 for the two
// mocked attrs), and the sparse map translates each remote id back to the
// local attribute id.
@Test
public void testDifferentPackageDifferentValues() {
  // this is a long setup
  Context remoteContext = Mockito.mock(Context.class);
  Mockito.doReturn("com.some.other.package").when(remoteContext).getPackageName();
  Resources remoteRes = Mockito.mock(Resources.class);
  // android-namespace lookups fall through to the real resources; package-local
  // names resolve to fixed fake ids (or 0 = unknown)
  Mockito.doAnswer(
          invocation -> {
            final Object packageName = invocation.getArgument(2);
            final String resName = invocation.getArgument(0).toString();
            if (packageName == null || packageName.equals("android")) {
              return getApplicationContext()
                  .getResources()
                  .getIdentifier(resName, invocation.getArgument(1), null);
            } else {
              switch (resName) {
                case "showPreview":
                  return 123;
                case "autoCap":
                  return 124;
                default:
                  return 0;
              }
            }
          })
      .when(remoteRes)
      .getIdentifier(Mockito.anyString(), Mockito.anyString(), Mockito.anyString());
  Mockito.doReturn(remoteRes).when(remoteContext).getResources();
  // starting test
  SparseIntArray sparseIntArray = new SparseIntArray();
  int[] backwardCompatibleStyleable =
      Support.createBackwardCompatibleStyleable(
          R.styleable.KeyboardLayout, getApplicationContext(), remoteContext, sparseIntArray);
  Mockito.verify(remoteRes).getIdentifier("showPreview", "attr", "com.some.other.package");
  Mockito.verify(remoteRes).getIdentifier("autoCap", "attr", "com.some.other.package");
  Mockito.verifyNoMoreInteractions(remoteRes);
  Assert.assertNotSame(backwardCompatibleStyleable, R.styleable.KeyboardLayout);
  Assert.assertEquals(backwardCompatibleStyleable.length, R.styleable.KeyboardLayout.length);
  Assert.assertEquals(backwardCompatibleStyleable.length, sparseIntArray.size());
  for (int attrId : backwardCompatibleStyleable) {
    if (attrId == 123) {
      Assert.assertEquals(R.attr.showPreview, sparseIntArray.get(123));
    } else if (attrId == 124) {
      Assert.assertEquals(R.attr.autoCap, sparseIntArray.get(124));
    } else {
      Assert.assertEquals(attrId, sparseIntArray.get(attrId));
    }
  }
}
/**
 * Starts the configured table services.
 * <p>
 * With auto-discovery enabled, hoodie tables are discovered under every existing
 * base path (multiple base paths are supported); otherwise table paths come from
 * the supplied properties. Services then run in batch or streaming mode per config.
 */
public void startServices() throws ExecutionException, InterruptedException {
    LOG.info("StartServices Config: " + cfg);
    List<String> tablePaths;
    if (cfg.autoDiscovery) {
        // We support defining multi base paths
        tablePaths = cfg.basePath.stream()
                .filter(this::pathExists)
                .flatMap(p -> MultiTableServiceUtils.findHoodieTablesUnderPath(jsc, p).stream())
                .collect(Collectors.toList());
    } else {
        tablePaths = MultiTableServiceUtils.getTablesToBeServedFromProps(jsc, props);
    }
    LOG.info("All table paths: " + String.join(",", tablePaths));
    if (cfg.batch) {
        batchRunTableServices(tablePaths);
    } else {
        streamRunTableServices(tablePaths);
    }
}
// Runs all batch table services against a single table and checks the resulting
// timeline: one clean, two delta commits, one completed replace commit, and four
// instants on the commits timeline overall.
@Test
public void testRunAllServicesForSingleTable() throws IOException, ExecutionException, InterruptedException {
    HoodieMultiTableServicesMain.Config cfg = getHoodieMultiServiceConfig();
    HoodieTableMetaClient metaClient1 = getMetaClient("table1");
    cfg.batch = true;
    cfg.basePath = Collections.singletonList(metaClient1.getBasePath().toString());
    HoodieMultiTableServicesMain main = new HoodieMultiTableServicesMain(jsc, cfg);
    main.startServices();
    // Verify cleans
    Assertions.assertEquals(1, metaClient1.reloadActiveTimeline().getCleanerTimeline().countInstants());
    // Verify delta commits
    Assertions.assertEquals(2, metaClient1.reloadActiveTimeline().getDeltaCommitTimeline().countInstants());
    // Verify replace commits
    Assertions.assertEquals(1, metaClient1.reloadActiveTimeline().getCompletedReplaceTimeline().countInstants());
    // Verify compactions, delta commits and replace commits
    Assertions.assertEquals(4, metaClient1.reloadActiveTimeline().getCommitsTimeline().countInstants());
}
/** Delegates {@code invokeAll} to the wrapped executor service unchanged. */
@Override
public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException {
    return delegate.invokeAll(tasks);
}
// invokeAll must pass the task collection straight through to the delegate.
@Test
public void invokeAll() throws InterruptedException {
    underTest.invokeAll(callables);
    verify(executorService).invokeAll(callables);
}
/**
 * Resolves the configured aggregation name to an {@link Aggregation}.
 * Only meaningful in FETCH_MODE_ALL; otherwise (or when the name is empty)
 * no aggregation is applied. An unknown name makes valueOf throw.
 */
Aggregation parseAggregationConfig(String aggName) {
    boolean hasName = !StringUtils.isEmpty(aggName);
    if (hasName && fetchMode.equals(TbGetTelemetryNodeConfiguration.FETCH_MODE_ALL)) {
        return Aggregation.valueOf(aggName);
    }
    return Aggregation.NONE;
}
// A whitespace-only aggregation name passes the isEmpty check but is not a
// valid enum constant, so Aggregation.valueOf must throw.
@Test
public void givenAggregationWhiteSpace_whenParseAggregation_thenException() {
    Assertions.assertThrows(IllegalArgumentException.class, () -> node.parseAggregationConfig(" "));
}
/** Returns the precomputed mean of this distribution. */
@Override
public double mean() {
    return mean;
}
// Beta(2, 5) has mean a / (a + b) = 2/7 ~= 0.2857143.
@Test
public void testMean() {
    System.out.println("mean");
    BetaDistribution beta = new BetaDistribution(2, 5);
    beta.rand();
    assertEquals(0.2857143, beta.mean(), 1E-7);
}
/**
 * Resolves an input filename against the environment. When connected to a
 * repository, the entry-level "current directory" variable is rewritten to the
 * transformation-level directory variable before substitution, so repository
 * runs resolve paths relative to the transformation.
 */
String filenameValidatorForInputFiles( String filename ) {
  Repository repository = getTransMeta().getRepository();
  boolean connectedToRepo = repository != null && repository.isConnected();
  String toSubstitute = filename;
  if ( connectedToRepo && filename.contains( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY ) ) {
    toSubstitute = filename.replace( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY,
        Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY );
  }
  return environmentSubstitute( toSubstitute );
}
// When the repository is present but NOT connected, the entry-level current
// directory variable must be substituted as-is rather than rewritten to the
// transformation-level variable.
@Test
public void testFilenameValidatorForInputFilesNotConnectedToRep() {
    CsvInput csvInput = mock( CsvInput.class );
    String internalEntryVariable = "internalEntryVariable";
    String internalTransformationVariable = "internalTransformationVariable";
    String filename = Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY ;
    csvInput.setVariable( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY, internalEntryVariable );
    csvInput.setVariable( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY, internalTransformationVariable );
    TransMeta transmeta = mock( TransMeta.class );
    Repository rep = mock( Repository.class );
    when( csvInput.getTransMeta() ).thenReturn( transmeta );
    when( transmeta.getRepository() ).thenReturn( rep );
    when( rep.isConnected() ).thenReturn( false );
    when( csvInput.filenameValidatorForInputFiles( any() ) ).thenCallRealMethod();
    when( csvInput.environmentSubstitute( Const.INTERNAL_VARIABLE_ENTRY_CURRENT_DIRECTORY ) ).thenReturn( internalEntryVariable );
    when( csvInput.environmentSubstitute( Const.INTERNAL_VARIABLE_TRANSFORMATION_FILENAME_DIRECTORY ) ).thenReturn( internalTransformationVariable );
    String finalFilename = csvInput.filenameValidatorForInputFiles(filename);
    assertEquals( internalEntryVariable, finalFilename );
}
/**
 * Removes all points whose latitude is NaN, in place, preserving the order of
 * the remaining points. Classic two-index compaction: valid points are copied
 * toward the front, then the list is trimmed to the number kept.
 */
static void removeNaN(PointList pointList) {
    int writeIdx = 0;
    for (int readIdx = 0; readIdx < pointList.size(); readIdx++) {
        double lat = pointList.getLat(readIdx);
        if (Double.isNaN(lat))
            continue;
        pointList.set(writeIdx, lat, pointList.getLon(readIdx), pointList.getEle(readIdx));
        writeIdx++;
    }
    pointList.trimToSize(writeIdx);
}
// removeNaN must drop every NaN point, keep order, be idempotent, and leave
// lat == lon == ele for the surviving synthetic points.
@Test
public void testRemoveNaN() {
    PointList pl = new PointList(10, true);
    pl.add(Double.NaN, Double.NaN, Double.NaN);
    pl.add(1, 1, 1);
    pl.add(Double.NaN, Double.NaN, Double.NaN);
    pl.add(Double.NaN, Double.NaN, Double.NaN);
    pl.add(Double.NaN, Double.NaN, Double.NaN);
    pl.add(5, 5, 5);
    pl.add(6, 6, 6);
    pl.add(7, 7, 7);
    pl.add(Double.NaN, Double.NaN, Double.NaN);
    pl.add(8, 8, 8);
    pl.add(Double.NaN, Double.NaN, Double.NaN);
    pl.add(9, 9, 9);
    pl.add(10, 10, 10);
    pl.add(Double.NaN, Double.NaN, Double.NaN);
    pl.add(Double.NaN, Double.NaN, Double.NaN);
    pl.add(Double.NaN, Double.NaN, Double.NaN);
    pl.add(14, 14, 14);
    pl.add(Double.NaN, Double.NaN, Double.NaN);
    RamerDouglasPeucker.removeNaN(pl);
    // doing it again should be no problem
    RamerDouglasPeucker.removeNaN(pl);
    RamerDouglasPeucker.removeNaN(pl);
    assertEquals(8, pl.size());
    List<Integer> expected = Arrays.asList(1, 5, 6, 7, 8, 9, 10, 14);
    List<Integer> given = new ArrayList<>();
    for (int i = 0; i < pl.size(); i++) {
        assertEquals(pl.getLat(i), pl.getEle(i), 1.e-6);
        assertEquals(pl.getLon(i), pl.getEle(i), 1.e-6);
        given.add((int) pl.getLat(i));
    }
    assertEquals(expected, given);
}
/**
 * Right-pads the input with copies of the padding string until the target
 * length is reached, then truncates to exactly that length (so an input longer
 * than targetLen is cut short). Returns null for a null input, a null/empty
 * padding, or a null/negative target length.
 */
@Udf
public String rpad(
    @UdfParameter(description = "String to be padded") final String input,
    @UdfParameter(description = "Target length") final Integer targetLen,
    @UdfParameter(description = "Padding string") final String padding) {
  if (input == null) {
    return null;
  }
  final boolean invalidPadding = padding == null || padding.isEmpty();
  final boolean invalidTarget = targetLen == null || targetLen < 0;
  if (invalidPadding || invalidTarget) {
    return null;
  }

  final StringBuilder padded = new StringBuilder(targetLen + padding.length());
  padded.append(input);
  // keep appending whole padding units; the final setLength trims any overshoot
  while (padded.length() < targetLen) {
    padded.append(padding);
  }
  padded.setLength(targetLen);
  return padded.toString();
}
// A null input string short-circuits to a null result regardless of the other args.
@Test
public void shouldReturnNullForNullInputString() {
    assertThat(udf.rpad(null, 4, "foo"), is(nullValue()));
}
/**
 * Sends (or records) a GOAWAY frame. If the connection reports the frame as a
 * no-op (e.g. a GOAWAY with an equal/lower lastStreamId was already sent), the
 * promise succeeds without writing; bookkeeping failures fail the promise.
 * The debug data buffer is released exactly once on every path.
 */
@Override
public ChannelFuture goAway(final ChannelHandlerContext ctx, final int lastStreamId, final long errorCode,
        final ByteBuf debugData, ChannelPromise promise) {
    promise = promise.unvoid();
    final Http2Connection connection = connection();
    try {
        if (!connection.goAwaySent(lastStreamId, errorCode, debugData)) {
            debugData.release();
            promise.trySuccess();
            return promise;
        }
    } catch (Throwable cause) {
        debugData.release();
        promise.tryFailure(cause);
        return promise;
    }
    // Need to retain before we write the buffer because if we do it after the refCnt could already be 0 and
    // result in an IllegalRefCountException.
    debugData.retain();
    ChannelFuture future = frameWriter().writeGoAway(ctx, lastStreamId, errorCode, debugData, promise);
    if (future.isDone()) {
        processGoAwayWriteResult(ctx, lastStreamId, errorCode, debugData, future);
    } else {
        future.addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                processGoAwayWriteResult(ctx, lastStreamId, errorCode, debugData, future);
            }
        });
    }
    return future;
}
// After a GOAWAY has been sent, attempting one with a HIGHER lastStreamId must
// fail the promise, release the debug data, and write no further frames.
@Test
public void cannotSendGoAwayFrameWithIncreasingLastStreamIds() throws Exception {
    handler = newHandler();
    ByteBuf data = dummyData();
    long errorCode = Http2Error.INTERNAL_ERROR.code();
    handler.goAway(ctx, STREAM_ID, errorCode, data.retain(), promise);
    verify(connection).goAwaySent(eq(STREAM_ID), eq(errorCode), eq(data));
    verify(frameWriter).writeGoAway(eq(ctx), eq(STREAM_ID), eq(errorCode), eq(data), eq(promise));
    // The frameWriter is only mocked, so it should not have interacted with the promise.
    assertFalse(promise.isDone());
    when(connection.goAwaySent()).thenReturn(true);
    when(remote.lastStreamKnownByPeer()).thenReturn(STREAM_ID);
    doAnswer(new Answer<Boolean>() {
        @Override
        public Boolean answer(InvocationOnMock invocationOnMock) {
            throw new IllegalStateException();
        }
    }).when(connection).goAwaySent(anyInt(), anyLong(), any(ByteBuf.class));
    handler.goAway(ctx, STREAM_ID + 2, errorCode, data, promise);
    assertTrue(promise.isDone());
    assertFalse(promise.isSuccess());
    assertEquals(0, data.refCnt());
    verifyNoMoreInteractions(frameWriter);
}
/**
 * Validates an update against an existing source config and returns the merged
 * result. Identity fields (tenant, namespace, name) and processing guarantees
 * may not change; other non-empty/non-null fields from the new config overwrite
 * the existing values. Batch-source-ness may not flip between regular and batch.
 *
 * @throws IllegalArgumentException if an immutable field differs
 */
public static SourceConfig validateUpdate(SourceConfig existingConfig, SourceConfig newConfig) {
    SourceConfig mergedConfig = clone(existingConfig);
    // identity fields must match exactly
    if (!existingConfig.getTenant().equals(newConfig.getTenant())) {
        throw new IllegalArgumentException("Tenants differ");
    }
    if (!existingConfig.getNamespace().equals(newConfig.getNamespace())) {
        throw new IllegalArgumentException("Namespaces differ");
    }
    if (!existingConfig.getName().equals(newConfig.getName())) {
        throw new IllegalArgumentException("Function Names differ");
    }
    // optional overrides: only applied when the new config actually sets them
    if (!StringUtils.isEmpty(newConfig.getClassName())) {
        mergedConfig.setClassName(newConfig.getClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getTopicName())) {
        mergedConfig.setTopicName(newConfig.getTopicName());
    }
    if (!StringUtils.isEmpty(newConfig.getSerdeClassName())) {
        mergedConfig.setSerdeClassName(newConfig.getSerdeClassName());
    }
    if (!StringUtils.isEmpty(newConfig.getSchemaType())) {
        mergedConfig.setSchemaType(newConfig.getSchemaType());
    }
    if (newConfig.getConfigs() != null) {
        mergedConfig.setConfigs(newConfig.getConfigs());
    }
    if (newConfig.getSecrets() != null) {
        mergedConfig.setSecrets(newConfig.getSecrets());
    }
    if (!StringUtils.isEmpty(newConfig.getLogTopic())) {
        mergedConfig.setLogTopic(newConfig.getLogTopic());
    }
    if (newConfig.getProcessingGuarantees() != null && !newConfig.getProcessingGuarantees()
            .equals(existingConfig.getProcessingGuarantees())) {
        throw new IllegalArgumentException("Processing Guarantees cannot be altered");
    }
    if (newConfig.getParallelism() != null) {
        mergedConfig.setParallelism(newConfig.getParallelism());
    }
    if (newConfig.getResources() != null) {
        mergedConfig
                .setResources(ResourceConfigUtils.merge(existingConfig.getResources(), newConfig.getResources()));
    }
    if (!StringUtils.isEmpty(newConfig.getArchive())) {
        mergedConfig.setArchive(newConfig.getArchive());
    }
    if (!StringUtils.isEmpty(newConfig.getRuntimeFlags())) {
        mergedConfig.setRuntimeFlags(newConfig.getRuntimeFlags());
    }
    if (!StringUtils.isEmpty(newConfig.getCustomRuntimeOptions())) {
        mergedConfig.setCustomRuntimeOptions(newConfig.getCustomRuntimeOptions());
    }
    // a source cannot be converted between regular and batch after creation
    if (isBatchSource(existingConfig) != isBatchSource(newConfig)) {
        throw new IllegalArgumentException("Sources cannot be update between regular sources and batchsource");
    }
    if (newConfig.getBatchSourceConfig() != null) {
        validateBatchSourceConfigUpdate(existingConfig.getBatchSourceConfig(), newConfig.getBatchSourceConfig());
        mergedConfig.setBatchSourceConfig(newConfig.getBatchSourceConfig());
    }
    if (newConfig.getProducerConfig() != null) {
        mergedConfig.setProducerConfig(newConfig.getProducerConfig());
    }
    return mergedConfig;
}
// Merging a config with an identical copy must reproduce the original exactly
// (compared via JSON serialization).
@Test
public void testMergeEqual() {
    SourceConfig existing = createSourceConfig();
    SourceConfig update = createSourceConfig();
    SourceConfig merged = SourceConfigUtils.validateUpdate(existing, update);
    assertEquals(new Gson().toJson(existing), new Gson().toJson(merged));
}
/**
 * Records a fetch request latency sample into the aggregate sensor and, when a
 * node id is given and a per-node sensor has been registered, into that node's
 * "node-<id>.latency" sensor as well.
 */
void recordLatency(String node, long requestLatencyMs) {
    fetchLatency.record(requestLatencyMs);
    if (node.isEmpty()) {
        return;
    }
    Sensor nodeRequestTime = this.metrics.getSensor("node-" + node + ".latency");
    if (nodeRequestTime != null) {
        nodeRequestTime.record(requestLatencyMs);
    }
}
// Latency samples must update both the aggregate fetch-latency metrics and the
// per-node metrics; a sample for a different node must not disturb the first
// node's metrics.
@Test
public void testNodeLatency() {
    String connectionId = "0";
    MetricName nodeLatencyAvg = metrics.metricName("request-latency-avg", "group");
    MetricName nodeLatencyMax = metrics.metricName("request-latency-max", "group");
    registerNodeLatencyMetric(connectionId, nodeLatencyAvg, nodeLatencyMax);
    metricsManager.recordLatency(connectionId, 123);
    // advance past the window so the second sample lands in a new window
    time.sleep(metrics.config().timeWindowMs() + 1);
    metricsManager.recordLatency(connectionId, 456);
    assertEquals(289.5, metricValue(metricsRegistry.fetchLatencyAvg), EPSILON);
    assertEquals(456, metricValue(metricsRegistry.fetchLatencyMax), EPSILON);
    assertEquals(289.5, metricValue(nodeLatencyAvg), EPSILON);
    assertEquals(456, metricValue(nodeLatencyMax), EPSILON);
    // Record metric against another node.
    metricsManager.recordLatency("1", 501);
    assertEquals(360, metricValue(metricsRegistry.fetchLatencyAvg), EPSILON);
    assertEquals(501, metricValue(metricsRegistry.fetchLatencyMax), EPSILON);
    // Node specific metric should not be affected.
    assertEquals(289.5, metricValue(nodeLatencyAvg), EPSILON);
    assertEquals(456, metricValue(nodeLatencyMax), EPSILON);
}
/**
 * Builds a {@code KafkaUserModel} from a KafkaUser custom resource.
 * Validates the TLS username and any desired password, copies authentication,
 * quotas, and secret template metadata, and maps simple ACL authorization —
 * rejecting it when the cluster does not support the ACL admin API.
 *
 * @throws InvalidResourceException when simple ACLs are configured but unsupported
 */
public static KafkaUserModel fromCrd(KafkaUser kafkaUser, String secretPrefix, boolean aclsAdminApiSupported) {
    KafkaUserModel result = new KafkaUserModel(kafkaUser.getMetadata().getNamespace(),
            kafkaUser.getMetadata().getName(),
            Labels.fromResource(kafkaUser).withStrimziKind(kafkaUser.getKind()),
            secretPrefix);
    validateTlsUsername(kafkaUser);
    validateDesiredPassword(kafkaUser);
    result.setOwnerReference(kafkaUser);
    result.setAuthentication(kafkaUser.getSpec().getAuthentication());
    if (kafkaUser.getSpec().getAuthorization() != null
            && kafkaUser.getSpec().getAuthorization().getType().equals(KafkaUserAuthorizationSimple.TYPE_SIMPLE)) {
        if (aclsAdminApiSupported) {
            KafkaUserAuthorizationSimple simple = (KafkaUserAuthorizationSimple) kafkaUser.getSpec().getAuthorization();
            result.setSimpleAclRules(simple.getAcls());
        } else {
            throw new InvalidResourceException("Simple authorization ACL rules are configured but not supported in the Kafka cluster configuration.");
        }
    }
    result.setQuotas(kafkaUser.getSpec().getQuotas());
    // propagate labels/annotations from the secret template, when present
    if (kafkaUser.getSpec().getTemplate() != null
            && kafkaUser.getSpec().getTemplate().getSecret() != null
            && kafkaUser.getSpec().getTemplate().getSecret().getMetadata() != null) {
        result.templateSecretLabels = kafkaUser.getSpec().getTemplate().getSecret().getMetadata().getLabels();
        result.templateSecretAnnotations = kafkaUser.getSpec().getTemplate().getSecret().getMetadata().getAnnotations();
    }
    return result;
}
// A 65-character name is still a valid SCRAM-SHA-512 username, so fromCrd must
// accept it without throwing.
// NOTE(review): the local is named 'tooLong' but the name is in fact valid here —
// consider renaming for clarity.
@Test
public void testFromCrdScramShaUserWith65CharSaslUsernameValid() {
    // 65 characters => should work with SCRAM-SHA-512
    KafkaUser tooLong = new KafkaUserBuilder(scramShaUser)
            .editMetadata()
                .withName("User-123456789012345678901234567890123456789012345678901234567890")
            .endMetadata()
            .build();
    KafkaUserModel.fromCrd(tooLong, UserOperatorConfig.SECRET_PREFIX.defaultValue(),
            Boolean.parseBoolean(UserOperatorConfig.ACLS_ADMIN_API_SUPPORTED.defaultValue()));
}
/**
 * Returns the configured Elasticsearch hosts.
 * <p>
 * A defensive copy of the internal array is returned so callers cannot mutate
 * this config's state through the result (the previous version exposed the
 * internal array directly).
 *
 * @return a copy of the configured hosts, or {@code null} if none were set
 */
public HttpHost[] getHttpHosts() {
    return httpHosts == null ? null : httpHosts.clone();
}
// Multiple bootstrap URLs must each be parsed into their own HttpHost.
@Test
public void usesMultipleHostnames() {
    HttpHost[] hosts = new EsConfig("http://host1:9200", "http://host2:9200").getHttpHosts();
    assertEquals(2, hosts.length);
    assertEquals("host1", hosts[0].getHostName());
    assertEquals("host2", hosts[1].getHostName());
}
/**
 * Generates rule description sections from the rule's HTML description, or an
 * empty set when the rule has no HTML description.
 */
@Override
public Set<RuleDescriptionSectionDto> generateSections(RulesDefinition.Rule rule) {
    return getDescriptionInHtml(rule)
            .map(this::generateSections)
            .orElse(emptySet());
}
// Without an "Ask Yourself Whether" title the generator must still emit the
// default, root-cause, and how-to-fix sections (no vulnerable section).
@Test
public void parse_return_null_vulnerable_when_no_ask_yourself_whether_title() {
    when(rule.htmlDescription()).thenReturn(DESCRIPTION + RECOMMENTEDCODINGPRACTICE);
    Set<RuleDescriptionSectionDto> results = generator.generateSections(rule);
    Map<String, String> sectionKeyToContent = results.stream()
            .collect(toMap(RuleDescriptionSectionDto::getKey, RuleDescriptionSectionDto::getContent));
    assertThat(sectionKeyToContent).hasSize(3)
            .containsEntry(DEFAULT_SECTION_KEY, rule.htmlDescription())
            .containsEntry(ROOT_CAUSE_SECTION_KEY, DESCRIPTION)
            .containsEntry(HOW_TO_FIX_SECTION_KEY, RECOMMENTEDCODINGPRACTICE);
}
/** Returns the values held by the delegated state backend. */
@Override
public Iterable<V> get() throws Exception {
    // NOTE: Both heap and rocks return copied state. But in RocksDB, changes to the returned
    // value will not get into the snapshot.
    return delegatedState.get();
}
// Round-trips a single value through the state and iterates it back out via the
// shared testIterator helper.
@Test
public void testValuesIterator() throws Exception {
    testIterator(singletonList("value"), state -> state.get().iterator(), "value");
}
/**
 * Sets the merge policy class name.
 *
 * @param policy fully qualified merge policy class name; must contain text
 * @return this config, for chaining
 * @throws IllegalArgumentException if {@code policy} is null or blank
 */
public MergePolicyConfig setPolicy(String policy) {
    this.policy = checkHasText(policy, "Merge policy must contain text!");
    return this;
}
// setPolicy must store the class name verbatim and expose it via getPolicy.
@Test
public void setPolicy() {
    String policyName = DiscardMergePolicy.class.getName();
    config.setPolicy(policyName);
    assertEquals(policyName, config.getPolicy());
}
/**
 * Detaches the given appender from this container.
 *
 * @return {@code true} if the appender was attached and has been removed;
 *         {@code false} for {@code null} or an appender that was not attached
 */
public boolean detachAppender(Appender<E> appender) {
    return appender != null && appenderList.remove(appender);
}
// Detaching a named appender removes it (lookup by name then returns null) and
// a second detach of the same appender reports false.
@Test
public void testDetachAppender() throws Exception {
    NOPAppender<TestEvent> unnamed = new NOPAppender<TestEvent>();
    unnamed.start();
    aai.addAppender(unnamed);

    NOPAppender<TestEvent> named = new NOPAppender<TestEvent>();
    named.setName("test");
    named.start();
    aai.addAppender(named);

    Assertions.assertTrue(aai.detachAppender(named), "Appender not detached");
    Assertions.assertNull(aai.getAppender("test"), "Appender was not removed");
    Assertions.assertFalse(aai.detachAppender(named), "Appender detach error");
}
/**
 * Returns the shared pool-thread executor, lazily initializing the shared
 * executors via {@code setup()} on first access.
 */
public static ExecutorService getPoolThreadExecutor() {
    setup();
    return poolThreadExecutor;
}
// Repeated calls must return the same non-null shared executor instance.
@Test
public void poolThread() {
    ExecutorService first = SharedExecutors.getPoolThreadExecutor();
    assertNotNull("ExecutorService must not be null", first);
    ExecutorService second = SharedExecutors.getPoolThreadExecutor();
    assertSame("factories should be same", first, second);
}
/** Returns a binary combine function that computes the minimum of {@code long} values. */
public static Combine.BinaryCombineLongFn ofLongs() {
    return new Min.MinLongFn();
}
// Min.ofLongs over [1..4] must combine to 1.
@Test
public void testMinLongFn() {
    testCombineFn(Min.ofLongs(), Lists.newArrayList(1L, 2L, 3L, 4L), 1L);
}
/**
 * Returns the value of the given query parameter from the request, delegating
 * to the overload with a {@code null} default (i.e. {@code null} when absent).
 */
public static <
        X, P extends MessageQueryParameter<X>, R extends RequestBody, M extends MessageParameters>
        X getQueryParameter(final HandlerRequest<R> request, final Class<P> queryParameterClass)
                throws RestHandlerException {
    return getQueryParameter(request, queryParameterClass, null);
}
// A "key=true" query string must resolve the boolean query parameter to true.
@Test
void testGetQueryParameter() throws Exception {
    final Boolean queryParameter =
            HandlerRequestUtils.getQueryParameter(
                    HandlerRequest.resolveParametersAndCreate(
                            EmptyRequestBody.getInstance(),
                            new TestMessageParameters(),
                            Collections.emptyMap(),
                            Collections.singletonMap("key", Collections.singletonList("true")),
                            Collections.emptyList()),
                    TestBooleanQueryParameter.class);
    assertThat(queryParameter).isTrue();
}
/**
 * Decides whether a class should be loaded by the isolated plugin class loader.
 * Equivalent (by De Morgan) to the original !(EXCLUDE && !INCLUDE): a class is
 * isolated unless it matches EXCLUDE without also matching INCLUDE.
 */
public static boolean shouldLoadInIsolation(String name) {
    return !EXCLUDE.matcher(name).matches() || INCLUDE.matcher(name).matches();
}
// Every bundled transform (and predicate) class must be loaded in isolation by
// the plugin class loader.
// NOTE(review): "HoistField$Key" is listed twice and "HoistField$Value" appears
// to be missing from the list — confirm and fix the fixture if unintended.
@Test
public void testTransformsClasses() {
    List<String> transformsClasses = Arrays.asList(
            "org.apache.kafka.connect.transforms.",
            "org.apache.kafka.connect.transforms.util.",
            "org.apache.kafka.connect.transforms.util.NonEmptyListValidator",
            "org.apache.kafka.connect.transforms.util.RegexValidator",
            "org.apache.kafka.connect.transforms.util.Requirements",
            "org.apache.kafka.connect.transforms.util.SchemaUtil",
            "org.apache.kafka.connect.transforms.util.SimpleConfig",
            "org.apache.kafka.connect.transforms.Cast",
            "org.apache.kafka.connect.transforms.Cast$Key",
            "org.apache.kafka.connect.transforms.Cast$Value",
            "org.apache.kafka.connect.transforms.ExtractField",
            "org.apache.kafka.connect.transforms.ExtractField$Key",
            "org.apache.kafka.connect.transforms.ExtractField$Value",
            "org.apache.kafka.connect.transforms.Flatten",
            "org.apache.kafka.connect.transforms.Flatten$Key",
            "org.apache.kafka.connect.transforms.Flatten$Value",
            "org.apache.kafka.connect.transforms.HoistField",
            "org.apache.kafka.connect.transforms.HoistField$Key",
            "org.apache.kafka.connect.transforms.HoistField$Key",
            "org.apache.kafka.connect.transforms.InsertField",
            "org.apache.kafka.connect.transforms.InsertField$Key",
            "org.apache.kafka.connect.transforms.InsertField$Value",
            "org.apache.kafka.connect.transforms.MaskField",
            "org.apache.kafka.connect.transforms.MaskField$Key",
            "org.apache.kafka.connect.transforms.MaskField$Value",
            "org.apache.kafka.connect.transforms.RegexRouter",
            "org.apache.kafka.connect.transforms.ReplaceField",
            "org.apache.kafka.connect.transforms.ReplaceField$Key",
            "org.apache.kafka.connect.transforms.ReplaceField$Value",
            "org.apache.kafka.connect.transforms.SetSchemaMetadata",
            "org.apache.kafka.connect.transforms.SetSchemaMetadata$Key",
            "org.apache.kafka.connect.transforms.SetSchemaMetadata$Value",
            "org.apache.kafka.connect.transforms.TimestampConverter",
            "org.apache.kafka.connect.transforms.TimestampConverter$Key",
            "org.apache.kafka.connect.transforms.TimestampConverter$Value",
            "org.apache.kafka.connect.transforms.TimestampRouter",
            "org.apache.kafka.connect.transforms.TimestampRouter$Key",
            "org.apache.kafka.connect.transforms.TimestampRouter$Value",
            "org.apache.kafka.connect.transforms.ValueToKey",
            "org.apache.kafka.connect.transforms.predicates.",
            "org.apache.kafka.connect.transforms.predicates.HasHeaderKey",
            "org.apache.kafka.connect.transforms.predicates.RecordIsTombstone",
            "org.apache.kafka.connect.transforms.predicates.TopicNameMatches"
    );
    for (String clazz : transformsClasses) {
        assertTrue(PluginUtils.shouldLoadInIsolation(clazz),
                clazz + " from 'transforms' is not loaded in isolation but should be");
    }
}
/**
 * Unregisters a cluster listener. The removal is applied asynchronously on the
 * property-event executor so it is serialized with other state mutations.
 */
@Override
public void unregisterClusterListener(final LoadBalancerClusterListener listener) {
    trace(_log, "unregister listener: ", listener);
    _executor.execute(new PropertyEvent("remove cluster listener for state") {
        @Override
        public void innerRun() {
            _clusterListeners.remove(listener);
        }
    });
}
// After unregistering, the listener must not see further cluster updates: its
// added-count stays at 1 even though another put happens.
@Test
public void testUnregisterClusterListener() {
    reset();
    MockClusterListener clusterListener = new MockClusterListener();
    _state.registerClusterListener(clusterListener);
    assertEquals(clusterListener.getClusterAddedCount(CLUSTER1_CLUSTER_NAME), 0, "expected zero count");
    // first add a cluster
    _state.listenToCluster(CLUSTER1_CLUSTER_NAME, new NullStateListenerCallback());
    _clusterRegistry.put(CLUSTER1_CLUSTER_NAME, new ClusterProperties(CLUSTER1_CLUSTER_NAME));
    assertEquals(clusterListener.getClusterAddedCount(CLUSTER1_CLUSTER_NAME), 1, "expected 1 call after put");
    _state.unregisterClusterListener(clusterListener);
    _clusterRegistry.put(CLUSTER1_CLUSTER_NAME, new ClusterProperties(CLUSTER1_CLUSTER_NAME));
    assertEquals(clusterListener.getClusterAddedCount(CLUSTER1_CLUSTER_NAME), 1,
            "expected 1 call, since we shouldn't have seen the latest put");
}
/**
 * Formats a byte count as a human-readable string using binary (1024-based)
 * units from HEAP_SIZE_UNITS, with up to two decimal places and a thousands
 * separator; negative inputs keep their sign, zero is "0 B".
 */
public static String formatBytes(long sizeInBytes) {
    if (sizeInBytes == 0) {
        return "0 B";
    }
    String sign = sizeInBytes < 0 ? "-" : "";
    long magnitude = Math.abs(sizeInBytes);
    // index of the largest 1024-power not exceeding the magnitude
    int unitIndex = (int) (Math.log10(magnitude) / Math.log10(1024));
    double scaled = magnitude / Math.pow(1024, unitIndex);
    DecimalFormat formatter = new DecimalFormat("#,##0.##");
    return sign + formatter.format(scaled) + " " + HEAP_SIZE_UNITS[unitIndex];
}
// Exercises every binary unit boundary, sign handling, thousands separators,
// two-decimal rounding, and the Long.MAX_VALUE "8 EiB" extreme.
@Test
public void testFormatBytes() {
    sampleTesting(1, "B", -10, 2, 0, 2, 10);
    sampleTesting(Math.pow(2, 10), "KiB", -100, 50, 30, 100);
    sampleTesting(Math.pow(2, 20), "MiB", -100, 50, 30, 100);
    sampleTesting(Math.pow(2, 30), "GiB", -10, 30, 30, 100);
    sampleTesting(Math.pow(2, 40), "TiB", -100, 50, 30, 100);
    sampleTesting(Math.pow(2, 50), "PiB", -100, 50, 30, 100);
    Assert.assertEquals("-1,023 B", formatBytes(-1023));
    Assert.assertEquals("-1 KiB", formatBytes(-1024));
    Assert.assertEquals("1,000 TiB", formatBytes(1000 * (long) Math.pow(2, 40)));
    Assert.assertEquals("1 PiB", formatBytes(1024 * (long) Math.pow(2, 40)));
    Assert.assertEquals("8 EiB", formatBytes(Long.MAX_VALUE));
    // Validate Decimal
    Assert.assertEquals("95.37 MiB", formatBytes(100000000));
    Assert.assertEquals("-9.54 MiB", formatBytes(-10000000));
    Assert.assertEquals("1.95 KiB", formatBytes(2001));
    Assert.assertEquals("19.53 KiB", formatBytes(20000));
    Assert.assertEquals("186.26 GiB", formatBytes(200000000000L));
}
public static PropertyDescriptor[] getPropertyDescriptors(Class<?> clazz) throws BeanException { BeanInfo beanInfo; try { beanInfo = Introspector.getBeanInfo(clazz); } catch (IntrospectionException e) { throw new BeanException(e); } return ArrayUtil.filter(beanInfo.getPropertyDescriptors(), t -> { // 过滤掉getClass方法 return false == "class".equals(t.getName()); }); }
// Introspection of SubPerson must surface both inherited and declared
// properties (and exclude the synthetic "class" property implicitly).
@Test
public void getPropertyDescriptorsTest() {
    final HashSet<Object> names = CollUtil.newHashSet();
    for (final PropertyDescriptor descriptor : BeanUtil.getPropertyDescriptors(SubPerson.class)) {
        names.add(descriptor.getName());
    }
    for (final String expected : new String[]{"age", "id", "name", "openid", "slow", "subName"}) {
        assertTrue(names.contains(expected));
    }
}
/**
 * Sends an HTTP GET to the given URL and returns the response body decoded
 * with the supplied charset.
 *
 * @param urlString     target URL
 * @param customCharset charset used to decode the response body
 * @return the response body text
 */
public static String get(String urlString, Charset customCharset) {
    return HttpRequest.get(urlString).charset(customCharset).execute().body();
}
// Manual (disabled) check against a live endpoint; prints the response body.
@Test
@Disabled
public void getTest3() {
    // Tests the case where the URL contains a space
    final String result1 = HttpUtil.get("http://hutool.cn:5000/kf?abc= d");
    Console.log(result1);
}
/**
 * Creates a {@code Read} transform with library defaults: no topics or
 * partitions yet, the default consumer factory and consumer config, an
 * unbounded record count, processing-time timestamps, a 2-second consumer
 * polling timeout, and dynamic read / redistribution / finalize-commit
 * features disabled. Callers configure topics and deserializers via the
 * {@code with*} builder methods.
 */
public static <K, V> Read<K, V> read() {
    return new AutoValue_KafkaIO_Read.Builder<K, V>()
            .setTopics(new ArrayList<>())
            .setTopicPartitions(new ArrayList<>())
            .setConsumerFactoryFn(KafkaIOUtils.KAFKA_CONSUMER_FACTORY_FN)
            .setConsumerConfig(KafkaIOUtils.DEFAULT_CONSUMER_PROPERTIES)
            .setMaxNumRecords(Long.MAX_VALUE)
            .setCommitOffsetsInFinalizeEnabled(false)
            .setDynamicRead(false)
            .setTimestampPolicyFactory(TimestampPolicyFactory.withProcessingTime())
            .setConsumerPollingTimeout(2L)
            .setRedistributed(false)
            .setAllowDuplicates(false)
            .setRedistributeNumKeys(0)
            .build();
}
// Reading from unreachable brokers must surface a partition-initialization
// timeout at pipeline run time rather than hanging.
@Test
public void testUnreachableKafkaBrokers() {
    // Expect an exception when the Kafka brokers are not reachable on the workers.
    // We specify partitions explicitly so that splitting does not involve server interaction.
    // Set request timeout to 10ms so that test does not take long.
    thrown.expect(Exception.class);
    thrown.expectMessage("Reader-0: Timeout while initializing partition 'test-0'");
    int numElements = 1000;
    PCollection<Long> input = p.apply(
            KafkaIO.<Integer, Long>read()
                    .withBootstrapServers("8.8.8.8:9092") // Google public DNS ip.
                    .withTopicPartitions(ImmutableList.of(new TopicPartition("test", 0)))
                    .withKeyDeserializer(IntegerDeserializer.class)
                    .withValueDeserializer(LongDeserializer.class)
                    .withConsumerConfigUpdates(
                            ImmutableMap.of(
                                    ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 5,
                                    ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 8,
                                    ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 8,
                                    "default.api.timeout.ms", 10))
                    .withMaxNumRecords(10)
                    .withoutMetadata())
            .apply(Values.create());
    addCountingAsserts(input, numElements);
    p.run();
}
/**
 * Consumes host info reported by a distributor and feeds its stats into the
 * aggregator — but only for distributor nodes whose reported cluster state
 * version matches the current one; stale or missing versions are ignored.
 */
public void handleUpdatedHostInfo(NodeInfo node, HostInfo hostInfo) {
    if ( ! node.isDistributor()) return;
    final int hostVersion;
    if (hostInfo.getClusterStateVersionOrNull() == null) {
        // TODO: Consider logging a warning in the future (>5.36).
        // For now, a missing cluster state version probably means the content
        // node has not been updated yet.
        return;
    } else {
        hostVersion = hostInfo.getClusterStateVersionOrNull();
    }
    int currentStateVersion = clusterState.getVersion();
    if (hostVersion != currentStateVersion) {
        // The distributor may be old (null), or the distributor may not have updated
        // to the latest state version just yet. We log here with fine, because it may
        // also be a symptom of something wrong.
        log.log(Level.FINE, () -> "Current state version is " + currentStateVersion +
                ", while host info received from distributor " + node.getNodeIndex() +
                " is " + hostVersion);
        return;
    }
    statsAggregator.updateForDistributor(node.getNodeIndex(),
            StorageNodeStatsBridge.generate(hostInfo.getDistributor()));
}
@Test
void testWrongNodeType() {
    // Host info arriving from a non-distributor node must be ignored entirely.
    when(nodeInfo.isDistributor()).thenReturn(false);

    clusterStateView.handleUpdatedHostInfo(nodeInfo, createHostInfo("101"));

    verify(statsAggregator, never()).updateForDistributor(anyInt(), any());
}
/**
 * Registers the client-level "alive stream threads" gauge on the given metrics registry.
 *
 * @param streamsMetrics registry to attach the metric to
 * @param stateProvider  gauge supplying the current number of alive stream threads
 */
public static void addNumAliveStreamThreadMetric(final StreamsMetricsImpl streamsMetrics,
                                                 final Gauge<Integer> stateProvider) {
    streamsMetrics.addClientLevelMutableMetric(
        ALIVE_STREAM_THREADS, ALIVE_STREAM_THREADS_DESCRIPTION, RecordingLevel.INFO, stateProvider);
}
@Test
public void shouldAddAliveStreamThreadsMetric() {
    // Gauge that always reports a single alive thread.
    final Gauge<Integer> valueProvider = (config, now) -> 1;
    setUpAndVerifyMutableMetric(
        "alive-stream-threads",
        "The current number of alive stream threads that are running or participating in rebalance",
        valueProvider,
        () -> ClientMetrics.addNumAliveStreamThreadMetric(streamsMetrics, valueProvider)
    );
}
/**
 * Returns true if the given path resolves into one of the configured allowed roots.
 * An empty allow-list permits everything; a path that cannot be resolved is rejected.
 */
public boolean fileIsInAllowedPath(Path path) {
    if (allowedPaths.isEmpty()) {
        // No restrictions configured.
        return true;
    }
    final Path realFilePath = resolveRealPath(path);
    if (realFilePath == null) {
        return false;
    }
    // Accept the file if it lives under any resolvable allowed root.
    return allowedPaths.stream()
            .map(allowedPath -> resolveRealPath(allowedPath))
            .anyMatch(realAllowedPath -> realAllowedPath != null && realFilePath.startsWith(realAllowedPath));
}
@Test
public void fileDoesNotExist() {
    // A path that cannot be resolved on disk must be rejected even though
    // the allow-list itself contains a valid directory.
    final TreeSet<Path> allowed = new TreeSet<>(Collections.singleton(permittedTempDir.getRoot().toPath()));
    pathChecker = new AllowedAuxiliaryPathChecker(allowed);

    assertFalse(pathChecker.fileIsInAllowedPath(Paths.get("non-existent-file")));
}
/**
 * Decides whether this read operation must park and wait for more ring buffer items.
 * Also performs best-effort reads as a side effect when data is already available.
 *
 * @return true if the operation should wait for more items, false if it can complete
 */
@Override
public boolean shouldWait() {
    RingbufferContainer ringbuffer = getRingBufferContainerOrNull();
    if (resultSet == null) {
        // First invocation: lazily create the result set and position the read
        // cursor at the requested start sequence.
        resultSet = new ReadResultSetImpl<>(minSize, maxSize, getNodeEngine().getSerializationService(), filter);
        sequence = startSequence;
    }
    if (ringbuffer == null) {
        // The ring buffer does not exist (yet); wait only if the caller demands at least one item.
        return minSize > 0;
    }
    // Clamp in case the requested sequence fell outside the buffer's current head/tail window.
    sequence = ringbuffer.clampReadSequenceToBounds(sequence);
    if (minSize == 0) {
        // Best-effort read: drain whatever is currently available, never block.
        if (sequence < ringbuffer.tailSequence() + 1) {
            readMany(ringbuffer);
        }
        return false;
    }
    if (resultSet.isMinSizeReached()) {
        // enough items have been read, we are done.
        return false;
    }
    if (sequence == ringbuffer.tailSequence() + 1) {
        // the sequence is not readable
        return true;
    }
    readMany(ringbuffer);
    // Keep waiting until the minimum number of items has been accumulated.
    return !resultSet.isMinSizeReached();
}
@Test
public void whenOneAfterTail() {
    ringbuffer.add("tail");
    // Ask to read starting one position past the tail, where no item exists yet.
    ReadManyOperation op = getReadManyOperation(ringbuffer.tailSequence() + 1, 1, 1, null);

    // Nothing is readable at that sequence and minSize is 1, so the operation must wait.
    // (The original comment claimed the opposite; the assertion below is the contract.)
    boolean shouldWait = op.shouldWait();
    assertTrue(shouldWait);

    // No items were consumed and the read cursor has not advanced.
    ReadResultSetImpl response = getReadResultSet(op);
    assertEquals(0, response.readCount());
    assertEquals(0, response.getNextSequenceToReadFrom());
}
/**
 * {@inheritDoc}
 *
 * <p>This implementation exposes no numeric function list and always returns {@code null}.
 */
@Override
public String getNumericFunctions() {
    return null;
}
@Test
void assertGetNumericFunctions() {
    // The metadata adapter exposes no numeric function list, so null is expected.
    assertNull(metaData.getNumericFunctions());
}
/**
 * Returns the near-cache configuration of this map config, or {@code null} if none was set.
 */
public NearCacheConfig getNearCacheConfig() {
    return nearCacheConfig;
}
@Test
public void testGetNearCacheConfig() {
    // A freshly constructed MapConfig has no near-cache configured.
    MapConfig mapConfig = new MapConfig();
    assertNull(mapConfig.getNearCacheConfig());
}
/**
 * A cost is infinite if it is the canonical INFINITY instance, or if either its
 * cpu or cpuRate component is positive infinity.
 */
@Override
public boolean isInfinite() {
    if (this.equals(INFINITY)) {
        return true;
    }
    return Double.POSITIVE_INFINITY == this.cpu || Double.POSITIVE_INFINITY == this.cpuRate;
}
@Test
public void testOneInfiniteValue() {
    // A single infinite component is enough to make the whole cost infinite.
    BeamCostModel infiniteCpuCost = BeamCostModel.FACTORY.makeCost(Double.POSITIVE_INFINITY, 1);
    Assert.assertTrue(infiniteCpuCost.isInfinite());
}
/**
 * Parse a TrueType font from the given source.
 *
 * @param randomAccessRead the font source; always closed by this method
 * @return the parsed font, which keeps reading from the wrapped data stream
 * @throws IOException if reading or parsing fails
 */
public TrueTypeFont parse(RandomAccessRead randomAccessRead) throws IOException {
    // The data stream wraps the source; the source itself is closed below while the
    // stream lives on with the returned font (presumably buffered — TODO confirm).
    RandomAccessReadDataStream dataStream = new RandomAccessReadDataStream(randomAccessRead);
    try (randomAccessRead) {
        return parse(dataStream);
    } catch (IOException ex) {
        // close only on error (source is still being accessed later)
        dataStream.close();
        throw ex;
    }
}
@Test void testUTCDate() throws IOException { final File testFile = new File("src/test/resources/ttf/LiberationSans-Regular.ttf"); TimeZone utc = TimeZone.getTimeZone("UTC"); //Before PDFBOX-2122, TTFDataStream was using the default TimeZone //Set the default to something not UTC and see if a UTC timeZone is returned TimeZone.setDefault(TimeZone.getTimeZone("America/Los Angeles")); TTFParser parser = new TTFParser(); TrueTypeFont ttf = parser.parse(new RandomAccessReadBufferedFile(testFile)); Calendar created = ttf.getHeader().getCreated(); assertEquals(created.getTimeZone(), utc); Calendar target = Calendar.getInstance(utc); target.set(2010, 5, 18, 10, 23, 22); target.set(Calendar.MILLISECOND, 0); assertEquals(target, created); }
/**
 * Returns a fresh accumulator representing the variance of zero elements.
 */
@Override
public VarianceAccumulator createAccumulator() {
    return VarianceAccumulator.ofZeroElements();
}
@Test
public void testCreatesEmptyAccumulator() {
    // A newly created accumulator must equal the canonical zero-element instance.
    assertEquals(VarianceAccumulator.EMPTY, varianceFn.createAccumulator());
}
/**
 * Builds the Reactor Netty WebSocket client used for websocket proxying, honoring the
 * configured maximum frame payload size (specified in MB) and proxy-ping handling flag.
 */
@Bean
public ReactorNettyWebSocketClient reactorNettyWebSocketClient(final ShenyuConfig shenyuConfig,
                                                               final ObjectProvider<HttpClient> httpClient) {
    Supplier<WebsocketClientSpec.Builder> spec = WebsocketClientSpec.builder()
            .maxFramePayloadLength(shenyuConfig.getWebsocket().getMaxFramePayloadSize() * Constants.BYTES_PER_MB)
            .handlePing(shenyuConfig.getWebsocket().getEnableProxyPing());
    // Fall back to a default HttpClient when none is provided by the context.
    return new ReactorNettyWebSocketClient(httpClient.getIfAvailable(HttpClient::create), spec);
}
@Test
public void testReactorNettyWebSocketClient() {
    // The websocket client bean must be registered under its expected name.
    applicationContextRunner.run(context ->
            assertNotNull(context.getBean("reactorNettyWebSocketClient", ReactorNettyWebSocketClient.class)));
}
/**
 * SQL JSON_FORMAT: returns the textual form of a JSON value.
 * The JSON slice is already held as serialized text, so it is returned unchanged.
 */
@ScalarFunction
@SqlType(StandardTypes.VARCHAR)
public static Slice jsonFormat(@SqlType(StandardTypes.JSON) Slice slice) {
    return slice;
}
@Test
public void testJsonFormat() {
    // The stored JSON text is canonical (no spaces), so JSON_FORMAT echoes it verbatim.
    assertFunction("JSON_FORMAT(JSON '[\"a\", \"b\"]')", VARCHAR, "[\"a\",\"b\"]");
}
/**
 * Two distributions are equal when both the distribution map and the original
 * partition schemas match. Uses exact-class comparison, so subclasses never compare equal.
 */
@Override
public boolean equals( Object o ) {
  if ( this == o ) {
    return true;
  }
  if ( o == null || o.getClass() != getClass() ) {
    return false;
  }
  final SlaveStepCopyPartitionDistribution other = (SlaveStepCopyPartitionDistribution) o;
  return Objects.equals( distribution, other.distribution )
    && Objects.equals( originalPartitionSchemas, other.originalPartitionSchemas );
}
@Test
public void equalsDifferentClassesTest() {
    // equals() compares runtime classes, so a completely foreign type must be rejected.
    Object notADistribution = Integer.valueOf( 5 );
    Assert.assertFalse( slaveStep.equals( notADistribution ) );
}
/**
 * Accumulates XMLEvents into complete first-level XML elements.
 *
 * <p>Document-level events and inter-element whitespace are dropped; the stream-open
 * and stream-close events (depth &lt;= 1) are passed through as raw {@link XMLEvent}s;
 * everything in between is buffered and emitted as a dom4j Element once the matching
 * end tag at depth 1 is written.
 *
 * @param ctx    the channel handler context
 * @param object the inbound object; only {@link XMLEvent}s are processed
 * @param out    receives pass-through events and completed dom4j elements
 * @throws Exception if writing or transforming the buffered XML fails
 */
@Override
protected void decode(ChannelHandlerContext ctx, Object object, List out) throws Exception {
    try {
        if (object instanceof XMLEvent) {
            final XMLEvent event = (XMLEvent) object;

            // Skip document-level bookkeeping events entirely.
            if (event.isStartDocument() || event.isEndDocument()) {
                return;
            }

            // Ignore character data between top-level elements (e.g. whitespace keep-alives).
            if (event.isCharacters() && depth <= 1) {
                return;
            }

            // Stream-open element: pass through unmerged.
            if (depth < 1 && event.isStartElement()) {
                out.add(object);
                depth++;
                return;
            }

            // Stream-close element: pass through unmerged.
            if (depth <= 1 && event.isEndElement()) {
                out.add(object);
                depth--;
                return;
            }

            writer.add(event);

            if (event.isStartElement()) {
                depth++;
            } else if (event.isEndElement()) {
                depth--;
                if (depth == 1) {
                    // A complete first-level element is buffered: flush, emit as DOM, reset.
                    writer.flush();
                    org.dom4j.Element xmlElement = transform().getRootElement();
                    out.add(xmlElement);
                    writer.close();
                    resetWriter();
                }
            }
        }
    } catch (Exception e) {
        // Bug fix: e.getCause() can be null, which previously threw an NPE here and
        // masked the original decoding failure. Fall back to the exception itself.
        Throwable cause = e.getCause() != null ? e.getCause() : e;
        logger.info(cause.getMessage());
        throw e;
    }
}
@Test public void testMergeStreamOpen() throws Exception { List<Object> list = Lists.newArrayList(); streamOpenXmlEventList.forEach(xmlEvent -> { try { xmlMerger.decode(new ChannelHandlerContextAdapter(), xmlEvent, list); } catch (Exception e) { fail(); } }); // StreamOpen should not be merged, should be passed as XMLEvent assertThat(list.size(), Matchers.is(1)); assertThat(list.get(0), Matchers.is(instanceOf(XMLEvent.class))); assertThat(((XMLEvent) list.get(0)).isStartElement(), Matchers.is(true)); }
/**
 * Static factory: creates a UBlock holding an immutable copy of the given statements.
 */
public static UBlock create(List<UStatement> statements) {
    return new AutoValue_UBlock(ImmutableList.copyOf(statements));
}
@Test
public void equality() {
    // A block containing a single System.exit(0) statement, extracted for readability.
    UExpressionStatement exitStatement =
        UExpressionStatement.create(
            UMethodInvocation.create(
                UStaticIdent.create(
                    "java.lang.System",
                    "exit",
                    UMethodType.create(UPrimitiveType.VOID, UPrimitiveType.INT)),
                ULiteral.intLit(0)));
    // The empty block and the exit-block must be unequal to each other but
    // internally consistent under equals/hashCode.
    new EqualsTester()
        .addEqualityGroup(UBlock.create())
        .addEqualityGroup(UBlock.create(exitStatement))
        .testEquals();
}
/**
 * Pages through the GoView projects created by the given user.
 *
 * @param pageReqVO paging parameters
 * @param userId    id of the creator whose projects are listed
 * @return one page of matching projects
 */
@Override
public PageResult<GoViewProjectDO> getMyProjectPage(PageParam pageReqVO, Long userId) {
    return goViewProjectMapper.selectPage(pageReqVO, userId);
}
@Test public void testGetMyGoViewProjectPage() { // mock 数据 GoViewProjectDO dbGoViewProject = randomPojo(GoViewProjectDO.class, o -> { // 等会查询到 o.setCreator("1"); }); goViewProjectMapper.insert(dbGoViewProject); // 测试 userId 不匹配 goViewProjectMapper.insert(cloneIgnoreId(dbGoViewProject, o -> o.setCreator("2"))); // 准备参数 PageParam reqVO = new PageParam(); Long userId = 1L; // 调用 PageResult<GoViewProjectDO> pageResult = goViewProjectService.getMyProjectPage(reqVO, userId); // 断言 assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(dbGoViewProject, pageResult.getList().get(0)); }
public RegionsResponse getAvailableRegions() { List<AWSRegion> regions = Region.regions().stream() // Ignore the global region. CloudWatch and Kinesis cannot be used with global regions. .filter(r -> !r.isGlobalRegion()) .map(r -> { // Build a single AWSRegionResponse with id, description, and displayValue. RegionMetadata regionMetadata = r.metadata(); String label = String.format(Locale.ROOT, "%s: %s", regionMetadata.description(), regionMetadata.id()); return AWSRegion.create(regionMetadata.id(), label); }) .sorted(Comparator.comparing(AWSRegion::regionId)) .collect(Collectors.toList()); return RegionsResponse.create(regions, regions.size()); }
@Test public void regionTest() { List<AWSRegion> regions = awsService.getAvailableRegions().regions(); // Use a loop presence check. // Check format of random region. boolean foundEuWestRegion = false; for (AWSRegion availableAWSRegion : regions) { if (availableAWSRegion.regionId().equals("eu-west-2")) { foundEuWestRegion = true; } } assertTrue(foundEuWestRegion); assertTrue(regions.stream().anyMatch(r -> r.displayValue().equals("Europe (Stockholm): eu-north-1"))); // AWS periodically adds regions. The number should generally only increase. No need to check exact number. assertTrue("There should be at least 34 total regions.", regions.size() >= 34); }
/**
 * Scans an edit log stream, counting valid operations until end-of-stream,
 * unrecoverable corruption, or a txid at/above {@code maxTxIdToScan} is seen.
 *
 * @param in            the stream to scan; its position is advanced as a side effect
 * @param maxTxIdToScan stop scanning once a txid >= this value has been observed
 * @return a validation result holding the end position of the last successfully
 *         scanned op and the highest txid seen; hasCorruptHeader is always false
 *         here — header corruption is detected by the caller
 */
static EditLogValidation scanEditLog(EditLogInputStream in, long maxTxIdToScan) {
    long lastPos;
    long lastTxId = HdfsServerConstants.INVALID_TXID;
    long numValid = 0;
    while (true) {
        long txid;
        // Remember where this op started, so the returned length excludes a trailing bad op.
        lastPos = in.getPosition();
        try {
            if ((txid = in.scanNextOp()) == HdfsServerConstants.INVALID_TXID) {
                break;
            }
        } catch (Throwable t) {
            FSImage.LOG.warn("Caught exception after scanning through " + numValid + " ops from " + in + " while determining its valid length. Position was " + lastPos, t);
            // Try to skip past the corrupt region and keep scanning.
            in.resync();
            FSImage.LOG.warn("After resync, position is " + in.getPosition());
            if (in.getPosition() <= lastPos) {
                // Resync made no forward progress; bail out to avoid an infinite loop.
                FSImage.LOG.warn("After resync, the position, {} is not greater " + "than the previous position {}. Skipping remainder of this log.", in.getPosition(), lastPos);
                break;
            }
            continue;
        }
        // Track the highest txid observed so far.
        if (lastTxId == HdfsServerConstants.INVALID_TXID || txid > lastTxId) {
            lastTxId = txid;
        }
        if (lastTxId >= maxTxIdToScan) {
            break;
        }
        numValid++;
    }
    return new EditLogValidation(lastPos, lastTxId, false);
}
@Test public void testValidateEditLogWithCorruptHeader() throws IOException { File testDir = new File(TEST_DIR, "testValidateEditLogWithCorruptHeader"); SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap(); File logFile = prepareUnfinalizedTestEditLog(testDir, 2, offsetToTxId); RandomAccessFile rwf = new RandomAccessFile(logFile, "rw"); try { rwf.seek(0); rwf.writeLong(42); // corrupt header } finally { rwf.close(); } EditLogValidation validation = EditLogFileInputStream.scanEditLog(logFile, Long.MAX_VALUE, true); assertTrue(validation.hasCorruptHeader()); }
/**
 * Builds a generic key/value record from the given column names and value expressions.
 *
 * @param columnNames    explicit target columns; empty means "use the schema's implicit columns"
 * @param expressions    one value expression per target column
 * @param schema         logical schema of the target source
 * @param dataSourceType KSTREAM or KTABLE; tables additionally require every primary key value
 * @return the assembled key, value and timestamp
 * @throws KsqlException on a column/expression count mismatch, an unknown or
 *         disallowed column, or a missing table primary-key value
 */
public KsqlGenericRecord build(
    final List<ColumnName> columnNames,
    final List<Expression> expressions,
    final LogicalSchema schema,
    final DataSourceType dataSourceType
) {
  final List<ColumnName> columns = columnNames.isEmpty()
      ? implicitColumns(schema)
      : columnNames;
  if (columns.size() != expressions.size()) {
    throw new KsqlException(
        "Expected a value for each column."
            + " Expected Columns: " + columnNames
            + ". Got " + expressions);
  }
  // Pseudo columns (e.g. ROWTIME) may be referenced by name, so validate against a
  // schema that includes them.
  final LogicalSchema schemaWithPseudoColumns = withPseudoColumns(schema);
  for (ColumnName col : columns) {
    if (!schemaWithPseudoColumns.findColumn(col).isPresent()) {
      throw new KsqlException("Column name " + col + " does not exist.");
    }
    if (SystemColumns.isDisallowedForInsertValues(col)) {
      throw new KsqlException("Inserting into column " + col + " is not allowed.");
    }
  }
  // Evaluate each expression into a concrete value, coercing to the column type.
  final Map<ColumnName, Object> values = resolveValues(
      columns,
      expressions,
      schemaWithPseudoColumns,
      functionRegistry,
      config
  );
  if (dataSourceType == DataSourceType.KTABLE) {
    // Tables require a value for every primary key column; collect any that are missing.
    final String noValue = schemaWithPseudoColumns.key().stream()
        .map(Column::name)
        .filter(colName -> !values.containsKey(colName))
        .map(ColumnName::text)
        .collect(Collectors.joining(", "));
    if (!noValue.isEmpty()) {
      throw new KsqlException("Value for primary key column(s) "
          + noValue + " is required for tables");
    }
  }
  // Use an explicit ROWTIME value when supplied, otherwise the injected clock.
  final long ts = (long) values.getOrDefault(SystemColumns.ROWTIME_NAME, clock.getAsLong());
  final GenericKey key = buildKey(schema, values);
  final GenericRow value = buildValue(schema, values);
  return KsqlGenericRecord.of(key, value, ts);
}
@Test public void shouldBuildCoerceTypes() { // Given: final LogicalSchema schema = LogicalSchema.builder() .keyColumn(KEY, SqlTypes.BIGINT) .valueColumn(COL0, SqlTypes.BIGINT) .build(); final List<ColumnName> names = ImmutableList.of(KEY, COL0); final Expression exp = new IntegerLiteral(1); // When: final KsqlGenericRecord record = recordFactory.build( names, ImmutableList.of(exp, exp), schema, DataSourceType.KSTREAM ); // Then: assertThat(record, is(KsqlGenericRecord.of( GenericKey.genericKey(1L), GenericRow.genericRow(1L), 0 ))); }
/**
 * Converts an SMPP {@link AlertNotification} into a Camel {@link SmppMessage},
 * copying the PDU's identifiers and the source/ESME address fields into headers.
 *
 * @param camelContext      the Camel context the message belongs to
 * @param alertNotification the received alert notification PDU
 * @return a message whose headers mirror the notification's fields
 */
public SmppMessage createSmppMessage(CamelContext camelContext, AlertNotification alertNotification) {
    SmppMessage smppMessage = new SmppMessage(camelContext, alertNotification, configuration);
    smppMessage.setHeader(SmppConstants.MESSAGE_TYPE, SmppMessageType.AlertNotification.toString());
    smppMessage.setHeader(SmppConstants.SEQUENCE_NUMBER, alertNotification.getSequenceNumber());
    smppMessage.setHeader(SmppConstants.COMMAND_ID, alertNotification.getCommandId());
    smppMessage.setHeader(SmppConstants.COMMAND_STATUS, alertNotification.getCommandStatus());
    smppMessage.setHeader(SmppConstants.SOURCE_ADDR, alertNotification.getSourceAddr());
    smppMessage.setHeader(SmppConstants.SOURCE_ADDR_NPI, alertNotification.getSourceAddrNpi());
    smppMessage.setHeader(SmppConstants.SOURCE_ADDR_TON, alertNotification.getSourceAddrTon());
    smppMessage.setHeader(SmppConstants.ESME_ADDR, alertNotification.getEsmeAddr());
    smppMessage.setHeader(SmppConstants.ESME_ADDR_NPI, alertNotification.getEsmeAddrNpi());
    smppMessage.setHeader(SmppConstants.ESME_ADDR_TON, alertNotification.getEsmeAddrTon());
    return smppMessage;
}
@Test
public void createSmppMessageFromDataSmShouldReturnASmppMessage() {
    // Populate a DataSm PDU with a representative value for every mapped field.
    DataSm dataSm = new DataSm();
    dataSm.setSequenceNumber(1);
    dataSm.setCommandId(1);
    dataSm.setCommandStatus(0);
    dataSm.setSourceAddr("1818");
    dataSm.setSourceAddrNpi(NumberingPlanIndicator.NATIONAL.value());
    dataSm.setSourceAddrTon(TypeOfNumber.NATIONAL.value());
    dataSm.setDestAddress("1919");
    dataSm.setDestAddrNpi(NumberingPlanIndicator.NATIONAL.value());
    dataSm.setDestAddrTon(TypeOfNumber.NATIONAL.value());
    dataSm.setServiceType("WAP");
    dataSm.setRegisteredDelivery((byte) 0);

    SmppMessage smppMessage = binding.createSmppMessage(camelContext, dataSm, "1");

    // The binding keeps the body empty and maps everything into headers.
    assertNull(smppMessage.getBody());
    assertEquals(14, smppMessage.getHeaders().size());
    assertEquals("1", smppMessage.getHeader(SmppConstants.ID));
    assertEquals(1, smppMessage.getHeader(SmppConstants.SEQUENCE_NUMBER));
    assertEquals(1, smppMessage.getHeader(SmppConstants.COMMAND_ID));
    assertEquals(0, smppMessage.getHeader(SmppConstants.COMMAND_STATUS));
    assertEquals("1818", smppMessage.getHeader(SmppConstants.SOURCE_ADDR));
    // NATIONAL NPI has raw byte value 8; NATIONAL TON has raw byte value 2.
    assertEquals((byte) 8, smppMessage.getHeader(SmppConstants.SOURCE_ADDR_NPI));
    assertEquals((byte) 2, smppMessage.getHeader(SmppConstants.SOURCE_ADDR_TON));
    assertEquals("1919", smppMessage.getHeader(SmppConstants.DEST_ADDR));
    assertEquals((byte) 8, smppMessage.getHeader(SmppConstants.DEST_ADDR_NPI));
    assertEquals((byte) 2, smppMessage.getHeader(SmppConstants.DEST_ADDR_TON));
    assertEquals("WAP", smppMessage.getHeader(SmppConstants.SERVICE_TYPE));
    assertEquals((byte) 0, smppMessage.getHeader(SmppConstants.REGISTERED_DELIVERY));
    assertEquals((byte) 0, smppMessage.getHeader(SmppConstants.DATA_CODING));
    assertEquals(SmppMessageType.DataSm.toString(), smppMessage.getHeader(SmppConstants.MESSAGE_TYPE));
}
/**
 * Resolves the chosen server into a SocketAddress.
 *
 * <p>Fast path: the host is an IP literal and is parsed without any DNS lookup.
 * If discovery handed back a hostname instead, a metric counter is bumped and a
 * resolving InetSocketAddress is created; if that fallback also fails, the second
 * failure is attached as a suppressed exception and the original parse error rethrown.
 *
 * @param chosenServer the resolved server (host + port)
 * @param originName   origin used to tag the unresolved-host counter; may be null
 * @return the socket address for the chosen server
 */
@VisibleForTesting
static SocketAddress pickAddressInternal(ResolverResult chosenServer, @Nullable OriginName originName) {
    String rawHost;
    int port;
    rawHost = chosenServer.getHost();
    port = chosenServer.getPort();
    InetSocketAddress serverAddr;
    try {
        // InetAddresses.forString only parses IP literals; it never performs DNS resolution.
        InetAddress ipAddr = InetAddresses.forString(rawHost);
        serverAddr = new InetSocketAddress(ipAddr, port);
    } catch (IllegalArgumentException e1) {
        LOG.warn("NettyClientConnectionFactory got an unresolved address, addr: {}", rawHost);
        Counter unresolvedDiscoveryHost = SpectatorUtils.newCounter(
                "unresolvedDiscoveryHost",
                originName == null ? "unknownOrigin" : originName.getTarget());
        unresolvedDiscoveryHost.increment();
        try {
            // Fall back to hostname-based construction (may resolve via DNS).
            serverAddr = new InetSocketAddress(rawHost, port);
        } catch (RuntimeException e2) {
            // Keep the parse failure primary; attach the fallback failure for diagnosis.
            e1.addSuppressed(e2);
            throw e1;
        }
    }
    return serverAddr;
}
@Test
void pickAddressInternal_discovery() {
    // A discovery result whose host is an IP literal must be parsed directly.
    InstanceInfo instanceInfo = Builder.newBuilder()
        .setAppName("app")
        .setHostName("192.168.0.1")
        .setPort(443)
        .build();
    DiscoveryResult discoveryResult = DiscoveryResult.from(instanceInfo, true);

    SocketAddress addr =
        DefaultClientChannelManager.pickAddressInternal(discoveryResult, OriginName.fromVip("vip"));

    Truth.assertThat(addr).isInstanceOf(InetSocketAddress.class);
    InetSocketAddress inetAddr = (InetSocketAddress) addr;
    assertEquals(InetAddresses.forString("192.168.0.1"), inetAddr.getAddress());
    assertEquals(443, inetAddr.getPort());
}