focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Delegates the global-admin check for the given username to the underlying RoleService.
@Override public boolean hasGlobalAdminRole(String username) { return roleService.hasGlobalAdminRole(username); }
// Verifies that a NacosUser marked as global admin is reported as having the global admin role.
// NOTE(review): passes a NacosUser where the focal signature takes a String — presumably an overload; confirm.
@Test void testHasGlobalAdminRole3() { NacosUser nacosUser = new NacosUser("nacos"); nacosUser.setGlobalAdmin(true); boolean hasGlobalAdminRole = abstractAuthenticationManager.hasGlobalAdminRole(nacosUser); assertTrue(hasGlobalAdminRole); }
// Convenience overload: creates a daemon thread using the default "hollow" name component.
public static Thread daemonThread(Runnable r, Class<?> context, String description) { return daemonThread(r, "hollow", context, description); }
// Verifies that a daemon thread created with an explicit name carries that name and the daemon flag.
@Test
public void named() {
    Thread thread = daemonThread(() -> {}, "Thready McThreadson");
    assertEquals("Thready McThreadson", thread.getName());
    assertTrue(thread.isDaemon()); // TODO(timt): invariant
}
// Lists all Integration custom resources: builds one row per integration with its name, phase,
// integration-kit name and a readiness summary ("1/1" when a "Ready" condition is "True",
// otherwise "0/1"; phase "Unknown" when the integration has no status), then prints either
// just the names (when 'name' is set) or an ASCII table. Always returns exit code 0.
public Integer doCall() throws Exception { List<Row> rows = new ArrayList<>(); List<Integration> integrations = client(Integration.class).list().getItems(); integrations .forEach(integration -> { Row row = new Row(); row.name = integration.getMetadata().getName(); row.ready = "0/1"; if (integration.getStatus() != null) { row.phase = integration.getStatus().getPhase(); if (integration.getStatus().getConditions() != null) { row.ready = integration.getStatus().getConditions().stream().filter(c -> c.getType().equals("Ready")) .anyMatch(c -> c.getStatus().equals("True")) ? "1/1" : "0/1"; } row.kit = integration.getStatus().getIntegrationKit() != null ? integration.getStatus().getIntegrationKit().getName() : ""; } else { row.phase = "Unknown"; } rows.add(row); }); if (!rows.isEmpty()) { if (name) { rows.forEach(r -> printer().println(r.name)); } else { printer().println(AsciiTable.getTable(AsciiTable.NO_BORDERS, rows, Arrays.asList( new Column().header("NAME").dataAlign(HorizontalAlign.LEFT) .maxWidth(40, OverflowBehaviour.ELLIPSIS_RIGHT) .with(r -> r.name), new Column().header("PHASE").headerAlign(HorizontalAlign.LEFT) .with(r -> r.phase), new Column().header("KIT").headerAlign(HorizontalAlign.LEFT).with(r -> r.kit), new Column().header("READY").dataAlign(HorizontalAlign.CENTER).with(r -> r.ready)))); } } return 0; }
// Verifies that a running integration with a True "Ready" condition is listed as 1/1 ready,
// with its phase and kit name, in the table output.
@Test public void shouldListReadyIntegration() throws Exception { Integration integration = createIntegration(); IntegrationStatus status = new IntegrationStatus(); IntegrationKit kit = new IntegrationKit(); kit.setName("kit-123456789"); status.setIntegrationKit(kit); status.setPhase("Running"); status.setConditions(new ArrayList<>()); Conditions readyCondition = new Conditions(); readyCondition.setType("Ready"); readyCondition.setStatus("True"); status.getConditions().add(readyCondition); integration.setStatus(status); kubernetesClient.resources(Integration.class).resource(integration).create(); createCommand().doCall(); List<String> output = printer.getLines(); Assertions.assertEquals("NAME PHASE KIT READY", output.get(0)); Assertions.assertEquals("routes Running kit-123456789 1/1", output.get(1)); }
/**
 * Returns true when the BigQuery table must be (re)created: it does not exist, its base path
 * is outdated, its source URIs do not reference the manifest folder, or a BigLake connection
 * id is configured but absent from the table definition.
 */
public boolean tableNotExistsOrDoesNotMatchSpecification(String tableName) {
    TableId tableId = TableId.of(projectId, datasetName, tableName);
    Table table = bigquery.getTable(tableId);
    if (table == null || !table.exists()) {
        return true;
    }
    ExternalTableDefinition externalTableDefinition = table.getDefinition();
    boolean manifestDoesNotExist = externalTableDefinition.getSourceUris() == null
        || externalTableDefinition.getSourceUris().stream().noneMatch(uri -> uri.contains(ManifestFileWriter.ABSOLUTE_PATH_MANIFEST_FOLDER_NAME));
    if (isBasePathUpdated(externalTableDefinition)) {
        // if table base path is outdated, we need to replace the table.
        return true;
    }
    if (!StringUtils.isNullOrEmpty(config.getString(BIGQUERY_SYNC_BIG_LAKE_CONNECTION_ID))) {
        // If bigLakeConnectionId is present and connectionId is not present in table definition, we need to replace the table.
        return manifestDoesNotExist || externalTableDefinition.getConnectionId() == null;
    }
    return manifestDoesNotExist;
}
// Exercises tableNotExistsOrDoesNotMatchSpecification across the main cases: missing table,
// missing manifest URI, matching manifest, outdated base path, and up-to-date base path.
@Test
void testTableNotExistsOrDoesNotMatchSpecification() {
    BigQuerySyncConfig config = new BigQuerySyncConfig(properties);
    client = new HoodieBigQuerySyncClient(config, mockBigQuery);
    // table does not exist
    assertTrue(client.tableNotExistsOrDoesNotMatchSpecification(TEST_TABLE));
    TableId tableId = TableId.of(PROJECT_ID, TEST_DATASET, TEST_TABLE);
    Table table = mock(Table.class);
    when(mockBigQuery.getTable(tableId)).thenReturn(table);
    ExternalTableDefinition externalTableDefinition = mock(ExternalTableDefinition.class);
    when(table.exists()).thenReturn(true);
    when(table.getDefinition()).thenReturn(externalTableDefinition);
    // manifest does not exist
    when(externalTableDefinition.getSourceUris()).thenReturn(Collections.emptyList());
    assertTrue(client.tableNotExistsOrDoesNotMatchSpecification(TEST_TABLE));
    // manifest exists and the table matches the specification
    when(externalTableDefinition.getSourceUris()).thenReturn(Collections.singletonList(
        basePath + "/.hoodie/" + ManifestFileWriter.ABSOLUTE_PATH_MANIFEST_FOLDER_NAME));
    assertFalse(client.tableNotExistsOrDoesNotMatchSpecification(TEST_TABLE));
    // manifest exists but base path is outdated
    when(externalTableDefinition.getSourceUris()).thenReturn(Collections.singletonList(ManifestFileWriter.ABSOLUTE_PATH_MANIFEST_FOLDER_NAME));
    when(externalTableDefinition.getHivePartitioningOptions()).thenReturn(
        HivePartitioningOptions.newBuilder().setSourceUriPrefix(basePath + "1").build());
    assertTrue(client.tableNotExistsOrDoesNotMatchSpecification(TEST_TABLE));
    // manifest exists, base path is up-to-date
    when(externalTableDefinition.getHivePartitioningOptions()).thenReturn(
        HivePartitioningOptions.newBuilder().setSourceUriPrefix(basePath + "/").build());
    assertFalse(client.tableNotExistsOrDoesNotMatchSpecification(TEST_TABLE));
}
// Resolves the legacy MySQL column name: drivers with major version > 3 use the column label,
// older drivers use the raw column name. Fails fast with KettleDatabaseException on null
// metadata and wraps any lookup failure, preserving the cause.
public String getLegacyColumnName( DatabaseMetaData dbMetaData, ResultSetMetaData rsMetaData, int index ) throws KettleDatabaseException { if ( dbMetaData == null ) { throw new KettleDatabaseException( BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameNoDBMetaDataException" ) ); } if ( rsMetaData == null ) { throw new KettleDatabaseException( BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameNoRSMetaDataException" ) ); } try { return dbMetaData.getDriverMajorVersion() > 3 ? rsMetaData.getColumnLabel( index ) : rsMetaData.getColumnName( index ); } catch ( Exception e ) { throw new KettleDatabaseException( String.format( "%s: %s", BaseMessages.getString( PKG, "MySQLDatabaseMeta.Exception.LegacyColumnNameException" ), e.getMessage() ), e ); } }
// Verifies that a null DatabaseMetaData argument is rejected with KettleDatabaseException.
@Test( expected = KettleDatabaseException.class ) public void testGetLegacyColumnNameNullDBMetaDataException() throws Exception { new MySQLDatabaseMeta().getLegacyColumnName( null, getResultSetMetaData(), 1 ); }
/**
 * Returns whether the given calldata is an EIP-3668 (CCIP read) response, i.e. whether it
 * starts with the 4-byte CCIP interface id ("0x" plus 8 hex characters = 10 characters).
 */
public static boolean isEIP3668(String data) {
    return data != null
            && data.length() >= 10
            && EnsUtils.EIP_3668_CCIP_INTERFACE_ID.equals(data.substring(0, 10));
}
// Verifies that calldata starting with the EIP-3668 CCIP interface id is recognized.
@Test void isEIP3668WhenSuccess() { assertTrue(EnsUtils.isEIP3668(EnsUtils.EIP_3668_CCIP_INTERFACE_ID + "some data")); }
// Convenience overload: sends the request with no associated network-service context.
@Override public HttpResponse send(HttpRequest httpRequest) throws IOException { return send(httpRequest, null); }
// Builds an HttpClient configured to trust all certificates and verifies that a request to a
// mock HTTPS server with an otherwise-invalid certificate succeeds and returns the expected body.
@Test public void send_whenInvalidCertificatesAreIgnored_getResponseWithoutException() throws GeneralSecurityException, IOException { InetAddress loopbackAddress = InetAddress.getLoopbackAddress(); String host = "host.com"; MockWebServer mockWebServer = startMockWebServerWithSsl(loopbackAddress); int port = mockWebServer.url("/").port(); NetworkService networkService = NetworkService.newBuilder() .setNetworkEndpoint( NetworkEndpointUtils.forIpHostnameAndPort( loopbackAddress.getHostAddress(), host, port)) .build(); HttpClientCliOptions cliOptions = new HttpClientCliOptions(); HttpClientConfigProperties configProperties = new HttpClientConfigProperties(); cliOptions.trustAllCertificates = configProperties.trustAllCertificates = true; HttpClient httpClient = Guice.createInjector( new AbstractModule() { @Override protected void configure() { install(new HttpClientModule.Builder().build()); bind(HttpClientCliOptions.class).toInstance(cliOptions); bind(HttpClientConfigProperties.class).toInstance(configProperties); } }) .getInstance(HttpClient.class); HttpResponse response = httpClient.send( get(String.format("https://%s:%d", host, port)).withEmptyHeaders().build(), networkService); assertThat(response.bodyString()).hasValue("body"); mockWebServer.shutdown(); }
// Unsupported operation on this adapter: always throws MethodNotAvailableException.
@Override @MethodNotAvailable public boolean evict(K key) { throw new MethodNotAvailableException(); }
// Verifies that evict on the adapter reports the method as not available.
@Test(expected = MethodNotAvailableException.class) public void testEvict() { adapter.evict(23); }
// Emits the effective power-saving state by combining the user's power-save preference
// ("never"/"always"/default), an optional enable pref (treated as always-enabled when
// enablePrefResId == 0), battery and charger broadcast intents (seeded with BATTERY_OKAY and
// POWER_DISCONNECTED so the stream fires immediately), and the OS power-saving state. In the
// default mode the result is true when the OS reports power saving, or when the battery is
// low while the charger is disconnected. Only distinct changes are emitted.
@CheckReturnValue @NonNull public static Observable<Boolean> observePowerSavingState( @NonNull Context context, @StringRes int enablePrefResId, @BoolRes int defaultValueResId) { final RxSharedPrefs prefs = AnyApplication.prefs(context); return Observable.combineLatest( prefs .getString( R.string.settings_key_power_save_mode, R.string.settings_default_power_save_mode_value) .asObservable(), enablePrefResId == 0 ? Observable.just(true) : prefs.getBoolean(enablePrefResId, defaultValueResId).asObservable(), RxBroadcastReceivers.fromIntentFilter( context.getApplicationContext(), getBatteryStateIntentFilter()) .startWith(new Intent(Intent.ACTION_BATTERY_OKAY)), RxBroadcastReceivers.fromIntentFilter( context.getApplicationContext(), getChargerStateIntentFilter()) .startWith(new Intent(Intent.ACTION_POWER_DISCONNECTED)), getOsPowerSavingStateObservable(context), (powerSavingPref, enabledPref, batteryIntent, chargerIntent, osPowerSavingState) -> { if (!enabledPref) return false; switch (powerSavingPref) { case "never": return false; case "always": return true; default: return osPowerSavingState || (Intent.ACTION_BATTERY_LOW.equals(batteryIntent.getAction()) && Intent.ACTION_POWER_DISCONNECTED.equals(chargerIntent.getAction())); } }) .distinctUntilChanged(); }
// Verifies that in "always" mode the observable reports power saving regardless of battery
// state, and that values persist (no further emissions) after disposal.
// NOTE(review): calls a two-argument overload of observePowerSavingState — verify it delegates
// to the three-argument focal method.
@Test public void testAlwaysPowerSavingMode() { SharedPrefsHelper.setPrefsValue(R.string.settings_key_power_save_mode, "always"); AtomicReference<Boolean> state = new AtomicReference<>(null); final Observable<Boolean> powerSavingState = PowerSaving.observePowerSavingState(getApplicationContext(), 0); Assert.assertNull(state.get()); final Disposable disposable = powerSavingState.subscribe(state::set); Assert.assertEquals(Boolean.TRUE, state.get()); sendBatteryState(false); Assert.assertEquals(Boolean.TRUE, state.get()); sendBatteryState(true); Assert.assertEquals(Boolean.TRUE, state.get()); sendBatteryState(false); Assert.assertEquals(Boolean.TRUE, state.get()); disposable.dispose(); sendBatteryState(true); Assert.assertEquals(Boolean.TRUE, state.get()); sendBatteryState(false); Assert.assertEquals(Boolean.TRUE, state.get()); }
// Root key for this GCS under-filesystem: the GCS scheme header followed by the bucket name.
@Override protected String getRootKey() { return Constants.HEADER_GCS + mBucketName; }
// Verifies getRootKey concatenates the GCS header and the bucket name.
@Test public void testGetRootKey() { Assert.assertEquals(Constants.HEADER_GCS + BUCKET_NAME, mGCSUnderFileSystem.getRootKey()); }
// Delegates the two-key lookup to the base implementation.
@Override public long get(long key1, long key2) { return super.get0(key1, key2); }
// Verifies get() resolves against the address the HSA currently points at: a fresh area has
// no entry for the key, while restoring the original address resolves the inserted slot.
@Test public void testGotoAddress() { final long addr1 = hsa.address(); final SlotAssignmentResult slot = insert(1, 2); hsa.gotoNew(); assertEquals(NULL_ADDRESS, hsa.get(1, 2)); hsa.gotoAddress(addr1); assertEquals(slot.address(), hsa.get(1, 2)); }
// Computes copy-paste duplications for a component: indexes both origin and duplication
// blocks, detects clone groups with the suffix-tree algorithm over the origin blocks,
// filters groups below the language-specific minimum unit threshold, and records the rest.
public void computeCpd(Component component, Collection<Block> originBlocks, Collection<Block> duplicationBlocks) { CloneIndex duplicationIndex = new PackedMemoryCloneIndex(); populateIndex(duplicationIndex, originBlocks); populateIndex(duplicationIndex, duplicationBlocks); List<CloneGroup> duplications = SuffixTreeCloneDetectionAlgorithm.detect(duplicationIndex, originBlocks); Iterable<CloneGroup> filtered = duplications.stream() .filter(getNumberOfUnitsNotLessThan(component.getFileAttributes().getLanguageKey())) .toList(); addDuplications(component, filtered); }
// Verifies that when the minimumTokens property is unset, the default threshold (100 units)
// still lets a 100-unit duplication through and the cross-file duplication is recorded.
@Test public void default_minimum_tokens_is_one_hundred() { settings.setProperty("sonar.cpd.xoo.minimumTokens", (Integer) null); Collection<Block> originBlocks = singletonList( new Block.Builder() .setResourceId(ORIGIN_FILE_KEY) .setBlockHash(new ByteArray("a8998353e96320ec")) .setIndexInFile(0) .setLines(30, 45) .setUnit(0, 100) .build()); Collection<Block> duplicatedBlocks = singletonList( new Block.Builder() .setResourceId(OTHER_FILE_KEY) .setBlockHash(new ByteArray("a8998353e96320ec")) .setIndexInFile(0) .setLines(40, 55) .build()); underTest.computeCpd(ORIGIN_FILE, originBlocks, duplicatedBlocks); assertThat(duplicationRepository.getDuplications(ORIGIN_FILE)) .containsExactly( crossProjectDuplication(new TextBlock(30, 45), OTHER_FILE_KEY, new TextBlock(40, 55))); }
/**
 * Returns the subset of the given table names that are configured as shadow tables,
 * preserving the input iteration order.
 */
public Collection<String> getRelatedShadowTables(final Collection<String> tableNames) {
    final Collection<String> result = new LinkedList<>();
    tableNames.forEach(tableName -> {
        if (shadowTableRules.containsKey(tableName)) {
            result.add(tableName);
        }
    });
    return result;
}
// Verifies only tables configured as shadow tables are returned ("t_user" but not "t_auto").
@Test void assertGetRelatedShadowTables() { Collection<String> relatedShadowTables = shadowRule.getRelatedShadowTables(Arrays.asList("t_user", "t_auto")); assertThat(relatedShadowTables.size(), is(1)); assertThat(relatedShadowTables.iterator().next(), is("t_user")); }
/**
 * Creates a lag() window function over the given argument channels: the value channel is
 * required; the offset and default-value channels are optional and default to -1 (absent).
 */
public LagFunction(List<Integer> argumentChannels)
{
    this.valueChannel = argumentChannels.get(0);
    this.offsetChannel = channelOrAbsent(argumentChannels, 1);
    this.defaultChannel = channelOrAbsent(argumentChannels, 2);
}

// Returns the channel at the given position, or -1 when the argument was not supplied.
private static int channelOrAbsent(List<Integer> channels, int position)
{
    return position < channels.size() ? channels.get(position) : -1;
}
// Exercises the SQL lag() window function: default one-row lag, explicit offset with a
// default value, behavior over null keys/values, an enormous offset, a null offset, and
// offset 0 (identity), for both date and key columns.
@Test public void testLagFunction() { assertWindowQuery("lag(orderdate) OVER (PARTITION BY orderstatus ORDER BY orderkey)", resultBuilder(TEST_SESSION, INTEGER, VARCHAR, VARCHAR) .row(3, "F", null) .row(5, "F", "1993-10-14") .row(6, "F", "1994-07-30") .row(33, "F", "1992-02-21") .row(1, "O", null) .row(2, "O", "1996-01-02") .row(4, "O", "1996-12-01") .row(7, "O", "1995-10-11") .row(32, "O", "1996-01-10") .row(34, "O", "1995-07-16") .build()); assertWindowQueryWithNulls("lag(orderdate) OVER (PARTITION BY orderstatus ORDER BY orderkey)", resultBuilder(TEST_SESSION, BIGINT, VARCHAR, VARCHAR) .row(3L, "F", null) .row(5L, "F", "1993-10-14") .row(6L, "F", null) .row(null, "F", "1992-02-21") .row(34L, "O", null) .row(null, "O", "1998-07-21") .row(1L, null, null) .row(7L, null, null) .row(null, null, "1996-01-10") .row(null, null, null) .build()); assertWindowQuery("lag(orderkey) OVER (PARTITION BY orderstatus ORDER BY orderkey)", resultBuilder(TEST_SESSION, INTEGER, VARCHAR, INTEGER) .row(3, "F", null) .row(5, "F", 3) .row(6, "F", 5) .row(33, "F", 6) .row(1, "O", null) .row(2, "O", 1) .row(4, "O", 2) .row(7, "O", 4) .row(32, "O", 7) .row(34, "O", 32) .build()); assertWindowQueryWithNulls("lag(orderkey) OVER (PARTITION BY orderstatus ORDER BY orderkey)", resultBuilder(TEST_SESSION, BIGINT, VARCHAR, BIGINT) .row(3L, "F", null) .row(5L, "F", 3L) .row(6L, "F", 5L) .row(null, "F", 6L) .row(34L, "O", null) .row(null, "O", 34L) .row(1L, null, null) .row(7L, null, 1L) .row(null, null, 7L) .row(null, null, null) .build()); assertWindowQuery("lag(orderdate, 2, '1977-01-01') OVER (PARTITION BY orderstatus ORDER BY orderkey)", resultBuilder(TEST_SESSION, INTEGER, VARCHAR, VARCHAR) .row(3, "F", "1977-01-01") .row(5, "F", "1977-01-01") .row(6, "F", "1993-10-14") .row(33, "F", "1994-07-30") .row(1, "O", "1977-01-01") .row(2, "O", "1977-01-01") .row(4, "O", "1996-01-02") .row(7, "O", "1996-12-01") .row(32, "O", "1995-10-11") .row(34, "O", "1996-01-10") .build()); 
assertWindowQueryWithNulls("lag(orderdate, 2, '1977-01-01') OVER (PARTITION BY orderstatus ORDER BY orderkey)", resultBuilder(TEST_SESSION, BIGINT, VARCHAR, VARCHAR) .row(3L, "F", "1977-01-01") .row(5L, "F", "1977-01-01") .row(6L, "F", "1993-10-14") .row(null, "F", null) .row(34L, "O", "1977-01-01") .row(null, "O", "1977-01-01") .row(1L, null, "1977-01-01") .row(7L, null, "1977-01-01") .row(null, null, null) .row(null, null, "1996-01-10") .build()); assertWindowQuery("lag(orderkey, 2, -1) OVER (PARTITION BY orderstatus ORDER BY orderkey)", resultBuilder(TEST_SESSION, INTEGER, VARCHAR, INTEGER) .row(3, "F", -1) .row(5, "F", -1) .row(6, "F", 3) .row(33, "F", 5) .row(1, "O", -1) .row(2, "O", -1) .row(4, "O", 1) .row(7, "O", 2) .row(32, "O", 4) .row(34, "O", 7) .build()); assertWindowQueryWithNulls("lag(orderkey, 2, -1) OVER (PARTITION BY orderstatus ORDER BY orderkey)", resultBuilder(TEST_SESSION, BIGINT, VARCHAR, BIGINT) .row(3L, "F", -1L) .row(5L, "F", -1L) .row(6L, "F", 3L) .row(null, "F", 5L) .row(34L, "O", -1L) .row(null, "O", -1L) .row(1L, null, -1L) .row(7L, null, -1L) .row(null, null, 1L) .row(null, null, 7L) .build()); assertWindowQuery("lag(orderkey, BIGINT '8' * 1000 * 1000 * 1000) OVER (PARTITION BY orderstatus ORDER BY orderkey)", resultBuilder(TEST_SESSION, INTEGER, VARCHAR, BIGINT) .row(3, "F", null) .row(5, "F", null) .row(6, "F", null) .row(33, "F", null) .row(1, "O", null) .row(2, "O", null) .row(4, "O", null) .row(7, "O", null) .row(32, "O", null) .row(34, "O", null) .build()); assertWindowQuery("lag(orderkey, null, -1) OVER (PARTITION BY orderstatus ORDER BY orderkey)", resultBuilder(TEST_SESSION, INTEGER, VARCHAR, BIGINT) .row(3, "F", null) .row(5, "F", null) .row(6, "F", null) .row(33, "F", null) .row(1, "O", null) .row(2, "O", null) .row(4, "O", null) .row(7, "O", null) .row(32, "O", null) .row(34, "O", null) .build()); assertWindowQuery("lag(orderkey, 0) OVER (PARTITION BY orderstatus ORDER BY orderkey)", resultBuilder(TEST_SESSION, INTEGER, 
VARCHAR, INTEGER) .row(3, "F", 3) .row(5, "F", 5) .row(6, "F", 6) .row(33, "F", 33) .row(1, "O", 1) .row(2, "O", 2) .row(4, "O", 4) .row(7, "O", 7) .row(32, "O", 32) .row(34, "O", 34) .build()); assertWindowQuery("date_format(lag(cast(orderdate as TIMESTAMP), 0) OVER (PARTITION BY orderstatus ORDER BY orderkey), '%Y-%m-%d')", resultBuilder(TEST_SESSION, INTEGER, VARCHAR, VARCHAR) .row(3, "F", "1993-10-14") .row(5, "F", "1994-07-30") .row(6, "F", "1992-02-21") .row(33, "F", "1993-10-27") .row(1, "O", "1996-01-02") .row(2, "O", "1996-12-01") .row(4, "O", "1995-10-11") .row(7, "O", "1996-01-10") .row(32, "O", "1995-07-16") .row(34, "O", "1998-07-21") .build()); }
/**
 * Fetches the value for the given key at the given timestamp, consulting every underlying
 * window store in order and returning the first non-null result, or null if absent.
 *
 * @throws NullPointerException if key is null
 * @throws InvalidStateStoreException if an underlying store has been closed/migrated
 */
@Override
public V fetch(final K key, final long time) {
    Objects.requireNonNull(key, "key can't be null");
    final List<ReadOnlyWindowStore<K, V>> stores = provider.stores(storeName, windowStoreType);
    for (final ReadOnlyWindowStore<K, V> windowStore : stores) {
        try {
            final V result = windowStore.fetch(key, time);
            if (result != null) {
                return result;
            }
        } catch (final InvalidStateStoreException e) {
            // Rewrap with user-facing guidance, preserving the original exception as the
            // cause so the root failure is not lost.
            throw new InvalidStateStoreException(
                "State store is not available anymore and may have been migrated to another instance; "
                    + "please re-discover its location from the state metadata.",
                e);
        }
    }
    return null;
}
// Verifies that fetching a range from a composite store with no underlying stores yields an
// iterator whose next() throws NoSuchElementException.
@Test public void emptyIteratorNextShouldThrowNoSuchElementException() { final StateStoreProvider storeProvider = mock(StateStoreProvider.class); when(storeProvider.stores(anyString(), any())).thenReturn(emptyList()); final CompositeReadOnlyWindowStore<Object, Object> store = new CompositeReadOnlyWindowStore<>( storeProvider, QueryableStoreTypes.windowStore(), "foo" ); try (final WindowStoreIterator<Object> windowStoreIterator = store.fetch("key", ofEpochMilli(1), ofEpochMilli(10))) { assertThrows(NoSuchElementException.class, windowStoreIterator::next); } }
// Builds a client-visible FileInfo snapshot of this inode at the given path: zero block
// size, completed, folder=true, not cacheable, an INVALID_UFS_FINGERPRINT placeholder, and
// timestamps, TTL, ownership, mode, persistence, mount, ACL, medium and xattr metadata
// copied straight from the inode.
@Override public FileInfo generateClientFileInfo(String path) { FileInfo ret = new FileInfo(); ret.setFileId(getId()); ret.setName(getName()); ret.setPath(path); ret.setBlockSizeBytes(0); ret.setCreationTimeMs(getCreationTimeMs()); ret.setCompleted(true); ret.setFolder(isDirectory()); ret.setPinned(isPinned()); ret.setCacheable(false); ret.setPersisted(isPersisted()); ret.setLastModificationTimeMs(getLastModificationTimeMs()); ret.setLastAccessTimeMs(getLastAccessTimeMs()); ret.setTtl(mTtl); ret.setTtlAction(mTtlAction); ret.setOwner(getOwner()); ret.setGroup(getGroup()); ret.setMode(getMode()); ret.setPersistenceState(getPersistenceState().toString()); ret.setMountPoint(isMountPoint()); ret.setUfsFingerprint(Constants.INVALID_UFS_FINGERPRINT); ret.setAcl(mAcl); ret.setDefaultAcl(mDefaultAcl); ret.setMediumTypes(getMediumTypes()); ret.setXAttr(getXAttr()); return ret; }
// Verifies the generated FileInfo mirrors the directory inode's metadata and directory
// defaults (completed, folder, zero length/block size, not cacheable).
@Test public void generateClientFileInfo() { MutableInodeDirectory inodeDirectory = createInodeDirectory(); String path = "/test/path"; FileInfo info = inodeDirectory.generateClientFileInfo(path); Assert.assertEquals(inodeDirectory.getId(), info.getFileId()); Assert.assertEquals(inodeDirectory.getName(), info.getName()); Assert.assertEquals(path, info.getPath()); Assert.assertEquals("", info.getUfsPath()); Assert.assertEquals(0, info.getLength()); Assert.assertEquals(0, info.getBlockSizeBytes()); Assert.assertEquals(inodeDirectory.getCreationTimeMs(), info.getCreationTimeMs()); Assert.assertTrue(info.isCompleted()); Assert.assertTrue(info.isFolder()); Assert.assertEquals(inodeDirectory.isPinned(), info.isPinned()); Assert.assertFalse(info.isCacheable()); Assert.assertNotNull(info.getBlockIds()); Assert.assertEquals(inodeDirectory.getLastModificationTimeMs(), info.getLastModificationTimeMs()); }
/** Registers every slot in the collection, stamping each with the same timestamp. */
@Override
public void addSlots(Collection<AllocatedSlot> slots, long currentTime) {
    slots.forEach(slot -> addSlot(slot, currentTime));
}
// Verifies that slots added in bulk are registered and all reported as free.
@Test void testAddSlots() { final DefaultAllocatedSlotPool slotPool = new DefaultAllocatedSlotPool(); final Collection<AllocatedSlot> slots = createAllocatedSlots(); slotPool.addSlots(slots, 0); assertSlotPoolContainsSlots(slotPool, slots); assertSlotPoolContainsFreeSlots(slotPool, slots); }
/**
 * Returns the string with the half-open range [start, end) replaced by the given text:
 * the prefix before {@code start}, then {@code replacement}, then the suffix from
 * {@code end}. Out-of-range indices propagate substring's StringIndexOutOfBoundsException.
 */
@Nonnull
public static String replaceRange(@Nonnull String string, int start, int end, String replacement) {
    return string.substring(0, start) + replacement + string.substring(end);
}
// Covers prefix replacement plus middle, suffix, pure-insertion (start == end) and
// empty-replacement (deletion) ranges; the original only checked the prefix case.
@Test
void testReplaceRange() {
    assertEquals("_cdefg", StringUtil.replaceRange("abcdefg", 0, 2, "_"));
    assertEquals("ab_efg", StringUtil.replaceRange("abcdefg", 2, 4, "_"));
    assertEquals("abcde_", StringUtil.replaceRange("abcdefg", 5, 7, "_"));
    assertEquals("ab_cdefg", StringUtil.replaceRange("abcdefg", 2, 2, "_"));
    assertEquals("abefg", StringUtil.replaceRange("abcdefg", 2, 4, ""));
}
// Convenience overload: formats the amount with the third flag false — presumably the
// "money/colloquial" mode switch of the three-argument overload; confirm against it.
public static String format(double amount, boolean isUseTraditional) { return format(amount, isUseTraditional, false); }
// Verifies Chinese-numeral formatting of values in the ten-thousand (万) range, including
// placement of the zero placeholder (零) between non-adjacent digit groups.
@Test public void formatTenThousandLongTest() { String f = NumberChineseFormatter.format(1_0000, false); assertEquals("一万", f); f = NumberChineseFormatter.format(1_0001, false); assertEquals("一万零一", f); f = NumberChineseFormatter.format(1_0010, false); assertEquals("一万零一十", f); f = NumberChineseFormatter.format(1_0100, false); assertEquals("一万零一百", f); f = NumberChineseFormatter.format(1_1000, false); assertEquals("一万一千", f); f = NumberChineseFormatter.format(10_1000, false); assertEquals("一十万零一千", f); f = NumberChineseFormatter.format(10_0100, false); assertEquals("一十万零一百", f); f = NumberChineseFormatter.format(100_1000, false); assertEquals("一百万零一千", f); f = NumberChineseFormatter.format(100_0100, false); assertEquals("一百万零一百", f); f = NumberChineseFormatter.format(1000_1000, false); assertEquals("一千万零一千", f); f = NumberChineseFormatter.format(1000_0100, false); assertEquals("一千万零一百", f); f = NumberChineseFormatter.format(9999_0000, false); assertEquals("九千九百九十九万", f); }
// Static factory returning a new Builder with default settings.
public static Builder custom() { return new Builder(); }
// Verifies a sliding-window configuration with zero minimum calls is rejected at build time.
@Test(expected = IllegalArgumentException.class) public void zeroMinimumNumberOfCallsShouldFail() { custom().slidingWindow(2, 0, SlidingWindowType.COUNT_BASED).build(); }
// Inserts or updates a mapping, returning the previous value or null if the key was absent.
// A null key lives in the dedicated nullEntry slot outside the hash table. For non-null keys
// the bucket chain is scanned for an equal key (identity compared first as a fast path) and
// the existing value replaced; otherwise a new entry is prepended to the bucket and the
// table is rehashed once the size exceeds capacity.
@Override public V put(@Nullable final K key, final V value) { if (key == null) { if (nullEntry == null) { _size += 1; nullEntry = new Entry<>(null, value); return null; } return nullEntry.setValue(value); } final Entry<K, V>[] table = this.table; final int hash = key.hashCode(); final int index = HashUtil.indexFor(hash, table.length, mask); for (Entry<K, V> e = table[index]; e != null; e = e.hashNext) { final K entryKey; if ((entryKey = e.key) == key || entryKey.equals(key)) { return e.setValue(value); } } final Entry<K, V> e = new Entry<>(key, value); e.hashNext = table[index]; table[index] = e; _size += 1; if (_size > capacity) { rehash(HashUtil.nextCapacity(capacity)); } return null; }
// Verifies put/get round-trips for 1000 keys plus the null key, and that put returns the
// previous value on overwrite while keeping the size constant.
@Test public void testPutGet() { final Map<Integer, String> tested = new HashMap<>(); for (int i = 0; i < 1000; ++i) { tested.put(i, Integer.toString(i)); } tested.put(null, "null"); Assert.assertEquals(1001, tested.size()); for (int i = 0; i < 1000; ++i) { Assert.assertEquals(Integer.toString(i), tested.get(i)); } Assert.assertEquals("null", tested.get(null)); for (int i = 0; i < 1000; ++i) { Assert.assertEquals(Integer.toString(i), tested.put(i, Integer.toString(i + 1))); } Assert.assertEquals("null", tested.put(null, "new null")); Assert.assertEquals(1001, tested.size()); for (int i = 0; i < 1000; ++i) { Assert.assertEquals(Integer.toString(i + 1), tested.get(i)); } Assert.assertEquals("new null", tested.get(null)); }
// Validates tenant and namespace, then resolves the canonical NamespaceName for
// "tenant/namespace" via the single-argument factory.
public static NamespaceName get(String tenant, String namespace) { validateNamespaceName(tenant, namespace); return get(tenant + '/' + namespace); }
// Verifies an empty tenant is rejected. NOTE(review): calls the three-argument
// (tenant, cluster, namespace) overload rather than the two-argument focal method.
@Test(expectedExceptions = IllegalArgumentException.class) public void namespace_emptyTenant() { NamespaceName.get("", "cluster", "namespace"); }
// Initializes the processing-time timer service and the key set (presumably for tracking
// keys seen by this operator — confirm against usages) before delegating to the base open().
@Override public void open() throws Exception { this.timerService = getInternalTimerService("processing timer", VoidNamespaceSerializer.INSTANCE, this); this.keySet = new HashSet<>(); super.open(); }
// Verifies the operator rejects records whose selected output key does not match the current
// partition key, for both the main output and the side output.
@Test
void testKeyCheck() throws Exception {
    OutputTag<Long> sideOutputTag = new OutputTag<Long>("side-output") {};
    AtomicBoolean emitToFirstOutput = new AtomicBoolean(true);
    KeyedTwoOutputProcessOperator<Integer, Integer, Integer, Long> processOperator =
        new KeyedTwoOutputProcessOperator<>(
            new TwoOutputStreamProcessFunction<Integer, Integer, Long>() {
                @Override
                public void processRecord(
                    Integer record,
                    Collector<Integer> output1,
                    Collector<Long> output2,
                    PartitionedContext ctx) {
                    if (emitToFirstOutput.get()) {
                        output1.collect(record);
                    } else {
                        output2.collect((long) (record));
                    }
                }
            },
            sideOutputTag,
            // -1 is an invalid key in this suite.
            (KeySelector<Integer, Integer>) value -> -1,
            // -1 is an invalid key in this suite.
            (KeySelector<Long, Integer>) value -> -1);
    try (KeyedOneInputStreamOperatorTestHarness<Integer, Integer, Integer> testHarness =
        new KeyedOneInputStreamOperatorTestHarness<>(
            processOperator,
            (KeySelector<Integer, Integer>) value -> value,
            Types.INT)) {
        testHarness.open();
        assertThatThrownBy(() -> testHarness.processElement(new StreamRecord<>(1)))
            .isInstanceOf(IllegalStateException.class);
        emitToFirstOutput.set(false);
        assertThatThrownBy(() -> testHarness.processElement(new StreamRecord<>(1)))
            .isInstanceOf(IllegalStateException.class);
    }
}
// Positional lookup across heterogeneous container types: Map (nth entry via its iterator),
// List, Object[], Iterator, Collection, Enumeration, or any array via reflection. Throws
// IndexOutOfBoundsException for a negative or out-of-range index, and
// IllegalArgumentException for a null or unsupported object. Note: Iterator/Enumeration
// arguments are consumed up to the requested position.
public static Object get(Object object, int index) { if (index < 0) { throw new IndexOutOfBoundsException("Index cannot be negative: " + index); } if (object instanceof Map) { Map map = (Map) object; Iterator iterator = map.entrySet().iterator(); return get(iterator, index); } else if (object instanceof List) { return ((List) object).get(index); } else if (object instanceof Object[]) { return ((Object[]) object)[index]; } else if (object instanceof Iterator) { Iterator it = (Iterator) object; while (it.hasNext()) { index--; if (index == -1) { return it.next(); } else { it.next(); } } throw new IndexOutOfBoundsException("Entry does not exist: " + index); } else if (object instanceof Collection) { Iterator iterator = ((Collection) object).iterator(); return get(iterator, index); } else if (object instanceof Enumeration) { Enumeration it = (Enumeration) object; while (it.hasMoreElements()) { index--; if (index == -1) { return it.nextElement(); } else { it.nextElement(); } } throw new IndexOutOfBoundsException("Entry does not exist: " + index); } else if (object == null) { throw new IllegalArgumentException("Unsupported object type: null"); } else { try { return Array.get(object, index); } catch (IllegalArgumentException ex) { throw new IllegalArgumentException("Unsupported object type: " + object.getClass().getName()); } } }
// Verifies positional access into Object arrays.
@Test void testGetArray3() { assertEquals("1", CollectionUtils.get(new Object[] {"1"}, 0)); assertEquals("2", CollectionUtils.get(new Object[] {"1", "2"}, 1)); }
/**
 * Parses the --target option into TargetInfo instances. A blank value is rejected with an
 * IOException; a separator-delimited value is split into individual targets; when the option
 * is absent, all roles are targeted.
 */
public static List<TargetInfo> parseOptTarget(CommandLine cmd, AlluxioConfiguration conf) throws IOException {
  String[] targets;
  if (cmd.hasOption(TARGET_OPTION_NAME)) {
    String argTarget = cmd.getOptionValue(TARGET_OPTION_NAME);
    if (StringUtils.isBlank(argTarget)) {
      throw new IOException("Option " + TARGET_OPTION_NAME + " can not be blank.");
    } else if (argTarget.contains(TARGET_SEPARATOR)) {
      targets = argTarget.split(TARGET_SEPARATOR);
    } else {
      targets = new String[]{argTarget};
    }
  } else {
    // By default we set on all targets (master/workers/job_master/job_workers)
    targets = new String[]{ROLE_MASTER, ROLE_JOB_MASTER, ROLE_WORKERS, ROLE_JOB_WORKERS};
  }
  return getTargetInfos(targets, conf);
}
// Verifies parsing of a single "master" target using the configured master hostname and web port.
@Test public void parseSingleMasterTarget() throws Exception { mConf.set(PropertyKey.MASTER_HOSTNAME, "masters-1"); CommandLine mockCommandLine = mock(CommandLine.class); String[] mockArgs = new String[]{"--target", "master"}; when(mockCommandLine.getArgs()).thenReturn(mockArgs); when(mockCommandLine.hasOption(LogLevel.TARGET_OPTION_NAME)).thenReturn(true); when(mockCommandLine.getOptionValue(LogLevel.TARGET_OPTION_NAME)).thenReturn(mockArgs[1]); List<LogLevel.TargetInfo> targets = LogLevel.parseOptTarget(mockCommandLine, mConf); assertEquals(1, targets.size()); assertEquals(new LogLevel.TargetInfo("masters-1", MASTER_WEB_PORT, "master"), targets.get(0)); }
// Queue contract: removes and returns the head, delegating to removeFirst().
@Override public V remove() { return removeFirst(); }
// Verifies remove() on an empty queue throws NoSuchElementException.
@Test public void testRemoveEmpty() { Assertions.assertThrows(NoSuchElementException.class, () -> { RQueue<Integer> queue = getQueue(); queue.remove(); }); }
/**
 * Handles an HTTP/1.1 to HTTP/2 server upgrade: validates this is a server connection, that
 * our preface was already sent (i.e. the handler is in the pipeline) but the client's HTTP/2
 * preface has not yet been received, applies the settings from the upgrade request without an
 * ACK, and creates the half-closed upgrade stream.
 *
 * @throws Http2Exception if called on a client connection or at the wrong point in the
 *     preface exchange
 */
public void onHttpServerUpgrade(Http2Settings settings) throws Http2Exception {
    if (!connection().isServer()) {
        throw connectionError(PROTOCOL_ERROR, "Server-side HTTP upgrade requested for a client");
    }
    if (!prefaceSent()) {
        // If the preface was not sent yet it most likely means the handler was not added to the pipeline before
        // calling this method.
        throw connectionError(INTERNAL_ERROR, "HTTP upgrade must occur after preface was sent");
    }
    if (decoder.prefaceReceived()) {
        throw connectionError(PROTOCOL_ERROR, "HTTP upgrade must occur before HTTP/2 preface is received");
    }
    // Apply the settings but no ACK is necessary.
    encoder.remoteSettings(settings);
    // Create a stream in the half-closed state.
    connection().remote().createStream(HTTP_UPGRADE_STREAM_ID, true);
}
// Verifies that an upgrade attempted before the preface was sent (handler not yet in the
// pipeline) fails with INTERNAL_ERROR.
@Test public void onHttpServerUpgradeWithoutHandlerAdded() throws Exception { handler = new Http2ConnectionHandlerBuilder().frameListener(new Http2FrameAdapter()).server(true).build(); Http2Exception e = assertThrows(Http2Exception.class, new Executable() { @Override public void execute() throws Throwable { handler.onHttpServerUpgrade(new Http2Settings()); } }); assertEquals(Http2Error.INTERNAL_ERROR, e.error()); }
public void replicaBackupLog(List<TransactionLogRecord> records, UUID callerUuid, UUID txnId, long timeoutMillis, long startTime) { TxBackupLog beginLog = txBackupLogs.get(txnId); if (beginLog == null) { throw new TransactionException("Could not find begin tx log!"); } if (beginLog.state != ACTIVE) { // the exception message is very strange throw new TransactionException("TxLog already exists!"); } TxBackupLog newTxBackupLog = new TxBackupLog(records, callerUuid, COMMITTING, timeoutMillis, startTime, beginLog.allowedDuringPassiveState); if (!txBackupLogs.replace(txnId, beginLog, newTxBackupLog)) { throw new TransactionException("TxLog already exists!"); } }
// Verifies replicaBackupLog without a corresponding begin log throws TransactionException.
@Test(expected = TransactionException.class) public void replicaBackupLog_whenNotExist_thenTransactionException() { List<TransactionLogRecord> records = new LinkedList<>(); txService.replicaBackupLog(records, UuidUtil.newUnsecureUUID(), TXN, 1, 1); }
/**
 * Maps this column type to the dialect-specific large-text SQL type.
 *
 * @throws IllegalArgumentException for an unsupported dialect
 */
@Override
public String generateSqlType(Dialect dialect) {
    return switch (dialect.getId()) {
        case MsSql.ID -> "NVARCHAR (MAX)";
        case Oracle.ID, H2.ID -> "CLOB";
        case PostgreSql.ID -> "TEXT";
        default -> throw new IllegalArgumentException("Unsupported dialect id " + dialect.getId());
    };
}
// Verifies the Oracle dialect maps the column type to CLOB.
@Test public void generate_sql_type_on_oracle() { assertThat(underTest.generateSqlType(new Oracle())).isEqualTo("CLOB"); }
// Builds a WebSocketUpstream for the given protocol/host/port with the default weight and
// warmup time and a current timestamp; status is healthy only when both the port is non-null
// and the host is non-blank.
public static WebSocketUpstream buildWebSocketUpstream(final String protocol, final String host, final Integer port) { return WebSocketUpstream.builder().host(LOCALHOST).protocol(protocol) .upstreamUrl(buildUrl(host, port)).weight(DEFAULT_WEIGHT) .warmup(Constants.WARMUP_TIME) .timestamp(System.currentTimeMillis()) .status(Objects.nonNull(port) && StringUtils.isNotBlank(host)) .build(); }
/** Protocol and "host:port" URL must be carried through to the built upstream. */
@Test
public void buildWebSocketUpstream() {
    WebSocketUpstream webSocketUpstream = CommonUpstreamUtils.buildWebSocketUpstream("tcp", HOST, PORT);
    Assert.assertNotNull(webSocketUpstream);
    Assert.assertEquals(HOST + ":" + PORT, webSocketUpstream.getUpstreamUrl());
    Assert.assertEquals("tcp", webSocketUpstream.getProtocol());
}
/**
 * Migration step: ensures the default stream exists. If loading it fails with
 * NotFoundException the stream is created; any other exception propagates.
 */
@Override
public void upgrade() {
    try {
        streamService.load(Stream.DEFAULT_STREAM_ID);
    } catch (NotFoundException ignored) {
        // Default stream missing — create it now.
        createDefaultStream();
    }
}
/** When the default stream loads successfully, the migration must not save anything. */
@Test
public void upgradeDoesNotRunIfDefaultStreamExists() throws Exception {
    when(streamService.load("000000000000000000000001")).thenReturn(mock(Stream.class));
    migration.upgrade();
    verify(streamService, never()).save(any(Stream.class));
}
/**
 * Generates a bcrypt salt string in the modular-crypt "$2a$NN$&lt;22 base64 chars&gt;"
 * format, using the supplied source of randomness.
 *
 * @param log_rounds log2 of the number of hashing rounds (at most 30)
 * @param random     secure random source for the raw salt bytes
 * @return the encoded salt, suitable for {@code hashpw}
 * @throws IllegalArgumentException if {@code log_rounds} exceeds 30
 */
public static String gensalt(int log_rounds, SecureRandom random) {
    // Validate before doing any work (previously the check ran after partially
    // building the result and consuming randomness).
    if (log_rounds > 30) {
        throw new IllegalArgumentException("log_rounds exceeds maximum (30)");
    }
    byte[] rnd = new byte[BCRYPT_SALT_LEN];
    random.nextBytes(rnd);
    // StringBuilder instead of the legacy synchronized StringBuffer.
    StringBuilder rs = new StringBuilder();
    rs.append("$2a$");
    // Round count is always two digits: pad a leading zero below 10.
    if (log_rounds < 10) {
        rs.append("0");
    }
    rs.append(Integer.toString(log_rounds));
    rs.append("$");
    rs.append(encode_base64(rnd, rnd.length));
    return rs.toString();
}
/**
 * Round-trip check for generated salts: hashing a password with a gensalt()
 * salt, then re-hashing with the resulting hash (which embeds the full salt
 * spec in its prefix), must reproduce the same hash string.
 */
@Test
public void testGensalt() {
    System.out.print("BCrypt.gensalt(): ");
    // test_vectors is grouped in blocks of 4; only the plaintext (index 0) is used here.
    for (int i = 0; i < test_vectors.length; i += 4) {
        String plain = test_vectors[i][0];
        String salt = BCrypt.gensalt();
        String hashed1 = BCrypt.hashpw(plain, salt);
        String hashed2 = BCrypt.hashpw(plain, hashed1);
        Assert.assertEquals(hashed1, hashed2);
        System.out.print(".");
    }
    System.out.println("");
}
/**
 * Priority is not meaningful for this partition-file reader implementation;
 * all arguments are ignored and -1 is always returned.
 */
@Override
public long getPriority(
        TieredStoragePartitionId partitionId,
        TieredStorageSubpartitionId subpartitionId,
        int segmentId,
        int bufferIndex,
        @Nullable ReadProgress readProgress)
        throws IOException {
    // noop
    return -1;
}
/** getPriority must return -1 regardless of read progress through the buffers. */
@Test
void testGetPriority() throws IOException {
    assertThat(
            partitionFileReader.getPriority(
                    DEFAULT_PARTITION_ID, DEFAULT_SUBPARTITION_ID, 0, 0, null))
            .isEqualTo(-1);
    // Advance the reader by one buffer, then check the priority is unchanged.
    assertThat(readBuffer(0, DEFAULT_SUBPARTITION_ID, 0)).isNotNull();
    assertThat(
            partitionFileReader.getPriority(
                    DEFAULT_PARTITION_ID, DEFAULT_SUBPARTITION_ID, 0, 1, null))
            .isEqualTo(-1);
}
/**
 * Builds a Hive-style partition name ("col1=val1/col2=val2/...") from a
 * partition key by extracting its values and delegating to the list-based overload.
 */
public static String toHivePartitionName(List<String> partitionColumnNames, PartitionKey partitionKey) {
    List<String> partitionValues = fromPartitionKey(partitionKey);
    return toHivePartitionName(partitionColumnNames, partitionValues);
}
/**
 * Covers HivePartitionName.toString and the map-based toHivePartitionName overload:
 * values are joined in column order, and an unknown column fails with a
 * StarRocksConnectorException.
 */
@Test
public void testHivePartitionNames() {
    List<String> partitionValues = Lists.newArrayList("1", "2", "3");
    String partitionNames = "a=1/b=2/c=3";
    HivePartitionName hivePartitionName = new HivePartitionName("db", "table", partitionValues,
            Optional.of(partitionNames));
    Assert.assertEquals("HivePartitionName{databaseName='db', tableName='table',"
            + " partitionValues=[1, 2, 3], partitionNames=Optional[a=1/b=2/c=3]}", hivePartitionName.toString());

    List<String> partitionColNames = Lists.newArrayList("k1");
    Map<String, String> partitionColToValue = Maps.newHashMap();
    partitionColToValue.put("k1", "1");
    Assert.assertEquals("k1=1", PartitionUtil.toHivePartitionName(partitionColNames, partitionColToValue));

    partitionColNames.add("k3");
    partitionColToValue.put("k3", "c");
    Assert.assertEquals("k1=1/k3=c", PartitionUtil.toHivePartitionName(partitionColNames, partitionColToValue));

    // Column order (not map order) drives the output order.
    partitionColNames.add("k5");
    partitionColNames.add("k4");
    partitionColNames.add("k6");
    partitionColToValue.put("k4", "d");
    partitionColToValue.put("k5", "e");
    partitionColToValue.put("k6", "f");
    Assert.assertEquals("k1=1/k3=c/k5=e/k4=d/k6=f",
            PartitionUtil.toHivePartitionName(partitionColNames, partitionColToValue));

    // A column with no value in the map must be rejected.
    partitionColNames.add("not_exists");
    try {
        PartitionUtil.toHivePartitionName(partitionColNames, partitionColToValue);
        Assert.fail();
    } catch (StarRocksConnectorException e) {
        Assert.assertTrue(e.getMessage().contains("Can't find column"));
    }
}
/**
 * Sends the "weekly coupons" newsletter to every contact on the
 * "weekly-coupons-newsletter" contact list, rendering the stored email template
 * with the sample coupon JSON as template data.
 *
 * Error handling: each specific SES exception prints a user-facing message.
 * MailFromDomainNotVerified/MessageRejected/SendingPaused are rethrown;
 * NotFoundException and AccountSuspendedException are only logged.
 * NOTE(review): the inline comments claim all of these "fail the workflow",
 * but the first two handlers do not rethrow — confirm that is intended.
 */
public void sendCouponNewsletter() {
    try {
        // Retrieve the list of contacts from the "weekly-coupons-newsletter" contact
        // list
        // snippet-start:[sesv2.java2.newsletter.ListContacts]
        ListContactsRequest contactListRequest = ListContactsRequest.builder()
                .contactListName(CONTACT_LIST_NAME)
                .build();
        List<String> contactEmails;
        try {
            ListContactsResponse contactListResponse = sesClient.listContacts(contactListRequest);
            contactEmails = contactListResponse.contacts().stream()
                    .map(Contact::emailAddress)
                    .toList();
        } catch (Exception e) {
            // TODO: Remove when listContacts's GET body issue is resolved.
            // Fallback to the locally known contacts when the API call fails.
            contactEmails = this.contacts;
        }
        // snippet-end:[sesv2.java2.newsletter.ListContacts]
        // Send an email using the "weekly-coupons" template to each contact in the list
        // snippet-start:[sesv2.java2.newsletter.SendEmail.template]
        String coupons = Files.readString(Paths.get("resources/coupon_newsletter/sample_coupons.json"));
        for (String emailAddress : contactEmails) {
            SendEmailRequest newsletterRequest = SendEmailRequest.builder()
                    .destination(Destination.builder().toAddresses(emailAddress).build())
                    .content(EmailContent.builder()
                            .template(Template.builder()
                                    .templateName(TEMPLATE_NAME)
                                    .templateData(coupons)
                                    .build())
                            .build())
                    .fromEmailAddress(this.verifiedEmail)
                    .listManagementOptions(ListManagementOptions.builder()
                            .contactListName(CONTACT_LIST_NAME)
                            .build())
                    .build();
            SendEmailResponse newsletterResponse = sesClient.sendEmail(newsletterRequest);
            System.out.println("Newsletter sent to " + emailAddress + ": " + newsletterResponse.messageId());
        }
        // snippet-end:[sesv2.java2.newsletter.SendEmail.template]
    } catch (NotFoundException e) {
        // If the contact list does not exist, fail the workflow and inform the user
        System.err.println("The contact list is missing. Please create the contact list and try again.");
    } catch (AccountSuspendedException e) {
        // If the account is suspended, fail the workflow and inform the user
        System.err.println("Your account is suspended. Please resolve the issue and try again.");
    } catch (MailFromDomainNotVerifiedException e) {
        // If the sending domain is not verified, fail the workflow and inform the user
        System.err.println("The sending domain is not verified. Please verify your domain and try again.");
        throw e;
    } catch (MessageRejectedException e) {
        // If the message is rejected due to invalid content, fail the workflow and
        // inform the user
        System.err.println("The message content is invalid. Please check your template and try again.");
        throw e;
    } catch (SendingPausedException e) {
        // If sending is paused, fail the workflow and inform the user
        System.err.println("Sending is currently paused for your account. Please resolve the issue and try again.");
        throw e;
    } catch (Exception e) {
        System.err.println("Error occurred while sending the newsletter: " + e.getMessage());
        e.printStackTrace();
    }
}
/**
 * When sendEmail fails with SendingPausedException, the scenario must print the
 * "sending is paused" message to stderr (the rethrow is swallowed by this test).
 */
@Test
public void test_sendCouponNewsletter_error_sendingPaused() {
    // Mock the necessary AWS SDK calls and responses
    CreateEmailTemplateResponse templateResponse = CreateEmailTemplateResponse.builder().build();
    when(sesClient.createEmailTemplate(any(CreateEmailTemplateRequest.class))).thenReturn(templateResponse);
    ListContactsResponse contactListResponse = ListContactsResponse.builder()
            .contacts(Contact.builder().emailAddress("user@example.com").build())
            .build();
    when(sesClient.listContacts(any(ListContactsRequest.class))).thenReturn(
            contactListResponse);
    when(sesClient.sendEmail(any(SendEmailRequest.class))).thenThrow(
            SendingPausedException.class);
    try {
        scenario.sendCouponNewsletter();
    } catch (Exception e) {
        // Rethrown exception is expected; only the stderr output is asserted.
    }
    String errorOutput = errContent.toString();
    assertThat(
            errorOutput,
            containsString("Sending is currently paused for your account. Please resolve the issue and try again."));
}
/**
 * Matches {@code str} against a template {@code expression} containing
 * "{variable}" placeholders and returns a map from variable name to the
 * substring it matched. Returns an empty map when either input is null, when
 * the expression is malformed (unmatched brace, or two adjacent variables with
 * no literal separator such as "{a}{b}"), or when {@code str} does not conform
 * to the expression's literal parts.
 */
static Map<String, String> resolveVariables(String expression, String str) {
    if (expression == null || str == null)
        return Collections.emptyMap();
    Map<String, String> resolvedVariables = new HashMap<>();
    StringBuilder variableBuilder = new StringBuilder();
    State state = State.TEXT;
    int j = 0; // cursor into str
    int expressionLength = expression.length();
    for (int i = 0; i < expressionLength; i++) {
        char e = expression.charAt(i);
        switch (e) {
            case '{':
                // "}{": two variables back-to-back cannot be delimited — unresolvable.
                if (state == END_VAR)
                    return Collections.emptyMap();
                state = VAR;
                break;
            case '}':
                if (state != VAR)
                    return Collections.emptyMap();
                state = END_VAR;
                if (i != expressionLength - 1)
                    break;
                // Intentional fall-through when '}' is the LAST character: the final
                // variable consumes the remainder of str via the END_VAR branch below.
            default:
                switch (state) {
                    case VAR:
                        // Inside "{...}": accumulate the variable name.
                        variableBuilder.append(e);
                        break;
                    case END_VAR:
                        String replacement;
                        boolean ec = i == expressionLength - 1; // expression consumed?
                        if (ec) {
                            replacement = str.substring(j);
                        } else {
                            // The variable's value runs until the next occurrence of the
                            // literal character that follows the placeholder.
                            int k = str.indexOf(e, j);
                            if (k == -1)
                                return Collections.emptyMap();
                            replacement = str.substring(j, str.indexOf(e, j));
                        }
                        resolvedVariables.put(variableBuilder.toString(), replacement);
                        j += replacement.length();
                        if (j == str.length() && ec)
                            return resolvedVariables;
                        variableBuilder.setLength(0);
                        state = TEXT;
                        // Intentional fall-through: the literal character itself must
                        // still match str at the current position (TEXT handling below).
                    case TEXT:
                        if (str.charAt(j) != e)
                            return Collections.emptyMap();
                        j++;
                }
        }
    }
    return resolvedVariables;
}
/** A string missing the literal separator must resolve to an empty map. */
@Test
public void testNonConformantPath() {
    Map<String, String> res = resolveVariables("{cachemanager}-{cache}", "default");
    assertEquals(0, res.size());
}
/**
 * Tests a VFS connection: first the provider-level connectivity test, then —
 * unless root paths are unsupported or explicitly ignored via options —
 * validation that the configured root path resolves and points at a folder.
 *
 * @param manager the connection manager used to resolve providers and files
 * @param details connection details; need not be saved, but must have a name
 * @param options optional test options; null means defaults
 * @return true when the connection (and, when applicable, its root path) is valid
 * @throws KettleException on unexpected failures from the provider machinery
 */
public <T extends VFSConnectionDetails> boolean test(
  @NonNull ConnectionManager manager,
  @NonNull T details,
  @Nullable VFSConnectionTestOptions options )
  throws KettleException {
  if ( options == null ) {
    options = new VFSConnectionTestOptions();
  }
  // The specified connection details may not exist saved in the meta-store,
  // but still needs to have a non-empty name in it, to be able to form a temporary PVFS URI.
  if ( StringUtils.isEmpty( details.getName() ) ) {
    return false;
  }
  VFSConnectionProvider<T> provider = getExistingProvider( manager, details );
  if ( !provider.test( details ) ) {
    return false;
  }
  // Root-path validation can be skipped when unsupported or explicitly ignored.
  if ( !details.isRootPathSupported() || options.isRootPathIgnored() ) {
    return true;
  }
  String resolvedRootPath;
  try {
    resolvedRootPath = getResolvedRootPath( details );
  } catch ( KettleException e ) {
    // Invalid root path.
    return false;
  }
  if ( resolvedRootPath == null ) {
    // No root path configured: acceptable only when a root path is not required.
    return !details.isRootPathRequired();
  }
  // Ensure that root path exists and is a folder.
  return isFolder( getConnectionRootProviderFileObject( manager, provider, details ) );
}
/** A root path that escapes the connection via ".." must fail the connection test. */
@Test
public void testTestReturnsFalseWhenRootPathHasInvalidRelativeSegments() throws KettleException {
    when( vfsConnectionDetails.getRootPath() ).thenReturn( "../other-connection" );
    assertFalse(
        vfsConnectionManagerHelper.test( connectionManager, vfsConnectionDetails, getTestOptionsCheckRootPath() ) );
}
/**
 * Fetches the event's message backlog and truncates it to the configured size.
 * A non-positive configured size (or a null backlog from the callback service)
 * returns the backlog untouched.
 */
@VisibleForTesting
List<MessageSummary> getMessageBacklog(EventNotificationContext ctx, SlackEventNotificationConfig config) {
    final List<MessageSummary> backlog = notificationCallbackService.getBacklogForEvent(ctx);
    if (backlog == null || config.backlogSize() <= 0) {
        return backlog;
    }
    return backlog.stream().limit(config.backlogSize()).collect(Collectors.toList());
}
/**
 * With a zero backlog size and a null context, the callback service's (null)
 * backlog must be passed through unchanged.
 */
@Test
public void testBacklogMessageLimitWhenEventNotificationContextIsNull() {
    SlackEventNotificationConfig slackConfig = SlackEventNotificationConfig.builder()
            .backlogSize(0)
            .build();
    //global setting is at N and the eventNotificationContext is null then the message summaries is null
    List<MessageSummary> messageSummaries = slackEventNotification.getMessageBacklog(null, slackConfig);
    assertThat(messageSummaries).isNull();
}
/**
 * Converts a byte array into an Ip6Address.
 * Validation of the array (length, non-null) is performed by the constructor —
 * NOTE(review): constructor not visible here; confirm its contract.
 */
public static Ip6Address valueOf(byte[] value) {
    return new Ip6Address(value);
}
/** Round-trips representative IPv6 addresses (regular, all-zero, all-ones) through valueOf. */
@Test
public void testValueOfInetAddressIPv6() {
    Ip6Address ipAddress;
    InetAddress inetAddress;
    inetAddress = InetAddresses.forString("1111:2222:3333:4444:5555:6666:7777:8888");
    ipAddress = Ip6Address.valueOf(inetAddress);
    assertThat(ipAddress.toString(), is("1111:2222:3333:4444:5555:6666:7777:8888"));
    inetAddress = InetAddresses.forString("::");
    ipAddress = Ip6Address.valueOf(inetAddress);
    assertThat(ipAddress.toString(), is("::"));
    inetAddress = InetAddresses.forString("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff");
    ipAddress = Ip6Address.valueOf(inetAddress);
    assertThat(ipAddress.toString(), is("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"));
}
/**
 * Returns a deserializer for ICMPv6 Router Solicitation messages: validates the
 * minimum header length, skips the 4 reserved bytes, then parses any trailing
 * neighbor-discovery options into the solicitation.
 */
public static Deserializer<RouterSolicitation> deserializer() {
    return (data, offset, length) -> {
        checkInput(data, offset, length, HEADER_LENGTH);
        RouterSolicitation routerSolicitation = new RouterSolicitation();
        ByteBuffer bb = ByteBuffer.wrap(data, offset, length);
        bb.getInt(); // 4 reserved bytes; value ignored
        if (bb.limit() - bb.position() > 0) {
            // Remaining bytes (if any) are ND options.
            NeighborDiscoveryOptions options = NeighborDiscoveryOptions.deserializer()
                    .deserialize(data, bb.position(), bb.limit() - bb.position());
            for (NeighborDiscoveryOptions.Option option : options.options()) {
                routerSolicitation.addOption(option.type(), option.data());
            }
        }
        return routerSolicitation;
    };
}
/** The deserializer must reject null/short/truncated input (shared test helper). */
@Test
public void testDeserializeBadInput() throws Exception {
    PacketTestUtils.testDeserializeBadInput(RouterSolicitation.deserializer());
}
/**
 * Case-insensitive merge: lower-cases the key before delegating to the default
 * {@code Map.merge} implementation, so "Key" and "key" address the same entry.
 * NOTE(review): toLowerCase() uses the JVM default locale — under e.g. the
 * Turkish locale "I" does not map to "i"; confirm whether Locale-insensitive
 * lower-casing was intended (and whether the rest of the class matches).
 */
@Override
public V merge(String key, V value, BiFunction<? super V, ? super V, ? extends V> remappingFunction) {
    return Map.super.merge(key.toLowerCase(), value, remappingFunction);
}
/**
 * merge() must hit existing entries case-insensitively, remove an entry when the
 * remapping function returns null, and insert the given value for absent keys.
 */
@Test
void merge() {
    Map<String, Object> map = new LowerCaseLinkHashMap<>(lowerCaseLinkHashMap);
    Object result = map.merge("key", "merge", (oldValue, value) -> oldValue.toString().toUpperCase());
    Assertions.assertEquals("VALUE", result);
    Assertions.assertEquals("VALUE", map.get("key"));
    // Null from the remapping function removes the mapping.
    result = map.merge("key", "merge", (oldValue, value) -> null);
    Assertions.assertNull(result);
    Assertions.assertFalse(map.containsKey("key"));
    // Absent key: the provided value is inserted, remapping function not applied.
    result = map.merge("compute", "merge", (oldValue, value) -> oldValue.toString().toUpperCase());
    Assertions.assertEquals("merge", result);
    Assertions.assertEquals("merge", map.get("compute"));
}
/**
 * Sets the value of the field with the given name, delegating to the
 * Field-based overload for schema validation.
 * NOTE(review): presumably throws DataException for unknown field names or
 * schema-incompatible values — confirm against lookupField/put(Field, Object).
 */
public Struct put(String fieldName, Object value) {
    Field field = lookupField(fieldName);
    return put(field, value);
}
/** Putting a Struct whose schema doesn't match the field's schema must throw DataException. */
@Test
public void testInvalidStructFieldValue() {
    assertThrows(DataException.class,
            () -> new Struct(NESTED_SCHEMA).put("nested", new Struct(NESTED_CHILD_SCHEMA)));
}
/**
 * Finds an available port on the given host, starting the search at
 * {@code port} and bounded by MAX_PORT. Delegates to the three-arg overload —
 * NOTE(review): search direction/semantics are defined there; confirm.
 */
public static int getAvailablePort(String host, int port) {
    return getAvailablePort(host, port, MAX_PORT);
}
/**
 * Any host form (loopback, wildcard, local address) and even a negative start
 * must yield a port in the valid range.
 */
@Test
public void getAvailablePort() throws Exception {
    int port = NetUtils.getAvailablePort("127.0.0.1", 33000);
    Assert.assertTrue(port >= 33000 && port < 65535);
    port = NetUtils.getAvailablePort("0.0.0.0", 33000);
    Assert.assertTrue(port >= 33000 && port < 65535);
    port = NetUtils.getAvailablePort(SystemInfo.getLocalHost(), 33000);
    Assert.assertTrue(port >= 33000 && port < 65535);
    // Negative start: still expected to return some valid port.
    port = NetUtils.getAvailablePort("127.0.0.1", -1);
    Assert.assertTrue(port >= 0 && port < 65535);
}
/**
 * Computes the row-count threshold at which the new consuming segment should be
 * flushed, auto-tuned from the committing segment's observed rows-per-byte ratio
 * so segments land near the configured desired size.
 *
 * Cases, in order:
 * 1. No committing-segment metadata (first segment of the partition): use the
 *    remembered ratio if any, else the configured autotune initial row count.
 * 2. Committing segment size unavailable (repair) or force-commit: carry over
 *    the previous segment's threshold unchanged.
 * 3. Time threshold was hit (fewer rows than threshold AND segment smaller than
 *    desired): bump slightly above the observed row count.
 * 4. Otherwise size-based: scale rows up/down toward the desired size band.
 *
 * Side effect: updates {@code _latestSegmentRowsToSizeRatio} (exponential
 * moving average) in cases 3 and 4.
 */
public int computeThreshold(StreamConfig streamConfig, CommittingSegmentDescriptor committingSegmentDescriptor,
    @Nullable SegmentZKMetadata committingSegmentZKMetadata, String newSegmentName) {
  long desiredSegmentSizeBytes = streamConfig.getFlushThresholdSegmentSizeBytes();
  if (desiredSegmentSizeBytes <= 0) {
    desiredSegmentSizeBytes = StreamConfig.DEFAULT_FLUSH_THRESHOLD_SEGMENT_SIZE_BYTES;
  }
  // Acceptable size band around the desired size: [desired/2, desired*1.5].
  long optimalSegmentSizeBytesMin = desiredSegmentSizeBytes / 2;
  double optimalSegmentSizeBytesMax = desiredSegmentSizeBytes * 1.5;
  if (committingSegmentZKMetadata == null) { // first segment of the partition, hence committing segment is null
    if (_latestSegmentRowsToSizeRatio > 0) { // new partition group added case
      long targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio);
      targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
      SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
          "Committing segment zk metadata is not available, using prev ratio {}, setting rows threshold for {} as {}",
          _latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows);
      return (int) targetSegmentNumRows;
    } else {
      final int autotuneInitialRows = streamConfig.getFlushAutotuneInitialRows();
      SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
          "Committing segment zk metadata is not available, setting threshold for {} as {}",
          newSegmentName, autotuneInitialRows);
      return autotuneInitialRows;
    }
  }
  final long committingSegmentSizeBytes = committingSegmentDescriptor.getSegmentSizeBytes();
  if (committingSegmentSizeBytes <= 0 // repair segment case
      || SegmentCompletionProtocol.REASON_FORCE_COMMIT_MESSAGE_RECEIVED.equals(
      committingSegmentDescriptor.getStopReason())) {
    String reason = committingSegmentSizeBytes <= 0 //
        ? "Committing segment size is not available" //
        : "Committing segment is due to force-commit";
    // Carry over the previous segment's threshold unchanged.
    final int targetNumRows = committingSegmentZKMetadata.getSizeThresholdToFlushSegment();
    SegmentSizeBasedFlushThresholdUpdater.LOGGER.info("{}, setting thresholds from previous segment for {} as {}",
        reason, newSegmentName, targetNumRows);
    return targetNumRows;
  }
  final long timeConsumed = _clock.millis() - committingSegmentZKMetadata.getCreationTime();
  final long numRowsConsumed = committingSegmentZKMetadata.getTotalDocs();
  final int numRowsThreshold = committingSegmentZKMetadata.getSizeThresholdToFlushSegment();
  SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
      "{}: Data from committing segment: Time {} numRows {} threshold {} segmentSize(bytes) {}",
      newSegmentName, TimeUtils.convertMillisToPeriod(timeConsumed), numRowsConsumed, numRowsThreshold,
      committingSegmentSizeBytes);
  // Exponential moving average of the rows-per-byte ratio across commits.
  double currentRatio = (double) numRowsConsumed / committingSegmentSizeBytes;
  if (_latestSegmentRowsToSizeRatio > 0) {
    _latestSegmentRowsToSizeRatio = CURRENT_SEGMENT_RATIO_WEIGHT * currentRatio
        + PREVIOUS_SEGMENT_RATIO_WEIGHT * _latestSegmentRowsToSizeRatio;
  } else {
    _latestSegmentRowsToSizeRatio = currentRatio;
  }
  // If the number of rows consumed is less than what we set as target in metadata, then the segment hit time limit.
  // We can set the new target to be slightly higher than the actual number of rows consumed so that we can aim
  // to hit the row limit next time around.
  //
  // If the size of the committing segment is higher than the desired segment size, then the administrator has
  // set a lower segment size threshold. We should treat this case as if we have hit the row limit and not the time
  // limit.
  //
  // TODO: add feature to adjust time threshold as well
  // If we set new threshold to be numRowsConsumed, we might keep oscillating back and forth between doubling limit
  // and time threshold being hit. If we set the new threshold to the previous
  // getSizeThresholdToFlushSegment(), we might end up using a lot more memory than required for the segment.
  // Using a minor bump strategy until the time threshold becomes adjustable: only slightly bump the threshold
  // based on numRowsConsumed.
  if (numRowsConsumed < numRowsThreshold && committingSegmentSizeBytes < desiredSegmentSizeBytes) {
    final long timeThresholdMillis = streamConfig.getFlushThresholdTimeMillis();
    long currentNumRows = numRowsConsumed;
    StringBuilder logStringBuilder = new StringBuilder().append("Time threshold reached. ");
    if (timeThresholdMillis < timeConsumed) {
      // The administrator has reduced the time threshold. Adjust the
      // number of rows to match the average consumption rate on the partition.
      currentNumRows = timeThresholdMillis * numRowsConsumed / timeConsumed;
      logStringBuilder.append(" Detected lower time threshold, adjusting numRowsConsumed to ").append(currentNumRows)
          .append(". ");
    }
    long targetSegmentNumRows = (long) (currentNumRows * ROWS_MULTIPLIER_WHEN_TIME_THRESHOLD_HIT);
    targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
    logStringBuilder.append("Setting segment size for {} as {}");
    SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(logStringBuilder.toString(), newSegmentName,
        targetSegmentNumRows);
    return (int) targetSegmentNumRows;
  }
  long targetSegmentNumRows;
  if (committingSegmentSizeBytes < optimalSegmentSizeBytesMin) {
    // Segment too small: grow the row target by 50%.
    targetSegmentNumRows = numRowsConsumed + numRowsConsumed / 2;
  } else if (committingSegmentSizeBytes > optimalSegmentSizeBytesMax) {
    // Segment too large: halve the row target.
    targetSegmentNumRows = numRowsConsumed / 2;
  } else {
    // Within the band: derive rows from the desired size and the learned ratio.
    if (_latestSegmentRowsToSizeRatio > 0) {
      targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio);
    } else {
      targetSegmentNumRows = (long) (desiredSegmentSizeBytes * currentRatio);
    }
  }
  targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
  SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
      "Committing segment size {}, current ratio {}, setting threshold for {} as {}",
      committingSegmentSizeBytes, _latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows);
  return (int) targetSegmentNumRows;
}
/**
 * When the configured time threshold (1h) is below the actual consumption time (2h),
 * the row count is first scaled to the time budget (30000 * 1h/2h = 15000) and then
 * multiplied by the time-threshold bump factor (1.1) -> 16500.
 */
@Test
public void testApplyMultiplierToAdjustedTotalDocsWhenTimeThresholdIsReached() {
    long currentTime = 1640216032391L;
    // Fixed clock makes the elapsed-time computation deterministic.
    Clock clock = Clock.fixed(java.time.Instant.ofEpochMilli(currentTime), ZoneId.of("UTC"));
    SegmentFlushThresholdComputer computer = new SegmentFlushThresholdComputer(clock);
    StreamConfig streamConfig = mock(StreamConfig.class);
    when(streamConfig.getFlushThresholdSegmentSizeBytes()).thenReturn(300_0000L);
    when(streamConfig.getFlushThresholdTimeMillis()).thenReturn(MILLISECONDS.convert(1, TimeUnit.HOURS));
    CommittingSegmentDescriptor committingSegmentDescriptor = mock(CommittingSegmentDescriptor.class);
    when(committingSegmentDescriptor.getSegmentSizeBytes()).thenReturn(200_0000L);
    SegmentZKMetadata committingSegmentZKMetadata = mock(SegmentZKMetadata.class);
    when(committingSegmentZKMetadata.getTotalDocs()).thenReturn(30_000L);
    when(committingSegmentZKMetadata.getSizeThresholdToFlushSegment()).thenReturn(60_000);
    when(committingSegmentZKMetadata.getCreationTime()).thenReturn(
            currentTime - MILLISECONDS.convert(2, TimeUnit.HOURS));
    int threshold = computer.computeThreshold(streamConfig, committingSegmentDescriptor,
            committingSegmentZKMetadata, "events3__0__0__20211222T1646Z");
    // (totalDocs / 2) * 1.1
    // (30000 / 2) * 1.1
    // 15000 * 1.1
    assertEquals(threshold, 16_500);
}
/**
 * Builds a human-readable audit message for a batch plugin change:
 * "the plugins[name1,name2,...] is &lt;event-type&gt;".
 */
@Override
public String buildContext() {
    final Collection<?> source = (Collection<?>) getSource();
    final String plugins = source.stream()
            .map(item -> ((PluginDO) item).getName())
            .collect(Collectors.joining(","));
    final String action = StringUtils.lowerCase(getType().getType().toString());
    return String.format("the plugins[%s] is %s", plugins, action);
}
/** Two changed plugins must be joined with "," and the event type lower-cased. */
@Test
public void batchChangePluginBuildContextTest() {
    String context = String.format("the plugins[%s] is %s",
            "test-plugin,test-plugin-two", EventTypeEnum.PLUGIN_UPDATE.getType().toString().toLowerCase());
    assertEquals(context, changedEvent.buildContext());
}
/**
 * Registers the Spring Cloud plugin-data handler bean.
 * The DiscoveryClient dependency is optional: {@code getIfAvailable()} yields
 * null when no discovery client is on the classpath, and the handler is
 * expected to cope with that.
 */
@Bean
public PluginDataHandler springCloudPluginDataHandler(final ObjectProvider<DiscoveryClient> discoveryClient,
                                                      final ShenyuConfig shenyuConfig) {
    return new SpringCloudPluginDataHandler(discoveryClient.getIfAvailable(), shenyuConfig.getSpringCloudCache());
}
/** The springCloudPluginDataHandler bean must be registered in the context. */
@Test
public void testSpringCloudPluginDataHandler() {
    applicationContextRunner.run(context -> {
            PluginDataHandler handler = context.getBean("springCloudPluginDataHandler", PluginDataHandler.class);
            assertNotNull(handler);
        }
    );
}
/**
 * Transactional try-set: sets the bucket value only when no value is currently
 * present, recording the operation for replay at commit time.
 *
 * Local transaction state shadows the store:
 * - state == NULL sentinel -> the tx already observed "no value": set succeeds locally;
 * - state is any other non-null value -> a value exists in the tx: set fails;
 * - state == null -> not yet read in this tx: fetch the current value, then decide.
 *
 * @return a future resolving to true iff the value was set
 */
private RFuture<Boolean> trySet(V newValue, BucketTrySetOperation operation) {
    checkState();
    return executeLocked(() -> {
        if (state != null) {
            operations.add(operation);
            if (state == NULL) {
                // Known-empty inside this tx: accept the new value
                // (NULL sentinel again if newValue itself is null).
                state = Optional.ofNullable((Object) newValue).orElse(NULL);
                hasExpiration = operation.getTimeUnit() != null;
                return CompletableFuture.completedFuture(true);
            } else {
                return CompletableFuture.completedFuture(false);
            }
        }
        // First access in this tx: consult the underlying store.
        return getAsync().thenApply(res -> {
            operations.add(operation);
            if (res == null) {
                hasExpiration = operation.getTimeUnit() != null;
                state = Optional.ofNullable((Object) newValue).orElse(NULL);
                return true;
            }
            return false;
        });
    });
}
/**
 * trySet must fail against an existing value, succeed after an in-tx delete,
 * then fail again against the value the tx itself set; commit applies the result.
 */
@Test
public void testTrySet() {
    RBucket<String> b = redisson.getBucket("test");
    b.set("123");
    RTransaction transaction = redisson.createTransaction(TransactionOptions.defaults());
    RBucket<String> bucket = transaction.getBucket("test");
    assertThat(bucket.trySet("0")).isFalse();
    assertThat(bucket.delete()).isTrue();
    assertThat(bucket.trySet("324")).isTrue();
    assertThat(bucket.trySet("43")).isFalse();
    transaction.commit();
    assertThat(redisson.getKeys().count()).isEqualTo(1);
    assertThat(b.get()).isEqualTo("324");
}
/**
 * Writes the value as a 4-byte IEEE 754 float in PostgreSQL binary format.
 * The value is round-tripped through toString/parseFloat, so any numeric type
 * or numeric string is accepted; non-numeric input fails with NumberFormatException.
 */
@Override
public void write(final PostgreSQLPacketPayload payload, final Object value) {
    payload.getByteBuf().writeFloat(Float.parseFloat(value.toString()));
}
/** Writing the boxed float 1F must emit writeFloat(1.0F) on the underlying buffer. */
@Test
void assertWrite() {
    new PostgreSQLFloatBinaryProtocolValue().write(new PostgreSQLPacketPayload(byteBuf, StandardCharsets.UTF_8), 1F);
    verify(byteBuf).writeFloat(1.0F);
}
@SuppressWarnings("ConstantConditions") public boolean addOrReplaceAction(@NonNull Action a) { if (a == null) { throw new IllegalArgumentException("Action must be non-null"); } // CopyOnWriteArrayList does not support Iterator.remove, so need to do it this way: List<Action> old = new ArrayList<>(1); List<Action> current = getActions(); boolean found = false; for (Action a2 : current) { if (!found && a.equals(a2)) { found = true; } else if (a2.getClass() == a.getClass()) { old.add(a2); } } current.removeAll(old); if (!found) { addAction(a); } return !found || !old.isEmpty(); }
/** A null action must be rejected with IllegalArgumentException. */
@Test
public void addOrReplaceAction_null() {
    assertThrows(IllegalArgumentException.class, () -> thing.addOrReplaceAction(null));
}
/**
 * Returns a lower-cased copy of the given string, using the default-locale
 * case-mapping rules of {@link String#toLowerCase()}.
 */
String lowercase(String string) {
    return string.toLowerCase();
}
/** An all-caps ASCII string must come back fully lower-cased. */
@Test
void test() {
    FullyCovered fullyCovered = new FullyCovered();
    String string = fullyCovered.lowercase("THIS IS A STRING");
    assertThat(string).isEqualTo("this is a string");
}
/**
 * Reports whether this session window overlaps the other (closed intervals:
 * touching endpoints count as overlap).
 *
 * @throws IllegalArgumentException if {@code other} is not a SessionWindow
 */
public boolean overlap(final Window other) throws IllegalArgumentException {
    if (getClass() != other.getClass()) {
        throw new IllegalArgumentException("Cannot compare windows of different type. Other window has type "
            + other.getClass() + ".");
    }
    final SessionWindow that = (SessionWindow) other;
    // Two closed intervals overlap iff each starts no later than the other ends.
    return that.endMs >= startMs && that.startMs <= endMs;
}
/** Windows starting strictly after this window's end must not overlap. */
@Test
public void shouldNotOverlapIsOtherWindowIsAfterThisWindow() {
    /*
     * This:        [-------]
     * Other:                  [---]
     */
    assertFalse(window.overlap(new SessionWindow(end + 1, end + 1)));
    assertFalse(window.overlap(new SessionWindow(end + 1, 150)));
    assertFalse(window.overlap(new SessionWindow(125, 150)));
}
/**
 * Dispatches to the appropriate MurmurHash variant based on the runtime type:
 * null hashes to 0; Long/Integer/Double/Float hash their integral bit
 * representation; String and byte[] hash their bytes; anything else hashes the
 * bytes of its toString().
 *
 * NOTE(review): getBytes() without a charset uses the platform default, so
 * hashes of non-ASCII strings are platform-dependent — confirm whether an
 * explicit charset (e.g. UTF-8) was intended. Changing it now would alter any
 * persisted hash values.
 */
public static int hash(Object o) {
    if (o == null) {
        return 0;
    }
    if (o instanceof Long) {
        return hashLong((Long) o);
    }
    if (o instanceof Integer) {
        return hashLong((Integer) o);
    }
    if (o instanceof Double) {
        return hashLong(Double.doubleToRawLongBits((Double) o));
    }
    if (o instanceof Float) {
        return hashLong(Float.floatToRawIntBits((Float) o));
    }
    if (o instanceof String) {
        return hash(((String) o).getBytes());
    }
    if (o instanceof byte[]) {
        return hash((byte[]) o);
    }
    return hash(o.toString());
}
/** String, byte[], and Object dispatch paths must agree for the same input bytes. */
@Test
public void testHashByteArrayOverload() {
    String input = "hashthis";
    byte[] inputBytes = input.getBytes();
    int hashOfString = MurmurHash.hash(input);
    assertEquals("MurmurHash.hash(byte[]) did not match MurmurHash.hash(String)",
            hashOfString, MurmurHash.hash(inputBytes));
    // Routed through the Object dispatcher's byte[] branch.
    Object bytesAsObject = inputBytes;
    assertEquals("MurmurHash.hash(Object) given a byte[] did not match MurmurHash.hash(String)",
            hashOfString, MurmurHash.hash(bytesAsObject));
}
/**
 * Tests whether the whole content matches the given regex (DOTALL mode, so '.'
 * also matches line terminators).
 *
 * @param regex   the pattern; null or empty is treated as match-everything
 * @param content the text to test; null never matches
 * @return true if the content matches
 */
public static boolean isMatch(String regex, CharSequence content) {
    if (content == null) {
        // A null input string never matches.
        return false;
    }
    if (StrUtil.isEmpty(regex)) {
        // An absent/empty regex is treated as matching everything.
        return true;
    }
    // Patterns are cached in PatternPool instead of being compiled per call.
    final Pattern pattern = PatternPool.get(regex, Pattern.DOTALL);
    return isMatch(pattern, content);
}
/** The shared fixture content must match a word+CJK+digits pattern. */
@Test
public void isMatchTest() {
    // Whether the given string matches the given regex (CJK range \u4E00-\u9FFF).
    final boolean isMatch = ReUtil.isMatch("\\w+[\u4E00-\u9FFF]+\\d+", content);
    assertTrue(isMatch);
}
/**
 * Builds the default logging rule configuration from the active SLF4J backend.
 * The backend is detected by class-name string comparison (avoids a reference
 * to the Logback class before we know it is the bound backend); when Logback
 * is not bound, an empty configuration is returned.
 */
@Override
public LoggingRuleConfiguration build() {
    ILoggerFactory iLoggerFactory = LoggerFactory.getILoggerFactory();
    if ("ch.qos.logback.classic.LoggerContext".equals(iLoggerFactory.getClass().getName())) {
        LoggerContext loggerContext = (LoggerContext) iLoggerFactory;
        return new LoggingRuleConfiguration(getDefaultLoggers(loggerContext), getDefaultAppenders(loggerContext));
    }
    return new LoggingRuleConfiguration(Collections.emptyList(), Collections.emptySet());
}
/** Default build must discover 4 loggers and 1 appender from the Logback context. */
@Test
void assertBuild() {
    LoggingRuleConfiguration actual = new DefaultLoggingRuleConfigurationBuilder().build();
    assertThat(actual.getLoggers().size(), is(4));
    assertThat(actual.getAppenders().size(), is(1));
}
/**
 * Builds a compiled execution for this pipeline with the default flag value
 * (false) — NOTE(review): the flag's meaning is defined by the boolean-taking
 * overload, which is not visible here; confirm its semantics before relying on it.
 */
public CompiledPipeline.CompiledExecution buildExecution() {
    return buildExecution(false);
}
@Test @SuppressWarnings({"unchecked", "rawtypes"}) public void compilerBenchmark() throws Exception { final PipelineIR baselinePipelineIR = createPipelineIR(200); final PipelineIR testPipelineIR = createPipelineIR(400); final JrubyEventExtLibrary.RubyEvent testEvent = JrubyEventExtLibrary.RubyEvent.newRubyEvent(RubyUtil.RUBY, new Event()); final FixedPluginFactory pluginFactory = new FixedPluginFactory( () -> null, () -> IDENTITY_FILTER, mockOutputSupplier() ); final CompiledPipeline baselineCompiledPipeline = new CompiledPipeline(baselinePipelineIR, pluginFactory); final CompiledPipeline testCompiledPipeline = new CompiledPipeline(testPipelineIR, pluginFactory); final long compilationBaseline = time(ChronoUnit.MILLIS, () -> { final CompiledPipeline.CompiledExecution compiledExecution = baselineCompiledPipeline.buildExecution(); compiledExecution.compute(RubyUtil.RUBY.newArray(testEvent), false, false); }); final long compilationTest = time(ChronoUnit.MILLIS, () -> { final CompiledPipeline.CompiledExecution compiledExecution = testCompiledPipeline.buildExecution(); compiledExecution.compute(RubyUtil.RUBY.newArray(testEvent), false, false); }); // sanity checks final Collection<JrubyEventExtLibrary.RubyEvent> outputEvents = EVENT_SINKS.get(runId); MatcherAssert.assertThat(outputEvents.size(), CoreMatchers.is(2)); MatcherAssert.assertThat(outputEvents.contains(testEvent), CoreMatchers.is(true)); // regression check final String testMessage = "regression in pipeline compilation, doubling the filters require more than 5 " + "time, baseline: " + compilationBaseline + " secs, test: " + compilationTest + " secs"; assertTrue(testMessage, compilationTest/compilationBaseline <= 5); }
/**
 * Sets the invoker listener name for the interface being built.
 *
 * @param listener listener extension name
 * @return this builder (fluent API)
 */
public B listener(String listener) {
    this.listener = listener;
    return getThis();
}
/** The listener name set on the builder must appear on the built config. */
@Test
void listener() {
    InterfaceBuilder builder = new InterfaceBuilder();
    builder.listener("mockinvokerlistener");
    Assertions.assertEquals("mockinvokerlistener", builder.build().getListener());
}
public void updateNodeResource(RMNode nm, ResourceOption resourceOption) { writeLock.lock(); try { SchedulerNode node = getSchedulerNode(nm.getNodeID()); if (node == null) { LOG.info("Node: " + nm.getNodeID() + " has already been taken out of " + "scheduling. Skip updating its resource"); return; } Resource newResource = resourceOption.getResource(); final int timeout = resourceOption.getOverCommitTimeout(); Resource oldResource = node.getTotalResource(); if (!oldResource.equals(newResource)) { // Notify NodeLabelsManager about this change rmContext.getNodeLabelManager().updateNodeResource(nm.getNodeID(), newResource); // Log resource change LOG.info("Update resource on node: {} from: {}, to: {} in {} ms", node.getNodeName(), oldResource, newResource, timeout); nodeTracker.removeNode(nm.getNodeID()); // update resource to node node.updateTotalResource(newResource); node.setOvercommitTimeOut(timeout); signalContainersIfOvercommitted(node, timeout == 0); nodeTracker.addNode((N) node); } else{ // Log resource change LOG.warn("Update resource on node: " + node.getNodeName() + " with the same resource: " + newResource); } } finally { writeLock.unlock(); } }
/**
 * The scheduler's effective maximum allocation must track the largest node as
 * nodes are added and their resources are increased/decreased via
 * updateNodeResource, capped by the configured maximum.
 */
@Test
public void testMaxAllocationAfterUpdateNodeResource() throws IOException {
    final int configuredMaxVCores = 20;
    final int configuredMaxMemory = 10 * 1024;
    Resource configuredMaximumResource = Resource.newInstance
            (configuredMaxMemory, configuredMaxVCores);
    YarnConfiguration conf = getConf();
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES,
            configuredMaxVCores);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
            configuredMaxMemory);
    conf.setLong(
            YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS,
            0);
    MockRM rm = new MockRM(conf);
    try {
        rm.start();
        AbstractYarnScheduler scheduler = (AbstractYarnScheduler) rm
                .getResourceScheduler();
        // No nodes yet: the configured maximum applies.
        verifyMaximumResourceCapability(configuredMaximumResource, scheduler);
        Resource resource1 = Resource.newInstance(2048, 5);
        Resource resource2 = Resource.newInstance(4096, 10);
        Resource resource3 = Resource.newInstance(512, 1);
        Resource resource4 = Resource.newInstance(1024, 2);
        RMNode node1 = MockNodes.newNodeInfo(
                0, resource1, 1, "127.0.0.2");
        scheduler.handle(new NodeAddedSchedulerEvent(node1));
        RMNode node2 = MockNodes.newNodeInfo(
                0, resource3, 2, "127.0.0.3");
        scheduler.handle(new NodeAddedSchedulerEvent(node2));
        // Maximum follows the largest node (node1).
        verifyMaximumResourceCapability(resource1, scheduler);
        // increase node1 resource
        scheduler.updateNodeResource(node1, ResourceOption.newInstance(
                resource2, 0));
        verifyMaximumResourceCapability(resource2, scheduler);
        // decrease node1 resource
        scheduler.updateNodeResource(node1, ResourceOption.newInstance(
                resource1, 0));
        verifyMaximumResourceCapability(resource1, scheduler);
        // increase node2 resource
        scheduler.updateNodeResource(node2, ResourceOption.newInstance(
                resource4, 0));
        verifyMaximumResourceCapability(resource1, scheduler);
        // decrease node2 resource
        scheduler.updateNodeResource(node2, ResourceOption.newInstance(
                resource3, 0));
        verifyMaximumResourceCapability(resource1, scheduler);
    } finally {
        rm.stop();
    }
}
// Thin pass-through to the superclass implementation.
// NOTE(review): adds no behavior on top of super — presumably kept to widen
// visibility or to expose the method for testing; confirm it is still needed.
@Override
public boolean registerAllRequestsProcessedListener(NotificationListener listener) throws IOException {
    return super.registerAllRequestsProcessedListener(listener);
}
// Stress-tests the race between registering an all-requests-processed
// listener and the final request completing: a successful subscription must
// be notified exactly once; a failed subscription must never be notified.
@Test
void testConcurrentSubscribeAndHandleRequest() throws Exception {
    final ExecutorService executor = Executors.newFixedThreadPool(2);
    final TestNotificationListener listener = new TestNotificationListener();
    final Callable<Boolean> subscriber = () -> writer.registerAllRequestsProcessedListener(listener);
    final Callable<Void> requestHandler = () -> {
        handleRequest();
        return null;
    };
    try {
        // Repeat this to provoke races
        for (int i = 0; i < 50000; i++) {
            listener.reset();
            addRequest();
            Future<Void> handleRequestFuture = executor.submit(requestHandler);
            Future<Boolean> subscribeFuture = executor.submit(subscriber);
            handleRequestFuture.get();
            boolean subscribed = subscribeFuture.get();
            assertThat(listener.getNumberOfNotifications())
                .withFailMessage(subscribed
                    ? "Race: Successfully subscribed, but was never notified."
                    : "Race: Never subscribed successfully, but was notified.")
                .isEqualTo(subscribed ? 1 : 0);
        }
    } finally {
        executor.shutdownNow();
    }
}
/**
 * Returns {@code true} when {@code obj} is a {@link VersionNumber} that
 * compares strictly less than this version. Any non-VersionNumber argument
 * (including {@code null}) yields {@code false}.
 */
public boolean isOlderThan(Object obj) {
    // Guard clause: only VersionNumber instances are comparable.
    if (!(obj instanceof VersionNumber)) {
        return false;
    }
    final VersionNumber other = (VersionNumber) obj;
    return compareTo(other) < 0;
}
// Covers ordering across major/minor/patch digits (including multi-digit
// components), pre-release qualifiers (alpha/beta), underscore build suffixes,
// and the reflexive/inverse cases of isOlderThan.
@Test
void testIsOlderThan() {
    assertThat(v("5.0.0").isOlderThan(v("6.0.0"))).isTrue();
    assertThat(v("9.0.0").isOlderThan(v("10.0.0"))).isTrue();
    assertThat(v("1.0.0").isOlderThan(v("10.0.0"))).isTrue();
    assertThat(v("5.0.0").isOlderThan(v("5.0.1"))).isTrue();
    assertThat(v("6.0.0").isOlderThan(v("7.0.0-beta.2"))).isTrue();
    assertThat(v("7.0.0-alpha.1").isOlderThan(v("7.0.0-beta.1"))).isTrue();
    assertThat(v("7.0.0-beta.2").isOlderThan(v("7.0.0-beta.3"))).isTrue();
    assertThat(v("1.8.0_191").isOlderThan(v("1.8"))).isFalse();
    assertThat(v("6.0.0").isOlderThan(v("6.0.0"))).isFalse();
    assertThat(v("6.0.0").isOlderThan(v("5.0.1"))).isFalse();
    assertThat(v("10.0.0").isOlderThan(v("1.0.0"))).isFalse();
    assertThat(v("10.0.0").isOlderThan(v("9.0.0"))).isFalse();
}
/**
 * Registers the {@link ShenyuLoaderService} bean, wiring the web handler,
 * the plugin data subscriber and the global Shenyu configuration.
 */
@Bean
public ShenyuLoaderService shenyuLoaderService(final ShenyuWebHandler shenyuWebHandler, final PluginDataSubscriber pluginDataSubscriber, final ShenyuConfig config) {
    // The parameter is declared as the interface, but the loader requires the
    // CommonPluginDataSubscriber implementation; the cast fails fast if a
    // different implementation is registered in the context.
    return new ShenyuLoaderService(shenyuWebHandler, (CommonPluginDataSubscriber) pluginDataSubscriber, config);
}
// The auto-configuration must expose a bean named "shenyuLoaderService".
@Test
public void testShenyuLoaderService() {
    applicationContextRunner.run(context -> {
            ShenyuLoaderService service = context.getBean("shenyuLoaderService", ShenyuLoaderService.class);
            assertNotNull(service);
        }
    );
}
/**
 * Builds an immutable snapshot of the given prefixes.
 * A {@code null} input is treated the same as an empty prefix list.
 */
public PrefixList(List<String> prefixList) {
    mInnerList = (prefixList == null)
        ? ImmutableList.<String>of()
        : ImmutableList.copyOf(prefixList);
}
// inList must match any string that starts with a configured prefix; outList
// is its complement. Strings shorter than every prefix, the empty string and
// null are all "out of list".
@Test
public void prefixList() {
    PrefixList prefixList = new PrefixList(ImmutableList.of("test", "apple", "sun"));
    assertTrue(prefixList.inList("test"));
    assertTrue(prefixList.inList("apple"));
    assertTrue(prefixList.inList("sun"));
    assertTrue(prefixList.inList("test123"));
    assertTrue(prefixList.inList("testing-1012"));
    assertTrue(prefixList.inList("apple12nmzx91l"));
    assertTrue(prefixList.inList("sunn1i2080-40mx"));
    assertFalse(prefixList.outList("test123"));
    assertFalse(prefixList.outList("testing-1012"));
    assertFalse(prefixList.outList("apple12nmzx91l"));
    assertFalse(prefixList.outList("sunn1i2080-40mx"));
    assertTrue(prefixList.outList("tes"));
    assertTrue(prefixList.outList("a"));
    assertTrue(prefixList.outList("s"));
    assertTrue(prefixList.outList("su"));
    assertTrue(prefixList.outList("ap"));
    assertTrue(prefixList.outList(""));
    assertTrue(prefixList.outList(null));
}
/**
 * Renders per-column partition domains as a Glue partition filter expression
 * (conjuncts joined with CONJUNCT_SEPARATOR).
 *
 * A column is skipped when its name is a reserved keyword of the SQL parser
 * Glue uses internally, when its domain is null or unconstrained ("all"), or
 * when appending its conjunct would push the expression past
 * GLUE_EXPRESSION_CHAR_LIMIT — presumably the caller re-filters the extra
 * partitions client-side; confirm at the call sites.
 */
public static String buildGlueExpression(Map<Column, Domain> partitionPredicates) {
    List<String> perColumnExpressions = new ArrayList<>();
    int expressionLength = 0;
    for (Map.Entry<Column, Domain> partitionPredicate : partitionPredicates.entrySet()) {
        String columnName = partitionPredicate.getKey().getName();
        if (JSQL_PARSER_RESERVED_KEYWORDS.contains(columnName.toUpperCase(ENGLISH))) {
            // The column name is a reserved keyword in the grammar of the SQL parser used internally by Glue API
            continue;
        }
        Domain domain = partitionPredicate.getValue();
        if (domain != null && !domain.isAll()) {
            Optional<String> columnExpression = buildGlueExpressionForSingleDomain(columnName, domain);
            if (columnExpression.isPresent()) {
                int newExpressionLength = expressionLength + columnExpression.get().length();
                if (expressionLength > 0) {
                    // Account for the separator that joining will insert.
                    newExpressionLength += CONJUNCT_SEPARATOR.length();
                }
                if (newExpressionLength > GLUE_EXPRESSION_CHAR_LIMIT) {
                    // Drop this conjunct rather than produce an expression Glue would reject.
                    continue;
                }
                perColumnExpressions.add((columnExpression.get()));
                expressionLength = newExpressionLength;
            }
        }
    }
    return Joiner.on(CONJUNCT_SEPARATOR).join(perColumnExpressions);
}
// An integer predicate holding a smallint-ranged value must render as a
// simple equality conjunct in the Glue expression.
@Test
public void testSmallintConversion() {
    Map<Column, Domain> predicates = new PartitionFilterBuilder(HIVE_TYPE_TRANSLATOR)
        .addIntegerValues("col1", Long.valueOf(Short.MAX_VALUE))
        .build();
    String expression = buildGlueExpression(predicates);
    assertEquals(expression, format("((col1 = %d))", Short.MAX_VALUE));
}
@VisibleForTesting static Object convertAvroField(Object avroValue, Schema schema) { if (avroValue == null) { return null; } switch (schema.getType()) { case NULL: case INT: case LONG: case DOUBLE: case FLOAT: case BOOLEAN: return avroValue; case ENUM: case STRING: return avroValue.toString(); // can be a String or org.apache.avro.util.Utf8 case UNION: for (Schema s : schema.getTypes()) { if (s.getType() == Schema.Type.NULL) { continue; } return convertAvroField(avroValue, s); } throw new IllegalArgumentException("Found UNION schema but it doesn't contain any type"); case ARRAY: case BYTES: case FIXED: case RECORD: case MAP: default: throw new UnsupportedOperationException("Unsupported avro schema type=" + schema.getType() + " for value field schema " + schema.getName()); } }
// DOUBLE avro values must pass through conversion unchanged.
@Test
public void testConvertAvroDouble() {
    Object converted = BaseJdbcAutoSchemaSink.convertAvroField(Double.MIN_VALUE,
        createFieldAndGetSchema((builder) -> builder.name("field").type().doubleType().noDefault()));
    Assert.assertEquals(converted, Double.MIN_VALUE);
}
/**
 * Refreshes the cached self tags (amory, vipserver, location) from the given
 * response headers. For each tag: a missing header clears the cached value,
 * a present header replaces it with the comma-joined header values; a WARN
 * line is emitted only when the cached value actually changes.
 * A null headers map leaves all cached tags untouched.
 */
public static void setSelfEnv(Map<String, List<String>> headers) {
    if (headers == null) {
        return;
    }
    // The three tags follow identical update rules; the original triplicated
    // this logic inline, which is now factored into updateTagValue.
    selfAmoryTag = updateTagValue(headers.get(Constants.AMORY_TAG), selfAmoryTag, "selfAmoryTag");
    selfVipserverTag = updateTagValue(headers.get(Constants.VIPSERVER_TAG), selfVipserverTag, "selfVipserverTag");
    selfLocationTag = updateTagValue(headers.get(Constants.LOCATION_TAG), selfLocationTag, "selfLocationTag");
}

/**
 * Computes the new cached value for one tag and logs transitions.
 *
 * @param header  raw header values for the tag, or null when the header is absent
 * @param current currently cached tag value (may be null)
 * @param tagName field name used in the log line (e.g. "selfAmoryTag")
 * @return the value to cache: null when the header is absent, otherwise the
 *         comma-joined header string (unchanged values are returned as-is)
 */
private static String updateTagValue(List<String> header, String current, String tagName) {
    if (header == null) {
        if (current != null) {
            // Tag disappeared from the headers: clear it and log the transition.
            LOGGER.warn("{}:null", tagName);
        }
        return null;
    }
    String joined = listToString(header);
    if (!Objects.equals(joined, current)) {
        LOGGER.warn("{}:{}", tagName, joined);
        return joined;
    }
    return current;
}
// setSelfEnv must join multi-valued headers with commas, and clear each
// cached tag again when the corresponding header list is empty.
@Test
void testSetSelfEnv() {
    Map<String, List<String>> headers = new HashMap<>();
    headers.put(Constants.AMORY_TAG, Arrays.asList("a", "1"));
    headers.put(Constants.VIPSERVER_TAG, Arrays.asList("b", "2"));
    headers.put(Constants.LOCATION_TAG, Arrays.asList("c", "3"));
    EnvUtil.setSelfEnv(headers);
    assertEquals("a,1", EnvUtil.getSelfAmoryTag());
    assertEquals("b,2", EnvUtil.getSelfVipserverTag());
    assertEquals("c,3", EnvUtil.getSelfLocationTag());
    // reset by empty list
    headers.put(Constants.AMORY_TAG, Collections.emptyList());
    headers.put(Constants.VIPSERVER_TAG, Collections.emptyList());
    headers.put(Constants.LOCATION_TAG, Collections.emptyList());
    EnvUtil.setSelfEnv(headers);
    assertNull(EnvUtil.getSelfAmoryTag());
    assertNull(EnvUtil.getSelfVipserverTag());
    assertNull(EnvUtil.getSelfLocationTag());
}
/**
 * Resolves a node id to the node's advertised name.
 *
 * @return the node name, or empty when the node is unknown or has no
 *         {@code name} field.
 */
@Override
public Optional<String> nodeIdToName(String nodeId) {
    // JsonNode.get returns null for a missing field; the original chained
    // .get("name").asText() directly and would NPE in that case. Optional.map
    // treats a null mapping result as empty, so splitting the chain makes a
    // missing "name" yield Optional.empty() instead of throwing.
    return nodeById(nodeId)
            .map(jsonNode -> jsonNode.get("name"))
            .map(nameNode -> nameNode.asText());
}
// nodeIdToName should resolve a known node id to its advertised name.
@Test
void returnsNameForNodeId() throws Exception {
    mockNodesResponse();
    assertThat(this.clusterAdapter.nodeIdToName(nodeId)).isNotEmpty()
        .contains("es02");
}
/**
 * Returns the subset of parameter-qualifying views that the calling user is
 * allowed to read.
 */
@POST
@ApiOperation("Get all views that match given parameter value")
@NoAuditEvent("Only returning matching views, not changing any data")
public Collection<ViewParameterSummaryDTO> forParameter(@Context SearchUser searchUser) {
    // Fetch all candidate views first, then keep only those readable by the caller.
    final Collection<ViewParameterSummaryDTO> candidates = qualifyingViewsService.forValue();
    return candidates.stream()
            .filter(view -> searchUser.canReadView(view))
            .collect(Collectors.toSet());
}
// When the user can read every qualifying view, all of them must be returned.
@Test
public void returnsAllViewsIfAllArePermitted() {
    final SearchUser searchUser = TestSearchUser.builder()
        .allowView("view1")
        .allowView("view2")
        .build();
    final QualifyingViewsService service = mockViewsService("view1", "view2");
    final QualifyingViewsResource resource = new QualifyingViewsResource(service);
    final Collection<ViewParameterSummaryDTO> result = resource.forParameter(searchUser);
    assertThat(result)
        .hasSize(2)
        .extracting(ViewParameterSummaryDTO::id)
        .containsOnly("view1", "view2");
}
/**
 * Fetches the earliest available offsets for the given partitions, delegating
 * to the timed overload with the consumer's default API timeout.
 */
@Override
public Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions) {
    return beginningOffsets(partitions, Duration.ofMillis(defaultApiTimeoutMs));
}
// Failures raised while processing the ListOffsets event must surface from
// beginningOffsets as the same KafkaException instance.
@Test
public void testBeginningOffsetsThrowsKafkaExceptionForUnderlyingExecutionFailure() {
    consumer = newConsumer();
    Set<TopicPartition> partitions = mockTopicPartitionOffset().keySet();
    Throwable eventProcessingFailure = new KafkaException("Unexpected failure " + "processing List Offsets event");
    doThrow(eventProcessingFailure).when(applicationEventHandler).addAndGet(any(ListOffsetsEvent.class));
    Throwable consumerError = assertThrows(KafkaException.class,
        () -> consumer.beginningOffsets(partitions, Duration.ofMillis(1)));
    assertEquals(eventProcessingFailure, consumerError);
    verify(applicationEventHandler).addAndGet(ArgumentMatchers.isA(ListOffsetsEvent.class));
}
/**
 * Executes a Bot API request synchronously and returns its typed response.
 * Thin delegate to the underlying transport.
 */
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) {
    return api.send(request);
}
// Sends video notes both from a File and from a byte[] payload; in both cases
// Telegram is expected to regenerate the thumbnail (so its size must differ
// from the uploaded thumb).
@Test
public void sendVideoNoteFile() {
    SendResponse response = bot.execute(new SendVideoNote(chatId, videoNoteFile).thumb(thumbFile).length(20).duration(30));
    VideoNoteCheck.check(response.message().videoNote(), true);
    assertNotEquals("telegram should generate thumb", thumbSize, response.message().videoNote().thumb().fileSize());
    response = bot.execute(new SendVideoNote(chatId, videoNoteBytes).thumb(thumbBytes));
    VideoNoteCheck.check(response.message().videoNote(), true);
    assertNotEquals("telegram should generate thumb", thumbSize, response.message().videoNote().thumb().fileSize());
}
/**
 * FEEL string() conversion: null maps to a null result, every other value is
 * rendered through TypeUtil.formatValue (non-quoted form).
 */
public FEELFnResult<String> invoke(@ParameterName("from") Object val) {
    return val == null
            ? FEELFnResult.ofResult(null)
            : FEELFnResult.ofResult(TypeUtil.formatValue(val, false));
}
// string(time) must render LocalTime values with FEEL's time formatter.
@Test
void invokeLocalTime() {
    final LocalTime localTime = LocalTime.now();
    FunctionTestUtil.assertResult(stringFunction.invoke(localTime), TimeFunction.FEEL_TIME.format(localTime));
}
/**
 * Builds a FederationSearcher component from its XML element: parses the
 * searcher model, then resolves the optional target-selector component
 * declared alongside it.
 */
@Override
protected FederationSearcher doBuild(DeployState deployState, TreeConfigProducer<AnyConfigProducer> ancestor, Element searcherElement) {
    FederationSearcherModel model = new FederationSearcherModelBuilder(searcherElement).build();
    Optional<Component> targetSelector = buildTargetSelector(deployState, ancestor, searcherElement, model.getComponentId());
    return new FederationSearcher(model, targetSelector);
}
// Parses a federation searcher definition and checks the resulting model:
// id, implementing class, number of targets, source-set inheritance and
// the declared source names.
@Test
void ensureCorrectModel() {
    FederationSearcher searcher = new DomFederationSearcherBuilder().doBuild(root.getDeployState(), root, parse(
        "<federation id='theId'>",
        " <provides>p2</provides>",
        " <source-set inherits=\"default\" />",
        " <source id='source1'>",
        " <federationoptions optional='true' />",
        " </source>",
        " <source id='source2' />",
        "</federation>"));
    FederationSearcherModel model = searcher.model;
    assertEquals("theId", model.bundleInstantiationSpec.id.stringValue());
    assertEquals(com.yahoo.search.federation.FederationSearcher.class.getName(), model.bundleInstantiationSpec.classId.stringValue());
    assertEquals(2, model.targets.size());
    assertTrue(model.inheritDefaultSources, "source-set option was ignored");
    assertTrue(targetNames(model.targets).containsAll(List.of("source1", "source2")));
}
/**
 * Adds the key to the backing cache.
 * NOTE(review): this returns cache.putIfAbsent(...) directly as a boolean, so
 * it assumes the cache's putIfAbsent reports a success flag (true only when
 * the key was newly inserted) rather than the previous value — confirm
 * against the cache API in use.
 */
@Override
@ManagedOperation(description = "Adds the key to the store")
public boolean add(String key) {
    return cache.putIfAbsent(key, true);
}
// Fresh keys must be reported as added and must land in the backing cache.
@Test
public void addsNewKeysToCache() {
    assertTrue(repository.add("One"));
    assertTrue(repository.add("Two"));
    assertTrue(cache.containsKey("One"));
    assertTrue(cache.containsKey("Two"));
}
/**
 * Drops an Iceberg table via the Hive metastore client.
 * Returns false for invalid or non-existent identifiers, true on success.
 * With purge, the current table metadata is captured up front so the table's
 * data/metadata files can be removed after the catalog entry is gone; if the
 * metadata cannot be loaded, the drop proceeds without purging.
 */
@Override
public boolean dropTable(TableIdentifier identifier, boolean purge) {
    if (!isValidIdentifier(identifier)) {
        return false;
    }
    String database = identifier.namespace().level(0);
    TableOperations ops = newTableOps(identifier);
    TableMetadata lastMetadata = null;
    if (purge) {
        try {
            // Capture metadata before the catalog entry disappears; it is the
            // only way to locate the files dropTableData must delete.
            lastMetadata = ops.current();
        } catch (NotFoundException e) {
            LOG.warn("Failed to load table metadata for table: {}, continuing drop without purge", identifier, e);
        }
    }
    try {
        clients.run(client -> {
            client.dropTable(database, identifier.name(),
                false /* do not delete data */,
                false /* throw NoSuchObjectException if the table doesn't exist */);
            return null;
        });
        if (purge && lastMetadata != null) {
            CatalogUtil.dropTableData(ops.io(), lastMetadata);
        }
        LOG.info("Dropped table: {}", identifier);
        return true;
    } catch (NoSuchTableException | NoSuchObjectException e) {
        // Not an error: dropping a missing table is reported as "not dropped".
        LOG.info("Skipping drop, table does not exist: {}", identifier, e);
        return false;
    } catch (TException e) {
        throw new RuntimeException("Failed to drop " + identifier, e);
    } catch (InterruptedException e) {
        // Restore the interrupt flag before surfacing the failure.
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted in call to dropTable", e);
    }
}
// Tables created without an explicit sort order must be unsorted (order id 0)
// and must not record a default sort order in the HMS table parameters.
@Test
public void testCreateTableDefaultSortOrder() throws Exception {
    Schema schema = getTestSchema();
    PartitionSpec spec = PartitionSpec.builderFor(schema).bucket("data", 16).build();
    TableIdentifier tableIdent = TableIdentifier.of(DB_NAME, "tbl");
    try {
        Table table = catalog.createTable(tableIdent, schema, spec);
        assertThat(table.sortOrder().orderId()).as("Order ID must match").isEqualTo(0);
        assertThat(table.sortOrder().isUnsorted()).as("Order must unsorted").isTrue();
        assertThat(hmsTableParameters())
            .as("Must not have default sort order in catalog")
            .doesNotContainKey(DEFAULT_SORT_ORDER);
    } finally {
        catalog.dropTable(tableIdent);
    }
}
/**
 * Inserts a key/value pair into the wrapped map, first registering both
 * runtime classes via addToMap — presumably so they can be encoded in the
 * serialized class-id table (see getNewClasses); confirm against the
 * enclosing Writable implementation.
 */
@Override
public Writable put(K key, Writable value) {
    addToMap(key.getClass());
    addToMap(value.getClass());
    return instance.put(key, value);
}
// Copy-constructing SortedMapWritable must carry over the registry of
// non-default ("foreign") value classes: the single UTF8 class registered in
// the source map must survive two rounds of copying.
@Test
@SuppressWarnings("deprecation")
public void testForeignClass() {
    SortedMapWritable<Text> inMap = new SortedMapWritable<Text>();
    inMap.put(new Text("key"), new UTF8("value"));
    inMap.put(new Text("key2"), new UTF8("value2"));
    SortedMapWritable<Text> outMap = new SortedMapWritable<Text>(inMap);
    SortedMapWritable<Text> copyOfCopy = new SortedMapWritable<Text>(outMap);
    assertEquals(1, copyOfCopy.getNewClasses());
}
/**
 * Demo entry point: three TaskGenerator producers (submitting 5, 1 and 2 jobs)
 * and one ServiceExecutor consumer share a MessageQueue, all scheduled on a
 * fixed pool of two threads. The pool is shut down in an orderly fashion and
 * force-terminated if it has not finished within SHUTDOWN_TIME seconds.
 */
public static void main(String[] args) {
    // An Executor that provides methods to manage termination and methods that can
    // produce a Future for tracking progress of one or more asynchronous tasks.
    ExecutorService executor = null;
    try {
        // Create a MessageQueue object.
        var msgQueue = new MessageQueue();
        LOGGER.info("Submitting TaskGenerators and ServiceExecutor threads.");
        // Create three TaskGenerator threads. Each of them will submit different number of jobs.
        final var taskRunnable1 = new TaskGenerator(msgQueue, 5);
        final var taskRunnable2 = new TaskGenerator(msgQueue, 1);
        final var taskRunnable3 = new TaskGenerator(msgQueue, 2);
        // Create a service which should process the submitted jobs.
        final var srvRunnable = new ServiceExecutor(msgQueue);
        // Create a ThreadPool of 2 threads and
        // submit all Runnable task for execution to executor
        executor = Executors.newFixedThreadPool(2);
        executor.submit(taskRunnable1);
        executor.submit(taskRunnable2);
        executor.submit(taskRunnable3);
        // submitting serviceExecutor thread to the Executor service.
        executor.submit(srvRunnable);
        // Initiates an orderly shutdown.
        LOGGER.info("Initiating shutdown." + " Executor will shutdown only after all the Threads are completed.");
        executor.shutdown();
        // Wait for SHUTDOWN_TIME seconds for all the threads to complete
        // their tasks and then shut down the executor and then exit.
        if (!executor.awaitTermination(SHUTDOWN_TIME, TimeUnit.SECONDS)) {
            LOGGER.info("Executor was shut down and Exiting.");
            executor.shutdownNow();
        }
    } catch (Exception e) {
        // Demo code: any failure (including interruption) is just logged.
        LOGGER.error(e.getMessage());
    }
}
// Smoke test: the demo's main must run to completion without throwing.
@Test
void shouldExecuteApplicationWithoutException() {
    assertDoesNotThrow(() -> App.main(new String[]{}));
}
/**
 * Patches the Win32 creation/modification timestamps of a remote file via the
 * EUE resource API. Absent timestamps in the transfer status are sent as null
 * (left unchanged server-side). API failures are mapped to the session's
 * BackgroundException hierarchy.
 */
@Override
public void setTimestamp(final Path file, final TransferStatus status) throws BackgroundException {
    try {
        final String resourceId = fileid.getFileId(file);
        final ResourceUpdateModel resourceUpdateModel = new ResourceUpdateModel();
        final ResourceUpdateModelUpdate resourceUpdateModelUpdate = new ResourceUpdateModelUpdate();
        final UiWin32 uiWin32 = new UiWin32();
        // The original guarded each setter with `x != null ? x : null`, which
        // is a no-op; pass the (possibly null) timestamps straight through.
        uiWin32.setCreationMillis(status.getCreated());
        uiWin32.setLastModificationMillis(status.getModified());
        resourceUpdateModelUpdate.setUiwin32(uiWin32);
        resourceUpdateModel.setUpdate(resourceUpdateModelUpdate);
        new UpdateResourceApi(new EueApiClient(session)).resourceResourceIdPatch(resourceId, resourceUpdateModel, null, null, null);
    }
    catch(ApiException e) {
        throw new EueExceptionMappingService().map("Failure to write attributes of {0}", e, file);
    }
}
// Directory timestamp semantics: an explicitly set modification date on a
// folder sticks; touching an empty file inside does not disturb it; but
// uploading file content does update the folder's date, while the parent
// container's date stays unchanged throughout.
@Test
public void testSetTimestampDirectory() throws Exception {
    final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
    final Path container = new EueDirectoryFeature(session, fileid).mkdir(new Path(
        new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final long containerModification = new EueAttributesFinderFeature(session, fileid).find(container).getModificationDate();
    final Path folder = new EueDirectoryFeature(session, fileid).mkdir(new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), null);
    final long folderModification = new EueAttributesFinderFeature(session, fileid).find(folder).getModificationDate();
    assertNotNull(new EueAttributesFinderFeature(session, fileid).find(folder));
    // Five days in the past, truncated to whole seconds (server granularity).
    final long modified = Instant.now().minusSeconds(5 * 24 * 60 * 60).getEpochSecond() * 1000;
    new EueTimestampFeature(session, fileid).setTimestamp(folder, modified);
    assertEquals(modified, new EueAttributesFinderFeature(session, fileid).find(folder).getModificationDate());
    // Write file to directory and see if timestamp changes
    final Path file = new EueTouchFeature(session, fileid).touch(new Path(folder, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    assertEquals(modified, new EueAttributesFinderFeature(session, fileid).find(folder).getModificationDate());
    assertEquals(containerModification, new EueAttributesFinderFeature(session, fileid).find(container).getModificationDate());
    final byte[] content = RandomUtils.nextBytes(8235);
    final long ts = System.currentTimeMillis();
    final TransferStatus status = new TransferStatus().withLength(content.length).withModified(ts);
    final Checksum checksum = new EueWriteFeature(session, fileid).checksum(file, status).compute(new ByteArrayInputStream(content), new TransferStatus().withLength(content.length));
    status.withChecksum(checksum);
    final HttpResponseOutputStream<EueWriteFeature.Chunk> out = new EueWriteFeature(session, fileid).write(file, status.exists(true), new DisabledConnectionCallback());
    final ByteArrayInputStream in = new ByteArrayInputStream(content);
    final TransferStatus progress = new TransferStatus();
    final BytecountStreamListener count = new BytecountStreamListener();
    new StreamCopier(new TransferStatus(), progress).withListener(count).transfer(in, out);
    assertEquals(containerModification, new EueAttributesFinderFeature(session, fileid).find(container).getModificationDate());
    assertNotEquals(folderModification, new EueAttributesFinderFeature(session, fileid).find(folder).getModificationDate());
    new EueDeleteFeature(session, fileid).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Convenience factory: a configuration matching exactly one topic name with
 * the given consumer priority level.
 */
public static TopicConsumerConfigurationData ofTopicName(@NonNull String topicName, int priorityLevel) {
    return of(new TopicNameMatcher.TopicName(topicName), priorityLevel);
}
// ofTopicName must produce a matcher that matches the exact topic name and
// carry the requested priority level.
@Test
public void testOfFactoryMethod() {
    TopicConsumerConfigurationData topicConsumerConfigurationData = TopicConsumerConfigurationData
        .ofTopicName("foo", 1);
    assertThat(topicConsumerConfigurationData.getTopicNameMatcher().matches("foo")).isTrue();
    assertThat(topicConsumerConfigurationData.getPriorityLevel()).isEqualTo(1);
}
/**
 * Normalizes an arbitrary exception into a SQLException for the wire protocol.
 * The instanceof chain is ordered from most to least specific; reordering it
 * would change which conversion wins, so keep the order intact.
 */
public static SQLException toSQLException(final Exception cause, final DatabaseType databaseType) {
    if (cause instanceof SQLException) {
        return (SQLException) cause;
    }
    if (cause instanceof ShardingSphereSQLException) {
        return ((ShardingSphereSQLException) cause).toSQLException();
    }
    if (cause instanceof SQLDialectException) {
        if (cause instanceof DatabaseProtocolException) {
            return new DatabaseProtocolSQLException(cause.getMessage()).toSQLException();
        }
        // Delegate to a dialect-specific mapper when one is registered via SPI.
        Optional<SQLDialectExceptionMapper> dialectExceptionMapper = DatabaseTypedSPILoader.findService(SQLDialectExceptionMapper.class, databaseType);
        if (dialectExceptionMapper.isPresent()) {
            return dialectExceptionMapper.get().convert((SQLDialectException) cause);
        }
        // NOTE(review): a SQLDialectException with no registered mapper falls
        // through to UnknownSQLException below — confirm this is intended.
    }
    if (cause instanceof ShardingSphereServerException) {
        return new ServerSQLException(cause).toSQLException();
    }
    return new UnknownSQLException(cause).toSQLException();
}
// A ShardingSphereSQLException must be converted via its own toSQLException().
@Test
void assertToSQLExceptionWithShardingSphereSQLException() {
    ShardingSphereSQLException cause = mock(ShardingSphereSQLException.class);
    SQLException expected = new SQLException("");
    when(cause.toSQLException()).thenReturn(expected);
    assertThat(SQLExceptionTransformEngine.toSQLException(cause, databaseType), is(expected));
}
/**
 * Publishes all artifacts of a job: pluggable (plugin-backed) artifacts first
 * — their generated metadata folder, when non-empty, is prepended as an extra
 * file artifact — then the built-in file/unit artifacts. Upload failures are
 * collected per plan and reported in a single RuntimeException after every
 * plan has been attempted; the temporary metadata folder is always removed.
 */
public void publishArtifacts(List<ArtifactPlan> artifactPlans, EnvironmentVariableContext environmentVariableContext) {
    final File pluggableArtifactFolder = publishPluggableArtifacts(artifactPlans, environmentVariableContext);
    try {
        final List<ArtifactPlan> mergedPlans = artifactPlanFilter.getBuiltInMergedArtifactPlans(artifactPlans);
        if (isMetadataFolderEmpty(pluggableArtifactFolder)) {
            LOGGER.info("Pluggable metadata folder is empty.");
        } else if (pluggableArtifactFolder != null) {
            // Upload the plugin metadata folder ahead of the regular artifacts.
            mergedPlans.add(0, new ArtifactPlan(ArtifactPlanType.file, format("%s%s*", pluggableArtifactFolder.getName(), File.separator), PLUGGABLE_ARTIFACT_METADATA_FOLDER));
        }
        for (ArtifactPlan artifactPlan : mergedPlans) {
            try {
                artifactPlan.publishBuiltInArtifacts(goPublisher, workingDirectory);
            } catch (Exception e) {
                // Keep uploading the remaining artifacts; failures are aggregated below.
                failedArtifact.add(artifactPlan);
            }
        }
        if (!failedArtifact.isEmpty()) {
            StringBuilder builder = new StringBuilder();
            for (ArtifactPlan artifactPlan : failedArtifact) {
                artifactPlan.printArtifactInfo(builder);
            }
            throw new RuntimeException(format("[%s] Uploading finished. Failed to upload %s.", PRODUCT_NAME, builder));
        }
    } finally {
        FileUtils.deleteQuietly(pluggableArtifactFolder);
    }
}
// After publishing, only the original files plus cruise-output must remain in
// the working folder — i.e. the temporary pluggable-artifact metadata
// directory must have been deleted.
@Test
public void shouldDeletePluggableArtifactMetadataDirectory() throws Exception {
    TestFileUtil.createTestFile(workingFolder, "installer.zip");
    TestFileUtil.createTestFile(workingFolder, "testreports.xml");
    final ArtifactStore artifactStore = new ArtifactStore("s3", "cd.go.s3", create("Foo", false, "Bar"));
    final ArtifactStores artifactStores = new ArtifactStores(artifactStore);
    final ArtifactPlan artifactPlan = new ArtifactPlan(new PluggableArtifactConfig("installers", "s3", create("Baz", true, "Car")));
    List<ArtifactPlan> artifactPlans = Arrays.asList(
        new ArtifactPlan(ArtifactPlanType.file, "installer.zip", "dist"),
        new ArtifactPlan(ArtifactPlanType.unit, "testreports.xml", "testreports"),
        artifactPlan
    );
    when(artifactExtension.publishArtifact(eq("cd.go.s3"), eq(artifactPlan), eq(artifactStore), anyString(), eq(env)))
        .thenReturn(new PublishArtifactResponse(Collections.singletonMap("Foo", "Bar")));
    final GoPublisher publisher = mock(GoPublisher.class);
    assertThat(workingFolder.list()).containsExactlyInAnyOrder("testreports.xml", "installer.zip", "cruise-output");
    new ArtifactsPublisher(publisher, artifactExtension, artifactStores, registry, workingFolder)
        .publishArtifacts(artifactPlans, env);
    assertThat(workingFolder.list()).containsExactlyInAnyOrder("testreports.xml", "installer.zip", "cruise-output");
}
/**
 * Collects the option specs (declaring interface, property name, getter) for
 * every bean-style getter reachable from the interface's full closure.
 * Getters whose declaring class is not a PipelineOptions subtype are dropped;
 * when skipHidden is set, getters declared on @Hidden interfaces are dropped too.
 */
static Set<PipelineOptionSpec> getOptionSpecs(
    Class<? extends PipelineOptions> optionsInterface, boolean skipHidden) {
    Iterable<Method> methods = ReflectHelpers.getClosureOfMethodsOnInterface(optionsInterface);
    Multimap<String, Method> propsToGetters = getPropertyNamesToGetters(methods);
    ImmutableSet.Builder<PipelineOptionSpec> setBuilder = ImmutableSet.builder();
    for (Map.Entry<String, Method> propAndGetter : propsToGetters.entries()) {
        String prop = propAndGetter.getKey();
        Method getter = propAndGetter.getValue();
        @SuppressWarnings("unchecked")
        Class<? extends PipelineOptions> declaringClass = (Class<? extends PipelineOptions>) getter.getDeclaringClass();
        if (!PipelineOptions.class.isAssignableFrom(declaringClass)) {
            // Getter inherited from a non-PipelineOptions interface: not an option.
            continue;
        }
        if (skipHidden && declaringClass.isAnnotationPresent(Hidden.class)) {
            continue;
        }
        setBuilder.add(PipelineOptionSpec.of(declaringClass, prop, getter));
    }
    return setBuilder.build();
}
// Options declared on a @Hidden interface must be excluded when skipHidden is set.
@Test
public void testExcludesHiddenInterfaces() {
    Set<PipelineOptionSpec> properties = PipelineOptionsReflector.getOptionSpecs(HiddenOptions.class, true);
    assertThat(properties, not(hasItem(hasName("foo"))));
}
/** Exposes the underlying CQL session held by this instance. */
@Override
public CqlSession currentSession() {
    return cqlSession;
}
// Round-trips a row through currentSession().execute: binds a parameter,
// maps the result set to a local record and checks the single expected row.
@Test
public void testAsyncQuery(CassandraParams params) {
    params.execute("create table test_table(id int, value varchar, primary key (id));\n");
    params.execute("insert into test_table(id, value) values (1,'test1');\n");
    record Entity(Integer id, String value) {}
    var qctx = new QueryContext(
        "SELECT id, value FROM test_table WHERE value = :value allow filtering",
        "SELECT id, value FROM test_table WHERE value = ? allow filtering"
    );
    withDb(params, db -> {
        var result = db.query(qctx, stmt -> {
            var s = stmt.bind("test1");
            return db.currentSession().execute(s).map(row -> {
                // isNull guard: getInt would return 0 for a SQL NULL otherwise.
                var __id = row.isNull("id") ? null : row.getInt("id");
                var __value = row.getString("value");
                return new Entity(__id, __value);
            });
        });
        Assertions.assertThat(result)
            .hasSize(1)
            .first()
            .isEqualTo(new Entity(1, "test1"));
    });
}
/** Reports this plugin's registered name from the shared PluginEnum. */
@Override
public String named() {
    return PluginEnum.NETTY_HTTP_CLIENT.getName();
}
// named() must report the plugin's registered name from PluginEnum.
@Test
public void testNamed() {
    assertEquals(PluginEnum.NETTY_HTTP_CLIENT.getName(), nettyHttpClientPlugin.named());
}
/**
 * Creates folder {@code name} under {@code parentPath} on the primary mount.
 * A 409 response ("folder already exists") is treated as success; any other
 * non-2xx status is surfaced as a KoofrClientIOException.
 */
public void ensureFolder(String parentPath, String name) throws IOException, InvalidTokenException {
    // Request body: {"name": "<name>"}.
    Map<String, Object> rawFolder = new LinkedHashMap<>();
    rawFolder.put("name", name);
    String url;
    try {
        url = getUriBuilder()
            .setPath(API_PATH_PREFIX + "/mounts/primary/files/folder")
            .setParameter("path", parentPath)
            .build()
            .toString();
    } catch (URISyntaxException e) {
        // Inputs are caller-controlled constants plus a path; a bad URI is a programming error.
        throw new IllegalStateException("Could not produce url.", e);
    }
    Request.Builder requestBuilder = getRequestBuilder(url);
    requestBuilder.post(
        RequestBody.create(
            MediaType.parse("application/json"), objectMapper.writeValueAsString(rawFolder)));
    try (Response response = getResponse(requestBuilder)) {
        int code = response.code();
        // 409 response code means that the folder already exists
        if ((code < 200 || code > 299) && code != 409) {
            throw new KoofrClientIOException(response);
        }
    }
}
// A 409 from the server means "folder already exists" and must not raise;
// also pins the exact request method, path, headers and JSON body.
@Test
public void testEnsureFolderAlreadyExists() throws Exception {
    server.enqueue(new MockResponse().setResponseCode(409));
    client.ensureFolder("/path/to/folder", "name");
    assertEquals(1, server.getRequestCount());
    final RecordedRequest recordedRequest = server.takeRequest();
    assertEquals("POST", recordedRequest.getMethod());
    assertEquals(
        "/api/v2/mounts/primary/files/folder?path=%2Fpath%2Fto%2Ffolder",
        recordedRequest.getPath());
    assertEquals("Bearer acc", recordedRequest.getHeader("Authorization"));
    assertEquals("2.1", recordedRequest.getHeader("X-Koofr-Version"));
    assertEquals(
        "application/json; charset=utf-8", recordedRequest.getHeader("Content-Type"));
    assertEquals("{\"name\":\"name\"}", recordedRequest.getBody().readUtf8());
}
/**
 * Computes the offset a newly-assigned queue should start pulling from,
 * honoring the consumer's ConsumeFromWhere policy.
 *
 * Common shape across all policies: a stored offset >= 0 wins outright;
 * -1 means "no offset recorded yet" and triggers the policy-specific
 * fallback; any other negative value from the store is an error.
 * A final guard rejects any negative result with SYSTEM_ERROR.
 */
@Override
public long computePullFromWhereWithException(MessageQueue mq) throws MQClientException {
    long result = -1;
    final ConsumeFromWhere consumeFromWhere = this.defaultMQPushConsumerImpl.getDefaultMQPushConsumer().getConsumeFromWhere();
    final OffsetStore offsetStore = this.defaultMQPushConsumerImpl.getOffsetStore();
    switch (consumeFromWhere) {
        // MIN/MAX/boot-first variants all share the LAST_OFFSET behavior here.
        case CONSUME_FROM_LAST_OFFSET_AND_FROM_MIN_WHEN_BOOT_FIRST:
        case CONSUME_FROM_MIN_OFFSET:
        case CONSUME_FROM_MAX_OFFSET:
        case CONSUME_FROM_LAST_OFFSET: {
            long lastOffset = offsetStore.readOffset(mq, ReadOffsetType.READ_FROM_STORE);
            if (lastOffset >= 0) {
                result = lastOffset;
            }
            // First start,no offset
            else if (-1 == lastOffset) {
                if (mq.getTopic().startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
                    // Retry topics start from the beginning.
                    result = 0L;
                } else {
                    try {
                        result = this.mQClientFactory.getMQAdminImpl().maxOffset(mq);
                    } catch (MQClientException e) {
                        log.warn("Compute consume offset from last offset exception, mq={}, exception={}", mq, e);
                        throw e;
                    }
                }
            } else {
                throw new MQClientException(ResponseCode.QUERY_NOT_FOUND, "Failed to query consume offset from " + "offset store");
            }
            break;
        }
        case CONSUME_FROM_FIRST_OFFSET: {
            long lastOffset = offsetStore.readOffset(mq, ReadOffsetType.READ_FROM_STORE);
            if (lastOffset >= 0) {
                result = lastOffset;
            } else if (-1 == lastOffset) {
                //the offset will be fixed by the OFFSET_ILLEGAL process
                result = 0L;
            } else {
                throw new MQClientException(ResponseCode.QUERY_NOT_FOUND, "Failed to query offset from offset " + "store");
            }
            break;
        }
        case CONSUME_FROM_TIMESTAMP: {
            long lastOffset = offsetStore.readOffset(mq, ReadOffsetType.READ_FROM_STORE);
            if (lastOffset >= 0) {
                result = lastOffset;
            } else if (-1 == lastOffset) {
                if (mq.getTopic().startsWith(MixAll.RETRY_GROUP_TOPIC_PREFIX)) {
                    // Retry topics cannot be searched by timestamp; jump to the end.
                    try {
                        result = this.mQClientFactory.getMQAdminImpl().maxOffset(mq);
                    } catch (MQClientException e) {
                        log.warn("Compute consume offset from last offset exception, mq={}, exception={}", mq, e);
                        throw e;
                    }
                } else {
                    // Resolve the configured consume timestamp to a broker offset.
                    try {
                        long timestamp = UtilAll.parseDate(this.defaultMQPushConsumerImpl.getDefaultMQPushConsumer().getConsumeTimestamp(),
                            UtilAll.YYYYMMDDHHMMSS).getTime();
                        result = this.mQClientFactory.getMQAdminImpl().searchOffset(mq, timestamp);
                    } catch (MQClientException e) {
                        log.warn("Compute consume offset from last offset exception, mq={}, exception={}", mq, e);
                        throw e;
                    }
                }
            } else {
                throw new MQClientException(ResponseCode.QUERY_NOT_FOUND, "Failed to query offset from offset " + "store");
            }
            break;
        }
        default:
            break;
    }
    if (result < 0) {
        throw new MQClientException(ResponseCode.SYSTEM_ERROR, "Found unexpected result " + result);
    }
    return result;
}
// When the offset store already holds a valid offset (0), every
// ConsumeFromWhere policy must return it unchanged.
@Test
public void testComputePullFromWhereWithException_ne_minus1() throws MQClientException {
    for (ConsumeFromWhere where : new ConsumeFromWhere[]{
        ConsumeFromWhere.CONSUME_FROM_LAST_OFFSET,
        ConsumeFromWhere.CONSUME_FROM_FIRST_OFFSET,
        ConsumeFromWhere.CONSUME_FROM_TIMESTAMP}) {
        consumer.setConsumeFromWhere(where);
        when(offsetStore.readOffset(any(MessageQueue.class), any(ReadOffsetType.class))).thenReturn(0L);
        assertEquals(0, rebalanceImpl.computePullFromWhereWithException(mq));
    }
}
/**
 * Compiles a single-point-to-multi-point intent into one LinkCollectionIntent.
 * For each egress, a path from the ingress device is computed (a same-device
 * egress needs no path, but its device must be available). Bandwidth is then
 * allocated on every involved connect point. Compilation fails when no egress
 * is reachable at all, or — unless the intent allows partial failure — when
 * any egress is unreachable.
 */
@Override
public List<Intent> compile(SinglePointToMultiPointIntent intent, List<Intent> installable) {
    Set<Link> links = new HashSet<>();
    final boolean allowMissingPaths = intentAllowsPartialFailure(intent);
    boolean hasPaths = false;
    boolean missingSomePaths = false;
    for (ConnectPoint egressPoint : intent.egressPoints()) {
        if (egressPoint.deviceId().equals(intent.ingressPoint().deviceId())) {
            // Do not need to look for paths, since ingress and egress
            // devices are the same.
            if (deviceService.isAvailable(egressPoint.deviceId())) {
                hasPaths = true;
            } else {
                missingSomePaths = true;
            }
            continue;
        }
        Path path = getPath(intent, intent.ingressPoint().deviceId(), egressPoint.deviceId());
        if (path != null) {
            hasPaths = true;
            links.addAll(path.links());
        } else {
            missingSomePaths = true;
        }
    }
    // Allocate bandwidth if a bandwidth constraint is set
    ConnectPoint ingressCP = intent.filteredIngressPoint().connectPoint();
    List<ConnectPoint> egressCPs = intent.filteredEgressPoints().stream()
        .map(fcp -> fcp.connectPoint())
        .collect(Collectors.toList());
    // Both endpoints of every path link participate in the allocation.
    List<ConnectPoint> pathCPs = links.stream()
        .flatMap(l -> Stream.of(l.src(), l.dst()))
        .collect(Collectors.toList());
    pathCPs.add(ingressCP);
    pathCPs.addAll(egressCPs);
    allocateBandwidth(intent, pathCPs);
    if (!hasPaths) {
        throw new IntentException("Cannot find any path between ingress and egress points.");
    } else if (!allowMissingPaths && missingSomePaths) {
        throw new IntentException("Missing some paths between ingress and egress points.");
    }
    Intent result = LinkCollectionIntent.builder()
        .appId(intent.appId())
        .key(intent.key())
        .selector(intent.selector())
        .treatment(intent.treatment())
        .links(links)
        .filteredIngressPoints(ImmutableSet.of(intent.filteredIngressPoint()))
        .filteredEgressPoints(intent.filteredEgressPoints())
        .priority(intent.priority())
        .applyTreatmentOnEgress(true)
        .constraints(intent.constraints())
        .resourceGroup(intent.resourceGroup())
        .build();
    return Collections.singletonList(result);
}
@Test
public void testNonTrivialSelectorsIntent() {
    // Ingress on DID_1; two egress points carrying distinct VLAN filters.
    FilteredConnectPoint ingress = new FilteredConnectPoint(new ConnectPoint(DID_1, PORT_1));
    Set<FilteredConnectPoint> egress = ImmutableSet.of(
            new FilteredConnectPoint(new ConnectPoint(DID_3, PORT_1),
                    DefaultTrafficSelector.builder().matchVlanId(VlanId.vlanId("100")).build()),
            new FilteredConnectPoint(new ConnectPoint(DID_4, PORT_1),
                    DefaultTrafficSelector.builder().matchVlanId(VlanId.vlanId("200")).build())
    );
    // Non-trivial (IP-prefix) base selector that compilation must carry through.
    TrafficSelector ipPrefixSelector = DefaultTrafficSelector.builder()
            .matchIPDst(IpPrefix.valueOf("192.168.100.0/24"))
            .build();
    SinglePointToMultiPointIntent intent = makeIntent(ingress, egress, ipPrefixSelector);
    String[] hops = {S2};
    SinglePointToMultiPointIntentCompiler compiler = makeCompiler(hops);
    assertThat(compiler, is(notNullValue()));

    List<Intent> result = compiler.compile(intent, null);
    assertThat(result, is(notNullValue()));
    assertThat(result, hasSize(1));
    Intent resultIntent = result.get(0);
    assertThat(resultIntent, instanceOf(LinkCollectionIntent.class));

    if (resultIntent instanceof LinkCollectionIntent) {
        LinkCollectionIntent linkIntent = (LinkCollectionIntent) resultIntent;
        // Expected tree: S1 -> S2, then S2 fans out to S3 and S4.
        assertThat(linkIntent.links(), hasSize(3));
        assertThat(linkIntent.links(), linksHasPath(S1, S2));
        assertThat(linkIntent.links(), linksHasPath(S2, S3));
        assertThat(linkIntent.links(), linksHasPath(S2, S4));

        // Ingress/egress filtered points and the selector must be inherited
        // verbatim from the original intent.
        Set<FilteredConnectPoint> ingressPoints = linkIntent.filteredIngressPoints();
        assertThat("Link collection ingress points do not match base intent",
                ingressPoints.size() == 1 && ingressPoints.contains(intent.filteredIngressPoint()));
        assertThat("Link collection egress points do not match base intent",
                linkIntent.filteredEgressPoints().equals(intent.filteredEgressPoints()));
        assertThat(linkIntent.selector(), is(ipPrefixSelector));
    }
    assertThat("key is inherited", resultIntent.key(), is(intent.key()));
}
/**
 * Executes this feature: either delegates entirely to a configured processor,
 * or runs the before-hook / scenarios / after-hook sequence itself.
 */
@Override
public void run() {
    // When a dedicated processor is configured, it owns the whole execution.
    if (processor != null) {
        processor.execute();
        return;
    }
    if (beforeHook()) {
        scenarios.forEachRemaining(this::processScenario);
    } else {
        logger.info("before-feature hook returned [false], aborting: {}", this);
    }
    // The after-feature hook runs whether or not the before-hook aborted.
    afterFeature();
}
@Test
void testOutlineSetupOnce() {
    // Executes the scenario-outline feature; the pass/fail assertions live
    // inside the feature file itself.
    run("outline-setup-once.feature");
}
/**
 * Finds the alias for the given projection name.
 * For shorthand projections, a match against one of the expanded actual
 * columns yields that column's expression; otherwise a case-insensitive match
 * on the projection expression yields the declared alias (if any).
 */
public Optional<String> findAlias(final String projectionName) {
    for (Projection candidate : projections) {
        if (candidate instanceof ShorthandProjection) {
            // Search the columns the shorthand ('*') expands to.
            for (Projection actualColumn : ((ShorthandProjection) candidate).getActualColumns()) {
                if (projectionName.equalsIgnoreCase(getOriginalColumnName(actualColumn))) {
                    return Optional.ofNullable(actualColumn.getExpression());
                }
            }
        }
        if (projectionName.equalsIgnoreCase(SQLUtils.getExactlyValue(candidate.getExpression()))) {
            return candidate.getAlias().map(IdentifierValue::getValue);
        }
    }
    return Optional.empty();
}
@Test
void assertFindAliasWithOutAlias() {
    // With no projections at all, no alias can be resolved for any name.
    ProjectionsContext projectionsContext = new ProjectionsContext(0, 0, true, Collections.emptyList());
    assertFalse(projectionsContext.findAlias("").isPresent());
}
/**
 * Looks up a tenant by its website address.
 *
 * @param website the tenant's website; matching semantics are delegated to the mapper
 * @return the matching tenant — presumably {@code null} when none exists (mapper contract; confirm)
 */
@Override
public TenantDO getTenantByWebsite(String website) {
    return tenantMapper.selectByWebsite(website);
}
@Test
public void testGetTenantByWebsite() {
    // Arrange: seed one tenant row with the target website.
    TenantDO dbTenant = randomPojo(TenantDO.class, o -> o.setWebsite("https://www.iocoder.cn"));
    tenantMapper.insert(dbTenant); // @Sql: insert one existing record first
    // Act
    TenantDO result = tenantService.getTenantByWebsite("https://www.iocoder.cn");
    // Assert: the stored row is returned
    assertPojoEquals(result, dbTenant);
}
/**
 * Resolves a queryable store by name and type.
 * Global stores take precedence: if the name resolves to a global store, the
 * global provider is used; otherwise the lookup spans every local
 * stream-thread state-store provider via a WrappingStoreProvider.
 */
public <T> T getStore(final StoreQueryParameters<T> storeQueryParameters) {
    final String storeName = storeQueryParameters.storeName();
    final QueryableStoreType<T> queryableStoreType = storeQueryParameters.queryableStoreType();
    final List<T> globalStore = globalStoreProvider.stores(storeName, queryableStoreType);
    if (!globalStore.isEmpty()) {
        return queryableStoreType.create(globalStoreProvider, storeName);
    }
    // Not global: wrap all local providers so the returned facade can query
    // every partition/thread holding this store.
    return queryableStoreType.create(
        new WrappingStoreProvider(storeProviders.values(), storeQueryParameters),
        storeName
    );
}
@Test
public void shouldReturnKVStoreWithPartitionWhenItExists() {
    // Requesting the last existing partition must resolve to a non-null store.
    assertNotNull(storeProvider.getStore(
        StoreQueryParameters
            .fromNameAndType(keyValueStore, QueryableStoreTypes.keyValueStore())
            .withPartition(numStateStorePartitions - 1)));
}
public long indexOf(double x, double y) { if (!rectangle.contains(x, y)) { // Put things outside the box at the end // This will also handle infinities and NaNs return Long.MAX_VALUE; } int xInt = (int) (xScale * (x - rectangle.getXMin())); int yInt = (int) (yScale * (y - rectangle.getYMin())); return discreteIndexOf(xInt, yInt); }
@Test
public void testDegenerateVerticalRectangle() {
    // A zero-width rectangle: only points with x == 0 fall inside it.
    HilbertIndex hilbert = new HilbertIndex(new Rectangle(0, 0, 0, 4));
    // Fix: JUnit's assertEquals takes (expected, actual); the original passed the
    // arguments reversed, which produces misleading failure messages.
    assertEquals(0L, hilbert.indexOf(0., 0.));
    // Ordering along the degenerate axis must still be monotonic.
    assertTrue(hilbert.indexOf(0., 1.) < hilbert.indexOf(0., 2.));
    // Points outside the rectangle map to the Long.MAX_VALUE sentinel.
    assertEquals(Long.MAX_VALUE, hilbert.indexOf(2., 0.));
    assertEquals(Long.MAX_VALUE, hilbert.indexOf(2., 2.));
}
/**
 * Returns the resolved value, or {@code defaultValue} when no value is
 * available; forwards straight to the wrapped promise.
 */
public T getOrDefault(final T defaultValue) {
    return _delegate.getOrDefault(defaultValue);
}
@Test
public void testGetOrDefaultWithValue() {
    // The promise holds a value, so getOrDefault must ignore the default and
    // the delegating promise must return exactly what its delegate returns.
    final Promise<String> delegate = Promises.value("value");
    final Promise<String> promise = new DelegatingPromise<String>(delegate);
    // Fix: both sides now use the same default. The original passed the
    // misspelled "defaulValue" to the delegate, so the two calls used
    // different defaults — a latent bug that would hide a regression if the
    // promise were ever empty.
    assertEquals(delegate.getOrDefault("defaultValue"), promise.getOrDefault("defaultValue"));
}
/**
 * Checks whether the bytecode deployed at this contract's address matches the
 * binary embedded in the generated wrapper.
 * Returns false when the node call errors or the codes do not match; throws
 * when the wrapper lacks the information needed to perform the check.
 */
public boolean isValid() throws IOException {
    // Guard: wrappers generated without -abiFile carry no reference binary.
    if (contractBinary.equals(BIN_NOT_PROVIDED)) {
        throw new UnsupportedOperationException(
                "Contract binary not present in contract wrapper, "
                        + "please generate your wrapper using -abiFile=<file>");
    }
    // Guard: pre-v2.2.0 wrappers never recorded the deployed address.
    if (contractAddress.equals("")) {
        throw new UnsupportedOperationException(
                "Contract binary not present, you will need to regenerate your smart "
                        + "contract wrapper with web3j v2.2.0+");
    }

    // Fetch the runtime bytecode currently deployed at the address.
    EthGetCode ethGetCode = transactionManager.getCode(contractAddress, DefaultBlockParameterName.LATEST);
    if (ethGetCode.hasError()) {
        return false;
    }

    String code = cleanHexPrefix(ethGetCode.getCode());
    // Strip the trailing Solidity metadata hash (everything from the first
    // known indicator onward) — it varies between otherwise identical builds.
    int metadataIndex = -1;
    for (String metadataIndicator : METADATA_HASH_INDICATORS) {
        metadataIndex = code.indexOf(metadataIndicator);
        if (metadataIndex != -1) {
            code = code.substring(0, metadataIndex);
            break;
        }
    }
    // There may be multiple contracts in the Solidity bytecode, hence we only check for a
    // match with a subset
    return !code.isEmpty() && contractBinary.contains(code);
}
@Test
public void testIsValidSkipMetadataIpfs() {
    // Deployed code = reference binary + an IPFS metadata-hash suffix
    // (starting "a2646970667358..."); isValid() must strip that tail before
    // comparing and still report the contract as valid.
    prepareEthGetCode(
            TEST_CONTRACT_BINARY
                    + "a2646970667358221220"
                    + "a9bc86938894dc250f6ea25dd823d4472fad6087edcda429a3504e3713a9fc880029");
    Contract contract = deployContract(createTransactionReceipt());
    assertTrue(contract.isValid());
}
/**
 * Resolves configuration placeholders in the given map in place.
 *
 * @param decrypt whether secret values are also decrypted during the merge
 * @param config  the configuration map to mutate
 */
public static void mergeMap(boolean decrypt, Map<String, Object> config) {
    merge(decrypt, config);
}
@Test
public void testMap_valueCastToInt() {
    // A "${NAME: default}" placeholder with a numeric default must resolve to
    // an Integer value, not remain a String.
    Map<String, Object> testMap = new HashMap<>();
    testMap.put("key", "${TEST.int: 1}");
    CentralizedManagement.mergeMap(true, testMap);
    Assert.assertTrue(testMap.get("key") instanceof Integer);
}
/**
 * Derives a Beam {@link Schema} from a protobuf message class, rejecting
 * dynamic-message types up front.
 */
@Override
public <T> @Nullable Schema schemaFor(TypeDescriptor<T> typeDescriptor) {
    checkForDynamicType(typeDescriptor);
    // Unchecked cast: checkForDynamicType has rejected DynamicMessage, so the
    // raw type is presumably a generated Message subclass — confirm.
    return ProtoSchemaTranslator.getSchema((Class<Message>) typeDescriptor.getRawType());
}
@Test
public void testOptionalPrimitiveSchema() {
    // Optional primitive proto fields must translate to the expected schema.
    Schema schema = new ProtoMessageSchema().schemaFor(TypeDescriptor.of(OptionalPrimitive.class));
    assertEquals(OPTIONAL_PRIMITIVE_SCHEMA, schema);
}
/**
 * Netty entry point: unwraps the channel from the context and delegates to
 * the main overload with the last flag set to {@code true}.
 * NOTE(review): the meaning of the boolean is not visible here — confirm
 * against the three-argument overload.
 */
@Override
public RemotingCommand processRequest(final ChannelHandlerContext ctx,
    RemotingCommand request) throws RemotingCommandException {
    return this.processRequest(ctx.channel(), request, true);
}
@Test
public void testBatchAck_appendAck() throws RemotingCommandException {
    {
        // Case 1: the pop buffer accepts the ack in memory (addAk -> true);
        // no store write is needed and the request must succeed.
        PopBufferMergeService popBufferMergeService = mock(PopBufferMergeService.class);
        when(popBufferMergeService.addAk(anyInt(), any())).thenReturn(true);
        when(popMessageProcessor.getPopBufferMergeService()).thenReturn(popBufferMergeService);

        // One batch ack for bit 1 starting at the queue's min offset.
        BatchAck bAck1 = new BatchAck();
        bAck1.setConsumerGroup(MixAll.DEFAULT_CONSUMER_GROUP);
        bAck1.setTopic(topic);
        bAck1.setStartOffset(MIN_OFFSET_IN_QUEUE);
        bAck1.setBitSet(new BitSet());
        bAck1.getBitSet().set(1);
        bAck1.setRetry("0");

        RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.BATCH_ACK_MESSAGE, null);
        BatchAckMessageRequestBody reqBody = new BatchAckMessageRequestBody();
        reqBody.setAcks(Collections.singletonList(bAck1));
        request.setBody(reqBody.encode());
        request.makeCustomHeaderToNet();
        RemotingCommand response = ackMessageProcessor.processRequest(handlerContext, request);
        assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
    }
    {
        // Case 2: the buffer rejects the ack (addAk -> false), so the
        // processor falls back to persisting it via the message store;
        // a PUT_OK store result must still yield SUCCESS.
        PopBufferMergeService popBufferMergeService = mock(PopBufferMergeService.class);
        when(popBufferMergeService.addAk(anyInt(), any())).thenReturn(false);
        when(popMessageProcessor.getPopBufferMergeService()).thenReturn(popBufferMergeService);

        // Store putMessage succeeds.
        PutMessageResult putMessageResult = new PutMessageResult(PutMessageStatus.PUT_OK, null);
        when(messageStore.putMessage(any())).thenReturn(putMessageResult);

        BatchAck bAck1 = new BatchAck();
        bAck1.setConsumerGroup(MixAll.DEFAULT_CONSUMER_GROUP);
        bAck1.setTopic(topic);
        bAck1.setStartOffset(MIN_OFFSET_IN_QUEUE);
        bAck1.setBitSet(new BitSet());
        bAck1.getBitSet().set(1);
        bAck1.setRetry("0");

        RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.BATCH_ACK_MESSAGE, null);
        BatchAckMessageRequestBody reqBody = new BatchAckMessageRequestBody();
        reqBody.setAcks(Arrays.asList(bAck1));
        request.setBody(reqBody.encode());
        request.makeCustomHeaderToNet();
        RemotingCommand response = ackMessageProcessor.processRequest(handlerContext, request);
        assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
    }
}
/**
 * Subtracts another AggregatedMetricValues from this one, in place.
 * Every metric present in {@code other} must already exist here with the same
 * number of windows.
 *
 * @param other the values to subtract
 * @throws IllegalStateException if a metric in {@code other} is missing here,
 *                               or the window counts differ
 */
public void subtract(AggregatedMetricValues other) {
    for (Map.Entry<Short, MetricValues> entry : other.metricValues().entrySet()) {
        short metricId = entry.getKey();
        MetricValues otherValuesForMetric = entry.getValue();
        MetricValues valuesForMetric = valuesFor(metricId);
        if (valuesForMetric == null) {
            // Fix: the original message read "Cannot subtract a values from a
            // non-existing MetricValues" — ungrammatical and missing the
            // offending metric id.
            throw new IllegalStateException("Cannot subtract values for metric " + metricId
                                            + " from a non-existing MetricValues");
        }
        if (valuesForMetric.length() != otherValuesForMetric.length()) {
            throw new IllegalStateException("The two values arrays have different lengths "
                                            + valuesForMetric.length() + " and "
                                            + otherValuesForMetric.length());
        }
        valuesForMetric.subtract(otherValuesForMetric);
    }
}
@Test
public void testDeduct() {
    // Subtracting an AggregatedMetricValues from itself must zero out every
    // window value of every metric.
    Map<Short, MetricValues> valuesByMetricId = getValuesByMetricId();
    AggregatedMetricValues aggregatedMetricValues = new AggregatedMetricValues(valuesByMetricId);
    aggregatedMetricValues.subtract(aggregatedMetricValues);
    for (Map.Entry<Short, MetricValues> entry : valuesByMetricId.entrySet()) {
        MetricValues values = entry.getValue();
        // NOTE(review): assumes each MetricValues holds at least 10 windows —
        // confirm against getValuesByMetricId().
        for (int j = 0; j < 10; j++) {
            assertEquals(0, values.get(j), 0.01);
        }
    }
}
/**
 * Counts the dictionary-data rows belonging to the given dictionary type.
 */
@Override
public long getDictDataCountByDictType(String dictType) {
    return dictDataMapper.selectCountByDictType(dictType);
}
@Test
public void testGetDictDataCountByDictType() {
    // Arrange: two rows of type "yunai" and one of type "tudou".
    dictDataMapper.insert(randomDictDataDO(o -> o.setDictType("yunai")));
    dictDataMapper.insert(randomDictDataDO(o -> o.setDictType("tudou")));
    dictDataMapper.insert(randomDictDataDO(o -> o.setDictType("yunai")));
    // Prepare argument
    String dictType = "yunai";
    // Act
    long count = dictDataService.getDictDataCountByDictType(dictType);
    // Assert: only the two "yunai" rows are counted
    assertEquals(2L, count);
}
/**
 * Adds the given assignment handler's model object to this handler's model.
 *
 * @param assignment the assignment to add
 * @return this handler, for fluent chaining
 */
public OpenConfigLogicalChannelAssignmentsHandler addAssignment(
        OpenConfigAssignmentHandler assignment) {
    modelObject.addToAssignment(assignment.getModelObject());
    return this;
}
@Test
public void testAddAssignment() {
    // test Handler
    OpenConfigLogicalChannelAssignmentsHandler logicalChannelAssignments =
            new OpenConfigLogicalChannelAssignmentsHandler(parent);
    // call addAssignment
    // NOTE(review): addAssignment is never invoked directly here — presumably
    // the OpenConfigAssignmentHandler constructor registers itself with the
    // parent handler passed in; confirm.
    OpenConfigAssignmentHandler assignment =
            new OpenConfigAssignmentHandler(2, logicalChannelAssignments);
    // expected ModelObject: a DefaultAssignment with index 2 inside the container
    DefaultLogicalChannelAssignments modelObject = new DefaultLogicalChannelAssignments();
    DefaultAssignment assign = new DefaultAssignment();
    assign.index(2);
    modelObject.addToAssignment(assign);

    assertEquals("[NG]addAssignment:ModelObject(Assignment added) is not an expected one.\n",
            modelObject, logicalChannelAssignments.getModelObject());
}
/** Returns the cloud account from the prepare parameters, when one was supplied. */
public Optional<CloudAccount> cloudAccount() {
    return cloudAccount;
}
@Test
public void testCloudAccount() {
    // A "cloudAccount" object in the prepare JSON must surface as a
    // CloudAccount value via the Optional accessor.
    String json = "{\"cloudAccount\": {\"id\": \"012345678912\"}}";
    PrepareParams params = PrepareParams.fromJson(json.getBytes(StandardCharsets.UTF_8),
            TenantName.defaultName(), Duration.ZERO);
    assertEquals(CloudAccount.from("012345678912"), params.cloudAccount().get());
}
/**
 * De-registers this instance from the Nacos naming service.
 * A missing service id means nothing was registered, so the call is a
 * warn-and-skip no-op; NacosExceptions are logged rather than propagated,
 * keeping de-registration best-effort on the shutdown path.
 */
public void deregister() {
    if (StringUtils.isEmpty(RegisterContext.INSTANCE.getClientInfo().getServiceId())) {
        LOGGER.warning("No service to de-register for nacos client...");
        return;
    }
    String serviceId = RegisterContext.INSTANCE.getClientInfo().getServiceId();
    String group = nacosRegisterConfig.getGroup();
    try {
        NamingService namingService = nacosServiceManager.getNamingService();
        namingService.deregisterInstance(serviceId, group, instance);
    } catch (NacosException e) {
        // Deliberately swallowed after logging: a failed de-registration must
        // not break the caller's shutdown sequence.
        LOGGER.log(Level.SEVERE, String.format(Locale.ENGLISH, "failed when deRegister service,"
            + "serviceId={%s}", serviceId), e);
    }
}
@Test
public void testDeregister() throws NacosException {
    // deregister() must complete against the mocked naming service and leave
    // the client's cached "instance" field intact.
    mockNamingService();
    nacosClient.deregister();
    Assert.assertNotNull(ReflectUtils.getFieldValue(nacosClient, "instance"));
}
/**
 * JSON factory for a size-based rotation strategy configuration.
 *
 * @param type    the strategy type discriminator, read from the "type" field
 * @param maxSize maximum size before rotation (>= 1, enforced by @Min);
 *                units are not visible here — presumably bytes, confirm
 */
@JsonCreator
public static SizeBasedRotationStrategyConfig create(@JsonProperty(TYPE_FIELD) String type,
                                                     @JsonProperty("max_size") @Min(1) long maxSize) {
    return new AutoValue_SizeBasedRotationStrategyConfig(type, maxSize);
}
@Test
public void testCreate() throws Exception {
    // NOTE(review): uses a single-argument overload (type presumably
    // defaulted by another create variant) — confirm it delegates to the
    // two-argument @JsonCreator factory.
    final SizeBasedRotationStrategyConfig config = SizeBasedRotationStrategyConfig.create(1000L);
    assertThat(config.maxSize()).isEqualTo(1000L);
}