focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Merges {@code pythonConfiguration} into {@code config}. For the list-valued options
 * {@code PythonOptions.PYTHON_FILES} and {@code PythonOptions.PYTHON_ARCHIVES}, the values
 * from {@code pythonConfiguration} are prepended to any existing values in {@code config}
 * (joined with {@code FILE_DELIMITER}); all remaining options are copied over afterwards via
 * {@code Configuration#addAll}, with the incoming configuration taking precedence.
 *
 * @param config the target configuration, mutated in place
 * @param pythonConfiguration the configuration to merge in (not mutated; a defensive copy is taken)
 */
public static void merge(Configuration config, Configuration pythonConfiguration) {
    Configuration toMerge = new Configuration(pythonConfiguration);
    // Both options hold FILE_DELIMITER-separated file lists and merge identically;
    // the duplicated branches of the original are factored into one helper.
    mergeFileListOption(config, toMerge, PythonOptions.PYTHON_FILES);
    mergeFileListOption(config, toMerge, PythonOptions.PYTHON_ARCHIVES);
    config.addAll(toMerge);
}

/**
 * Prepends the value of {@code option} from {@code toMerge} to the value already in
 * {@code config} (or simply sets it when absent), then removes the option from
 * {@code toMerge} so the subsequent {@code addAll} does not overwrite the merged result.
 */
private static void mergeFileListOption(
        Configuration config, Configuration toMerge, ConfigOption<String> option) {
    if (!toMerge.contains(option)) {
        return;
    }
    if (config.contains(option)) {
        // Values from the incoming configuration come first, matching the original order.
        config.set(option, String.join(FILE_DELIMITER, toMerge.get(option), config.get(option)));
    } else {
        config.set(option, toMerge.get(option));
    }
    toMerge.removeConfig(option);
}
// Merges config2 into config and verifies that the PYTHON_ARCHIVES / PYTHON_FILES values from
// the merged-in configuration are prepended to the pre-existing comma-separated values.
@Test void testPythonDependencyConfigMerge() { Configuration config = new Configuration(); config.set( PythonOptions.PYTHON_ARCHIVES, "hdfs:///tmp_dir/file1.zip,hdfs:///tmp_dir/file2.zip"); config.set( PythonOptions.PYTHON_FILES, "hdfs:///tmp_dir/file3.zip,hdfs:///tmp_dir/file4.zip"); Configuration config2 = new Configuration(); config2.set( PythonOptions.PYTHON_ARCHIVES, "hdfs:///tmp_dir/file5.zip,hdfs:///tmp_dir/file6.zip"); config2.set( PythonOptions.PYTHON_FILES, "hdfs:///tmp_dir/file7.zip,hdfs:///tmp_dir/file8.zip"); Configuration expectedConfiguration = new Configuration(); expectedConfiguration.set( PythonOptions.PYTHON_ARCHIVES, "hdfs:///tmp_dir/file5.zip,hdfs:///tmp_dir/file6.zip,hdfs:///tmp_dir/file1.zip,hdfs:///tmp_dir/file2.zip"); expectedConfiguration.set( PythonOptions.PYTHON_FILES, "hdfs:///tmp_dir/file7.zip,hdfs:///tmp_dir/file8.zip,hdfs:///tmp_dir/file3.zip,hdfs:///tmp_dir/file4.zip"); merge(config, config2); verifyConfiguration(expectedConfiguration, config); }
/**
 * Renders this SCM's configuration for display, filtering the properties through the plugin
 * metadata store. When the plugin is not installed, the output is prefixed with a warning so
 * the user knows the displayed configuration may be incomplete.
 */
public String getConfigForDisplay() {
    final String id = getPluginId();
    final SCMMetadataStore store = SCMMetadataStore.getInstance();
    final List<ConfigurationProperty> displayableProperties =
            ConfigurationDisplayUtil.getConfigurationPropertiesToBeUsedForDisplay(store, id, configuration);
    if (store.hasPlugin(id)) {
        return configuration.forDisplay(displayableProperties);
    }
    return "WARNING! Plugin missing. " + configuration.forDisplay(displayableProperties);
}
// Properties with empty or null values must be omitted from the display string; only rk2,
// which carries a non-empty value, should appear in the rendered configuration.
@Test void shouldNotDisplayEmptyValuesInGetConfigForDisplay() { SCMMetadataStore.getInstance().addMetadataFor("plugin-id", new SCMConfigurations(), null); Configuration configuration = new Configuration(create("rk1", false, ""), create("rk2", false, "some-non-empty-value"), create("rk3", false, null)); SCM scm = SCMMother.create("scm", "scm-name", "plugin-id", "1.0", configuration); assertThat(scm.getConfigForDisplay()).isEqualTo("[rk2=some-non-empty-value]"); }
/**
 * Always reports the job tracker as running.
 *
 * @return {@code JobTracker.State.RUNNING}, unconditionally — the state is hard-coded
 * @deprecated retained only for API compatibility
 */
@Deprecated public JobTracker.State getJobTrackerState() { return JobTracker.State.RUNNING; }
// The deprecated getJobTrackerState() is hard-coded to RUNNING; pin that contract.
@SuppressWarnings("deprecation") @Test (timeout = 10000) public void testJobTrackerState() { Assert.assertEquals(JobTracker.State.RUNNING, clusterStatus.getJobTrackerState()); }
// Factory for a trigger state machine that fires once the watermark passes the end of the window.
public static FromEndOfWindow pastEndOfWindow() { return new FromEndOfWindow(); }
// pastEndOfWindow() must not schedule an event-time timer until an element arrives; the timer
// then fires at the window's maximum timestamp.
@Test public void testTimerForEndOfWindow() throws Exception { tester = TriggerStateMachineTester.forTrigger( AfterWatermarkStateMachine.pastEndOfWindow(), FixedWindows.of(Duration.millis(100))); assertThat(tester.getNextTimer(TimeDomain.EVENT_TIME), nullValue()); injectElements(1); IntervalWindow window = new IntervalWindow(new Instant(0), new Instant(100)); assertThat(tester.getNextTimer(TimeDomain.EVENT_TIME), equalTo(window.maxTimestamp())); }
/**
 * Computes the row ranges that might match {@code filter} within a row group, using the
 * column indexes from {@code columnIndexStore}. Falls back to the full range [0, rowCount)
 * when the filter is a no-op / unbound-record filter, or when an offset index is missing
 * (filtering cannot be done safely without it).
 */
public static RowRanges calculateRowRanges( FilterCompat.Filter filter, ColumnIndexStore columnIndexStore, Set<ColumnPath> paths, long rowCount) { return filter.accept(new FilterCompat.Visitor<RowRanges>() { @Override public RowRanges visit(FilterPredicateCompat filterPredicateCompat) { try { return filterPredicateCompat .getFilterPredicate() .accept(new ColumnIndexFilter(columnIndexStore, paths, rowCount)); } catch (MissingOffsetIndexException e) { LOGGER.info(e.getMessage()); return RowRanges.createSingle(rowCount); } } @Override public RowRanges visit(UnboundRecordFilterCompat unboundRecordFilterCompat) { return RowRanges.createSingle(rowCount); } @Override public RowRanges visit(NoOpFilter noOpFilter) { return RowRanges.createSingle(rowCount); } }); }
// Exercises calculateRowRanges against the fixture STORE: user-defined predicates, contains,
// in/notIn (including null members and a column without a column index), null eq/notEq,
// range comparisons, and logical combinations. Expected row indexes derive from the fixtures.
// (Reformatted onto multiple lines; in the collapsed form the inline comment below would
// otherwise swallow the following statement.)
@Test
public void testFiltering() {
  Set<ColumnPath> paths = paths("column1", "column2", "column3", "column4", "column6");
  assertAllRows( calculateRowRanges( FilterCompat.get(userDefined(intColumn("column1"), AnyInt.class)), STORE, paths, TOTAL_ROW_COUNT), TOTAL_ROW_COUNT);
  assertRows( calculateRowRanges( FilterCompat.get(contains(eq(intColumn("column6"), 7))), STORE, paths, TOTAL_ROW_COUNT), 7, 8, 9, 10, 11, 12, 13);
  assertRows( calculateRowRanges( FilterCompat.get( and(contains(eq(intColumn("column6"), 7)), contains(eq(intColumn("column6"), 10)))), STORE, paths, TOTAL_ROW_COUNT), 9, 10, 11, 12, 13);
  assertRows( calculateRowRanges( FilterCompat.get( or(contains(eq(intColumn("column6"), 7)), contains(eq(intColumn("column6"), 20)))), STORE, paths, TOTAL_ROW_COUNT), 7, 8, 9, 10, 11, 12, 13, 21, 22, 23, 24, 25, 26);
  Set<Integer> set1 = new HashSet<>();
  set1.add(7);
  assertRows( calculateRowRanges(FilterCompat.get(in(intColumn("column1"), set1)), STORE, paths, TOTAL_ROW_COUNT), 7, 8, 9, 10, 11, 12, 13);
  set1.add(1);
  assertRows( calculateRowRanges(FilterCompat.get(in(intColumn("column1"), set1)), STORE, paths, TOTAL_ROW_COUNT), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13);
  assertRows( calculateRowRanges(FilterCompat.get(notIn(intColumn("column1"), set1)), STORE, paths, TOTAL_ROW_COUNT), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29);
  Set<Binary> set2 = new HashSet<>();
  set2.add(fromString("Zulu"));
  set2.add(fromString("Alfa"));
  assertRows( calculateRowRanges(FilterCompat.get(in(binaryColumn("column2"), set2)), STORE, paths, TOTAL_ROW_COUNT), 0, 1, 2, 3, 4, 5, 6, 7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29);
  assertRows( calculateRowRanges( FilterCompat.get(notIn(binaryColumn("column2"), set2)), STORE, paths, TOTAL_ROW_COUNT), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29);
  Set<Double> set3 = new HashSet<>();
  set3.add(2.03);
  assertRows( calculateRowRanges(FilterCompat.get(in(doubleColumn("column3"), set3)), STORE, paths, TOTAL_ROW_COUNT), 0, 1, 2, 3, 4, 5, 16, 17, 18, 19, 20, 21, 22);
  assertRows( calculateRowRanges( FilterCompat.get(notIn(doubleColumn("column3"), set3)), STORE, paths, TOTAL_ROW_COUNT), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29);
  set3.add(9.98);
  assertRows( calculateRowRanges(FilterCompat.get(in(doubleColumn("column3"), set3)), STORE, paths, TOTAL_ROW_COUNT), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25);
  assertRows( calculateRowRanges( FilterCompat.get(notIn(doubleColumn("column3"), set3)), STORE, paths, TOTAL_ROW_COUNT), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29);
  set3.add(null);
  assertRows( calculateRowRanges(FilterCompat.get(in(doubleColumn("column3"), set3)), STORE, paths, TOTAL_ROW_COUNT), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29);
  assertRows( calculateRowRanges( FilterCompat.get(notIn(doubleColumn("column3"), set3)), STORE, paths, TOTAL_ROW_COUNT), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29);
  Set<Boolean> set4 = new HashSet<>();
  set4.add(null);
  assertRows( calculateRowRanges(FilterCompat.get(in(booleanColumn("column4"), set4)), STORE, paths, TOTAL_ROW_COUNT), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29);
  // no column index, can't filter this row
  assertRows( calculateRowRanges( FilterCompat.get(notIn(booleanColumn("column4"), set4)), STORE, paths, TOTAL_ROW_COUNT), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29);
  Set<Integer> set5 = new HashSet<>();
  set5.add(7);
  set5.add(20);
  assertRows( calculateRowRanges(FilterCompat.get(in(intColumn("column5"), set5)), STORE, paths, TOTAL_ROW_COUNT), new long[0]);
  assertRows( calculateRowRanges(FilterCompat.get(notIn(intColumn("column5"), set5)), STORE, paths, TOTAL_ROW_COUNT), 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29);
  assertRows( calculateRowRanges( FilterCompat.get(and( and(eq(intColumn("column1"), null), eq(binaryColumn("column2"), null)), and(eq(doubleColumn("column3"), null), eq(booleanColumn("column4"), null)))), STORE, paths, TOTAL_ROW_COUNT), 6, 9);
  assertRows( calculateRowRanges( FilterCompat.get(and( and(notEq(intColumn("column1"), null), notEq(binaryColumn("column2"), null)), and(notEq(doubleColumn("column3"), null), notEq(booleanColumn("column4"), null)))), STORE, paths, TOTAL_ROW_COUNT), 0, 1, 2, 3, 4, 5, 6, 7, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25);
  assertRows( calculateRowRanges( FilterCompat.get(or( and(lt(intColumn("column1"), 20), gtEq(binaryColumn("column2"), fromString("Quebec"))), and( gt(doubleColumn("column3"), 5.32), ltEq(binaryColumn("column4"), fromString("XYZ"))))), STORE, paths, TOTAL_ROW_COUNT), 0, 1, 2, 3, 4, 5, 6, 7, 10, 11, 12, 13, 14, 15, 23, 24, 25);
  assertRows( calculateRowRanges( FilterCompat.get(and( and(gtEq(intColumn("column1"), 7), gt(binaryColumn("column2"), fromString("India"))), and(eq(doubleColumn("column3"), null), notEq(binaryColumn("column4"), null)))), STORE, paths, TOTAL_ROW_COUNT), 7, 16, 17, 18, 19, 20);
  assertRows( calculateRowRanges( FilterCompat.get(and( or( invert(userDefined(intColumn("column1"), AnyInt.class)), eq(binaryColumn("column2"), fromString("Echo"))), eq(doubleColumn("column3"), 6.0))), STORE, paths, TOTAL_ROW_COUNT), 23, 24, 25);
  assertRows( calculateRowRanges( FilterCompat.get(and( userDefined(intColumn("column1"), IntegerIsDivisableWith3.class), and( userDefined(binaryColumn("column2"), BinaryUtf8StartsWithB.class), userDefined(doubleColumn("column3"), DoubleIsInteger.class)))), STORE, paths, TOTAL_ROW_COUNT), 21, 22, 23, 24, 25);
  assertRows( calculateRowRanges( FilterCompat.get(and( and(gtEq(intColumn("column1"), 7), lt(intColumn("column1"), 11)), and( gt(binaryColumn("column2"), fromString("Romeo")), ltEq(binaryColumn("column2"), fromString("Tango"))))), STORE, paths, TOTAL_ROW_COUNT), 7, 11, 12, 13);
}
/**
 * Lists the schema (database) names available through this JDBC catalog.
 *
 * @return a mutable list of the schema names reported by the schema resolver
 * @throws StarRocksConnectorException if the underlying JDBC call fails
 */
@Override
public List<String> listDbNames() {
    try (Connection connection = getConnection()) {
        // Connection is closed automatically by try-with-resources.
        return Lists.newArrayList(schemaResolver.listSchemas(connection));
    }
    catch (SQLException e) {
        throw new StarRocksConnectorException("list db names for JDBC catalog fail!", e);
    }
}
// Verifies that listDbNames() returns the schema names exposed by the mocked data source.
@Test
public void testListDatabaseNames() {
    try {
        JDBCMetadata jdbcMetadata = new JDBCMetadata(properties, "catalog", dataSource);
        dbResult.beforeFirst();
        List<String> result = jdbcMetadata.listDbNames();
        List<String> expectResult = Lists.newArrayList("test");
        Assert.assertEquals(expectResult, result);
    } catch (Exception e) {
        // Include the cause in the failure message — a bare fail() hides why the test broke.
        Assert.fail(e.getMessage());
    }
}
// Convenience overload: resolves the protobuf descriptor for the message class and delegates
// to the descriptor-based getSchema.
static Schema getSchema(Class<? extends Message> clazz) { return getSchema(ProtobufUtil.getDescriptorForClass(clazz)); }
// The generated schema for the Primitive proto message must match the expected fixture schema.
@Test public void testPrimitiveSchema() { assertEquals( TestProtoSchemas.PRIMITIVE_SCHEMA, ProtoSchemaTranslator.getSchema(Proto3SchemaMessages.Primitive.class)); }
/**
 * Maps a numeric protocol-version ordinal to its {@code Version} enum name.
 * Out-of-range values are clamped: anything at or above the highest known ordinal maps to
 * the newest version, and negative values map to the oldest, so a peer running a different
 * release can never trigger an {@code ArrayIndexOutOfBoundsException} here.
 *
 * @param value version ordinal, possibly produced by a newer or older peer
 * @return the name of the matching {@code Version} constant
 */
public static String getVersionDesc(int value) {
    // values() clones the constant array on every call; fetch it once.
    final Version[] versions = Version.values();
    if (value >= versions.length) {
        return versions[versions.length - 1].name();
    }
    if (value < 0) {
        // Defensive: the original implementation threw ArrayIndexOutOfBoundsException here.
        return versions[0].name();
    }
    return versions[value].name();
}
// Any ordinal beyond the newest known version must clamp to the last enum constant's name.
@Test public void testGetVersionDesc_higherVersion() throws Exception { String desc = "HIGHER_VERSION"; assertThat(MQVersion.getVersionDesc(Integer.MAX_VALUE)).isEqualTo(desc); }
// Returns true when the normalized form of {@code path} is relative (no drive/root prefix),
// i.e. it stays within the working directory after normalization.
// NOTE(review): FilenameUtils.normalize returns null for paths that traverse above the root
// (e.g. "../x") and getPrefix(null) is null-safe, so such paths yield false — confirm against
// commons-io documentation.
public static boolean isNormalizedPathOutsideWorkingDir(String path) { final String normalize = FilenameUtils.normalize(path); final String prefix = FilenameUtils.getPrefix(normalize); return (normalize != null && StringUtils.isBlank(prefix)); }
// An absolute Windows path has a drive prefix, so it is not "inside" the working directory.
@Test public void shouldReturnFalseIfGivenFolderIsAbsolute() { assertThat(FilenameUtil.isNormalizedPathOutsideWorkingDir("c:\\foo"), is(false)); }
// Populates the parent result map with this graph's configuration values (granularity, axis
// names, sample variable name and content message), each wrapped in a ValueResultData.
@Override protected void initializeExtraResults(MapResultData parentResult) { parentResult.setResult(RESULT_CTX_GRANULARITY, new ValueResultData(getGranularity())); parentResult.setResult(RESULT_Y_AXIS, new ValueResultData(getYAxis())); parentResult.setResult(RESULT_X_AXIS, new ValueResultData(getXAxis())); parentResult.setResult(RESULT_SAMPLE_VARIABLE_NAME, new ValueResultData(getSampleVariableName())); parentResult.setResult(RESULT_CONTENT_MESSAGE, new ValueResultData(getContentMessage())); }
// After initializeExtraResults(), each extra result must serialize (via JsonizerVisitor) to the
// configured graph settings: granularity, axis names, sample variable and content message.
@Test public void testInitializeExtraResults() { customGraphConsumer.initializeExtraResults(resultData); JsonizerVisitor jsonizer = new JsonizerVisitor(); for (Map.Entry<String, ResultData> entrySet : resultData.entrySet()) { Object testedValue = entrySet.getValue().accept(jsonizer); String key = entrySet.getKey(); if (key.equals("granularity")) { assertThat(testedValue, equalTo("60000")); } else if (key.equals("X_Axis")) { assertThat(testedValue, equalTo("\"X axis name\"")); } else if (key.equals("Y_Axis")) { assertThat(testedValue, equalTo("\"Y axis name\"")); } else if (key.equals("sample_Metric_Name")) { assertThat(testedValue, equalTo("\"ulp_lag_ratio\"")); } else if (key.equals("content_Message")) { assertThat(testedValue, equalTo("\"content message\"")); } } }
/**
 * Checks that the subject contains at least the given elements; the returned {@code Ordered}
 * can additionally assert that they appear in the given relative order. Expected elements are
 * matched greedily left-to-right against the actual list; elements skipped over are tracked in
 * {@code actualNotInOrder} so out-of-order matches can be detected.
 * (Reformatted onto multiple lines; the collapsed one-line form breaks on its own inline
 * comments.)
 */
@CanIgnoreReturnValue
public final Ordered containsAtLeastElementsIn(@Nullable Iterable<?> expectedIterable) {
  List<?> actual = Lists.newLinkedList(checkNotNull(this.actual));
  Collection<?> expected = iterableToCollection(expectedIterable);
  List<@Nullable Object> missing = newArrayList();
  List<@Nullable Object> actualNotInOrder = newArrayList();
  boolean ordered = true;
  // step through the expected elements...
  for (Object e : expected) {
    int index = actual.indexOf(e);
    if (index != -1) { // if we find the element in the actual list...
      // drain all the elements that come before that element into actualNotInOrder
      moveElements(actual, actualNotInOrder, index);
      // and remove the element from the actual list
      actual.remove(0);
    } else { // otherwise try removing it from actualNotInOrder...
      if (actualNotInOrder.remove(e)) {
        // if it was in actualNotInOrder, we're not in order
        ordered = false;
      } else {
        // if it's not in actualNotInOrder, we're missing an expected element
        missing.add(e);
      }
    }
  }
  // if we have any missing expected elements, fail
  if (!missing.isEmpty()) {
    return failAtLeast(expected, missing);
  }
  return ordered
      ? IN_ORDER
      : new Ordered() {
        @Override
        public void inOrder() {
          ImmutableList.Builder<Fact> facts = ImmutableList.builder();
          facts.add(simpleFact("required elements were all found, but order was wrong"));
          facts.add(fact("expected order for required elements", expected));
          List<Object> actualOrder = Lists.newArrayList(checkNotNull(IterableSubject.this.actual));
          if (actualOrder.retainAll(expected)) {
            facts.add(fact("but order was", actualOrder));
            facts.add(fullContents());
            failWithoutActual(facts.build());
          } else {
            failWithActual(facts.build());
          }
        }
      };
}
// A subset passes; a superset with an extra element (4) fails and reports exactly the missing
// element plus the full expected list.
@Test public void iterableContainsAtLeastElementsInArray() { assertThat(asList(1, 2, 3)).containsAtLeastElementsIn(new Integer[] {1, 2}); expectFailureWhenTestingThat(asList(1, 2, 3)) .containsAtLeastElementsIn(new Integer[] {1, 2, 4}); assertFailureKeys("missing (1)", "---", "expected to contain at least", "but was"); assertFailureValue("missing (1)", "4"); assertFailureValue("expected to contain at least", "[1, 2, 4]"); }
/**
 * Reports the database migration status. Up-to-date / requires-downgrade short-circuit to the
 * current migration state; dialects without migration support report STATUS_NOT_SUPPORTED;
 * otherwise the in-flight state is returned, or MIGRATION_REQUIRED when no migration has
 * started. The default switch arm guards against future DatabaseMigrationState.Status values.
 */
@Operation(summary = "Gets the status of ongoing database migrations, if any", description = "Return the detailed status of ongoing database migrations" + " including starting date. If no migration is ongoing or needed it is still possible to call this endpoint and receive appropriate information.") @GetMapping public DatabaseMigrationsResponse getStatus() { Optional<Long> currentVersion = databaseVersion.getVersion(); checkState(currentVersion.isPresent(), NO_CONNECTION_TO_DB); DatabaseVersion.Status status = databaseVersion.getStatus(); if (status == DatabaseVersion.Status.UP_TO_DATE || status == DatabaseVersion.Status.REQUIRES_DOWNGRADE) { return new DatabaseMigrationsResponse(databaseMigrationState); } else if (!database.getDialect().supportsMigration()) { return new DatabaseMigrationsResponse(DatabaseMigrationState.Status.STATUS_NOT_SUPPORTED); } else { return switch (databaseMigrationState.getStatus()) { case RUNNING, FAILED, SUCCEEDED -> new DatabaseMigrationsResponse(databaseMigrationState); case NONE -> new DatabaseMigrationsResponse(DatabaseMigrationState.Status.MIGRATION_REQUIRED); default -> throw new IllegalArgumentException(UNSUPPORTED_DATABASE_MIGRATION_STATUS); }; } }
// With an upgrade required and a RUNNING migration, the endpoint must report MIGRATION_RUNNING
// with progress counters and the expected finish timestamp.
@Test void getStatus_whenDbMigrationsRunning_returnRunning() throws Exception { when(databaseVersion.getStatus()).thenReturn(DatabaseVersion.Status.REQUIRES_UPGRADE); when(dialect.supportsMigration()).thenReturn(true); when(migrationState.getStatus()).thenReturn(RUNNING); when(migrationState.getStartedAt()).thenReturn(Optional.of(SOME_DATE)); when(migrationState.getExpectedFinishDate(any())).thenReturn(Optional.of(SOME_DATE)); when(migrationState.getCompletedMigrations()).thenReturn(1); when(migrationState.getTotalMigrations()).thenReturn(10); mockMvc.perform(get(DATABASE_MIGRATIONS_ENDPOINT)).andExpectAll(status().isOk(), content().json("{\"status\":\"MIGRATION_RUNNING\",\"completedSteps\":1,\"totalSteps\":10," + "\"message\":\"Database migration is running.\",\"expectedFinishTimestamp\":\"" + SOME_DATE_STRING + "\"}")); }
/**
 * Converts a {@code RowType} to an Arrow {@code Schema}, mapping each field's name and
 * logical type to its Arrow counterpart via {@code ArrowUtils.toArrowField}.
 */
public static Schema toArrowSchema(RowType rowType) {
    Collection<Field> arrowFields =
            rowType.getFields().stream()
                    .map(field -> ArrowUtils.toArrowField(field.getName(), field.getType()))
                    .collect(Collectors.toCollection(ArrayList::new));
    return new Schema(arrowFields);
}
// Each RowType field must map positionally to an Arrow field with the same name and the
// expected Arrow type from the testFields fixture.
@Test void testConvertBetweenLogicalTypeAndArrowType() { Schema schema = ArrowUtils.toArrowSchema(rowType); assertThat(schema.getFields()).hasSize(testFields.size()); List<Field> fields = schema.getFields(); for (int i = 0; i < schema.getFields().size(); i++) { // verify convert from RowType to ArrowType assertThat(fields.get(i).getName()).isEqualTo(testFields.get(i).f0); assertThat(fields.get(i).getType()).isEqualTo(testFields.get(i).f2); } }
/**
 * Ensures that {@code obj} is not null.
 *
 * @param obj  the value to check
 * @param name the parameter name used in the error message
 * @param <T>  the value's type
 * @return {@code obj}, unchanged, so the call can be chained
 * @throws IllegalArgumentException if {@code obj} is null, with message "{name} cannot be null"
 */
public static <T> T notNull(T obj, String name) {
    if (obj != null) {
        return obj;
    }
    throw new IllegalArgumentException(name + " cannot be null");
}
// A null value must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class) public void notNullNull() { Check.notNull(null, "name"); }
// Convenience overload: resolves the UGI for the request without a servlet context.
public static UserGroupInformation getUGI(HttpServletRequest request, Configuration conf) throws IOException { return getUGI(null, request, conf); }
// Verifies JspHelper.getUGI for non-proxy requests under kerberos auth: requests without a
// filter-authenticated remote user are rejected, and the user.name parameter is ignored once a
// remote user is present. (Reformatted onto multiple lines; in the collapsed form the inline
// comments would otherwise swallow the following statements.)
@Test
public void testGetNonProxyUgi() throws IOException {
  conf.set(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "hdfs://localhost:4321/");
  ServletContext context = mock(ServletContext.class);
  String realUser = "TheDoctor";
  String user = "TheNurse";
  conf.set(DFSConfigKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
  UserGroupInformation.setConfiguration(conf);
  UserGroupInformation ugi;
  HttpServletRequest request;
  // have to be auth-ed with remote user
  request = getMockRequest(null, null, null);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals( "Security enabled but user not authenticated by filter", ioe.getMessage());
  }
  request = getMockRequest(null, realUser, null);
  try {
    JspHelper.getUGI(context, request, conf);
    Assert.fail("bad request allowed");
  } catch (IOException ioe) {
    Assert.assertEquals( "Security enabled but user not authenticated by filter", ioe.getMessage());
  }
  // ugi for remote user
  request = getMockRequest(realUser, null, null);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNull(ugi.getRealUser());
  Assert.assertEquals(ugi.getShortUserName(), realUser);
  checkUgiFromAuth(ugi);
  // ugi for remote user = real user
  request = getMockRequest(realUser, realUser, null);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNull(ugi.getRealUser());
  Assert.assertEquals(ugi.getShortUserName(), realUser);
  checkUgiFromAuth(ugi);
  // if there is remote user via SPNEGO, ignore user.name param
  request = getMockRequest(realUser, user, null);
  ugi = JspHelper.getUGI(context, request, conf);
  Assert.assertNull(ugi.getRealUser());
  Assert.assertEquals(ugi.getShortUserName(), realUser);
  checkUgiFromAuth(ugi);
}
// Convenience overload: issues the request with a fresh, empty RequestContext.
@Override public Future<RestResponse> restRequest(RestRequest request) { return restRequest(request, new RequestContext()); }
// Backup-requests stats consumer lifecycle: no events while no backup config exists; adding a
// config with "get" and "batch_get" operations emits two add events; narrowing the config to
// only "get" emits a remove event for "batch_get" carrying the same stats provider instance.
@Test(dataProvider = "isD2Async") public void testStatsConsumerRemoveOne(boolean isD2Async) throws Exception { AtomicReference<ServiceProperties> serviceProperties = new AtomicReference<>(); TestBackupRequestsStrategyStatsConsumer statsConsumer = new TestBackupRequestsStrategyStatsConsumer(); serviceProperties.set(createServiceProperties(null)); BackupRequestsClient client = createClient(serviceProperties::get, statsConsumer, isD2Async); URI uri = URI.create("d2://testService"); RestRequest restRequest = new RestRequestBuilder(uri).setEntity(CONTENT).build(); RequestContext requestContext = new RequestContext(); requestContext.putLocalAttr(R2Constants.OPERATION, "get"); Future<RestResponse> response = client.restRequest(restRequest, requestContext); assertEquals(response.get().getStatus(), 200); List<StatsConsumerEvent> events = statsConsumer.getEvents(); assertEquals(events.size(), 0); serviceProperties .set(createServiceProperties(Arrays.asList(createBackupRequestsConfiguration(5, "get"), createBackupRequestsConfiguration(1, "batch_get")))); requestContext = new RequestContext(); requestContext.putLocalAttr(R2Constants.OPERATION, "get"); response = client.restRequest(restRequest, requestContext); assertEquals(response.get().getStatus(), 200); events = statsConsumer.getEvents(); assertEquals(events.size(), 2); assertEquals(events.get(0).isEventAdd(), true); assertEquals(events.get(0).getService(), SERVICE_NAME); assertEquals(events.get(0).getOperation(), "get"); BackupRequestsStrategyStatsProvider statsProvider1 = events.get(0).getStatsProvider(); assertNotNull(statsProvider1); assertEquals(events.get(1).isEventAdd(), true); assertEquals(events.get(1).getService(), SERVICE_NAME); assertEquals(events.get(1).getOperation(), "batch_get"); BackupRequestsStrategyStatsProvider statsProvider2 = events.get(1).getStatsProvider(); assertNotNull(statsProvider2); serviceProperties .set(createServiceProperties(Arrays.asList(createBackupRequestsConfiguration(5, "get"))));
requestContext = new RequestContext(); requestContext.putLocalAttr(R2Constants.OPERATION, "get"); response = client.restRequest(restRequest, requestContext); assertEquals(response.get().getStatus(), 200); events = statsConsumer.getEvents(); assertEquals(events.size(), 3); assertEquals(events.get(2).isEventAdd(), false); assertEquals(events.get(2).getService(), SERVICE_NAME); assertEquals(events.get(2).getOperation(), "batch_get"); BackupRequestsStrategyStatsProvider removedStatsProvider = events.get(2).getStatsProvider(); assertNotNull(removedStatsProvider); assertSame(statsProvider2, removedStatsProvider); }
/**
 * Adjusts a key format for a new key schema. When a format change is permitted, the format is
 * first made compatible with multi-column keys and then with the new column types; in every
 * case the wrapping features are sanitized for the single- vs multi-column case.
 */
public static KeyFormat sanitizeKeyFormat(
    final KeyFormat keyFormat,
    final List<SqlType> newKeyColumnSqlTypes,
    final boolean allowKeyFormatChangeToSupportNewKeySchema
) {
  final int keyColumnCount = newKeyColumnSqlTypes.size();
  KeyFormat adjusted = keyFormat;
  if (allowKeyFormatChangeToSupportNewKeySchema) {
    adjusted = sanitizeKeyFormatForMultipleColumns(keyFormat, keyColumnCount);
    adjusted = sanitizeKeyFormatForTypeCompatibility(adjusted, newKeyColumnSqlTypes);
  }
  return sanitizeKeyFormatWrapping(adjusted, keyColumnCount == 1);
}
// A single-column key whose wrapping feature is already applicable must survive sanitizing
// unchanged (same format, same WRAP_SINGLES feature).
@Test public void shouldLeaveApplicableKeyWrappingUnchangedWhenSanitizing() { // Given: final KeyFormat format = KeyFormat.nonWindowed( FormatInfo.of(JsonFormat.NAME), SerdeFeatures.of(SerdeFeature.WRAP_SINGLES)); // When: final KeyFormat sanitized = SerdeFeaturesFactory.sanitizeKeyFormat(format, SINGLE_SQL_TYPE, true); // Then: assertThat(sanitized.getFormatInfo(), equalTo(FormatInfo.of(JsonFormat.NAME))); assertThat(sanitized.getFeatures(), equalTo(SerdeFeatures.of(SerdeFeature.WRAP_SINGLES))); }
/**
 * Reads an int config value by name, with caching. Preserved semantics:
 * - A cached value is returned as-is, even if the live property has changed since.
 * - A missing/unparseable value parses to 0 and is replaced by {@code defaultVal} WITHOUT
 *   clamping to {@code minVal} — note an explicit configured value of 0 is indistinguishable
 *   from "unset" here.
 * - Parsed non-zero values below {@code minVal} are raised to {@code minVal}.
 */
protected static int getConfigInt(String name, int defaultVal, int minVal) { if (cacheMap.containsKey(name)) { return (int)cacheMap.get(name); } int val = NumberUtils.toInt(getConfig(name)); if (val == 0) { val = defaultVal; } else if (val < minVal) { val = minVal; } cacheMap.put(name, val); return val; }
// Exercises getConfigInt: default fallback when unset or unparseable, min-value clamping, env
// var overriding the system property, and the value cache ignoring later property changes.
// (Reformatted onto multiple lines; in the collapsed form the inline comments would otherwise
// swallow the following statements.)
@Test
public void testGetConfigInt() {
  // clear cache
  DashboardConfig.clearCache();
  // default value
  assertEquals(0, DashboardConfig.getConfigInt("t", 0, 10));
  DashboardConfig.clearCache();
  assertEquals(1, DashboardConfig.getConfigInt("t", 1, 10));
  // property, wrong format
  System.setProperty("t", "asdf");
  DashboardConfig.clearCache();
  assertEquals(0, DashboardConfig.getConfigInt("t", 0, 10));
  System.setProperty("t", "");
  DashboardConfig.clearCache();
  assertEquals(0, DashboardConfig.getConfigInt("t", 0, 10));
  // min value
  System.setProperty("t", "2");
  DashboardConfig.clearCache();
  assertEquals(2, DashboardConfig.getConfigInt("t", 0, 1));
  DashboardConfig.clearCache();
  assertEquals(10, DashboardConfig.getConfigInt("t", 0, 10));
  DashboardConfig.clearCache();
  assertEquals(2, DashboardConfig.getConfigInt("t", 0, -1));
  // env
  environmentVariables.set("t", "20");
  DashboardConfig.clearCache();
  assertEquals(20, DashboardConfig.getConfigInt("t", 0, 10));
  // wrong format env var, but it will override property
  environmentVariables.set("t", "20dddd");
  DashboardConfig.clearCache();
  assertEquals(0, DashboardConfig.getConfigInt("t", 0, 10));
  // clear env, it will take property
  environmentVariables.set("t", "");
  DashboardConfig.clearCache();
  assertEquals(10, DashboardConfig.getConfigInt("t", 0, 10));
  DashboardConfig.clearCache();
  assertEquals(2, DashboardConfig.getConfigInt("t", 0, 1));
  // enable cache
  System.setProperty("t", "666");
  DashboardConfig.clearCache();
  assertEquals(666, DashboardConfig.getConfigInt("t", 0, 1));
  System.setProperty("t", "777");
  assertEquals(666, DashboardConfig.getConfigInt("t", 0, 1));
  System.setProperty("t", "555");
  assertEquals(666, DashboardConfig.getConfigInt("t", 0, 1));
}
/**
 * Writes {@code value} into the 1-based column {@code columnIndex}.
 *
 * @param columnIndex 1-based column position, must be within [1, column count]
 * @param value the cell value to store
 * @throws IllegalArgumentException if the index is out of range
 */
public void setCell(final int columnIndex, final Object value) {
    final boolean withinRange = 1 <= columnIndex && columnIndex <= data.length;
    Preconditions.checkArgument(withinRange);
    data[columnIndex - 1] = value;
}
// setCell is 1-based; with a single-column row, index 2 is out of range and must be rejected.
@Test void assertSetCellWithColumnIndexOutOfRange() { assertThrows(IllegalArgumentException.class, () -> memoryResultSetRow.setCell(2, "new")); }
// Loads a job log record by primary key, delegating to the mapper.
@Override public JobLogDO getJobLog(Long id) { return jobLogMapper.selectById(id); }
// Inserting a random job log and reading it back by id must yield an equal record.
// (Comments translated to English and statements re-broken onto lines; in the collapsed form
// each inline comment would swallow the statement following it.)
@Test
public void testGetJobLog() {
    // mock data
    JobLogDO dbJobLog = randomPojo(JobLogDO.class, o -> o.setExecuteIndex(1));
    jobLogMapper.insert(dbJobLog);
    // prepare the parameter
    Long id = dbJobLog.getId();
    // invoke
    JobLogDO jobLog = jobLogService.getJobLog(id);
    // assert
    assertPojoEquals(dbJobLog, jobLog);
}
/**
 * Maps SDS API failures to background exceptions, first inspecting the JSON error body: on
 * HTTP 404 with errorCode -41000 ("Node not found") or -40751 ("File not found") the cached
 * node id for {@code file} is invalidated before delegating to the superclass mapping.
 * (Reformatted onto multiple lines; in the collapsed form the inline comments would otherwise
 * swallow the following case labels.)
 */
@Override
public BackgroundException map(final String message, final ApiException failure, final Path file) {
    if(null != failure.getResponseBody()) {
        try {
            final JsonObject json = JsonParser.parseReader(new StringReader(failure.getResponseBody())).getAsJsonObject();
            if(json.has("errorCode")) {
                if(json.get("errorCode").isJsonPrimitive()) {
                    final int errorCode = json.getAsJsonPrimitive("errorCode").getAsInt();
                    switch(failure.getCode()) {
                        case HttpStatus.SC_NOT_FOUND:
                            switch(errorCode) {
                                // "debugInfo":"Node not found","errorCode":-41000
                                case -41000:
                                // "debugInfo":"File not found","errorCode":-40751
                                case -40751:
                                    // Invalidate cache on Node not found
                                    fileid.cache(file, null);
                                    break;
                            }
                            break;
                    }
                }
            }
        }
        catch(JsonParseException e) {
            // Ignore
        }
    }
    return super.map(message, failure, file);
}
// A 403 response whose body carries an errorCode not special-cased by the mapper falls through
// to the generic mapping, producing the default detail message for code -40761.
@Test public void testMap() { final BackgroundException e = new SDSExceptionMappingService(new SDSNodeIdProvider( new SDSSession(new Host(new SDSProtocol()), new DisabledX509TrustManager(), new DefaultX509KeyManager()) )).map(new ApiException("m", 403, Collections.emptyMap(), "{\"errorCode\" = -40761}")); assertEquals("Error -40761. Please contact your web hosting service provider for assistance.", e.getDetail()); }
/**
 * Sends an FCM push notification. Urgency maps to Android message priority, and the
 * notification type selects the single data key attached to the message. The returned future
 * resolves to an accepted result on success; a FirebaseMessagingException is translated into a
 * rejected result carrying the FCM error code (with UNREGISTERED flagged so callers can drop
 * the token), and any other error is rethrown wrapped. Send latency is recorded in
 * SEND_NOTIFICATION_TIMER regardless of outcome.
 */
@Override public CompletableFuture<SendPushNotificationResult> sendNotification(PushNotification pushNotification) { Message.Builder builder = Message.builder() .setToken(pushNotification.deviceToken()) .setAndroidConfig(AndroidConfig.builder() .setPriority(pushNotification.urgent() ? AndroidConfig.Priority.HIGH : AndroidConfig.Priority.NORMAL) .build()); final String key = switch (pushNotification.notificationType()) { case NOTIFICATION -> "newMessageAlert"; case ATTEMPT_LOGIN_NOTIFICATION_HIGH_PRIORITY -> "attemptLoginContext"; case CHALLENGE -> "challenge"; case RATE_LIMIT_CHALLENGE -> "rateLimitChallenge"; }; builder.putData(key, pushNotification.data() != null ? pushNotification.data() : ""); final Timer.Sample sample = Timer.start(); return GoogleApiUtil.toCompletableFuture(firebaseMessagingClient.sendAsync(builder.build()), executor) .whenComplete((ignored, throwable) -> sample.stop(SEND_NOTIFICATION_TIMER)) .thenApply(ignored -> new SendPushNotificationResult(true, Optional.empty(), false, Optional.empty())) .exceptionally(throwable -> { if (ExceptionUtils.unwrap(throwable) instanceof final FirebaseMessagingException firebaseMessagingException) { final String errorCode; if (firebaseMessagingException.getMessagingErrorCode() != null) { errorCode = firebaseMessagingException.getMessagingErrorCode().name(); } else { logger.warn("Received an FCM exception with no error code", firebaseMessagingException); errorCode = "unknown"; } final boolean unregistered = firebaseMessagingException.getMessagingErrorCode() == MessagingErrorCode.UNREGISTERED; return new SendPushNotificationResult(false, Optional.of(errorCode), unregistered, Optional.empty()); } else { throw ExceptionUtils.wrap(throwable); } }); }
// An INVALID_ARGUMENT rejection from FCM must surface as a non-accepted result carrying the
// error-code name, without marking the token unregistered.
@Test void testSendMessageRejected() { final PushNotification pushNotification = new PushNotification("foo", PushNotification.TokenType.FCM, PushNotification.NotificationType.NOTIFICATION, null, null, null, true); final FirebaseMessagingException invalidArgumentException = mock(FirebaseMessagingException.class); when(invalidArgumentException.getMessagingErrorCode()).thenReturn(MessagingErrorCode.INVALID_ARGUMENT); final SettableApiFuture<String> sendFuture = SettableApiFuture.create(); sendFuture.setException(invalidArgumentException); when(firebaseMessaging.sendAsync(any())).thenReturn(sendFuture); final SendPushNotificationResult result = fcmSender.sendNotification(pushNotification).join(); verify(firebaseMessaging).sendAsync(any(Message.class)); assertFalse(result.accepted()); assertEquals(Optional.of("INVALID_ARGUMENT"), result.errorCode()); assertFalse(result.unregistered()); }
// Null-safe conversion: returns defaultValue when value cannot be converted to a Double.
public static Double toDouble(Object value, Double defaultValue) { return convertQuietly(Double.class, value, defaultValue); }
// Converting the float 0.45f must yield exactly 0.45 as a double (delta 0), i.e. the conversion
// must not expose the float's binary representation error.
@Test public void floatToDoubleTest() { final float a = 0.45f; final double b = Convert.toDouble(a); assertEquals(0.45D, b, 0); }
/**
 * Refreshes the loaded-job cache after an ACL check, writing an audit log entry for both
 * outcomes: an UnsupportedOperationException is audited as a failure and rethrown; otherwise a
 * success entry is recorded.
 */
@Override public void refreshLoadedJobCache() throws IOException { UserGroupInformation user = checkAcls("refreshLoadedJobCache"); try { jobHistoryService.refreshLoadedJobCache(); } catch (UnsupportedOperationException e) { HSAuditLogger.logFailure(user.getShortUserName(), "refreshLoadedJobCache", adminAcl.toString(), HISTORY_ADMIN_SERVER, e.getMessage()); throw e; } HSAuditLogger.logSuccess(user.getShortUserName(), "refreshLoadedJobCache", HISTORY_ADMIN_SERVER); }
// The -refreshLoadedJobCache CLI flag must invoke the corresponding history-service operation.
@Test public void testRefreshLoadedJobCache() throws Exception { String[] args = new String[1]; args[0] = "-refreshLoadedJobCache"; hsAdminClient.run(args); verify(jobHistoryService).refreshLoadedJobCache(); }
/**
 * Converts a CamelCase name to snake_case, e.g. {@code "RocksDBOptions"} becomes
 * {@code "rocksdb_options"}. An underscore is inserted only before an upper-case letter
 * that starts a new word (i.e. one followed by a lower-case letter), so acronym runs
 * such as {@code "DB"} stay together.
 */
@VisibleForTesting
static String toSnakeCase(String name) {
    // Split word boundaries first, then lower-case the whole result.
    final String withUnderscores = name.replaceAll("(.)([A-Z][a-z])", "$1_$2");
    return withUnderscores.toLowerCase();
}
// Verifies CamelCase-to-snake_case conversion, including acronym handling ("DB" stays fused).
@Test
void testSnakeCaseConversion() {
    assertThat(ConfigOptionsDocGenerator.toSnakeCase("RocksOptions"))
            .isEqualTo("rocks_options");
    assertThat(ConfigOptionsDocGenerator.toSnakeCase("RocksDBOptions"))
            .isEqualTo("rocksdb_options");
    assertThat(ConfigOptionsDocGenerator.toSnakeCase("DBOptions")).isEqualTo("db_options");
}
/**
 * Deletes the ACL entry for {@code accessKey} on the broker at {@code addr}.
 *
 * @param addr          broker address
 * @param accessKey     access key whose ACL config is to be deleted
 * @param timeoutMillis synchronous RPC timeout
 * @throws MQClientException when the broker responds with anything other than SUCCESS
 */
public void deleteAccessConfig(final String addr, final String accessKey, final long timeoutMillis)
    throws RemotingException, InterruptedException, MQClientException {
    // Build the DELETE_ACL_CONFIG request for the given access key.
    final DeleteAccessConfigRequestHeader requestHeader = new DeleteAccessConfigRequestHeader();
    requestHeader.setAccessKey(accessKey);
    final RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.DELETE_ACL_CONFIG, requestHeader);

    // Route through the VIP channel when it is enabled on this client.
    final String brokerAddress = MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr);
    final RemotingCommand response = this.remotingClient.invokeSync(brokerAddress, request, timeoutMillis);
    assert response != null;

    // Any non-SUCCESS code is surfaced as an MQClientException carrying the broker's remark.
    if (response.getCode() == ResponseCode.SUCCESS) {
        return;
    }
    throw new MQClientException(response.getCode(), response.getRemark());
}
// Verifies that a non-SUCCESS broker response is mapped to an MQClientException
// carrying the broker's response code and remark.
@Test
public void testDeleteAccessConfig_Exception() throws InterruptedException, RemotingException {
    doAnswer(mock -> {
        RemotingCommand request = mock.getArgument(1);
        return createErrorResponse4DeleteAclConfig(request);
    }).when(remotingClient).invokeSync(anyString(), any(RemotingCommand.class), anyLong());
    try {
        mqClientAPI.deleteAccessConfig(brokerAddr, "11111", 3 * 1000);
    } catch (MQClientException ex) {
        assertThat(ex.getResponseCode()).isEqualTo(210);
        assertThat(ex.getErrorMessage()).isEqualTo("corresponding to accessConfig has been deleted failed");
    }
}
/**
 * Initializes the change-stream pipeline according to {@code existingPipelineOptions},
 * deciding whether to resume from a previously persisted watermark or start fresh.
 * Emits a single {@link InitialPipelineState}; on a fatal option/state mismatch it
 * returns without emitting, which stops the pipeline.
 */
@ProcessElement
public void processElement(OutputReceiver<InitialPipelineState> receiver) throws IOException {
    LOG.info(daoFactory.getStreamTableDebugString());
    LOG.info(daoFactory.getMetadataTableDebugString());
    LOG.info("ChangeStreamName: " + daoFactory.getChangeStreamName());
    boolean resume = false;
    // Non-null state means a prior pipeline with this change-stream name left a watermark behind.
    DetectNewPartitionsState detectNewPartitionsState =
        daoFactory.getMetadataTableDao().readDetectNewPartitionsState();
    switch (existingPipelineOptions) {
        case RESUME_OR_NEW:
            // perform resumption.
            if (detectNewPartitionsState != null) {
                resume = true;
                startTime = detectNewPartitionsState.getWatermark();
                LOG.info("Resuming from previous pipeline with low watermark of {}", startTime);
            } else {
                LOG.info(
                    "Attempted to resume, but previous watermark does not exist, starting at {}",
                    startTime);
            }
            break;
        case RESUME_OR_FAIL:
            // perform resumption.
            if (detectNewPartitionsState != null) {
                resume = true;
                startTime = detectNewPartitionsState.getWatermark();
                LOG.info("Resuming from previous pipeline with low watermark of {}", startTime);
            } else {
                LOG.error("Previous pipeline with the same change stream name doesn't exist, stopping");
                return;
            }
            break;
        case FAIL_IF_EXISTS:
            if (detectNewPartitionsState != null) {
                LOG.error(
                    "A previous pipeline exists with the same change stream name and existingPipelineOption is set to FAIL_IF_EXISTS.");
                return;
            }
            break;
        case SKIP_CLEANUP:
            if (detectNewPartitionsState != null) {
                LOG.error(
                    "A previous pipeline exists with the same change stream name and existingPipelineOption is set to SKIP_CLEANUP. This option should only be used in tests.");
                return;
            }
            break;
        default:
            LOG.error("Unexpected existingPipelineOptions option.");
            // terminate pipeline
            return;
    }
    // Mark the metadata table with the current version before kicking off processing.
    daoFactory.getMetadataTableDao().writeDetectNewPartitionVersion();
    receiver.output(new InitialPipelineState(startTime, resume));
}
// Verifies FAIL_IF_EXISTS with no prior state emits a fresh (non-resume) initial state.
@Test
public void testInitializeDefault() throws IOException {
    Instant startTime = Instant.now();
    InitializeDoFn initializeDoFn =
        new InitializeDoFn(
            daoFactory, startTime, BigtableIO.ExistingPipelineOptions.FAIL_IF_EXISTS);
    initializeDoFn.processElement(outputReceiver);
    verify(outputReceiver, times(1)).output(new InitialPipelineState(startTime, false));
}
/**
 * Deletes the given files via the Google Drive batch API. Deletions are partitioned into
 * bounded batches to avoid 413 responses; individual failures are collected per batch and
 * the first one is rethrown after the batch executes.
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    // Must split otherwise 413 Request Entity Too Large is returned
    for(List<Path> partition : new Partition<>(new ArrayList<>(files.keySet()),
            new HostPreferences(session.getHost()).getInteger("googledrive.delete.multiple.partition"))) {
        final BatchRequest batch = session.getClient().batch();
        // Thread-safe list: batch callbacks may record failures concurrently.
        final List<BackgroundException> failures = new CopyOnWriteArrayList<>();
        for(Path file : partition) {
            try {
                this.queue(file, batch, callback, failures);
            }
            catch(IOException e) {
                throw new DriveExceptionMappingService(fileid).map("Cannot delete {0}", e, file);
            }
        }
        if(!partition.isEmpty()) {
            try {
                batch.execute();
            }
            catch(IOException e) {
                throw new DriveExceptionMappingService(fileid).map(e);
            }
            // Surface the first recorded per-file failure, aborting remaining partitions.
            for(BackgroundException e : failures) {
                throw e;
            }
        }
    }
}
// Verifies that deleting a non-existent file surfaces a NotfoundException from the batch.
@Test(expected = NotfoundException.class)
public void testDeleteNotFound() throws Exception {
    final Path test = new Path(DriveHomeFinderService.MYDRIVE_FOLDER, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    new DriveBatchDeleteFeature(session, new DriveFileIdProvider(session)).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Trims {@code suggestions} down to at most {@code maxSuggestions} entries by repeatedly
 * calling {@code removeSuggestion}, which also recycles removed strings into
 * {@code stringsPool}.
 *
 * <p>NOTE(review): the name looks like a typo for "trimSuggestions", but it is public API
 * (callers and tests invoke it by this name), so it must not be renamed here.
 */
public static void tripSuggestions(
    List<CharSequence> suggestions, final int maxSuggestions, List<CharSequence> stringsPool) {
    while (suggestions.size() > maxSuggestions) {
        removeSuggestion(suggestions, maxSuggestions, stringsPool);
    }
}
// Verifies the list is left untouched (size and order) when it is already within the limit.
@Test
public void testTrimSuggestionsWhenNoNeed() {
    ArrayList<CharSequence> list =
        new ArrayList<>(
            Arrays.<CharSequence>asList("typed", "something", "duped", "duped", "something"));
    IMEUtil.tripSuggestions(list, 10, mStringPool);
    Assert.assertEquals(5, list.size());
    Assert.assertEquals("typed", list.get(0));
    Assert.assertEquals("something", list.get(1));
    Assert.assertEquals("duped", list.get(2));
    Assert.assertEquals("duped", list.get(3));
    Assert.assertEquals("something", list.get(4));
}
/**
 * Binds an exchange server at the given URL string with the supplied replier.
 * Convenience overload that parses the string and delegates to {@code bind(URL, Replier)}.
 */
public static ExchangeServer bind(String url, Replier<?> replier) throws RemotingException {
    return bind(URL.valueOf(url), replier);
}
// Verifies the bind overloads accept valid arguments and reject null URL / null handler.
@Test
void testBind() throws RemotingException {
    String url = "dubbo://127.0.0.1:12345?exchanger=mockExchanger";
    Exchangers.bind(url, Mockito.mock(Replier.class));
    Exchangers.bind(url, new ChannelHandlerAdapter(), Mockito.mock(Replier.class));
    Exchangers.bind(url, new ExchangeHandlerDispatcher());
    Assertions.assertThrows(
        RuntimeException.class, () -> Exchangers.bind((URL) null, new ExchangeHandlerDispatcher()));
    Assertions.assertThrows(RuntimeException.class, () -> Exchangers.bind(url, (ExchangeHandlerDispatcher) null));
}
/**
 * Returns the read data sources from {@code toBeFilteredReadDataSources} that are not
 * currently disabled on the given rule. The input list is never mutated.
 */
@Override
public List<String> filter(final ReadwriteSplittingDataSourceGroupRule rule, final List<String> toBeFilteredReadDataSources) {
    final List<String> enabled = new LinkedList<>();
    for (String dataSourceName : toBeFilteredReadDataSources) {
        // Keep only data sources that are not in the rule's disabled set.
        if (!rule.getDisabledDataSourceNames().contains(dataSourceName)) {
            enabled.add(dataSourceName);
        }
    }
    return enabled;
}
// Verifies that a disabled data source is removed from the candidate read list.
@Test
void assertGetReadDataSourceNamesWithDisabledDataSourceNames() {
    rule.disableDataSource("read_ds_0");
    assertThat(new DisabledReadDataSourcesFilter().filter(rule, Arrays.asList("read_ds_0", "read_ds_1")), is(Collections.singletonList("read_ds_1")));
}
/**
 * Reads the keytab at {@code keytabFileName} and returns the distinct principal names it
 * contains, with backslash separators normalized to forward slashes.
 *
 * @throws IOException if the keytab file cannot be read
 */
static final String[] getPrincipalNames(String keytabFileName) throws IOException {
    final Keytab keytab = Keytab.loadKeytab(new File(keytabFileName));
    // A set de-duplicates principals that appear in multiple keytab entries.
    final Set<String> names = new HashSet<>();
    for (PrincipalName principal : keytab.getPrincipals()) {
        names.add(principal.getName().replace("\\", "/"));
    }
    return names.toArray(new String[0]);
}
// Verifies that principals read from a keytab can be filtered by an HTTP/* pattern
// and that the filtered result contains exactly the matching test principals.
@Test
public void testGetPrincipalNamesFromKeytabWithPattern() throws IOException {
    createKeyTab(testKeytab, testPrincipals);
    // read the keytab file
    // look for principals with HTTP as the first part
    Pattern httpPattern = Pattern.compile("HTTP/.*");
    String[] httpPrincipals =
        KerberosUtil.getPrincipalNames(testKeytab, httpPattern);
    Assert.assertNotNull("principals cannot be null", httpPrincipals);
    int expectedSize = 0;
    List<String> httpPrincipalList = Arrays.asList(httpPrincipals);
    for (String principal : testPrincipals) {
        if (httpPattern.matcher(principal).matches()) {
            Assert.assertTrue("missing principal "+principal,
                httpPrincipalList.contains(principal));
            expectedSize++;
        }
    }
    Assert.assertEquals(expectedSize, httpPrincipals.length);
}
/**
 * Creates a scheduled thread pool of {@code corePoolSize} threads whose task metrics
 * are reported to {@code registry} under the given {@code name}.
 */
public static InstrumentedScheduledExecutorService newScheduledThreadPool(
    int corePoolSize, MetricRegistry registry, String name) {
    return new InstrumentedScheduledExecutorService(
        Executors.newScheduledThreadPool(corePoolSize), registry, name);
}
// Verifies the factory wires pool size, thread factory, and metric naming through to the
// underlying ScheduledThreadPoolExecutor (inspected via reflection on the delegate field).
@Test
public void testNewScheduledThreadPoolWithThreadFactoryAndName() throws Exception {
    final ScheduledExecutorService executorService =
        InstrumentedExecutors.newScheduledThreadPool(2, defaultThreadFactory, registry, "xs");
    executorService.schedule(new NoopRunnable(), 0, TimeUnit.SECONDS);

    assertThat(registry.meter("xs.scheduled.once").getCount()).isEqualTo(1L);

    final Field delegateField = InstrumentedScheduledExecutorService.class.getDeclaredField("delegate");
    delegateField.setAccessible(true);
    final ScheduledThreadPoolExecutor delegate = (ScheduledThreadPoolExecutor) delegateField.get(executorService);
    assertThat(delegate.getCorePoolSize()).isEqualTo(2);
    assertThat(delegate.getThreadFactory()).isSameAs(defaultThreadFactory);
    executorService.shutdown();
}
/**
 * Handles GET for a single page identified by the {@code name} path variable,
 * responding with the page rendered as JSON.
 */
private Mono<ServerResponse> getByName(ServerRequest request) {
    final String name = request.pathVariable("name");
    return singlePageFinder.getByName(name)
        .flatMap(page -> ServerResponse.ok()
            .contentType(MediaType.APPLICATION_JSON)
            .bodyValue(page));
}
// Verifies GET /singlepages/{name} returns the found page as JSON and queries the finder.
@Test
void getByName() {
    SinglePageVo singlePage = SinglePageVo.builder()
        .metadata(metadata("fake-page"))
        .spec(new SinglePage.SinglePageSpec())
        .build();
    when(singlePageFinder.getByName(eq("fake-page")))
        .thenReturn(Mono.just(singlePage));

    webTestClient.get()
        .uri("/singlepages/fake-page")
        .exchange()
        .expectStatus().isOk()
        .expectHeader().contentType(MediaType.APPLICATION_JSON)
        .expectBody()
        .jsonPath("$.metadata.name").isEqualTo("fake-page");

    verify(singlePageFinder).getByName("fake-page");
}
/**
 * Loads the persisted state of every qualified data source from the repository.
 * Nodes whose YAML content is missing or empty are silently skipped.
 *
 * @return map from qualified data source node name to its deserialized state
 */
public Map<String, QualifiedDataSourceState> loadStates() {
    Collection<String> qualifiedDataSourceNodes = repository.getChildrenKeys(QualifiedDataSourceNode.getRootPath());
    // Pre-size with load factor 1 so the map never rehashes for the known node count.
    Map<String, QualifiedDataSourceState> result = new HashMap<>(qualifiedDataSourceNodes.size(), 1F);
    qualifiedDataSourceNodes.forEach(each -> {
        String yamlContent = repository.query(QualifiedDataSourceNode.getQualifiedDataSourceNodePath(new QualifiedDataSource(each)));
        if (!Strings.isNullOrEmpty(yamlContent)) {
            result.put(each, new YamlQualifiedDataSourceStateSwapper().swapToObject(YamlEngine.unmarshal(yamlContent, YamlQualifiedDataSourceState.class)));
        }
    });
    return result;
}
// Verifies loadStates() tolerates nodes with no persisted YAML content (repository.query
// is unstubbed and returns null) without throwing.
@Test
void assertLoadStatus() {
    List<String> disabledDataSources = Arrays.asList("replica_query_db.readwrite_ds.replica_ds_0", "other_schema.other_ds.other_ds0");
    when(repository.getChildrenKeys(anyString())).thenReturn(disabledDataSources);
    assertDoesNotThrow(() -> new QualifiedDataSourceStatePersistService(repository).loadStates());
}
/**
 * Releases this channel's resources exactly once: marks the channel released and, if a
 * subpartition view was attached, releases and clears it. Subsequent calls are no-ops.
 */
@Override
void releaseAllResources() throws IOException {
    // Idempotent: bail out if a previous call already released the channel.
    if (isReleased) {
        return;
    }
    isReleased = true;

    final ResultSubpartitionView attachedView = subpartitionView;
    if (attachedView != null) {
        attachedView.releaseAllResources();
        subpartitionView = null;
    }
}
// Verifies that resuming consumption on an already-released channel fails fast.
@Test
void testUnblockReleasedChannel() throws Exception {
    SingleInputGate inputGate = createSingleInputGate(1);
    LocalInputChannel localChannel =
        createLocalInputChannel(inputGate, new ResultPartitionManager());

    localChannel.releaseAllResources();
    assertThatThrownBy(localChannel::resumeConsumption)
        .isInstanceOf(IllegalStateException.class);
}
/**
 * Returns the ACL grantee types supported by Google Cloud Storage: canonical user,
 * authenticated-users group, everyone, Google account email, Google Apps domain, and
 * Google Group email. Placeholder text on each entry is shown in the UI.
 */
@Override
public List<Acl.User> getAvailableAclUsers() {
    final List<Acl.User> users = new ArrayList<Acl.User>(Arrays.asList(
        new Acl.CanonicalUser(),
        new Acl.GroupUser(Acl.GroupUser.AUTHENTICATED, false) {
            @Override
            public String getPlaceholder() {
                return LocaleFactory.localizedString("Google Account Holders", "S3");
            }
        },
        new Acl.GroupUser(Acl.GroupUser.EVERYONE, false))
    );
    users.add(new Acl.EmailUser() {
        @Override
        public String getPlaceholder() {
            return LocaleFactory.localizedString("Google Account Email Address", "S3");
        }
    });
    // Google Apps customers can associate their email accounts with an Internet domain name. When you do
    // this, each email account takes the form username@yourdomain.com. You can specify a scope by using
    // any Internet domain name that is associated with a Google Apps account.
    users.add(new Acl.DomainUser(StringUtils.EMPTY) {
        @Override
        public String getPlaceholder() {
            return LocaleFactory.localizedString("Google Apps Domain", "S3");
        }
    });
    users.add(new Acl.EmailGroupUser(StringUtils.EMPTY, true) {
        @Override
        public String getPlaceholder() {
            return LocaleFactory.localizedString("Google Group Email Address", "S3");
        }
    });
    return users;
}
// Verifies all expected grantee categories are present in the available ACL users.
@Test
public void testRoles() {
    final GoogleStorageAccessControlListFeature f = new GoogleStorageAccessControlListFeature(session);
    final List<Acl.User> users = f.getAvailableAclUsers();
    assertTrue(f.getAvailableAclUsers().stream().filter(user -> user instanceof Acl.CanonicalUser).findAny().isPresent());
    assertTrue(f.getAvailableAclUsers().stream().filter(user -> user instanceof Acl.EmailUser).findAny().isPresent());
    assertTrue(f.getAvailableAclUsers().stream().filter(user -> user instanceof Acl.EmailGroupUser).findAny().isPresent());
    assertTrue(f.getAvailableAclUsers().stream().filter(user -> user instanceof Acl.DomainUser).findAny().isPresent());
}
/**
 * Creates an empty SortedMapWritable backed by a TreeMap, so keys iterate in their
 * natural sorted order.
 */
public SortedMapWritable() {
    super();
    this.instance = new TreeMap<K, Writable>();
}
// Exercises SortedMapWritable: sorted key order, the copy constructor (flat map and a
// map-of-maps), and value equality after copying.
@Test
@SuppressWarnings("unchecked")
public void testSortedMapWritable() {
    Text[] keys = {
        new Text("key1"),
        new Text("key2"),
        new Text("key3"),
    };

    BytesWritable[] values = {
        new BytesWritable("value1".getBytes()),
        new BytesWritable("value2".getBytes()),
        new BytesWritable("value3".getBytes())
    };

    SortedMapWritable<Text> inMap = new SortedMapWritable<Text>();
    for (int i = 0; i < keys.length; i++) {
        inMap.put(keys[i], values[i]);
    }

    // TreeMap backing guarantees the first/last keys are the smallest/largest inserted.
    assertEquals(0, inMap.firstKey().compareTo(keys[0]));
    assertEquals(0, inMap.lastKey().compareTo(keys[2]));

    SortedMapWritable<Text> outMap = new SortedMapWritable<Text>(inMap);
    assertEquals(inMap.size(), outMap.size());

    for (Map.Entry<Text, Writable> e: inMap.entrySet()) {
        assertTrue(outMap.containsKey(e.getKey()));
        WritableComparable<WritableComparable<?>> aValue = (WritableComparable<WritableComparable<?>>) outMap.get(e.getKey());
        WritableComparable<WritableComparable<?>> bValue = (WritableComparable<WritableComparable<?>>) e.getValue();
        assertEquals(0, aValue.compareTo(bValue));
    }

    // Now for something a little harder...
    Text[] maps = {
        new Text("map1"),
        new Text("map2")
    };

    SortedMapWritable<Text> mapOfMaps = new SortedMapWritable<Text>();
    mapOfMaps.put(maps[0], inMap);
    mapOfMaps.put(maps[1], outMap);

    // Copying a map whose values are themselves SortedMapWritables must deep-preserve entries.
    SortedMapWritable<Text> copyOfMapOfMaps = new SortedMapWritable<Text>(mapOfMaps);
    for (int i = 0; i < maps.length; i++) {
        assertTrue(copyOfMapOfMaps.containsKey(maps[i]));

        SortedMapWritable<Text> a = (SortedMapWritable<Text>) mapOfMaps.get(maps[i]);
        SortedMapWritable<Text> b = (SortedMapWritable<Text>) copyOfMapOfMaps.get(maps[i]);
        assertEquals(a.size(), b.size());
        for (Writable key: a.keySet()) {
            assertTrue(b.containsKey(key));

            // This will work because we know what we put into each set
            WritableComparable<WritableComparable<?>> aValue = (WritableComparable<WritableComparable<?>>) a.get(key);
            WritableComparable<WritableComparable<?>> bValue = (WritableComparable<WritableComparable<?>>) b.get(key);
            assertEquals(0, aValue.compareTo(bValue));
        }
    }
}
/**
 * Returns the region covering {@code bufferIndex} of {@code subpartitionId}, consulting
 * the in-memory cache first and, on a miss, attempting to load the region from disk
 * before retrying the cache. Empty when no region covers the requested buffer.
 */
public Optional<T> get(int subpartitionId, int bufferIndex) {
    // first of all, try to get region in memory.
    Optional<T> regionOpt = getCachedRegionContainsTargetBufferIndex(subpartitionId, bufferIndex);
    if (regionOpt.isPresent()) {
        T region = regionOpt.get();
        checkNotNull(
            // this is needed for cache entry remove algorithm like LRU.
            internalCache.getIfPresent(
                new CachedRegionKey(subpartitionId, region.getFirstBufferIndex())));
        return Optional.of(region);
    } else {
        // try to find target region and load it into cache if founded.
        spilledRegionManager.findRegion(subpartitionId, bufferIndex, true);
        return getCachedRegionContainsTargetBufferIndex(subpartitionId, bufferIndex);
    }
}
// Verifies a lookup for a buffer index with no covering region yields an empty Optional.
@Test
void testGetNonExistentRegion() {
    Optional<TestingFileDataIndexRegion> region = indexCache.get(0, 0);
    // get a non-existent region.
    assertThat(region).isNotPresent();
}
/**
 * Requests an activation letter for the account in the current app session. Refuses when
 * the account has no BSN, logs a re-request when run from the re-apply flow, and maps
 * backend errors (too_often / too_soon / too_many_letter_requests) to the corresponding
 * remote-log codes and NOK responses. On success, stores the registration id on the
 * session and returns OK.
 */
@Override
public AppResponse process(Flow flow, AppRequest request) {
    if (!appSession.getWithBsn()) {
        digidClient.remoteLog("1487", Map.of(lowerUnderscore(ACCOUNT_ID), appSession.getAccountId(), "hidden", true));
        return new NokResponse("no_bsn_on_account");
    }

    // The re-apply flow re-requests a letter for an existing activation; log it separately.
    boolean reRequestLetter = flow.getName().equals(ReApplyActivateActivationCode.NAME);
    if(reRequestLetter){
        digidClient.remoteLog("914", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(),
            lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(),
            lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName()));
    }

    Map<String, Object> result = digidClient.createLetter(appSession.getAccountId(), appSession.getActivationMethod(), reRequestLetter);

    if (result.get(ERROR) != null){
        // Each backend error variant has its own remote-log code; "too_often" also
        // carries a payload back to the app.
        if(result.get(ERROR).equals("too_often")){
            digidClient.remoteLog("906", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(),
                lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(),
                lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName()));
            return new NokTooOftenResponse((Map<String, Object>) result.get(PAYLOAD), (String) result.get(ERROR));
        } else if(result.get(ERROR).equals("too_soon")){
            digidClient.remoteLog("758", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId()));
        } else if(result.get(ERROR).equals("too_many_letter_requests")){
            digidClient.remoteLog("1554", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(),
                lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(),
                lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName()));
        }
        return new NokResponse((String) result.get(ERROR));
    }

    // Remember the registration so later steps in the flow can reference it.
    appSession.setRegistrationId(((Integer) result.get(lowerUnderscore(REGISTRATION_ID))).longValue());

    digidClient.remoteLog("904", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(),
        lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(),
        lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName()));

    return new OkResponse();
}
// Verifies that an account without BSN is rejected with the dedicated error and
// that the rejection is remote-logged under code 1487.
@Test
void processNoBsn() {
    mockedAppSession.setWithBsn(false);

    NokResponse appResponse = (NokResponse) letterSent.process(mockedFlow, mockedAbstractAppRequest);

    verify(digidClientMock, times(1)).remoteLog("1487",
        Map.of("hidden", true, lowerUnderscore(ACCOUNT_ID), mockedAppSession.getAccountId()));
    assertEquals(NO_BSN_ERROR_STRING, appResponse.getError());
}
/**
 * Deletes a single cache entry by key. Convenience overload delegating to the
 * list-based {@code delete(List)}.
 */
public void delete(String key) {
    delete(Collections.singletonList(key));
}
// Verifies deleting a key that was never stored is a silent no-op.
@Test
public void delete_missingFile_ignored() {
    cache.delete("fakeKey");
}
/**
 * Formats {@code schema} as a SQL type string by walking it with the converter and
 * appending the type postfix. When the AS_COLUMN_LIST option is set, the outermost
 * struct wrapper is stripped so only the column list remains.
 */
@Override
public String format(final Schema schema) {
    final String formatted = SchemaWalker.visit(schema, new Converter()) + typePostFix(schema);
    if (options.contains(Option.AS_COLUMN_LIST)) {
        return stripTopLevelStruct(formatted);
    }
    return formatted;
}
// Verifies optional strings format as VARCHAR in both default and strict modes.
@Test
public void shouldFormatOptionalString() {
    assertThat(DEFAULT.format(Schema.OPTIONAL_STRING_SCHEMA), is("VARCHAR"));
    assertThat(STRICT.format(Schema.OPTIONAL_STRING_SCHEMA), is("VARCHAR"));
}
/**
 * Loads and parses all stored pipelines, resolving their rule references against the
 * current rule set, and returns them keyed by pipeline id. Pipelines that fail to parse
 * are replaced by an empty placeholder pipeline rather than aborting the whole load.
 */
public ImmutableMap<String, Pipeline> resolvePipelines(PipelineMetricRegistry pipelineMetricRegistry) {
    final Map<String, Rule> ruleNameMap = resolveRules();

    // Read all pipelines and parse them
    final ImmutableMap.Builder<String, Pipeline> pipelineIdMap = ImmutableMap.builder();
    try (final var pipelineStream = pipelineDaoSupplier.get()) {
        pipelineStream.forEach(pipelineDao -> {
            Pipeline pipeline;
            try {
                pipeline = ruleParser.parsePipeline(pipelineDao.id(), pipelineDao.source());
            } catch (ParseException e) {
                // Keep the pipeline id present in the map with a placeholder so the
                // failure is visible downstream instead of silently dropped.
                LOG.warn("Ignoring non parseable pipeline <{}/{}> with errors <{}>", pipelineDao.title(), pipelineDao.id(), e.getErrors());
                pipeline = Pipeline.empty("Failed to parse pipeline: " + pipelineDao.id());
            }
            //noinspection ConstantConditions
            pipelineIdMap.put(pipelineDao.id(), resolvePipeline(pipelineMetricRegistry, pipeline, ruleNameMap));
        });
    }

    return pipelineIdMap.build();
}
// Verifies a pipeline's stages and rule references resolve against the rule set and that
// the expected per-rule and per-pipeline metrics are registered.
@Test
void resolvePipelines() {
    final var registry = PipelineMetricRegistry.create(metricRegistry, Pipeline.class.getName(), Rule.class.getName());
    final var resolver = new PipelineResolver(
        new PipelineRuleParser(new FunctionRegistry(Map.of())),
        PipelineResolverConfig.of(
            () -> Stream.of(rule1),
            () -> Stream.of(pipeline1),
            () -> Stream.of(connections1, connections2)
        )
    );

    final var pipelines = resolver.resolvePipelines(registry);

    assertThat(pipelines).hasSize(1);
    assertThat(pipelines.get("pipeline-1")).satisfies(pipeline -> {
        assertThat(pipeline.id()).isEqualTo("pipeline-1");
        assertThat(pipeline.name()).isEqualTo("test-pipeline-1");

        assertThat(pipeline.stages()).hasSize(1);
        assertThat(pipeline.stages().first()).satisfies(stage -> {
            assertThat(stage.stage()).isEqualTo(5);
            assertThat(stage.match()).isEqualTo(Stage.Match.EITHER);
            assertThat(stage.ruleReferences()).isEqualTo(List.of("test-rule-1"));
            assertThat(stage.getRules()).hasSize(1);
            assertThat(stage.getRules().get(0)).satisfies(rule -> {
                assertThat(rule.id()).isEqualTo("rule-1");
                assertThat(rule.name()).isEqualTo("test-rule-1");
            });
        });
    });

    assertThat(metricRegistry.getMetrics().keySet()).containsExactlyInAnyOrder(
        "org.graylog.plugins.pipelineprocessor.ast.Rule.rule-1.pipeline-1.5.not-matched",
        "org.graylog.plugins.pipelineprocessor.ast.Rule.rule-1.pipeline-1.5.matched",
        "org.graylog.plugins.pipelineprocessor.ast.Rule.rule-1.pipeline-1.5.failed",
        "org.graylog.plugins.pipelineprocessor.ast.Rule.rule-1.pipeline-1.5.executed",
        "org.graylog.plugins.pipelineprocessor.ast.Rule.rule-1.matched",
        "org.graylog.plugins.pipelineprocessor.ast.Rule.rule-1.not-matched",
        "org.graylog.plugins.pipelineprocessor.ast.Rule.rule-1.failed",
        "org.graylog.plugins.pipelineprocessor.ast.Rule.rule-1.executed",
        "org.graylog.plugins.pipelineprocessor.ast.Pipeline.pipeline-1.executed",
        "org.graylog.plugins.pipelineprocessor.ast.Pipeline.pipeline-1.stage.5.executed"
    );
}
/**
 * Opens an editor for {@code key} without a sequence-number precondition.
 * Delegates to {@code edit(key, ANY_SEQUENCE_NUMBER)}.
 */
public Editor edit(String key) throws IOException {
    return edit(key, ANY_SEQUENCE_NUMBER);
}
// Verifies an editor becomes unusable after commit has published its values.
@Test
public void cannotOperateOnEditAfterPublish() throws Exception {
    DiskLruCache.Editor editor = cache.edit("k1");
    editor.set(0, "A");
    editor.set(1, "B");
    editor.commit();
    assertInoperable(editor);
}
/**
 * Builds a {@link ProjectMeasuresQuery} from the given filter criteria, optionally
 * restricted to the supplied project UUIDs (ignored when {@code null}).
 */
static ProjectMeasuresQuery newProjectMeasuresQuery(List<Criterion> criteria, @Nullable Set<String> projectUuids) {
    ProjectMeasuresQuery query = new ProjectMeasuresQuery();
    if (projectUuids != null) {
        query.setProjectUuids(projectUuids);
    }
    // Each criterion mutates the query in place.
    for (Criterion criterion : criteria) {
        processCriterion(criterion, query);
    }
    return query;
}
// Verifies an IN criterion on "tags" is translated into the query's tag set.
@Test
public void create_query_on_tag_using_in_operator() {
    ProjectMeasuresQuery query = newProjectMeasuresQuery(
        singletonList(Criterion.builder().setKey("tags").setOperator(IN).setValues(asList("java", "js")).build()),
        emptySet());

    assertThat(query.getTags().get()).containsOnly("java", "js");
}
/**
 * Returns whether this filter targets the given pipeline/stage pair. The stored names
 * match either exactly or via the ANY_PIPELINE / ANY_STAGE wildcards.
 */
public boolean appliesTo(String pipelineName, String stageName) {
    final boolean matchesPipeline =
        this.pipelineName.equals(GoConstants.ANY_PIPELINE) || this.pipelineName.equals(pipelineName);
    final boolean matchesStage =
        this.stageName.equals(GoConstants.ANY_STAGE) || this.stageName.equals(stageName);
    return matchesPipeline && matchesStage;
}
// Verifies a filter bound to one pipeline does not apply to a different pipeline,
// even with the any-stage wildcard.
@Test
void shouldNotApplyIfPipelineDiffers() {
    NotificationFilter filter = new NotificationFilter("cruise1", GoConstants.ANY_STAGE, StageEvent.Breaks, false);
    assertThat(filter.appliesTo("cruise2", "dev")).isFalse();
}
/**
 * Deletes the schema-registry subjects belonging to the internal topics of the given
 * Kafka Streams application. Deletion of each subject is attempted independently via
 * {@code tryDeleteInternalSubject}.
 */
public static void cleanupInternalTopicSchemas(
    final String applicationId,
    final SchemaRegistryClient schemaRegistryClient) {
    getInternalSubjectNames(applicationId, schemaRegistryClient)
        .forEach(subject -> tryDeleteInternalSubject(
            applicationId,
            schemaRegistryClient,
            subject));
}
// Verifies a 404/40401 (subject not found) from the registry is treated as terminal:
// each subject deletion is attempted exactly once, with no retries.
@Test
public void shouldNotRetryIf40401() throws Exception {
    // Given:
    when(schemaRegistryClient.getAllSubjects()).thenReturn(ImmutableList.of(
        APP_ID + "SOME-changelog-key", APP_ID + "SOME-changelog-value"
    ));
    when(schemaRegistryClient.deleteSubject(any())).thenThrow(new RestClientException("foo", 404, 40401));

    // When:
    SchemaRegistryUtil.cleanupInternalTopicSchemas(APP_ID, schemaRegistryClient);

    // Then not exception (only tried once):
    verify(schemaRegistryClient, times(1)).deleteSubject(APP_ID + "SOME-changelog-key");
    verify(schemaRegistryClient, times(1)).deleteSubject(APP_ID + "SOME-changelog-value");
}
/**
 * Reconciles the JMX secret of a cluster: creates/updates it when a secret is desired,
 * deletes it when JMX is disabled but a stale secret exists, and does nothing when
 * neither current nor desired secret exists.
 */
public static Future<Void> reconcileJmxSecret(Reconciliation reconciliation, SecretOperator secretOperator, SupportsJmx cluster)  {
    return secretOperator.getAsync(reconciliation.namespace(), cluster.jmx().secretName())
        .compose(currentJmxSecret -> {
            // Desired secret is derived from the current one so existing credentials are kept.
            Secret desiredJmxSecret = cluster.jmx().jmxSecret(currentJmxSecret);

            if (desiredJmxSecret != null)  {
                // Desired secret is not null => should be updated
                return secretOperator.reconcile(reconciliation, reconciliation.namespace(), cluster.jmx().secretName(), desiredJmxSecret)
                    .map((Void) null);
            } else if (currentJmxSecret != null)    {
                // Desired secret is null but current is not => we should delete the secret
                return secretOperator.reconcile(reconciliation, reconciliation.namespace(), cluster.jmx().secretName(), null)
                    .map((Void) null);
            } else {
                // Both current and desired secret are null => nothing to do
                return Future.succeededFuture();
            }
        });
}
// Verifies that with JMX disabled and no existing secret, reconciliation is a no-op
// (reconcile is never invoked on the secret operator).
@Test
public void testDisabledJmxWithMissingSecret(VertxTestContext context)  {
    KafkaClusterSpec spec = new KafkaClusterSpecBuilder().build();

    JmxModel jmx = new JmxModel(NAMESPACE, NAME, LABELS, OWNER_REFERENCE, spec);

    SecretOperator mockSecretOps = mock(SecretOperator.class);
    when(mockSecretOps.getAsync(eq(NAMESPACE), eq(NAME))).thenReturn(Future.succeededFuture());

    Checkpoint async = context.checkpoint();
    ReconcilerUtils.reconcileJmxSecret(Reconciliation.DUMMY_RECONCILIATION, mockSecretOps, new MockJmxCluster(jmx))
        .onComplete(context.succeeding(v -> context.verify(() -> {
            verify(mockSecretOps, never()).reconcile(any(), any(), any(), any());
            async.flag();
        })));
}
/**
 * Prepares fetch requests for fetchable partitions and dispatches them, routing
 * responses through the success/failure handlers. Delegates to {@code pollInternal}.
 */
@Override
public PollResult poll(long currentTimeMs) {
    return pollInternal(
        prepareFetchRequests(),
        this::handleFetchSuccess,
        this::handleFetchFailure
    );
}
// Ensure the removal of completed fetches that cause an Exception if and only if they
// contain empty records: good partitions drain first, then the OFFSET_OUT_OF_RANGE
// partition throws once, and the corrupt partial-records partition throws repeatedly.
@Test
public void testCompletedFetchRemoval() {
    // Ensure the removal of completed fetches that cause an Exception if and only if they contain empty records.
    buildFetcher(OffsetResetStrategy.NONE, new ByteArrayDeserializer(),
        new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_UNCOMMITTED);
    assignFromUser(mkSet(tp0, tp1, tp2, tp3));

    subscriptions.seek(tp0, 1);
    subscriptions.seek(tp1, 1);
    subscriptions.seek(tp2, 1);
    subscriptions.seek(tp3, 1);

    assertEquals(1, sendFetches());

    Map<TopicIdPartition, FetchResponseData.PartitionData> partitions = new LinkedHashMap<>();
    partitions.put(tidp1, new FetchResponseData.PartitionData()
        .setPartitionIndex(tp1.partition())
        .setHighWatermark(100)
        .setRecords(records));
    partitions.put(tidp0, new FetchResponseData.PartitionData()
        .setPartitionIndex(tp0.partition())
        .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code())
        .setHighWatermark(100));
    partitions.put(tidp2, new FetchResponseData.PartitionData()
        .setPartitionIndex(tp2.partition())
        .setHighWatermark(100)
        .setLastStableOffset(4)
        .setLogStartOffset(0)
        .setRecords(nextRecords));
    partitions.put(tidp3, new FetchResponseData.PartitionData()
        .setPartitionIndex(tp3.partition())
        .setHighWatermark(100)
        .setLastStableOffset(4)
        .setLogStartOffset(0)
        .setRecords(partialRecords));
    client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)));
    networkClientDelegate.poll(time.timer(0));

    List<ConsumerRecord<byte[], byte[]>> fetchedRecords = new ArrayList<>();
    fetchRecordsInto(fetchedRecords);

    assertEquals(fetchedRecords.size(), subscriptions.position(tp1).offset - 1);
    assertEquals(4, subscriptions.position(tp1).offset);
    assertEquals(3, fetchedRecords.size());

    List<OffsetOutOfRangeException> oorExceptions = new ArrayList<>();
    try {
        fetchRecordsInto(fetchedRecords);
    } catch (OffsetOutOfRangeException oor) {
        oorExceptions.add(oor);
    }

    // Should have received one OffsetOutOfRangeException for partition tp1
    assertEquals(1, oorExceptions.size());
    OffsetOutOfRangeException oor = oorExceptions.get(0);
    assertTrue(oor.offsetOutOfRangePartitions().containsKey(tp0));
    assertEquals(oor.offsetOutOfRangePartitions().size(), 1);

    fetchRecordsInto(fetchedRecords);

    // Should not have received an Exception for tp2.
    assertEquals(6, subscriptions.position(tp2).offset);
    assertEquals(5, fetchedRecords.size());

    int numExceptionsExpected = 3;
    List<KafkaException> kafkaExceptions = new ArrayList<>();
    for (int i = 1; i <= numExceptionsExpected; i++) {
        try {
            fetchRecordsInto(fetchedRecords);
        } catch (KafkaException e) {
            kafkaExceptions.add(e);
        }
    }
    // Should have received as much as numExceptionsExpected Kafka exceptions for tp3.
    assertEquals(numExceptionsExpected, kafkaExceptions.size());
}
/**
 * Returns the underlying buffer this wrapper delegates to.
 */
@Override
public ByteBuf unwrap() {
    return buffer;
}
// Verifies the wrapper reports the same contiguity as the buffer it wraps.
@Test
public void testIsContiguous() {
    ByteBuf buf = newBuffer(4);
    assertEquals(buf.unwrap().isContiguous(), buf.isContiguous());
    buf.release();
}
/**
 * Derives a Beam {@link Schema} for the message named {@code messageName} inside the
 * given textual proto schema.
 */
public static Schema getBeamSchemaFromProtoSchema(String schemaString, String messageName) {
    // Resolve the message descriptor from the textual schema first.
    final Descriptors.Descriptor descriptor =
        getDescriptorFromProtoSchema(schemaString, messageName);
    final ProtoDomain domain = ProtoDomain.buildFrom(descriptor);
    return ProtoDynamicMessageSchema.forDescriptor(domain, descriptor).getSchema();
}
// Verifies the Beam schema derived from the proto string has the expected field names.
@Test
public void testProtoSchemaStringToBeamSchema() {
    Schema schema = ProtoByteUtils.getBeamSchemaFromProtoSchema(PROTO_STRING_SCHEMA, "MyMessage");
    Assert.assertEquals(schema.getFieldNames(), SCHEMA.getFieldNames());
}
/**
 * Validates parsed payload definitions, rejecting any that lack required fields or are
 * internally inconsistent (callback-server payloads must reference the token URL;
 * non-callback payloads must declare a validation type, and regex validation requires a
 * regex). Returns an immutable copy of the validated list.
 *
 * <p>NOTE(review): "exeuction_environment" below is a typo for "execution_environment",
 * but the unit tests assert on this exact message text, so it is intentionally left as-is.
 */
ImmutableList<PayloadDefinition> validatePayloads(List<PayloadDefinition> payloads) {
    for (PayloadDefinition p : payloads) {
        checkArgument(p.hasName(), "Parsed payload does not have a name.");
        checkArgument(
            p.getInterpretationEnvironment()
                != PayloadGeneratorConfig.InterpretationEnvironment
                    .INTERPRETATION_ENVIRONMENT_UNSPECIFIED,
            "Parsed payload does not have an interpretation_environment.");
        checkArgument(
            p.getExecutionEnvironment()
                != PayloadGeneratorConfig.ExecutionEnvironment.EXECUTION_ENVIRONMENT_UNSPECIFIED,
            "Parsed payload does not have an exeuction_environment.");
        checkArgument(
            !p.getVulnerabilityTypeList().isEmpty(),
            "Parsed payload has no entries for vulnerability_type.");
        checkArgument(p.hasPayloadString(), "Parsed payload does not have a payload_string.");

        if (p.getUsesCallbackServer().getValue()) {
            // Callback-based payloads must actually embed the token URL placeholder.
            checkArgument(
                p.getPayloadString().getValue().contains("$TSUNAMI_PAYLOAD_TOKEN_URL"),
                "Parsed payload uses callback server but $TSUNAMI_PAYLOAD_TOKEN_URL not found in"
                    + " payload_string.");
        } else {
            checkArgument(
                p.getValidationType() != PayloadValidationType.VALIDATION_TYPE_UNSPECIFIED,
                "Parsed payload has no validation_type and does not use the callback server.");

            if (p.getValidationType() == PayloadValidationType.VALIDATION_REGEX) {
                checkArgument(
                    p.hasValidationRegex(),
                    "Parsed payload has no validation_regex but uses PayloadValidationType.REGEX");
            }
        }
    }
    return ImmutableList.copyOf(payloads);
}
// Verifies a payload lacking execution_environment is rejected; the assertion matches
// the (intentionally typo'd) "exeuction_environment" message from validatePayloads.
@Test
public void validatePayloads_withoutExecutionEnvironment_throwsException() throws IOException {
    PayloadDefinition p = goodCallbackDefinition.clearExecutionEnvironment().build();

    Throwable thrown =
        assertThrows(
            IllegalArgumentException.class, () -> module.validatePayloads(ImmutableList.of(p)));
    assertThat(thrown).hasMessageThat().contains("exeuction_environment");
}
/**
 * Fetches all repository branches of a GitLab project via the REST API, authenticating
 * with the personal access token. Non-JSON responses and transport failures are mapped
 * to IllegalArgumentException / IllegalStateException respectively.
 */
public List<GitLabBranch> getBranches(String gitlabUrl, String pat, Long gitlabProjectId) {
    String url = format("%s/projects/%s/repository/branches", gitlabUrl, gitlabProjectId);
    LOG.debug("get branches : [{}]", url);
    Request request = new Request.Builder()
        .addHeader(PRIVATE_TOKEN, pat)
        .get()
        .url(url)
        .build();

    try (Response response = client.newCall(request).execute()) {
        checkResponseIsSuccessful(response);
        String body = response.body().string();
        LOG.trace("loading branches payload result : [{}]", body);
        return Arrays.asList(new GsonBuilder().create().fromJson(body, GitLabBranch[].class));
    } catch (JsonSyntaxException e) {
        throw new IllegalArgumentException("Could not parse GitLab answer to retrieve project branches. Got a non-json payload as result.");
    } catch (IOException e) {
        // Log with the full URL for diagnosis before wrapping.
        logException(url, e);
        throw new IllegalStateException(e.getMessage(), e);
    }
}
/**
 * When the server is down, the client should raise IllegalStateException and log the
 * underlying connection failure together with the full request URL at INFO level.
 */
@Test
public void fail_get_branches_with_unexpected_io_exception_with_detailed_log() throws IOException {
  server.shutdown();
  assertThatThrownBy(() -> underTest.getBranches(gitlabUrl, "token", 0L))
    .isInstanceOf(IllegalStateException.class)
    .hasMessageContaining("Failed to connect to " + server.getHostName());
  // The failure must also be logged with the called URL and the error message.
  assertThat(logTester.logs(Level.INFO).get(0))
    .contains("Gitlab API call to [" + server.url("/projects/0/repository/branches") + "] "
      + "failed with error message : [Failed to connect to " + server.getHostName());
}
/**
 * Two checkpoints are equal when every identifying field matches: stream name, shard id,
 * iterator type, (sub)sequence numbers and timestamp.
 */
@Override
public boolean equals(Object o) {
  // Reflexive fast path.
  if (this == o) {
    return true;
  }
  if (o == null || getClass() != o.getClass()) {
    return false;
  }
  final ShardCheckpoint other = (ShardCheckpoint) o;
  if (!streamName.equals(other.streamName) || !shardId.equals(other.shardId)) {
    return false;
  }
  return Objects.equals(sequenceNumber, other.sequenceNumber)
      && shardIteratorType == other.shardIteratorType
      && Objects.equals(subSequenceNumber, other.subSequenceNumber)
      && Objects.equals(timestamp, other.timestamp);
}
/**
 * Checkpoints compare equal iff all of stream name, shard id, iterator type, sequence
 * numbers and starting-point timestamp match.
 */
@Test
public void testEquals() {
  // Same starting point -> equal.
  assertEquals(
      new ShardCheckpoint("stream-01", "shard-000", new StartingPoint(Instant.ofEpochMilli(112))),
      new ShardCheckpoint(
          "stream-01", "shard-000", new StartingPoint(Instant.ofEpochMilli(112))));
  // Same sequence number and sub-sequence number -> equal.
  assertEquals(
      new ShardCheckpoint(
          "stream-01", "shard-000", ShardIteratorType.AFTER_SEQUENCE_NUMBER, "9", 0L),
      new ShardCheckpoint(
          "stream-01", "shard-000", ShardIteratorType.AFTER_SEQUENCE_NUMBER, "9", 0L));
  // Different sequence number -> not equal.
  assertNotEquals(
      new ShardCheckpoint(
          "stream-01", "shard-000", ShardIteratorType.AFTER_SEQUENCE_NUMBER, "10", 0L),
      new ShardCheckpoint(
          "stream-01", "shard-000", ShardIteratorType.AFTER_SEQUENCE_NUMBER, "9", 0L));
  // Different starting-point timestamp -> not equal.
  assertNotEquals(
      new ShardCheckpoint("stream-01", "shard-000", new StartingPoint(Instant.ofEpochMilli(112))),
      new ShardCheckpoint(
          "stream-01", "shard-000", new StartingPoint(Instant.ofEpochMilli(113))));
}
/**
 * Writes a HEADERS frame without priority information.
 *
 * <p>Delegates to {@code writeHeadersInternal} with hasPriority=false and neutral priority
 * fields (streamDependency=0, weight=0, exclusive=false).
 */
@Override
public ChannelFuture writeHeaders(ChannelHandlerContext ctx, int streamId, Http2Headers headers,
    int padding, boolean endStream, ChannelPromise promise) {
  return writeHeadersInternal(ctx, streamId, headers, padding, endStream, false, 0, (short) 0, false, promise);
}
/**
 * A HEADERS frame written with padding=5 must set the PADDED flag and include the pad-length
 * byte plus trailing padding in the frame payload length.
 */
@Test
public void writeHeadersWithPadding() throws Exception {
  int streamId = 1;
  Http2Headers headers = new DefaultHttp2Headers()
      .method("GET").path("/").authority("foo.com").scheme("https");
  frameWriter.writeHeaders(ctx, streamId, headers, 5, true, promise);
  byte[] expectedPayload = headerPayload(streamId, headers, (byte) 4);
  byte[] expectedFrameBytes = {
      (byte) 0x00, (byte) 0x00, (byte) 0x0f, // payload length = 15 (0x0f)
      (byte) 0x01, // frame type = 1 (HEADERS)
      (byte) 0x0d, // flags = (0x01 | 0x04 | 0x08) = END_STREAM | END_HEADERS | PADDED
      (byte) 0x00, (byte) 0x00, (byte) 0x00, (byte) 0x01 // stream id = 1
  };
  expectedOutbound = Unpooled.copiedBuffer(expectedFrameBytes, expectedPayload);
  assertEquals(expectedOutbound, outbound);
}
/**
 * Creates an uninitialized {@code Read} transform; configure it via its {@code withX}
 * builder methods before use.
 */
public static <V> Read<V> read() {
  return new AutoValue_SparkReceiverIO_Read.Builder<V>().build();
}
/** Passing a null getOffsetFn must be rejected eagerly with IllegalArgumentException. */
@Test
public void testReadObjectCreationFailsIfGetOffsetFnIsNull() {
  assertThrows(
      IllegalArgumentException.class, () -> SparkReceiverIO.<String>read().withGetOffsetFn(null));
}
/**
 * Maps a serialized {@code IssueCache.Issue} proto back into a {@code DefaultIssue}.
 *
 * <p>Optional proto fields (hasXxx) map to null when absent; collection-like fields
 * (comments, tags, code variants, impacts, field diffs) are copied element by element.
 */
@VisibleForTesting
static DefaultIssue toDefaultIssue(IssueCache.Issue next) {
  DefaultIssue defaultIssue = new DefaultIssue();
  defaultIssue.setKey(next.getKey());
  defaultIssue.setType(RuleType.valueOf(next.getRuleType()));
  defaultIssue.setComponentUuid(next.hasComponentUuid() ? next.getComponentUuid() : null);
  defaultIssue.setComponentKey(next.getComponentKey());
  defaultIssue.setProjectUuid(next.getProjectUuid());
  defaultIssue.setProjectKey(next.getProjectKey());
  defaultIssue.setRuleKey(RuleKey.parse(next.getRuleKey()));
  defaultIssue.setLanguage(next.hasLanguage() ? next.getLanguage() : null);
  defaultIssue.setSeverity(next.hasSeverity() ? next.getSeverity() : null);
  defaultIssue.setManualSeverity(next.getManualSeverity());
  defaultIssue.setMessage(next.hasMessage() ? next.getMessage() : null);
  defaultIssue.setMessageFormattings(next.hasMessageFormattings() ? next.getMessageFormattings() : null);
  defaultIssue.setLine(next.hasLine() ? next.getLine() : null);
  defaultIssue.setGap(next.hasGap() ? next.getGap() : null);
  defaultIssue.setEffort(next.hasEffort() ? Duration.create(next.getEffort()) : null);
  defaultIssue.setStatus(next.getStatus());
  defaultIssue.setResolution(next.hasResolution() ? next.getResolution() : null);
  defaultIssue.setAssigneeUuid(next.hasAssigneeUuid() ? next.getAssigneeUuid() : null);
  defaultIssue.setAssigneeLogin(next.hasAssigneeLogin() ? next.getAssigneeLogin() : null);
  defaultIssue.setChecksum(next.hasChecksum() ? next.getChecksum() : null);
  defaultIssue.setAuthorLogin(next.hasAuthorLogin() ? next.getAuthorLogin() : null);
  next.getCommentsList().forEach(c -> defaultIssue.addComment(toDefaultIssueComment(c)));
  // Tags and code variants are persisted as delimited strings; split them back into sets.
  defaultIssue.setTags(ImmutableSet.copyOf(STRING_LIST_SPLITTER.split(next.getTags())));
  defaultIssue.setCodeVariants(ImmutableSet.copyOf(STRING_LIST_SPLITTER.split(next.getCodeVariants())));
  defaultIssue.setRuleDescriptionContextKey(next.hasRuleDescriptionContextKey() ? next.getRuleDescriptionContextKey() : null);
  defaultIssue.setLocations(next.hasLocations() ? next.getLocations() : null);
  defaultIssue.setIsFromExternalRuleEngine(next.getIsFromExternalRuleEngine());
  // Dates are stored in the proto as epoch milliseconds.
  defaultIssue.setCreationDate(new Date(next.getCreationDate()));
  defaultIssue.setUpdateDate(next.hasUpdateDate() ? new Date(next.getUpdateDate()) : null);
  defaultIssue.setCloseDate(next.hasCloseDate() ? new Date(next.getCloseDate()) : null);
  defaultIssue.setCurrentChangeWithoutAddChange(next.hasCurrentChanges() ? toDefaultIssueChanges(next.getCurrentChanges()) : null);
  defaultIssue.setNew(next.getIsNew());
  defaultIssue.setIsOnChangedLine(next.getIsOnChangedLine());
  defaultIssue.setIsNewCodeReferenceIssue(next.getIsNewCodeReferenceIssue());
  defaultIssue.setCopied(next.getIsCopied());
  defaultIssue.setBeingClosed(next.getBeingClosed());
  defaultIssue.setOnDisabledRule(next.getOnDisabledRule());
  defaultIssue.setChanged(next.getIsChanged());
  defaultIssue.setSendNotifications(next.getSendNotifications());
  defaultIssue.setSelectedAt(next.hasSelectedAt() ? next.getSelectedAt() : null);
  defaultIssue.setQuickFixAvailable(next.getQuickFixAvailable());
  defaultIssue.setPrioritizedRule(next.getIsPrioritizedRule());
  defaultIssue.setIsNoLongerNewCodeReferenceIssue(next.getIsNoLongerNewCodeReferenceIssue());
  defaultIssue.setCleanCodeAttribute(next.hasCleanCodeAttribute() ? CleanCodeAttribute.valueOf(next.getCleanCodeAttribute()) : null);
  if (next.hasAnticipatedTransitionUuid()) {
    defaultIssue.setAnticipatedTransitionUuid(next.getAnticipatedTransitionUuid());
  }
  for (IssueCache.Impact impact : next.getImpactsList()) {
    defaultIssue.addImpact(SoftwareQuality.valueOf(impact.getSoftwareQuality()), Severity.valueOf(impact.getSeverity()));
  }
  for (IssueCache.FieldDiffs protoFieldDiffs : next.getChangesList()) {
    defaultIssue.addChange(toDefaultIssueChanges(protoFieldDiffs));
  }
  return defaultIssue;
}
/** A ruleDescriptionContextKey set on the proto must surface on the mapped DefaultIssue. */
@Test
public void toDefaultIssue_whenRuleDescriptionContextKeyPresent_shouldSetItInDefaultIssue() {
  IssueCache.Issue issueWithContextKey =
      prepareIssueWithCompulsoryFields().setRuleDescriptionContextKey(TEST_CONTEXT_KEY).build();
  DefaultIssue mapped = ProtobufIssueDiskCache.toDefaultIssue(issueWithContextKey);
  assertThat(mapped.getRuleDescriptionContextKey()).contains(TEST_CONTEXT_KEY);
}
/**
 * Registers the Dubbo-specific {@code ShenyuContextDecorator} bean.
 *
 * @return a new DubboShenyuContextDecorator instance
 */
@Bean
public ShenyuContextDecorator dubboShenyuContextDecorator() {
  final ShenyuContextDecorator dubboDecorator = new DubboShenyuContextDecorator();
  return dubboDecorator;
}
/**
 * The dubboShenyuContextDecorator bean must be registered by DubboCommonConfiguration and
 * resolvable from the context by name and type.
 */
@Test
public void testDubboShenyuContextDecorator() {
  new ApplicationContextRunner()
      .withConfiguration(AutoConfigurations.of(DubboCommonConfiguration.class))
      .withBean(DubboCommonConfigurationTest.class)
      .withPropertyValues("debug=true")
      .run(context -> {
        ShenyuContextDecorator decorator = context.getBean("dubboShenyuContextDecorator", ShenyuContextDecorator.class);
        assertNotNull(decorator);
      });
}
/**
 * Resolves this worker's identity.
 *
 * <p>Resolution order:
 * <ol>
 *   <li>the WORKER_IDENTITY_UUID property, when explicitly set by the user;</li>
 *   <li>the first non-comment, non-blank line of the identity file at
 *       WORKER_IDENTITY_UUID_FILE_PATH;</li>
 *   <li>a freshly generated UUID, which is then best-effort persisted to the identity file
 *       and the file made read-only.</li>
 * </ol>
 */
@Override
public WorkerIdentity get() {
  // Look at configurations first
  if (mConf.isSetByUser(PropertyKey.WORKER_IDENTITY_UUID)) {
    String uuidStr = mConf.getString(PropertyKey.WORKER_IDENTITY_UUID);
    final WorkerIdentity workerIdentity = WorkerIdentity.ParserV1.INSTANCE.fromUUID(uuidStr);
    LOG.debug("Loaded worker identity from configuration: {}", workerIdentity);
    return workerIdentity;
  }
  // Try loading from the identity file
  String filePathStr = mConf.getString(PropertyKey.WORKER_IDENTITY_UUID_FILE_PATH);
  final Path idFile = Paths.get(filePathStr);
  try (BufferedReader reader = Files.newBufferedReader(idFile)) {
    // Lines starting with '#' are comments; blank lines are ignored.
    List<String> nonCommentLines = reader.lines()
        .filter(line -> !line.startsWith("#"))
        .filter(line -> !line.trim().isEmpty())
        .collect(Collectors.toList());
    if (nonCommentLines.size() > 0) {
      if (nonCommentLines.size() > 1) {
        LOG.warn("Multiple worker identities configured in {}, only the first one will be used",
            idFile);
      }
      String uuidStr = nonCommentLines.get(0);
      final WorkerIdentity workerIdentity = WorkerIdentity.ParserV1.INSTANCE.fromUUID(uuidStr);
      LOG.debug("Loaded worker identity from file {}: {}", idFile, workerIdentity);
      return workerIdentity;
    }
  } catch (FileNotFoundException | NoSuchFileException ignored) {
    // if not existent, proceed to auto generate one
    LOG.debug("Worker identity file {} not found", idFile);
  } catch (IOException e) {
    // in case of other IO error, better stop worker from starting up than use a new identity
    throw new RuntimeException(
        String.format("Failed to read worker identity from identity file %s", idFile), e);
  }
  // No identity is supplied by the user
  // Assume this is the first time the worker starts up, and generate a new one
  LOG.debug("Auto generating new worker identity as no identity is supplied by the user");
  UUID generatedId = mUUIDGenerator.get();
  WorkerIdentity identity = WorkerIdentity.ParserV1.INSTANCE.fromUUID(generatedId);
  LOG.debug("Generated worker identity as {}", identity);
  // Best-effort persistence: CREATE_NEW guarantees an existing (possibly comment-only) file
  // is never overwritten; a persistence failure only costs identity stability after restart.
  try (BufferedWriter writer =
      Files.newBufferedWriter(idFile, StandardCharsets.UTF_8, StandardOpenOption.CREATE_NEW,
          StandardOpenOption.WRITE)) {
    writer.write("# Worker identity automatically generated at ");
    writer.write(OffsetDateTime.now().format(DateTimeFormatter.RFC_1123_DATE_TIME));
    writer.newLine();
    writer.write(generatedId.toString());
    writer.newLine();
  } catch (Exception e) {
    LOG.warn("Failed to persist automatically generated worker identity ({}) to {}, "
        + "this worker will lose its identity after restart", identity, idFile, e);
  }
  try {
    // set the file to be read-only
    Set<PosixFilePermission> permSet = Files.getPosixFilePermissions(idFile);
    Set<PosixFilePermission> nonWritablePermSet = Sets.filter(permSet,
        perm -> perm != PosixFilePermission.OWNER_WRITE
            && perm != PosixFilePermission.GROUP_WRITE
            && perm != PosixFilePermission.OTHERS_WRITE);
    Files.setPosixFilePermissions(idFile, nonWritablePermSet);
  } catch (Exception e) {
    LOG.warn("Failed to set identity file to be read-only", e);
  }
  return identity;
}
/**
 * An existing identity file that contains only comments must not be overwritten by the
 * auto-generated identity (persistence uses CREATE_NEW, so it fails silently here).
 */
@Test
public void shouldNotOverwriteExistingIdFile() throws Exception {
  AlluxioProperties props = new AlluxioProperties();
  props.set(PropertyKey.WORKER_IDENTITY_UUID_FILE_PATH, mUuidFilePath);
  // create the file first, but add only comment lines
  // it should not be overwritten with the auto generated id
  try (BufferedWriter fout = Files.newBufferedWriter(mUuidFilePath)) {
    fout.write("# comment");
    fout.newLine();
  }
  AlluxioConfiguration conf = new InstancedConfiguration(props);
  WorkerIdentityProvider provider = new WorkerIdentityProvider(conf, () -> mReferenceUuid);
  // this should succeed without any exception
  WorkerIdentity identity = provider.get();
  assertEquals(mReferenceUuid, WorkerIdentity.ParserV1.INSTANCE.toUUID(identity));
  // the original id file is left intact
  assertTrue(Files.exists(mUuidFilePath));
  try (BufferedReader reader = Files.newBufferedReader(mUuidFilePath)) {
    String content = IOUtils.toString(reader);
    assertEquals("# comment\n", content);
  }
}
/**
 * Recursively cleans a parameter map: nested MAP params are cleaned depth-first, OPTIONAL
 * params without a value or expression (or with an empty map value) are dropped, required
 * params without a value or expression cause a failure, and internal metadata is stripped
 * from the result.
 *
 * @param params parameter definitions to clean, may be null or empty
 * @return the cleaned parameter map (the input itself when null or empty)
 * @throws RuntimeException if a required parameter has neither value nor expression
 */
@SuppressWarnings({"SimplifyBooleanReturn"})
public static Map<String, ParamDefinition> cleanupParams(Map<String, ParamDefinition> params) {
  if (params == null || params.isEmpty()) {
    return params;
  }
  // Depth-first pass: rebuild MAP params with recursively cleaned values.
  Map<String, ParamDefinition> mapped =
      params.entrySet().stream()
          .collect(
              MapHelper.toListMap(
                  Map.Entry::getKey,
                  p -> {
                    ParamDefinition param = p.getValue();
                    if (param.getType() == ParamType.MAP) {
                      MapParamDefinition mapParamDef = param.asMapParamDef();
                      if (mapParamDef.getValue() == null
                          && (mapParamDef.getInternalMode() == InternalParamMode.OPTIONAL)) {
                        return mapParamDef;
                      }
                      // FIX: removed a duplicated .name(mapParamDef.getName()) call that
                      // appeared twice in this builder chain; the second was redundant.
                      return MapParamDefinition.builder()
                          .name(mapParamDef.getName())
                          .value(cleanupParams(mapParamDef.getValue()))
                          .expression(mapParamDef.getExpression())
                          .validator(mapParamDef.getValidator())
                          .tags(mapParamDef.getTags())
                          .mode(mapParamDef.getMode())
                          .meta(mapParamDef.getMeta())
                          .build();
                    } else {
                      return param;
                    }
                  }));
  // Filter pass: drop empty OPTIONAL params; fail on required params without value/expression.
  Map<String, ParamDefinition> filtered =
      mapped.entrySet().stream()
          .filter(
              p -> {
                ParamDefinition param = p.getValue();
                if (param.getInternalMode() == InternalParamMode.OPTIONAL) {
                  if (param.getValue() == null && param.getExpression() == null) {
                    return false;
                  } else if (param.getType() == ParamType.MAP
                      && param.asMapParamDef().getValue() != null
                      && param.asMapParamDef().getValue().isEmpty()) {
                    return false;
                  } else {
                    return true;
                  }
                } else {
                  Checks.checkTrue(
                      param.getValue() != null || param.getExpression() != null,
                      String.format(
                          "[%s] is a required parameter (type=[%s])",
                          p.getKey(), param.getType()));
                  return true;
                }
              })
          .collect(MapHelper.toListMap(Map.Entry::getKey, Map.Entry::getValue));
  return cleanIntermediateMetadata(filtered);
}
/** For every param mode, a nested present param inside a MAP must survive cleanup. */
@Test
public void testCleanupAllNestedPresentParams() throws JsonProcessingException {
  for (ParamMode mode : ParamMode.values()) {
    Map<String, ParamDefinition> allParams =
        parseParamDefMap(
            String.format(
                "{'map': {'type': 'MAP','value': {'present': {'type': 'STRING', 'mode': '%s', 'value': 'hello'}}}}",
                mode.toString()));
    Map<String, ParamDefinition> cleanedParams = ParamsMergeHelper.cleanupParams(allParams);
    assertEquals(1, cleanedParams.get("map").asMapParamDef().getValue().size());
  }
}
/**
 * Fetches (or creates) the segment for {@code segmentId} if it is still live, then purges
 * any segments that have expired as of {@code streamTime}.
 */
@Override
public KeyValueSegment getOrCreateSegmentIfLive(final long segmentId,
    final ProcessorContext context, final long streamTime) {
  final KeyValueSegment liveSegment =
      super.getOrCreateSegmentIfLive(segmentId, context, streamTime);
  cleanupExpiredSegments(streamTime);
  return liveSegment;
}
/** The segment's toString must report its id and fully-qualified name. */
@Test
public void shouldGetCorrectSegmentString() {
  final KeyValueSegment firstSegment = segments.getOrCreateSegmentIfLive(0, context, -1L);
  assertEquals("KeyValueSegment(id=0, name=test.0)", firstSegment.toString());
}
/**
 * Reads extension descriptors from the application classpath (the service-provider style
 * resource named by {@code EXTENSIONS_RESOURCE}).
 *
 * @return a map whose single null key (no plugin id) points at the discovered extension
 *     class names; IO/URI problems are logged and yield whatever was collected so far
 */
@Override
public Map<String, Set<String>> readClasspathStorages() {
  log.debug("Reading extensions storages from classpath");
  Map<String, Set<String>> result = new LinkedHashMap<>();
  final Set<String> bucket = new HashSet<>();
  try {
    Enumeration<URL> urls = getExtensionResource(getClass().getClassLoader());
    if (urls.hasMoreElements()) {
      collectExtensions(urls, bucket);
    } else {
      log.debug("Cannot find '{}'", EXTENSIONS_RESOURCE);
    }
    debugExtensions(bucket);
    // Classpath (non-plugin) extensions are keyed under null.
    result.put(null, bucket);
  } catch (IOException | URISyntaxException e) {
    log.error(e.getMessage(), e);
  }
  return result;
}
/**
 * Classpath storage reading must key all discovered extensions under the null (non-plugin)
 * entry and return every extension listed in the service resource.
 */
@Test
void readClasspathStorages() {
  PluginManager pluginManager = mock(PluginManager.class);
  // Stub resource lookup so the test controls what the classpath "contains".
  ServiceProviderExtensionFinder finder = new ServiceProviderExtensionFinder(pluginManager) {
    @Override
    Enumeration<URL> getExtensionResource(ClassLoader classLoader) throws IOException {
      return getExtensionEnumeration();
    }
  };
  Map<String, Set<String>> storages = finder.readClasspathStorages();
  assertNotNull(storages);
  assertTrue(storages.containsKey(null));
  Set<String> extensions = storages.get(null);
  assertEquals(2, extensions.size());
  assertThat(extensions, containsInAnyOrder(HELLO_GREETER_EXTENSION, WELCOME_GREETER_EXTENSION));
}
/**
 * Rewrites the given AST node by dispatching to the configured rewriter.
 *
 * @param node the node to rewrite
 * @param context caller-supplied context threaded through the rewrite
 * @return the rewritten node
 */
public AstNode rewrite(final AstNode node, final C context) {
  return rewriter.process(node, context);
}
/**
 * Rewriting a query with only a HAVING clause must rewrite the having expression and keep
 * all other optional clauses empty.
 */
@Test
public void shouldRewriteQueryWithHaving() {
  // Given:
  final Query query = givenQuery(Optional.empty(), Optional.empty(), Optional.empty(),
      Optional.empty(), Optional.of(expression));
  when(expressionRewriter.apply(expression, context)).thenReturn(rewrittenExpression);
  // When:
  final AstNode rewritten = rewriter.rewrite(query, context);
  // Then:
  assertThat(rewritten, equalTo(new Query(
      location,
      rewrittenSelect,
      rewrittenRelation,
      Optional.empty(),
      Optional.empty(),
      Optional.empty(),
      Optional.empty(),
      Optional.of(rewrittenExpression),
      Optional.of(refinementInfo),
      false,
      optionalInt))
  );
}
/**
 * Parses old and new configuration payloads in properties format and returns the
 * per-key change items between them. Blank or null input is treated as empty.
 */
@Override
public Map<String, ConfigChangeItem> doParse(String oldContent, String newContent, String type) throws IOException {
  Properties oldProps = new Properties();
  if (StringUtils.isNotBlank(oldContent)) {
    oldProps.load(new StringReader(oldContent));
  }
  Properties newProps = new Properties();
  if (StringUtils.isNotBlank(newContent)) {
    newProps.load(new StringReader(newContent));
  }
  return filterChangeData(oldProps, newProps);
}
/** Adding a key must yield a change item with a null old value and the new value set. */
@Test
void testAddKey() throws IOException {
  Map<String, ConfigChangeItem> changes = parser.doParse("", "app.name = nacos", type);
  ConfigChangeItem appNameChange = changes.get("app.name");
  assertNull(appNameChange.getOldValue());
  assertEquals("nacos", appNameChange.getNewValue());
}
/**
 * Registers a component uuid under the given ref, recording whether it is a file.
 * Re-registering the same uuid is allowed only with the identical ref and file flag.
 */
@Override
public void register(long ref, String uuid, boolean file) {
  requireNonNull(uuid, "uuid can not be null");
  Long existingRef = refsByUuid.get(uuid);
  if (existingRef == null) {
    // First registration for this uuid.
    refsByUuid.put(uuid, ref);
    if (file) {
      fileUuids.add(uuid);
    }
    return;
  }
  // Re-registration: must match both the recorded ref and the recorded file flag.
  checkArgument(ref == existingRef, "Uuid '%s' already registered under ref '%s' in repository", uuid, existingRef);
  boolean existingIsFile = fileUuids.contains(uuid);
  checkArgument(file == existingIsFile, "Uuid '%s' already registered but %sas a File", uuid, existingIsFile ? "" : "not ");
}
/** Re-registering the same uuid with a different file flag must fail with a clear message. */
@Test
public void register_throws_IAE_same_uuid_added_with_as_file() {
  underTest.register(SOME_REF, SOME_UUID, true);
  assertThatThrownBy(() -> underTest.register(SOME_REF, SOME_UUID, false))
    .isInstanceOf(IllegalArgumentException.class)
    .hasMessage("Uuid '" + SOME_UUID + "' already registered but as a File");
}
/**
 * Converts a single Avro field value into a plain Java value usable by the JDBC sink.
 *
 * <p>Primitive types pass through unchanged; ENUM/STRING are stringified; for a UNION the
 * first non-NULL member schema is used. Complex types (ARRAY, BYTES, FIXED, RECORD, MAP)
 * are not supported.
 *
 * @param avroValue the raw Avro value, may be null
 * @param schema the Avro schema describing the value
 * @return the converted value, or null when {@code avroValue} is null
 * @throws IllegalArgumentException if a UNION schema has no member types
 * @throws UnsupportedOperationException for unsupported schema types
 */
@VisibleForTesting
static Object convertAvroField(Object avroValue, Schema schema) {
  if (avroValue == null) {
    return null;
  }
  switch (schema.getType()) {
    case NULL:
    case INT:
    case LONG:
    case DOUBLE:
    case FLOAT:
    case BOOLEAN:
      return avroValue;
    case ENUM:
    case STRING:
      return avroValue.toString(); // can be a String or org.apache.avro.util.Utf8
    case UNION:
      // Recurse into the first non-NULL member of the union.
      for (Schema s : schema.getTypes()) {
        if (s.getType() == Schema.Type.NULL) {
          continue;
        }
        return convertAvroField(avroValue, s);
      }
      throw new IllegalArgumentException("Found UNION schema but it doesn't contain any type");
    case ARRAY:
    case BYTES:
    case FIXED:
    case RECORD:
    case MAP:
    default:
      throw new UnsupportedOperationException("Unsupported avro schema type=" + schema.getType()
          + " for value field schema " + schema.getName());
  }
}
/** A boolean value must pass through the converter unchanged. */
@Test
public void testConvertAvroBoolean() {
  Object converted = BaseJdbcAutoSchemaSink.convertAvroField(true,
      createFieldAndGetSchema((builder) -> builder.name("field").type().booleanType().noDefault()));
  Assert.assertEquals(converted, true);
}
public void densify(FeatureMap fMap) { // Densify! - guitar solo List<String> featureNames = new ArrayList<>(fMap.keySet()); Collections.sort(featureNames); densify(featureNames); }
/**
 * Densify must expand a ListExample onto the full sorted feature space, zero-filling all
 * absent features, for each structural case: single feature, already dense, interior gaps,
 * and edge-only features. Each result must also survive proto round-tripping.
 */
@Test
public void testListExampleDensify() {
  MockOutput output = new MockOutput("UNK");
  Example<MockOutput> example, expected;
  // Single feature
  example = new ListExample<>(output, new String[]{"F0"}, new double[]{1.0});
  example.densify(Arrays.asList(featureNames));
  expected = new ListExample<>(new MockOutput("UNK"), featureNames,
      new double[]{1.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0});
  checkDenseExample(expected,example);
  testProtoSerialization(example);
  // Already dense
  example = new ListExample<>(output, featureNames,
      new double[]{1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0});
  example.densify(Arrays.asList(featureNames));
  expected = new ListExample<>(new MockOutput("UNK"), featureNames,
      new double[]{1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0});
  checkDenseExample(expected,example);
  testProtoSerialization(example);
  // No edges
  example = new ListExample<>(output, new String[]{"F1","F3","F5","F6","F8"},
      new double[]{1.0,1.0,1.0,1.0,1.0});
  example.densify(Arrays.asList(featureNames));
  expected = new ListExample<>(new MockOutput("UNK"), featureNames,
      new double[]{0.0,1.0,0.0,1.0,0.0,1.0,1.0,0.0,1.0,0.0});
  checkDenseExample(expected,example);
  testProtoSerialization(example);
  // Only edges
  example = new ListExample<>(output, new String[]{"F0","F1","F8","F9"},
      new double[]{1.0,1.0,1.0,1.0});
  example.densify(Arrays.asList(featureNames));
  expected = new ListExample<>(new MockOutput("UNK"), featureNames,
      new double[]{1.0,1.0,0.0,0.0,0.0,0.0,0.0,0.0,1.0,1.0});
  checkDenseExample(expected,example);
  testProtoSerialization(example);
}
/**
 * Analyzes the statement, delegating to the two-argument overload with the boolean flag
 * set to false (see that overload for the flag's meaning).
 */
public Analysis analyze(Statement statement) {
  return analyze(statement, false);
}
/** EXPLAIN ANALYZE over a simple select must be analyzable without errors. */
@Test
public void testExplainAnalyze() {
  final String sql = "EXPLAIN ANALYZE SELECT * FROM t1";
  analyze(sql);
}
/**
 * Looks up the configured least-call count for the given application.
 *
 * @param appName the application name used to resolve the fault-tolerance config
 * @return the least call count from that application's config
 */
public static long getLeastCallCount(String appName) {
  return getConfig(appName).getLeastCallCount();
}
/** A null app name must fall back to the default config's least-call count. */
@Test
public void getLeastCallCount() throws Exception {
  long expected = defaultConfig.getLeastCallCount();
  Assert.assertEquals(FaultToleranceConfigManager.getLeastCallCount(null), expected);
}
/**
 * Checks whether a string is a syntactically valid host name.
 *
 * <p>Rules enforced: non-null, at most 253 characters total, labels separated by '.' are
 * 1-63 characters long, and only lowercase letters, digits, '-' and '.' are allowed.
 *
 * @param host candidate host name, may be null
 * @return true when the host passes all checks
 */
public static boolean isValidHost(String host) {
  if (host == null || host.length() > 253) {
    return false;
  }
  int labelLength = 0;
  for (int i = 0, n = host.length(); i < n; i++) {
    char ch = host.charAt(i);
    boolean allowed = (ch >= 'a' && ch <= 'z') || (ch >= '0' && ch <= '9') || ch == '-' || ch == '.';
    if (!allowed) {
      return false;
    }
    if (ch == '.') {
      // A dot terminates a label; the label just ended must be 1-63 chars.
      if (labelLength < 1 || labelLength > 63) {
        return false;
      }
      labelLength = 0;
    } else {
      labelLength++;
    }
  }
  // The final label (after the last dot, or the whole string) must also be 1-63 chars.
  return labelLength >= 1 && labelLength <= 63;
}
/** Exercises valid and invalid host names, including boundary separators and null. */
@Test
public void testIsValidHost() {
  assertTrue(Hosts.isValidHost("ni.hao"));
  // Illegal character inside a label.
  assertFalse(Hosts.isValidHost("ni.ha~o"));
  // Empty labels produced by leading/trailing/consecutive dots.
  assertFalse(Hosts.isValidHost(".ni.hao"));
  assertFalse(Hosts.isValidHost("ni.hao."));
  assertFalse(Hosts.isValidHost("ni..hao."));
  assertFalse(Hosts.isValidHost("."));
  // Degenerate inputs.
  assertFalse(Hosts.isValidHost(""));
  assertFalse(Hosts.isValidHost(null));
}
/**
 * Creates a new table that mirrors the schema of an existing table.
 *
 * @param dbName database containing the source table; the test suite passes null for the
 *     default database
 * @param existingTblName name of the table to copy the schema from
 * @param newTableName name of the table to create
 * @param ifNotExists if true, do not fail when the new table already exists
 * @param isExternal whether the new table is created as an external table
 * @param location storage location for the new table, or null for the default
 * @throws HCatException if the table cannot be created
 */
public abstract void createTableLike(String dbName, String existingTblName, String newTableName,
    boolean ifNotExists, boolean isExternal, String location) throws HCatException;
/**
 * createTableLike should copy the source table's schema into a new table; both tables must
 * then be visible via the table-name pattern listing.
 */
@Test
public void testCreateTableLike() throws Exception {
  HCatClient client = HCatClient.create(new Configuration(hcatConf));
  String tableName = "tableone";
  String cloneTable = "tabletwo";
  // Start from a clean slate.
  client.dropTable(null, tableName, true);
  client.dropTable(null, cloneTable, true);
  ArrayList<HCatFieldSchema> cols = new ArrayList<HCatFieldSchema>();
  cols.add(new HCatFieldSchema("id", Type.INT, "id columns"));
  cols.add(new HCatFieldSchema("value", Type.STRING, "id columns"));
  HCatCreateTableDesc tableDesc = HCatCreateTableDesc
      .create(null, tableName, cols).fileFormat("rcfile").build();
  client.createTable(tableDesc);
  // create a new table similar to previous one.
  client.createTableLike(null, tableName, cloneTable, true, false, null);
  List<String> tables = client.listTableNamesByPattern(null, "table*");
  assertTrue(tables.size() == 2);
  client.close();
}
/**
 * Converts a scanner-report measure into a {@code Measure} matching the metric's value type.
 *
 * @param batchMeasure the measure from the scanner report, may be null
 * @param metric the target metric; must not be null
 * @return the converted measure, or empty when {@code batchMeasure} is null
 * @throws NullPointerException if {@code metric} is null
 * @throws IllegalArgumentException if the metric's value type is not supported
 */
public Optional<Measure> toMeasure(@Nullable ScannerReport.Measure batchMeasure, Metric metric) {
  Objects.requireNonNull(metric);
  if (batchMeasure == null) {
    return Optional.empty();
  }
  Measure.NewMeasureBuilder builder = Measure.newMeasureBuilder();
  // Dispatch on the metric's declared value type.
  switch (metric.getType().getValueType()) {
    case INT:
      return toIntegerMeasure(builder, batchMeasure);
    case LONG:
      return toLongMeasure(builder, batchMeasure);
    case DOUBLE:
      return toDoubleMeasure(builder, batchMeasure);
    case BOOLEAN:
      return toBooleanMeasure(builder, batchMeasure);
    case STRING:
      return toStringMeasure(builder, batchMeasure);
    case LEVEL:
      return toLevelMeasure(builder, batchMeasure);
    case NO_VALUE:
      return toNoValueMeasure(builder);
    default:
      throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType());
  }
}
/** A null metric must trigger an eager NullPointerException. */
@Test
public void toMeasure_throws_NPE_if_metric_argument_is_null() {
  assertThatThrownBy(() -> underTest.toMeasure(EMPTY_BATCH_MEASURE, null))
    .isInstanceOf(NullPointerException.class);
}
/**
 * Sets the init flag on this builder.
 *
 * @param init the flag value to record
 * @return this builder, for chaining
 */
public B init(Boolean init) {
  this.init = init;
  return getThis();
}
/** The init flag set on the builder must be reflected on the built reference. */
@Test
void init() {
  ReferenceBuilder builder = new ReferenceBuilder();
  Assertions.assertTrue(builder.init(true).build().isInit());
  Assertions.assertFalse(builder.init(false).build().isInit());
}
/**
 * Builds an instruction that rewrites a packet's Ethernet source address.
 *
 * @param addr the new source MAC address; must not be null
 * @return the ETH_SRC modification instruction
 */
public static L2ModificationInstruction modL2Src(MacAddress addr) {
  checkNotNull(addr, "Src l2 address cannot be null");
  return new L2ModificationInstruction.ModEtherInstruction(
      L2ModificationInstruction.L2SubType.ETH_SRC, addr);
}
/** modL2Src must produce an ETH_SRC modification instruction carrying the given MAC. */
@Test
public void testModL2SrcMethod() {
  final Instruction instruction = Instructions.modL2Src(mac1);
  final L2ModificationInstruction.ModEtherInstruction modEtherInstruction =
      checkAndConvert(instruction,
          Instruction.Type.L2MODIFICATION,
          L2ModificationInstruction.ModEtherInstruction.class);
  assertThat(modEtherInstruction.mac(), is(equalTo(mac1)));
  assertThat(modEtherInstruction.subtype(),
      is(equalTo(L2ModificationInstruction.L2SubType.ETH_SRC)));
}
/**
 * Creates a sessions windowing strategy with the given inactivity gap: elements closer
 * together than {@code gapDuration} end up merged into the same session window.
 */
public static Sessions withGapDuration(Duration gapDuration) {
  return new Sessions(gapDuration);
}
/**
 * Events within the gap duration of each other must merge into a single session window;
 * events farther apart form separate sessions, regardless of arrival order.
 */
@Test
public void testMerging() throws Exception {
  Map<IntervalWindow, Set<String>> expected = new HashMap<>();
  // 1,10,15,22,30 chain together (each within the 10ms gap of the growing window).
  expected.put(new IntervalWindow(new Instant(1), new Instant(40)), set(1, 10, 15, 22, 30));
  expected.put(new IntervalWindow(new Instant(95), new Instant(111)), set(95, 100, 101));
  assertEquals(
      expected,
      runWindowFn(
          Sessions.withGapDuration(Duration.millis(10)),
          Arrays.asList(1L, 15L, 30L, 100L, 101L, 95L, 22L, 10L)));
}
/**
 * Computes the percentage (0-100, truncated) of all elements that fall into this bin.
 *
 * @param elementsInBin the elements belonging to this bin
 * @param totalElements total number of elements across all bins
 * @return the truncated integer percentage, or 0 when {@code totalElements} is not positive
 */
@Override
public Long computeValue(final Collection<T> elementsInBin, final int totalElements) {
  if (totalElements > 0) {
    // FIX: use exact integer arithmetic instead of float. The old
    // (long) (100 * ((float) size / total)) could land just below the true ratio
    // (e.g. 29/100 -> 28 after truncation) because float cannot represent all ratios exactly.
    return (100L * elementsInBin.size()) / totalElements;
  } else {
    return 0L;
  }
}
/** One element out of a total of 50 corresponds to 2 percent. */
@Test
void testReturnsProperPercentage() {
  final Long result = toTest.computeValue(
      List.of(
          QueryExecutionStats.builder().duration(13).build()
      ),
      50
  );
  assertEquals(2, result); //1 out of 50 is 2%
}
@Override
/**
 * {@inheritDoc} Handles the bundle's completion report. Parses the monitoringInfos in the
 * response, then updates the MetricsRegistry.
 */
public void onCompleted(BeamFnApi.ProcessBundleResponse response) {
  // Skip payload-less infos, parse each into a metric update, drop duplicates, then apply.
  response.getMonitoringInfosList().stream()
      .filter(monitoringInfo -> !monitoringInfo.getPayload().isEmpty())
      .map(this::parseAndUpdateMetric)
      .distinct()
      .forEach(samzaMetricsContainer::updateMetrics);
}
/**
 * A distribution monitoring info must be decoded into count/sum/min/max and recorded
 * against the expected namespace and counter name.
 */
@Test
public void testDistribution() {
  // Count = 123, sum = 124, min = 125, max = 126 (octal escapes \173..\176).
  byte[] payload = "\173\174\175\176".getBytes(Charset.defaultCharset());
  MetricsApi.MonitoringInfo monitoringInfo =
      MetricsApi.MonitoringInfo.newBuilder()
          .setType(DISTRIBUTION_INT64_TYPE)
          .setPayload(ByteString.copyFrom(payload))
          .putLabels(MonitoringInfoConstants.Labels.NAMESPACE, EXPECTED_NAMESPACE)
          .putLabels(MonitoringInfoConstants.Labels.NAME, EXPECTED_COUNTER_NAME)
          .build();
  BeamFnApi.ProcessBundleResponse response =
      BeamFnApi.ProcessBundleResponse.newBuilder().addMonitoringInfos(monitoringInfo).build();
  // Execute
  samzaMetricsBundleProgressHandler.onCompleted(response);
  // Verify
  MetricName metricName = MetricName.named(EXPECTED_NAMESPACE, EXPECTED_COUNTER_NAME);
  DistributionCell gauge =
      (DistributionCell) samzaMetricsContainer.getContainer(stepName).getDistribution(metricName);
  assertEquals(123L, gauge.getCumulative().count());
  assertEquals(124L, gauge.getCumulative().sum());
  assertEquals(125L, gauge.getCumulative().min());
  assertEquals(126L, gauge.getCumulative().max());
}
@Override public int compare(String version1, String version2) { if(ObjectUtil.equal(version1, version2)) { return 0; } if (version1 == null && version2 == null) { return 0; } else if (version1 == null) {// null或""视为最小版本,排在前 return -1; } else if (version2 == null) { return 1; } return CompareUtil.compare(Version.of(version1), Version.of(version2)); }
@Test public void versionComparatorTest5() { int compare = VersionComparator.INSTANCE.compare("V1.2", "V1.1"); assertTrue(compare > 0); // 自反测试 compare = VersionComparator.INSTANCE.compare("V1.1", "V1.2"); assertTrue(compare < 0); }
/**
 * Retrieves a single post via the SPI.
 *
 * @param postId id of the post; must be greater than zero
 * @param context the post context; must not be null
 * @param password password for password-protected posts
 * @return the retrieved post
 * @throws IllegalArgumentException if {@code postId} is not positive
 * @throws NullPointerException if {@code context} is null
 */
@Override
public Post retrieve(Integer postId, Context context, String password) {
  LOGGER.debug("Calling retrievePosts: postId {}; postContext: {}", postId, context);
  if (postId <= 0) {
    throw new IllegalArgumentException("Please provide a non zero post id");
  }
  Objects.requireNonNull(context, "Provide a post context");
  return getSpi().retrieve(this.getApiVersion(), postId, context, password);
}
/** Retrieving an existing post must return a non-null post with a positive id. */
@Test
public void testRetrievePost() {
  final Post retrieved = servicePosts.retrieve(1);
  assertThat(retrieved, not(nullValue()));
  assertThat(retrieved.getId(), is(greaterThan(0)));
}
/**
 * Returns a live {@code RList} view over the values stored under {@code key}.
 *
 * <p>Mutating operations on the returned list are routed back through this multimap so the
 * key index stays consistent. Expiration and rename operations are not supported on the view.
 */
@Override
public RList<V> get(K key) {
  String keyHash = keyHash(key);
  String setName = getValuesName(keyHash);
  return new RedissonList<V>(codec, commandExecutor, setName, null) {
    @Override
    public RFuture<Boolean> addAsync(V value) {
      // Route through the multimap so the key gets indexed.
      return RedissonListMultimap.this.putAsync(key, value);
    }
    @Override
    public RFuture<Boolean> addAllAsync(Collection<? extends V> c) {
      return RedissonListMultimap.this.putAllAsync(key, c);
    }
    @Override
    public RFuture<Boolean> removeAsync(Object value) {
      return RedissonListMultimap.this.removeAsync(key, value);
    }
    @Override
    public RFuture<Boolean> removeAllAsync(Collection<?> c) {
      if (c.isEmpty()) {
        return new CompletableFutureWrapper<>(false);
      }
      List<Object> args = new ArrayList<>(c.size() + 1);
      args.add(encodeMapKey(key));
      encode(args, c);
      // Lua: lrem each value from the list; if the list becomes empty, drop the key
      // from the multimap's hash index as well.
      return commandExecutor.evalWriteAsync(RedissonListMultimap.this.getRawName(), codec, RedisCommands.EVAL_BOOLEAN,
          "local v = 0 " +
          "for i = 2, #ARGV, 1 do "
              + "if redis.call('lrem', KEYS[2], 0, ARGV[i]) == 1 then "
                  + "v = 1; "
              + "end "
          +"end "
          + "if v == 1 and redis.call('exists', KEYS[2]) == 0 then "
              + "redis.call('hdel', KEYS[1], ARGV[1]); "
          +"end "
          + "return v",
          Arrays.asList(RedissonListMultimap.this.getRawName(), setName),
          args.toArray());
    }
    @Override
    public RFuture<Boolean> deleteAsync() {
      ByteBuf keyState = encodeMapKey(key);
      return RedissonListMultimap.this.fastRemoveAsync(Arrays.asList(keyState),
          Arrays.asList(RedissonListMultimap.this.getRawName(), setName),
          RedisCommands.EVAL_BOOLEAN_AMOUNT);
    }
    @Override
    public RFuture<Boolean> clearExpireAsync() {
      throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set");
    }
    @Override
    public RFuture<Boolean> expireAsync(long timeToLive, TimeUnit timeUnit, String param, String... keys) {
      throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set");
    }
    @Override
    protected RFuture<Boolean> expireAtAsync(long timestamp, String param, String... keys) {
      throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set");
    }
    @Override
    public RFuture<Long> remainTimeToLiveAsync() {
      throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set");
    }
    @Override
    public RFuture<Void> renameAsync(String newName) {
      throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set");
    }
    @Override
    public RFuture<Boolean> renamenxAsync(String newName) {
      throw new UnsupportedOperationException("This operation is not supported for SetMultimap values Set");
    }
  };
}
/** putAll must append values (duplicates allowed) and preserve insertion order. */
@Test
public void testPutAll() {
  RListMultimap<SimpleKey, SimpleValue> map = redisson.getListMultimap("test1");
  List<SimpleValue> values = Arrays.asList(new SimpleValue("1"), new SimpleValue("2"),
      new SimpleValue("3"), new SimpleValue("3"));
  assertThat(map.putAll(new SimpleKey("0"), values)).isTrue();
  assertThat(map.putAll(new SimpleKey("0"), Arrays.asList(new SimpleValue("1")))).isTrue();
  List<SimpleValue> testValues = Arrays.asList(new SimpleValue("1"), new SimpleValue("2"),
      new SimpleValue("3"), new SimpleValue("3"), new SimpleValue("1"));
  assertThat(map.get(new SimpleKey("0"))).containsExactlyElementsOf(testValues);
}
/**
 * Checks whether the current user may scan the DevOps project, either through direct
 * repository permissions or through one of their groups.
 *
 * @return true when scanning is allowed
 * @throws IllegalStateException if no auth app token was provided
 */
@Override
public boolean isScanAllowedUsingPermissionsFromDevopsPlatform() {
  checkState(authAppInstallationToken != null, "An auth app token is required in case repository permissions checking is necessary.");
  String[] orgaAndRepoTokenified = devOpsProjectCreationContext.fullName().split("/");
  String organization = orgaAndRepoTokenified[0];
  String repository = orgaAndRepoTokenified[1];
  // FIX: the DB session was previously opened but never closed, leaking a pooled
  // connection on every call; try-with-resources guarantees it is released.
  Set<DevOpsPermissionsMappingDto> permissionsMappingDtos;
  try (var dbSession = dbClient.openSession(false)) {
    permissionsMappingDtos = dbClient.githubPermissionsMappingDao()
      .findAll(dbSession, devOpsPlatformSettings.getDevOpsPlatform());
  }
  boolean userHasDirectAccessToRepo = doesUserHaveScanPermission(organization, repository, permissionsMappingDtos);
  if (userHasDirectAccessToRepo) {
    return true;
  }
  return doesUserBelongToAGroupWithScanPermission(organization, repository, permissionsMappingDtos);
}
// Constructing the creator with a null auth app installation token and then asking for
// permission checks must fail fast with the documented IllegalStateException.
@Test
void isScanAllowedUsingPermissionsFromDevopsPlatform_whenNoAuthToken_throws() {
    githubProjectCreator = new GithubProjectCreator(dbClient, devOpsProjectCreationContext, projectKeyGenerator, gitHubSettings, null,
        permissionService, permissionUpdater, managedProjectService, githubApplicationClient, githubPermissionConverter, null);

    assertThatIllegalStateException().isThrownBy(() -> githubProjectCreator.isScanAllowedUsingPermissionsFromDevopsPlatform())
        .withMessage("An auth app token is required in case repository permissions checking is necessary.");
}
@Override
public void runMigrations() {
    // Ensure the dedicated migrations index exists first: the base implementation
    // records applied migrations in it, so it must be created before super runs.
    createMigrationsIndexIfNotExists();
    super.runMigrations();
}
// Index validation must fail before migrations have run (required indices missing) and
// succeed right after runMigrations creates them; a null index prefix exercises default naming.
@Test
void testValidateIndicesNoIndexPrefix() throws IOException {
    ElasticSearchDBCreator elasticSearchDBCreator = new ElasticSearchDBCreator(elasticSearchStorageProviderMock, elasticSearchClient(), null);

    // before migrations: required indices are absent
    assertThatThrownBy(elasticSearchDBCreator::validateIndices)
        .isInstanceOf(JobRunrException.class)
        .hasMessage("Not all required indices are available by JobRunr!");

    elasticSearchDBCreator.runMigrations();

    // after migrations: validation passes and all 5 expected indices exist
    assertThatCode(elasticSearchDBCreator::validateIndices).doesNotThrowAnyException();
    assertThat(elasticSearchClient().indices().get(g -> g.index("*")).result()).hasSize(5);
}
// Renames a file on the local filesystem, optionally replacing an existing target.
// Order matters: existence check, then delete of an existing target, then rename.
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status, final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException {
    // Fail fast when the source does not exist
    if(!new LocalFindFeature(session).find(file)) {
        throw new NotfoundException(file.getAbsolute());
    }
    // Overwrite semantics: remove an existing target first so renameTo can succeed
    if(status.isExists()) {
        new LocalDeleteFeature(session).delete(Collections.singletonMap(renamed, status), new DisabledPasswordCallback(), callback);
    }
    // File.renameTo only reports failure as a boolean; map it to a checked exception
    if(!session.toPath(file).toFile().renameTo(session.toPath(renamed).toFile())) {
        throw new LocalExceptionMappingService().map("Cannot rename {0}", new NoSuchFileException(file.getName()), file);
    }
    return renamed;
}
@Test
public void testMoveOverride() throws Exception {
    // Moving onto an existing file with exists(true) must replace the target.
    final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname()));
    session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
    session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
    final Path home = new LocalHomeFinderFeature().find();
    // create both source and destination files up front
    final Path source = new Path(home, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    new LocalTouchFeature(session).touch(source, new TransferStatus());
    final Path destination = new Path(home, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    new LocalTouchFeature(session).touch(destination, new TransferStatus());
    new LocalMoveFeature(session).move(source, destination, new TransferStatus().exists(true), new Delete.DisabledCallback(), new DisabledConnectionCallback());
    // source is gone, destination remains
    assertFalse(new LocalFindFeature(session).find(source));
    assertTrue(new LocalFindFeature(session).find(destination));
    // cleanup
    new LocalDeleteFeature(session).delete(Collections.<Path>singletonList(destination), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Merges a command-line option into {@code props} under {@code key}.
 * An explicitly supplied option always wins; an existing property value is kept
 * when the option was not given. A resolved null value removes the key entirely.
 */
public static <T> void maybeMergeOptions(Properties props, String key, OptionSet options, OptionSpec<T> spec) {
    boolean explicitlySet = options.has(spec);
    // Nothing to do: option absent and the property already has a value.
    if (!explicitlySet && props.containsKey(key)) {
        return;
    }
    T value = options.valueOf(spec);
    if (value == null) {
        props.remove(key);
    } else {
        props.put(key, value.toString());
    }
}
// When no options are supplied on the command line, maybeMergeOptions must leave
// pre-existing property values untouched (no overwrite with option defaults).
@Test
public void testMaybeMergeOptionsNotOverwriteExisting() {
    setUpOptions();
    props.put("skey", "existing-string");
    props.put("ikey", "300");
    props.put("sokey", "existing-string-2");
    props.put("iokey", "400");
    props.put("sondkey", "existing-string-3");
    props.put("iondkey", "500");
    // empty command line: none of the specs are present
    OptionSet options = parser.parse();
    CommandLineUtils.maybeMergeOptions(props, "skey", options, stringOpt);
    CommandLineUtils.maybeMergeOptions(props, "ikey", options, intOpt);
    CommandLineUtils.maybeMergeOptions(props, "sokey", options, stringOptOptionalArg);
    CommandLineUtils.maybeMergeOptions(props, "iokey", options, intOptOptionalArg);
    CommandLineUtils.maybeMergeOptions(props, "sondkey", options, stringOptOptionalArgNoDefault);
    CommandLineUtils.maybeMergeOptions(props, "iondkey", options, intOptOptionalArgNoDefault);
    // every existing value must survive unchanged
    assertEquals("existing-string", props.get("skey"));
    assertEquals("300", props.get("ikey"));
    assertEquals("existing-string-2", props.get("sokey"));
    assertEquals("400", props.get("iokey"));
    assertEquals("existing-string-3", props.get("sondkey"));
    assertEquals("500", props.get("iondkey"));
}
/**
 * Builds a component key for the project. When any branch-related characteristic
 * is present, the branch-aware delegate must handle it (and must exist); otherwise
 * a plain component key is returned.
 */
ComponentKey createComponentKey(String projectKey, Map<String, String> characteristics) {
    for (String characteristicKey : characteristics.keySet()) {
        if (BRANCH_CHARACTERISTICS.contains(characteristicKey)) {
            // Branch handling is an edition feature; fail loudly if it is missing.
            checkState(delegate != null, "Current edition does not support branch feature");
            return delegate.createComponentKey(projectKey, characteristics);
        }
    }
    return new ComponentKeyImpl(projectKey);
}
// Characteristics that are not branch-related must be ignored: the result equals a key
// built with no characteristics, exposes no branch/PR info, and the branch delegate is
// never consulted.
@Test
public void createComponentKey_whenCharacteristicsIsRandom_returnsComponentKey() {
    String projectKey = randomAlphanumeric(12);
    // random keys — presumably never collide with BRANCH_CHARACTERISTICS
    Map<String, String> nonEmptyMap = newRandomNonEmptyMap();

    ComponentKey componentKey = underTestWithBranch.createComponentKey(projectKey, nonEmptyMap);

    assertThat(componentKey).isEqualTo(underTestWithBranch.createComponentKey(projectKey, NO_CHARACTERISTICS));
    assertThat(componentKey.getKey()).isEqualTo(projectKey);
    assertThat(componentKey.getBranchName()).isEmpty();
    assertThat(componentKey.getPullRequestKey()).isEmpty();
    verifyNoInteractions(branchSupportDelegate);
}
/**
 * Resolves the parameter type for the given Java {@link Type} using the
 * default Java-to-argument-type mapping.
 */
public static ParamType getSchemaFromType(final Type type) {
    return getSchemaFromType(type, JAVA_TO_ARG_TYPE);
}
// Boolean.class must map to the BOOLEAN param type via the default mapping.
@Test
public void shouldGetBooleanSchemaForBooleanClass() {
    assertThat(
        UdfUtil.getSchemaFromType(Boolean.class),
        equalTo(ParamTypes.BOOLEAN)
    );
}
/**
 * Executes a DESCRIBE FUNCTION statement by dispatching to the describe variant
 * matching the function kind (aggregate, table, or scalar).
 */
public static StatementExecutorResponse execute(
    final ConfiguredStatement<DescribeFunction> statement,
    final SessionProperties sessionProperties,
    final KsqlExecutionContext executionContext,
    final ServiceContext serviceContext
) {
  final FunctionName functionName =
      FunctionName.of(statement.getStatement().getFunctionName());
  final String statementText = statement.getMaskedStatementText();

  // Kind checks are ordered: aggregate first, then table function, scalar as fallback.
  if (executionContext.getMetaStore().isAggregate(functionName)) {
    return StatementExecutorResponse.handled(Optional.of(
        describeAggregateFunction(executionContext, functionName, statementText)));
  }

  if (executionContext.getMetaStore().isTableFunction(functionName)) {
    return StatementExecutorResponse.handled(Optional.of(
        describeTableFunction(executionContext, functionName, statementText)));
  }

  return StatementExecutorResponse.handled(Optional.of(
      describeNonAggregateFunction(executionContext, functionName, statementText)));
}
// Describing OBJ_COL_ARG must yield an AGGREGATE function description with that name.
@Test
public void shouldDescribeUDAFWithObjVarArgs() {
    // When:
    final FunctionDescriptionList functionList = (FunctionDescriptionList)
        CustomExecutors.DESCRIBE_FUNCTION.execute(
            engine.configure("DESCRIBE FUNCTION OBJ_COL_ARG;"),
            mock(SessionProperties.class),
            engine.getEngine(),
            engine.getServiceContext()
        ).getEntity().orElseThrow(IllegalStateException::new);

    // Then: custom matcher checks both the name and the AGGREGATE function type
    assertThat(functionList, new TypeSafeMatcher<FunctionDescriptionList>() {
        @Override
        protected boolean matchesSafely(final FunctionDescriptionList item) {
            return functionList.getName().equals("OBJ_COL_ARG")
                && functionList.getType().equals(FunctionType.AGGREGATE);
        }

        @Override
        public void describeTo(final Description description) {
            description.appendText(functionList.getName());
        }
    });
}
@Override
public List<String> getServerList() {
    // An explicitly configured server list takes precedence; fall back to the
    // servers discovered from the endpoint only when none was configured.
    if (serverList.isEmpty()) {
        return serversFromEndpoint;
    }
    return serverList;
}
// The endpoint URL must honour the custom context path ("aaa") and cluster name ("bbb");
// the resulting server list then comes from the endpoint response.
@Test
void testConstructWithEndpointWithCustomPathAndName() throws Exception {
    clientProperties.setProperty(PropertyKeyConst.CONTEXT_PATH, "aaa");
    clientProperties.setProperty(PropertyKeyConst.ENDPOINT_CLUSTER_NAME, "bbb");
    clientProperties.setProperty(PropertyKeyConst.ENDPOINT, "127.0.0.1");
    Mockito.reset(nacosRestTemplate);
    // stub only the URL built from the custom path and name
    Mockito.when(nacosRestTemplate.get(eq("http://127.0.0.1:8080/aaa/bbb"), any(), any(), any()))
        .thenReturn(httpRestResult);
    serverListManager = new ServerListManager(clientProperties, "test");
    List<String> serverList = serverListManager.getServerList();
    assertEquals(1, serverList.size());
    assertEquals("127.0.0.1:8848", serverList.get(0));
}
@Override
public int getMaximumPoolSize() {
    // Configured upper bound on this executor's pool size.
    return maxPoolSize;
}
// The executor must report the maximum pool size it was constructed with.
@Test
public void getMaximumPoolSize() {
    int maxPoolSize = 123;
    assertEquals(maxPoolSize, newManagedExecutorService(maxPoolSize, 1).getMaximumPoolSize());
}
/**
 * Returns up to {@code limit} (dbId, transactionId) pairs for running transactions
 * whose coordinator is a BE at {@code coordinateHost}. Reads under the read lock.
 */
public List<Pair<Long, Long>> getTransactionIdByCoordinateBe(String coordinateHost, int limit) {
    ArrayList<Pair<Long, Long>> txnInfos = new ArrayList<>();
    readLock();
    try {
        idToRunningTransactionState.values().stream()
                // only transactions coordinated by a BE...
                .filter(t -> t.getCoordinator().sourceType == TransactionState.TxnSourceType.BE)
                // ...running on the requested host
                .filter(t -> t.getCoordinator().ip.equals(coordinateHost))
                .limit(limit)
                .map(t -> new Pair<>(t.getDbId(), t.getTransactionId()))
                .forEach(txnInfos::add);
    } finally {
        readUnlock();
    }
    return txnInfos;
}
// Lookup by coordinator host "be1" — the fixture appears to register 6 matching running
// transactions (TODO confirm against GlobalStateMgrTestUtil setup); all are returned
// (limit 10 not reached), tagged with the db id, and still in PREPARE state.
@Test
public void testGetTransactionIdByCoordinateBe() throws UserException {
    DatabaseTransactionMgr masterDbTransMgr =
        masterTransMgr.getDatabaseTransactionMgr(GlobalStateMgrTestUtil.testDbId1);
    List<Pair<Long, Long>> transactionInfoList = masterDbTransMgr.getTransactionIdByCoordinateBe("be1", 10);
    assertEquals(6, transactionInfoList.size());
    assertEquals(GlobalStateMgrTestUtil.testDbId1, transactionInfoList.get(0).first.longValue());
    assertEquals(TransactionStatus.PREPARE,
        masterDbTransMgr.getTransactionState(transactionInfoList.get(0).second).getTransactionStatus());
}
/**
 * Returns the map task's read operation, which must be the first operation
 * in the pipeline.
 *
 * @throws IllegalStateException when there are no operations, or the first
 *         operation is not a {@code ReadOperation}
 */
public ReadOperation getReadOperation() {
    if (operations == null || operations.isEmpty()) {
        throw new IllegalStateException("Map task has no operation.");
    }
    Operation first = operations.get(0);
    if (first instanceof ReadOperation) {
        return (ReadOperation) first;
    }
    throw new IllegalStateException("First operation in the map task is not a ReadOperation.");
}
// A map task whose first operation is a ReadOperation is valid:
// getReadOperation must return exactly that first operation.
@Test
public void testValidOperations() throws Exception {
    TestOutputReceiver receiver =
        new TestOutputReceiver(counterSet, NameContextsForTests.nameContextForTest());
    List<Operation> operations =
        Arrays.<Operation>asList(new TestReadOperation(receiver, createContext("ReadOperation")));
    ExecutionStateTracker stateTracker = ExecutionStateTracker.newForTest();
    // executor is closed via try-with-resources
    try (MapTaskExecutor executor = new MapTaskExecutor(operations, counterSet, stateTracker)) {
        Assert.assertEquals(operations.get(0), executor.getReadOperation());
    }
}
// Entry point for the CLI command: validates and loads the migrations config, then
// delegates to the testable overload with real collaborators (ksql client factory,
// migrations directory, system clock). Returns 1 on any configuration failure.
@Override
protected int command() {
    if (!validateConfigFilePresent()) {
        return 1;
    }

    final MigrationConfig config;
    try {
        config = MigrationConfig.load(getConfigFile());
    } catch (KsqlException | MigrationException e) {
        // Config problems are logged rather than rethrown; non-zero exit signals failure.
        LOGGER.error(e.getMessage());
        return 1;
    }

    return command(
        config,
        MigrationsUtil::getKsqlClient,
        getMigrationsDir(getConfigFile(), config),
        Clock.systemDefaultZone()
    );
}
// -d name=value variables passed on the command line must be substituted into every
// migration file (not just the first), and migrations apply in version order.
@Test
public void shouldApplyArgumentVariablesEveryMigration() throws Exception {
    // Given: two migrations, each referencing a different ${variable}
    command = PARSER.parse("-a", "-d", "name=tame", "-d", "dame=blame");
    createMigrationFile(1, NAME, migrationsDir, "INSERT INTO FOO VALUES ('${name}');");
    createMigrationFile(2, NAME, migrationsDir, "INSERT INTO FOO VALUES ('${dame}');");
    when(versionQueryResult.get()).thenReturn(ImmutableList.of());
    when(ksqlClient.getVariables()).thenReturn(
        ImmutableMap.of("name", "tame", "dame", "blame")
    );
    givenAppliedMigration(1, NAME, MigrationState.MIGRATED);

    // When:
    final int result = command.command(config, (cfg, headers) -> ksqlClient, migrationsDir,
        Clock.fixed(Instant.ofEpochMilli(1000), ZoneId.systemDefault()));

    // Then: both inserts carry their substituted values, in order, and the client is closed
    assertThat(result, is(0));
    final InOrder inOrder = inOrder(ksqlClient);
    inOrder.verify(ksqlClient).insertInto("`FOO`", new KsqlObject(ImmutableMap.of("`A`", "tame")));
    inOrder.verify(ksqlClient).insertInto("`FOO`", new KsqlObject(ImmutableMap.of("`A`", "blame")));
    inOrder.verify(ksqlClient).close();
    inOrder.verifyNoMoreInteractions();
}
// Utility class; private constructor prevents instantiation.
private ArrayAccess() {
}
// Negative indices count from the end of the array: -1 must yield the last element.
@Test
public void shouldSupportNegativeIndex() {
    // Given:
    final List<Integer> list = ImmutableList.of(1, 2);

    // When:
    final Integer access = ArrayAccess.arrayAccess(list, -1);

    // Then:
    assertThat(access, is(2));
}
// Verifies the request's JWT and, on success, continues down the plugin chain with the
// rule handle applied to the decoded claims. Short-circuits with an error response when
// no secret key is configured or the token fails verification.
@Override
protected Mono<Void> doExecute(final ServerWebExchange exchange, final ShenyuPluginChain chain, final SelectorData selector, final RuleData rule) {
    JwtConfig jwtConfig = Singleton.INST.get(JwtConfig.class);
    String authorization = exchange.getRequest().getHeaders().getFirst(HttpHeaders.AUTHORIZATION);
    String token = exchange.getRequest().getHeaders().getFirst(TOKEN);
    // A secret key must be configured before any token can be verified.
    if (StringUtils.isEmpty(jwtConfig.getSecretKey())) {
        Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.SECRET_KEY_MUST_BE_CONFIGURED);
        return WebFluxResultUtils.result(exchange, error);
    }
    // Backward compatibility: merge the legacy "token" header with the Authorization
    // header — presumably compatible() picks whichever is set; TODO confirm precedence.
    String finalAuthorization = compatible(token, authorization);
    Map<String, Object> jwtBody = checkAuthorization(finalAuthorization, jwtConfig.getSecretKey());
    // Null body means verification failed — reject with an error-token response.
    if (Objects.isNull(jwtBody)) {
        Object error = ShenyuResultWrap.error(exchange, ShenyuResultEnum.ERROR_TOKEN);
        return WebFluxResultUtils.result(exchange, error);
    }
    return chain.execute(executeRuleHandle(rule, exchange, jwtBody));
}
// With a valid token, the rule's converter maps the JWT claim "userId" to request header
// "id"; the downstream chain must observe header id=1 and the mono completes.
@Test
public void testDoExecute() {
    ruleData.setHandle("{\"converter\":[{\"jwtVal\":\"userId\",\"headerVal\":\"id\"}]}");
    jwtPluginDataHandlerUnderTest.handlerRule(ruleData);
    when(this.chain.execute(any())).thenReturn(Mono.empty());
    Mono<Void> mono = jwtPluginUnderTest.doExecute(exchange, chain, selectorData, ruleData);
    StepVerifier.create(mono).expectSubscription().verifyComplete();
    // the exchange passed downstream carries the converted header
    verify(chain)
        .execute(argThat(exchange -> hasHeader(exchange, "id", "1")));
}
/**
 * Loads application settings from the home directory's properties file, resolving
 * environment-variable overrides, defaults, and registered consumers.
 * Properties are loaded in two passes because some keys (e.g. {@code ldap.*.url})
 * are only discoverable after the static properties have been resolved.
 */
@Override
public AppSettings load() {
    Properties p = loadPropertiesFile(homeDir);
    // keys that may be overridden from the environment: all known process properties
    // plus everything present in the file itself
    Set<String> keysOverridableFromEnv = stream(ProcessProperties.Property.values()).map(ProcessProperties.Property::getKey)
        .collect(Collectors.toSet());
    keysOverridableFromEnv.addAll(p.stringPropertyNames());

    // 1st pass to load static properties
    Props staticProps = reloadProperties(keysOverridableFromEnv, p);
    keysOverridableFromEnv.addAll(getDynamicPropertiesKeys(staticProps));

    // 2nd pass to load dynamic properties like `ldap.*.url` or `ldap.*.baseDn` which keys depend on values of static
    // properties loaded in 1st step
    Props props = reloadProperties(keysOverridableFromEnv, p);
    new ProcessProperties(serviceLoaderWrapper).completeDefaults(props);
    stream(consumers).forEach(c -> c.accept(props));
    return new AppSettingsImpl(props);
}
// If conf/sonar.properties exists but is a directory, loading must fail with an
// IllegalStateException naming the offending path.
@Test
public void throws_ISE_if_file_fails_to_be_loaded() throws Exception {
    File homeDir = temp.newFolder();
    // create a directory where the properties *file* is expected
    File propsFileAsDir = new File(homeDir, "conf/sonar.properties");
    FileUtils.forceMkdir(propsFileAsDir);
    AppSettingsLoaderImpl underTest = new AppSettingsLoaderImpl(system, new String[0], homeDir, serviceLoaderWrapper);

    assertThatThrownBy(() -> underTest.load())
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("Cannot open file " + propsFileAsDir.getAbsolutePath());
}
/**
 * Attempts to send every record currently queued in {@code toSend} to Kafka.
 * Records dropped by the transformation chain or conversion are skipped and counted;
 * a retriable send failure stops the batch and keeps the unsent tail in {@code toSend}
 * for the next attempt.
 *
 * @return true when the whole batch was dispatched, false when a retriable failure
 *         means the remaining records must be retried later
 */
boolean sendRecords() {
    int processed = 0;
    recordBatch(toSend.size());
    final SourceRecordWriteCounter counter =
            toSend.isEmpty() ? null : new SourceRecordWriteCounter(toSend.size(), sourceTaskMetricsGroup);
    for (final SourceRecord preTransformRecord : toSend) {
        ProcessingContext<SourceRecord> context = new ProcessingContext<>(preTransformRecord);
        final SourceRecord record = transformationChain.apply(context, preTransformRecord);
        final ProducerRecord<byte[], byte[]> producerRecord = convertTransformedRecord(context, record);
        // A null producer record or a failed context means the record was filtered out
        // (or failed) during transformation/conversion — skip it and move on.
        if (producerRecord == null || context.failed()) {
            counter.skipRecord();
            recordDropped(preTransformRecord);
            processed++;
            continue;
        }

        log.trace("{} Appending record to the topic {} with key {}, value {}", this, record.topic(), record.key(), record.value());
        Optional<SubmittedRecords.SubmittedRecord> submittedRecord = prepareToSendRecord(preTransformRecord, producerRecord);
        try {
            final String topic = producerRecord.topic();
            maybeCreateTopic(topic);
            producer.send(
                    producerRecord,
                    (recordMetadata, e) -> {
                        // Asynchronous completion callback: runs on the producer's I/O thread.
                        if (e != null) {
                            if (producerClosed) {
                                log.trace("{} failed to send record to {}; this is expected as the producer has already been closed", AbstractWorkerSourceTask.this, topic, e);
                            } else {
                                log.error("{} failed to send record to {}: ", AbstractWorkerSourceTask.this, topic, e);
                            }
                            log.trace("{} Failed record: {}", AbstractWorkerSourceTask.this, preTransformRecord);
                            producerSendFailed(context, false, producerRecord, preTransformRecord, e);
                            if (retryWithToleranceOperator.getErrorToleranceType() == ToleranceType.ALL) {
                                // Error tolerance "all": treat the failed record as handled
                                // so its submitted offset can still be acknowledged.
                                counter.skipRecord();
                                submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::ack);
                            }
                        } else {
                            counter.completeRecord();
                            log.trace("{} Wrote record successfully: topic {} partition {} offset {}",
                                    AbstractWorkerSourceTask.this,
                                    recordMetadata.topic(), recordMetadata.partition(),
                                    recordMetadata.offset());
                            recordSent(preTransformRecord, producerRecord, recordMetadata);
                            submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::ack);
                            if (topicTrackingEnabled) {
                                recordActiveTopic(producerRecord.topic());
                            }
                        }
                    });
            // Note that this will cause retries to take place within a transaction
        } catch (RetriableException | org.apache.kafka.common.errors.RetriableException e) {
            log.warn("{} Failed to send record to topic '{}' and partition '{}'. Backing off before retrying: ",
                    this, producerRecord.topic(), producerRecord.partition(), e);
            // Keep only the unsent tail for the next attempt and undo the in-flight submission.
            toSend = toSend.subList(processed, toSend.size());
            submittedRecord.ifPresent(SubmittedRecords.SubmittedRecord::drop);
            counter.retryRemaining();
            return false;
        } catch (ConnectException e) {
            log.warn("{} Failed to send record to topic '{}' and partition '{}' due to an unrecoverable exception: ",
                    this, producerRecord.topic(), producerRecord.partition(), e);
            log.trace("{} Failed to send {} with unrecoverable exception: ", this, producerRecord, e);
            throw e;
        } catch (KafkaException e) {
            producerSendFailed(context, true, producerRecord, preTransformRecord, e);
        }
        processed++;
        recordDispatched(preTransformRecord);
    }
    // Whole batch dispatched (successfully sent, skipped, or handed to error handling).
    toSend = null;
    batchDispatched();
    return true;
}
// Topic auto-creation: when describeTopics finds nothing but createOrFindTopics reports
// the topic as already existing ("found"), sending must proceed and both records go out.
@Test
public void testTopicCreateSucceedsWhenCreateReturnsExistingTopicFound() {
    createWorkerTask();

    SourceRecord record1 = new SourceRecord(PARTITION, OFFSET, TOPIC, 1, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);
    SourceRecord record2 = new SourceRecord(PARTITION, OFFSET, TOPIC, 2, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD);

    expectSendRecord(emptyHeaders());
    expectApplyTransformationChain();

    // describe sees no topic, but create reports it was found already existing
    when(admin.describeTopics(TOPIC)).thenReturn(Collections.emptyMap());
    when(admin.createOrFindTopics(any(NewTopic.class))).thenReturn(foundTopic(TOPIC));

    workerTask.toSend = Arrays.asList(record1, record2);
    workerTask.sendRecords();

    // both records must reach the producer
    ArgumentCaptor<ProducerRecord<byte[], byte[]>> sent = verifySendRecord(2);

    List<ProducerRecord<byte[], byte[]>> capturedValues = sent.getAllValues();
    assertEquals(2, capturedValues.size());

    verifyTaskGetTopic(2);
    verifyTopicCreation();
}
/**
 * Returns the cached {@code SmtpCommand} for the given name, or a freshly created
 * command when the name is not one of the predefined ones. Lookup is case-sensitive.
 */
public static SmtpCommand valueOf(CharSequence commandName) {
    ObjectUtil.checkNotNull(commandName, "commandName");
    SmtpCommand cached = COMMANDS.get(commandName.toString());
    if (cached != null) {
        return cached;
    }
    return new SmtpCommand(AsciiString.of(commandName));
}
// Well-known command names must resolve to the cached singleton instances; lookup is
// case-sensitive, so a lowercase name yields a distinct (freshly created) instance.
@Test
public void getCommandFromCache() {
    assertSame(SmtpCommand.DATA, SmtpCommand.valueOf("DATA"));
    assertSame(SmtpCommand.EHLO, SmtpCommand.valueOf("EHLO"));
    assertNotSame(SmtpCommand.EHLO, SmtpCommand.valueOf("ehlo"));
}
/**
 * Converts this table to a list of raw {@code String} cell values
 * (delegates to the typed overload with {@code String.class}).
 */
public List<String> asList() {
    return asList(String.class);
}
// asList() with no type argument must return the raw String cell values of the
// single-column table, in row order.
@Test
void asList_returns_list_of_raw() {
    DataTable table = createSingleColumnNumberTable();
    assertEquals(asList("1", "2"), table.asList());
}
/**
 * Draws a new delay value from the configured optimal multicast backoff
 * distribution: {@code constantT * ln((uniform + baseX) * factorT)}.
 */
public double generateNewOptimalDelay() {
    final double uniform = uniformRandom(randMax);
    final double shifted = uniform + baseX;
    return constantT * Math.log(shifted * factorT);
}
// Sampled delays must never exceed the configured maximum backoff, across many draws.
@Test
void shouldNotExceedTmaxBackoff() {
    final OptimalMulticastDelayGenerator generator = new OptimalMulticastDelayGenerator(MAX_BACKOFF, GROUP_SIZE);

    for (int i = 0; i < 100_000; i++) {
        final double delay = generator.generateNewOptimalDelay();
        assertThat(delay, lessThanOrEqualTo((double)MAX_BACKOFF));
    }
}
/**
 * Whether the runtime filter can be applied locally under the current join mode.
 * Per the listed modes, only the partitioned (shuffle) mode is excluded.
 */
public boolean isLocalApplicable() {
    // broadcast-style and colocated joins qualify...
    if (joinMode.equals(BROADCAST) || joinMode.equals(COLOCATE)) {
        return true;
    }
    // ...as do both hash-bucket variants
    if (joinMode.equals(LOCAL_HASH_BUCKET) || joinMode.equals(SHUFFLE_HASH_BUCKET)) {
        return true;
    }
    return joinMode.equals(REPLICATED);
}
// Table-driven check: every distribution mode except PARTITIONED must permit
// local runtime-filter application.
@Test
public void testIsLocalApplicable() throws IOException {
    ConnectContext ctx = UtFrameUtils.createDefaultCtx();
    // {mode, expected isLocalApplicable}
    Object[][] testCases = new Object[][]{
        {JoinNode.DistributionMode.BROADCAST, true},
        {JoinNode.DistributionMode.COLOCATE, true},
        {JoinNode.DistributionMode.LOCAL_HASH_BUCKET, true},
        {JoinNode.DistributionMode.SHUFFLE_HASH_BUCKET, true},
        {JoinNode.DistributionMode.PARTITIONED, false},
        {JoinNode.DistributionMode.REPLICATED, true},
    };
    for (Object[] tc : testCases) {
        JoinNode.DistributionMode joinMode = (JoinNode.DistributionMode) tc[0];
        Boolean expect = (Boolean) tc[1];
        RuntimeFilterDescription rf = new RuntimeFilterDescription(ctx.getSessionVariable());
        rf.setJoinMode(joinMode);
        Assert.assertEquals(rf.isLocalApplicable(), expect);
    }
}