focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override
public void persistInstance(final InstanceEntity instance) {
    // Translate the Shenyu instance into a Polaris registration request.
    final InstanceRegisterRequest request = new InstanceRegisterRequest();
    request.setInstanceId(buildInstanceNodeName(instance));
    request.setWeight(1);
    request.setHost(instance.getHost());
    request.setPort(instance.getPort());
    request.setService(instance.getAppName());
    request.setNamespace(namespace);
    try {
        providerAPI.registerInstance(request);
    } catch (PolarisException e) {
        // Surface registry failures through the framework's own exception type.
        throw new ShenyuException(e);
    }
}
// Persists an instance and verifies the in-memory registry stub captured its
// host, port and service name under the app-name key; closes the repository after.
@Test public void testPersistInstance() { InstanceEntity data = InstanceEntity.builder() .appName("shenyu-test") .host("shenyu-host") .port(9195) .build(); final String key = "shenyu-test"; repository.persistInstance(data); assertTrue(storage.containsKey(key)); final Instance instance = storage.get(key); assertEquals(data.getHost(), instance.getHost()); assertEquals(data.getPort(), instance.getPort()); assertEquals(data.getAppName(), instance.getService()); repository.close(); }
public static void main(String[] args) throws Exception { final var dataSource = createDataSource(); deleteSchema(dataSource); createSchema(dataSource); final var dao = new HotelDaoImpl(dataSource); // Add rooms addRooms(dao); // Print room booking status getRoomStatus(dao); var hotel = new Hotel(dao); // Book rooms hotel.bookRoom(1); hotel.bookRoom(2); hotel.bookRoom(3); hotel.bookRoom(4); hotel.bookRoom(5); hotel.bookRoom(6); // Cancel booking for a few rooms hotel.cancelRoomBooking(1); hotel.cancelRoomBooking(3); hotel.cancelRoomBooking(5); getRoomStatus(dao); deleteSchema(dataSource); }
// Smoke test: the demo's main() must run end-to-end without throwing.
@Test void shouldExecuteWithoutException() { assertDoesNotThrow(() -> App.main(new String[]{})); }
public static String toSvgName( String name ) {
  // Swap a trailing ".png" for the SVG extension; any other name passes through untouched.
  if ( isPngName( name ) ) {
    final int extensionStart = name.length() - 4;
    return name.substring( 0, extensionStart ) + SVG_EXTENSION;
  }
  return name;
}
// Covers isSvgName (case-insensitive ".svg" detection, no bare "svg") and the png->svg rewrite.
@Test public void testToSvgName() throws Exception { assertTrue( SvgSupport.isSvgName( "my_file.svg" ) ); assertTrue( SvgSupport.isSvgName( "my_file.SVG" ) ); assertTrue( SvgSupport.isSvgName( ".svg" ) ); assertFalse( SvgSupport.isSvgName( "svg" ) ); assertFalse( SvgSupport.isSvgName( "myFile.png" ) ); assertEquals( "myFile.svg", SvgSupport.toSvgName( "myFile.png" ) ); }
public static <K, V> Printed<K, V> toFile(final String filePath) {
    // Validate the path up front: null and blank values are both rejected.
    Objects.requireNonNull(filePath, "filePath can't be null");
    if (Utils.isBlank(filePath)) {
        throw new TopologyException("filePath can't be an empty string");
    }
    try {
        return new Printed<>(Files.newOutputStream(Paths.get(filePath)));
    } catch (final IOException e) {
        // Re-wrap I/O failures in the topology-level exception callers expect.
        final String message = "Unable to write stream to file at [" + filePath + "] " + e.getMessage();
        throw new TopologyException(message);
    }
}
// An unwritable/nonexistent path makes toFile wrap the IOException in a TopologyException.
@Test public void shouldThrowTopologyExceptionIfFilePathDoesntExist() { assertThrows(TopologyException.class, () -> Printed.toFile("/this/should/not/exist")); }
// Returns the number of tasks executed so far.
// NOTE(review): executedCount's declaration is not visible here — whether updates
// are atomic/visible across threads (exact vs. approximate count) must be
// confirmed at the field's definition.
@Override public long getCompletedTaskCount() { return executedCount; }
// Submits 10 no-op tasks and waits (assertTrueEventually) for the completed count to reach 10.
@Test public void getCompletedTaskCount_whenTasksSubmitted() { final int taskCount = 10; final ManagedExecutorService executorService = newManagedExecutorService(); for (int i = 0; i < taskCount; i++) { executeNopTask(executorService); } assertTrueEventually(() -> assertEquals(taskCount, executorService.getCompletedTaskCount())); }
// Copies the entity identity onto this metric, then folds the incoming sample into
// the stored value via setMaxValue.
// NOTE(review): the merge semantics live in DataTable.setMaxValue — the companion
// test expects accumulation across calls; confirm against that implementation.
@Override public void accept(final MeterEntity entity, final DataTable value) { setEntityId(entity.id()); setServiceId(entity.serviceId()); this.value.setMaxValue(value); }
// Two accepts on the same entity must merge: COUNT_1 then COUNT_2 yields COUNT_3.
@Test public void testAccept() { function.accept(MeterEntity.newService("service-test", Layer.GENERAL), HTTP_CODE_COUNT_1); assertThat(function.getValue()).isEqualTo(HTTP_CODE_COUNT_1); function.accept(MeterEntity.newService("service-test", Layer.GENERAL), HTTP_CODE_COUNT_2); assertThat(function.getValue()).isEqualTo(HTTP_CODE_COUNT_3); }
/**
 * Checks every module's bundled dependencies against its dependency tree and
 * collects, per module, the dependencies that violate the optional-flag rules.
 *
 * @throws IllegalStateException if a module is known to the shade plugin but
 *     missing from the dependency plugin's output.
 */
private static Map<String, Set<Dependency>> checkOptionalFlags(
        Map<String, Set<Dependency>> bundledDependenciesByModule,
        Map<String, DependencyTree> dependenciesByModule) {
    final Map<String, Set<Dependency>> allViolations = new HashMap<>();
    for (Map.Entry<String, Set<Dependency>> moduleEntry : bundledDependenciesByModule.entrySet()) {
        final String module = moduleEntry.getKey();
        LOG.debug("Checking module '{}'.", module);
        if (!dependenciesByModule.containsKey(module)) {
            // Every shaded module must also be known to the dependency plugin.
            throw new IllegalStateException(
                    String.format(
                            "Module %s listed by shade-plugin, but not dependency-plugin.",
                            module));
        }
        final Set<Dependency> violations =
                checkOptionalFlags(module, moduleEntry.getValue(), dependenciesByModule.get(module));
        if (violations.isEmpty()) {
            LOG.info("OK: {}", module);
        } else {
            allViolations.put(module, violations);
        }
    }
    return allViolations;
}
// A mandatory transitive dependency whose parent is test-scoped must not be flagged.
@Test void testTransitiveBundledDependencyMayNotBeOptionalIfParentHasTestScope() { final Dependency dependencyA = createTestDependency("a"); final Dependency dependencyB = createMandatoryDependency("b"); final Set<Dependency> bundled = Collections.singleton(dependencyB); final DependencyTree dependencyTree = new DependencyTree() .addDirectDependency(dependencyA) .addTransitiveDependencyTo(dependencyB, dependencyA); final Set<Dependency> violations = ShadeOptionalChecker.checkOptionalFlags(MODULE, bundled, dependencyTree); assertThat(violations).isEmpty(); }
// Builds a CircuitBreakerRegistry from the per-name configuration properties
// (each config run through the registered customizers), the given registry-level
// event consumer, and an immutable copy of the global tags.
CircuitBreakerRegistry createCircuitBreakerRegistry( CircuitBreakerConfigurationProperties circuitBreakerProperties, RegistryEventConsumer<CircuitBreaker> circuitBreakerRegistryEventConsumer, CompositeCustomizer<CircuitBreakerConfigCustomizer> customizerMap) { Map<String, CircuitBreakerConfig> configs = circuitBreakerProperties.getConfigs() .entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, entry -> circuitBreakerProperties .createCircuitBreakerConfig(entry.getKey(), entry.getValue(), customizerMap))); return CircuitBreakerRegistry.of(configs, circuitBreakerRegistryEventConsumer, Map.copyOf(circuitBreakerProperties.getTags())); }
// End-to-end registry creation: both backends get their configured sliding-window
// sizes, the aspect order is preserved, and one event consumer is registered per breaker.
@Test public void testCreateCircuitBreakerRegistry() { InstanceProperties instanceProperties1 = new InstanceProperties(); instanceProperties1.setSlidingWindowSize(1000); InstanceProperties instanceProperties2 = new InstanceProperties(); instanceProperties2.setSlidingWindowSize(1337); CircuitBreakerConfigurationProperties circuitBreakerConfigurationProperties = new CircuitBreakerConfigurationProperties(); circuitBreakerConfigurationProperties.setCircuitBreakerAspectOrder(400); circuitBreakerConfigurationProperties.getInstances().put("backend1", instanceProperties1); circuitBreakerConfigurationProperties.getInstances().put("backend2", instanceProperties2); CircuitBreakerConfiguration circuitBreakerConfiguration = new CircuitBreakerConfiguration( circuitBreakerConfigurationProperties); DefaultEventConsumerRegistry<CircuitBreakerEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>(); CircuitBreakerRegistry circuitBreakerRegistry = circuitBreakerConfiguration .circuitBreakerRegistry(eventConsumerRegistry, new CompositeRegistryEventConsumer<>(emptyList()), compositeCircuitBreakerCustomizerTestInstance()); assertThat(circuitBreakerConfigurationProperties.getCircuitBreakerAspectOrder()) .isEqualTo(400); assertThat(circuitBreakerRegistry.getAllCircuitBreakers().size()).isEqualTo(2); CircuitBreaker circuitBreaker1 = circuitBreakerRegistry.circuitBreaker("backend1"); assertThat(circuitBreaker1).isNotNull(); assertThat(circuitBreaker1.getCircuitBreakerConfig().getSlidingWindowSize()) .isEqualTo(1000); CircuitBreaker circuitBreaker2 = circuitBreakerRegistry.circuitBreaker("backend2"); assertThat(circuitBreaker2).isNotNull(); assertThat(circuitBreaker2.getCircuitBreakerConfig().getSlidingWindowSize()) .isEqualTo(1337); assertThat(eventConsumerRegistry.getAllEventConsumer()).hasSize(2); }
// Negation of isBlank: true only when str is non-null and has a non-whitespace character.
public static boolean isNotBlank(String str) { return !isBlank(str); }
// null, empty and whitespace-only strings are blank; "abc" is not.
@Test public void testIsNotBlank() { assertThat(StringUtils.isNotBlank(null)).isFalse(); assertThat(StringUtils.isNotBlank("abc")).isTrue(); assertThat(StringUtils.isNotBlank("")).isFalse(); assertThat(StringUtils.isNotBlank(" ")).isFalse(); }
@Override
public boolean createEmptyObject(String key) {
  // Creates a zero-byte object under the given key (used as a directory marker).
  // Storage errors are logged and reported via the boolean return, not rethrown.
  try {
    GSObject emptyObject = new GSObject(key);
    emptyObject.setDataInputStream(new ByteArrayInputStream(new byte[0]));
    emptyObject.setContentLength(0);
    emptyObject.setMd5Hash(DIR_HASH);
    emptyObject.setContentType(Mimetypes.MIMETYPE_BINARY_OCTET_STREAM);
    mClient.putObject(mBucketName, emptyObject);
    return true;
  } catch (ServiceException e) {
    LOG.error("Failed to create directory: {}", key, e);
    return false;
  }
}
@Test public void testCreateEmptyObject() throws ServiceException { // test successful create empty object Mockito.when(mClient.putObject(ArgumentMatchers.anyString(), ArgumentMatchers.any(GSObject.class))).thenReturn(null); boolean result = mGCSUnderFileSystem.createEmptyObject(KEY); Assert.assertTrue(result); // test create empty object exception Mockito.when(mClient.putObject(ArgumentMatchers.anyString(), ArgumentMatchers.any(GSObject.class))).thenThrow(ServiceException.class); try { mGCSUnderFileSystem.createEmptyObject(KEY); } catch (Exception e) { Assert.assertTrue(e instanceof ServiceException); } }
/**
 * Wraps {@code sourceSql} in the pagination template for the given dialect.
 *
 * @throws NotSupportYetException for dialects without a template
 */
public static String pageSql(String sourceSql, String dbType, int pageNum, int pageSize) {
    // Row window shared by the oracle/sqlserver templates (1-based, inclusive).
    final int firstRow = pageSize * (pageNum - 1) + 1;
    final int lastRow = pageSize * pageNum;
    switch (dbType) {
        case "mysql":
        case "h2":
        case "postgresql":
        case "oceanbase":
        case "dm":
            // LIMIT/OFFSET dialects share one template; the offset is 0-based.
            return LIMIT_TEMPLATE.replace(SOURCE_SQL_PLACE_HOLD, sourceSql)
                    .replace(LIMIT_PLACE_HOLD, String.valueOf(pageSize))
                    .replace(OFFSET_PLACE_HOLD, String.valueOf((pageNum - 1) * pageSize));
        case "oracle":
            return ORACLE_PAGE_TEMPLATE.replace(SOURCE_SQL_PLACE_HOLD, sourceSql)
                    .replace(START_PLACE_HOLD, String.valueOf(firstRow))
                    .replace(END_PLACE_HOLD, String.valueOf(lastRow));
        case "sqlserver":
            return SQLSERVER_PAGE_TEMPLATE.replace(SOURCE_SQL_PLACE_HOLD, sourceSql)
                    .replace(START_PLACE_HOLD, String.valueOf(firstRow))
                    .replace(END_PLACE_HOLD, String.valueOf(lastRow));
        default:
            throw new NotSupportYetException("PageUtil not support this dbType:" + dbType);
    }
}
// Verifies generated pagination SQL for every supported dialect and the failure for unknown ones.
@Test public void testPageSql() { String sourceSql = "select * from test where a = 1"; String mysqlTargetSql = "select * from test where a = 1 limit 5 offset 0"; String oracleTargetSql = "select * from " + "( select ROWNUM rn, temp.* from (select * from test where a = 1) temp )" + " where rn between 1 and 5"; String sqlserverTargetSql = "select * from (select temp.*, ROW_NUMBER() OVER(ORDER BY gmt_create desc) AS rowId from (select * from test where a = 1) temp ) t where t.rowId between 1 and 5"; assertEquals(PageUtil.pageSql(sourceSql, "mysql", 1, 5), mysqlTargetSql); assertEquals(PageUtil.pageSql(sourceSql, "h2", 1, 5), mysqlTargetSql); assertEquals(PageUtil.pageSql(sourceSql, "postgresql", 1, 5), mysqlTargetSql); assertEquals(PageUtil.pageSql(sourceSql, "oceanbase", 1, 5), mysqlTargetSql); assertEquals(PageUtil.pageSql(sourceSql, "dm", 1, 5), mysqlTargetSql); assertEquals(PageUtil.pageSql(sourceSql, "oracle", 1, 5), oracleTargetSql); assertEquals(PageUtil.pageSql(sourceSql, "sqlserver", 1, 5), sqlserverTargetSql); assertThrows(NotSupportYetException.class, () -> PageUtil.pageSql(sourceSql, "xxx", 1, 5)); }
@Override
public Set<String> getOutputResourceFields( ExcelOutputMeta meta ) {
  // Collect the name of every configured output field (duplicates collapse in the set).
  Set<String> fields = new HashSet<>();
  for ( ExcelField outputField : meta.getOutputFields() ) {
    fields.add( outputField.getName() );
  }
  return fields;
}
// The analyzer must report exactly the names of the meta's configured output fields.
@Test public void testGetOutputResourceFields() throws Exception { ExcelField[] outputFields = new ExcelField[2]; ExcelField field1 = mock( ExcelField.class ); ExcelField field2 = mock( ExcelField.class ); outputFields[0] = field1; outputFields[1] = field2; when( field1.getName() ).thenReturn( "field1" ); when( field2.getName() ).thenReturn( "field2" ); when( meta.getOutputFields() ).thenReturn( outputFields ); Set<String> outputResourceFields = analyzer.getOutputResourceFields( meta ); assertEquals( outputFields.length, outputResourceFields.size() ); for ( ExcelField outputField : outputFields ) { assertTrue( outputResourceFields.contains( outputField.getName() ) ); } }
// Asserts the subject contains exactly the given elements. A null varargs array is
// treated as a single null element (a caller passing literal null). The boolean flag
// marks the one-argument-Iterable case so containsExactlyElementsIn can warn about
// the likely misuse of passing a collection where varargs were intended.
@CanIgnoreReturnValue public final Ordered containsExactly(@Nullable Object @Nullable ... varargs) { List<@Nullable Object> expected = (varargs == null) ? newArrayList((@Nullable Object) null) : asList(varargs); return containsExactlyElementsIn( expected, varargs != null && varargs.length == 1 && varargs[0] instanceof Iterable); }
// containsExactly(...).inOrder() passes when the iterable matches element-for-element.
@Test public void iterableContainsExactlyInOrder() { assertThat(asList(3, 2, 5)).containsExactly(3, 2, 5).inOrder(); }
// Recomputes and stores basic/fast stats for an unpartitioned table. Honors the
// transient DO_NOT_UPDATE_STATS table parameter (removed after reading), skips new
// directories and partitioned tables, and respects forceRecompute plus the
// DO_NOT_POPULATE_QUICK_STATS / STATS_GENERATED hints in the environment context.
public static void updateTableStatsSlow(Database db, Table tbl, Warehouse wh, boolean newDir, boolean forceRecompute, EnvironmentContext environmentContext) throws MetaException { // DO_NOT_UPDATE_STATS is supposed to be a transient parameter that is only passed via RPC // We want to avoid this property from being persistent. // // NOTE: If this property *is* set as table property we will remove it which is incorrect but // we can't distinguish between these two cases // // This problem was introduced by HIVE-10228. A better approach would be to pass the property // via the environment context. Map<String,String> params = tbl.getParameters(); boolean updateStats = true; if ((params != null) && params.containsKey(StatsSetupConst.DO_NOT_UPDATE_STATS)) { updateStats = !Boolean.valueOf(params.get(StatsSetupConst.DO_NOT_UPDATE_STATS)); params.remove(StatsSetupConst.DO_NOT_UPDATE_STATS); } if (!updateStats || newDir || tbl.getPartitionKeysSize() != 0) { return; } // If stats are already present and forceRecompute isn't set, nothing to do if (!forceRecompute && params != null && containsAllFastStats(params)) { return; } if (params == null) { params = new HashMap<>(); tbl.setParameters(params); } // The table location already exists and may contain data. // Let's try to populate those stats that don't require full scan. 
boolean populateQuickStats = !((environmentContext != null) && environmentContext.isSetProperties() && StatsSetupConst.TRUE.equals(environmentContext.getProperties() .get(StatsSetupConst.DO_NOT_POPULATE_QUICK_STATS))); if (populateQuickStats) { // NOTE: wh.getFileStatusesForUnpartitionedTable() can be REALLY slow List<FileStatus> fileStatus = wh.getFileStatusesForUnpartitionedTable(db, tbl); LOG.info("Updating table stats for {}", tbl.getTableName()); populateQuickStats(fileStatus, params); } LOG.info("Updated size of table {} to {}", tbl.getTableName(), params.get(StatsSetupConst.TOTAL_SIZE)); if (environmentContext != null && environmentContext.isSetProperties() && StatsSetupConst.TASK.equals(environmentContext.getProperties().get( StatsSetupConst.STATS_GENERATED))) { StatsSetupConst.setBasicStatsState(params, StatsSetupConst.TRUE); } else { StatsSetupConst.setBasicStatsState(params, StatsSetupConst.FALSE); } }
@Test public void testUpdateTableStatsSlow_removesDoNotUpdateStats() throws TException { // Create database and table Table tbl = new TableBuilder() .setDbName(DB_NAME) .setTableName(TABLE_NAME) .addCol("id", "int") .addTableParam(StatsSetupConst.DO_NOT_UPDATE_STATS, "true") .build(null); Table tbl1 = new TableBuilder() .setDbName(DB_NAME) .setTableName(TABLE_NAME) .addCol("id", "int") .addTableParam(StatsSetupConst.DO_NOT_UPDATE_STATS, "false") .build(null); Warehouse wh = mock(Warehouse.class); MetaStoreServerUtils.updateTableStatsSlow(db, tbl, wh, false, true, null); assertThat(tbl.getParameters(), is(Collections.emptyMap())); verify(wh, never()).getFileStatusesForUnpartitionedTable(db, tbl); MetaStoreServerUtils.updateTableStatsSlow(db, tbl1, wh, true, false, null); assertThat(tbl.getParameters(), is(Collections.emptyMap())); verify(wh, never()).getFileStatusesForUnpartitionedTable(db, tbl1); }
static String validatePathName(String toValidate) {
    // A valid path names a file that (1) carries the expected suffix and
    // (2) encodes a non-empty model name; otherwise reject with the file name.
    String fileName = getFileName(toValidate);
    if (!fileName.endsWith(FINAL_SUFFIX) || getModel(fileName).isEmpty()) {
        throw new KieEfestoCommonException("Wrong file name " + fileName);
    }
    return toValidate;
}
// A well-formed "<dirs>/file.model_json" path validates and is returned unchanged.
@Test void validatePathName() { String toValidate = String.format("%1$sthis%1$sis%1$svalid%1$sfile.model_json", File.separator); assertThat(IndexFile.validatePathName(toValidate)).isEqualTo(toValidate); }
// Reads delimiter and statistics-sample settings from the Configuration. Values
// already set programmatically via the setters are never overridden: the delimiter
// is only read while still at its default '\n', and the sample count only while
// still NUM_SAMPLES_UNDEFINED. A malformed sample count logs a warning and
// disables sampling (0) instead of failing.
@Override public void configure(Configuration parameters) { super.configure(parameters); // the if() clauses are to prevent the configure() method from // overwriting the values set by the setters if (Arrays.equals(delimiter, new byte[] {'\n'})) { String delimString = parameters.getString(RECORD_DELIMITER, null); if (delimString != null) { setDelimiter(delimString); } } // set the number of samples if (numLineSamples == NUM_SAMPLES_UNDEFINED) { String samplesString = parameters.getString(NUM_STATISTICS_SAMPLES, null); if (samplesString != null) { try { setNumLineSamples(Integer.parseInt(samplesString)); } catch (NumberFormatException e) { if (LOG.isWarnEnabled()) { LOG.warn( "Invalid value for number of samples to take: " + samplesString + ". Skipping sampling."); } setNumLineSamples(0); } } } }
// configure() must apply the delimiter from config, both the default "\n" and a later "&-&".
@Test void testConfigure() { Configuration cfg = new Configuration(); cfg.setString("delimited-format.delimiter", "\n"); format.configure(cfg); assertThat(new String(format.getDelimiter(), format.getCharset())).isEqualTo("\n"); cfg.setString("delimited-format.delimiter", "&-&"); format.configure(cfg); assertThat(new String(format.getDelimiter(), format.getCharset())).isEqualTo("&-&"); }
// Returns the plugin list carried by this event.
// NOTE(review): the internal list is returned directly (no defensive copy).
public List<PluginDO> getPlugins() { return plugins; }
// The event must expose exactly the plugin list it was constructed with, in order.
@Test void getPlugins() { List<PluginDO> plugins = batchSelectorDeletedEvent.getPlugins(); assertArrayEquals(pluginDOList.toArray(new PluginDO[0]), plugins.toArray(new PluginDO[0])); }
public Type getGELFType() { if (payload.length < Type.HEADER_SIZE) { throw new IllegalStateException("GELF message is too short. Not even the type header would fit."); } return Type.determineType(payload[0], payload[1]); }
// 0x78 0x9c is a zlib stream header; the detector must classify the payload as ZLIB.
@Test public void testGetGELFTypeDetectsZLIBCompressedMessage() throws Exception { byte[] fakeData = new byte[20]; fakeData[0] = (byte) 0x78; fakeData[1] = (byte) 0x9c; GELFMessage msg = new GELFMessage(fakeData); assertEquals(GELFMessage.Type.ZLIB, msg.getGELFType()); }
// Reads a length-prefixed JSON binlog value into a freshly allocated buffer,
// decodes it, and releases the buffer in a finally block so the reference count
// is balanced even when decoding throws.
@Override public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) { ByteBuf newlyByteBuf = payload.getByteBuf().readBytes(readLengthFromMeta(columnDef.getColumnMeta(), payload)); try { return MySQLJsonValueDecoder.decode(newlyByteBuf); } finally { newlyByteBuf.release(); } }
// Column meta 4 means a 4-byte length prefix; the sliced buffer decodes to the expected JSON.
@Test void assertReadJsonValueWithMeta4() { columnDef.setColumnMeta(4); when(payload.readInt4()).thenReturn(4); when(byteBuf.readBytes(4)).thenReturn(jsonValueByteBuf); assertThat(new MySQLJsonBinlogProtocolValue().read(columnDef, payload), is(EXPECTED_JSON)); }
public static boolean isNumber(String value) {
    // Delegates validation to Double.parseDouble. Note this therefore also accepts
    // forms like "NaN", "Infinity" and trailing type suffixes ("20d") per its grammar.
    try {
        Double.parseDouble(value);
        return true;
    } catch (NumberFormatException nfe) {
        return false;
    }
}
// Integers and decimals are accepted; alphabetic and mixed strings are rejected.
// (Double.parseDouble also accepts forms like "NaN"/"Infinity" — not covered here.)
@Test public void testIsNumber() { Assert.assertTrue(StringKit.isNumber("20")); Assert.assertTrue(StringKit.isNumber("20.1")); Assert.assertFalse(StringKit.isNumber("abc")); Assert.assertFalse(StringKit.isNumber("21w")); }
// Exposes the per-log-directory metadata properties map.
// NOTE(review): the internal map is returned directly (no copy).
public Map<String, MetaProperties> logDirProps() { return logDirProps; }
// FOO's metadata must cover exactly its two configured log directories.
@Test public void testLogDirPropsForFoo() { assertEquals(new HashSet<>(Arrays.asList("/tmp/dir4", "/tmp/dir5")), FOO.logDirProps().keySet()); }
// Builds the API-doc sextet for a method annotated with @ShenyuSpringWebSocketClient.
// Websocket endpoints carry no HTTP method (NOT_HTTP), use */* for both produce and
// consume, and a fixed "v0.01" version. Returns null when the annotation is absent
// so the caller can skip the method.
@Override protected Sextet<String[], String, String, ApiHttpMethodEnum[], RpcTypeEnum, String> buildApiDocSextet(final Method method, final Annotation annotation, final Map<String, Object> beans) { ShenyuSpringWebSocketClient shenyuSpringWebSocketClient = AnnotatedElementUtils.findMergedAnnotation(method, ShenyuSpringWebSocketClient.class); if (Objects.isNull(shenyuSpringWebSocketClient)) { return null; } String produce = ShenyuClientConstants.MEDIA_TYPE_ALL_VALUE; String consume = ShenyuClientConstants.MEDIA_TYPE_ALL_VALUE; String[] values = new String[]{shenyuSpringWebSocketClient.value()}; ApiHttpMethodEnum[] apiHttpMethodEnums = new ApiHttpMethodEnum[]{ApiHttpMethodEnum.NOT_HTTP}; String version = "v0.01"; return Sextet.with(values, consume, produce, apiHttpMethodEnums, RpcTypeEnum.WEB_SOCKET, version); }
// A method without the websocket client annotation must yield no sextet (null).
@Test public void testBuildApiDocSextet() throws NoSuchMethodException { Method method = MockClass.class.getDeclaredMethod("mockMethod"); ReflectionUtils.makeAccessible(method); assertNull(eventListener.buildApiDocSextet(method, mock(Annotation.class), Collections.emptyMap())); }
// Convenience overload for void operations: adapts the InvocationRaisingIOE into a
// null-returning callable and delegates to the value-returning retry() overload.
@Retries.RetryTranslated public void retry(String action, String path, boolean idempotent, Retried retrying, InvocationRaisingIOE operation) throws IOException { retry(action, path, idempotent, retrying, () -> { operation.apply(); return null; }); }
// A non-idempotent operation hitting a 400 must surface AWSBadRequestException (no retry).
@Test(expected = AWSBadRequestException.class) public void testRetryBadRequestNotIdempotent() throws Throwable { invoker.retry("test", null, false, () -> { throw BAD_REQUEST; }); }
// Persists a project's new-code definition at creation time. Community edition scopes
// the period to the main branch; the resolved value (if any) is attached, and
// definitions failing CaycUtils compliance are rejected with IAE before insert.
public void createNewCodeDefinition(DbSession dbSession, String projectUuid, String mainBranchUuid, String defaultBranchName, String newCodeDefinitionType, @Nullable String newCodeDefinitionValue) { boolean isCommunityEdition = editionProvider.get().filter(EditionProvider.Edition.COMMUNITY::equals).isPresent(); NewCodePeriodType newCodePeriodType = parseNewCodeDefinitionType(newCodeDefinitionType); NewCodePeriodDto dto = new NewCodePeriodDto(); dto.setType(newCodePeriodType); dto.setProjectUuid(projectUuid); if (isCommunityEdition) { dto.setBranchUuid(mainBranchUuid); } getNewCodeDefinitionValueProjectCreation(newCodePeriodType, newCodeDefinitionValue, defaultBranchName).ifPresent(dto::setValue); if (!CaycUtils.isNewCodePeriodCompliant(dto.getType(), dto.getValue())) { throw new IllegalArgumentException("Failed to set the New Code Definition. The given value is not compatible with the Clean as You Code methodology. " + "Please refer to the documentation for compliant options."); } dbClient.newCodePeriodDao().insert(dbSession, dto); }
// NUMBER_OF_DAYS without a value must fail with an IAE naming the missing parameter.
@Test public void createNewCodeDefinition_throw_IAE_if_no_value_for_days() { assertThatThrownBy(() -> newCodeDefinitionResolver.createNewCodeDefinition(dbSession, DEFAULT_PROJECT_ID, MAIN_BRANCH_UUID, MAIN_BRANCH, NUMBER_OF_DAYS.name(), null)) .isInstanceOf(IllegalArgumentException.class) .hasMessageContaining("New code definition type 'NUMBER_OF_DAYS' requires a newCodeDefinitionValue"); }
public V computeIfAbsent(CharSequence key, Supplier<V> valueSupplier) {
    // Adapt the Supplier to the Function-based overload; the key argument is unused.
    return computeIfAbsent(key, unusedKey -> valueSupplier.get());
}
// First call computes and stores the value; a second call with the same key must
// return the existing mapping and ignore the new mapping function.
@Test public void testComputeIfAbsent() { CharSequenceMap<String> map = CharSequenceMap.create(); String result1 = map.computeIfAbsent("key1", key -> "computedValue1"); assertThat(result1).isEqualTo("computedValue1"); assertThat(map).containsEntry("key1", "computedValue1"); // verify existing key is not affected String result2 = map.computeIfAbsent("key1", key -> "newValue"); assertThat(result2).isEqualTo("computedValue1"); assertThat(map).containsEntry("key1", "computedValue1"); }
@Override
public List<String> getServerList() {
    // Explicitly configured servers take precedence; fall back to the
    // endpoint-provided list only when none were configured.
    if (serverList.isEmpty()) {
        return serversFromEndpoint;
    }
    return serverList;
}
// An endpoint-backed server list must survive a failed (HTTP 500) refresh:
// the previously fetched address remains available.
@Test void testConstructWithEndpointAndRefreshException() throws InvocationTargetException, NoSuchMethodException, IllegalAccessException, NoSuchFieldException { Properties properties = new Properties(); properties.put(PropertyKeyConst.ENDPOINT, "127.0.0.1"); serverListManager = new ServerListManager(properties); List<String> serverList = serverListManager.getServerList(); assertEquals(1, serverList.size()); assertEquals("127.0.0.1:8848", serverList.get(0)); httpRestResult.setCode(500); mockThreadInvoke(serverListManager, true); serverList = serverListManager.getServerList(); assertEquals(1, serverList.size()); assertEquals("127.0.0.1:8848", serverList.get(0)); }
/**
 * Releases the lock, removes the backing lock file, and closes the channel.
 * The channel is closed in a finally block so the file descriptor is released
 * even when unlock() or the delete path throws (the original leaked it in
 * that case). Safe to call more than once.
 */
@Override
public synchronized void destroy() throws IOException {
    try {
        unlock();
        if (file.exists() && file.delete()) {
            LOGGER.trace("Deleted {}", file.getAbsolutePath());
        }
    } finally {
        channel.close();
    }
}
// destroy() must delete the lock file and be idempotent: a second call does not throw.
@Test void testDestroy() throws IOException { File tempFile = TestUtils.tempFile(); FileLock lock1 = new FileLock(tempFile); lock1.destroy(); assertFalse(tempFile.exists()); assertDoesNotThrow(lock1::destroy); }
@Udf
public String concat(@UdfParameter(
    description = "The varchar fields to concatenate") final String... inputs) {
  // A null varargs array yields null; individual null elements are skipped.
  if (inputs == null) {
    return null;
  }
  final StringBuilder joined = new StringBuilder();
  for (final String input : inputs) {
    if (input != null) {
      joined.append(input);
    }
  }
  return joined.toString();
}
// A varargs array of null elements (not a null array) collapses to an empty result
// for both the String and ByteBuffer overloads.
@Test public void shouldReturnEmptyIfAllInputsNull() { assertThat(udf.concat((String) null, null), is("")); assertThat(udf.concat((ByteBuffer) null, null), is(ByteBuffer.wrap(new byte[] {}))); }
public List<R> scanForResourcesInClasspathRoot(URI root, Predicate<String> packageFilter) {
    // Fail fast on null arguments before walking the classpath root.
    requireNonNull(root, "root must not be null");
    requireNonNull(packageFilter, "packageFilter must not be null");
    // Resources found under a classpath root resolve relative to that root.
    return findResourcesForUri(root, DEFAULT_PACKAGE_NAME, packageFilter, createClasspathRootResource());
}
// Scanning a jar classpath root yields classpath: URIs for both root-level and packaged resources.
@Test void scanForResourcesInClasspathRootJar() { URI classpathRoot = new File("src/test/resources/io/cucumber/core/resource/test/jar-resource.jar").toURI(); List<URI> resources = resourceScanner.scanForResourcesInClasspathRoot(classpathRoot, aPackage -> true); assertThat(resources, containsInAnyOrder( URI.create("classpath:jar-resource.txt"), URI.create("classpath:com/example/package-jar-resource.txt"))); }
// Executes a pre-built plan. KsqlStatementExceptions pass through unchanged (they
// already carry statement context); any other KsqlException is re-wrapped so the
// plan's statement text is attached for error reporting, preserving the cause.
@Override public ExecuteResult execute(final ServiceContext serviceContext, final ConfiguredKsqlPlan plan, final boolean restoreInProgress) { try { final ExecuteResult result = EngineExecutor .create(primaryContext, serviceContext, plan.getConfig()) .execute(plan.getPlan(), restoreInProgress); return result; } catch (final KsqlStatementException e) { throw e; } catch (final KsqlException e) { // add the statement text to the KsqlException throw new KsqlStatementException( e.getMessage(), e.getMessage(), plan.getPlan().getStatementText(), e.getCause() ); } }
// Referencing a quoted stream ("bar") without quotes must fail with a hint about
// double-quoting for case-sensitivity, and the statement text must be preserved.
@Test public void shouldShowHintWhenFailingToCreateQueryIfSelectingFromSourceNameWithQuotes() { // Given: setupKsqlEngineWithSharedRuntimeEnabled(); KsqlEngineTestUtil.execute( serviceContext, ksqlEngine, "create stream \"bar\" as select * from test1;", ksqlConfig, Collections.emptyMap() ); // When: final KsqlStatementException e = assertThrows( KsqlStatementException.class, () -> KsqlEngineTestUtil.execute( serviceContext, ksqlEngine, "select * from bar;", ksqlConfig, Collections.emptyMap() ) ); // Then: assertThat(e, rawMessage(is( "Exception while preparing statement: BAR does not exist.\n" + "Did you mean \"bar\"? Hint: wrap the source name in double quotes to make it case-sensitive."))); assertThat(e, statementText(is("select * from bar;"))); }
// Feature is available on Android 10 (API level Q, 29) and newer.
@Override public boolean isSupported() { return Build.VERSION.SDK_INT >= Build.VERSION_CODES.Q; }
// Expects unsupported (false).
// NOTE(review): outcome depends on the test runner's configured Android SDK level
// being below Q — confirm against the Robolectric config for this class.
@Test public void isSupported() { NubiaImpl nubia = new NubiaImpl(mApplication); Assert.assertFalse(nubia.isSupported()); }
// Convenience overload that delegates with a null third argument.
// NOTE(review): the meaning of the null parameter (likely default compiler
// settings) is defined by the target overload — confirm there.
public static Map<String, Class<?>> compile(Map<String, String> classNameSourceMap, ClassLoader classLoader) { return compile(classNameSourceMap, classLoader, null); }
// Compiling a source containing an inner class yields two Class objects:
// the outer class and Outer$Inner.
@Test public void compileInnerClass() throws Exception { Map<String, String> source = singletonMap("org.kie.memorycompiler.ExampleClass", EXAMPLE_INNER_CLASS); Map<String, Class<?>> compiled = KieMemoryCompiler.compile(source, this.getClass().getClassLoader()); assertThat(compiled.size()).isEqualTo(2); assertThat(compiled.get("org.kie.memorycompiler.ExampleClass")).isNotNull(); assertThat(compiled.get("org.kie.memorycompiler.ExampleClass$InnerClass")).isNotNull(); }
static String getScheme( String[] schemes, String fileName ) {
  // Return the first scheme whose "<scheme>:" prefix starts the file name, or null if none match.
  for ( String scheme : schemes ) {
    final String prefix = scheme + ":";
    if ( fileName.startsWith( prefix ) ) {
      return scheme;
    }
  }
  return null;
}
// An hdfs:// URL does not match the lone "file" scheme, so no scheme is resolved.
@Test public void testCheckForSchemeFail() { String[] schemes = {"file"}; String vfsFilename = "hdfs://hsbcmaster:8020/tmp/acltest/"; assertNull( KettleVFS.getScheme( schemes, vfsFilename ) ); }
// Synchronous TTL put: delegates to putAsync and blocks for the previous value.
@Override public V put(K key, V value, Duration ttl) { return get(putAsync(key, value, ttl)); }
// Every TTL-put entry must be written through to the backing store map.
@Test public void testWriterPutTTL() { Map<String, String> store = new HashMap<>(); RMapCacheNative<String, String> map = (RMapCacheNative<String, String>) getWriterTestMap("test", store); map.put("1", "11", Duration.ofSeconds(10)); map.put("2", "22", Duration.ofSeconds(10)); map.put("3", "33", Duration.ofSeconds(10)); Map<String, String> expected = new HashMap<>(); expected.put("1", "11"); expected.put("2", "22"); expected.put("3", "33"); assertThat(store).isEqualTo(expected); map.destroy(); }
// Imports NetDrive 2 bookmarks from a JSON array of connection objects. Each entry
// needs a non-blank "url", a "user" and a recognized "type" ("google_cloud_storage"
// and "gdrive" map to dedicated protocol types, anything else is looked up by name);
// "ssl" upgrades ftp->ftps and dav->davs. Entries failing these checks — and URLs
// the HostParser rejects — are skipped rather than aborting the whole import; only
// malformed JSON/IO errors abort with LocalAccessDeniedException.
@Override protected void parse(final ProtocolFactory protocols, final Local file) throws AccessDeniedException { try { final JsonReader reader = new JsonReader(new InputStreamReader(file.getInputStream(), StandardCharsets.UTF_8)); reader.beginArray(); String url; String user; boolean ssl; Protocol protocol; while(reader.hasNext()) { reader.beginObject(); boolean skip = false; url = null; ssl = false; protocol = null; user = null; while(reader.hasNext()) { final String name = reader.nextName(); switch(name) { case "url": url = this.readNext(name, reader); if(StringUtils.isBlank(url)) { skip = true; } break; case "ssl": ssl = reader.nextBoolean(); break; case "user": user = this.readNext(name, reader); break; case "type": final String type = this.readNext(name, reader); switch(type) { case "google_cloud_storage": protocol = protocols.forType(Protocol.Type.googlestorage); break; case "gdrive": protocol = protocols.forType(Protocol.Type.googledrive); break; default: protocol = protocols.forName(type); } break; default: log.warn(String.format("Ignore property %s", name)); reader.skipValue(); break; } } reader.endObject(); if(!skip && protocol != null && StringUtils.isNotBlank(user)) { if(ssl) { switch(protocol.getType()) { case ftp: protocol = protocols.forScheme(Scheme.ftps); break; case dav: protocol = protocols.forScheme(Scheme.davs); break; } } try { this.add(new HostParser(protocols, protocol).get(url)); } catch(HostParserException e) { log.warn(e); } } } reader.endArray(); } catch(IllegalStateException | IOException e) { throw new LocalAccessDeniedException(e.getMessage(), e); } }
// Parsing the bundled drives.dat fixture must yield exactly one bookmark.
@Test public void testParse() throws AccessDeniedException { NetDrive2BookmarkCollection c = new NetDrive2BookmarkCollection(); assertEquals(0, c.size()); c.parse(new ProtocolFactory(Collections.singleton(new TestProtocol(Scheme.sftp))), new Local("src/test/resources/drives.dat")); assertEquals(1, c.size()); }
// Resolves the table schema for this SerDe in priority order:
//   (1) a schema serialized into serDeProperties (execution-engine path),
//   (2) the live Iceberg table (query compilation path),
//   (3) a provided metadata_location (which must not be combined with explicit columns),
//   (4) the Hive schema derived from the CREATE TABLE statement.
// Also derives the partition column names for each path and forces sorted dynamic
// partitioning so the ClusteredWriter receives partition-ordered records.
@Override public void initialize(@Nullable Configuration configuration, Properties serDeProperties, Properties partitionProperties) throws SerDeException { super.initialize(configuration, serDeProperties, partitionProperties); // HiveIcebergSerDe.initialize is called multiple places in Hive code: // - When we are trying to create a table - HiveDDL data is stored at the serDeProperties, but no Iceberg table // is created yet. // - When we are compiling the Hive query on HiveServer2 side - We only have table information (location/name), // and we have to read the schema using the table data. This is called multiple times so there is room for // optimizing here. // - When we are executing the Hive query in the execution engine - We do not want to load the table data on every // executor, but serDeProperties are populated by HiveIcebergStorageHandler.configureInputJobProperties() and // the resulting properties are serialized and distributed to the executors if (serDeProperties.get(InputFormatConfig.TABLE_SCHEMA) != null) { this.tableSchema = SchemaParser.fromJson(serDeProperties.getProperty(InputFormatConfig.TABLE_SCHEMA)); if (serDeProperties.get(InputFormatConfig.PARTITION_SPEC) != null) { PartitionSpec spec = PartitionSpecParser.fromJson(tableSchema, serDeProperties.getProperty(InputFormatConfig.PARTITION_SPEC)); this.partitionColumns = spec.fields().stream().map(PartitionField::name).collect(Collectors.toList()); } else { this.partitionColumns = ImmutableList.of(); } } else { try { Table table = IcebergTableUtil.getTable(configuration, serDeProperties); // always prefer the original table schema if there is one this.tableSchema = table.schema(); this.partitionColumns = table.spec().fields().stream().map(PartitionField::name).collect(Collectors.toList()); LOG.info("Using schema from existing table {}", SchemaParser.toJson(tableSchema)); } catch (Exception e) { // During table creation we might not have the schema information from the Iceberg table, nor from the HMS 
// table. In this case we have to generate the schema using the serdeProperties which contains the info // provided in the CREATE TABLE query. if (serDeProperties.get("metadata_location") != null) { // If metadata location is provided, extract the schema details from it. try (FileIO fileIO = new HadoopFileIO(configuration)) { TableMetadata metadata = TableMetadataParser.read(fileIO, serDeProperties.getProperty("metadata_location")); this.tableSchema = metadata.schema(); this.partitionColumns = metadata.spec().fields().stream().map(PartitionField::name).collect(Collectors.toList()); // Validate no schema is provided via create command if (!getColumnNames().isEmpty() || !getPartitionColumnNames().isEmpty()) { throw new SerDeException("Column names can not be provided along with metadata location."); } } } else { boolean autoConversion = configuration.getBoolean(InputFormatConfig.SCHEMA_AUTO_CONVERSION, false); // If we can not load the table try the provided hive schema this.tableSchema = hiveSchemaOrThrow(e, autoConversion); // This is only for table creation, it is ok to have an empty partition column list this.partitionColumns = ImmutableList.of(); } if (e instanceof NoSuchTableException && HiveTableUtil.isCtas(serDeProperties) && !Catalogs.hiveCatalog(configuration, serDeProperties)) { throw new SerDeException(CTAS_EXCEPTION_MSG); } } } this.projectedSchema = projectedSchema(configuration, serDeProperties.getProperty(Catalogs.NAME), tableSchema, jobConf); // Currently ClusteredWriter is used which requires that records are ordered by partition keys. // Here we ensure that SortedDynPartitionOptimizer will kick in and do the sorting. 
// TODO: remove once we have both Fanout and ClusteredWriter available: HIVE-25948 HiveConf.setIntVar(configuration, HiveConf.ConfVars.HIVE_OPT_SORT_DYNAMIC_PARTITION_THRESHOLD, 1); HiveConf.setVar(configuration, HiveConf.ConfVars.DYNAMIC_PARTITIONING_MODE, "nonstrict"); try { this.inspector = IcebergObjectInspector.create(projectedSchema); } catch (Exception e) { throw new SerDeException(e); } }
@Test public void testInitialize() throws IOException, SerDeException { File location = tmp.newFolder(); Assert.assertTrue(location.delete()); Configuration conf = new Configuration(); Properties properties = new Properties(); properties.setProperty("location", location.toString()); properties.setProperty(InputFormatConfig.CATALOG_NAME, Catalogs.ICEBERG_HADOOP_TABLE_NAME); HadoopTables tables = new HadoopTables(conf); tables.create(schema, location.toString()); HiveIcebergSerDe serDe = new HiveIcebergSerDe(); serDe.initialize(conf, properties, new Properties()); Assert.assertEquals(IcebergObjectInspector.create(schema), serDe.getObjectInspector()); }
@Override public int choosePartition(Message msg, TopicMetadata metadata) { // if key is specified, we should use key as routing; // if key is not specified and no sequence id is provided, not an effectively-once publish, use the default // round-robin routing. if (msg.hasKey() || msg.getSequenceId() < 0) { // TODO: the message key routing is problematic at this moment. // https://github.com/apache/pulsar/pull/1029 is fixing that. return super.choosePartition(msg, metadata); } // if there is no key and sequence id is provided, it is an effectively-once publish, we need to ensure // for a given message it always go to one partition, so we use sequence id to do a deterministic routing. return (int) (msg.getSequenceId() % metadata.numPartitions()); }
@Test public void testChoosePartitionWithKeySequenceId() { String key1 = "key1"; String key2 = "key2"; Message<?> msg1 = mock(Message.class); when(msg1.hasKey()).thenReturn(true); when(msg1.getKey()).thenReturn(key1); // make sure sequence id is different from hashcode, so the test can be tested correctly. when(msg1.getSequenceId()).thenReturn((long) ((key1.hashCode() % 100) + 1)); Message<?> msg2 = mock(Message.class); when(msg2.hasKey()).thenReturn(true); when(msg2.getKey()).thenReturn(key2); when(msg1.getSequenceId()).thenReturn((long) ((key2.hashCode() % 100) + 1)); Clock clock = mock(Clock.class); FunctionResultRouter router = new FunctionResultRouter(0, clock); TopicMetadata metadata = mock(TopicMetadata.class); when(metadata.numPartitions()).thenReturn(100); assertEquals(hash.makeHash(key1) % 100, router.choosePartition(msg1, metadata)); assertEquals(hash.makeHash(key2) % 100, router.choosePartition(msg2, metadata)); }
public static void validate(BugPattern pattern) throws ValidationException { if (pattern == null) { throw new ValidationException("No @BugPattern provided"); } // name must not contain spaces if (CharMatcher.whitespace().matchesAnyOf(pattern.name())) { throw new ValidationException("Name must not contain whitespace: " + pattern.name()); } // linkType must be consistent with link element. switch (pattern.linkType()) { case CUSTOM: if (pattern.link().isEmpty()) { throw new ValidationException("Expected a custom link but none was provided"); } break; case AUTOGENERATED: case NONE: if (!pattern.link().isEmpty()) { throw new ValidationException("Expected no custom link but found: " + pattern.link()); } break; } }
@Test public void basicBugPattern() throws Exception { @BugPattern( name = "BasicBugPattern", summary = "Simplest possible BugPattern", explanation = "Simplest possible BugPattern ", severity = SeverityLevel.ERROR) final class BugPatternTestClass {} BugPattern annotation = BugPatternTestClass.class.getAnnotation(BugPattern.class); BugPatternValidator.validate(annotation); }
public long onStatusMessage( final StatusMessageFlyweight flyweight, final InetSocketAddress receiverAddress, final long senderLimit, final int initialTermId, final int positionBitsToShift, final long timeNs) { return processStatusMessage( flyweight, senderLimit, initialTermId, positionBitsToShift, timeNs, matchesTag(flyweight)); }
@Test void shouldClampToSenderLimitUntilMinimumGroupSizeIsMet() { final UdpChannel channelGroupSizeThree = UdpChannel.parse( "aeron:udp?endpoint=224.20.30.39:24326|interface=localhost|fc=tagged,g:123/3"); flowControl.initialize( newContext(), countersManager, channelGroupSizeThree, 0, 0, 0, 0, 0); final long groupTag = 123L; final long senderLimit = 5000L; final int termOffset = 10_000; assertEquals(senderLimit, onStatusMessage(flowControl, 0, termOffset, senderLimit, null)); assertEquals(senderLimit, onIdle(flowControl, senderLimit)); assertEquals(senderLimit, onStatusMessage(flowControl, 1, termOffset, senderLimit, groupTag)); assertEquals(senderLimit, onIdle(flowControl, senderLimit)); assertEquals(senderLimit, onStatusMessage(flowControl, 2, termOffset, senderLimit, groupTag)); assertEquals(senderLimit, onIdle(flowControl, senderLimit)); assertEquals(senderLimit, onStatusMessage(flowControl, 3, termOffset, senderLimit, null)); assertEquals(senderLimit, onIdle(flowControl, senderLimit)); assertEquals(termOffset + WINDOW_LENGTH, onStatusMessage(flowControl, 4, termOffset, senderLimit, groupTag)); assertEquals(termOffset + WINDOW_LENGTH, onIdle(flowControl, senderLimit)); }
public void addOperator(Transformation<?> transformation) { checkNotNull(transformation, "transformation must not be null."); this.transformations.add(transformation); }
@Test void testAddOperator() { ExecutionEnvironmentImpl env = new ExecutionEnvironmentImpl( new DefaultExecutorServiceLoader(), new Configuration(), null); TestingTransformation<Integer> t1 = new TestingTransformation<>("t1", Types.INT, 10); TestingTransformation<String> t2 = new TestingTransformation<>("t2", Types.STRING, 5); env.addOperator(t1); env.addOperator(t2); assertThat(env.getTransformations()).containsExactly(t1, t2); }
public static String getParent(String url) { String ensUrl = url != null ? url.trim() : ""; if (ensUrl.equals(".") || !ensUrl.contains(".")) { return null; } return ensUrl.substring(ensUrl.indexOf(".") + 1); }
@Test void getParentWhenSuccess() { assertEquals("offchainexample.eth", EnsUtils.getParent("1.offchainexample.eth")); assertEquals("eth", EnsUtils.getParent("offchainexample.eth")); }
public int size() { return pfbdata.length; }
@Test void testPfb() throws IOException { Type1Font font; try (InputStream is = new FileInputStream("target/fonts/OpenSans-Regular.pfb")) { font = Type1Font.createWithPFB(is); } Assertions.assertEquals("1.10", font.getVersion()); Assertions.assertEquals("OpenSans-Regular", font.getFontName()); Assertions.assertEquals("Open Sans Regular", font.getFullName()); Assertions.assertEquals("Open Sans", font.getFamilyName()); Assertions.assertEquals("Digitized data copyright (c) 2010-2011, Google Corporation.", font.getNotice()); Assertions.assertEquals(false, font.isFixedPitch()); Assertions.assertEquals(false, font.isForceBold()); Assertions.assertEquals(0, font.getItalicAngle()); Assertions.assertEquals("Book", font.getWeight()); Assertions.assertTrue(font.getEncoding() instanceof BuiltInEncoding); Assertions.assertEquals(4498, font.getASCIISegment().length); Assertions.assertEquals(95911, font.getBinarySegment().length); Assertions.assertEquals(938, font.getCharStringsDict().size()); for (String s : font.getCharStringsDict().keySet()) { Assertions.assertNotNull(font.getPath(s)); Assertions.assertTrue(font.hasGlyph(s)); } }
@Override public Serde<List<?>> getSerde( final PersistenceSchema schema, final Map<String, String> formatProperties, final KsqlConfig ksqlConfig, final Supplier<SchemaRegistryClient> srClientFactory, final boolean isKey) { FormatProperties.validateProperties(name(), formatProperties, getSupportedProperties()); SerdeUtils.throwOnUnsupportedFeatures(schema.features(), supportedFeatures()); if (!schema.columns().isEmpty()) { throw new KsqlException("The '" + NAME + "' format can only be used when no columns are defined. Got: " + schema.columns()); } return new KsqlVoidSerde<>(); }
@Test(expected = KsqlException.class) public void shouldThrowOnUnsupportedProps() { // Given: formatProps = ImmutableMap.of("some", "prop"); // When: format.getSerde(schema, formatProps, ksqlConfig, srClientFactory, false); }
@Override public void write(DataOutput out) throws IOException { String json = GsonUtils.GSON.toJson(this, AlterJobV2.class); Text.writeString(out, json); }
@Test public void testSerializeOfSchemaChangeJob() throws IOException { // prepare file File file = new File(TEST_FILE_NAME); file.createNewFile(); file.deleteOnExit(); DataOutputStream out = new DataOutputStream(new FileOutputStream(file)); SchemaChangeJobV2 schemaChangeJobV2 = new SchemaChangeJobV2(1, 1, 1, "test", 600000); Deencapsulation.setField(schemaChangeJobV2, "jobState", AlterJobV2.JobState.FINISHED); Map<Long, SchemaVersionAndHash> indexSchemaVersionAndHashMap = Maps.newHashMap(); indexSchemaVersionAndHashMap.put(Long.valueOf(1000), new SchemaVersionAndHash(10, 20)); Deencapsulation.setField(schemaChangeJobV2, "indexSchemaVersionAndHashMap", indexSchemaVersionAndHashMap); // write schema change job schemaChangeJobV2.write(out); out.flush(); out.close(); // read objects from file DataInputStream in = new DataInputStream(new FileInputStream(file)); SchemaChangeJobV2 result = (SchemaChangeJobV2) AlterJobV2.read(in); Assert.assertEquals(1, result.getJobId()); Assert.assertEquals(AlterJobV2.JobState.FINISHED, result.getJobState()); Assert.assertNotNull(Deencapsulation.getField(result, "physicalPartitionIndexMap")); Assert.assertNotNull(Deencapsulation.getField(result, "physicalPartitionIndexTabletMap")); Map<Long, SchemaVersionAndHash> map = Deencapsulation.getField(result, "indexSchemaVersionAndHashMap"); Assert.assertEquals(10, map.get(1000L).schemaVersion); Assert.assertEquals(20, map.get(1000L).schemaHash); }
abstract public <T extends ComponentRoot> T get(Class<T> providerId);
@Test public void testAppRoot_withComponentRoot() { LocalComponentIdFoo retrieved = new ReflectiveAppRoot() .get(ComponentFoo.class) .get("fileName", "name", "secondName"); assertThat(retrieved).isNotNull(); }
/**
 * Evaluates an S/MIME assertion against a sampled mail message.
 *
 * <p>Locates the target MIME message in the sample (supporting negative indices
 * counted from the end), detects whether it is signed, and compares that against
 * the element's expectations, optionally verifying the signature itself.
 *
 * @param testElement assertion configuration (expected signed state, signer checks)
 * @param response sample result containing the mail message(s)
 * @param name name used for the returned assertion result
 * @return the assertion result; marked failed/error on mismatch or parse problems
 */
public static AssertionResult getResult(SMIMEAssertionTestElement testElement, SampleResult response, String name) {
    checkForBouncycastle();
    AssertionResult res = new AssertionResult(name);
    try {
        MimeMessage msg;
        final int msgPos = testElement.getSpecificMessagePositionAsInt();
        if (msgPos < 0){ // means counting from end
            SampleResult[] subResults = response.getSubResults();
            final int pos = subResults.length + msgPos;
            log.debug("Getting message number: {} of {}", pos, subResults.length);
            msg = getMessageFromResponse(response,pos);
        } else {
            log.debug("Getting message number: {}", msgPos);
            msg = getMessageFromResponse(response, msgPos);
        }
        SMIMESignedParser signedParser = null;
        if(log.isDebugEnabled()) {
            log.debug("Content-type: {}", msg.getContentType());
        }
        // A signature can be carried either as multipart/signed or pkcs7-mime.
        if (msg.isMimeType("multipart/signed")) { // $NON-NLS-1$
            MimeMultipart multipart = (MimeMultipart) msg.getContent();
            signedParser = new SMIMESignedParser(new BcDigestCalculatorProvider(), multipart);
        } else if (msg.isMimeType("application/pkcs7-mime") // $NON-NLS-1$
                || msg.isMimeType("application/x-pkcs7-mime")) { // $NON-NLS-1$
            signedParser = new SMIMESignedParser(new BcDigestCalculatorProvider(), msg);
        }
        if (null != signedParser) {
            log.debug("Found signature");
            if (testElement.isNotSigned()) {
                // Message is signed although the assertion expects it unsigned.
                res.setFailure(true);
                res.setFailureMessage("Mime message is signed");
            } else if (testElement.isVerifySignature() || !testElement.isSignerNoCheck()) {
                // Delegate cryptographic verification / signer checks.
                res = verifySignature(testElement, signedParser, name);
            }
        } else {
            log.debug("Did not find signature");
            if (!testElement.isNotSigned()) {
                // Message is unsigned although the assertion expects a signature.
                res.setFailure(true);
                res.setFailureMessage("Mime message is not signed");
            }
        }
    } catch (MessagingException e) {
        String msg = "Cannot parse mime msg: " + e.getMessage();
        log.warn(msg, e);
        res.setFailure(true);
        res.setFailureMessage(msg);
    } catch (CMSException e) {
        res.setFailure(true);
        res.setFailureMessage("Error reading the signature: " + e.getMessage());
    } catch (SMIMEException e) {
        res.setFailure(true);
        res.setFailureMessage("Cannot extract signed body part from signature: " + e.getMessage());
    } catch (IOException e) { // should never happen
        log.error("Cannot read mime message content: {}", e.getMessage(), e);
        res.setError(true);
        res.setFailureMessage(e.getMessage());
    }
    return res;
}
@Test public void testSignerCert() throws Exception { SMIMEAssertionTestElement testElement = new SMIMEAssertionTestElement(); testElement.setSignerCheckConstraints(true); testElement.setSignerCheckByFile(true); testElement.setSignerCertFile(new File(getClass().getResource("email.pem").toURI()).getAbsolutePath()); AssertionResult result = SMIMEAssertion.getResult(testElement, parent, "Test"); assertFalse(result.isError(), "Result should not be an error"); assertFalse(result.isFailure(), "Result should not fail: " + result.getFailureMessage()); }
public Optional<GroupConfig> groupConfig(String groupId) { return Optional.ofNullable(configMap.get(groupId)); }
@Test public void testGetNonExistentGroupConfig() { Optional<GroupConfig> groupConfig = configManager.groupConfig("foo"); assertFalse(groupConfig.isPresent()); }
private ApolloAuditSpan getActiveSpanFromContext() { return scopeManager().activeSpan(); }
@Test public void testGetActiveSpanFromContext() { ApolloAuditSpan activeSpan = Mockito.mock(ApolloAuditSpan.class); { Mockito.when(manager.activeSpan()).thenReturn(activeSpan); } ApolloAuditSpan get = tracer.getActiveSpan(); assertEquals(activeSpan, get); }
@Override public List<PortStatistics> getPortDeltaStatistics(DeviceId deviceId) { checkNotNull(deviceId, DEVICE_NULL); // TODO not supported at the moment. return ImmutableList.of(); }
@Test(expected = NullPointerException.class) public void testGetPortsDeltaStatisticsByNullId() { manager.registerTenantId(TenantId.tenantId(tenantIdValue1)); VirtualNetwork virtualNetwork = manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1)); DeviceService deviceService = manager.get(virtualNetwork.id(), DeviceService.class); // test the getPortDeltaStatistics() method using a null device identifier deviceService.getPortDeltaStatistics(null); }
public Map<String, String> build() { Map<String, String> builder = new HashMap<>(); configureFileSystem(builder); configureNetwork(builder); configureCluster(builder); configureSecurity(builder); configureOthers(builder); LOGGER.info("Elasticsearch listening on [HTTP: {}:{}, TCP: {}:{}]", builder.get(ES_HTTP_HOST_KEY), builder.get(ES_HTTP_PORT_KEY), builder.get(ES_TRANSPORT_HOST_KEY), builder.get(ES_TRANSPORT_PORT_KEY)); return builder; }
@Test @UseDataProvider("clusterEnabledOrNot") public void disable_seccomp_filter_if_configured_in_search_additional_props(boolean clusterEnabled) throws Exception { Props props = minProps(clusterEnabled); props.set("sonar.search.javaAdditionalOpts", "-Xmx1G -Dbootstrap.system_call_filter=false -Dfoo=bar"); Map<String, String> settings = new EsSettings(props, new EsInstallation(props), system).build(); assertThat(settings).containsEntry("bootstrap.system_call_filter", "false"); }
@Operation(summary = "viewVariables", description = "QUERY_PROCESS_INSTANCE_GLOBAL_VARIABLES_AND_LOCAL_VARIABLES_NOTES") @Parameters({ @Parameter(name = "id", description = "PROCESS_INSTANCE_ID", required = true, schema = @Schema(implementation = int.class, example = "100")) }) @GetMapping(value = "/{id}/view-variables") @ResponseStatus(HttpStatus.OK) @ApiException(Status.QUERY_PROCESS_INSTANCE_ALL_VARIABLES_ERROR) public Result viewVariables(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @Parameter(name = "projectCode", description = "PROJECT_CODE", required = true) @PathVariable long projectCode, @PathVariable("id") Integer id) { Map<String, Object> result = processInstanceService.viewVariables(projectCode, id); return returnDataList(result); }
@Test public void testViewVariables() throws Exception { Map<String, Object> mockResult = new HashMap<>(); mockResult.put(Constants.STATUS, Status.SUCCESS); Mockito.when(processInstanceService.viewVariables(1113L, 123)).thenReturn(mockResult); MvcResult mvcResult = mockMvc .perform(get("/projects/{projectCode}/process-instances/{id}/view-variables", "1113", "123") .header(SESSION_ID, sessionId)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assertions.assertNotNull(result); Assertions.assertEquals(Status.SUCCESS.getCode(), result.getCode().intValue()); }
public static BytesInput fromUnsignedVarInt(int intValue) { return new UnsignedVarIntBytesInput(intValue); }
@Test public void testFromUnsignedVarInt() throws IOException { int value = RANDOM.nextInt(Short.MAX_VALUE); ByteArrayOutputStream baos = new ByteArrayOutputStream(2); BytesUtils.writeUnsignedVarInt(value, baos); byte[] data = baos.toByteArray(); Supplier<BytesInput> factory = () -> BytesInput.fromUnsignedVarInt(value); validate(data, factory); }
public static String get(@NonNull SymbolRequest request) { String name = request.getName(); String title = request.getTitle(); String tooltip = request.getTooltip(); String htmlTooltip = request.getHtmlTooltip(); String classes = request.getClasses(); String pluginName = request.getPluginName(); String id = request.getId(); String identifier = (pluginName == null || pluginName.isBlank()) ? "core" : pluginName; String symbol = SYMBOLS .computeIfAbsent(identifier, key -> new ConcurrentHashMap<>()) .computeIfAbsent(name, key -> loadSymbol(identifier, key)); if ((tooltip != null && !tooltip.isBlank()) && (htmlTooltip == null || htmlTooltip.isBlank())) { symbol = symbol.replaceAll("<svg", Matcher.quoteReplacement("<svg tooltip=\"" + Functions.htmlAttributeEscape(tooltip) + "\"")); } if (htmlTooltip != null && !htmlTooltip.isBlank()) { symbol = symbol.replaceAll("<svg", Matcher.quoteReplacement("<svg data-html-tooltip=\"" + Functions.htmlAttributeEscape(htmlTooltip) + "\"")); } if (id != null && !id.isBlank()) { symbol = symbol.replaceAll("<svg", Matcher.quoteReplacement("<svg id=\"" + Functions.htmlAttributeEscape(id) + "\"")); } if (classes != null && !classes.isBlank()) { symbol = symbol.replaceAll("<svg", "<svg class=\"" + Functions.htmlAttributeEscape(classes) + "\""); } if (title != null && !title.isBlank()) { symbol = "<span class=\"jenkins-visually-hidden\">" + Util.xmlEscape(title) + "</span>" + symbol; } return symbol; }
@Test @DisplayName("When omitting tooltip from attributes, the symbol should not have a tooltip") void getSymbol_notSettingTooltipDoesntAddTooltipAttribute() { String symbol = Symbol.get(new SymbolRequest.Builder() .withName("science") .withTitle("Title") .withClasses("class1 class2") .withId("id") .build() ); assertThat(symbol, containsString(SCIENCE_PATH)); assertThat(symbol, not(containsString("tooltip"))); }
public static <InputT> KeyByBuilder<InputT> of(PCollection<InputT> input) { return named(null).of(input); }
@Test @SuppressWarnings("unchecked") public void testTypeHints_typePropagation() { final PCollection<String> dataset = TestUtils.createMockDataset(TypeDescriptors.strings()); final TypeDescriptor<String> keyType = TypeDescriptors.strings(); final TypeDescriptor<Long> valueType = TypeDescriptors.longs(); final TypeDescriptor<Long> outputType = TypeDescriptors.longs(); final PCollection<KV<String, Long>> reduced = ReduceByKey.of(dataset) .keyBy(s -> s, keyType) .valueBy(s -> 1L, valueType) .combineBy(Sums.ofLongs()) .output(); final ReduceByKey reduce = (ReduceByKey) TestUtils.getProducer(reduced); TypePropagationAssert.assertOperatorTypeAwareness(reduce, keyType, valueType, outputType); }
public static CombineIndicator minus(Indicator<Num> indicatorLeft, Indicator<Num> indicatorRight) { return new CombineIndicator(indicatorLeft, indicatorRight, Num::minus); }
@Test public void testDifferenceIndicator() { Function<Number, Num> numFunction = DecimalNum::valueOf; BarSeries series = new BaseBarSeries(); FixedIndicator<Num> mockIndicator = new FixedIndicator<Num>(series, numFunction.apply(-2.0), numFunction.apply(0.00), numFunction.apply(1.00), numFunction.apply(2.53), numFunction.apply(5.87), numFunction.apply(6.00), numFunction.apply(10.0)); ConstantIndicator<Num> constantIndicator = new ConstantIndicator<Num>(series, numFunction.apply(6)); CombineIndicator differenceIndicator = CombineIndicator.minus(constantIndicator, mockIndicator); assertNumEquals("8", differenceIndicator.getValue(0)); assertNumEquals("6", differenceIndicator.getValue(1)); assertNumEquals("5", differenceIndicator.getValue(2)); assertNumEquals("3.47", differenceIndicator.getValue(3)); assertNumEquals("0.13", differenceIndicator.getValue(4)); assertNumEquals("0", differenceIndicator.getValue(5)); assertNumEquals("-4", differenceIndicator.getValue(6)); }
@Override public int read(char[] cbuf, int off, int len) throws IOException { int read = in.read(cbuf, off, len); if (read > 0) { nonXmlCharFilterer.filter(cbuf, off, read); } return read; }
@Test public void testReadEOS() throws IOException { char[] buffer = new char[10]; when(readerMock.read(any(char[].class), anyInt(), anyInt())).thenReturn(-1); int result = nonXmlFilterReader.read(buffer, 3, 5); assertEquals(-1, result, "Unexpected number of chars read"); assertArrayEquals(new char[] { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, buffer, "Buffer should not have been affected"); }
@Override public Set<Algorithm> getKeys(final Path file, final LoginCallback prompt) throws BackgroundException { return new HashSet<>(Arrays.asList(Algorithm.NONE, SSE_AES256)); }
@Test public void testGetAlgorithms() throws Exception { assertEquals(2, new S3EncryptionFeature(session, new S3AccessControlListFeature(session)).getKeys( new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.volume, Path.Type.directory)), new DisabledLoginCallback()).size()); }
@Override public AppAttemptsInfo getAppAttempts(HttpServletRequest hsr, String appId) { try { long startTime = Time.now(); DefaultRequestInterceptorREST interceptor = getOrCreateInterceptorByAppId(appId); AppAttemptsInfo appAttemptsInfo = interceptor.getAppAttempts(hsr, appId); if (appAttemptsInfo != null) { long stopTime = Time.now(); routerMetrics.succeededAppAttemptsRetrieved(stopTime - startTime); RouterAuditLogger.logSuccess(getUser().getShortUserName(), GET_APPLICATION_ATTEMPTS, TARGET_WEB_SERVICE); return appAttemptsInfo; } } catch (IllegalArgumentException e) { routerMetrics.incrAppAttemptsFailedRetrieved(); RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_APPLICATION_ATTEMPTS, UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage()); RouterServerUtil.logAndThrowRunTimeException(e, "Unable to get the AppAttempt appId: %s.", appId); } catch (YarnException e) { routerMetrics.incrAppAttemptsFailedRetrieved(); RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_APPLICATION_ATTEMPTS, UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage()); RouterServerUtil.logAndThrowRunTimeException("getAppAttempts error.", e); } routerMetrics.incrAppAttemptsFailedRetrieved(); RouterAuditLogger.logFailure(getUser().getShortUserName(), GET_APPLICATION_ATTEMPTS, UNKNOWN, TARGET_WEB_SERVICE, "getAppAttempts Failed."); throw new RuntimeException("getAppAttempts Failed."); }
@Test public void testGetAppAttempts() throws IOException, InterruptedException { // Submit application to multiSubCluster ApplicationId appId = ApplicationId.newInstance(Time.now(), 1); ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo(); context.setApplicationId(appId.toString()); Assert.assertNotNull(interceptor.submitApplication(context, null)); AppAttemptsInfo appAttemptsInfo = interceptor.getAppAttempts(null, appId.toString()); Assert.assertNotNull(appAttemptsInfo); ArrayList<AppAttemptInfo> attemptLists = appAttemptsInfo.getAttempts(); Assert.assertNotNull(appAttemptsInfo); Assert.assertEquals(2, attemptLists.size()); AppAttemptInfo attemptInfo1 = attemptLists.get(0); Assert.assertNotNull(attemptInfo1); Assert.assertEquals(0, attemptInfo1.getAttemptId()); Assert.assertEquals("AppAttemptId_0", attemptInfo1.getAppAttemptId()); Assert.assertEquals("LogLink_0", attemptInfo1.getLogsLink()); Assert.assertEquals(1659621705L, attemptInfo1.getFinishedTime()); AppAttemptInfo attemptInfo2 = attemptLists.get(1); Assert.assertNotNull(attemptInfo2); Assert.assertEquals(0, attemptInfo2.getAttemptId()); Assert.assertEquals("AppAttemptId_1", attemptInfo2.getAppAttemptId()); Assert.assertEquals("LogLink_1", attemptInfo2.getLogsLink()); Assert.assertEquals(1659621705L, attemptInfo2.getFinishedTime()); }
@Override public void setConf(Configuration conf) { super.setConf(conf); getRawMapping().setConf(conf); }
@Test public void testNoFile() { TableMapping mapping = new TableMapping(); Configuration conf = new Configuration(); mapping.setConf(conf); List<String> names = new ArrayList<String>(); names.add(hostName1); names.add(hostName2); List<String> result = mapping.resolve(names); assertEquals(names.size(), result.size()); assertEquals(NetworkTopology.DEFAULT_RACK, result.get(0)); assertEquals(NetworkTopology.DEFAULT_RACK, result.get(1)); }
@Override protected Mono<Void> handleRuleIfNull(final String pluginName, final ServerWebExchange exchange, final ShenyuPluginChain chain) { return WebFluxResultUtils.noRuleResult(pluginName, exchange); }
@Test public void testHandleRuleIfNull() { Assertions.assertEquals(springCloudPlugin.handleRuleIfNull("SpringCloud", exchange, chain).getClass(), WebFluxResultUtils.noRuleResult("SpringCloud", exchange).getClass()); }
private KsqlScalarFunction createFunction( final Class theClass, final UdfDescription udfDescriptionAnnotation, final Udf udfAnnotation, final Method method, final String path, final String sensorName, final Class<? extends Kudf> udfClass ) { // sanity check FunctionLoaderUtils .instantiateFunctionInstance(method.getDeclaringClass(), udfDescriptionAnnotation.name()); final FunctionInvoker invoker = FunctionLoaderUtils.createFunctionInvoker(method); final String functionName = udfDescriptionAnnotation.name(); LOGGER.info("Adding function " + functionName + " for method " + method); final List<ParameterInfo> parameters = FunctionLoaderUtils .createParameters(method, functionName, typeParser); final ParamType javaReturnSchema = FunctionLoaderUtils .getReturnType(method, udfAnnotation.schema(), typeParser); final SchemaProvider schemaProviderFunction = FunctionLoaderUtils .handleUdfReturnSchema( theClass, javaReturnSchema, udfAnnotation.schema(), typeParser, udfAnnotation.schemaProvider(), udfDescriptionAnnotation.name(), method.isVarArgs() ); return KsqlScalarFunction.create( schemaProviderFunction, javaReturnSchema, parameters, FunctionName.of(functionName.toUpperCase()), udfClass, getUdfFactory(method, udfDescriptionAnnotation, functionName, invoker, sensorName), udfAnnotation.description(), path, method.isVarArgs() ); }
@SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT") @SuppressWarnings("unchecked") @Test public void shouldCollectMetricsForUdafsWhenEnabled() throws Exception { final Metrics metrics = new Metrics(); final UdafFactoryInvoker creator = createUdafLoader(Optional.of(metrics)).createUdafFactoryInvoker( TestUdaf.class.getMethod("createSumLong"), FunctionName.of("test-udf"), "desc", new String[]{""}, "", ""); final KsqlAggregateFunction<Long, Long, Long> executable = creator.createFunction(AggregateFunctionInitArguments.EMPTY_ARGS, Collections.EMPTY_LIST); executable.aggregate(1L, 1L); executable.aggregate(1L, 1L); final KafkaMetric metric = metrics.metric( metrics.metricName("aggregate-test-udf-createSumLong-count", "ksql-udaf-test-udf-createSumLong")); assertThat(metric.metricValue(), equalTo(2.0)); }
public Row toSnakeCase() { return getSchema().getFields().stream() .map( field -> { if (field.getType().getRowSchema() != null) { Row innerRow = getValue(field.getName()); if (innerRow != null) { return innerRow.toSnakeCase(); } } return (Object) getValue(field.getName()); }) .collect(toRow(getSchema().toSnakeCase())); }
@Test public void testToSnakeCase() { Schema innerSchema = Schema.builder() .addStringField("myFirstNestedStringField") .addStringField("mySecondNestedStringField") .build(); Schema schema = Schema.builder() .addStringField("myFirstStringField") .addStringField("mySecondStringField") .addRowField("myRowField", innerSchema) .build(); Row innerRow = Row.withSchema(innerSchema).addValues("nested1", "nested2").build(); Row row = Row.withSchema(schema).addValues("str1", "str2", innerRow).build(); Row expectedSnakeCaseInnerRow = Row.withSchema(innerSchema.toSnakeCase()).addValues("nested1", "nested2").build(); Row expectedSnakeCaseRow = Row.withSchema(schema.toSnakeCase()) .addValues("str1", "str2", expectedSnakeCaseInnerRow) .build(); assertEquals(expectedSnakeCaseInnerRow, row.toSnakeCase().getRow("my_row_field")); assertEquals(expectedSnakeCaseRow, row.toSnakeCase()); }
public T initialBufferSize(int value) { if (value <= 0) { throw new IllegalArgumentException("initialBufferSize must be strictly positive"); } this.initialBufferSize = value; return get(); }
@Test void initialBufferSizeBadValues() { assertThatExceptionOfType(IllegalArgumentException.class) .isThrownBy(() -> conf.initialBufferSize(0)) .as("rejects 0") .withMessage("initialBufferSize must be strictly positive"); assertThatExceptionOfType(IllegalArgumentException.class) .isThrownBy(() -> conf.initialBufferSize(-1)) .as("rejects negative") .withMessage("initialBufferSize must be strictly positive"); }
@Override public String toString(@Nullable String root, Iterable<String> names) { StringBuilder builder = new StringBuilder(); if (root != null) { builder.append(root); } joiner().appendTo(builder, names); return builder.toString(); }
@Test public void testUnix_toUri() { URI fileUri = PathType.unix().toUri(fileSystemUri, "/", ImmutableList.of("foo", "bar"), false); assertThat(fileUri.toString()).isEqualTo("jimfs://foo/foo/bar"); assertThat(fileUri.getPath()).isEqualTo("/foo/bar"); URI directoryUri = PathType.unix().toUri(fileSystemUri, "/", ImmutableList.of("foo", "bar"), true); assertThat(directoryUri.toString()).isEqualTo("jimfs://foo/foo/bar/"); assertThat(directoryUri.getPath()).isEqualTo("/foo/bar/"); URI rootUri = PathType.unix().toUri(fileSystemUri, "/", ImmutableList.<String>of(), true); assertThat(rootUri.toString()).isEqualTo("jimfs://foo/"); assertThat(rootUri.getPath()).isEqualTo("/"); }
/**
 * Finds the attributes of {@code file} by listing its parent and matching the
 * entry by name. Roots and containers have no attributes of their own and
 * return {@link PathAttributes#EMPTY}.
 *
 * @param file the file to resolve attributes for
 * @param listener progress listener passed through to the listing service
 * @return the attributes of the matching entry
 * @throws NotfoundException if no entry in the parent listing matches the file
 * @throws BackgroundException on listing failure
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    if(new DefaultPathContainerService().isContainer(file)) {
        return PathAttributes.EMPTY;
    }
    final Path query;
    if(file.isPlaceholder()) {
        // Placeholders are queried without their extension — assumes the remote name
        // lacks the local placeholder extension; TODO confirm against the list service.
        query = new Path(file.getParent(), FilenameUtils.removeExtension(file.getName()), file.getType(), file.attributes());
    }
    else {
        query = file;
    }
    final AttributedList<Path> list;
    if(new SimplePathPredicate(DriveHomeFinderService.SHARED_DRIVES_NAME).test(file.getParent())) {
        // Entries directly under the shared-drives pseudo folder are team drives.
        list = new DriveTeamDrivesListService(session, fileid).list(file.getParent(), listener);
    }
    else {
        list = new FileidDriveListService(session, fileid, query).list(file.getParent(), listener);
    }
    // Match by name with the session's case sensitivity; absence means the file does not exist.
    final Path found = list.find(new ListFilteringFeature.ListFilteringPredicate(session.getCaseSensitivity(), file));
    if(null == found) {
        throw new NotfoundException(file.getAbsolute());
    }
    return found.attributes();
}
// Looking up a random, non-existing filename must raise NotfoundException.
@Test(expected = NotfoundException.class)
public void testNotFound() throws Exception {
    final DriveAttributesFinderFeature f = new DriveAttributesFinderFeature(session, new DriveFileIdProvider(session));
    f.find(new Path(DriveHomeFinderService.MYDRIVE_FOLDER, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)));
}
/**
 * Stores a measure for its input component by delegating to {@code saveMeasure}.
 *
 * @param newMeasure the measure to persist; must be a {@code DefaultMeasure}
 */
@Override
public void store(Measure newMeasure) {
    final DefaultMeasure<?> measure = (DefaultMeasure<?>) newMeasure;
    saveMeasure(newMeasure.inputComponent(), measure);
}
// On a pull request, highlighting for an unchanged (SAME) file must not be
// written to the report.
@Test
public void should_skip_highlighting_on_pr_when_file_status_is_SAME() {
    DefaultInputFile file = new TestInputFileBuilder("foo", "src/Foo.php")
        .setContents("// comment")
        .setStatus(InputFile.Status.SAME).build();
    when(branchConfiguration.isPullRequest()).thenReturn(true);

    DefaultHighlighting highlighting =
        new DefaultHighlighting(underTest).onFile(file).highlight(1, 0, 1, 1, TypeOfText.KEYWORD);
    underTest.store(highlighting);

    // No syntax-highlighting component data should exist for the file.
    assertThat(reportWriter.hasComponentData(FileStructure.Domain.SYNTAX_HIGHLIGHTINGS, file.scannerId())).isFalse();
}
/**
 * Returns an {@link AvroGenericCoder} for the given Avro {@code schema}.
 *
 * @param schema the Avro schema the coder encodes records of
 * @return a coder for generic records of {@code schema}
 */
public static AvroGenericCoder of(Schema schema) {
    final AvroGenericCoder coder = AvroGenericCoder.of(schema);
    return coder;
}
// Collections whose element type contains an unordered map must be reported as
// non-deterministic, with the map field cited as the reason.
@Test
public void testDeterminismCollectionValue() {
    assertNonDeterministic(
        AvroCoder.of(OrderedSetOfNonDetValues.class),
        reasonField(UnorderedMapClass.class, "mapField", "may not be deterministically ordered"));
    assertNonDeterministic(
        AvroCoder.of(ListOfNonDetValues.class),
        reasonField(UnorderedMapClass.class, "mapField", "may not be deterministically ordered"));
}
/**
 * Submits the task to the wrapped executor service.
 *
 * @param task the callable to execute
 * @return the future produced by the delegate
 */
@Override
public <T> Future<T> submit(Callable<T> task) {
    final Future<T> future = delegate.submit(task);
    return future;
}
// submit(Runnable) must be forwarded verbatim to the wrapped executor service.
@Test
public void submit2() {
    underTest.submit(runnable);

    verify(executorService).submit(runnable);
}
public static String parent(final String absolute, final char delimiter) { if(String.valueOf(delimiter).equals(absolute)) { return null; } int index = absolute.length() - 1; if(absolute.charAt(index) == delimiter) { if(index > 0) { index--; } } int cut = absolute.lastIndexOf(delimiter, index); if(cut > 0) { return absolute.substring(0, cut); } //if (index == 0) parent is root return String.valueOf(delimiter); }
// Parent resolution: one level deep returns root, deeper paths drop the last
// segment, and the root itself has no parent (null).
@Test
public void testParent() {
    assertEquals("/", PathNormalizer.parent("/p", '/'));
    assertEquals("/p", PathNormalizer.parent("/p/n", '/'));
    assertNull(PathNormalizer.parent("/", '/'));
}
/**
 * Registers a user with the given password and roles.
 *
 * @param username the login name
 * @param password the user's password
 * @param roles the roles granted to the user
 * @return this configuration for fluent chaining
 */
public SimpleAuthenticationConfig addUser(@Nonnull String username, @Nonnull String password, String... roles) {
    final UserDto user = new UserDto(password, roles);
    addUser(username, user);
    return self();
}
// Empty and null passwords must both be rejected with IllegalArgumentException.
@Test
public void testAddUserWithEmptyPassword() {
    SimpleAuthenticationConfig c = new SimpleAuthenticationConfig();
    assertThrows(IllegalArgumentException.class, () -> c.addUser("user", ""));
    assertThrows(IllegalArgumentException.class, () -> c.addUser("user", (String) null));
}
/**
 * Creates a factory for {@code baseType} whose runtime type is encoded in the
 * field named {@code typeFieldName}.
 *
 * @param baseType the polymorphic base type handled by the factory
 * @param typeFieldName the JSON field carrying the type label
 * @param maintainType whether the type field is kept in the deserialized JSON
 * @return a new factory instance
 */
public static <T> RuntimeTypeAdapterFactory<T> of(
        Class<T> baseType, String typeFieldName, boolean maintainType) {
    final RuntimeTypeAdapterFactory<T> factory =
            new RuntimeTypeAdapterFactory<>(baseType, typeFieldName, maintainType);
    return factory;
}
// A null type field name must be rejected with NullPointerException.
@Test
public void testNullTypeFieldName() {
    assertThrows(
        NullPointerException.class,
        () -> RuntimeTypeAdapterFactory.of(BillingInstrument.class, null));
}
/**
 * Builds the unique key for this identifier, scoped by its application name.
 *
 * @param keyType whether to build a path-style or flat unique key
 * @return the unique key string
 */
public String getUniqueKey(KeyTypeEnum keyType) {
    final String uniqueKey = super.getUniqueKey(keyType, application);
    return uniqueKey;
}
// The PATH key is slash-separated under a "metadata" prefix; the UNIQUE_KEY
// form joins the same components with the key separator. Null version/group
// collapse to empty segments.
@Test
void testGetUniqueKey() {
    String interfaceName = "org.apache.dubbo.metadata.integration.InterfaceNameTestService";
    String version = "1.0.0.zk.md";
    String group = null;
    String application = "vic.zk.md";
    MetadataIdentifier providerMetadataIdentifier =
            new MetadataIdentifier(interfaceName, version, group, PROVIDER_SIDE, application);
    Assertions.assertEquals(
            providerMetadataIdentifier.getUniqueKey(KeyTypeEnum.PATH),
            "metadata" + PATH_SEPARATOR + interfaceName + PATH_SEPARATOR
                    + (version == null ? "" : (version + PATH_SEPARATOR))
                    + (group == null ? "" : (group + PATH_SEPARATOR))
                    + PROVIDER_SIDE + PATH_SEPARATOR + application);
    Assertions.assertEquals(
            providerMetadataIdentifier.getUniqueKey(KeyTypeEnum.UNIQUE_KEY),
            interfaceName + MetadataConstants.KEY_SEPARATOR
                    + (version == null ? "" : version)
                    + MetadataConstants.KEY_SEPARATOR
                    + (group == null ? "" : group)
                    + MetadataConstants.KEY_SEPARATOR + PROVIDER_SIDE
                    + MetadataConstants.KEY_SEPARATOR + application);
}
/**
 * Returns the element at {@code index} from a container-like object.
 * Supported containers: {@link Map} (entry at the given position of its
 * entry-set iteration), {@link List}, {@code Object[]}, {@link Iterator},
 * other {@link Collection}s, {@link Enumeration}, and — via reflection —
 * primitive arrays. Iterators and enumerations are consumed up to the index.
 *
 * @param object the container to read from
 * @param index the zero-based position
 * @return the element at the position (boxed for primitive arrays)
 * @throws IndexOutOfBoundsException if {@code index} is negative or past the end
 * @throws IllegalArgumentException if {@code object} is {@code null} or unsupported
 */
public static Object get(Object object, int index) {
    if (index < 0) {
        throw new IndexOutOfBoundsException("Index cannot be negative: " + index);
    }
    if (object instanceof Map) {
        // A Map's "position" is defined by its entry-set iteration order.
        return get(((Map) object).entrySet().iterator(), index);
    }
    if (object instanceof List) {
        return ((List) object).get(index);
    }
    if (object instanceof Object[]) {
        return ((Object[]) object)[index];
    }
    if (object instanceof Iterator) {
        final Iterator iterator = (Iterator) object;
        int remaining = index;
        for (; iterator.hasNext(); remaining--) {
            if (remaining == 0) {
                return iterator.next();
            }
            iterator.next();
        }
        throw new IndexOutOfBoundsException("Entry does not exist: " + remaining);
    }
    if (object instanceof Collection) {
        return get(((Collection) object).iterator(), index);
    }
    if (object instanceof Enumeration) {
        final Enumeration enumeration = (Enumeration) object;
        int remaining = index;
        for (; enumeration.hasMoreElements(); remaining--) {
            if (remaining == 0) {
                return enumeration.nextElement();
            }
            enumeration.nextElement();
        }
        throw new IndexOutOfBoundsException("Entry does not exist: " + remaining);
    }
    if (object == null) {
        throw new IllegalArgumentException("Unsupported object type: null");
    }
    try {
        // Last resort: reflection handles primitive arrays (int[], double[], ...).
        return Array.get(object, index);
    } catch (IllegalArgumentException ex) {
        throw new IllegalArgumentException("Unsupported object type: " + object.getClass().getName());
    }
}
// A negative index on an iterator must fail fast with IndexOutOfBoundsException.
@Test
void testGetIterator2() {
    assertThrows(IndexOutOfBoundsException.class, () -> {
        CollectionUtils.get(Collections.emptyIterator(), -1);
    });
}
/**
 * Generates merge/rollup task configs for every eligible table.
 *
 * <p>For each table: pre-selects mergeable segments (status, lineage, non-empty,
 * merge allowed), sorts them chronologically, then walks merge levels from the
 * smallest to the largest bucket period. For each level it picks up to
 * maxNumParallelBuckets buckets containing unmerged segments, advances the
 * per-level watermark (unless running in "processAll" mode), emits task configs
 * (per-partition for partitioned tables), and finally persists the watermark map
 * to ZooKeeper guarded by the ZNode version.
 *
 * @param tableConfigs the candidate tables
 * @return the generated task configs across all tables
 */
@Override
public List<PinotTaskConfig> generateTasks(List<TableConfig> tableConfigs) {
  String taskType = MergeRollupTask.TASK_TYPE;
  List<PinotTaskConfig> pinotTaskConfigs = new ArrayList<>();
  for (TableConfig tableConfig : tableConfigs) {
    if (!validate(tableConfig, taskType)) {
      continue;
    }
    String tableNameWithType = tableConfig.getTableName();
    LOGGER.info("Start generating task configs for table: {} for task: {}", tableNameWithType, taskType);

    // Get all segment metadata
    List<SegmentZKMetadata> allSegments = getSegmentsZKMetadataForTable(tableNameWithType);

    // Filter segments based on status
    List<SegmentZKMetadata> preSelectedSegmentsBasedOnStatus =
        filterSegmentsBasedOnStatus(tableConfig.getTableType(), allSegments);

    // Select current segment snapshot based on lineage, filter out empty segments
    SegmentLineage segmentLineage = _clusterInfoAccessor.getSegmentLineage(tableNameWithType);
    Set<String> preSelectedSegmentsBasedOnLineage = new HashSet<>();
    for (SegmentZKMetadata segment : preSelectedSegmentsBasedOnStatus) {
      preSelectedSegmentsBasedOnLineage.add(segment.getSegmentName());
    }
    SegmentLineageUtils.filterSegmentsBasedOnLineageInPlace(preSelectedSegmentsBasedOnLineage, segmentLineage);
    List<SegmentZKMetadata> preSelectedSegments = new ArrayList<>();
    for (SegmentZKMetadata segment : preSelectedSegmentsBasedOnStatus) {
      if (preSelectedSegmentsBasedOnLineage.contains(segment.getSegmentName()) && segment.getTotalDocs() > 0
          && MergeTaskUtils.allowMerge(segment)) {
        preSelectedSegments.add(segment);
      }
    }

    if (preSelectedSegments.isEmpty()) {
      // Reset the watermark time if no segment found. This covers the case where the table is newly created or
      // all of its segments got deleted.
      resetDelayMetrics(tableNameWithType);
      LOGGER.info("Skip generating task: {} for table: {}, no segment is found.", taskType, tableNameWithType);
      continue;
    }

    // Sort segments based on startTimeMs, endTimeMs and segmentName in ascending order
    preSelectedSegments.sort((a, b) -> {
      long aStartTime = a.getStartTimeMs();
      long bStartTime = b.getStartTimeMs();
      if (aStartTime != bStartTime) {
        return Long.compare(aStartTime, bStartTime);
      }
      long aEndTime = a.getEndTimeMs();
      long bEndTime = b.getEndTimeMs();
      return aEndTime != bEndTime ? Long.compare(aEndTime, bEndTime)
          : a.getSegmentName().compareTo(b.getSegmentName());
    });

    // Sort merge levels based on bucket time period
    Map<String, String> taskConfigs = tableConfig.getTaskConfig().getConfigsForTaskType(taskType);
    Map<String, Map<String, String>> mergeLevelToConfigs = MergeRollupTaskUtils.getLevelToConfigMap(taskConfigs);
    List<Map.Entry<String, Map<String, String>>> sortedMergeLevelConfigs =
        new ArrayList<>(mergeLevelToConfigs.entrySet());
    sortedMergeLevelConfigs.sort(Comparator.comparingLong(
        e -> TimeUtils.convertPeriodToMillis(e.getValue().get(MinionConstants.MergeTask.BUCKET_TIME_PERIOD_KEY))));

    // Get incomplete merge levels
    Set<String> inCompleteMergeLevels = new HashSet<>();
    for (Map.Entry<String, TaskState> entry : TaskGeneratorUtils.getIncompleteTasks(taskType, tableNameWithType,
        _clusterInfoAccessor).entrySet()) {
      for (PinotTaskConfig taskConfig : _clusterInfoAccessor.getTaskConfigs(entry.getKey())) {
        inCompleteMergeLevels.add(taskConfig.getConfigs().get(MergeRollupTask.MERGE_LEVEL_KEY));
      }
    }

    // Get scheduling mode which is "processFromWatermark" by default. If "processAll" mode is enabled, there will be
    // no watermark, and each round we pick the buckets in chronological order which have unmerged segments.
    boolean processAll = MergeTask.PROCESS_ALL_MODE.equalsIgnoreCase(taskConfigs.get(MergeTask.MODE));

    // Read the existing task metadata ZNode (if any); its version guards the final write-back.
    ZNRecord mergeRollupTaskZNRecord = _clusterInfoAccessor
        .getMinionTaskMetadataZNRecord(MinionConstants.MergeRollupTask.TASK_TYPE, tableNameWithType);
    int expectedVersion = mergeRollupTaskZNRecord != null ? mergeRollupTaskZNRecord.getVersion() : -1;
    MergeRollupTaskMetadata mergeRollupTaskMetadata = mergeRollupTaskZNRecord != null
        ? MergeRollupTaskMetadata.fromZNRecord(mergeRollupTaskZNRecord)
        : new MergeRollupTaskMetadata(tableNameWithType, new TreeMap<>());
    List<PinotTaskConfig> pinotTaskConfigsForTable = new ArrayList<>();

    // Schedule tasks from lowest to highest merge level (e.g. Hourly -> Daily -> Monthly -> Yearly)
    String mergeLevel = null;
    for (Map.Entry<String, Map<String, String>> mergeLevelConfig : sortedMergeLevelConfigs) {
      String lowerMergeLevel = mergeLevel;
      mergeLevel = mergeLevelConfig.getKey();
      Map<String, String> mergeConfigs = mergeLevelConfig.getValue();

      // Skip scheduling if there's incomplete task for current mergeLevel
      if (inCompleteMergeLevels.contains(mergeLevel)) {
        LOGGER.info("Found incomplete task of merge level: {} for the same table: {}, Skipping task generation: {}",
            mergeLevel, tableNameWithType, taskType);
        continue;
      }

      // Get the bucket size, buffer size and maximum number of parallel buckets (by default 1)
      String bucketPeriod = mergeConfigs.get(MergeTask.BUCKET_TIME_PERIOD_KEY);
      long bucketMs = TimeUtils.convertPeriodToMillis(bucketPeriod);
      if (bucketMs <= 0) {
        LOGGER.error("Bucket time period: {} (table : {}, mergeLevel : {}) must be larger than 0", bucketPeriod,
            tableNameWithType, mergeLevel);
        continue;
      }
      String bufferPeriod = mergeConfigs.get(MergeTask.BUFFER_TIME_PERIOD_KEY);
      long bufferMs = TimeUtils.convertPeriodToMillis(bufferPeriod);
      if (bufferMs < 0) {
        LOGGER.error("Buffer time period: {} (table : {}, mergeLevel : {}) must be larger or equal to 0", bufferPeriod,
            tableNameWithType, mergeLevel);
        continue;
      }
      String maxNumParallelBucketsStr = mergeConfigs.get(MergeTask.MAX_NUM_PARALLEL_BUCKETS);
      int maxNumParallelBuckets = maxNumParallelBucketsStr != null ? Integer.parseInt(maxNumParallelBucketsStr)
          : DEFAULT_NUM_PARALLEL_BUCKETS;
      if (maxNumParallelBuckets <= 0) {
        LOGGER.error("Maximum number of parallel buckets: {} (table : {}, mergeLevel : {}) must be larger than 0",
            maxNumParallelBuckets, tableNameWithType, mergeLevel);
        continue;
      }

      // Get bucket start/end time
      long preSelectedSegStartTimeMs = preSelectedSegments.get(0).getStartTimeMs();
      long bucketStartMs = preSelectedSegStartTimeMs / bucketMs * bucketMs;
      long watermarkMs = 0;
      if (!processAll) {
        // Get watermark from MergeRollupTaskMetadata ZNode
        // bucketStartMs = watermarkMs
        // bucketEndMs = bucketStartMs + bucketMs
        watermarkMs = getWatermarkMs(preSelectedSegStartTimeMs, bucketMs, mergeLevel, mergeRollupTaskMetadata);
        bucketStartMs = watermarkMs;
      }
      long bucketEndMs = bucketStartMs + bucketMs;
      if (lowerMergeLevel == null) {
        long lowestLevelMaxValidBucketEndTimeMs = Long.MIN_VALUE;
        for (SegmentZKMetadata preSelectedSegment : preSelectedSegments) {
          // Compute lowestLevelMaxValidBucketEndTimeMs among segments that are ready for merge
          long currentValidBucketEndTimeMs =
              getValidBucketEndTimeMsForSegment(preSelectedSegment, bucketMs, bufferMs);
          lowestLevelMaxValidBucketEndTimeMs =
              Math.max(lowestLevelMaxValidBucketEndTimeMs, currentValidBucketEndTimeMs);
        }
        _tableLowestLevelMaxValidBucketEndTimeMs.put(tableNameWithType, lowestLevelMaxValidBucketEndTimeMs);
      }
      // Create metrics even if there's no task scheduled, this helps the case that the controller is restarted
      // but the metrics are not available until the controller schedules a valid task
      List<String> sortedMergeLevels =
          sortedMergeLevelConfigs.stream().map(e -> e.getKey()).collect(Collectors.toList());
      if (processAll) {
        createOrUpdateNumBucketsToProcessMetrics(tableNameWithType, mergeLevel, lowerMergeLevel, bufferMs, bucketMs,
            preSelectedSegments, sortedMergeLevels);
      } else {
        createOrUpdateDelayMetrics(tableNameWithType, mergeLevel, null, watermarkMs, bufferMs, bucketMs);
      }

      if (!isValidBucketEndTime(bucketEndMs, bufferMs, lowerMergeLevel, mergeRollupTaskMetadata, processAll)) {
        LOGGER.info("Bucket with start: {} and end: {} (table : {}, mergeLevel : {}, mode : {}) cannot be merged yet",
            bucketStartMs, bucketEndMs, tableNameWithType, mergeLevel,
            processAll ? MergeTask.PROCESS_ALL_MODE : MergeTask.PROCESS_FROM_WATERMARK_MODE);
        continue;
      }

      // Find overlapping segments for each bucket, skip the buckets that has all segments merged
      List<List<SegmentZKMetadata>> selectedSegmentsForAllBuckets = new ArrayList<>(maxNumParallelBuckets);
      List<SegmentZKMetadata> selectedSegmentsForBucket = new ArrayList<>();
      boolean hasUnmergedSegments = false;
      boolean hasSpilledOverData = false;
      boolean areAllSegmentsReadyToMerge = true;

      // The for loop terminates in following cases:
      // 1. Found buckets with unmerged segments:
      //    For each bucket find all segments overlapping with the target bucket, skip the bucket if all overlapping
      //    segments are merged. Schedule k (numParallelBuckets) buckets at most, and stops at the first bucket that
      //    contains spilled over data.
      //    One may wonder how a segment with records spanning different buckets is handled. The short answer is that
      //    it will be cut into multiple segments, each for a separate bucket. This is achieved by setting bucket time
      //    period as PARTITION_BUCKET_TIME_PERIOD when generating PinotTaskConfigs
      // 2. There's no bucket with unmerged segments, skip scheduling
      for (SegmentZKMetadata preSelectedSegment : preSelectedSegments) {
        long startTimeMs = preSelectedSegment.getStartTimeMs();
        if (startTimeMs < bucketEndMs) {
          long endTimeMs = preSelectedSegment.getEndTimeMs();
          if (endTimeMs >= bucketStartMs) {
            // For segments overlapping with current bucket, add to the result list
            if (!isMergedSegment(preSelectedSegment, mergeLevel, sortedMergeLevels)) {
              hasUnmergedSegments = true;
            }
            if (!isMergedSegment(preSelectedSegment, lowerMergeLevel, sortedMergeLevels)) {
              areAllSegmentsReadyToMerge = false;
            }
            if (hasSpilledOverData(preSelectedSegment, bucketMs)) {
              hasSpilledOverData = true;
            }
            selectedSegmentsForBucket.add(preSelectedSegment);
          }
          // endTimeMs < bucketStartMs
          // Haven't find the first overlapping segment, continue to the next segment
        } else {
          // Has gone through all overlapping segments for current bucket
          if (hasUnmergedSegments && areAllSegmentsReadyToMerge) {
            // Add the bucket if there are unmerged segments
            selectedSegmentsForAllBuckets.add(selectedSegmentsForBucket);
          }
          if (selectedSegmentsForAllBuckets.size() == maxNumParallelBuckets || hasSpilledOverData) {
            // If there are enough buckets or found spilled over data, schedule merge tasks
            break;
          } else {
            // Start with a new bucket
            // TODO: If there are many small merged segments, we should merge them again
            selectedSegmentsForBucket = new ArrayList<>();
            hasUnmergedSegments = false;
            areAllSegmentsReadyToMerge = true;
            bucketStartMs = (startTimeMs / bucketMs) * bucketMs;
            bucketEndMs = bucketStartMs + bucketMs;
            if (!isValidBucketEndTime(bucketEndMs, bufferMs, lowerMergeLevel, mergeRollupTaskMetadata, processAll)) {
              break;
            }
            if (!isMergedSegment(preSelectedSegment, mergeLevel, sortedMergeLevels)) {
              hasUnmergedSegments = true;
            }
            if (!isMergedSegment(preSelectedSegment, lowerMergeLevel, sortedMergeLevels)) {
              areAllSegmentsReadyToMerge = false;
            }
            if (hasSpilledOverData(preSelectedSegment, bucketMs)) {
              hasSpilledOverData = true;
            }
            selectedSegmentsForBucket.add(preSelectedSegment);
          }
        }
      }

      // Add the last bucket if it contains unmerged segments and is not added before
      if (hasUnmergedSegments && areAllSegmentsReadyToMerge && (selectedSegmentsForAllBuckets.isEmpty() || (
          selectedSegmentsForAllBuckets.get(selectedSegmentsForAllBuckets.size() - 1) != selectedSegmentsForBucket))) {
        selectedSegmentsForAllBuckets.add(selectedSegmentsForBucket);
      }

      if (selectedSegmentsForAllBuckets.isEmpty()) {
        LOGGER.info("No unmerged segment found for table: {}, mergeLevel: {}", tableNameWithType, mergeLevel);
        continue;
      }

      // Bump up watermark to the earliest start time of selected segments truncated to the closest bucket boundary
      long newWatermarkMs = selectedSegmentsForAllBuckets.get(0).get(0).getStartTimeMs() / bucketMs * bucketMs;
      mergeRollupTaskMetadata.getWatermarkMap().put(mergeLevel, newWatermarkMs);
      LOGGER.info("Update watermark for table: {}, mergeLevel: {} from: {} to: {}", tableNameWithType, mergeLevel,
          watermarkMs, newWatermarkMs);

      // Update the delay metrics
      if (!processAll) {
        createOrUpdateDelayMetrics(tableNameWithType, mergeLevel, lowerMergeLevel, newWatermarkMs, bufferMs,
            bucketMs);
      }

      // Create task configs
      int maxNumRecordsPerTask =
          mergeConfigs.get(MergeRollupTask.MAX_NUM_RECORDS_PER_TASK_KEY) != null ? Integer.parseInt(
              mergeConfigs.get(MergeRollupTask.MAX_NUM_RECORDS_PER_TASK_KEY)) : DEFAULT_MAX_NUM_RECORDS_PER_TASK;
      SegmentPartitionConfig segmentPartitionConfig = tableConfig.getIndexingConfig().getSegmentPartitionConfig();
      if (segmentPartitionConfig == null) {
        for (List<SegmentZKMetadata> selectedSegmentsPerBucket : selectedSegmentsForAllBuckets) {
          pinotTaskConfigsForTable.addAll(
              createPinotTaskConfigs(selectedSegmentsPerBucket, tableConfig, maxNumRecordsPerTask, mergeLevel, null,
                  mergeConfigs, taskConfigs));
        }
      } else {
        // For partitioned table, schedule separate tasks for each partitionId (partitionId is constructed from
        // partitions of all partition columns. There should be exact match between partition columns of segment and
        // partition columns of table configuration, and there is only partition per column in segment metadata).
        // Other segments which do not meet these conditions are considered as outlier segments, and additional tasks
        // are generated for them.
        Map<String, ColumnPartitionConfig> columnPartitionMap = segmentPartitionConfig.getColumnPartitionMap();
        List<String> partitionColumns = new ArrayList<>(columnPartitionMap.keySet());
        for (List<SegmentZKMetadata> selectedSegmentsPerBucket : selectedSegmentsForAllBuckets) {
          Map<List<Integer>, List<SegmentZKMetadata>> partitionToSegments = new HashMap<>();
          List<SegmentZKMetadata> outlierSegments = new ArrayList<>();
          for (SegmentZKMetadata selectedSegment : selectedSegmentsPerBucket) {
            SegmentPartitionMetadata segmentPartitionMetadata = selectedSegment.getPartitionMetadata();
            List<Integer> partitions = new ArrayList<>();
            if (segmentPartitionMetadata != null && columnPartitionMap.keySet()
                .equals(segmentPartitionMetadata.getColumnPartitionMap().keySet())) {
              for (String partitionColumn : partitionColumns) {
                if (segmentPartitionMetadata.getPartitions(partitionColumn).size() == 1) {
                  partitions.add(segmentPartitionMetadata.getPartitions(partitionColumn).iterator().next());
                } else {
                  partitions.clear();
                  break;
                }
              }
            }
            if (partitions.isEmpty()) {
              outlierSegments.add(selectedSegment);
            } else {
              partitionToSegments.computeIfAbsent(partitions, k -> new ArrayList<>()).add(selectedSegment);
            }
          }
          for (Map.Entry<List<Integer>, List<SegmentZKMetadata>> entry : partitionToSegments.entrySet()) {
            List<Integer> partition = entry.getKey();
            List<SegmentZKMetadata> partitionedSegments = entry.getValue();
            pinotTaskConfigsForTable.addAll(
                createPinotTaskConfigs(partitionedSegments, tableConfig, maxNumRecordsPerTask, mergeLevel, partition,
                    mergeConfigs, taskConfigs));
          }
          if (!outlierSegments.isEmpty()) {
            pinotTaskConfigsForTable.addAll(
                createPinotTaskConfigs(outlierSegments, tableConfig, maxNumRecordsPerTask, mergeLevel, null,
                    mergeConfigs, taskConfigs));
          }
        }
      }
    }

    // Write updated watermark map to zookeeper
    if (!processAll) {
      try {
        _clusterInfoAccessor.setMinionTaskMetadata(mergeRollupTaskMetadata, MinionConstants.MergeRollupTask.TASK_TYPE,
            expectedVersion);
      } catch (ZkException e) {
        LOGGER.error(
            "Version changed while updating merge/rollup task metadata for table: {}, skip scheduling. There are "
                + "multiple task schedulers for the same table, need to investigate!", tableNameWithType);
        continue;
      }
    }
    pinotTaskConfigs.addAll(pinotTaskConfigsForTable);
    LOGGER.info("Finished generating task configs for table: {} for task: {}, numTasks: {}", tableNameWithType,
        taskType, pinotTaskConfigsForTable.size());
  }

  // Clean up metrics
  cleanUpDelayMetrics(tableConfigs);
  return pinotTaskConfigs;
}
// A table with no segments must produce no tasks and must not create any
// merge/rollup task metadata ZNode.
@Test
public void testEmptyTable() {
    Map<String, Map<String, String>> taskConfigsMap = new HashMap<>();
    Map<String, String> tableTaskConfigs = new HashMap<>();
    tableTaskConfigs.put("daily.mergeType", "concat");
    tableTaskConfigs.put("daily.bufferTimePeriod", "1d");
    tableTaskConfigs.put("daily.bucketTimePeriod", "1d");
    tableTaskConfigs.put("daily.maxNumRecordsPerSegment", "1000000");
    taskConfigsMap.put(MinionConstants.MergeRollupTask.TASK_TYPE, tableTaskConfigs);
    TableConfig offlineTableConfig = getTableConfig(TableType.OFFLINE, taskConfigsMap);
    ClusterInfoAccessor mockClusterInfoProvide = mock(ClusterInfoAccessor.class);
    // No segment metadata for the table.
    when(mockClusterInfoProvide.getSegmentsZKMetadata(OFFLINE_TABLE_NAME)).thenReturn(
        Lists.newArrayList(Collections.emptyList()));
    when(mockClusterInfoProvide.getIdealState(OFFLINE_TABLE_NAME)).thenReturn(new IdealState(OFFLINE_TABLE_NAME));
    mockMergeRollupTaskMetadataGetterAndSetter(mockClusterInfoProvide);

    MergeRollupTaskGenerator generator = new MergeRollupTaskGenerator();
    generator.init(mockClusterInfoProvide);
    List<PinotTaskConfig> pinotTaskConfigs = generator.generateTasks(Lists.newArrayList(offlineTableConfig));
    assertNull(mockClusterInfoProvide.getMinionTaskMetadataZNRecord(MinionConstants.MergeRollupTask.TASK_TYPE,
        OFFLINE_TABLE_NAME));
    assertEquals(pinotTaskConfigs.size(), 0);
}
/**
 * Sets up optical connectivity between an ingress and an egress connect point.
 * Computes up to {@code maxPaths} shortest paths between the two devices,
 * keeps only paths whose endpoints match the requested ports, and provisions
 * the first one that {@code setupPath} accepts.
 *
 * @param ingress the ingress connect point (port)
 * @param egress the egress connect point (port)
 * @param bandwidth requested bandwidth, or {@code null} for no requirement
 * @param latency requested latency bound
 * @return the assigned connectivity id, or {@code null} if no path could be set up
 */
@Override
public OpticalConnectivityId setupConnectivity(ConnectPoint ingress, ConnectPoint egress,
                                               Bandwidth bandwidth, Duration latency) {
    checkNotNull(ingress);
    checkNotNull(egress);
    log.info("setupConnectivity({}, {}, {}, {})", ingress, egress, bandwidth, latency);

    // Substitute the sentinel when the caller has no bandwidth requirement.
    Bandwidth bw = (bandwidth == null) ? NO_BW_REQUIREMENT : bandwidth;

    // NOTE(review): the link weight is built from the raw (possibly null) bandwidth
    // while setupPath receives the substituted bw — confirm BandwidthLinkWeight
    // handles null intentionally.
    Stream<Path> paths = topologyService.getKShortestPaths(
            topologyService.currentTopology(),
            ingress.deviceId(), egress.deviceId(),
            new BandwidthLinkWeight(bandwidth));

    // Path service calculates from node to node, we're only interested in port to port
    Optional<OpticalConnectivityId> id =
            paths.filter(p -> p.src().equals(ingress) && p.dst().equals(egress))
                    .limit(maxPaths)
                    .map(p -> setupPath(p, bw, latency))
                    .filter(Objects::nonNull)
                    .findFirst();

    if (id.isPresent()) {
        log.info("Assigned OpticalConnectivityId: {}", id);
    } else {
        log.error("setupConnectivity({}, {}, {}, {}) failed.", ingress, egress, bandwidth, latency);
    }

    return id.orElse(null);
}
// After the intents report installed, exactly one PATH_INSTALLED event must be
// emitted carrying the connectivity id returned by setupConnectivity.
@Test
public void testInstalledEventLocal() {
    Bandwidth bandwidth = Bandwidth.bps(100);
    Duration latency = Duration.ofMillis(10);

    OpticalConnectivityId cid = target.setupConnectivity(CP12, CP71, bandwidth, latency);

    // notify all intents are installed
    intentService.notifyInstalled();

    assertEquals(1, listener.events.size());
    assertEquals(OpticalPathEvent.Type.PATH_INSTALLED, listener.events.get(0).type());
    assertEquals(cid, listener.events.get(0).subject());
}
/**
 * Formats a data node as {@code dataSource.schema.table}; the schema segment
 * is omitted when the node has no schema name.
 *
 * @param dataNode the data node to format
 * @return the dotted textual representation
 */
public static String formatWithSchema(final DataNode dataNode) {
    final StringBuilder result = new StringBuilder(dataNode.getDataSourceName());
    if (null != dataNode.getSchemaName()) {
        result.append('.').append(dataNode.getSchemaName());
    }
    return result.append('.').append(dataNode.getTableName()).toString();
}
// A node with a schema formats as dataSource.schema.table.
@Test
void assertFormatWithSchema() {
    DataNode dataNode = new DataNode("ds_0.tbl_0");
    dataNode.setSchemaName("public");
    assertThat(DataNodeUtils.formatWithSchema(dataNode), is("ds_0.public.tbl_0"));
}
public Service createGenericResourceService(String name, String version, String resource, String referencePayload) throws EntityAlreadyExistsException { log.info("Creating a new Service '{}-{}' for generic resource {}", name, version, resource); // Check if corresponding Service already exists. Service existingService = serviceRepository.findByNameAndVersion(name, version); if (existingService != null) { log.warn("A Service '{}-{}' is already existing. Throwing an Exception", name, version); throw new EntityAlreadyExistsException( String.format("Service '%s-%s' is already present in store", name, version)); } // Create new service with GENERIC_REST type. Service service = new Service(); service.setName(name); service.setVersion(version); service.setType(ServiceType.GENERIC_REST); service.setMetadata(new Metadata()); // Now create basic crud operations for the resource. Operation createOp = new Operation(); createOp.setName("POST /" + resource); createOp.setMethod("POST"); service.addOperation(createOp); Operation getOp = new Operation(); getOp.setName("GET /" + resource + "/:id"); getOp.setMethod("GET"); getOp.setDispatcher(DispatchStyles.URI_PARTS); getOp.setDispatcherRules("id"); service.addOperation(getOp); Operation updateOp = new Operation(); updateOp.setName("PUT /" + resource + "/:id"); updateOp.setMethod("PUT"); updateOp.setDispatcher(DispatchStyles.URI_PARTS); updateOp.setDispatcherRules("id"); service.addOperation(updateOp); Operation listOp = new Operation(); listOp.setName("GET /" + resource); listOp.setMethod("GET"); service.addOperation(listOp); Operation delOp = new Operation(); delOp.setName("DELETE /" + resource + "/:id"); delOp.setMethod("DELETE"); delOp.setDispatcher(DispatchStyles.URI_PARTS); delOp.setDispatcherRules("id"); service.addOperation(delOp); serviceRepository.save(service); log.info("Having created Service '{}' for generic resource {}", service.getId(), resource); // If reference payload is provided, record a first resource. 
if (referencePayload != null) { GenericResource genericResource = new GenericResource(); genericResource.setServiceId(service.getId()); genericResource.setReference(true); try { Document document = Document.parse(referencePayload); genericResource.setPayload(document); genericResourceRepository.save(genericResource); } catch (JsonParseException jpe) { log.error("Cannot parse the provided reference payload as JSON: {}", referencePayload); log.error("Reference is ignored, please provide JSON the next time"); } } // Publish a Service create event before returning. publishServiceChangeEvent(service, ChangeType.CREATED); return service; }
// A generic resource service must be persisted with GENERIC_REST type and
// exactly five CRUD operations; item-level operations dispatch on the 'id' part.
@Test
void testCreateGenericResourceService() {
    Service created = null;
    try {
        created = service.createGenericResourceService("Order Service", "1.0", "order", null);
    } catch (Exception e) {
        fail("No exception should be thrown");
    }

    // Check created object.
    assertNotNull(created.getId());

    // Retrieve object by id and assert on what has been persisted.
    Service retrieved = repository.findById(created.getId()).orElse(null);
    assertEquals("Order Service", retrieved.getName());
    assertEquals("1.0", retrieved.getVersion());
    assertEquals(ServiceType.GENERIC_REST, retrieved.getType());

    // Now check operations.
    assertEquals(5, retrieved.getOperations().size());
    for (Operation op : retrieved.getOperations()) {
        if ("POST /order".equals(op.getName())) {
            assertEquals("POST", op.getMethod());
        } else if ("GET /order/:id".equals(op.getName())) {
            assertEquals("GET", op.getMethod());
            assertEquals(DispatchStyles.URI_PARTS, op.getDispatcher());
            assertEquals("id", op.getDispatcherRules());
        } else if ("GET /order".equals(op.getName())) {
            assertEquals("GET", op.getMethod());
        } else if ("PUT /order/:id".equals(op.getName())) {
            assertEquals("PUT", op.getMethod());
            assertEquals(DispatchStyles.URI_PARTS, op.getDispatcher());
            assertEquals("id", op.getDispatcherRules());
        } else if ("DELETE /order/:id".equals(op.getName())) {
            assertEquals("DELETE", op.getMethod());
            assertEquals(DispatchStyles.URI_PARTS, op.getDispatcher());
            assertEquals("id", op.getDispatcherRules());
        } else {
            fail("Unknown operation name: " + op.getName());
        }
    }
}
/**
 * Returns the timestamp one nanosecond after the given one, saturating at
 * {@code Timestamp.MAX_VALUE} instead of overflowing.
 *
 * @param timestamp the timestamp to increment
 * @return the next representable timestamp, or the input when already at the maximum
 */
public static Timestamp next(Timestamp timestamp) {
    if (timestamp.equals(Timestamp.MAX_VALUE)) {
        // Saturate: there is no timestamp after the maximum.
        return timestamp;
    }
    final long seconds = timestamp.getSeconds();
    final int bumpedNanos = timestamp.getNanos() + 1;
    // Carry into the seconds component when nanos reach a full second.
    return bumpedNanos >= NANOS_PER_SECOND
        ? Timestamp.ofTimeSecondsAndNanos(seconds + 1, 0)
        : Timestamp.ofTimeSecondsAndNanos(seconds, bumpedNanos);
}
// When nanos are at 999,999,999, next() must roll over into the seconds field.
@Test
public void testNextIncrementsSecondsWhenNanosOverflow() {
    assertEquals(
        Timestamp.ofTimeSecondsAndNanos(11L, 0),
        TimestampUtils.next(Timestamp.ofTimeSecondsAndNanos(10L, 999999999)));
}
/**
 * Validates that no column of any source used by the query shares a name with a
 * ksqlDB pseudocolumn (such columns would shadow the pseudocolumn and cannot be
 * queried).
 *
 * @param analysis the analysed query to validate
 * @throws KsqlException if any source column name collides with a pseudocolumn
 */
static void validateNoUserColumnsWithSameNameAsPseudoColumns(final Analysis analysis) {
  // Collect every source column whose name collides with a pseudocolumn.
  final String disallowedNames = analysis.getAllDataSources()
      .stream()
      .map(AliasedDataSource::getDataSource)
      .map(DataSource::getSchema)
      .map(LogicalSchema::value)
      .flatMap(Collection::stream)
      .map(Column::name)
      .filter(SystemColumns::isPseudoColumn)
      .map(ColumnName::toString)
      .collect(Collectors.joining(", "));

  if (!disallowedNames.isEmpty()) {
    throw new KsqlException(
        "Your stream/table has columns with the same name as newly introduced pseudocolumns in"
            + " ksqlDB, and cannot be queried as a result. The conflicting names are: "
            + disallowedNames
            + ".\n"
    );
  }
}
// A source column named after a pseudocolumn (ROWPARTITION) must be rejected
// with a message citing the conflicting name.
@Test
public void shouldThrowOnUserColumnsWithSameNameAsPseudoColumn() {
    // Given:
    givenAnalysisOfQueryWithUserColumnsWithSameNameAsPseudoColumn();

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> QueryValidatorUtil.validateNoUserColumnsWithSameNameAsPseudoColumns(analysis)
    );

    // Then:
    assertThat(e.getMessage(), containsString("Your stream/table has columns with the "
        + "same name as newly introduced pseudocolumns in "
        + "ksqlDB, and cannot be queried as a result. The conflicting names are: `ROWPARTITION`."));
}
/**
 * Parses a Glucose Measurement Context packet and reports the decoded optional
 * fields via onGlucoseMeasurementContextReceived. The Flags byte announces which
 * optional fields follow; the total size is validated against those flags before
 * parsing. Malformed packets are reported through onInvalidDataReceived.
 * NOTE(review): field layout appears to follow the BT SIG Glucose Measurement
 * Context characteristic — confirm against the spec.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);

    // Minimum size: Flags (1 byte) + Sequence Number (2 bytes).
    if (data.size() < 3) {
        onInvalidDataReceived(device, data);
        return;
    }

    int offset = 0;
    final int flags = data.getIntValue(Data.FORMAT_UINT8, offset++);
    final boolean carbohydratePresent = (flags & 0x01) != 0;
    final boolean mealPresent = (flags & 0x02) != 0;
    final boolean testerHealthPresent = (flags & 0x04) != 0;
    final boolean exercisePresent = (flags & 0x08) != 0;
    final boolean medicationPresent = (flags & 0x10) != 0;
    final boolean medicationUnitLiter = (flags & 0x20) != 0;
    final boolean HbA1cPresent = (flags & 0x40) != 0;
    final boolean extendedFlagsPresent = (flags & 0x80) != 0;

    // Validate total length against the optional fields announced in Flags.
    if (data.size() < 3 + (carbohydratePresent ? 3 : 0) + (mealPresent ? 1 : 0)
            + (testerHealthPresent ? 1 : 0) + (exercisePresent ? 3 : 0)
            + (medicationPresent ? 3 : 0) + (HbA1cPresent ? 2 : 0)
            + (extendedFlagsPresent ? 1 : 0)) {
        onInvalidDataReceived(device, data);
        return;
    }

    final int sequenceNumber = data.getIntValue(Data.FORMAT_UINT16_LE, offset);
    offset += 2;

    // Optional fields
    if (extendedFlagsPresent) {
        // ignore extended flags
        offset += 1;
    }

    Carbohydrate carbohydrate = null;
    Float carbohydrateAmount = null;
    if (carbohydratePresent) {
        final int carbohydrateId = data.getIntValue(Data.FORMAT_UINT8, offset);
        carbohydrate = Carbohydrate.from(carbohydrateId);
        carbohydrateAmount = data.getFloatValue(Data.FORMAT_SFLOAT, offset + 1); // in grams
        offset += 3;
    }

    Meal meal = null;
    if (mealPresent) {
        final int mealId = data.getIntValue(Data.FORMAT_UINT8, offset);
        meal = Meal.from(mealId);
        offset += 1;
    }

    Tester tester = null;
    Health health = null;
    if (testerHealthPresent) {
        // Tester occupies the low nibble, Health the high nibble of one byte.
        final int testerAndHealth = data.getIntValue(Data.FORMAT_UINT8, offset);
        tester = Tester.from(testerAndHealth & 0x0F);
        health = Health.from(testerAndHealth >> 4);
        offset += 1;
    }

    Integer exerciseDuration = null;
    Integer exerciseIntensity = null;
    if (exercisePresent) {
        exerciseDuration = data.getIntValue(Data.FORMAT_UINT16_LE, offset); // in seconds
        exerciseIntensity = data.getIntValue(Data.FORMAT_UINT8, offset + 2); // in percentage
        offset += 3;
    }

    Medication medication = null;
    Float medicationAmount = null;
    Integer medicationUnit = null;
    if (medicationPresent) {
        final int medicationId = data.getIntValue(Data.FORMAT_UINT8, offset);
        medication = Medication.from(medicationId);
        medicationAmount = data.getFloatValue(Data.FORMAT_SFLOAT, offset + 1); // mg or ml
        medicationUnit = medicationUnitLiter ? UNIT_ml : UNIT_mg;
        offset += 3;
    }

    Float HbA1c = null;
    if (HbA1cPresent) {
        HbA1c = data.getFloatValue(Data.FORMAT_SFLOAT, offset);
        // offset += 2;
    }

    onGlucoseMeasurementContextReceived(device, sequenceNumber, carbohydrate, carbohydrateAmount,
            meal, tester, health, exerciseDuration, exerciseIntensity,
            medication, medicationAmount, medicationUnit, HbA1c);
}
// Feeds a packet with every optional field present (flags = 0xFF) and checks the
// parser accepts it; each setValue() below writes one field of the measurement
// context layout at its expected byte offset.
@Test
public void onGlucoseMeasurementContextReceived_full() {
    final MutableData data = new MutableData(new byte[17]);
    data.setValue(0xFF, Data.FORMAT_UINT8, 0); // Flags
    data.setValue(0, Data.FORMAT_UINT16_LE, 1); // Sequence number
    data.setValue(0xb3, Data.FORMAT_UINT8, 3); // Extended flags - ignored
    data.setValue(GlucoseMeasurementContextCallback.Carbohydrate.DINNER.value, Data.FORMAT_UINT8, 4); // Carbohydrate
    data.setValue(100.0f, Data.FORMAT_SFLOAT, 5); // Carbohydrate Amount
    data.setValue(GlucoseMeasurementContextCallback.Meal.CASUAL.value, Data.FORMAT_UINT8, 7); // Meal
    data.setValue(0x12, Data.FORMAT_UINT8, 8); // Tester and Health (health care practitioner, minor issues)
    data.setValue(60, Data.FORMAT_UINT16_LE, 9); // 1 minute of exercise
    data.setValue(50, Data.FORMAT_UINT8, 11); // 50%
    data.setValue(4, Data.FORMAT_UINT8, 12); // Long acting insulin
    data.setValue(123.45f, Data.FORMAT_SFLOAT, 13); // 123.45 ml
    data.setValue(34.5f, Data.FORMAT_SFLOAT, 15); // HbA1c = 34.5%

    callback.onDataReceived(null, data);
    assertTrue(success);
    assertEquals(0, number);
}
// Response payload wrapped by this success envelope. The schema example documents
// the convention: singular domain names are serialized as a JSON object, plural
// names as a JSON array. (Annotation strings are user-facing and kept verbatim.)
@Schema(description = "응닡 λ©”μ‹œμ§€", example = """
        data: {
            "aDomain": { // λ‹¨μˆ˜λͺ…μ‚¬λŠ” object ν˜•νƒœλ‘œ λ°˜ν™˜
                ...
            },`
            "bDomains": [ // 볡수λͺ…μ‚¬λŠ” array ν˜•νƒœλ‘œ λ°˜ν™˜
                ...
            ]
        }
        """)
private T data;

// Private so instances are created only through the builder / factory methods.
@Builder
private SuccessResponse(T data) {
    this.data = data;
}

/**
 * Returns a success response of the form {@code data : { "key" : data }}.
 * <br/>
 * Use this to explicitly name the key under which the payload is exposed.
 */
public static <V> SuccessResponse<Map<String, V>> from(String key, V data) {
    return SuccessResponse.<Map<String, V>>builder()
            .data(Map.of(key, data))
            .build();
}
// Verifies that SuccessResponse.from(dto) wraps the DTO unchanged and carries the
// default success code "2000".
@Test
@DisplayName("SuccessResponse.from() - DTOλ₯Ό ν†΅ν•œ 성곡 응닡")
public void successResponseFromDto() {
    // When
    SuccessResponse<TestDto> response = SuccessResponse.from(dto);

    // Then
    assertEquals("2000", response.getCode());
    assertEquals(dto, response.getData());
    System.out.println(response);
}
/**
 * Prints the given entities in the console's configured output format: a single
 * JSON document, or one table per entity. In tabular mode each entity is preceded
 * by its statement text when more than one entity is being rendered.
 */
public void printKsqlEntityList(final List<KsqlEntity> entityList) {
  switch (outputFormat) {
    case JSON:
      printAsJson(entityList);
      break;
    case TABULAR:
      // Only echo the originating statement when the output would otherwise
      // be ambiguous, i.e. when several entities are printed together.
      final boolean multiple = entityList.size() > 1;
      entityList.forEach(entity -> {
        writer().println();
        if (multiple) {
          writer().println(entity.getStatementText());
        }
        printAsTable(entity);
      });
      break;
    default:
      throw new RuntimeException(String.format(
          "Unexpected output format: '%s'",
          outputFormat.name()
      ));
  }
}
// Approval test: the rendering of a TerminateQueryEntity must match the
// checked-in approved output file.
@Test
public void shouldPrintTerminateQuery() {
  // Given:
  final KsqlEntity entity = new TerminateQueryEntity("statementText", "queryId", true);

  // When:
  console.printKsqlEntityList(ImmutableList.of(entity));

  // Then:
  final String output = terminal.getOutputString();
  Approvals.verify(output, approvalOptions);
}
/**
 * Executes this sub-command against the given admin client.
 *
 * @param admin the Kafka Admin client used to issue requests
 * @param ns    the parsed command-line arguments for this sub-command
 * @param out   the stream command output is written to
 * @throws Exception if the command fails for any reason
 */
abstract void execute(Admin admin, Namespace ns, PrintStream out) throws Exception;
// Verifies that "find-hanging" still reports a hanging transaction when the same
// producer has an ongoing transaction for a *different* partition set than the one
// found when describing producers: that in-progress transaction must not filter
// out the hanging one.
@Test
public void testFindHangingDoesNotFilterByTransactionInProgressWithDifferentPartitions() throws Exception {
    TopicPartition topicPartition = new TopicPartition("foo", 5);
    String[] args = new String[]{
        "--bootstrap-server",
        "localhost:9092",
        "find-hanging",
        "--topic",
        topicPartition.topic(),
        "--partition",
        String.valueOf(topicPartition.partition())
    };

    long producerId = 132L;
    short producerEpoch = 5;
    // Last producer activity was an hour ago, well past any timeout.
    long lastTimestamp = time.milliseconds() - TimeUnit.MINUTES.toMillis(60);
    int coordinatorEpoch = 19;
    long txnStartOffset = 29384L;

    expectDescribeProducers(
        topicPartition,
        producerId,
        producerEpoch,
        lastTimestamp,
        OptionalInt.of(coordinatorEpoch),
        OptionalLong.of(txnStartOffset)
    );

    String transactionalId = "bar";
    TransactionListing listing = new TransactionListing(
        transactionalId,
        producerId,
        TransactionState.ONGOING
    );

    expectListTransactions(
        new ListTransactionsOptions().filterProducerIds(singleton(producerId)),
        singletonMap(1, Collections.singletonList(listing))
    );

    // Although there is a transaction in progress from the same
    // producer epoch, it does not include the topic partition we
    // found when describing producers.
    TransactionDescription description = new TransactionDescription(
        1,
        TransactionState.ONGOING,
        producerId,
        producerEpoch,
        60000,
        OptionalLong.of(time.milliseconds()),
        singleton(new TopicPartition("foo", 10))
    );
    expectDescribeTransactions(singletonMap(transactionalId, description));

    execute(args);
    assertNormalExit();

    assertHangingTransaction(
        topicPartition,
        producerId,
        producerEpoch,
        coordinatorEpoch,
        txnStartOffset,
        lastTimestamp
    );
}
/**
 * Invokes the function with a default scale of zero decimal places, delegating
 * to the two-argument overload.
 */
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) {
    return invoke(n, BigDecimal.ZERO);
}
// Rounding up at an explicit scale of 6: 10.123456789 becomes 10.123457.
@Test
void invokeLargerScale() {
    FunctionTestUtil.assertResult(roundUpFunction.invoke(BigDecimal.valueOf(10.123456789), BigDecimal.valueOf(6)),
            BigDecimal.valueOf(10.123457));
}
/**
 * Clears the sorter back to its empty state: resets all record bookkeeping,
 * returns every buffer page to the free list, then re-acquires a single segment
 * as the current write target.
 */
@Override
public void reset() {
    // reset all offsets
    this.numRecords = 0;
    this.currentSortBufferOffset = 0;
    this.sortBufferBytes = 0;

    // return all memory
    this.freeMemory.addAll(this.sortBuffer);
    this.sortBuffer.clear();

    // grab first buffers
    this.currentSortBufferSegment = nextMemorySegment();
    this.sortBuffer.add(this.currentSortBufferSegment);
    this.outView.set(this.currentSortBufferSegment);
}
// Verifies that reset() returns the sorter to a fully usable empty state: after a
// reset the buffer accepts exactly as many records as before, and the records read
// back match what was written.
@Test
void testReset() throws Exception {
    final int numSegments = MEMORY_SIZE / MEMORY_PAGE_SIZE;
    final List<MemorySegment> memory = this.memoryManager.allocatePages(new DummyInvokable(), numSegments);

    FixedLengthRecordSorter<IntPair> sorter = newSortBuffer(memory);
    RandomIntPairGenerator generator = new RandomIntPairGenerator(SEED);

    // write the buffer full with the first set of records
    IntPair record = new IntPair();
    int num = -1;
    do {
        generator.next(record);
        num++;
    } while (sorter.write(record) && num < 3354624);

    sorter.reset();

    // write a second sequence of records. since the values are of fixed length, we must be able
    // to write an equal number
    generator.reset();

    // write the buffer full with the first set of records
    int num2 = -1;
    do {
        generator.next(record);
        num2++;
    } while (sorter.write(record) && num2 < 3354624);

    assertThat(num2)
            .withFailMessage(
                    "The number of records written after the reset was not the same as before.")
            .isEqualTo(num);

    // re-read the records
    generator.reset();
    IntPair readTarget = new IntPair();

    int i = 0;
    while (i < num) {
        generator.next(record);
        readTarget = sorter.getRecord(readTarget, i++);

        int rk = readTarget.getKey();
        int gk = record.getKey();
        int rv = readTarget.getValue();
        int gv = record.getValue();

        assertThat(rk).withFailMessage("The re-read key is wrong %d", i).isEqualTo(gk);
        assertThat(rv).withFailMessage("The re-read value is wrong %d", i).isEqualTo(gv);
    }

    // release the memory occupied by the buffers
    sorter.dispose();
    this.memoryManager.release(memory);
}
/**
 * Attempts to update the correlation value for the given trace context, flushing
 * the new value to any pending correlation scopes when it actually changed.
 *
 * @return true if the value was updated; false if unchanged or context was null
 */
public boolean updateValue(@Nullable TraceContext context, @Nullable String value) {
  if (context == null) {
    return false;
  }
  final boolean changed = this.context.updateValue(this, context, value);
  if (changed) {
    // Propagate the new value to in-flight scopes only on an actual change.
    CorrelationFlushScope.flush(this, value);
  }
  return changed;
}
// A null context must be a safe no-op (no exception), so callers can pass the
// result of CurrentTraceContext.get() without a null guard.
@Test
void updateValue_context_null() {
    // permits unguarded use of CurrentTraceContext.get()
    REQUEST_ID.updateValue((TraceContext) null, null);
}
/**
 * Wires up the given register and pre-registers the metrics Shenyu reports:
 * request counters (total, by type, errors), an execution-latency histogram, and
 * restrict/circuit-breaker counters for Sentinel, Resilience4j, Hystrix and the
 * rate limiter.
 */
public static void register(final MetricsRegister metricsRegister) {
    MetricsReporter.metricsRegister = metricsRegister;
    MetricsReporter.registerCounter(LabelNames.REQUEST_TOTAL, "shenyu request total count");
    // The per-type request counter is labelled by request path and type.
    MetricsReporter.registerCounter(LabelNames.REQUEST_TYPE_TOTAL, new String[]{"path", "type"}, "shenyu http request type total count");
    MetricsReporter.registerCounter(LabelNames.REQUEST_THROW_TOTAL, "shenyu request error total count");
    MetricsReporter.registerHistogram(LabelNames.EXECUTE_LATENCY_NAME, "the shenyu executor latency millis");
    MetricsReporter.registerCounter(LabelNames.SENTINEL_REQUEST_RESTRICT_TOTAL, "shenyu sentinel request restrict total count");
    MetricsReporter.registerCounter(LabelNames.SENTINEL_REQUEST_CIRCUITBREAKER_TOTAL, "shenyu sentinel circuitbreaker request total count");
    MetricsReporter.registerCounter(LabelNames.RESILIENCE4J_REQUEST_RESTRICT_TOTAL, "shenyu resilience4j request restrict total count");
    MetricsReporter.registerCounter(LabelNames.RESILIENCE4J_REQUEST_CIRCUITBREAKER_TOTAL, "shenyu resilience4j circuitbreaker request total count");
    MetricsReporter.registerCounter(LabelNames.HYSTRIX_REQUEST_CIRCUITBREAKER_TOTAL, "shenyu hystrix circuitbreaker request total count");
    MetricsReporter.registerCounter(LabelNames.RATELIMITER_REQUEST_RESTRICT_TOTAL, "shenyu ratelimiter request restrict total count");
}
// Verifies the counts of pre-registered metrics and that registerMetrics() adds
// exactly one more counter, gauge and histogram. (The initial histogram count of 3
// presumably includes registrations made elsewhere in setup — TODO confirm.)
@Test
public void testRegister() throws Exception {
    Map<String, Counter> map1 = getPrivateField(metricsRegister, "COUNTER_MAP", Map.class);
    Assertions.assertEquals(map1.size(), 9);
    Map<String, Histogram> map2 = getPrivateField(metricsRegister, "HISTOGRAM_MAP", Map.class);
    Assertions.assertEquals(map2.size(), 3);

    List<String> labels = new ArrayList<>();
    labels.add("shenyu_request_total");
    Collection<Metric> metrics = new ArrayList<>();
    metrics.add(new Metric(MetricType.COUNTER, "name1", DOCUMENT, labels));
    metrics.add(new Metric(MetricType.GAUGE, "name2", DOCUMENT, labels));
    metrics.add(new Metric(MetricType.HISTOGRAM, "name3", DOCUMENT, labels));
    MetricsReporter.registerMetrics(metrics);

    Map<String, Counter> map3 = getPrivateField(metricsRegister, "COUNTER_MAP", Map.class);
    Assertions.assertEquals(map3.size(), 10);
    Map<String, Histogram> map4 = getPrivateField(metricsRegister, "HISTOGRAM_MAP", Map.class);
    Assertions.assertEquals(map4.size(), 4);
    Map<String, Gauge> map5 = getPrivateField(metricsRegister, "GAUGE_MAP", Map.class);
    Assertions.assertEquals(map5.size(), 3);
}
/**
 * Warns about appenders nested inside other appenders, which is usually a
 * configuration mistake. Nesting under a SiftingAppender is legitimate and is
 * therefore excluded from the warning.
 */
@Override
public void check(Model model) {
    if (model == null) {
        return;
    }

    final List<Model> appenderModels = new ArrayList<>();
    deepFindAllModelsOfType(AppenderModel.class, appenderModels, model);

    // Keep only nestings whose outer appender is not a SiftingAppender.
    final List<Pair<Model, Model>> offenders =
            deepFindNestedSubModelsOfType(AppenderModel.class, appenderModels)
                    .stream()
                    .filter(pair -> !isSiftingAppender(pair.first))
                    .collect(Collectors.toList());

    if (offenders.isEmpty()) {
        return;
    }

    addWarn(NESTED_APPENDERS_WARNING);
    offenders.forEach(pair -> addWarn(
            "Appender at line " + pair.first.getLineNumber()
                    + " contains a nested appender at line " + pair.second.getLineNumber()));
}
// A non-sifting appender containing a nested appender must produce the generic
// nesting warning plus a per-occurrence warning naming both line numbers.
@Test
public void nestedAppender() {
    TopModel topModel = new TopModel();
    AppenderModel appenderModel0 = new AppenderModel();
    appenderModel0.setLineNumber(1);
    topModel.addSubModel(appenderModel0);
    AppenderModel appenderModel1 = new AppenderModel();
    appenderModel1.setLineNumber(2);
    appenderModel0.addSubModel(appenderModel1);

    awasc.check(topModel);
    statusChecker.assertContainsMatch(Status.WARN, NESTED_APPENDERS_WARNING);
    statusChecker.assertContainsMatch(Status.WARN, "Appender at line 1");
}
/**
 * Creates a provider for a state serializer that is registered for the first
 * time, i.e. without a prior serializer snapshot to restore from.
 *
 * @param registeredStateSerializer the newly registered serializer
 * @param <T> the type of state handled by the serializer
 */
public static <T> StateSerializerProvider<T> fromNewRegisteredSerializer(
        TypeSerializer<T> registeredStateSerializer) {
    return new EagerlyRegisteredStateSerializerProvider<>(registeredStateSerializer);
}
// An eagerly-registered provider has no restored serializer snapshot, so asking
// for the previous schema's serializer must fail.
@Test
void testPreviousSchemaSerializerForEagerlyRegisteredStateSerializerProvider() {
    StateSerializerProvider<TestType> testProvider =
            StateSerializerProvider.fromNewRegisteredSerializer(
                    new TestType.V1TestTypeSerializer());

    // this should fail with an exception
    assertThatThrownBy(testProvider::previousSchemaSerializer)
            .isInstanceOf(UnsupportedOperationException.class);
}
/**
 * Returns the Redis Cluster hash slot for the given key by issuing a
 * {@code CLUSTER KEYSLOT} command and blocking on the async result.
 */
@Override
public Integer clusterGetSlotForKey(byte[] key) {
    // readAsync with a null key routes the command without slot-based node selection.
    RFuture<Integer> f = executorService.readAsync((String)null, StringCodec.INSTANCE, RedisCommands.KEYSLOT, key);
    return syncFuture(f);
}
// Smoke test: CLUSTER KEYSLOT returns a non-null slot for an arbitrary key.
@Test
public void testClusterGetSlotForKey() {
    Integer slot = connection.clusterGetSlotForKey("123".getBytes());
    assertThat(slot).isNotNull();
}
/**
 * Returns the number of fields of the given data type; zero for types without
 * fields (per the tests, e.g. ARRAY and atomic types).
 */
public static int getFieldCount(DataType dataType) {
    return getFieldDataTypes(dataType).size();
}
// getFieldCount: 3 for a three-field ROW, 2 for a structured type, and zero for
// non-composite types such as ARRAY and INT.
@Test
void testGetFieldCount() {
    assertThat(
            DataType.getFieldCount(
                    ROW(
                            FIELD("c0", BOOLEAN()),
                            FIELD("c1", DOUBLE()),
                            FIELD("c2", INT()))))
            .isEqualTo(3);
    assertThat(
            DataType.getFieldCount(
                    STRUCTURED(
                            DataTypesTest.SimplePojo.class,
                            FIELD("name", STRING()),
                            FIELD("count", INT().notNull().bridgedTo(int.class)))))
            .isEqualTo(2);
    assertThat(DataType.getFieldCount(ARRAY(INT()))).isZero();
    assertThat(DataType.getFieldCount(INT())).isZero();
}
/**
 * Compiles the given regular expression with default (no) flags, delegating to
 * the flag-taking overload.
 */
public static java.util.regex.Pattern compilePattern(String expression) {
    return compilePattern(expression, 0);
}
// Verifies that flags such as MULTILINE and CASE_INSENSITIVE are honoured by the
// flag-taking overload: '$' matches at a line break and case differences are ignored.
@Test
void testCompilePatternMultilineCaseIgnoreOK() {
    Pattern pattern = JMeterUtils.compilePattern("^some.*g$", Pattern.MULTILINE | Pattern.CASE_INSENSITIVE);
    assertTrue(pattern.matcher("abc\nsome good thing").find());
}
/**
 * Looks up a load balancer by name from the backing store.
 *
 * @param name the load balancer name; must not be null
 * @return the matching load balancer, or null if none exists
 */
@Override
public KubevirtLoadBalancer loadBalancer(String name) {
    checkArgument(name != null, ERR_NULL_LOAD_BALANCER_NAME);
    return kubevirtLoadBalancerStore.loadBalancer(name);
}
// Lookup by name returns the stored load balancer; an unknown name yields null.
@Test
public void testGetLoadBalancerByName() {
    createBasicLoadBalancers();
    assertNotNull("Load balancer did not match", target.loadBalancer(LB_NAME));
    assertNull("Load balancer did not match", target.loadBalancer(UNKNOWN_ID));
}
/**
 * Sets whether the sticky bit is present in this permission.
 */
public void setSticky(final boolean sticky) {
    this.sticky = sticky;
}
// The sticky bit is the 1-bit of the leading octal digit: 1xxx/3xxx/5xxx modes are
// sticky, 2xxx/6xxx are not; also checks mode/symbol rendering for mode 1000.
@Test
public void testSetSticky() {
    assertTrue(new Permission(1755).isSticky());
    assertTrue(new Permission(3755).isSticky());
    assertTrue(new Permission(5755).isSticky());
    assertFalse(new Permission(2755).isSticky());
    assertFalse(new Permission(6755).isSticky());
    assertEquals("1000", new Permission(1000).getMode());
    assertEquals("--------T", new Permission(1000).getSymbol());
}
/**
 * Encodes a LispGcAddress (geo-coordinate address) into JSON, emitting every
 * coordinate field plus, when present, the nested mapping address encoded with
 * its own codec.
 */
@Override
public ObjectNode encode(LispGcAddress address, CodecContext context) {
    checkNotNull(address, "LispGcAddress cannot be null");

    final ObjectNode node = context.mapper().createObjectNode();
    node.put(NORTH, address.isNorth());
    node.put(LATITUDE_DEGREE, address.getLatitudeDegree());
    node.put(LATITUDE_MINUTE, address.getLatitudeMinute());
    node.put(LATITUDE_SECOND, address.getLatitudeSecond());
    node.put(EAST, address.isEast());
    node.put(LONGITUDE_DEGREE, address.getLongitudeDegree());
    node.put(LONGITUDE_MINUTE, address.getLongitudeMinute());
    node.put(LONGITUDE_SECOND, address.getLongitudeSecond());
    node.put(ALTITUDE, address.getAltitude());

    // The nested mapping address is optional.
    if (address.getAddress() != null) {
        final JsonCodec<MappingAddress> addressCodec = context.codec(MappingAddress.class);
        node.set(ADDRESS, addressCodec.encode(address.getAddress(), context));
    }
    return node;
}
// Encoding a fully populated geo-coordinate address must produce JSON that the
// matcher confirms is consistent with the original address.
@Test
public void testLispGcAddressEncode() {
    LispGcAddress address = new LispGcAddress.Builder()
            .withIsNorth(NORTH)
            .withLatitudeDegree(LATITUDE_DEGREE)
            .withLatitudeMinute(LATITUDE_MINUTE)
            .withLatitudeSecond(LATITUDE_SECOND)
            .withIsEast(EAST)
            .withLongitudeDegree(LONGITUDE_DEGREE)
            .withLongitudeMinute(LONGITUDE_MINUTE)
            .withLongitudeSecond(LONGITUDE_SECOND)
            .withAltitude(ALTITUDE)
            .withAddress(MappingAddresses.ipv4MappingAddress(IPV4_PREFIX))
            .build();

    ObjectNode addressJson = gcAddressCodec.encode(address, context);
    assertThat("errors in encoding Geo Coordinate address JSON",
            addressJson, LispGcAddressJsonMatcher.matchesGcAddress(address));
}
/**
 * Returns the value of the given column (1-based index) from the merged result
 * set, converted to a short.
 */
@Override
public short getShort(final int columnIndex) throws SQLException {
    return (short) ResultSetUtils.convertValue(mergeResultSet.getValue(columnIndex, short.class), short.class);
}
// Verifies that getShort by column label resolves the label to column index 1 and
// returns the underlying value.
@Test
void assertGetShortWithColumnLabel() throws SQLException {
    when(mergeResultSet.getValue(1, short.class)).thenReturn((short) 1);
    assertThat(shardingSphereResultSet.getShort("label"), is((short) 1));
}
/**
 * Sets the level of this logger, recomputing its effective level and pushing the
 * change down to all child loggers. Passing {@code null} reverts this logger to
 * inheriting its parent's effective level; the root logger may never be null.
 * Synchronized so concurrent level changes observe a consistent hierarchy.
 */
public synchronized void setLevel(Level newLevel) {
    if (level == newLevel) {
        // nothing to do;
        return;
    }
    if (newLevel == null && isRootLogger()) {
        throw new IllegalArgumentException("The level of the root logger cannot be set to null");
    }

    level = newLevel;
    if (newLevel == null) {
        // Inherit: cache the parent's effective level and report it to listeners.
        effectiveLevelInt = parent.effectiveLevelInt;
        newLevel = parent.getEffectiveLevel();
    } else {
        effectiveLevelInt = newLevel.levelInt;
    }

    if (childrenList != null) {
        int len = childrenList.size();
        for (int i = 0; i < len; i++) {
            Logger child = (Logger) childrenList.get(i);
            // tell child to handle parent levelInt change
            child.handleParentLevelChange(effectiveLevelInt);
        }
    }
    // inform listeners
    loggerContext.fireOnLevelChange(this, newLevel);
}
// The root logger must always have a concrete level: setting it to null is rejected.
@Test
public void setRootLevelToNull() {
    try {
        root.setLevel(null);
        fail("The level of the root logger should not be settable to null");
    } catch (IllegalArgumentException e) {
        // expected
    }
}
/**
 * Converts a Flink logical type to an Avro schema, delegating with the default
 * of nullable fields (second argument {@code true}).
 */
public static Schema convertToSchema(LogicalType schema) {
    return convertToSchema(schema, true);
}
// Verifies that nested ROW types convert to nested nullable Avro records with
// deterministic generated record names (record_row1, record_row3_row3, ...).
@Test
void testRowTypeAvroSchemaConversion() {
    RowType rowType =
            (RowType)
                    ResolvedSchema.of(
                                    Column.physical(
                                            "row1",
                                            DataTypes.ROW(
                                                    DataTypes.FIELD("a", DataTypes.STRING()))),
                                    Column.physical(
                                            "row2",
                                            DataTypes.ROW(
                                                    DataTypes.FIELD("b", DataTypes.STRING()))),
                                    Column.physical(
                                            "row3",
                                            DataTypes.ROW(
                                                    DataTypes.FIELD(
                                                            "row3",
                                                            DataTypes.ROW(
                                                                    DataTypes.FIELD(
                                                                            "c",
                                                                            DataTypes
                                                                                    .STRING()))))))
                            .toSourceRowDataType()
                            .getLogicalType();
    Schema schema = AvroSchemaConverter.convertToSchema(rowType);
    assertThat(schema.toString(true))
            .isEqualTo(
                    "{\n"
                            + " \"type\" : \"record\",\n"
                            + " \"name\" : \"record\",\n"
                            + " \"namespace\" : \"org.apache.flink.avro.generated\",\n"
                            + " \"fields\" : [ {\n"
                            + " \"name\" : \"row1\",\n"
                            + " \"type\" : [ \"null\", {\n"
                            + " \"type\" : \"record\",\n"
                            + " \"name\" : \"record_row1\",\n"
                            + " \"fields\" : [ {\n"
                            + " \"name\" : \"a\",\n"
                            + " \"type\" : [ \"null\", \"string\" ],\n"
                            + " \"default\" : null\n"
                            + " } ]\n"
                            + " } ],\n"
                            + " \"default\" : null\n"
                            + " }, {\n"
                            + " \"name\" : \"row2\",\n"
                            + " \"type\" : [ \"null\", {\n"
                            + " \"type\" : \"record\",\n"
                            + " \"name\" : \"record_row2\",\n"
                            + " \"fields\" : [ {\n"
                            + " \"name\" : \"b\",\n"
                            + " \"type\" : [ \"null\", \"string\" ],\n"
                            + " \"default\" : null\n"
                            + " } ]\n"
                            + " } ],\n"
                            + " \"default\" : null\n"
                            + " }, {\n"
                            + " \"name\" : \"row3\",\n"
                            + " \"type\" : [ \"null\", {\n"
                            + " \"type\" : \"record\",\n"
                            + " \"name\" : \"record_row3\",\n"
                            + " \"fields\" : [ {\n"
                            + " \"name\" : \"row3\",\n"
                            + " \"type\" : [ \"null\", {\n"
                            + " \"type\" : \"record\",\n"
                            + " \"name\" : \"record_row3_row3\",\n"
                            + " \"fields\" : [ {\n"
                            + " \"name\" : \"c\",\n"
                            + " \"type\" : [ \"null\", \"string\" ],\n"
                            + " \"default\" : null\n"
                            + " } ]\n"
                            + " } ],\n"
                            + " \"default\" : null\n"
                            + " } ]\n"
                            + " } ],\n"
                            + " \"default\" : null\n"
                            + " } ]\n"
                            + "}");
}
/**
 * Validates every dependency against the allow-list of rules.
 * <p>
 * A dependency must match at least one rule; otherwise it is collected as an
 * offender. When {@code failOnUnmatched} is set, every rule must also match at
 * least one dependency, so stale rules are flagged too. All offenders are
 * reported in a single sorted error message rather than failing on the first.
 *
 * @throws EnforcerRuleException if any dependency is unmatched, or (with
 *         failOnUnmatched) any rule matched nothing
 */
static void validateDependencies(Set<Artifact> dependencies, Set<String> allowedRules, boolean failOnUnmatched)
        throws EnforcerRuleException {
    SortedSet<Artifact> unmatchedArtifacts = new TreeSet<>();
    Set<String> matchedRules = new HashSet<>();
    for (Artifact dependency : dependencies) {
        boolean matches = false;
        for (String rule : allowedRules) {
            // First matching rule wins; record it so unused rules can be detected.
            if (matches(dependency, rule)) {
                matchedRules.add(rule);
                matches = true;
                break;
            }
        }
        if (!matches) {
            unmatchedArtifacts.add(dependency);
        }
    }

    SortedSet<String> unmatchedRules = new TreeSet<>(allowedRules);
    unmatchedRules.removeAll(matchedRules);

    if (!unmatchedArtifacts.isEmpty() || (failOnUnmatched && !unmatchedRules.isEmpty())) {
        StringBuilder errorMessage = new StringBuilder("Vespa dependency enforcer failed:\n");
        if (!unmatchedArtifacts.isEmpty()) {
            errorMessage.append("Dependencies not matching any rule:\n");
            unmatchedArtifacts.forEach(a -> errorMessage.append(" - ").append(a.toString()).append('\n'));
        }
        if (failOnUnmatched && !unmatchedRules.isEmpty()) {
            errorMessage.append("Rules not matching any dependency:\n");
            unmatchedRules.forEach(p -> errorMessage.append(" - ").append(p).append('\n'));
        }
        throw new EnforcerRuleException(errorMessage.toString());
    }
}
// A provided-scope dependency that matches no allow rule must fail enforcement,
// and the error message must name the offending artifact. The test-scope artifact
// matches the rule and is therefore not reported.
@Test
void fails_on_unmatched_dependency() {
    Set<Artifact> dependencies = Set.of(
            artifact("com.yahoo.vespa", "container-core", "8.0.0", "provided"),
            artifact("com.yahoo.vespa", "testutils", "8.0.0", "test"));
    Set<String> rules = Set.of("com.yahoo.vespa:*:jar:*:test");

    EnforcerRuleException exception = assertThrows(
            EnforcerRuleException.class,
            () -> EnforceDependencies.validateDependencies(dependencies, rules, true));

    String expectedErrorMessage = """
            Vespa dependency enforcer failed:
            Dependencies not matching any rule:
             - com.yahoo.vespa:container-core:jar:8.0.0:provided
            """;
    assertEquals(expectedErrorMessage, exception.getMessage());
}
@Override public CompletableFuture<List<Long>> getSplitBoundary(BundleSplitOption bundleSplitOption) { NamespaceService service = bundleSplitOption.getService(); NamespaceBundle bundle = bundleSplitOption.getBundle(); List<Long> positions = bundleSplitOption.getPositions(); if (positions == null || positions.size() == 0) { throw new IllegalArgumentException("SplitBoundaries can't be empty"); } // sort all positions Collections.sort(positions); if (force) { return getBoundaries(bundle, positions); } else { return service.getOwnedTopicListForNamespaceBundle(bundle) .thenCompose(topics -> { if (topics == null || topics.size() <= 1) { return CompletableFuture.completedFuture(null); } return getBoundaries(bundle, positions); }); } }
// A null or empty positions list must be rejected with IllegalArgumentException,
// thrown synchronously (before any future is created), hence the plain try/catch.
@Test
public void testSpecifiedPositionsLessThan1() {
    SpecifiedPositionsBundleSplitAlgorithm algorithm = new SpecifiedPositionsBundleSplitAlgorithm();
    NamespaceService mockNamespaceService = mock(NamespaceService.class);
    NamespaceBundle mockNamespaceBundle = mock(NamespaceBundle.class);
    try {
        assertNull(algorithm.getSplitBoundary(
                new BundleSplitOption(mockNamespaceService, mockNamespaceBundle, null)).join());
        fail("Should fail since split boundaries is null");
    } catch (IllegalArgumentException e) {
        // ignore
    }
    try {
        assertNull(algorithm.getSplitBoundary(
                new BundleSplitOption(mockNamespaceService, mockNamespaceBundle, new ArrayList<>())).join());
        fail("Should fail since split boundaries is empty");
    } catch (IllegalArgumentException e) {
        // ignore
    }
}