focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Udf(description = "Returns the base 10 logarithm of an INT value.")
public Double log(
    @UdfParameter(
        value = "value",
        description = "the value get the base 10 logarithm of."
    ) final Integer value
) {
  // Widen to Double first so SQL NULL propagates through the delegate overload.
  final Double widened = (value == null) ? null : value.doubleValue();
  return log(widened);
}
// log of zero is -Infinity for every numeric overload (int, long, double),
// for both the base-10 variant and the explicit-base variant.
@Test
public void shouldHandleZeroValue() {
    assertThat(Double.isInfinite(udf.log(0)), is(true));
    assertThat(Double.isInfinite(udf.log(0L)), is(true));
    assertThat(Double.isInfinite(udf.log(0.0)), is(true));
    assertThat(Double.isInfinite(udf.log(15, 0)), is(true));
    assertThat(Double.isInfinite(udf.log(15L, 0L)), is(true));
    assertThat(Double.isInfinite(udf.log(15.0, 0.0)), is(true));
}
public String readStringLenenc() {
    // Read the raw length-encoded bytes, then decode with the session charset.
    final byte[] raw = readStringLenencByBytes();
    return new String(raw, charset);
}
// A length-encoded integer of 0 denotes a zero-length string payload.
@Test
void assertReadStringLenenc() {
    when(byteBuf.readUnsignedByte()).thenReturn((short) 0);
    assertThat(new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8).readStringLenenc(), is(""));
}
@SuppressWarnings("unchecked")
public static <T> ChannelOption<T> valueOf(String name) {
    // The pool interns options by name; the cast narrows the shared instance
    // to the caller's expected value type.
    final ChannelOption<T> option = (ChannelOption<T>) pool.valueOf(name);
    return option;
}
// valueOf must intern options: requesting the same name twice yields the
// exact same instance.
@Test
public void testValueOf() {
    String name = "test1";
    assertFalse(ChannelOption.exists(name));
    ChannelOption<String> option = ChannelOption.valueOf(name);
    ChannelOption<String> option2 = ChannelOption.valueOf(name);
    assertSame(option, option2);
}
// Fluent setter: records the rule key and returns this issue for chaining.
public DefaultIssue forRule(RuleKey ruleKey) {
    this.ruleKey = ruleKey;
    return this;
}
// Each location added via addLocation() becomes its own single-location flow
// of type UNDEFINED with no description.
@Test
public void build_issue_with_secondary_locations() {
    TextRange range1 = new DefaultTextRange(new DefaultTextPointer(1, 1), new DefaultTextPointer(1, 2));
    TextRange range2 = new DefaultTextRange(new DefaultTextPointer(2, 1), new DefaultTextPointer(2, 2));
    DefaultIssue issue = new DefaultIssue(project, storage)
            .at(new DefaultIssueLocation().on(inputFile))
            .addLocation(new DefaultIssueLocation().on(inputFile).at(range1).message("loc1"))
            .addLocation(new DefaultIssueLocation().on(inputFile).at(range2).message("loc2"))
            .forRule(RULE_KEY);
    assertThat(issue.flows())
            .extracting(Flow::type, Flow::description)
            .containsExactly(tuple(FlowType.UNDEFINED, null), tuple(FlowType.UNDEFINED, null));
    assertThat(issue.flows().get(0).locations()).hasSize(1);
    assertThat(issue.flows().get(1).locations()).hasSize(1);
}
// Synchronous facade: blocks on the async size computation and returns its result.
@Override
public long size() {
    return get(sizeAsync());
}
// size() reports the serialized payload size; the JSON codec adds quotes
// around the string, hence 5 bytes for the 4-character value "1234".
@Test
public void testSize() {
    RBucket<String> bucket = redisson.getBucket("testCompareAndSet");
    assertThat(bucket.size()).isZero();
    bucket.set("1234"); // json adds quotes
    assertThat(bucket.size()).isEqualTo(5);
}
// Creates a ParseFiles transform that maps each Avro GenericRecord to T via parseFn.
public static <T> ParseFiles<T> parseFilesGenericRecords(
        SerializableFunction<GenericRecord, T> parseFn) {
    return new AutoValue_ParquetIO_ParseFiles.Builder<T>().setParseFn(parseFn).build();
}
// Expanding a parse-to-GenericRecord transform must be rejected with a clear message.
@Test
@SuppressWarnings({"nullable", "ConstantConditions"} /* forced check. */)
public void testReadFilesUnknownSchemaFilesForGenericRecordThrowException() {
    IllegalArgumentException illegalArgumentException =
            assertThrows(
                    IllegalArgumentException.class,
                    () -> ParquetIO.parseFilesGenericRecords(GenericRecordPassthroughFn.create())
                            .expand(null));
    assertEquals(
            "Parse can't be used for reading as GenericRecord.",
            illegalArgumentException.getMessage());
}
// Resolves the guard expression; rejects unbindable expressions and raw type literals.
private static GuardedByExpression bind(JCTree.JCExpression exp, BinderContext context) {
    final GuardedByExpression bound = BINDER.visit(exp, context);
    checkGuardedBy(bound != null, String.valueOf(exp));
    checkGuardedBy(bound.kind() != Kind.TYPE_LITERAL, "Raw type literal: %s", exp);
    return bound;
}
// Binding "Outer.this.lock" from the inner class must resolve the
// qualified-this receiver before selecting the lock field.
@Test
public void outer_lock() {
    assertThat(
            bind(
                    "Test",
                    "Outer.this.lock",
                    forSourceLines(
                            "threadsafety/Test.java",
                            "package threadsafety;",
                            "class Outer {",
                            "  final Object lock = new Object();",
                            "  class Test {}",
                            "}")))
            .isEqualTo("(SELECT (SELECT (THIS) outer$threadsafety.Outer) lock)");
}
// Exposes the shared dispatcher metadata constant for this handler.
public static NotificationDispatcherMetadata newMetadata() {
    return METADATA;
}
// The dispatcher metadata must be registered under the new-issues dispatcher key.
@Test
public void verify_myNewIssues_notification_dispatcher_key() {
    NotificationDispatcherMetadata metadata = NewIssuesNotificationHandler.newMetadata();
    assertThat(metadata.getDispatcherKey()).isEqualTo(NEW_ISSUES_DISPATCHER_KEY);
}
String substituteParametersInSqlString(String sql, SqlParameterSource paramSource) { ParsedSql parsedSql = NamedParameterUtils.parseSqlStatement(sql); List<SqlParameter> declaredParams = NamedParameterUtils.buildSqlParameterList(parsedSql, paramSource); if (declaredParams.isEmpty()) { return sql; } for (SqlParameter parSQL: declaredParams) { String paramName = parSQL.getName(); if (!paramSource.hasValue(paramName)) { continue; } Object value = paramSource.getValue(paramName); if (value instanceof SqlParameterValue) { value = ((SqlParameterValue)value).getValue(); } if (!(value instanceof Iterable)) { String ValueForSQLQuery = getValueForSQLQuery(value); sql = sql.replace(":" + paramName, ValueForSQLQuery); continue; } //Iterable int count = 0; String valueArrayStr = ""; for (Object valueTemp: (Iterable)value) { if (count > 0) { valueArrayStr+=", "; } String valueForSQLQuery = getValueForSQLQuery(valueTemp); valueArrayStr += valueForSQLQuery; ++count; } sql = sql.replace(":" + paramName, valueArrayStr); } return sql; }
// String values are rendered as SQL literals; embedded single quotes are
// escaped by doubling ('Mery''s').
@Test
public void substituteParametersInSqlString_StringType() {
    String sql = "Select * from Table Where name = :name AND id = :id";
    String sqlToUse = "Select * from Table Where name = 'Mery''s' AND id = 'ID_1'";
    ctx.addStringParameter("name", "Mery's");
    ctx.addStringParameter("id", "ID_1");
    String sqlToUseResult = queryLog.substituteParametersInSqlString(sql, ctx);
    assertEquals(sqlToUse, sqlToUseResult);
}
/** Creates a new, empty, mutable {@link ArrayList}. */
public static <E> ArrayList<E> newArrayList() {
    final ArrayList<E> list = new ArrayList<E>();
    return list;
}
// The list built from a set must be an independent, mutable copy.
@Test
public void testItrArrayLists() {
    Set<String> set = new HashSet<>();
    set.add("record1");
    set.add("record2");
    set.add("record3");
    List<String> list = Lists.newArrayList(set);
    list.add("record4");
    Assert.assertEquals(4, list.size());
}
/**
 * Searches Maven Central for artifacts with the given SHA1 hash.
 *
 * <p>Results (including "not found", represented by an empty cached list) are
 * stored in the cache when one is configured.
 *
 * @param sha1 40-character hex SHA1 of the artifact
 * @return the matching artifacts; never empty (absence is signalled by exception)
 * @throws IOException              on connection or XML parsing failure
 * @throws TooManyRequestsException when Central answers HTTP 429
 * @throws FileNotFoundException    when no artifact matches the hash
 * @throws IllegalArgumentException when {@code sha1} is not a valid SHA1 string
 */
public List<MavenArtifact> searchSha1(String sha1) throws IOException, TooManyRequestsException {
    if (null == sha1 || !sha1.matches("^[0-9A-Fa-f]{40}$")) {
        throw new IllegalArgumentException("Invalid SHA1 format");
    }
    if (cache != null) {
        final List<MavenArtifact> cached = cache.get(sha1);
        if (cached != null) {
            LOGGER.debug("cache hit for Central: " + sha1);
            // An empty cached list records a previous "not found" answer.
            if (cached.isEmpty()) {
                throw new FileNotFoundException("Artifact not found in Central");
            }
            return cached;
        }
    }
    final List<MavenArtifact> result = new ArrayList<>();
    final URL url = new URL(String.format(query, rootURL, sha1));
    LOGGER.trace("Searching Central url {}", url);
    // Determine if we need to use a proxy. The rules:
    // 1) If the proxy is set, AND the setting is set to true, use the proxy
    // 2) Otherwise, don't use the proxy (either the proxy isn't configured,
    // or proxy is specifically set to false)
    final URLConnectionFactory factory = new URLConnectionFactory(settings);
    final HttpURLConnection conn = factory.createHttpURLConnection(url, useProxy);
    conn.setDoOutput(true);
    // JSON would be more elegant, but there's not currently a dependency
    // on JSON, so don't want to add one just for this
    conn.addRequestProperty("Accept", "application/xml");
    conn.connect();
    if (conn.getResponseCode() == 200) {
        boolean missing = false;
        try {
            final DocumentBuilder builder = XmlUtils.buildSecureDocumentBuilder();
            final Document doc = builder.parse(conn.getInputStream());
            final XPath xpath = XPathFactory.newInstance().newXPath();
            final String numFound = xpath.evaluate("/response/result/@numFound", doc);
            if ("0".equals(numFound)) {
                missing = true;
            } else {
                // One <doc> element per matching artifact.
                final NodeList docs = (NodeList) xpath.evaluate("/response/result/doc", doc, XPathConstants.NODESET);
                for (int i = 0; i < docs.getLength(); i++) {
                    final String g = xpath.evaluate("./str[@name='g']", docs.item(i));
                    LOGGER.trace("GroupId: {}", g);
                    final String a = xpath.evaluate("./str[@name='a']", docs.item(i));
                    LOGGER.trace("ArtifactId: {}", a);
                    final String v = xpath.evaluate("./str[@name='v']", docs.item(i));
                    // The 'ec' array lists the artifact's available extensions.
                    final NodeList attributes = (NodeList) xpath.evaluate("./arr[@name='ec']/str", docs.item(i), XPathConstants.NODESET);
                    boolean pomAvailable = false;
                    boolean jarAvailable = false;
                    for (int x = 0; x < attributes.getLength(); x++) {
                        final String tmp = xpath.evaluate(".", attributes.item(x));
                        if (".pom".equals(tmp)) {
                            pomAvailable = true;
                        } else if (".jar".equals(tmp)) {
                            jarAvailable = true;
                        }
                    }
                    final String centralContentUrl = settings.getString(Settings.KEYS.CENTRAL_CONTENT_URL);
                    String artifactUrl = null;
                    String pomUrl = null;
                    if (jarAvailable) {
                        //org/springframework/spring-core/3.2.0.RELEASE/spring-core-3.2.0.RELEASE.pom
                        artifactUrl = centralContentUrl + g.replace('.', '/') + '/' + a + '/'
                                + v + '/' + a + '-' + v + ".jar";
                    }
                    if (pomAvailable) {
                        //org/springframework/spring-core/3.2.0.RELEASE/spring-core-3.2.0.RELEASE.pom
                        pomUrl = centralContentUrl + g.replace('.', '/') + '/' + a + '/'
                                + v + '/' + a + '-' + v + ".pom";
                    }
                    result.add(new MavenArtifact(g, a, v, artifactUrl, pomUrl));
                }
            }
        } catch (ParserConfigurationException | IOException | SAXException | XPathExpressionException e) {
            // Anything else is jacked up XML stuff that we really can't recover from well
            final String errorMessage = "Failed to parse MavenCentral XML Response: " + e.getMessage();
            throw new IOException(errorMessage, e);
        }
        if (missing) {
            // Cache the empty result so repeated lookups short-circuit above.
            if (cache != null) {
                cache.put(sha1, result);
            }
            throw new FileNotFoundException("Artifact not found in Central");
        }
    } else if (conn.getResponseCode() == 429) {
        final String errorMessage = "Too many requests sent to MavenCentral; additional requests are being rejected.";
        throw new TooManyRequestsException(errorMessage);
    } else {
        final String errorMessage = "Could not connect to MavenCentral (" + conn.getResponseCode() + "): " + conn.getResponseMessage();
        throw new IOException(errorMessage);
    }
    if (cache != null) {
        cache.put(sha1, result);
    }
    return result;
}
// A non-SHA1 string (wrong length/characters) must be rejected up front.
@Test(expected = IllegalArgumentException.class)
public void testMalformedSha1() throws Exception {
    searcher.searchSha1("invalid");
}
// Returns the live properties instance (not a defensive copy).
public Properties getProperties() {
    return properties;
}
// A freshly constructed MapStoreConfig starts with empty properties.
@Test
public void getProperties() {
    assertEquals(new Properties(), new MapStoreConfig().getProperties());
}
// Computes statistics for the current expression by dispatching this
// calculator as a visitor over the group expression's operator.
public void estimatorStats() {
    expressionContext.getOp().accept(this, expressionContext);
}
// With the predicate `date < 50` over a column whose (mocked) stats span
// [0, 100] and 100 rows, the estimator should halve the row count and clamp
// the column's max value to the predicate bound.
@Test
public void testLogicalIcebergTableScan() {
    GlobalStateMgr globalStateMgr = connectContext.getGlobalStateMgr();
    Table icebergTable = globalStateMgr.getMetadataMgr().getTable("iceberg0", "partitioned_db", "t1");
    List<Column> columns = icebergTable.getColumns();
    Map<ColumnRefOperator, Column> refToColumn = Maps.newHashMap();
    Map<Column, ColumnRefOperator> columnToRef = Maps.newHashMap();
    ColumnRefOperator partitionColumn = null;
    for (int i = 0; i < columns.size(); i++) {
        Column column = columns.get(i);
        ColumnRefOperator ref = new ColumnRefOperator(i, column.getType(), column.getName(), true);
        if (column.getName().equals("date")) {
            partitionColumn = ref;
        }
        refToColumn.put(ref, column);
        columnToRef.put(column, ref);
    }
    BinaryPredicateOperator predicateOperator =
            new BinaryPredicateOperator(BinaryType.LT, partitionColumn, ConstantOperator.createInt(50));
    LogicalIcebergScanOperator icebergScanOperator =
            new LogicalIcebergScanOperator(icebergTable, refToColumn, columnToRef, -1,
                    predicateOperator, TableVersionRange.empty());
    GroupExpression groupExpression = new GroupExpression(icebergScanOperator, Lists.newArrayList());
    groupExpression.setGroup(new Group(0));
    ExpressionContext expressionContext = new ExpressionContext(groupExpression);
    // Stub internal statistics: every output column gets range [0, 100], 100 rows.
    new MockUp<MetadataMgr>() {
        @Mock
        public Statistics getTableStatisticsFromInternalStatistics(Table table, Map<ColumnRefOperator, Column> columns) {
            Statistics.Builder builder = Statistics.builder();
            icebergScanOperator.getOutputColumns().forEach(col ->
                    builder.addColumnStatistic(col, new ColumnStatistic(0, 100, 0.0, 5.0, 100))
            );
            builder.setOutputRowCount(100);
            return builder.build();
        }
    };
    StatisticsCalculator statisticsCalculator =
            new StatisticsCalculator(expressionContext, columnRefFactory, optimizerContext);
    statisticsCalculator.estimatorStats();
    Assert.assertEquals(50, expressionContext.getStatistics().getOutputRowCount(), 0.001);
    Assert.assertEquals(50, expressionContext.getStatistics()
            .getColumnStatistic(partitionColumn).getMaxValue(), 0.001);
    Assert.assertTrue(optimizerContext.isObtainedFromInternalStatistics());
    // Reset the shared flag so later tests are not affected.
    optimizerContext.setObtainedFromInternalStatistics(false);
}
// Fluent setter: checkBackupCount validates the requested count against the
// current asyncBackupCount (an out-of-range value raises IllegalArgumentException,
// as exercised by the paired test).
public CacheConfig<K, V> setBackupCount(int backupCount) {
    this.backupCount = checkBackupCount(backupCount, asyncBackupCount);
    return this;
}
// A backup count far above the allowed maximum must be rejected.
@Test(expected = IllegalArgumentException.class)
public void setBackupCount_whenTooLarge() {
    CacheConfig config = new CacheConfig();
    config.setBackupCount(200); //max allowed is 6..
}
// Persists (or updates) a rule; a rule without a selectorId is rejected
// with an error message instead of being subscribed.
@PostMapping("/plugin/rule/saveOrUpdate")
public Mono<String> saveRule(@RequestBody final RuleData ruleData) {
    final String selectorId = ruleData.getSelectorId();
    if (StringUtils.isEmpty(selectorId)) {
        return Mono.just("Error: please add selectorId!");
    }
    final RuleData normalized = buildDefaultRuleData(ruleData);
    subscriber.onRuleSubscribe(normalized);
    return Mono.just(ruleData.getId());
}
// Posting valid rule data caches it under its selectorId; posting an empty
// body must yield the selectorId-missing error message.
@Test
public void testSaveRule() throws Exception {
    final String testSelectorId = "testSaveRuleId";
    final String testRuleId = "ruleId";
    final RuleData ruleData = createRuleData(testSelectorId, testRuleId);
    this.mockMvc
            .perform(MockMvcRequestBuilders.post("/shenyu/plugin/rule/saveOrUpdate")
                    .content(GsonUtils.getGson().toJson(ruleData))
                    .contentType(MediaType.APPLICATION_JSON))
            .andExpect(status().isOk())
            .andReturn();
    final List<RuleData> selectorId = baseDataCache.obtainRuleData(testSelectorId);
    assertThat(selectorId.get(0).getSelectorId()).isEqualTo(testSelectorId);
    Object result = this.mockMvc
            .perform(MockMvcRequestBuilders.post("/shenyu/plugin/rule/saveOrUpdate")
                    .content("{}")
                    .contentType(MediaType.APPLICATION_JSON))
            .andExpect(status().isOk())
            .andReturn()
            .getAsyncResult();
    assertThat(result).isEqualTo("Error: please add selectorId!");
}
// Parses cluster command-line options (e.g. --configDir) out of args and
// loads the common Flink configuration from the resolved directory.
public static Configuration loadConfiguration(String[] args) throws FlinkException {
    return ConfigurationParserUtils.loadCommonConfiguration(
            filterCmdArgs(args, ClusterConfigurationParserFactory.options()),
            BashJavaUtils.class.getSimpleName());
}
// The long option form (--configDir) must be accepted and the configuration
// loaded from that directory.
@TestTemplate
void testLoadConfigurationConfigDirLongOpt() throws Exception {
    String[] args = {"--configDir", confDir.toFile().getAbsolutePath()};
    Configuration configuration = FlinkConfigLoader.loadConfiguration(args);
    verifyConfiguration(configuration, TEST_CONFIG_KEY, TEST_CONFIG_VALUE);
}
/**
 * Filters the given metadata in place (e.g. removing or retaining fields).
 *
 * @param metadata the metadata object to mutate
 * @throws TikaException if filtering fails
 */
public abstract void filter(Metadata metadata) throws TikaException;
// An include-filter keeps only the listed fields ("title") and drops the rest.
@Test
public void testIncludeFilter() throws Exception {
    Metadata metadata = new Metadata();
    metadata.set("title", "title");
    metadata.set("author", "author");
    MetadataFilter filter = new IncludeFieldMetadataFilter(set("title"));
    filter.filter(metadata);
    assertEquals(1, metadata.names().length);
    assertEquals("title", metadata.get("title"));
    assertNull(metadata.get("author"));
}
/**
 * Sends an extension API request to the identified plugin and funnels the
 * response through the supplied callback.
 *
 * @param pluginId                  id of the target plugin
 * @param requestName               name of the extension request
 * @param pluginInteractionCallback builds the request payload and handles the response
 * @return the callback's result for a successful plugin response
 * @throws RecordNotFoundException if no plugin of this extension type has the given id
 * @throws RuntimeException        for null, non-success, or otherwise failed interactions
 */
public <T> T submitRequest(String pluginId, String requestName, PluginInteractionCallback<T> pluginInteractionCallback) {
    if (!pluginManager.isPluginOfType(extensionName, pluginId)) {
        throw new RecordNotFoundException(format("Did not find '%s' plugin with id '%s'. Looks like plugin is missing", extensionName, pluginId));
    }
    try {
        // Negotiate the extension version first; the callback payloads may depend on it.
        String resolvedExtensionVersion = pluginManager.resolveExtensionVersion(pluginId, extensionName, goSupportedVersions);
        DefaultGoPluginApiRequest apiRequest = new DefaultGoPluginApiRequest(extensionName, resolvedExtensionVersion, requestName);
        apiRequest.setRequestBody(pluginInteractionCallback.requestBody(resolvedExtensionVersion));
        apiRequest.setRequestParams(pluginInteractionCallback.requestParams(resolvedExtensionVersion));
        apiRequest.setRequestHeaders(pluginInteractionCallback.requestHeaders(resolvedExtensionVersion));
        GoPluginApiResponse response = pluginManager.submitTo(pluginId, extensionName, apiRequest);
        if (response == null) {
            throw new RuntimeException("The plugin sent a null response");
        }
        if (DefaultGoApiResponse.SUCCESS_RESPONSE_CODE == response.responseCode()) {
            return pluginInteractionCallback.onSuccess(response.responseBody(), response.responseHeaders(), resolvedExtensionVersion);
        }
        // Non-success: notify the callback first, then fail loudly.
        pluginInteractionCallback.onFailure(response.responseCode(), response.responseBody(), resolvedExtensionVersion);
        throw new RuntimeException(format("The plugin sent a response that could not be understood by Go. Plugin returned with code '%s' and the following response: '%s'", response.responseCode(), response.responseBody()));
    } catch (RuntimeException e) {
        // Re-throw our own failures unchanged.
        throw e;
    } catch (Exception e) {
        throw new RuntimeException(format("Interaction with plugin with id '%s' implementing '%s' extension failed while requesting for '%s'. Reason: [%s]", pluginId, extensionName, requestName, e.getMessage()), e);
    }
}
// A non-200 plugin response must first trigger the onFailure callback and
// then surface as a RuntimeException.
@Test
void shouldInvokeOnFailureCallbackWhenResponseCodeOtherThan200() {
    PluginInteractionCallback pluginInteractionCallback = mock(PluginInteractionCallback.class);
    when(response.responseCode()).thenReturn(400);
    when(response.responseBody()).thenReturn("Error response");
    when(pluginManager.submitTo(eq(pluginId), eq(extensionName), any(GoPluginApiRequest.class))).thenReturn(response);
    when(pluginManager.resolveExtensionVersion(eq(pluginId), eq(extensionName), anyList())).thenReturn("1.0");
    assertThatCode(() -> helper.submitRequest(pluginId, requestName, pluginInteractionCallback))
            .isInstanceOf(RuntimeException.class);
    verify(pluginInteractionCallback).onFailure(400, "Error response", "1.0");
}
@Override public BasicTypeDefine reconvert(Column column) { BasicTypeDefine.BasicTypeDefineBuilder builder = BasicTypeDefine.builder() .name(column.getName()) .nullable(column.isNullable()) .comment(column.getComment()) .defaultValue(column.getDefaultValue()); switch (column.getDataType().getSqlType()) { case BOOLEAN: builder.columnType(PG_BOOLEAN); builder.dataType(PG_BOOLEAN); break; case TINYINT: case SMALLINT: builder.columnType(PG_SMALLINT); builder.dataType(PG_SMALLINT); break; case INT: builder.columnType(PG_INTEGER); builder.dataType(PG_INTEGER); break; case BIGINT: builder.columnType(PG_BIGINT); builder.dataType(PG_BIGINT); break; case FLOAT: builder.columnType(PG_REAL); builder.dataType(PG_REAL); break; case DOUBLE: builder.columnType(PG_DOUBLE_PRECISION); builder.dataType(PG_DOUBLE_PRECISION); break; case DECIMAL: if (column.getSourceType() != null && column.getSourceType().equalsIgnoreCase(PG_MONEY)) { builder.columnType(PG_MONEY); builder.dataType(PG_MONEY); } else { DecimalType decimalType = (DecimalType) column.getDataType(); long precision = decimalType.getPrecision(); int scale = decimalType.getScale(); if (precision <= 0) { precision = DEFAULT_PRECISION; scale = DEFAULT_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is precision less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (precision > MAX_PRECISION) { scale = (int) Math.max(0, scale - (precision - MAX_PRECISION)); precision = MAX_PRECISION; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum precision of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_PRECISION, precision, scale); } if (scale < 0) { scale = 0; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is scale less than 0, " + "it 
will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (scale > MAX_SCALE) { scale = MAX_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_SCALE, precision, scale); } builder.columnType(String.format("%s(%s,%s)", PG_NUMERIC, precision, scale)); builder.dataType(PG_NUMERIC); builder.precision(precision); builder.scale(scale); } break; case BYTES: builder.columnType(PG_BYTEA); builder.dataType(PG_BYTEA); break; case STRING: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.columnType(PG_TEXT); builder.dataType(PG_TEXT); } else if (column.getColumnLength() <= MAX_VARCHAR_LENGTH) { builder.columnType( String.format("%s(%s)", PG_VARCHAR, column.getColumnLength())); builder.dataType(PG_VARCHAR); } else { builder.columnType(PG_TEXT); builder.dataType(PG_TEXT); } break; case DATE: builder.columnType(PG_DATE); builder.dataType(PG_DATE); break; case TIME: Integer timeScale = column.getScale(); if (timeScale != null && timeScale > MAX_TIME_SCALE) { timeScale = MAX_TIME_SCALE; log.warn( "The time column {} type time({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to time({})", column.getName(), column.getScale(), MAX_SCALE, timeScale); } if (timeScale != null && timeScale > 0) { builder.columnType(String.format("%s(%s)", PG_TIME, timeScale)); } else { builder.columnType(PG_TIME); } builder.dataType(PG_TIME); builder.scale(timeScale); break; case TIMESTAMP: Integer timestampScale = column.getScale(); if (timestampScale != null && timestampScale > MAX_TIMESTAMP_SCALE) { timestampScale = MAX_TIMESTAMP_SCALE; log.warn( "The timestamp column {} type timestamp({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be 
converted to timestamp({})", column.getName(), column.getScale(), MAX_TIMESTAMP_SCALE, timestampScale); } if (timestampScale != null && timestampScale > 0) { builder.columnType(String.format("%s(%s)", PG_TIMESTAMP, timestampScale)); } else { builder.columnType(PG_TIMESTAMP); } builder.dataType(PG_TIMESTAMP); builder.scale(timestampScale); break; case ARRAY: ArrayType arrayType = (ArrayType) column.getDataType(); SeaTunnelDataType elementType = arrayType.getElementType(); switch (elementType.getSqlType()) { case BOOLEAN: builder.columnType(PG_BOOLEAN_ARRAY); builder.dataType(PG_BOOLEAN_ARRAY); break; case TINYINT: case SMALLINT: builder.columnType(PG_SMALLINT_ARRAY); builder.dataType(PG_SMALLINT_ARRAY); break; case INT: builder.columnType(PG_INTEGER_ARRAY); builder.dataType(PG_INTEGER_ARRAY); break; case BIGINT: builder.columnType(PG_BIGINT_ARRAY); builder.dataType(PG_BIGINT_ARRAY); break; case FLOAT: builder.columnType(PG_REAL_ARRAY); builder.dataType(PG_REAL_ARRAY); break; case DOUBLE: builder.columnType(PG_DOUBLE_PRECISION_ARRAY); builder.dataType(PG_DOUBLE_PRECISION_ARRAY); break; case BYTES: builder.columnType(PG_BYTEA); builder.dataType(PG_BYTEA); break; case STRING: builder.columnType(PG_TEXT_ARRAY); builder.dataType(PG_TEXT_ARRAY); break; default: throw CommonError.convertToConnectorTypeError( DatabaseIdentifier.POSTGRESQL, elementType.getSqlType().name(), column.getName()); } break; default: throw CommonError.convertToConnectorTypeError( DatabaseIdentifier.POSTGRESQL, column.getDataType().getSqlType().name(), column.getName()); } return builder.build(); }
// SHORT must map to Postgres smallint for both the column type and data type.
@Test
public void testReconvertShort() {
    Column column = PhysicalColumn.builder().name("test").dataType(BasicType.SHORT_TYPE).build();
    BasicTypeDefine typeDefine = PostgresTypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(PostgresTypeConverter.PG_SMALLINT, typeDefine.getColumnType());
    Assertions.assertEquals(PostgresTypeConverter.PG_SMALLINT, typeDefine.getDataType());
}
// Delegates to the local cache view of this map's near-cached entries.
@Override
public Map<K, V> getCachedMap() {
    return localCacheView.getCachedMap();
}
// With SyncStrategy.NONE, a removal on one map instance must not invalidate
// the other instance's local cache: each side only drops its own entry.
@Test
public void testNoInvalidationOnRemove() throws InterruptedException {
    LocalCachedMapOptions<String, Integer> options =
            LocalCachedMapOptions.<String, Integer>name("test")
                    .evictionPolicy(EvictionPolicy.LFU)
                    .cacheSize(5)
                    .syncStrategy(SyncStrategy.NONE);
    RLocalCachedMap<String, Integer> map1 = redisson.getLocalCachedMap(options);
    Map<String, Integer> cache1 = map1.getCachedMap();
    RLocalCachedMap<String, Integer> map2 = redisson.getLocalCachedMap(options);
    Map<String, Integer> cache2 = map2.getCachedMap();
    map1.put("1", 1);
    map1.put("2", 2);
    assertThat(map2.get("1")).isEqualTo(1);
    assertThat(map2.get("2")).isEqualTo(2);
    assertThat(cache1.size()).isEqualTo(2);
    assertThat(cache2.size()).isEqualTo(2);
    map1.remove("1");
    map2.remove("2");
    // Give any (unexpected) invalidation messages time to arrive.
    Thread.sleep(50);
    assertThat(cache1.size()).isEqualTo(1);
    assertThat(cache2.size()).isEqualTo(1);
}
private static HashSet<String> parseExcludeRouter(List<Router> customRouters) { HashSet<String> excludeKeys = new HashSet<String>(); if (CommonUtils.isNotEmpty(customRouters)) { for (Router router : customRouters) { if (router instanceof ExcludeRouter) { // 存在需要排除的过滤器 ExcludeRouter excludeRouter = (ExcludeRouter) router; String excludeName = excludeRouter.getExcludeName(); if (StringUtils.isNotEmpty(excludeName)) { String excludeRouterName = startsWithExcludePrefix(excludeName) ? excludeName.substring(1) : excludeName; if (StringUtils.isNotEmpty(excludeRouterName)) { excludeKeys.add(excludeRouterName); } } customRouters.remove(router); } } } if (!excludeKeys.isEmpty()) { if (LOGGER.isInfoEnabled()) { LOGGER.info("Find exclude routers: {}", excludeKeys); } } return excludeKeys; }
// Reflectively invokes the private static parseExcludeRouter: the single
// ExcludeRouter("-testChainRouter5") entry should be consumed and produce
// exactly one exclude key.
@Test
public void testParseExcludeRouter() throws InvocationTargetException, IllegalAccessException, NoSuchMethodException {
    String methodName = "parseExcludeRouter";
    List<Router> routers = new CopyOnWriteArrayList<>();
    routers.add(new TestChainRouter1());
    routers.add(new TestChainRouter2());
    routers.add(new TestChainRouter3());
    routers.add(new TestChainRouter4());
    routers.add(new ExcludeRouter("-testChainRouter5"));
    Method parseExcludeRouter = RouterChain.class.getDeclaredMethod(methodName, List.class);
    parseExcludeRouter.setAccessible(true);
    Object invokeResult = parseExcludeRouter.invoke(Router.class, routers);
    Set<String> result = new HashSet<>();
    result.add(invokeResult.toString());
    Assert.assertEquals(1, result.size());
    Assert.assertNotNull(invokeResult);
}
// Returns the identifier assigned to this session.
@Override
public String getSessionId() {
    return sessionID;
}
// A chunked-framing session must expose its session id and answer a get
// request with a reply matching the expected pattern.
@Test
public void testGetRequestWithChunkedFraming() {
    log.info("Starting get async");
    assertNotNull("Incorrect sessionId", session3.getSessionId());
    try {
        assertTrue("NETCONF get running command failed. ",
                GET_REPLY_PATTERN.matcher(session3.get(SAMPLE_REQUEST, null)).matches());
    } catch (NetconfException e) {
        e.printStackTrace();
        fail("NETCONF get test failed: " + e.getMessage());
    }
    log.info("Finishing get async");
}
// A task may move only when the destination carries the exact same value for
// every client tag the source has.
@Override
public boolean isAllowedTaskMovement(final ClientState source, final ClientState destination) {
    final Map<String, String> sourceClientTags = clientTagFunction.apply(source.processId(), source);
    final Map<String, String> destinationClientTags = clientTagFunction.apply(destination.processId(), destination);
    return sourceClientTags.entrySet().stream()
            .allMatch(tag -> tag.getValue().equals(destinationClientTags.get(tag.getKey())));
}
// Moving the standby from source (ZONE_1) to destination (ZONE_3) would place
// it in the same zone as the active task's client, so the move is declined.
@Test
public void shouldDeclineSingleTaskMoveWhenReduceClientTagCount() {
    final ClientState source = createClientStateWithCapacity(PID_1, 1,
            mkMap(mkEntry(ZONE_TAG, ZONE_1), mkEntry(CLUSTER_TAG, CLUSTER_1)));
    final ClientState destination = createClientStateWithCapacity(PID_2, 1,
            mkMap(mkEntry(ZONE_TAG, ZONE_3), mkEntry(CLUSTER_TAG, CLUSTER_1)));
    final ClientState clientState = createClientStateWithCapacity(PID_3, 1,
            mkMap(mkEntry(ZONE_TAG, ZONE_3), mkEntry(CLUSTER_TAG, CLUSTER_2)));
    final Map<ProcessId, ClientState> clientStateMap = mkMap(
            mkEntry(PID_1, source),
            mkEntry(PID_2, destination),
            mkEntry(PID_3, clientState)
    );
    final TaskId taskId = new TaskId(0, 0);
    clientState.assignActive(taskId);
    source.assignStandby(taskId);
    // Because destination has ZONE_3 which is the same as active's zone
    assertFalse(standbyTaskAssignor.isAllowedTaskMovement(source, destination, taskId, clientStateMap));
}
// Applies the Jolt transform for the target version to the given JSON
// document and returns the migrated JSON.
public String migrate(String oldJSON, int targetVersion) {
    LOGGER.debug("Migrating to version {}: {}", targetVersion, oldJSON);
    final Chainr chain = getTransformerFor(targetVersion);
    final Object migrated = chain.transform(JsonUtils.jsonToMap(oldJSON), getContextMap(targetVersion));
    final String migratedJSON = JsonUtils.toJsonString(migrated);
    LOGGER.debug("After migration to version {}: {}", targetVersion, migratedJSON);
    return migratedJSON;
}
// Migrating v1 -> v2 of a document with no pipeline locking defined must only
// bump target_version and change nothing else.
@Test
void shouldMigrateV1ToV2_ByChangingNothing_WhenThereIsNoPipelineLockingDefined() {
    ConfigRepoDocumentMother documentMother = new ConfigRepoDocumentMother();
    String oldJSON = documentMother.versionOneComprehensiveWithNoLocking();
    String transformedJSON = migrator.migrate(oldJSON, 2);
    String oldJSONWithVersionUpdatedForComparison =
            oldJSON.replaceAll("\"target_version\":\"1\"", "\"target_version\":\"2\"");
    assertThatJson(oldJSONWithVersionUpdatedForComparison).isEqualTo(transformedJSON);
}
public static Map<String, Object> convertValues(final Map<String, Object> data, final ConfigurationRequest configurationRequest) throws ValidationException { final Map<String, Object> configuration = Maps.newHashMapWithExpectedSize(data.size()); final Map<String, Map<String, Object>> configurationFields = configurationRequest.asList(); for (final Map.Entry<String, Object> entry : data.entrySet()) { final String field = entry.getKey(); final Map<String, Object> fieldDescription = configurationFields.get(field); if (fieldDescription == null || fieldDescription.isEmpty()) { throw new ValidationException(field, "Unknown configuration field description for field \"" + field + "\""); } final String type = (String) fieldDescription.get("type"); // Decide what to cast to. (string, bool, number) Object value; switch (type) { case "text": case "dropdown": value = entry.getValue() == null ? "" : String.valueOf(entry.getValue()); break; case "number": try { value = Integer.parseInt(String.valueOf(entry.getValue())); } catch (NumberFormatException e) { // If a numeric field is optional and not provided, use null as value if ("true".equals(String.valueOf(fieldDescription.get("is_optional")))) { value = null; } else { throw new ValidationException(field, e.getMessage()); } } break; case "boolean": value = "true".equalsIgnoreCase(String.valueOf(entry.getValue())); break; case "list": final List<?> valueList = entry.getValue() == null ? Collections.emptyList() : (List<?>) entry.getValue(); value = valueList.stream() .filter(o -> o != null && o instanceof String) .map(String::valueOf) .collect(Collectors.toList()); break; default: throw new ValidationException(field, "Unknown configuration field type \"" + type + "\""); } configuration.put(field, value); } return configuration; }
// An unregistered field type name must surface as a ValidationException
// carrying the offending type in its message.
@Test
public void convertValuesThrowsIllegalArgumentExceptionOnUnknwonType() throws Exception {
    thrown.expect(ValidationException.class);
    thrown.expectMessage("Unknown configuration field type \"dummy\"");
    final ConfigurationRequest cr = new ConfigurationRequest();
    cr.addField(new DummyField());
    final Map<String, Object> data = new HashMap<>();
    data.put("dummy", "foo");
    ConfigurationMapConverter.convertValues(data, cr);
}
// Convenience overload: defaults the upload attempt counter to 1.
public String getUploadUrlOfAgent(JobIdentifier jobIdentifier, String filePath) {
    return getUploadUrlOfAgent(jobIdentifier, filePath, 1);
}
// The generated URL must carry both the attempt counter and the build id.
@Test
public void shouldReturnRestfulUrlOfAgentWithAttemptCounter() {
    String uploadUrl1 = urlService.getUploadUrlOfAgent(jobIdentifier, "file", 1);
    assertThat(uploadUrl1, endsWith("/files/pipelineName/LATEST/stageName/LATEST/buildName/file?attempt=1&buildId=123"));
}
// Maps every column of the schema to a Hive FieldSchema (name, type string, doc).
public static List<FieldSchema> convert(Schema schema) {
    return schema.columns().stream()
            .map(column -> {
                final String typeName = convertToTypeString(column.type());
                return new FieldSchema(column.name(), typeName, column.doc());
            })
            .collect(Collectors.toList());
}
// Round-trip sanity: converting the simple Hive schema must yield a struct
// equal to the expected Iceberg schema's struct.
@Test
public void testSimpleSchemaConvertToIcebergSchema() {
    assertThat(HiveSchemaUtil.convert(SIMPLE_HIVE_SCHEMA).asStruct())
            .isEqualTo(SIMPLE_ICEBERG_SCHEMA.asStruct());
}
// Convenience overload: renders the field names using the default save configuration.
public static String printableFieldNamesToString() {
    return printableFieldNamesToString(_saveConfig);
}
@Test // header text should not change unexpectedly // if this test fails, check whether the default was intentionally changed or not public void testHeader() { final String HDR = "timeStamp,elapsed,label,responseCode,responseMessage,threadName,dataType,success," + "failureMessage,bytes,sentBytes,grpThreads,allThreads,URL,Latency,IdleTime,Connect"; assertEquals(HDR, CSVSaveService.printableFieldNamesToString(), "Header text has changed"); }
// Local DB locker never grants row locks; always reports not lockable.
// NOTE(review): cannot tell from here whether callers depend on this stub behavior.
@Override
public boolean isLockable(List<RowLock> rowLock) {
    return false;
}
// Confirms the LocalDBLocker stub reports rows as not lockable.
@Test
public void testIsLockable() {
    LocalDBLocker locker = new LocalDBLocker();
    List<RowLock> rowLocks = new ArrayList<>();
    boolean result = locker.isLockable(rowLocks);
    // Assert the result of the isLockable method
    Assertions.assertFalse(result);
}
/**
 * Starts a dedicated MiniCluster sized for the job's maximum parallelism, submits the
 * job, waits for initialization to finish, and returns a JobClient that shuts the
 * cluster down when the job finishes. On any submission failure the cluster is
 * shut down to avoid leaking resources.
 */
public CompletableFuture<JobClient> submitJob(
        JobGraph jobGraph, ClassLoader userCodeClassloader) throws Exception {
    MiniClusterConfiguration miniClusterConfig =
            getMiniClusterConfig(jobGraph.getMaximumParallelism());
    MiniCluster miniCluster = miniClusterFactory.apply(miniClusterConfig);
    miniCluster.start();
    return miniCluster
            .submitJob(jobGraph)
            .thenApplyAsync(
                    FunctionUtils.uncheckedFunction(
                            submissionResult -> {
                                // Block until the job leaves INITIALIZING so callers observe a usable job.
                                org.apache.flink.client.ClientUtils
                                        .waitUntilJobInitializationFinished(
                                                () ->
                                                        miniCluster
                                                                .getJobStatus(
                                                                        submissionResult
                                                                                .getJobID())
                                                                .get(),
                                                () ->
                                                        miniCluster
                                                                .requestJobResult(
                                                                        submissionResult
                                                                                .getJobID())
                                                                .get(),
                                                userCodeClassloader);
                                return submissionResult;
                            }))
            .thenApply(
                    result ->
                            new MiniClusterJobClient(
                                    result.getJobID(),
                                    miniCluster,
                                    userCodeClassloader,
                                    MiniClusterJobClient.JobFinalizationBehavior
                                            .SHUTDOWN_CLUSTER))
            .whenComplete(
                    (ignored, throwable) -> {
                        if (throwable != null) {
                            // We failed to create the JobClient and must shutdown to ensure
                            // cleanup.
                            shutDownCluster(miniCluster);
                        }
                    })
            .thenApply(Function.identity());
}
// Overriding a vertex's parallelism upwards via PARALLELISM_OVERRIDES should be
// honored by the per-job MiniCluster; cancelling the job shuts the cluster down.
@Test
void testTurnUpParallelismByOverwriteParallelism() throws Exception {
    JobVertex jobVertex = getBlockingJobVertex();
    JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(jobVertex);
    int overwriteParallelism = jobVertex.getParallelism() + 1;
    BlockingInvokable.reset(overwriteParallelism);
    Configuration configuration = new Configuration();
    configuration.set(
            PipelineOptions.PARALLELISM_OVERRIDES,
            ImmutableMap.of(
                    jobVertex.getID().toHexString(), String.valueOf(overwriteParallelism)));
    PerJobMiniClusterFactory perJobMiniClusterFactory = initializeMiniCluster(configuration);
    JobClient jobClient =
            perJobMiniClusterFactory
                    .submitJob(jobGraph, ClassLoader.getSystemClassLoader())
                    .get();
    // wait for tasks to be properly running
    BlockingInvokable.latch.await();
    jobClient.cancel().get();
    assertThatFuture(jobClient.getJobExecutionResult())
            .eventuallyFailsWith(ExecutionException.class)
            .withMessageContaining("Job was cancelled");
    assertThatMiniClusterIsShutdown();
}
/**
 * Converts a ProviderConfig's server list into Nacos Instance registrations.
 * Host falls back from virtual host to the bound host, and local/any-host
 * addresses are replaced with the machine's real local address.
 *
 * NOTE(review): returns null (not an empty list) when the provider has no
 * servers — callers must null-check; changing this could break existing callers.
 */
static List<Instance> convertProviderToInstances(ProviderConfig providerConfig) {
    @SuppressWarnings("unchecked")
    List<ServerConfig> servers = providerConfig.getServer();
    if (servers != null && !servers.isEmpty()) {
        List<Instance> instances = new ArrayList<Instance>();
        for (ServerConfig server : servers) {
            String serviceName = buildServiceName(providerConfig, server.getProtocol());
            Instance instance = new Instance();
            instance.setClusterName(DEFAULT_CLUSTER);
            instance.setServiceName(serviceName);
            // set host port
            String host = server.getVirtualHost();
            if (host == null) {
                host = server.getHost();
                if (NetUtils.isLocalHost(host) || NetUtils.isAnyHost(host)) {
                    host = SystemInfo.getLocalHost();
                }
            }
            instance.setIp(host);
            Integer port = server.getVirtualPort(); // virtual port
            if (port == null) {
                port = server.getPort();
            }
            instance.setPort(port);
            // set meta data
            Map<String, String> metaData = RegistryUtils.convertProviderToMap(providerConfig, server);
            instance.setMetadata(metaData);
            instances.add(instance);
        }
        return instances;
    }
    return null;
}
// A single-server provider should map to exactly one Instance whose cluster,
// port, metadata and composed service name match the provider configuration.
@Test
public void convertProviderToInstances() {
    ServerConfig serverConfig = new ServerConfig()
        .setProtocol("bolt")
        .setHost("0.0.0.0")
        .setPort(12200);
    ProviderConfig<?> provider = new ProviderConfig();
    provider.setInterfaceId("com.alipay.xxx.TestService")
        .setApplication(new ApplicationConfig().setAppName("test-server"))
        .setUniqueId("nacos-test")
        .setProxy("javassist")
        .setRegister(true)
        .setSerialization("hessian2")
        .setServer(serverConfig)
        .setWeight(222)
        .setTimeout(3000);
    List<Instance> instances = NacosRegistryHelper.convertProviderToInstances(provider);
    assertNotNull(instances);
    assertEquals(1, instances.size());
    Instance instance = instances.get(0);
    assertNotNull(instance);
    assertEquals(NacosRegistryHelper.DEFAULT_CLUSTER, instance.getClusterName());
    assertEquals(serverConfig.getPort(), instance.getPort());
    assertEquals(serverConfig.getProtocol(), instance.getMetadata().get(RpcConstants.CONFIG_KEY_PROTOCOL));
    assertEquals(provider.getSerialization(), instance.getMetadata().get(RpcConstants.CONFIG_KEY_SERIALIZATION));
    assertEquals(provider.getUniqueId(), instance.getMetadata().get(RpcConstants.CONFIG_KEY_UNIQUEID));
    assertEquals(provider.getWeight(), Integer.parseInt(instance.getMetadata().get(RpcConstants.CONFIG_KEY_WEIGHT)));
    assertEquals(provider.getTimeout(), Integer.parseInt(instance.getMetadata().get(RpcConstants.CONFIG_KEY_TIMEOUT)));
    assertEquals(provider.getSerialization(), instance.getMetadata().get(RpcConstants.CONFIG_KEY_SERIALIZATION));
    assertEquals(provider.getAppName(), instance.getMetadata().get(RpcConstants.CONFIG_KEY_APP_NAME));
    assertEquals("com.alipay.xxx.TestService:nacos-test:DEFAULT", instance.getServiceName());
}
// Walks the metric key's namespace, whose components are laid out as
// [names..., types...] (first half names, second half types), and registers
// nested metric groups accordingly: "generic" components add a plain group,
// "key" components consume the following name as the key's value.
@VisibleForTesting
static MetricGroup registerMetricGroup(MetricKey metricKey, MetricGroup metricGroup) {
    ArrayList<String> scopeComponents = getNameSpaceArray(metricKey);
    int size = scopeComponents.size();
    List<String> metricGroupNames = scopeComponents.subList(0, size / 2);
    List<String> metricGroupTypes = scopeComponents.subList(size / 2, size);
    for (int i = 0; i < metricGroupNames.size(); ++i) {
        if (metricGroupTypes.get(i).equals("MetricGroupType.generic")) {
            metricGroup = metricGroup.addGroup(metricGroupNames.get(i));
        } else if (metricGroupTypes.get(i).equals("MetricGroupType.key")) {
            // A key group consumes two adjacent name entries: the key and its value.
            metricGroup = metricGroup.addGroup(metricGroupNames.get(i), metricGroupNames.get(++i));
        }
    }
    return metricGroup;
}
// Registering a default-namespace metric key under "root" should yield the
// scope components [root, key, value].
@Test
void testRegisterMetricGroup() {
    MetricKey key = MetricKey.create("step", MetricName.named(DEFAULT_NAMESPACE, "name"));
    MetricRegistry registry = NoOpMetricRegistry.INSTANCE;
    GenericMetricGroup root =
            new GenericMetricGroup(
                    registry, new MetricGroupTest.DummyAbstractMetricGroup(registry), "root");
    MetricGroup metricGroup = FlinkMetricContainer.registerMetricGroup(key, root);
    assertThat(metricGroup.getScopeComponents())
            .isEqualTo(Arrays.asList("root", "key", "value").toArray());
}
// Selects the visible graph matching the configured directedness and delegates
// to the Graph-based execute overload.
@Override
public void execute(GraphModel graphModel) {
    final Graph graph;
    if (useUndirected) {
        graph = graphModel.getUndirectedGraphVisible();
    } else {
        graph = graphModel.getDirectedGraphVisible();
    }
    execute(graph);
}
// Running HITS on a minimal graph should create the HUB column on the node table.
@Test
public void testColumnCreation() {
    GraphModel graphModel = GraphGenerator.generateNullUndirectedGraph(1);
    Hits h = new Hits();
    h.execute(graphModel);
    Assert.assertTrue(graphModel.getNodeTable().hasColumn(Hits.HUB));
}
// Factory: creates a new Searcher bound to this index.
public Searcher searcher() {
    return new Searcher();
}
// A predicate containing the same conjunction twice must still match the document.
@Test
void requireThatPredicatesHavingMultipleIdenticalConjunctionsAreSupported() {
    PredicateIndexBuilder builder = new PredicateIndexBuilder(10);
    builder.indexDocument(DOC_ID, Predicate.fromString(
            "((a in ['b'] and c in ['d']) or x in ['y']) and ((a in ['b'] and c in ['d']) or z in ['w'])"));
    PredicateIndex index = builder.build();
    PredicateIndex.Searcher searcher = index.searcher();
    PredicateQuery query = new PredicateQuery();
    query.addFeature("a", "b");
    query.addFeature("c", "d");
    assertEquals("[42]", searcher.search(query).toList().toString());
}
/**
 * Builds the dotted column expression: optional owner prefix, the column name,
 * then any nested object attributes joined with dots.
 */
public String getExpression() {
    StringBuilder columnExpression = new StringBuilder(identifier.getValue());
    if (nestedObjectAttributes != null && !nestedObjectAttributes.isEmpty()) {
        for (IdentifierValue eachAttribute : nestedObjectAttributes) {
            columnExpression.append('.').append(eachAttribute.getValue());
        }
    }
    if (owner == null) {
        return columnExpression.toString();
    }
    return owner.getIdentifier().getValue() + "." + columnExpression;
}
// Quoted identifiers should be unquoted and joined as "owner.column".
@Test
void assertGetExpressionWithOwner() {
    ColumnSegment actual = new ColumnSegment(0, 0, new IdentifierValue("`col`"));
    actual.setOwner(new OwnerSegment(0, 0, new IdentifierValue("`tbl`")));
    assertThat(actual.getExpression(), is("tbl.col"));
}
// All 64 bits are set exactly when the bitwise complement of the mask is zero.
@Override
public boolean isAllSet() {
    return ~mask == 0L;
}
// An empty mask is not all-set; a mask of -1L (all bits) is.
@Test
public void testIsAllSet() {
    assertThat(new LongBitMask().isAllSet()).isFalse();
    assertThat(new LongBitMask(-1L).isAllSet()).isTrue();
}
/**
 * Applies the given log-level configuration to the root logger context:
 * property-driven levels first, then hardcoded levels, and finally loggers
 * that stay OFF unless global TRACE is enabled via the log-level property.
 *
 * @throws IllegalArgumentException if the config's root logger name is not the expected one
 */
public LoggerContext apply(LogLevelConfig logLevelConfig, Props props) {
    if (!ROOT_LOGGER_NAME.equals(logLevelConfig.getRootLoggerName())) {
        throw new IllegalArgumentException("Value of LogLevelConfig#rootLoggerName must be \"" + ROOT_LOGGER_NAME + "\"");
    }
    LoggerContext rootContext = getRootContext();
    logLevelConfig.getConfiguredByProperties().forEach((key, value) -> applyLevelByProperty(props, rootContext.getLogger(key), value));
    logLevelConfig.getConfiguredByHardcodedLevel().forEach((key, value) -> applyHardcodedLevel(rootContext, key, value));
    Level propertyValueAsLevel = getPropertyValueAsLevel(props, LOG_LEVEL.getKey());
    boolean traceGloballyEnabled = propertyValueAsLevel == Level.TRACE;
    logLevelConfig.getOffUnlessTrace().forEach(logger -> applyHardUnlessTrace(rootContext, logger, traceGloballyEnabled));
    return rootContext;
}
// The domain-specific property (web.es) must win over the process-level property (web).
@Test
public void apply_sets_domain_property_over_process_property_if_both_set() {
    LogLevelConfig config = newLogLevelConfig().levelByDomain("foo", WEB_SERVER, LogDomain.ES).build();
    props.set("sonar.log.level.web", "DEBUG");
    props.set("sonar.log.level.web.es", "TRACE");
    LoggerContext context = underTest.apply(config, props);
    assertThat(context.getLogger("foo").getLevel()).isEqualTo(Level.TRACE);
}
// Factory: a Read transform with no serialized token, an empty table id and a fresh Scan.
public static Read read() {
    return new Read(null, "", new Scan());
}
// Reading 1001 rows with a regex row filter matching ".*17.*" should return 20 rows.
@Test
public void testReadingWithFilter() throws Exception {
    final String table = tmpTable.getName();
    final int numRows = 1001;
    createAndWriteData(table, numRows);
    String regex = ".*17.*";
    Filter filter = new RowFilter(CompareFilter.CompareOp.EQUAL, new RegexStringComparator(regex));
    runReadTestLength(
        HBaseIO.read().withConfiguration(conf).withTableId(table).withFilter(filter),
        false, 20);
}
/**
 * Validates and processes a transactional offset commit: per partition, either
 * rejects oversized metadata (OFFSET_METADATA_TOO_LARGE) or appends an offset
 * commit record. Records the offset-commit metric when any records were produced.
 *
 * @throws ApiException if the transactional commit fails validation
 */
public CoordinatorResult<TxnOffsetCommitResponseData, CoordinatorRecord> commitTransactionalOffset(
    RequestContext context,
    TxnOffsetCommitRequestData request
) throws ApiException {
    validateTransactionalOffsetCommit(context, request);
    final TxnOffsetCommitResponseData response = new TxnOffsetCommitResponseData();
    final List<CoordinatorRecord> records = new ArrayList<>();
    final long currentTimeMs = time.milliseconds();
    request.topics().forEach(topic -> {
        final TxnOffsetCommitResponseTopic topicResponse = new TxnOffsetCommitResponseTopic().setName(topic.name());
        response.topics().add(topicResponse);
        topic.partitions().forEach(partition -> {
            if (isMetadataInvalid(partition.committedMetadata())) {
                topicResponse.partitions().add(new TxnOffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
            } else {
                log.debug("[GroupId {}] Committing transactional offsets {} for partition {}-{} from member {} with leader epoch {}.",
                    request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
                    request.memberId(), partition.committedLeaderEpoch());
                topicResponse.partitions().add(new TxnOffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.NONE.code()));
                final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
                    partition,
                    currentTimeMs
                );
                records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
                    request.groupId(),
                    topic.name(),
                    partition.partitionIndex(),
                    offsetAndMetadata,
                    metadataImage.features().metadataVersion()
                ));
            }
        });
    });
    if (!records.isEmpty()) {
        metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
    }
    return new CoordinatorResult<>(records, response);
}
// Committing transactional offsets with a member id unknown to the (empty)
// classic group must fail with UnknownMemberIdException.
@Test
public void testGenericGroupTransactionalOffsetCommitWithUnknownMemberId() {
    OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();
    // Create an empty group.
    context.groupMetadataManager.getOrMaybeCreateClassicGroup(
        "foo",
        true
    );
    assertThrows(UnknownMemberIdException.class, () -> context.commitTransactionalOffset(
        new TxnOffsetCommitRequestData()
            .setGroupId("foo")
            .setMemberId("member")
            .setGenerationId(10)
            .setTopics(Collections.singletonList(
                new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic()
                    .setName("bar")
                    .setPartitions(Collections.singletonList(
                        new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition()
                            .setPartitionIndex(0)
                            .setCommittedOffset(100L)
                            .setCommittedLeaderEpoch(10)
                            .setCommittedMetadata("metadata")
                    ))
            ))
    ));
}
// Handles the "subscribe" control action: subscription details are taken either
// from a DynamicRouterControlMessage body or from message headers. The resulting
// filter id is written back as the message body and the callback completed synchronously.
@InvokeOnHeader(CONTROL_ACTION_SUBSCRIBE)
public void performSubscribe(final Message message, AsyncCallback callback) {
    String filterId;
    if (message.getBody() instanceof DynamicRouterControlMessage) {
        filterId = subscribeFromMessage(dynamicRouterControlService, message, false);
    } else {
        filterId = subscribeFromHeaders(dynamicRouterControlService, message, false);
    }
    message.setBody(filterId);
    callback.done(false);
}
// A control message with an empty expression language must be rejected:
// no predicate bean can be resolved, so subscribe throws IllegalStateException.
@Test
void performSubscribeActionWithMessageInBodyWithEmptyExpressionLanguage() {
    String subscribeChannel = "testChannel";
    DynamicRouterControlMessage subMsg = DynamicRouterControlMessage.Builder.newBuilder()
            .subscribeChannel(subscribeChannel)
            .subscriptionId("testId")
            .destinationUri("mock://test")
            .priority(10)
            .predicate("true")
            .expressionLanguage("")
            .build();
    when(message.getBody()).thenReturn(subMsg);
    when(message.getBody(DynamicRouterControlMessage.class)).thenReturn(subMsg);
    Exception ex = assertThrows(IllegalStateException.class, () -> producer.performSubscribe(message, callback));
    assertEquals("Predicate bean could not be found", ex.getMessage());
}
// Indexed only when an ordered index matches the attribute AND the LIKE pattern
// can serve as an index prefix.
// NOTE(review): the prefix condition lives in expressionCanBeUsedAsIndexPrefix(),
// which is not visible here — see that method for the exact pattern rules.
@Override
public boolean isIndexed(QueryContext queryContext) {
    Index index = queryContext.matchIndex(attributeName, QueryContext.IndexMatchHint.PREFER_ORDERED);
    return index != null && index.isOrdered() && expressionCanBeUsedAsIndexPrefix();
}
// A pattern with an interior % wildcard ("sub%string%") cannot be a prefix scan,
// so the predicate must not use the (sorted) index.
@Test
public void likePredicateIsNotIndexed_whenPercentWildcardIsUsedMultipleTimes() {
    QueryContext queryContext = mock(QueryContext.class);
    when(queryContext.matchIndex("this", QueryContext.IndexMatchHint.PREFER_ORDERED)).thenReturn(createIndex(IndexType.SORTED));
    assertFalse(new LikePredicate("this", "sub%string%").isIndexed(queryContext));
}
// Clears the job's current-directory-changed listeners, but only when a file
// import is in progress; otherwise the listeners are left untouched.
@VisibleForTesting
void clearCurrentDirectoryChangedListenersWhenImporting( boolean importfile, JobMeta jobMeta ) {
  if ( !importfile ) {
    return;
  }
  jobMeta.clearCurrentDirectoryChangedListeners();
}
// When importing, the listeners must be cleared exactly once.
@Test
public void testClearCurrentDirectoryChangedListenersWhenImporting() {
  JobMeta jm = mock( JobMeta.class );
  jobFileListener.clearCurrentDirectoryChangedListenersWhenImporting( true, jm );
  verify( jm, times( 1 ) ).clearCurrentDirectoryChangedListeners();
}
// Sequential filtering convenience: delegates to the three-arg overload with
// allowReorderedResult=false.
public static <T> Collection<T> select(
        Iterable<T> iterable,
        Predicate<? super T> predicate) {
    return FJIterate.select(iterable, predicate, false);
}
// Selecting from a sorted set should preserve the source's collection type and
// contents, with and without reordering allowed.
@Test
public void selectSortedSet() {
    RichIterable<Integer> iterable = Interval.oneTo(200).toSortedSet();
    Collection<Integer> actual1 = FJIterate.select(iterable, Predicates.greaterThan(100));
    Collection<Integer> actual2 = FJIterate.select(iterable, Predicates.greaterThan(100), true);
    RichIterable<Integer> expected = iterable.select(Predicates.greaterThan(100));
    assertSame(expected.getClass(), actual1.getClass());
    assertSame(expected.getClass(), actual2.getClass());
    assertEquals(expected, actual1, expected.getClass().getSimpleName() + '/' + actual1.getClass().getSimpleName());
    assertEquals(expected, actual2, expected.getClass().getSimpleName() + '/' + actual2.getClass().getSimpleName());
}
/**
 * Builds the data-permission WHERE expression for the given table, based on the
 * logged-in admin user's department data permission. Returns null when no extra
 * condition is needed (no user, non-admin user, or ALL permission).
 */
@Override
public Expression getExpression(String tableName, Alias tableAlias) {
    // Only apply data-permission handling when a user is logged in
    LoginUser loginUser = SecurityFrameworkUtils.getLoginUser();
    if (loginUser == null) {
        return null;
    }
    // Only admin-type users are subject to data-permission handling
    if (ObjectUtil.notEqual(loginUser.getUserType(), UserTypeEnum.ADMIN.getValue())) {
        return null;
    }
    // Obtain the data permission
    DeptDataPermissionRespDTO deptDataPermission = loginUser.getContext(CONTEXT_KEY, DeptDataPermissionRespDTO.class);
    // Not cached in the login context — fetch it via the permission API
    if (deptDataPermission == null) {
        deptDataPermission = permissionApi.getDeptDataPermission(loginUser.getId());
        if (deptDataPermission == null) {
            log.error("[getExpression][LoginUser({}) 获取数据权限为 null]", JsonUtils.toJsonString(loginUser));
            throw new NullPointerException(String.format("LoginUser(%d) Table(%s/%s) 未返回数据权限",
                    loginUser.getId(), tableName, tableAlias.getName()));
        }
        // Cache it in the login context to avoid recomputation
        loginUser.setContext(CONTEXT_KEY, deptDataPermission);
    }
    // Case 1: ALL — the user can see everything, no condition needed
    if (deptDataPermission.getAll()) {
        return null;
    }
    // Case 2: can view neither any department nor self — 100% no permission
    if (CollUtil.isEmpty(deptDataPermission.getDeptIds())
            && Boolean.FALSE.equals(deptDataPermission.getSelf())) {
        return new EqualsTo(null, null); // WHERE null = null guarantees an empty result
    }
    // Case 3: build the Dept and User conditions, then combine them
    Expression deptExpression = buildDeptExpression(tableName, tableAlias, deptDataPermission.getDeptIds());
    Expression userExpression = buildUserExpression(tableName, tableAlias, deptDataPermission.getSelf(), loginUser.getId());
    if (deptExpression == null && userExpression == null) {
        // TODO: when no condition can be built, do not throw for now — return no data instead
        log.warn("[getExpression][LoginUser({}) Table({}/{}) DeptDataPermission({}) 构建的条件为空]",
                JsonUtils.toJsonString(loginUser), tableName, tableAlias, JsonUtils.toJsonString(deptDataPermission));
        // throw new NullPointerException(String.format("LoginUser(%d) Table(%s/%s) 构建的条件为空",
        //         loginUser.getId(), tableName, tableAlias.getName()));
        return EXPRESSION_NULL;
    }
    if (deptExpression == null) {
        return userExpression;
    }
    if (userExpression == null) {
        return deptExpression;
    }
    // When both dept ids and self-visibility are granted, OR the conditions:
    // WHERE (dept_id IN ? OR user_id = ?)
    return new Parenthesis(new OrExpression(deptExpression, userExpression));
}
@Test // 无数据权限时 public void testGetExpression_noDeptDataPermission() { try (MockedStatic<SecurityFrameworkUtils> securityFrameworkUtilsMock = mockStatic(SecurityFrameworkUtils.class)) { // 准备参数 String tableName = "t_user"; Alias tableAlias = new Alias("u"); // mock 方法 LoginUser loginUser = randomPojo(LoginUser.class, o -> o.setId(1L) .setUserType(UserTypeEnum.ADMIN.getValue())); securityFrameworkUtilsMock.when(SecurityFrameworkUtils::getLoginUser).thenReturn(loginUser); // mock 方法(permissionApi 返回 null) when(permissionApi.getDeptDataPermission(eq(loginUser.getId()))).thenReturn(null); // 调用 NullPointerException exception = assertThrows(NullPointerException.class, () -> rule.getExpression(tableName, tableAlias)); // 断言 assertEquals("LoginUser(1) Table(t_user/u) 未返回数据权限", exception.getMessage()); } }
// Lazily caches the standalone flag read from the JVM system property.
// NOTE(review): the lazy init is not synchronized — benign at worst (the
// property may be read twice), but confirm no caller needs a stronger guarantee.
public static boolean getStandaloneMode() {
    if (Objects.isNull(isStandalone)) {
        isStandalone = Boolean.getBoolean(STANDALONE_MODE_PROPERTY_NAME);
    }
    return isStandalone;
}
// With the system property unset, standalone mode must default to false.
@Test
public void getStandaloneMode() {
    boolean standaloneMode = EnvUtil.getStandaloneMode();
    Assert.isTrue(!standaloneMode);
}
/**
 * Strips the configured service base path prefix from the request path when
 * base-path stripping is enabled, ensuring the result still starts with "/".
 * Paths that do not start with the base path are returned unchanged.
 */
protected String stripBasePath(String requestPath, ContainerConfig config) {
    if (!config.isStripBasePath()) {
        return requestPath;
    }
    String basePath = config.getServiceBasePath();
    if (requestPath.startsWith(basePath)) {
        // Use substring instead of replaceFirst: the base path is a literal prefix,
        // whereas replaceFirst interprets it as a regex — metacharacters such as
        // '.', '+' or '(' in the configured path would corrupt the result.
        String newRequestPath = requestPath.substring(basePath.length());
        if (!newRequestPath.startsWith("/")) {
            newRequestPath = "/" + newRequestPath;
        }
        return newRequestPath;
    }
    return requestPath;
}
// Stripping disabled: path unchanged. Stripping enabled: the base-path prefix is
// removed; paths without the prefix pass through untouched.
@Test
void requestReader_stripBasePath() {
    ContainerConfig config = ContainerConfig.defaultConfig();
    String requestPath = "/" + BASE_PATH_MAPPING + ORDERS_URL;
    String finalPath = requestReader.stripBasePath(requestPath, config);
    assertNotNull(finalPath);
    assertEquals(requestPath, finalPath);
    config.setStripBasePath(true);
    config.setServiceBasePath(BASE_PATH_MAPPING);
    finalPath = requestReader.stripBasePath(requestPath, config);
    assertNotNull(finalPath);
    assertEquals(ORDERS_URL, finalPath);
    finalPath = requestReader.stripBasePath(ORDERS_URL, config);
    assertNotNull(finalPath);
    assertEquals(ORDERS_URL, finalPath);
}
// Fails fast with TableModifyInTransactionException when the execution context
// does not satisfy the transaction prerequisites (e.g. DDL inside a transaction).
public void checkExecutePrerequisites(final ExecutionContext executionContext) {
    ShardingSpherePreconditions.checkState(isValidExecutePrerequisites(executionContext), () -> new TableModifyInTransactionException(getTableName(executionContext)));
}
// Executing a PostgreSQL CREATE TABLE outside of a transaction must pass the
// prerequisite check without throwing.
@Test
void assertCheckExecutePrerequisitesWhenExecuteDDLNotInPostgreSQLTransaction() {
    when(transactionRule.getDefaultType()).thenReturn(TransactionType.LOCAL);
    when(connectionSession.getTransactionStatus().isInTransaction()).thenReturn(false);
    ExecutionContext executionContext = new ExecutionContext(
            new QueryContext(createPostgreSQLCreateTableStatementContext(), "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class)),
            Collections.emptyList(), mock(RouteContext.class));
    new ProxySQLExecutor(JDBCDriverType.STATEMENT, databaseConnectionManager, mock(DatabaseConnector.class), mockQueryContext()).checkExecutePrerequisites(executionContext);
}
// Appends a serialized end-of-segment event to the subpartition's cache and
// accounts the cached bytes, which may trigger a flush.
// NOTE(review): record.remaining() is read after the append — assumes the cache
// manager does not advance the buffer's position; confirm.
void appendEndOfSegmentEvent(ByteBuffer record, int subpartitionId) {
    subpartitionCacheManagers[subpartitionId].appendEndOfSegmentEvent(record);
    increaseNumCachedBytesAndCheckFlush(record.remaining(), true);
}
// Appending an end-of-segment event and closing the cache manager should flush
// exactly one event buffer that deserializes back to EndOfSegmentEvent.
@Test
void testAppendEndOfSegmentEvent() throws IOException {
    TestingTieredStorageMemoryManager memoryManager =
            new TestingTieredStorageMemoryManager.Builder().build();
    List<PartitionFileWriter.SubpartitionBufferContext> receivedBuffers = new ArrayList<>();
    TestingPartitionFileWriter partitionFileWriter =
            new TestingPartitionFileWriter.Builder()
                    .setWriteFunction(
                            (partitionId, subpartitionBufferContexts) -> {
                                receivedBuffers.addAll(subpartitionBufferContexts);
                                return FutureUtils.completedVoidFuture();
                            })
                    .build();
    DiskCacheManager diskCacheManager =
            new DiskCacheManager(
                    TieredStorageIdMappingUtils.convertId(new ResultPartitionID()),
                    1,
                    1024,
                    memoryManager,
                    partitionFileWriter);
    diskCacheManager.appendEndOfSegmentEvent(
            EventSerializer.toSerializedEvent(EndOfSegmentEvent.INSTANCE), 0);
    diskCacheManager.close();
    assertThat(receivedBuffers).hasSize(1);
    List<PartitionFileWriter.SegmentBufferContext> segmentBufferContexts =
            receivedBuffers.get(0).getSegmentBufferContexts();
    assertThat(segmentBufferContexts).hasSize(1);
    List<Tuple2<Buffer, Integer>> bufferAndIndexes =
            segmentBufferContexts.get(0).getBufferAndIndexes();
    assertThat(bufferAndIndexes).hasSize(1);
    Buffer buffer = bufferAndIndexes.get(0).f0;
    assertThat(buffer.isBuffer()).isFalse();
    AbstractEvent event =
            EventSerializer.fromSerializedEvent(
                    buffer.readOnlySlice().getNioBufferReadable(), getClass().getClassLoader());
    assertThat(event).isInstanceOf(EndOfSegmentEvent.class);
}
// Computes the full event diff between two cluster states: per-node events,
// the whole-cluster event, then derived bucket-space state events.
public static List<Event> computeEventDiff(final Params params) {
    final List<Event> events = new ArrayList<>();
    emitPerNodeDiffEvents(createBaselineParams(params), events);
    emitWholeClusterDiffEvent(createBaselineParams(params), events);
    emitDerivedBucketSpaceStatesDiffEvents(params, events);
    return events;
}
// Adding a new exhausted resource (cpu_brake_fluid) to an already feed-blocked
// cluster should emit exactly one node event describing the new exhaustion.
@Test
void added_exhaustion_in_feed_block_resource_set_emits_node_event() {
    final EventFixture fixture = EventFixture.createForNodes(3)
            .clusterStateBefore("distributor:3 storage:3")
            .feedBlockBefore(ClusterStateBundle.FeedBlock.blockedWith(
                    "we're closed", setOf(exhaustion(1, "oil"))))
            .clusterStateAfter("distributor:3 storage:3")
            .feedBlockAfter(ClusterStateBundle.FeedBlock.blockedWith(
                    "we're still closed", setOf(exhaustion(1, "oil"), exhaustion(1, "cpu_brake_fluid"))));
    final List<Event> events = fixture.computeEventDiff();
    assertThat(events.size(), equalTo(1));
    assertThat(events, hasItem(allOf(
            eventForNode(storageNode(1)),
            nodeEventWithDescription("Added resource exhaustion: cpu_brake_fluid on node 1 [unknown hostname] is 80.0% full (the configured limit is 70.0%)"),
            nodeEventForBaseline())));
}
// Delegates to the two-argument overload using the class-level LOG.
Object of(Object component) {
    return of(component, LOG);
}
// The key of a Class component is the Class object itself.
@Test
public void generate_key_of_object() {
    assertThat(keys.of(FakeComponent.class)).isEqualTo(FakeComponent.class);
}
/**
 * Parses a Kerberos principal of the form "primary[/instance][@REALM]".
 * A name that does not match the pattern but contains '@' is malformed;
 * otherwise it is treated as a bare short name with no host or realm.
 *
 * @throws IllegalArgumentException if the principal contains '@' but is malformed
 */
public static KerberosName parse(String principalName) {
    Matcher matcher = NAME_PARSER.matcher(principalName);
    if (matcher.matches()) {
        return new KerberosName(matcher.group(1), matcher.group(3), matcher.group(4));
    }
    if (principalName.contains("@")) {
        throw new IllegalArgumentException("Malformed Kerberos name: " + principalName);
    }
    return new KerberosName(principalName, null, null);
}
// The /U rule modifier must upper-case short names produced by each rule form
// (simple, substitution, global substitution, and group capture).
@Test
public void testToUpperCase() throws Exception {
    List<String> rules = Arrays.asList(
        "RULE:[1:$1]/U",
        "RULE:[2:$1](Test.*)s/ABC///U",
        "RULE:[2:$1](ABC.*)s/ABC/XYZ/g/U",
        "RULE:[2:$1](App\\..*)s/App\\.(.*)/$1/g/U",
        "RULE:[2:$1]/U",
        "DEFAULT"
    );
    KerberosShortNamer shortNamer = KerberosShortNamer.fromUnparsedRules("REALM.COM", rules);
    KerberosName name = KerberosName.parse("User@REALM.COM");
    assertEquals("USER", shortNamer.shortName(name));
    name = KerberosName.parse("TestABC/host@FOO.COM");
    assertEquals("TEST", shortNamer.shortName(name));
    name = KerberosName.parse("ABC_User_ABC/host@FOO.COM");
    assertEquals("XYZ_USER_XYZ", shortNamer.shortName(name));
    name = KerberosName.parse("App.SERVICE-name/example.com@REALM.COM");
    assertEquals("SERVICE-NAME", shortNamer.shortName(name));
    name = KerberosName.parse("User/root@REALM.COM");
    assertEquals("USER", shortNamer.shortName(name));
}
// Returns the table's column metadata.
// NOTE(review): exposes the internal list directly — callers could mutate it
// unless columnsMetadata is constructed immutable; confirm at the call site.
public List<ColumnMetadata> getColumnsMetadata() {
    return columnsMetadata;
}
// The example table should expose columns a (varchar) and b (bigint), in order.
@Test
public void testColumnMetadata() {
    assertEquals(exampleTable.getColumnsMetadata(), ImmutableList.of(
        new ColumnMetadata("a", createUnboundedVarcharType()),
        new ColumnMetadata("b", BIGINT)));
}
/**
 * Admin request handler: switches the commit log's madvise readahead mode.
 * Only MADV_RANDOM and MADV_NORMAL are accepted; MADV_NORMAL additionally
 * enables data-read-ahead on the message store. Any failure is reported
 * via a SYSTEM_ERROR response instead of propagating.
 */
private RemotingCommand setCommitLogReadaheadMode(ChannelHandlerContext ctx, RemotingCommand request) {
    final RemotingCommand response = RemotingCommand.createResponseCommand(null);
    LOGGER.info("setCommitLogReadaheadMode called by {}", RemotingHelper.parseChannelRemoteAddr(ctx.channel()));
    try {
        HashMap<String, String> extFields = request.getExtFields();
        if (null == extFields) {
            response.setCode(ResponseCode.SYSTEM_ERROR);
            response.setRemark("set commitlog readahead mode param error");
            return response;
        }
        int mode = Integer.parseInt(extFields.get(FIleReadaheadMode.READ_AHEAD_MODE));
        if (mode != LibC.MADV_RANDOM && mode != LibC.MADV_NORMAL) {
            response.setCode(ResponseCode.SYSTEM_ERROR);
            response.setRemark("set commitlog readahead mode param value error");
            return response;
        }
        MessageStore messageStore = this.brokerController.getMessageStore();
        if (messageStore instanceof DefaultMessageStore) {
            DefaultMessageStore defaultMessageStore = (DefaultMessageStore) messageStore;
            if (mode == LibC.MADV_NORMAL) {
                defaultMessageStore.getMessageStoreConfig().setDataReadAheadEnable(true);
            } else {
                defaultMessageStore.getMessageStoreConfig().setDataReadAheadEnable(false);
            }
            defaultMessageStore.getCommitLog().scanFileAndSetReadMode(mode);
        }
        response.setCode(ResponseCode.SUCCESS);
        response.setRemark("set commitlog readahead mode success, mode: " + mode);
    } catch (Exception e) {
        LOGGER.error("set commitlog readahead mode failed", e);
        response.setCode(ResponseCode.SYSTEM_ERROR);
        response.setRemark("set commitlog readahead mode failed");
    }
    return response;
}
// Exercises all handler branches: missing ext fields, unsupported mode value,
// no DefaultMessageStore configured, and the fully-mocked success path.
@Test
public void testSetCommitLogReadAheadMode() throws RemotingCommandException {
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.SET_COMMITLOG_READ_MODE, null);
    RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SYSTEM_ERROR);
    HashMap<String, String> extfields = new HashMap<>();
    extfields.put(FIleReadaheadMode.READ_AHEAD_MODE, String.valueOf(LibC.MADV_DONTNEED));
    request.setExtFields(extfields);
    response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SYSTEM_ERROR);
    extfields.clear();
    extfields.put(FIleReadaheadMode.READ_AHEAD_MODE, String.valueOf(LibC.MADV_NORMAL));
    request.setExtFields(extfields);
    response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
    this.brokerController.setMessageStore(defaultMessageStore);
    response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SYSTEM_ERROR);
    when(this.defaultMessageStore.getMessageStoreConfig()).thenReturn(messageStoreConfig);
    when(this.defaultMessageStore.getCommitLog()).thenReturn(commitLog);
    response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/**
 * Converts a raw Issue into a scanner-report issue and writes it, returning true
 * when the issue was kept. Issues are dropped (false) when suppressed by //NOSONAR,
 * when the rule is unknown/inactive, or when an issue filter rejects them.
 */
public boolean initAndAddIssue(Issue issue) {
    DefaultInputComponent inputComponent = (DefaultInputComponent) issue.primaryLocation().inputComponent();
    if (noSonar(inputComponent, issue)) {
        return false;
    }
    ActiveRule activeRule = activeRules.find(issue.ruleKey());
    if (activeRule == null) {
        // rule does not exist or is not enabled -> ignore the issue
        return false;
    }
    ScannerReport.Issue rawIssue = createReportIssue(issue, inputComponent.scannerId(), activeRule.severity());
    if (filters.accept(inputComponent, rawIssue)) {
        write(inputComponent.scannerId(), rawIssue);
        return true;
    }
    return false;
}
// Flows attached to an issue must be serialized with their type and description:
// untyped -> UNDEFINED/"", typed DATA keeps its description, EXECUTION with a null
// description serializes as "". Location details of the first flow are pinned too.
@Test
public void add_issue_flows_to_cache() {
    initModuleIssues();
    DefaultMessageFormatting messageFormatting = new DefaultMessageFormatting().start(0).end(4).type(CODE);
    DefaultIssue issue = new DefaultIssue(project)
        .at(new DefaultIssueLocation().on(file))
        // Flow without type
        .addFlow(List.of(new DefaultIssueLocation().on(file).at(file.selectLine(1)).message("Foo1", List.of(messageFormatting)),
            new DefaultIssueLocation().on(file).at(file.selectLine(2)).message("Foo2")))
        // Flow with type and description
        .addFlow(List.of(new DefaultIssueLocation().on(file)), NewIssue.FlowType.DATA, "description")
        // Flow with execution type and no description
        .addFlow(List.of(new DefaultIssueLocation().on(file)), NewIssue.FlowType.EXECUTION, null)
        .forRule(JAVA_RULE_KEY);
    when(filters.accept(any(InputComponent.class), any(ScannerReport.Issue.class))).thenReturn(true);
    moduleIssues.initAndAddIssue(issue);
    ArgumentCaptor<ScannerReport.Issue> argument = ArgumentCaptor.forClass(ScannerReport.Issue.class);
    verify(reportPublisher.getWriter()).appendComponentIssue(eq(file.scannerId()), argument.capture());
    List<ScannerReport.Flow> writtenFlows = argument.getValue().getFlowList();
    assertThat(writtenFlows)
        .extracting(ScannerReport.Flow::getDescription, ScannerReport.Flow::getType)
        .containsExactly(tuple("", FlowType.UNDEFINED), tuple("description", FlowType.DATA), tuple("", FlowType.EXECUTION));
    assertThat(writtenFlows.get(0).getLocationCount()).isEqualTo(2);
    assertThat(writtenFlows.get(0).getLocationList()).containsExactly(
        ScannerReport.IssueLocation.newBuilder()
            .setComponentRef(file.scannerId())
            .setMsg("Foo1")
            .addMsgFormatting(ScannerReport.MessageFormatting.newBuilder().setStart(0).setEnd(4).setType(ScannerReport.MessageFormattingType.CODE).build())
            .setTextRange(ScannerReport.TextRange.newBuilder().setStartLine(1).setEndLine(1).setEndOffset(3).build())
            .build(),
        ScannerReport.IssueLocation.newBuilder()
            .setComponentRef(file.scannerId())
            .setMsg("Foo2")
            .setTextRange(ScannerReport.TextRange.newBuilder().setStartLine(2).setEndLine(2).setEndOffset(3).build())
            .build());
}
// Maps an entity to its DTO using the configured fromEntity function.
public final T convertFromEntity(final U entity) {
    return fromEntity.apply(entity);
}
// A custom converter built from two lambdas should derive the DTO's email from
// the entity's lowercased first and last names.
@Test
void testCustomConverter() {
    var converter = new Converter<UserDto, User>(
        userDto -> new User(
            userDto.firstName(),
            userDto.lastName(),
            userDto.active(),
            String.valueOf(new Random().nextInt())
        ),
        user -> new UserDto(
            user.firstName(),
            user.lastName(),
            user.active(),
            user.firstName().toLowerCase() + user.lastName().toLowerCase() + "@whatever.com")
    );
    var u1 = new User("John", "Doe", false, "12324");
    var userDto = converter.convertFromEntity(u1);
    assertEquals("johndoe@whatever.com", userDto.email());
}
@Override public boolean archive(String gcsUrl, byte[] data) { BlobInfo blobInfo = parseBlobInfo(gcsUrl); if (data.length <= options.chunkUploadThresholdInBytes) { // Create the blob in one request. logger.atInfo().log("Archiving data to GCS at '%s' in one request.", gcsUrl); storage.create(blobInfo, data); return true; } // When content is large (1MB or more) it is recommended to write it in chunks via the blob's // channel writer. logger.atInfo().log( "Content is larger than threshold, archiving data to GCS at '%s' in chunks.", gcsUrl); try (WriteChannel writer = storage.writer(blobInfo)) { for (int chunkOffset = 0; chunkOffset < data.length; chunkOffset += options.chunkSizeInBytes) { int chunkSize = Math.min(data.length - chunkOffset, options.chunkSizeInBytes); writer.write(ByteBuffer.wrap(data, chunkOffset, chunkSize)); } return true; } catch (IOException e) { logger.atSevere().withCause(e).log("Unable to archving data to GCS at '%s'.", gcsUrl); return false; } }
@Test public void archive_withInvalidGcsUrl_throwsIllegalArgumentException() { GoogleCloudStorageArchiver archiver = archiverFactory.create(mockStorage); assertThrows(IllegalArgumentException.class, () -> archiver.archive("invalid_url", "")); }
@Override public void close() throws IOException { boolean released = false; synchronized (requestLock) { if (!closeFuture.isDone()) { try { LOG.debug("{}: Releasing {}.", owningTaskName, this); if (retriggerLocalRequestTimer != null) { retriggerLocalRequestTimer.cancel(); } for (InputChannel inputChannel : inputChannels()) { try { inputChannel.releaseAllResources(); } catch (IOException e) { LOG.warn( "{}: Error during release of channel resources: {}.", owningTaskName, e.getMessage(), e); } } // The buffer pool can actually be destroyed immediately after the // reader received all of the data from the input channels. if (bufferPool != null) { bufferPool.lazyDestroy(); } } finally { released = true; closeFuture.complete(null); } } } if (released) { synchronized (inputChannelsWithData) { inputChannelsWithData.notifyAll(); } if (enabledTieredStorage()) { tieredStorageConsumerClient.close(); } } }
@Test void testInputGateRemovalFromNettyShuffleEnvironment() throws Exception { NettyShuffleEnvironment network = createNettyShuffleEnvironment(); try (Closer closer = Closer.create()) { closer.register(network::close); int numberOfGates = 10; Map<InputGateID, SingleInputGate> createdInputGatesById = createInputGateWithLocalChannels(network, numberOfGates, 1); assertThat(createdInputGatesById.size()).isEqualTo(numberOfGates); for (InputGateID id : createdInputGatesById.keySet()) { assertThat(network.getInputGate(id).isPresent()).isTrue(); createdInputGatesById.get(id).close(); assertThat(network.getInputGate(id).isPresent()).isFalse(); } } }
public static Read read() { return new AutoValue_HCatalogIO_Read.Builder() .setDatabase(DEFAULT_DATABASE) .setPartitionCols(new ArrayList<>()) .build(); }
@Test public void testSourceCanBeSerializedMultipleTimes() throws Exception { ReaderContext context = getReaderContext(getConfigPropertiesAsMap(service.getHiveConf())); HCatalogIO.Read spec = HCatalogIO.read() .withConfigProperties(getConfigPropertiesAsMap(service.getHiveConf())) .withContext(context) .withTable(TEST_TABLE); BoundedHCatalogSource source = new BoundedHCatalogSource(spec); SerializableUtils.clone(SerializableUtils.clone(source)); }
@Override public double get() { return get(getAsync()); }
@Test public void testGetZero() { RAtomicDouble ad2 = redisson.getAtomicDouble("test"); assertThat(ad2.get()).isZero(); }
void addStep(final Supplier<PMMLStep> stepSupplier, final PMMLRuntimeContext pmmlContext) { stepExecuted(stepSupplier, pmmlContext); }
@Test void addStep() { PMMLStep step = mock(PMMLStep.class); Set<PMMLListener> pmmlListenersMock = IntStream.range(0, 3).mapToObj(i -> mock(PMMLListener.class)).collect(Collectors.toSet()); PMMLRuntimeContext pmmlContextMock = mock(PMMLRuntimeContext.class); when(pmmlContextMock.getEfestoListeners()).thenReturn(pmmlListenersMock); evaluator.addStep(() -> step, pmmlContextMock); pmmlListenersMock.forEach(pmmlListenerMock -> verify(pmmlListenerMock).stepExecuted(step)); }
public int getKafkaFetcherMinBytes() { return _kafkaFetcherMinBytes; }
@Test public void testGetFetcherMinBytes() { // test default KafkaPartitionLevelStreamConfig config = getStreamConfig("topic", "host1", "", "", "", null, null); Assert.assertEquals(KafkaStreamConfigProperties.LowLevelConsumer.KAFKA_FETCHER_MIN_BYTES_DEFAULT, config.getKafkaFetcherMinBytes()); config = getStreamConfig("topic", "host1", "", "", "", "", null); Assert.assertEquals(KafkaStreamConfigProperties.LowLevelConsumer.KAFKA_FETCHER_MIN_BYTES_DEFAULT, config.getKafkaFetcherMinBytes()); config = getStreamConfig("topic", "host1", "", "", "", "bad value", null); Assert.assertEquals(KafkaStreamConfigProperties.LowLevelConsumer.KAFKA_FETCHER_MIN_BYTES_DEFAULT, config.getKafkaFetcherMinBytes()); // correct config config = getStreamConfig("topic", "host1", "", "", "", "100", null); Assert.assertEquals(100, config.getKafkaFetcherMinBytes()); }
public Expression rewrite(final Expression expression) { return new ExpressionTreeRewriter<>(new OperatorPlugin()::process) .rewrite(expression, null); }
@Test public void shouldNotReplaceStringsInFunctions() { // Given: final Expression predicate = getPredicate( "SELECT * FROM orders where ROWTIME = foo('2017-01-01');"); // When: final Expression rewritten = rewriter.rewrite(predicate); // Then: verify(parser, never()).parse(any()); assertThat(rewritten.toString(), is("(ORDERS.ROWTIME = FOO('2017-01-01'))")); }
@Override public T peekFirst() { if (_head == null) { return null; } return _head._value; }
@Test public void testEmptyPeekFirst() { LinkedDeque<Object> q = new LinkedDeque<>(); Assert.assertNull(q.peekFirst(), "peekFirst on empty queue should return null"); }
public ProcessingNodesState calculateProcessingState(TimeRange timeRange) { final DateTime updateThresholdTimestamp = clock.nowUTC().minus(updateThreshold.toMilliseconds()); try (DBCursor<ProcessingStatusDto> statusCursor = db.find(activeNodes(updateThresholdTimestamp))) { if (!statusCursor.hasNext()) { return ProcessingNodesState.NONE_ACTIVE; } int activeNodes = 0; int idleNodes = 0; while (statusCursor.hasNext()) { activeNodes++; ProcessingStatusDto nodeProcessingStatus = statusCursor.next(); DateTime lastIndexedMessage = nodeProcessingStatus.receiveTimes().postIndexing(); // If node is behind and is busy, it is overloaded. if (lastIndexedMessage.isBefore(timeRange.getTo()) && isBusy(nodeProcessingStatus)) { return ProcessingNodesState.SOME_OVERLOADED; } // If a node did not index a message that is at least at the start of the time range, // we consider it idle. if (lastIndexedMessage.isBefore(timeRange.getFrom())) { idleNodes++; } } // Only if all nodes are idle, we stop the processing. if (activeNodes == idleNodes) { return ProcessingNodesState.ALL_IDLE; } } // If none of the above checks return, we can assume that some nodes have already indexed the given timerange. return ProcessingNodesState.SOME_UP_TO_DATE; }
@Test @MongoDBFixtures("processing-status-idle-nodes.json") public void processingStateIdleNodesWhereLastMessageBeforeTimeRange() { when(clock.nowUTC()).thenReturn(DateTime.parse("2019-01-01T04:00:00.000Z")); when(updateThreshold.toMilliseconds()).thenReturn(Duration.hours(1).toMilliseconds()); TimeRange timeRange = AbsoluteRange.create("2019-01-01T02:45:00.000Z", "2019-01-01T03:00:00.000Z"); assertThat(dbService.calculateProcessingState(timeRange)).isEqualTo(ProcessingNodesState.ALL_IDLE); }
@Override public RouteContext route(final ShardingRule shardingRule) { RouteContext result = new RouteContext(); String dataSourceName = getDataSourceName(shardingRule.getDataSourceNames()); RouteMapper dataSourceMapper = new RouteMapper(dataSourceName, dataSourceName); if (logicTables.isEmpty()) { result.getRouteUnits().add(new RouteUnit(dataSourceMapper, Collections.emptyList())); } else if (1 == logicTables.size()) { String logicTableName = logicTables.iterator().next(); if (!shardingRule.findShardingTable(logicTableName).isPresent()) { result.getRouteUnits().add(new RouteUnit(dataSourceMapper, Collections.emptyList())); return result; } DataNode dataNode = shardingRule.getDataNode(logicTableName); result.getRouteUnits().add(new RouteUnit(new RouteMapper(dataNode.getDataSourceName(), dataNode.getDataSourceName()), Collections.singletonList(new RouteMapper(logicTableName, dataNode.getTableName())))); } else { routeWithMultipleTables(result, shardingRule); } return result; }
@Test void assertRouteForWithNoIntersection() { assertThrows(ShardingTableRuleNotFoundException.class, () -> new ShardingUnicastRoutingEngine( mock(SQLStatementContext.class), Arrays.asList("t_order", "t_config", "t_product"), new ConnectionContext(Collections::emptySet)).route(shardingRule)); }
public static String fix(final String raw) { if ( raw == null || "".equals( raw.trim() )) { return raw; } MacroProcessor macroProcessor = new MacroProcessor(); macroProcessor.setMacros( macros ); return macroProcessor.parse( raw ); }
@Test public void testModifyRetractModifyInsert() { final String raw = "some code; insert( bar ); modifyRetract( foo );\n More(); retract( bar ); modifyInsert( foo );"; final String result = "some code; drools.insert( bar ); drools.modifyRetract( foo );\n More(); drools.retract( bar ); drools.modifyInsert( foo );"; assertEqualsIgnoreWhitespace( result, KnowledgeHelperFixerTest.fixer.fix( raw ) ); }
public final T getFirst() { return this.firstNode; }
@Test public void testGetFirst() { assertThat(this.list.getFirst()).as("Empty list should return null on getFirst()").isNull(); this.list.add( this.node1 ); assertThat(this.node1).as("First node should be node1").isSameAs(this.list.getFirst()); this.list.add( this.node2 ); assertThat(this.node1).as("List should return node1 on getFirst()").isSameAs(this.list.getFirst()); this.list.add( this.node3 ); assertThat(this.node1).as("List should return node1 on getFirst()").isSameAs(this.list.getFirst()); }
protected ConcurrentMap<Token<?>, DelegationTokenToRenew> getAllTokens() { return allTokens; }
// Verifies that when two apps are submitted with the same delegation token, the token stays
// tracked by the renewer after the app with cancelTokensWhenComplete=true finishes (because
// the first app still references it) and is not cancelled once both apps complete, since the
// first app was submitted with cancelTokensWhenComplete=false.
@Test (timeout = 30000) public void testAppSubmissionWithPreviousToken() throws Exception{ rm = new TestSecurityMockRM(conf, null); rm.start(); final MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService()); nm1.registerNode(); // create Token1: Text userText1 = new Text("user"); DelegationTokenIdentifier dtId1 = new DelegationTokenIdentifier(userText1, new Text("renewer1"), userText1); final Token<DelegationTokenIdentifier> token1 = new Token<DelegationTokenIdentifier>(dtId1.getBytes(), "password1".getBytes(), dtId1.getKind(), new Text("service1")); Credentials credentials = new Credentials(); credentials.addToken(userText1, token1); // submit app1 with a token, set cancelTokenWhenComplete to false; Resource resource = Records.newRecord(Resource.class); resource.setMemorySize(200); MockRMAppSubmissionData data1 = MockRMAppSubmissionData.Builder .createWithResource(resource, rm) .withAppName("name") .withUser("user") .withMaxAppAttempts(2) .withCredentials(credentials) .withCancelTokensWhenComplete(false) .build(); RMApp app1 = MockRMAppSubmitter.submit(rm, data1); MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm1); rm.waitForState(app1.getApplicationId(), RMAppState.RUNNING); DelegationTokenRenewer renewer = rm.getRMContext().getDelegationTokenRenewer(); DelegationTokenToRenew dttr = renewer.getAllTokens().get(token1); Assert.assertNotNull(dttr); // submit app2 with the same token, set cancelTokenWhenComplete to true; MockRMAppSubmissionData data = MockRMAppSubmissionData.Builder .createWithResource(resource, rm) .withAppName("name") .withUser("user") .withMaxAppAttempts(2) .withCredentials(credentials) .withCancelTokensWhenComplete(true) .build(); RMApp app2 = MockRMAppSubmitter.submit(rm, data); MockAM am2 = MockRM.launchAndRegisterAM(app2, rm, nm1); rm.waitForState(app2.getApplicationId(), RMAppState.RUNNING); finishAMAndWaitForComplete(app2, rm, nm1, am2, dttr); Assert.assertTrue(rm.getRMContext().getDelegationTokenRenewer()
.getAllTokens().containsKey(token1)); finishAMAndWaitForComplete(app1, rm, nm1, am1, dttr); // app2 completes, app1 is still running, check the token is not cancelled Assert.assertFalse(Renewer.cancelled); }
public boolean flushed() { return this.completions.isEmpty(); }
@Test public void shouldBeFlushedIfNoBufferedRecords() { buildMockProducer(true); assertTrue(producer.flushed()); }
public static <T> T loadData(Map<String, Object> config, T existingData, Class<T> dataCls) { try { String existingConfigJson = MAPPER.writeValueAsString(existingData); Map<String, Object> existingConfig = MAPPER.readValue(existingConfigJson, Map.class); Map<String, Object> newConfig = new HashMap<>(); newConfig.putAll(existingConfig); newConfig.putAll(config); String configJson = MAPPER.writeValueAsString(newConfig); return MAPPER.readValue(configJson, dataCls); } catch (IOException e) { throw new RuntimeException("Failed to load config into existing configuration data", e); } }
@Test public void testLoadConfigurationDataWithUnknownFields() { ReaderConfigurationData confData = new ReaderConfigurationData(); confData.setTopicName("unknown"); confData.setReceiverQueueSize(1000000); confData.setReaderName("unknown-reader"); Map<String, Object> config = new HashMap<>(); config.put("unknown", "test-topic"); config.put("receiverQueueSize", 100); try { ConfigurationDataUtils.loadData(config, confData, ReaderConfigurationData.class); fail("Should fail loading configuration data with unknown fields"); } catch (RuntimeException re) { assertTrue(re.getCause() instanceof IOException); } }
@Override protected ExecuteContext doBefore(ExecuteContext context) { LogUtils.printHttpRequestBeforePoint(context); Object[] arguments = context.getArguments(); AbstractClientHttpRequest request = (AbstractClientHttpRequest) arguments[0]; Optional<Object> httpRequest = ReflectUtils.getFieldValue(request, "httpRequest"); if (!httpRequest.isPresent()) { return context; } StringBuilder sb = new StringBuilder(); ReflectUtils.invokeMethod(httpRequest.get(), "assembleRequestUri", new Class[]{StringBuilder.class}, new Object[]{sb}); if (sb.length() == 0) { return context; } String uri = sb.toString(); Map<String, String> uriInfo = RequestInterceptorUtils.recoverUrl(uri); if (!PlugEffectWhiteBlackUtils.isAllowRun(uriInfo.get(HttpConstants.HTTP_URI_HOST), uriInfo.get(HttpConstants.HTTP_URI_SERVICE))) { return context; } RequestInterceptorUtils.printRequestLog("webClient(http-client)", uriInfo); Optional<Object> result = invokerService.invoke( invokerContext -> buildInvokerFunc(context, invokerContext, request, uriInfo), ex -> ex, uriInfo.get(HttpConstants.HTTP_URI_SERVICE)); if (result.isPresent()) { Object obj = result.get(); if (obj instanceof Exception) { LOGGER.log(Level.SEVERE, "Webclient(http-client) request is error, uri is " + uri, (Exception) obj); context.setThrowableOut((Exception) obj); return context; } context.skip(obj); } return context; }
@Test public void testException() { // Test for anomalies AbstractClientHttpRequest request = new HttpComponentsClientHttpRequest(HttpMethod.GET, THROW_URI, HttpClientContext.create(), new DefaultDataBufferFactory()); arguments[0] = request; ReflectUtils.setFieldValue(request, "httpRequest", new BasicHttpRequest(Method.GET, THROW_URI)); ExecuteContext context = ExecuteContext.forMemberMethod(this, method, arguments, null, null); interceptor.doBefore(context); Assert.assertEquals(ConnectException.class, context.getThrowableOut().getClass()); }
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) { return api.send(request); }
@Test public void answerWebAppQuery() { SentWebAppMessageResponse response = bot.execute(new AnswerWebAppQuery("123", new InlineQueryResultArticle("1", "title", "text"))); assertFalse(response.isOk()); assertEquals("Bad Request: query is too old and response timeout expired or query ID is invalid", response.description()); assertNull(response.sentWebAppMessage()); }
@Override public List<Map<String, String>> taskConfigs(int maxTasks) { String txnSuffix = "-txn-" + UUID.randomUUID() + "-"; return IntStream.range(0, maxTasks) .mapToObj( i -> { Map<String, String> map = Maps.newHashMap(props); map.put(IcebergSinkConfig.INTERNAL_TRANSACTIONAL_SUFFIX_PROP, txnSuffix + i); return map; }) .collect(Collectors.toList()); }
@Test public void testTaskConfigs() { SinkConnector connector = new IcebergSinkConnector(); connector.start(ImmutableMap.of()); List<Map<String, String>> configs = connector.taskConfigs(3); assertThat(configs).hasSize(3); configs.forEach( map -> assertThat(map).containsKey(IcebergSinkConfig.INTERNAL_TRANSACTIONAL_SUFFIX_PROP)); }
@Override public void pluginJarAdded(BundleOrPluginFileDetails bundleOrPluginFileDetails) { final GoPluginBundleDescriptor bundleDescriptor = goPluginBundleDescriptorBuilder.build(bundleOrPluginFileDetails); try { LOGGER.info("Plugin load starting: {}", bundleOrPluginFileDetails.file()); validateIfExternalPluginRemovingBundledPlugin(bundleDescriptor); validatePluginCompatibilityWithCurrentOS(bundleDescriptor); validatePluginCompatibilityWithGoCD(bundleDescriptor); addPlugin(bundleOrPluginFileDetails, bundleDescriptor); } finally { LOGGER.info("Plugin load finished: {}", bundleOrPluginFileDetails.file()); } }
@Test void shouldNotLoadAPluginWhenCurrentOSIsNotAmongTheListOfTargetOSesAsDeclaredByThePluginInItsXML() throws Exception { File pluginJarFile = new File(pluginWorkDir, PLUGIN_JAR_FILE_NAME); copyPluginToTheDirectory(pluginWorkDir, PLUGIN_JAR_FILE_NAME); final GoPluginDescriptor pluginDescriptor1 = getPluginDescriptor("some.old.id.1", "1.0", pluginJarFile.getAbsolutePath(), new File(PLUGIN_JAR_FILE_NAME), false, null, "Windows"); final GoPluginDescriptor pluginDescriptor2 = getPluginDescriptor("some.old.id.2", "1.0", pluginJarFile.getAbsolutePath(), new File(PLUGIN_JAR_FILE_NAME), false, null, "Linux", "Mac OS X"); GoPluginBundleDescriptor bundleDescriptor = new GoPluginBundleDescriptor(pluginDescriptor1, pluginDescriptor2); when(systemEnvironment.getOperatingSystemFamilyJvmName()).thenReturn("Windows"); when(goPluginBundleDescriptorBuilder.build(new BundleOrPluginFileDetails(pluginJarFile, true, pluginWorkDir))).thenReturn(bundleDescriptor); listener = new DefaultPluginJarChangeListener(registry, osgiManifestGenerator, pluginLoader, goPluginBundleDescriptorBuilder, systemEnvironment); listener.pluginJarAdded(new BundleOrPluginFileDetails(pluginJarFile, true, pluginWorkDir)); verify(registry, times(1)).loadPlugin(bundleDescriptor); verifyNoMoreInteractions(pluginLoader); assertThat(pluginDescriptor1.getStatus().getMessages().size()).isEqualTo(1); assertThat(pluginDescriptor1.getStatus().getMessages().get(0)).isEqualTo("Plugins with IDs ([some.old.id.1, some.old.id.2]) are not valid: Incompatible with current operating system 'Windows'. Valid operating systems are: [Linux, Mac OS X]."); assertThat(pluginDescriptor2.getStatus().getMessages().size()).isEqualTo(1); assertThat(pluginDescriptor2.getStatus().getMessages().get(0)).isEqualTo("Plugins with IDs ([some.old.id.1, some.old.id.2]) are not valid: Incompatible with current operating system 'Windows'. Valid operating systems are: [Linux, Mac OS X]."); }
@Override public Optional<CompletableFuture<TaskManagerLocation>> getTaskManagerLocation( ExecutionVertexID executionVertexId) { return inputsLocationsRetriever .getTaskManagerLocation(executionVertexId) .filter(future -> future.isDone() && !future.isCompletedExceptionally()); }
@Test void testInputLocationIfDone() { TestingInputsLocationsRetriever originalLocationRetriever = getOriginalLocationRetriever(); originalLocationRetriever.assignTaskManagerLocation(EV1); InputsLocationsRetriever availableInputsLocationsRetriever = new AvailableInputsLocationsRetriever(originalLocationRetriever); assertThat(availableInputsLocationsRetriever.getTaskManagerLocation(EV1)).isPresent(); }
/**
 * Reads a Bedrock resource pack from a {@code .zip}/{@code .mcpack} file: validates the file
 * extension, scans the archive for a usable {@code manifest.json} (only manifests with a
 * UUID header are accepted), warns about entry paths of 80+ characters, and picks up an
 * optional sibling {@code <packname>.key} file as the content key.
 *
 * @throws IllegalArgumentException when the file has the wrong extension, lacks a usable
 *     manifest, or cannot be read
 */
public static GeyserResourcePack readPack(Path path) throws IllegalArgumentException {
    if (!path.getFileName().toString().endsWith(".mcpack") && !path.getFileName().toString().endsWith(".zip")) {
        throw new IllegalArgumentException("Resource pack " + path.getFileName() + " must be a .zip or .mcpack file!");
    }
    AtomicReference<GeyserResourcePackManifest> manifestReference = new AtomicReference<>();
    try (ZipFile zip = new ZipFile(path.toFile());
         Stream<? extends ZipEntry> stream = zip.stream()) {
        stream.forEach(x -> {
            String name = x.getName();
            if (SHOW_RESOURCE_PACK_LENGTH_WARNING && name.length() >= 80) {
                GeyserImpl.getInstance().getLogger().warning("The resource pack " + path.getFileName()
                        + " has a file in it that meets or exceeds 80 characters in its path (" + name
                        + ", " + name.length() + " characters long). This will cause problems on some Bedrock platforms." +
                        " Please rename it to be shorter, or reduce the amount of folders needed to get to the file.");
            }
            if (name.contains("manifest.json")) {
                try {
                    GeyserResourcePackManifest manifest = FileUtils.loadJson(zip.getInputStream(x), GeyserResourcePackManifest.class);
                    // Only accept manifests that carry a UUID header.
                    if (manifest.header().uuid() != null) {
                        manifestReference.set(manifest);
                    }
                } catch (IOException e) {
                    e.printStackTrace();
                }
            }
        });
        GeyserResourcePackManifest manifest = manifestReference.get();
        if (manifest == null) {
            throw new IllegalArgumentException(path.getFileName() + " does not contain a valid pack_manifest.json or manifest.json");
        }
        // Check if a file exists with the same name as the resource pack suffixed by .key,
        // and set this as content key. (e.g. test.zip, key file would be test.zip.key)
        Path keyFile = path.resolveSibling(path.getFileName().toString() + ".key");
        String contentKey = Files.exists(keyFile) ?
                Files.readString(keyFile, StandardCharsets.UTF_8) : "";
        return new GeyserResourcePack(new GeyserPathPackCodec(path), manifest, contentKey);
    } catch (Exception e) {
        // Any failure (including IO while reading the key file) is surfaced as a broken pack.
        throw new IllegalArgumentException(GeyserLocale.getLocaleStringLog("geyser.resource_pack.broken", path.getFileName()), e);
    }
}
@Test public void testEncryptedPack() throws Exception { // this zip only contains a contents.json and manifest.json at the root Path path = getResource("encrypted_pack.zip"); ResourcePack pack = ResourcePackLoader.readPack(path); assertEquals("JAGcSXcXwcODc1YS70GzeWAUKEO172UA", pack.contentKey()); }
@SuppressWarnings("deprecation") @VisibleForTesting public String getWebSocketProduceUri(String topic) { String serviceURLWithoutTrailingSlash = serviceURL.substring(0, serviceURL.endsWith("/") ? serviceURL.length() - 1 : serviceURL.length()); TopicName topicName = TopicName.get(topic); String wsTopic; if (topicName.isV2()) { wsTopic = String.format("%s/%s/%s/%s", topicName.getDomain(), topicName.getTenant(), topicName.getNamespacePortion(), topicName.getLocalName()); } else { wsTopic = String.format("%s/%s/%s/%s/%s", topicName.getDomain(), topicName.getTenant(), topicName.getCluster(), topicName.getNamespacePortion(), topicName.getLocalName()); } String uriFormat = "%s/ws" + (topicName.isV2() ? "/v2/" : "/") + "producer/%s"; return String.format(uriFormat, serviceURLWithoutTrailingSlash, wsTopic); }
@Test public void testGetWebSocketProduceUri() { String topicNameV1 = "persistent://public/cluster/default/issue-11067"; assertEquals(cmdProduce.getWebSocketProduceUri(topicNameV1), "ws://localhost:8080/ws/producer/persistent/public/cluster/default/issue-11067"); String topicNameV2 = "persistent://public/default/issue-11067"; assertEquals(cmdProduce.getWebSocketProduceUri(topicNameV2), "ws://localhost:8080/ws/v2/producer/persistent/public/default/issue-11067"); }
public static FEEL_1_1Parser parse(FEELEventListenersManager eventsManager, String source, Map<String, Type> inputVariableTypes, Map<String, Object> inputVariables, Collection<FEELFunction> additionalFunctions, List<FEELProfile> profiles, FEELTypeRegistry typeRegistry) { CharStream input = CharStreams.fromString(source); FEEL_1_1Lexer lexer = new FEEL_1_1Lexer( input ); CommonTokenStream tokens = new CommonTokenStream( lexer ); FEEL_1_1Parser parser = new FEEL_1_1Parser( tokens ); ParserHelper parserHelper = new ParserHelper(eventsManager); additionalFunctions.forEach(f -> parserHelper.getSymbolTable().getBuiltInScope().define(f.getSymbol())); parser.setHelper(parserHelper); parser.setErrorHandler( new FEELErrorHandler() ); parser.removeErrorListeners(); // removes the error listener that prints to the console parser.addErrorListener( new FEELParserErrorListener( eventsManager ) ); // pre-loads the parser with symbols defineVariables( inputVariableTypes, inputVariables, parser ); if (typeRegistry != null) { parserHelper.setTypeRegistry(typeRegistry); } return parser; }
@Test void inValueList() { // TODO review this test might be wrong as list is not homogeneous String inputExpression = "x / 4 in ( 10+y, true, 80, someVar )"; BaseNode inNode = parse( inputExpression ); assertThat( inNode).isInstanceOf(InNode.class); assertThat( inNode.getResultType()).isEqualTo(BuiltInType.BOOLEAN); assertThat( inNode.getText()).isEqualTo(inputExpression); InNode in = (InNode) inNode; assertThat( in.getValue()).isInstanceOf(InfixOpNode.class); assertThat( in.getValue().getText()).isEqualTo( "x / 4"); assertThat( in.getExprs()).isInstanceOf(ListNode.class); assertThat( in.getExprs().getText()).isEqualTo( "10+y, true, 80, someVar"); ListNode list = (ListNode) in.getExprs(); assertThat( list.getElements().get( 0 )).isInstanceOf(InfixOpNode.class); assertThat( list.getElements().get( 1 )).isInstanceOf(BooleanNode.class); assertThat( list.getElements().get( 2 )).isInstanceOf(NumberNode.class); assertThat( list.getElements().get( 3 )).isInstanceOf(NameRefNode.class); }
@Override public ProtobufSystemInfo.Section toProtobuf() { ProtobufSystemInfo.Section.Builder protobuf = ProtobufSystemInfo.Section.newBuilder(); protobuf.setName("System"); setAttribute(protobuf, "Server ID", server.getId()); setAttribute(protobuf, "Version", getVersion()); setAttribute(protobuf, "Edition", sonarRuntime.getEdition().getLabel()); setAttribute(protobuf, NCLOC.getName(), statisticsSupport.getLinesOfCode()); setAttribute(protobuf, "Container", containerSupport.isRunningInContainer()); setAttribute(protobuf, "External Users and Groups Provisioning", commonSystemInformation.getManagedInstanceProviderName()); setAttribute(protobuf, "External User Authentication", commonSystemInformation.getExternalUserAuthentication()); addIfNotEmpty(protobuf, "Accepted external identity providers", commonSystemInformation.getEnabledIdentityProviders()); addIfNotEmpty(protobuf, "External identity providers whose users are allowed to sign themselves up", commonSystemInformation.getAllowsToSignUpEnabledIdentityProviders()); setAttribute(protobuf, "High Availability", false); setAttribute(protobuf, "Official Distribution", officialDistribution.check()); setAttribute(protobuf, "Force authentication", commonSystemInformation.getForceAuthentication()); setAttribute(protobuf, "Home Dir", config.get(PATH_HOME.getKey()).orElse(null)); setAttribute(protobuf, "Data Dir", config.get(PATH_DATA.getKey()).orElse(null)); setAttribute(protobuf, "Temp Dir", config.get(PATH_TEMP.getKey()).orElse(null)); setAttribute(protobuf, "Processors", Runtime.getRuntime().availableProcessors()); return protobuf.build(); }
@Test public void toProtobuf_whenNoExternalUserAuthentication_shouldWriteNothing() { when(commonSystemInformation.getExternalUserAuthentication()).thenReturn(null); ProtobufSystemInfo.Section protobuf = underTest.toProtobuf(); assertThatAttributeDoesNotExist(protobuf, "External User Authentication"); }
public GrantDTO create(GrantDTO grantDTO, @Nullable User currentUser) { return create(grantDTO, requireNonNull(currentUser, "currentUser cannot be null").getName()); }
@Test public void createWithGranteeCapabilityAndTarget() { final GRN grantee = GRNTypes.USER.toGRN("jane"); final GRN target = GRNTypes.DASHBOARD.toGRN("54e3deadbeefdeadbeef0000"); final GrantDTO grant = dbService.create(grantee, Capability.MANAGE, target, "admin"); assertThat(grant.id()).isNotBlank(); assertThat(grant.grantee()).isEqualTo(grantee); assertThat(grant.capability()).isEqualTo(Capability.MANAGE); assertThat(grant.target()).isEqualTo(target); assertThat(grant.createdBy()).isEqualTo("admin"); assertThat(grant.createdAt()).isBefore(ZonedDateTime.now(ZoneOffset.UTC)); assertThat(grant.updatedBy()).isEqualTo("admin"); assertThat(grant.updatedAt()).isBefore(ZonedDateTime.now(ZoneOffset.UTC)); }
@Override public Path move(final Path file, final Path renamed, final TransferStatus status, final Delete.Callback delete, final ConnectionCallback callback) throws BackgroundException { try { if(status.isExists()) { if(log.isWarnEnabled()) { log.warn(String.format("Delete file %s to be replaced with %s", renamed, file)); } new DeepboxTrashFeature(session, fileid).delete(Collections.singletonList(renamed), callback, delete); } final String sourceId = fileid.getFileId(file); final NodeMove nodeMove = new NodeMove(); final String targetParentId = fileid.getFileId(renamed.getParent()); nodeMove.setTargetParentNodeId(targetParentId); new CoreRestControllerApi(session.getClient()).moveNode(nodeMove, sourceId); final NodeUpdate nodeUpdate = new NodeUpdate(); nodeUpdate.setName(renamed.getName()); new CoreRestControllerApi(session.getClient()).updateNode(nodeUpdate, sourceId); fileid.cache(file, null); fileid.cache(renamed, sourceId); return renamed.withAttributes(file.attributes().withFileId(sourceId)); } catch(ApiException e) { throw new DeepboxExceptionMappingService(fileid).map("Cannot rename {0}", e, file); } }
// Verifies moving a folder over an existing target: the previous target is trashed (found in
// the Trash with its original id/timestamps), the moved folder takes over the target path
// while keeping its own id, modification date and checksum, and the source path disappears.
@Test public void testMoveOverrideFolder() throws Exception { final DeepboxIdProvider fileid = new DeepboxIdProvider(session); final Path documents = new Path("/ORG 4 - DeepBox Desktop App/ORG3:Box1/Documents/", EnumSet.of(Path.Type.directory, Path.Type.volume)); final Path trash = new Path("/ORG 4 - DeepBox Desktop App/ORG3:Box1/Trash", EnumSet.of(Path.Type.directory, Path.Type.volume)); final Path test = new DeepboxDirectoryFeature(session, fileid).mkdir( new Path(documents, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus()); final Path target = new DeepboxDirectoryFeature(session, fileid).mkdir( new Path(documents, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)), new TransferStatus()); final Path targetInTrash = new Path(trash, target.getName(), target.getType()); final PathAttributes originalTestAttributes = new DeepboxAttributesFinderFeature(session, fileid).find(test); final PathAttributes originalTargetAttributes = new DeepboxAttributesFinderFeature(session, fileid).find(target); new DeepboxMoveFeature(session, fileid).move(test, target, new TransferStatus().exists(true), new Delete.DisabledCallback(), new DisabledConnectionCallback()); assertFalse(new DeepboxFindFeature(session, fileid).find(test.withAttributes(new PathAttributes()))); assertTrue(new DeepboxFindFeature(session, fileid).find(target.withAttributes(new PathAttributes()))); assertTrue(new DeepboxFindFeature(session, fileid).find(targetInTrash)); final PathAttributes overriddenTargetAttributes = new DeepboxAttributesFinderFeature(session, fileid).find(target.withAttributes(new PathAttributes())); assertNotNull(originalTestAttributes.getFileId()); assertEquals(originalTestAttributes.getFileId(), overriddenTargetAttributes.getFileId()); assertEquals(originalTestAttributes.getModificationDate(), overriddenTargetAttributes.getModificationDate()); assertEquals(originalTestAttributes.getChecksum(),
overriddenTargetAttributes.getChecksum()); final PathAttributes trashedTargetAttributes = new DeepboxAttributesFinderFeature(session, fileid).find(targetInTrash.withAttributes(new PathAttributes())); assertNotNull(originalTargetAttributes.getFileId()); assertEquals(originalTargetAttributes.getFileId(), trashedTargetAttributes.getFileId()); assertEquals(originalTargetAttributes.getModificationDate(), trashedTargetAttributes.getModificationDate()); assertEquals(originalTargetAttributes.getChecksum(), trashedTargetAttributes.getChecksum()); new DeepboxDeleteFeature(session, fileid).delete(Collections.singletonList(targetInTrash), new DisabledLoginCallback(), new Delete.DisabledCallback()); new DeepboxDeleteFeature(session, fileid).delete(Collections.singletonList(target), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
public static String dateString(long timeMs, ZoneOffset zoneOffset) { return new Date(timeMs).toInstant(). atOffset(zoneOffset). format(DateTimeFormatter.ISO_OFFSET_DATE_TIME); }
@Test public void testDateString() { assertEquals("2019-01-08T20:59:29.85Z", dateString(1546981169850L, ZoneOffset.UTC)); }
@Override public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException { if(file.isRoot()) { return true; } try { final SMBSession.DiskShareWrapper share = session.openShare(file); try { if(new SMBPathContainerService(session).isContainer(file)) { return true; } if(file.isDirectory()) { return share.get().folderExists(new SMBPathContainerService(session).getKey(file)); } return share.get().fileExists(new SMBPathContainerService(session).getKey(file)); } catch(SMBRuntimeException e) { throw new SMBExceptionMappingService().map("Failure to read attributes of {0}", e, file); } finally { session.releaseShare(share); } } catch(NotfoundException e) { return false; } }
@Test public void testFindShareNotFound() throws Exception { assertFalse(new SMBFindFeature(session).find(new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)))); }
@Override public int read(long position, byte[] buffer, int offset, int length) throws IOException { // When bufferedPreadDisabled = true, this API does not use any shared buffer, // cursor position etc. So this is implemented as NOT synchronized. HBase // kind of random reads on a shared file input stream will greatly get // benefited by such implementation. // Strict close check at the begin of the API only not for the entire flow. synchronized (this) { if (closed) { throw new IOException(FSExceptionMessages.STREAM_IS_CLOSED); } } LOG.debug("pread requested offset = {} len = {} bufferedPreadDisabled = {}", offset, length, bufferedPreadDisabled); if (!bufferedPreadDisabled) { return super.read(position, buffer, offset, length); } validatePositionedReadArgs(position, buffer, offset, length); if (length == 0) { return 0; } if (streamStatistics != null) { streamStatistics.readOperationStarted(); } int bytesRead = readRemote(position, buffer, offset, length, tracingContext); if (statistics != null) { statistics.incrementBytesRead(bytesRead); } if (streamStatistics != null) { streamStatistics.bytesRead(bytesRead); } return bytesRead; }
@Test
public void testSuccessfulReadAhead() throws Exception {
    // Verifies that data fetched by the read-ahead threads is served from the
    // read-ahead buffer without triggering additional remote reads.
    // Mock failure for client.read()
    AbfsClient client = getMockAbfsClient();
    // Success operation mock
    AbfsRestOperation op = getMockRestOp();
    // Stub :
    // Pass all readAheads and fail the post eviction request to
    // prove ReadAhead buffer is used
    // for post eviction check, fail all read aheads
    doReturn(op)
        .doReturn(op)
        .doReturn(op)
        .doThrow(new TimeoutException("Internal Server error for RAH-X"))
        .doThrow(new TimeoutException("Internal Server error for RAH-Y"))
        .doThrow(new TimeoutException("Internal Server error for RAH-Z"))
        .when(client)
        .read(any(String.class), any(Long.class), any(byte[].class),
            any(Integer.class), any(Integer.class), any(String.class),
            any(String.class), any(), any(TracingContext.class));
    AbfsInputStream inputStream = getAbfsInputStream(client, "testSuccessfulReadAhead.txt");
    int beforeReadCompletedListSize = ReadBufferManager.getBufferManager().getCompletedReadListSize();
    // First read request that triggers readAheads.
    inputStream.read(new byte[ONE_KB]);
    // Only the 3 readAhead threads should have triggered client.read
    verifyReadCallCount(client, 3);
    int newAdditionsToCompletedRead =
        ReadBufferManager.getBufferManager().getCompletedReadListSize()
            - beforeReadCompletedListSize;
    // read buffer might be dumped if the ReadBufferManager getblock preceded
    // the action of buffer being picked for reading from readaheadqueue, so that
    // inputstream can proceed with read and not be blocked on readahead thread
    // availability. So the count of buffers in completedReadQueue for the stream
    // can be same or lesser than the requests triggered to queue readahead.
    Assertions.assertThat(newAdditionsToCompletedRead)
        .describedAs(
            "New additions to completed reads should be same or less than as number of readaheads")
        .isLessThanOrEqualTo(3);
    // Another read request whose requested data is already read ahead.
    inputStream.read(ONE_KB, new byte[ONE_KB], 0, ONE_KB);
    // Once created, mock will remember all interactions.
    // As the above read should not have triggered any server calls, total
    // number of read calls made at this point will be same as last.
    verifyReadCallCount(client, 3);
    // Stub will throw exception for client.read() for 4th and later calls
    // if not using the read-ahead buffer exception will be thrown on read
    checkEvictedStatus(inputStream, 0, true);
}
@Override public BranchRollbackResponseProto convert2Proto(BranchRollbackResponse branchRollbackResponse) { final short typeCode = branchRollbackResponse.getTypeCode(); final AbstractMessageProto abstractMessage = AbstractMessageProto.newBuilder().setMessageType( MessageTypeProto.forNumber(typeCode)).build(); final String msg = branchRollbackResponse.getMsg(); final AbstractResultMessageProto abstractResultMessageProto = AbstractResultMessageProto.newBuilder().setMsg( msg == null ? "" : msg).setResultCode( ResultCodeProto.valueOf(branchRollbackResponse.getResultCode().name())).setAbstractMessage(abstractMessage) .build(); final AbstractTransactionResponseProto abstractTransactionRequestProto = AbstractTransactionResponseProto .newBuilder().setAbstractResultMessage(abstractResultMessageProto).setTransactionExceptionCode( TransactionExceptionCodeProto.valueOf(branchRollbackResponse.getTransactionExceptionCode().name())) .build(); final AbstractBranchEndResponseProto abstractBranchEndResponse = AbstractBranchEndResponseProto.newBuilder(). setAbstractTransactionResponse(abstractTransactionRequestProto).setXid(branchRollbackResponse.getXid()) .setBranchId(branchRollbackResponse.getBranchId()).setBranchStatus( BranchStatusProto.forNumber(branchRollbackResponse.getBranchStatus().getCode())).build(); BranchRollbackResponseProto result = BranchRollbackResponseProto.newBuilder().setAbstractBranchEndResponse( abstractBranchEndResponse).build(); return result; }
@Test public void convert2Proto() { BranchRollbackResponse branchRollbackResponse = new BranchRollbackResponse(); branchRollbackResponse.setTransactionExceptionCode(TransactionExceptionCode.BranchTransactionNotExist); branchRollbackResponse.setResultCode(ResultCode.Success); branchRollbackResponse.setMsg("xx"); branchRollbackResponse.setXid("xid"); branchRollbackResponse.setBranchStatus(BranchStatus.PhaseTwo_Rollbacked); branchRollbackResponse.setBranchId(123); BranchRollbackResponseConvertor convertor = new BranchRollbackResponseConvertor(); BranchRollbackResponseProto proto = convertor.convert2Proto( branchRollbackResponse); BranchRollbackResponse real = convertor.convert2Model(proto); assertThat(real.getTypeCode()).isEqualTo(branchRollbackResponse.getTypeCode()); assertThat(real.getMsg()).isEqualTo(branchRollbackResponse.getMsg()); assertThat(real.getXid()).isEqualTo(branchRollbackResponse.getXid()); assertThat(real.getTransactionExceptionCode()).isEqualTo(branchRollbackResponse.getTransactionExceptionCode()); assertThat(real.getBranchStatus()).isEqualTo(branchRollbackResponse.getBranchStatus()); assertThat(real.getResultCode()).isEqualTo(branchRollbackResponse.getResultCode()); }
public final int get() { return INDEX_UPDATER.get(this) & Integer.MAX_VALUE; }
@Test void testGet() { assertEquals(0, i1.get()); assertEquals(127, i2.get()); assertEquals(Integer.MAX_VALUE, i3.get()); }
@Override
public Map<String, StepTransition> translate(WorkflowInstance workflowInstance) {
    // Compute the runtime DAG for an instance, deriving the start/end step ids
    // from its run policy. Works on a deep copy so the caller's instance is
    // never mutated.
    WorkflowInstance instance = objectMapper.convertValue(workflowInstance, WorkflowInstance.class);
    if (instance.getRunConfig() != null) {
        if (instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_INCOMPLETE
            || instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_SPECIFIC) {
            // Restart runs: start from every step that is not complete but is
            // either terminal or was never created.
            Map<String, StepInstance.Status> statusMap =
                instance.getAggregatedInfo().getStepAggregatedViews().entrySet().stream()
                    .collect(
                        Collectors.toMap(Map.Entry::getKey, entry -> entry.getValue().getStatus()));
            if (!statusMap.isEmpty()) {
                instance
                    .getRunConfig()
                    .setStartStepIds(
                        statusMap.entrySet().stream()
                            .filter(
                                entry ->
                                    !entry.getValue().isComplete()
                                        && (entry.getValue().isTerminal()
                                            || entry.getValue() == StepInstance.Status.NOT_CREATED))
                            .map(Map.Entry::getKey)
                            .collect(Collectors.toList()));
            }
            // handle the special case of restarting from a completed step
            if (instance.getRunConfig().getPolicy() == RunPolicy.RESTART_FROM_SPECIFIC) {
                String restartStepId =
                    RunRequest.getCurrentNode(instance.getRunConfig().getRestartConfig()).getStepId();
                if (!instance.getRunConfig().getStartStepIds().contains(restartStepId)) {
                    instance.getRunConfig().getStartStepIds().add(restartStepId);
                }
            }
        } else {
            // Non-restart runs: copy the caller-provided start/end step ids as-is.
            if (workflowInstance.getRunConfig().getStartStepIds() != null) {
                instance
                    .getRunConfig()
                    .setStartStepIds(new ArrayList<>(workflowInstance.getRunConfig().getStartStepIds()));
            }
            if (workflowInstance.getRunConfig().getEndStepIds() != null) {
                instance
                    .getRunConfig()
                    .setEndStepIds(new ArrayList<>(workflowInstance.getRunConfig().getEndStepIds()));
            }
        }
    }
    // Null start/end lists mean "no restriction" for the DAG computation.
    List<String> startStepIds =
        instance.getRunConfig() != null && instance.getRunConfig().getStartStepIds() != null
            ? instance.getRunConfig().getStartStepIds()
            : null;
    List<String> endStepIds =
        instance.getRunConfig() != null && instance.getRunConfig().getEndStepIds() != null
            ? instance.getRunConfig().getEndStepIds()
            : null;
    return WorkflowGraph.computeDag(instance.getRuntimeWorkflow(), startStepIds, endStepIds);
}
@Test public void testTranslateForRestartFromSpecificWithNotCreatedSteps() { instance .getAggregatedInfo() .getStepAggregatedViews() .put("job3", StepAggregatedView.builder().status(StepInstance.Status.NOT_CREATED).build()); instance.getRunConfig().setPolicy(RunPolicy.RESTART_FROM_SPECIFIC); instance .getRunConfig() .setRestartConfig( RestartConfig.builder() .addRestartNode("sample-dag-test-3", 1, "job3") .restartPolicy(RunPolicy.RESTART_FROM_SPECIFIC) .build()); Map<String, StepTransition> dag = translator.translate(instance); Assert.assertEquals(new HashSet<>(Arrays.asList("job.2", "job3", "job4")), dag.keySet()); StepTransition jobTransition = new StepTransition(); jobTransition.setPredecessors(Collections.singletonList("job3")); jobTransition.setSuccessors(Collections.singletonMap("job4", "true")); Assert.assertEquals(jobTransition, dag.get("job.2")); jobTransition.setPredecessors(Collections.emptyList()); jobTransition.setSuccessors(new HashMap<>()); jobTransition.getSuccessors().put("job.2", "true"); jobTransition.getSuccessors().put("job4", "true"); Assert.assertEquals(jobTransition, dag.get("job3")); jobTransition.setPredecessors(Arrays.asList("job3", "job.2")); jobTransition.setSuccessors(Collections.emptyMap()); Assert.assertEquals(jobTransition, dag.get("job4")); }
public void close() { close(Long.MAX_VALUE, false); }
@Test public void shouldReturnFalseOnCloseWithCloseOptionWithLeaveGroupFalseWhenThreadsHaventTerminated() throws Exception { prepareStreams(); prepareStreamThread(streamThreadOne, 1); prepareStreamThread(streamThreadTwo, 2); prepareTerminableThread(streamThreadOne); final KafkaStreams.CloseOptions closeOptions = new KafkaStreams.CloseOptions(); closeOptions.timeout(Duration.ofMillis(10L)); try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier)) { assertFalse(streams.close(closeOptions)); } }
@Override public Collection<V> values() { checkState(!destroyed, destroyedMessage); return Collections2.transform(Maps.filterValues(items, MapValue::isAlive).values(), MapValue::get); }
@Test public void testValues() throws Exception { expectPeerMessage(clusterCommunicator); assertTrue(ecMap.values().isEmpty()); // Generate some values Map<String, String> expectedValues = new HashMap<>(); for (int i = 1; i <= 10; i++) { expectedValues.put("" + i, "value" + i); } // Add them into the map expectedValues.entrySet().forEach(e -> ecMap.put(e.getKey(), e.getValue())); // Check the values collection is correct assertEquals(expectedValues.values().size(), ecMap.values().size()); expectedValues.values().forEach(v -> assertTrue(ecMap.values().contains(v))); // Update the value for one of the keys Map.Entry<String, String> first = expectedValues.entrySet().iterator().next(); expectedValues.put(first.getKey(), "new-value"); ecMap.put(first.getKey(), "new-value"); // Check the values collection is still correct assertEquals(expectedValues.values().size(), ecMap.values().size()); expectedValues.values().forEach(v -> assertTrue(ecMap.values().contains(v))); // Remove a key String removeKey = expectedValues.keySet().iterator().next(); expectedValues.remove(removeKey); ecMap.remove(removeKey); // Check the values collection is still correct assertEquals(expectedValues.values().size(), ecMap.values().size()); expectedValues.values().forEach(v -> assertTrue(ecMap.values().contains(v))); }
public RunResponse start( @NotNull String workflowId, @NotNull String version, @NotNull RunRequest runRequest) { WorkflowDefinition definition = workflowDao.getWorkflowDefinition(workflowId, version); validateRequest(version, definition, runRequest); RunProperties runProperties = RunProperties.from( Checks.notNull( definition.getPropertiesSnapshot(), "property snapshot cannot be null for workflow: " + workflowId)); // create and initiate a new instance with overrides and param evaluation WorkflowInstance instance = workflowHelper.createWorkflowInstance( definition.getWorkflow(), definition.getInternalId(), definition.getMetadata().getWorkflowVersionId(), runProperties, runRequest); RunStrategy runStrategy = definition.getRunStrategyOrDefault(); int ret = runStrategyDao.startWithRunStrategy(instance, runStrategy); RunResponse response = RunResponse.from(instance, ret); LOG.info("Created a workflow instance with response {}", response); return response; }
@Test public void testRunStrategy() throws IOException { WorkflowDefinition definitionParallelRunStrategy = loadObject( "fixtures/workflows/definition/sample-minimal-wf-run-strategy-parallel.json", WorkflowDefinition.class); definitionParallelRunStrategy.getMetadata().setWorkflowVersionId(1L); WorkflowInstance instanceParallelRunStrategy = loadObject( "fixtures/instances/sample-workflow-instance-created.json", WorkflowInstance.class); instanceParallelRunStrategy.setWorkflowId("sample-minimal-wf-run-strategy-parallel"); when(workflowDao.getWorkflowDefinition("sample-minimal-wf-run-strategy-parallel", "active")) .thenReturn(definitionParallelRunStrategy); AtomicReference<WorkflowInstance> instanceRef = new AtomicReference<>(); doAnswer( (Answer<Integer>) invocation -> { WorkflowInstance instance = (WorkflowInstance) invocation.getArguments()[0]; assertEquals(0L, instance.getWorkflowInstanceId()); assertEquals(0L, instance.getWorkflowRunId()); instance.setWorkflowInstanceId(1L); instance.setWorkflowRunId(1L); instanceRef.set(instance); return 1; }) .when(runStrategyDao) .startWithRunStrategy(any(), any()); RunRequest request = RunRequest.builder() .initiator(new ManualInitiator()) .currentPolicy(RunPolicy.START_FRESH_NEW_RUN) .requestId(UUID.fromString("41f0281e-41a2-468d-b830-56141b2f768b")) .build(); actionHandler.start("sample-minimal-wf", "active", request); verify(runStrategyDao, times(1)) .startWithRunStrategy(instanceRef.get(), Defaults.DEFAULT_RUN_STRATEGY); actionHandler.start("sample-minimal-wf-run-strategy-parallel", "active", request); assertEquals( RunStrategy.Rule.PARALLEL, definitionParallelRunStrategy.getRunStrategyOrDefault().getRule()); verify(runStrategyDao, times(1)) .startWithRunStrategy( instanceRef.get(), definitionParallelRunStrategy.getRunStrategyOrDefault()); }
public static boolean containsLocalIp(List<InetSocketAddress> clusterAddresses, AlluxioConfiguration conf) { String localAddressIp = getLocalIpAddress((int) conf.getMs(PropertyKey .NETWORK_HOST_RESOLUTION_TIMEOUT_MS)); for (InetSocketAddress addr : clusterAddresses) { String clusterNodeIp; try { clusterNodeIp = InetAddress.getByName(addr.getHostName()).getHostAddress(); if (clusterNodeIp.equals(localAddressIp)) { return true; } } catch (UnknownHostException e) { LOG.error("Get raft cluster node ip by hostname({}) failed", addr.getHostName(), e); } } return false; }
@Test public void testNotContainsLocalIP() { List<InetSocketAddress> clusterAddresses = new ArrayList<>(); InetSocketAddress raftNodeAddress1 = new InetSocketAddress("host1", 10); InetSocketAddress raftNodeAddress2 = new InetSocketAddress("host2", 20); InetSocketAddress raftNodeAddress3 = new InetSocketAddress("host3", 30); clusterAddresses.add(raftNodeAddress1); clusterAddresses.add(raftNodeAddress2); clusterAddresses.add(raftNodeAddress3); assertFalse(NetworkAddressUtils.containsLocalIp(clusterAddresses, mConfiguration)); }
public V put(K1 key1, K2 key2, V value) { checkNotNull(key1, "Key1 cannot be null"); checkNotNull(key2, "Key2 cannot be null"); checkNotNull(value, "Value cannot be null"); Map<K2, V> innerMap = backingMap.computeIfAbsent(key1, x -> new HashMap<>()); return innerMap.put(key2, value); }
@Test public void givenValueAssociated_whenPut_thenReturnPreviousValue() { map.put("1", "2", "prevValue"); String prevValue = map.put("1", "2", "newValue"); assertEquals("prevValue", prevValue); }
public void run() { runner = newJsonRunnerWithSetting( globalSettings.stream() .filter(byEnv(this.env)) .map(toRunnerSetting()) .collect(toList()), startArgs); runner.run(); }
@Test public void should_throw_exception_without_global_request_settings() { stream = getResourceAsStream("settings/request-settings.json"); runner = new SettingRunner(stream, createStartArgs(12306)); runner.run(); assertThrows(HttpResponseException.class, () -> helper.get(remoteUrl("/foo"))); }
@Override public Properties info(RedisClusterNode node) { Map<String, String> info = execute(node, RedisCommands.INFO_ALL); Properties result = new Properties(); for (Entry<String, String> entry : info.entrySet()) { result.setProperty(entry.getKey(), entry.getValue()); } return result; }
@Test public void testInfo() { RedisClusterNode master = getFirstMaster(); Properties info = connection.info(master); assertThat(info.size()).isGreaterThan(10); }
@Override public boolean equals(Object o) { return super.equals(o); }
@Test public void equalsTest() { ChunkMessageIdImpl chunkMsgId1 = new ChunkMessageIdImpl( new MessageIdImpl(0, 0, 0), new MessageIdImpl(1, 1, 1) ); ChunkMessageIdImpl chunkMsgId2 = new ChunkMessageIdImpl( new MessageIdImpl(2, 2, 2), new MessageIdImpl(3, 3, 3) ); MessageIdImpl msgId = new MessageIdImpl(1, 1, 1); assertEquals(chunkMsgId1, chunkMsgId1); assertNotEquals(chunkMsgId2, chunkMsgId1); assertEquals(msgId, chunkMsgId1); }
@Override void toHtml() throws IOException { writeLinks(); writeln("<br/>"); writeTitle("clock.png", getString("hotspots")); writeTable(); }
@Test public void test() throws IOException { final SamplingProfiler samplingProfiler = new SamplingProfiler(new ArrayList<>(), null); final List<SampledMethod> emptyHotspots = samplingProfiler.getHotspots(NB_ROWS); samplingProfiler.update(); final List<SampledMethod> hotspots = samplingProfiler.getHotspots(NB_ROWS); final StringWriter writer = new StringWriter(); new HtmlHotspotsReport(emptyHotspots, writer).toHtml(); assertNotEmptyAndClear(writer); new HtmlHotspotsReport(hotspots, writer).toHtml(); assertNotEmptyAndClear(writer); final Counter counter = new Counter("test html report", null); final Collector collector = new Collector("test", Collections.singletonList(counter)); final Period period = Period.TOUT; final HtmlReport htmlReport = new HtmlReport(collector, null, createJavaInformationsList(), period, writer); htmlReport.writeHotspots(hotspots); assertNotEmptyAndClear(writer); }
@Nonnull public static String removeBracketsFromIpv6Address(@Nonnull final String address) { final String result; if (address.startsWith("[") && address.endsWith("]")) { result = address.substring(1, address.length()-1); try { Ipv6.parse(result); // The remainder is a valid IPv6 address. Return the original value. return result; } catch (IllegalArgumentException e) { // The remainder isn't a valid IPv6 address. Return the original value. return address; } } // Not a bracket-enclosed string. Return the original input. return address; }
@Test public void stripBracketsNonIP() throws Exception { // Setup test fixture. final String input = "[Foo Bar]"; // Execute system under test. final String result = AuthCheckFilter.removeBracketsFromIpv6Address(input); // Verify result. assertEquals(input, result); // Should only strip brackets from IPv6, nothing else. }
public FEELFnResult<TemporalAmount> invoke(@ParameterName( "from" ) String val) { if ( val == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null")); } try { // try to parse as days/hours/minute/seconds return FEELFnResult.ofResult( Duration.parse( val ) ); } catch( DateTimeParseException e ) { // if it failed, try to parse as years/months try { return FEELFnResult.ofResult(ComparablePeriod.parse(val).normalized()); } catch( DateTimeParseException e2 ) { // failed to parse, so return null according to the spec return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "date-parsing exception", new RuntimeException(new Throwable() { public final List<Throwable> causes = Arrays.asList( new Throwable[]{e, e2} ); } ))); } } }
@Test void invokeParamTemporalAmountNull() { FunctionTestUtil.assertResultError(durationFunction.invoke((TemporalAmount) null), InvalidParametersEvent.class); }
@Override public boolean isAvailable() { return sonarRuntime.getEdition() == ENTERPRISE || sonarRuntime.getEdition() == DATACENTER; }
@Test @UseDataProvider("editionsAndLoginMessageFeatureAvailability") public void isAvailable_shouldOnlyBeEnabledInEnterpriseEditionPlus(SonarEdition edition, boolean shouldBeEnabled) { when(sonarRuntime.getEdition()).thenReturn(edition); boolean isAvailable = underTest.isAvailable(); assertThat(isAvailable).isEqualTo(shouldBeEnabled); }
public static boolean checkActivationFlag(String activationFlag) { switch (activationFlag) { case ENABLE: return true; case DISABLE: return false; default: throw new IllegalArgumentException("The given activation flag is not valid!"); } }
@Test public void testCheckActivationFlag() { assertFalse(checkActivationFlag("disable")); assertTrue(checkActivationFlag("enable")); }