Column: focal_method — string; lengths range from 13 to 60.9k characters
Column: test_case — string; lengths range from 25 to 109k characters
/** Moves the task at {@code taskIndex} one position earlier in the task list (delegates to moveTask). */
public void decrementIndex(int taskIndex) { moveTask(taskIndex, DECREMENT_INDEX); }
/** Verifies that decrementIndex moves the task at the given index one slot earlier. */
@Test
public void shouldDecrementIndexOfGivenTask() {
    Tasks tasks = new Tasks();
    AntTask first = antTask("b1", "t1", "w1");
    AntTask second = antTask("b2", "t2", "w2");
    AntTask third = antTask("b3", "t3", "w3");
    tasks.add(first);
    tasks.add(second);
    tasks.add(third);

    tasks.decrementIndex(2);

    assertThat(tasks.get(0), is(first));
    assertThat(tasks.get(1), is(third));
    assertThat(tasks.get(2), is(second));
}
/**
 * Validates time-size-optimizing retention settings.
 * Checks, in order: lifetime min must not exceed max; the min/max leeway must
 * cover the rotation period and (if configured) the fixed leeway; lifetime max
 * must respect the global maximum retention period; and, unless flexible
 * retention periods are allowed, both lifetimes must be whole multiples of days.
 *
 * @return a Violation describing the first failed check, or empty when valid
 */
public static Optional<IndexSetValidator.Violation> validate(ElasticsearchConfiguration elasticsearchConfiguration, IndexLifetimeConfig retentionConfig) { Period indexLifetimeMin = retentionConfig.indexLifetimeMin(); Period indexLifetimeMax = retentionConfig.indexLifetimeMax(); final Period leeway = indexLifetimeMax.minus(indexLifetimeMin); if (leeway.toStandardSeconds().getSeconds() < 0) { return Optional.of(IndexSetValidator.Violation.create(f("%s <%s> is shorter than %s <%s>", FIELD_INDEX_LIFETIME_MAX, indexLifetimeMax, FIELD_INDEX_LIFETIME_MIN, indexLifetimeMin))); } if (leeway.toStandardSeconds().isLessThan(elasticsearchConfiguration.getTimeSizeOptimizingRotationPeriod().toStandardSeconds())) { return Optional.of(IndexSetValidator.Violation.create(f("The duration between %s and %s <%s> cannot be shorter than %s <%s>", FIELD_INDEX_LIFETIME_MAX, FIELD_INDEX_LIFETIME_MIN, leeway, TIME_SIZE_OPTIMIZING_ROTATION_PERIOD, elasticsearchConfiguration.getTimeSizeOptimizingRotationPeriod()))); } Period fixedLeeway = elasticsearchConfiguration.getTimeSizeOptimizingRetentionFixedLeeway(); if (Objects.nonNull(fixedLeeway) && leeway.toStandardSeconds().isLessThan(fixedLeeway.toStandardSeconds())) { return Optional.of(IndexSetValidator.Violation.create(f("The duration between %s and %s <%s> cannot be shorter than %s <%s>", FIELD_INDEX_LIFETIME_MAX, FIELD_INDEX_LIFETIME_MIN, leeway, TIME_SIZE_OPTIMIZING_RETENTION_FIXED_LEEWAY, fixedLeeway))); } final Period maxRetentionPeriod = elasticsearchConfiguration.getMaxIndexRetentionPeriod(); if (maxRetentionPeriod != null && indexLifetimeMax.toStandardSeconds().isGreaterThan(maxRetentionPeriod.toStandardSeconds())) { return Optional.of(IndexSetValidator.Violation.create(f("Lifetime setting %s <%s> exceeds the configured maximum of %s=%s.", FIELD_INDEX_LIFETIME_MAX, indexLifetimeMax, ElasticsearchConfiguration.MAX_INDEX_RETENTION_PERIOD, maxRetentionPeriod))); } if (periodOtherThanDays(indexLifetimeMax) && 
!elasticsearchConfiguration.allowFlexibleRetentionPeriod()) { return Optional.of(IndexSetValidator.Violation.create(f("Lifetime setting %s <%s> can only be a multiple of days", FIELD_INDEX_LIFETIME_MAX, indexLifetimeMax))); } if (periodOtherThanDays(indexLifetimeMin) && !elasticsearchConfiguration.allowFlexibleRetentionPeriod()) { return Optional.of(IndexSetValidator.Violation.create(f("Lifetime setting %s <%s> can only be a multiple of days", FIELD_INDEX_LIFETIME_MIN, indexLifetimeMin))); } return Optional.empty(); }
/** validate() must flag an index_lifetime_max that is shorter than index_lifetime_min. */
@Test public void validateLifetimeMaxIsShorterThanLifetimeMin() { IndexLifetimeConfig config = IndexLifetimeConfig.builder() .indexLifetimeMin(Period.days(5)) .indexLifetimeMax(Period.days(4)) .build(); assertThat(validate(this.elasticConfig, config)).hasValueSatisfying(v -> assertThat(v.message()).contains("is shorter than index_lifetime_min") ); }
/**
 * Returns how many value lists the INSERT statement carries: the
 * {@code SET} assignment form always counts as a single value list,
 * otherwise it is the number of {@code VALUES} clauses.
 */
public int getValueListCount() {
    InsertStatement insertStatement = getSqlStatement();
    if (insertStatement.getSetAssignment().isPresent()) {
        return 1;
    }
    return insertStatement.getValues().size();
}
/** A MySQL INSERT using the SET-assignment form must report exactly one value list. */
@Test void assertGetValueListCountWithSetAssignmentForMySQL() { MySQLInsertStatement insertStatement = new MySQLInsertStatement(); List<ColumnSegment> columns = new LinkedList<>(); columns.add(new ColumnSegment(0, 0, new IdentifierValue("col"))); ColumnAssignmentSegment insertStatementAssignment = new ColumnAssignmentSegment(0, 0, columns, new LiteralExpressionSegment(0, 0, 1)); insertStatement.setSetAssignment(new SetAssignmentSegment(0, 0, Collections.singletonList(insertStatementAssignment))); insertStatement.setTable(new SimpleTableSegment(new TableNameSegment(0, 0, new IdentifierValue("")))); InsertStatementContext insertStatementContext = createInsertStatementContext(Collections.emptyList(), insertStatement); assertThat(insertStatementContext.getValueListCount(), is(1)); }
public static long adjustSplitSize(long scanSize, int parallelism, long splitSize) { // use the configured split size if it produces at least one split per slot // otherwise, adjust the split size to target parallelism with a reasonable minimum // increasing the split size may cause expensive spills and is not done automatically long splitCount = LongMath.divide(scanSize, splitSize, RoundingMode.CEILING); long adjustedSplitSize = Math.max(scanSize / parallelism, Math.min(MIN_SPLIT_SIZE, splitSize)); return splitCount < parallelism ? adjustedSplitSize : splitSize; }
/**
 * With 500 GB over 500 slots: a small configured split size (128 MB) is kept,
 * while a large one (2 GB) is shrunk to scanSize / parallelism so every slot
 * still receives a split.
 */
@Test public void testAdaptiveSplitSize() { long scanSize = 500L * 1024 * 1024 * 1024; // 500 GB int parallelism = 500; long smallDefaultSplitSize = 128 * 1024 * 1024; // 128 MB long largeDefaultSplitSize = 2L * 1024 * 1024 * 1024; // 2 GB long adjusted1 = TableScanUtil.adjustSplitSize(scanSize, parallelism, smallDefaultSplitSize); assertThat(adjusted1).isEqualTo(smallDefaultSplitSize); long adjusted2 = TableScanUtil.adjustSplitSize(scanSize, parallelism, largeDefaultSplitSize); assertThat(adjusted2).isEqualTo(scanSize / parallelism); }
/**
 * Builds a transaction XID of the form {@code ipAddress:port:tranId},
 * joined with the configured IP/port separator.
 */
public static String generateXID(long tranId) {
    return ipAddress + IP_PORT_SPLIT_CHAR + port + IP_PORT_SPLIT_CHAR + tranId;
}
/** generateXID must concatenate the configured ip, port and transaction id with ':' separators. */
@Test public void testGenerateXID() { long tranId = new Random().nextLong(); XID.setPort(8080); XID.setIpAddress("127.0.0.1"); assertThat(XID.generateXID(tranId)).isEqualTo(XID.getIpAddress() + ":" + XID.getPort() + ":" + tranId); }
/**
 * Normalizes a file path: unifies separators to '/', resolves "." and ".."
 * segments, strips "classpath:" and "file:" prefixes, expands a leading '~'
 * to the user home directory, and preserves Windows drive prefixes (e.g.
 * "C:") as well as UNC share paths that start with "\\".
 *
 * @param path the raw path; may be null
 * @return the normalized path, or null if {@code path} is null
 */
public static String normalize(String path) {
    if (path == null) {
        return null;
    }

    // Compatibility with Windows shared-directory (UNC) paths:
    // if the original path starts with \\, keep it untouched.
    if (path.startsWith("\\\\")) {
        return path;
    }

    // Strip the Spring-style "classpath:" prefix, case-insensitively.
    String pathToUse = StrUtil.removePrefixIgnoreCase(path, URLUtil.CLASSPATH_URL_PREFIX);
    // Strip the "file:" prefix.
    pathToUse = StrUtil.removePrefixIgnoreCase(pathToUse, URLUtil.FILE_URL_PREFIX);

    // Recognize the home-directory form and expand it to an absolute path.
    if (StrUtil.startWith(pathToUse, '~')) {
        pathToUse = getUserHomePath() + pathToUse.substring(1);
    }

    // Unify all separators (runs of '/' or '\') into a single slash.
    pathToUse = pathToUse.replaceAll("[/\\\\]+", StrUtil.SLASH);
    // Trim leading whitespace only; trailing whitespace is legal and kept.
    pathToUse = StrUtil.trimStart(pathToUse);
    // issue#IAB65V strip trailing newline characters.
    pathToUse = StrUtil.trim(pathToUse, 1, (c)->c == '\n' || c == '\r');

    String prefix = StrUtil.EMPTY;
    int prefixIndex = pathToUse.indexOf(StrUtil.COLON);
    if (prefixIndex > -1) {
        // Possibly a Windows-style path with a drive prefix.
        prefix = pathToUse.substring(0, prefixIndex + 1);
        if (StrUtil.startWith(prefix, StrUtil.C_SLASH)) {
            // Drop the leading slash of paths like "/C:".
            prefix = prefix.substring(1);
        }
        if (false == prefix.contains(StrUtil.SLASH)) {
            pathToUse = pathToUse.substring(prefixIndex + 1);
        } else {
            // The prefix contains '/', so this is not a Windows-style path.
            prefix = StrUtil.EMPTY;
        }
    }
    if (pathToUse.startsWith(StrUtil.SLASH)) {
        prefix += StrUtil.SLASH;
        pathToUse = pathToUse.substring(1);
    }

    List<String> pathList = StrUtil.split(pathToUse, StrUtil.C_SLASH);
    List<String> pathElements = new LinkedList<>();
    int tops = 0;

    String element;
    for (int i = pathList.size() - 1; i >= 0; i--) {
        element = pathList.get(i);
        // Skip "." segments (current directory); process everything else.
        if (false == StrUtil.DOT.equals(element)) {
            if (StrUtil.DOUBLE_DOT.equals(element)) {
                tops++;
            } else {
                if (tops > 0) {
                    // A pending ".." marker cancels this segment.
                    tops--;
                } else {
                    // Normal path element found.
                    pathElements.add(0, element);
                }
            }
        }
    }

    // issue#1703@Github
    if (tops > 0 && StrUtil.isEmpty(prefix)) {
        // Only relative paths keep the leading ".." segments; absolute paths ignore them.
        while (tops-- > 0) {
            // Re-add the unmatched ".." markers at the front.
            // Normal path element found. 
            pathElements.add(0, StrUtil.DOUBLE_DOT);
        }
    }

    return prefix + CollUtil.join(pathElements, StrUtil.SLASH);
}
/** Exercises normalize() against slash collapsing, "."/".." resolution, drive-letter prefixes and UNC paths. */
@Test public void normalizeTest() { assertEquals("/foo/", FileUtil.normalize("/foo//")); assertEquals("/foo/", FileUtil.normalize("/foo/./")); assertEquals("/bar", FileUtil.normalize("/foo/../bar")); assertEquals("/bar/", FileUtil.normalize("/foo/../bar/")); assertEquals("/baz", FileUtil.normalize("/foo/../bar/../baz")); assertEquals("/", FileUtil.normalize("/../")); assertEquals("foo", FileUtil.normalize("foo/bar/..")); assertEquals("../bar", FileUtil.normalize("foo/../../bar")); assertEquals("bar", FileUtil.normalize("foo/../bar")); assertEquals("/server/bar", FileUtil.normalize("//server/foo/../bar")); assertEquals("/bar", FileUtil.normalize("//server/../bar")); assertEquals("C:/bar", FileUtil.normalize("C:\\foo\\..\\bar")); // assertEquals("C:/bar", FileUtil.normalize("C:\\..\\bar")); assertEquals("../../bar", FileUtil.normalize("../../bar")); assertEquals("C:/bar", FileUtil.normalize("/C:/bar")); assertEquals("C:", FileUtil.normalize("C:")); // issue#3253, keep SMB share paths in their original format assertEquals("\\\\192.168.1.1\\Share\\", FileUtil.normalize("\\\\192.168.1.1\\Share\\")); }
/**
 * Deterministically selects a weighted subset of hosts for one client
 * instance: hosts are sorted, shuffled with a fixed seed so all clients agree
 * on the order, mapped onto a weighted ring, and the ring slice at this
 * instance's offset is returned with per-host weights rounded to
 * WEIGHT_DECIMAL_PLACE. Returns null when metadata is missing or the total
 * weight is zero (caller falls back to all hosts).
 */
@Override public Map<T, Double> getWeightedSubset(Map<T, Double> weightMap, DeterministicSubsettingMetadata metadata) { if (metadata != null) { List<T> points = new ArrayList<>(weightMap.keySet()); Collections.sort(points); Collections.shuffle(points, new Random(_randomSeed)); List<Double> weights = points.stream().map(weightMap::get).collect(Collectors.toList()); double totalWeight = weights.stream().mapToDouble(Double::doubleValue).sum(); if (totalWeight == 0) { return null; } Ring ring = new Ring(weights, totalWeight); double offset = metadata.getInstanceId() / (double) metadata.getTotalInstanceCount(); double subsetSliceWidth = getSubsetSliceWidth(metadata.getTotalInstanceCount(), points.size()); List<Integer> indices = ring.getIndices(offset, subsetSliceWidth); return indices.stream().collect( Collectors.toMap(points::get, i -> round(ring.getWeight(i, offset, subsetSliceWidth), WEIGHT_DECIMAL_PLACE))); } else { _log.warn("Cannot retrieve metadata required for D2 subsetting. Revert to use all available hosts."); return null; } }
/**
 * Across all clients, each host's aggregate selection weight should be
 * proportional to its configured weight, and every individual client's subset
 * should carry at least the minimum subset weight (within DELTA_DIFF).
 */
@Test(dataProvider = "differentWeightsData") public void testDistributionWithDifferentWeights(int clientNum, double[] weights, int minSubsetSize) { Map<String, Double> pointsMap = constructPointsMap(weights); Map<String, Double> distributionMap = new HashMap<>(); double minSubsetWeight = minSubsetSize / (double) weights.length; double totalHostWeights = Arrays.stream(weights).sum(); for (int i = 0; i < clientNum; i++) { _deterministicSubsettingStrategy = new DeterministicSubsettingStrategy<>("test", minSubsetSize); Map<String, Double> weightedSubset = _deterministicSubsettingStrategy.getWeightedSubset(pointsMap, new DeterministicSubsettingMetadata(i, clientNum, 0)); double totalWeights = 0; for (Map.Entry<String, Double> entry: weightedSubset.entrySet()) { String hostName = entry.getKey(); double weight = entry.getValue(); distributionMap.put(hostName, distributionMap.getOrDefault(hostName, 0D) + weight * pointsMap.get(hostName)); totalWeights += weights[Integer.parseInt(hostName.substring("test".length()))] / totalHostWeights * weight; } assertTrue(totalWeights + DELTA_DIFF >= Math.min(minSubsetWeight, 1D)); } double totalWeights = distributionMap.values().stream().mapToDouble(Double::doubleValue).sum(); for (Map.Entry<String, Double> entry: distributionMap.entrySet()) { String hostName = entry.getKey(); double hostWeight = weights[Integer.parseInt(hostName.substring("test".length()))]; assertEquals(entry.getValue() / totalWeights, hostWeight / totalHostWeights, DELTA_DIFF); } }
/** Convenience overload: extracts the MapFunction output type with no function name and lenient error handling (delegates to the 4-arg variant). */
@PublicEvolving public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes( MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) { return getMapReturnTypes(mapInterface, inType, null, false); }
/** The mapper's inferred output type should follow the first tuple field's type (Boolean). */
@Test
void testFunctionDependingOnInputWithTupleInput() {
    IdentityMapper2<Boolean> function = new IdentityMapper2<Boolean>();

    TypeInformation<Tuple2<Boolean, String>> tupleType =
            new TupleTypeInfo<Tuple2<Boolean, String>>(
                    BasicTypeInfo.BOOLEAN_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);

    TypeInformation<?> resultType = TypeExtractor.getMapReturnTypes(function, tupleType);

    assertThat(resultType.isBasicType()).isTrue();
    assertThat(resultType).isEqualTo(BasicTypeInfo.BOOLEAN_TYPE_INFO);
}
/** Returns a Reiterator over shuffle entries bounded by the given start/end positions (either bound may be null, meaning unbounded). */
@Override public Reiterator<ShuffleEntry> read( @Nullable ShufflePosition startPosition, @Nullable ShufflePosition endPosition) { return new ShuffleReadIterator(startPosition, endPosition); }
/** The read iterator should transparently concatenate entries from successive batch reads, following each batch's continuation position. */
@Test public void readerShouldMergeMultipleBatchResults() throws Exception { ShuffleEntry e1 = newShuffleEntry(KEY, SKEY, VALUE); List<ShuffleEntry> e1s = Collections.singletonList(e1); ShuffleEntry e2 = newShuffleEntry(KEY, SKEY, VALUE); List<ShuffleEntry> e2s = Collections.singletonList(e2); when(batchReader.read(START_POSITION, END_POSITION)) .thenReturn(new ShuffleBatchReader.Batch(e1s, NEXT_START_POSITION)); when(batchReader.read(NEXT_START_POSITION, END_POSITION)) .thenReturn(new ShuffleBatchReader.Batch(e2s, null)); List<ShuffleEntry> results = newArrayList(reader.read(START_POSITION, END_POSITION)); assertThat(results, contains(e1, e2)); verify(batchReader).read(START_POSITION, END_POSITION); verify(batchReader).read(NEXT_START_POSITION, END_POSITION); verifyNoMoreInteractions(batchReader); }
/** Loads all step instances for the given workflow run and step id, mapping each row via maestroStepFromResult. */
public List<StepInstance> getStepInstances( String workflowId, long workflowInstanceId, long workflowRunId, String stepId) { return getStepInstancesByIds( workflowId, workflowInstanceId, workflowRunId, stepId, this::maestroStepFromResult); }
/** A fetched step instance should round-trip all persisted fields; artifacts and timeline are empty and nulled out before the recursive comparison. */
@Test public void testGetStepInstances() { List<StepInstance> instances = stepDao.getStepInstances(TEST_WORKFLOW_ID, 1, 1, "job1"); assertEquals(1, instances.size()); StepInstance instance = instances.get(0); assertEquals(StepInstance.Status.RUNNING, instance.getRuntimeState().getStatus()); assertFalse(instance.getSignalDependencies().isSatisfied()); assertEquals( 2, instance .getOutputs() .get(StepOutputsDefinition.StepOutputType.SIGNAL) .asSignalStepOutputs() .getOutputs() .size()); assertTrue(instance.getArtifacts().isEmpty()); assertTrue(instance.getTimeline().isEmpty()); instance.setArtifacts(null); instance.setTimeline(null); Assertions.assertThat(instance).usingRecursiveComparison().isEqualTo(si); }
/** Builds the URI with no ServiceInstance and an empty variable map (delegates to the 2-arg overload). */
public String build() { return build(null, Maps.<String, Object>newHashMap()); }
/** All UriSpec variables should be substituted from the ServiceInstance fields, plus the user-supplied "scheme" variable. */
@Test public void testFromInstance() { ServiceInstanceBuilder<Void> builder = new ServiceInstanceBuilder<Void>(); builder.address("1.2.3.4"); builder.name("foo"); builder.id("bar"); builder.port(5); builder.sslPort(6); builder.registrationTimeUTC(789); builder.serviceType(ServiceType.PERMANENT); ServiceInstance<Void> instance = builder.build(); UriSpec spec = new UriSpec( "{scheme}://{address}:{port}:{ssl-port}/{name}/{id}/{registration-time-utc}/{service-type}"); Map<String, Object> m = Maps.newHashMap(); m.put("scheme", "test"); assertEquals(spec.build(instance, m), "test://1.2.3.4:5:6/foo/bar/789/permanent"); }
/**
 * Forwards the watermark only when propagation is enabled; the final
 * MAX_WATERMARK is always forwarded regardless of the flag.
 */
@Override
public void processWatermark(Watermark mark) throws Exception {
    final boolean isFinalWatermark = Watermark.MAX_WATERMARK.equals(mark);
    if (isFinalWatermark || propagateWatermark) {
        super.processWatermark(mark);
    }
}
/** With watermark propagation disabled, ordinary watermarks must be swallowed — forwarding would throw because no output is set. */
@Test public void testWatermarkSuppression() throws Exception { final InputConversionOperator<Row> operator = new InputConversionOperator<>( createConverter(DataTypes.ROW(DataTypes.FIELD("f", DataTypes.INT()))), false, false, false, true); // would throw an exception otherwise because an output is not set operator.processWatermark(new Watermark(1000)); }
/**
 * Extracts the values of the requested partition columns from a path of the
 * form {@code .../k1=v1/k2=v2/file}. Segments are scanned right-to-left
 * (skipping the file name itself and empty segments from doubled slashes);
 * scanning stops as soon as all requested columns have been seen, so the
 * right-most value of a duplicated column wins. Segments with unrequested
 * keys are ignored; a non-empty segment without '=' or a missing requested
 * column raises UserException.
 *
 * @return the values in the same order as {@code columnsFromPath}
 */
public static List<String> parseColumnsFromPath(String filePath, List<String> columnsFromPath) throws UserException { if (columnsFromPath == null || columnsFromPath.isEmpty()) { return Collections.emptyList(); } String[] strings = filePath.split("/"); if (strings.length < 2) { throw new UserException( "Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath); } String[] columns = new String[columnsFromPath.size()]; int size = 0; for (int i = strings.length - 2; i >= 0; i--) { String str = strings[i]; if (str != null && str.isEmpty()) { continue; } if (str == null || !str.contains("=")) { throw new UserException( "Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath); } String[] pair = str.split("=", 2); if (pair.length != 2) { throw new UserException( "Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath); } int index = columnsFromPath.indexOf(pair[0]); if (index == -1) { continue; } columns[index] = pair[1]; size++; if (size >= columnsFromPath.size()) { break; } } if (size != columnsFromPath.size()) { throw new UserException( "Fail to parse columnsFromPath, expected: " + columnsFromPath + ", filePath: " + filePath); } return Lists.newArrayList(columns); }
/**
 * Covers happy paths, missing keys, unrequested-only keys, duplicate keys
 * (right-most value wins), plain segments between partitions (rejected),
 * too-few segments, doubled slashes, and values containing '='.
 */
@Test public void parseColumnsFromPath() { String path = "/path/to/dir/k1=v1/xxx.csv"; try { List<String> columns = BrokerUtil.parseColumnsFromPath(path, Collections.singletonList("k1")); assertEquals(1, columns.size()); assertEquals(Collections.singletonList("v1"), columns); } catch (UserException e) { fail(); } path = "/path/to/dir/k1/xxx.csv"; try { List<String> columns = BrokerUtil.parseColumnsFromPath(path, Collections.singletonList("k1")); fail(); } catch (UserException ignored) { } path = "/path/to/dir/k1=v1/xxx.csv"; try { List<String> columns = BrokerUtil.parseColumnsFromPath(path, Collections.singletonList("k2")); fail(); } catch (UserException ignored) { } path = "/path/to/dir/k1=v2/k1=v1/xxx.csv"; try { List<String> columns = BrokerUtil.parseColumnsFromPath(path, Collections.singletonList("k1")); assertEquals(1, columns.size()); assertEquals(Collections.singletonList("v1"), columns); } catch (UserException e) { fail(); } path = "/path/to/dir/k2=v2/k1=v1/xxx.csv"; try { List<String> columns = BrokerUtil.parseColumnsFromPath(path, Lists.newArrayList("k1", "k2")); assertEquals(2, columns.size()); assertEquals(Lists.newArrayList("v1", "v2"), columns); } catch (UserException e) { fail(); } path = "/path/to/dir/k2=v2/a/k1=v1/xxx.csv"; try { List<String> columns = BrokerUtil.parseColumnsFromPath(path, Lists.newArrayList("k1", "k2")); fail(); } catch (UserException ignored) { } path = "/path/to/dir/k2=v2/k1=v1/xxx.csv"; try { List<String> columns = BrokerUtil.parseColumnsFromPath(path, Lists.newArrayList("k1", "k2", "k3")); fail(); } catch (UserException ignored) { } path = "/path/to/dir/k2=v2//k1=v1//xxx.csv"; try { List<String> columns = BrokerUtil.parseColumnsFromPath(path, Lists.newArrayList("k1", "k2")); assertEquals(2, columns.size()); assertEquals(Lists.newArrayList("v1", "v2"), columns); } catch (UserException e) { fail(); } path = "/path/to/dir/k2==v2=//k1=v1//xxx.csv"; try { List<String> columns = BrokerUtil.parseColumnsFromPath(path, 
Lists.newArrayList("k1", "k2")); assertEquals(2, columns.size()); assertEquals(Lists.newArrayList("v1", "=v2="), columns); } catch (UserException e) { fail(); } path = "/path/to/dir/k2==v2=//k1=v1/"; try { List<String> columns = BrokerUtil.parseColumnsFromPath(path, Lists.newArrayList("k1", "k2")); fail(); } catch (UserException ignored) { } path = "/path/to/dir/k1=2/a/xxx.csv"; try { List<String> columns = BrokerUtil.parseColumnsFromPath(path, Collections.singletonList("k1")); fail(); } catch (UserException ignored) { ignored.printStackTrace(); } }
/** Returns true if any of this topology's state stores is persistent. */
public boolean hasPersistentLocalStore() {
    for (final StateStore candidate : stateStores) {
        if (candidate.persistent()) {
            return true;
        }
    }
    return false;
}
/** A topology containing a persistent key-value store must report a persistent local store. */
@Test
public void persistentLocalStoreShouldBeDetected() {
    final ProcessorTopology topology =
        createLocalStoreTopology(Stores.persistentKeyValueStore("my-store"));
    assertTrue(topology.hasPersistentLocalStore());
}
/** Blocking wrapper around tryAddAsync; per the accompanying test, the batch is added all-or-nothing and fails when any value already exists. */
@Override public boolean tryAdd(V... values) { return get(tryAddAsync(values)); }
/** tryAdd should succeed for an all-new batch and reject (leaving size unchanged) a batch containing any duplicate. */
@Test public void testTryAdd() { RSetCache<String> cache = redisson.getSetCache("list", IntegerCodec.INSTANCE); Set<String> names = new HashSet<>(); int elements = 200000; for (int i = 0; i < elements; i++) { names.add("name" + i); } boolean s = cache.tryAdd(names.toArray(new String[]{})); assertThat(s).isTrue(); assertThat(cache.size()).isEqualTo(elements); Set<String> names2 = new HashSet<>(); for (int i = elements+1; i < elements + 10000; i++) { names2.add("name" + i); } names2.add("name10"); boolean r = cache.tryAdd(names2.toArray(new String[]{})); assertThat(r).isFalse(); assertThat(cache.size()).isEqualTo(elements); }
/**
 * Builds BATCH_UPDATE request data from the routed context: unstructured-data
 * routes carry only the batch keys (no entity body); otherwise the entity
 * body is required and parsed into a key-to-entity map using the resource's
 * record value class.
 */
@SuppressWarnings("unchecked") @Override public RestLiRequestData extractRequestData(RoutingResult routingResult, DataMap dataMap) { final RestLiRequestDataImpl.Builder builder = new RestLiRequestDataImpl.Builder(); Set<?> ids = routingResult.getContext().getPathKeys().getBatchIds(); // No entity for unstructured data requests if (UnstructuredDataUtil.isUnstructuredDataRouting(routingResult)) { if (ids != null) { builder.batchKeys(ids); } return builder.build(); } else { checkEntityNotNull(dataMap, ResourceMethod.BATCH_UPDATE); Class<? extends RecordTemplate> valueClass = ArgumentUtils.getValueClass(routingResult); @SuppressWarnings({"rawtypes"}) Map inputMap = ArgumentBuilder.buildBatchRequestMap(routingResult, dataMap, valueClass, ids); if (inputMap != null) { builder.batchKeyEntityMap(inputMap); } return builder.build(); } }
/** Malformed batch-update payloads must surface as a 400 RoutingException carrying the expected error message. */
@Test(dataProvider = "failureData") public void testFailure(ProtocolVersion version, Key primaryKey, Key[] associationKeys, String requestEntity, Object[] keys, String errorMessage) throws Exception { Set<Object> batchKeys = new HashSet<>(Arrays.asList(keys)); RestRequest request = RestLiArgumentBuilderTestHelper.getMockRequest(requestEntity, version); ResourceModel model = RestLiArgumentBuilderTestHelper.getMockResourceModel(MyComplexKey.class, primaryKey, associationKeys, batchKeys); ResourceMethodDescriptor descriptor = RestLiArgumentBuilderTestHelper.getMockResourceMethodDescriptor( model, null,null, CollectionResourceAsyncTemplate.class.getMethod("batchUpdate", BatchUpdateRequest.class, Callback.class)); ServerResourceContext context = RestLiArgumentBuilderTestHelper.getMockResourceContext(batchKeys, version, false, false); RoutingResult routingResult = RestLiArgumentBuilderTestHelper.getMockRoutingResult(descriptor, context); RestLiArgumentBuilder argumentBuilder = new BatchUpdateArgumentBuilder(); try { argumentBuilder.extractRequestData(routingResult, DataMapUtils.readMapWithExceptions(request)); fail("Expected RoutingException"); } catch (RoutingException e) { assertTrue(e.getMessage().contains(errorMessage)); assertEquals(HttpStatus.S_400_BAD_REQUEST.getCode(), e.getStatus()); } verify(request, model, descriptor, context, routingResult); }
public static Map<String, String> parseMapProperty(String property) { Map<String, String> result = new LinkedHashMap<>(); // LinkedHashMap to keep insertion order // Split on non-escaped commas List<String> entries = parseListProperty(property); for (String entry : entries) { Matcher matcher = KEY_VALUE_PATTERN.matcher(entry); if (!matcher.matches()) { throw new IllegalArgumentException("'" + entry + "' is not a valid key-value pair"); } result.put(matcher.group("name"), matcher.group("value")); } return result; }
/** Pairs are parsed in order, escaped commas are honored inside values, empty values are allowed, and entries without '=' are rejected. */
@Test public void testParseMapProperty() { assertThat(ConfigurationPropertyValidator.parseMapProperty("abc=def")) .containsExactly("abc", "def"); assertThat( ConfigurationPropertyValidator.parseMapProperty( "abc=def,gh\\,i=j\\\\\\,kl,mno=,pqr=stu")) .containsExactly("abc", "def", "gh,i", "j\\\\,kl", "mno", "", "pqr", "stu") .inOrder(); assertThrows( IllegalArgumentException.class, () -> ConfigurationPropertyValidator.parseMapProperty("not valid")); }
/**
 * Combines two predicates into a compound of type {@code klass}, flattening
 * one level: if either argument is already an instance of that compound type,
 * its sub-predicates are spliced in directly instead of nesting. The compound
 * class must expose a public no-arg constructor.
 */
static <T extends CompoundPredicate> T flattenCompound(Predicate predicateLeft, Predicate predicateRight, Class<T> klass) { // The following could have been achieved with {@link com.hazelcast.query.impl.predicates.FlatteningVisitor}, // however since we only care for 2-argument flattening, we can avoid constructing a visitor and its internals // for each token pass at the cost of the following explicit code. Predicate[] predicates; if (klass.isInstance(predicateLeft) || klass.isInstance(predicateRight)) { Predicate[] left = getSubPredicatesIfClass(predicateLeft, klass); Predicate[] right = getSubPredicatesIfClass(predicateRight, klass); predicates = new Predicate[left.length + right.length]; ArrayUtils.concat(left, right, predicates); } else { predicates = new Predicate[]{predicateLeft, predicateRight}; } try { T compoundPredicate = klass.getDeclaredConstructor().newInstance(); compoundPredicate.setPredicates(predicates); return compoundPredicate; } catch (ReflectiveOperationException e) { throw new IllegalArgumentException(String.format("%s must have a public default constructor", klass.getName())); } }
/** Flattening OR(AND(..), OR(l, r)) must splice the nested OR's children while keeping the AND intact: OR(AND(..), l, r). */
@Test public void testFlattenOr_withAndOrPredicates() { OrPredicate orPredicate = new OrPredicate(leftOfOr, rightOfOr); AndPredicate andPredicate = new AndPredicate(leftOfAnd, rightOfAnd); OrPredicate flattenedCompoundOr = SqlPredicate.flattenCompound(andPredicate, orPredicate, OrPredicate.class); assertSame(andPredicate, flattenedCompoundOr.getPredicates()[0]); assertSame(leftOfOr, flattenedCompoundOr.getPredicates()[1]); assertSame(rightOfOr, flattenedCompoundOr.getPredicates()[2]); }
public static Map<String, Object> convertValues(final Map<String, Object> data, final ConfigurationRequest configurationRequest) throws ValidationException { final Map<String, Object> configuration = Maps.newHashMapWithExpectedSize(data.size()); final Map<String, Map<String, Object>> configurationFields = configurationRequest.asList(); for (final Map.Entry<String, Object> entry : data.entrySet()) { final String field = entry.getKey(); final Map<String, Object> fieldDescription = configurationFields.get(field); if (fieldDescription == null || fieldDescription.isEmpty()) { throw new ValidationException(field, "Unknown configuration field description for field \"" + field + "\""); } final String type = (String) fieldDescription.get("type"); // Decide what to cast to. (string, bool, number) Object value; switch (type) { case "text": case "dropdown": value = entry.getValue() == null ? "" : String.valueOf(entry.getValue()); break; case "number": try { value = Integer.parseInt(String.valueOf(entry.getValue())); } catch (NumberFormatException e) { // If a numeric field is optional and not provided, use null as value if ("true".equals(String.valueOf(fieldDescription.get("is_optional")))) { value = null; } else { throw new ValidationException(field, e.getMessage()); } } break; case "boolean": value = "true".equalsIgnoreCase(String.valueOf(entry.getValue())); break; case "list": final List<?> valueList = entry.getValue() == null ? Collections.emptyList() : (List<?>) entry.getValue(); value = valueList.stream() .filter(o -> o != null && o instanceof String) .map(String::valueOf) .collect(Collectors.toList()); break; default: throw new ValidationException(field, "Unknown configuration field type \"" + type + "\""); } configuration.put(field, value); } return configuration; }
/** A data field with no matching configuration description must raise a ValidationException naming the field. */
@Test public void convertValuesThrowsIllegalArgumentExceptionOnEmptyFieldDescription() throws Exception { thrown.expect(ValidationException.class); thrown.expectMessage("Unknown configuration field description for field \"string\""); final ConfigurationRequest cr = new ConfigurationRequest(); final Map<String, Object> data = new HashMap<>(); data.put("string", "foo"); ConfigurationMapConverter.convertValues(data, cr); }
/**
 * Registers every @Udtf-annotated method of a @UdtfDescription-annotated
 * class as a table function in the function registry. Each method must return
 * a parameterized List; an invalid method either aborts loading
 * (throwExceptionOnLoadFailure) or is logged and skipped. Also initializes an
 * invocation metrics sensor for the UDTF.
 */
public void loadUdtfFromClass( final Class<?> theClass, final String path ) { final UdtfDescription udtfDescriptionAnnotation = theClass.getAnnotation(UdtfDescription.class); if (udtfDescriptionAnnotation == null) { throw new KsqlException(String.format("Cannot load class %s. Classes containing UDTFs must" + "be annotated with @UdtfDescription.", theClass.getName())); } final String functionName = udtfDescriptionAnnotation.name(); final String sensorName = "ksql-udtf-" + functionName; FunctionMetrics.initInvocationSensor(metrics, sensorName, "ksql-udtf", functionName + " udtf"); final UdfMetadata metadata = new UdfMetadata( udtfDescriptionAnnotation.name(), udtfDescriptionAnnotation.description(), udtfDescriptionAnnotation.author(), udtfDescriptionAnnotation.version(), udtfDescriptionAnnotation.category(), path ); final TableFunctionFactory factory = new TableFunctionFactory(metadata); for (final Method method : theClass.getMethods()) { if (method.getAnnotation(Udtf.class) != null) { final Udtf annotation = method.getAnnotation(Udtf.class); try { if (method.getReturnType() != List.class) { throw new KsqlException(String .format("UDTF functions must return a List. Class %s Method %s", theClass.getName(), method.getName() )); } final Type ret = method.getGenericReturnType(); if (!(ret instanceof ParameterizedType)) { throw new KsqlException(String .format( "UDTF functions must return a parameterized List. 
Class %s Method %s", theClass.getName(), method.getName() )); } final Type typeArg = ((ParameterizedType) ret).getActualTypeArguments()[0]; final ParamType returnType = FunctionLoaderUtils .getReturnType(method, typeArg, annotation.schema(), typeParser); final List<ParameterInfo> parameters = FunctionLoaderUtils .createParameters(method, functionName, typeParser); final KsqlTableFunction tableFunction = createTableFunction(method, FunctionName.of(functionName), returnType, parameters, annotation.description(), annotation ); factory.addFunction(tableFunction); } catch (final KsqlException e) { if (throwExceptionOnLoadFailure) { throw e; } else { LOGGER.warn( "Failed to add UDTF to the MetaStore. name={} method={}", udtfDescriptionAnnotation.name(), method, e ); } } } } functionRegistry.addTableFunctionFactory(factory); }
/** Loading a UDTF whose method returns a raw (non-parameterized) List must fail with a message naming the class and method. */
@Test public void shouldNotLoadUdtfWithRawListReturn() { // Given: final MutableFunctionRegistry functionRegistry = new InternalFunctionRegistry(); final SqlTypeParser typeParser = create(EMPTY); final UdtfLoader udtfLoader = new UdtfLoader( functionRegistry, empty(), typeParser, true ); // When: final Exception e = assertThrows( KsqlException.class, () -> udtfLoader.loadUdtfFromClass(RawListReturn.class, INTERNAL_PATH) ); // Then: assertThat(e.getMessage(), containsString( "UDTF functions must return a parameterized List. Class io.confluent.ksql.function.UdtfLoaderTest$RawListReturn Method badReturn")); }
/** Returns keyed list state for the descriptor, first validating preconditions and lazily initializing the descriptor's serializer from this context. */
@Override public <T> ListState<T> getListState(ListStateDescriptor<T> stateProperties) { KeyedStateStore keyedStateStore = checkPreconditionsAndGetKeyedStateStore(stateProperties); stateProperties.initializeSerializerUnlessSet(this::createSerializer); return keyedStateStore.getListState(stateProperties); }
/** Unset list state should yield a non-null, empty iterable rather than null. */
@Test void testListStateReturnsEmptyListByDefault() throws Exception { StreamingRuntimeContext context = createRuntimeContext(); ListStateDescriptor<String> descr = new ListStateDescriptor<>("name", String.class); ListState<String> state = context.getListState(descr); Iterable<String> value = state.get(); assertThat(value).isNotNull(); assertThat(value.iterator()).isExhausted(); }
/** Resolves the Area record for an IP address by first mapping the IP to its numeric area id. */
public static Area getArea(String ip) { return AreaUtils.getArea(getAreaId(ip)); }
/** An IP inside the 120.202.4.0–120.202.4.255 range should resolve to area 420600 (Xiangyang). */
@Test public void testGetArea_string() { // 120.202.4.0|120.202.4.255|420600 Area area = IPUtils.getArea("120.202.4.50"); assertEquals("襄阳市", area.getName()); }
/** Delegates to the extended overload with its final flag fixed to true — NOTE(review): flag semantics are defined by that overload; confirm there. */
@Override protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, ByteBuf msg, boolean preferDirect) { return allocateBuffer(ctx, msg, preferDirect, true); }
/** Requesting a buffer for Integer.MAX_VALUE readable bytes must fail with EncoderException instead of silently overflowing. */
@Test public void testAllocateOnHeapBufferOverflowsOutputSize() { final int maxEncodeSize = Integer.MAX_VALUE; final Lz4FrameEncoder encoder = newEncoder(Lz4Constants.DEFAULT_BLOCK_SIZE, maxEncodeSize); when(buffer.readableBytes()).thenReturn(maxEncodeSize); buffer.writerIndex(maxEncodeSize); assertThrows(EncoderException.class, new Executable() { @Override public void execute() { encoder.allocateBuffer(ctx, buffer, false); } }); }
/**
 * Applies the per-column forward-index/dictionary operations computed by
 * {@code computeOperations} against the segment.
 *
 * <p>Operations handled:
 * <ul>
 *   <li>DISABLE_FORWARD_INDEX — only records the column in {@code _tmpForwardIndexColumns};
 *       actual deletion is deferred so other index handlers can still read the forward index.</li>
 *   <li>ENABLE_FORWARD_INDEX — rebuilds the forward index and sanity-checks that the dictionary
 *       exists (dictionary column) or is absent (raw column) afterwards.</li>
 *   <li>DISABLE_DICTIONARY — if the column's forward index is also configured off, removes the
 *       dictionary outright; otherwise converts to a raw forward index.</li>
 *   <li>ENABLE_DICTIONARY — builds a dictionary-based forward index and verifies it was created.</li>
 *   <li>CHANGE_INDEX_COMPRESSION_TYPE — rewrites the forward index with the new compression.</li>
 * </ul>
 * Throws {@link IllegalStateException} when a post-condition check fails or an unknown
 * operation is encountered.
 */
@Override public void updateIndices(SegmentDirectory.Writer segmentWriter) throws Exception { Map<String, List<Operation>> columnOperationsMap = computeOperations(segmentWriter); if (columnOperationsMap.isEmpty()) { return; } for (Map.Entry<String, List<Operation>> entry : columnOperationsMap.entrySet()) { String column = entry.getKey(); List<Operation> operations = entry.getValue(); for (Operation operation : operations) { switch (operation) { case DISABLE_FORWARD_INDEX: // Deletion of the forward index will be handled outside the index handler to ensure that other index // handlers that need the forward index to construct their own indexes will have it available. _tmpForwardIndexColumns.add(column); break; case ENABLE_FORWARD_INDEX: ColumnMetadata columnMetadata = createForwardIndexIfNeeded(segmentWriter, column, false); if (columnMetadata.hasDictionary()) { if (!segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) { throw new IllegalStateException(String.format( "Dictionary should still exist after rebuilding forward index for dictionary column: %s", column)); } } else { if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) { throw new IllegalStateException( String.format("Dictionary should not exist after rebuilding forward index for raw column: %s", column)); } } break; case DISABLE_DICTIONARY: Set<String> newForwardIndexDisabledColumns = FieldIndexConfigsUtil.columnsWithIndexDisabled(_fieldIndexConfigs.keySet(), StandardIndexes.forward(), _fieldIndexConfigs); if (newForwardIndexDisabledColumns.contains(column)) { removeDictionaryFromForwardIndexDisabledColumn(column, segmentWriter); if (segmentWriter.hasIndexFor(column, StandardIndexes.dictionary())) { throw new IllegalStateException( String.format("Dictionary should not exist after disabling dictionary for column: %s", column)); } } else { disableDictionaryAndCreateRawForwardIndex(column, segmentWriter); } break; case ENABLE_DICTIONARY: createDictBasedForwardIndex(column,
segmentWriter); if (!segmentWriter.hasIndexFor(column, StandardIndexes.forward())) { throw new IllegalStateException(String.format("Forward index was not created for column: %s", column)); } break; case CHANGE_INDEX_COMPRESSION_TYPE: rewriteForwardIndexForCompressionChange(column, segmentWriter); break; default: throw new IllegalStateException("Unsupported operation for column " + column); } } } }
/**
 * Verifies enabling a dictionary on two randomly chosen raw (no-dictionary) columns in one pass:
 * after {@code updateIndices} + cleanup, both columns must have forward and dictionary indexes,
 * a valid forward index, and column metadata unchanged except for {@code hasDictionary} and
 * {@code dictionaryElementSize}.
 *
 * <p>Columns are picked at random but never from {@code FORWARD_INDEX_DISABLED_RAW_COLUMNS},
 * and the second pick is forced to differ from the first. The expected dictionary element size
 * (7 for STRING/BYTES, 4 for BIG_DECIMAL, else 0) is tied to the rows in createTestData().
 */
@Test public void testEnableDictionaryForMultipleColumns() throws Exception { SegmentMetadataImpl existingSegmentMetadata = new SegmentMetadataImpl(_segmentDirectory); SegmentDirectory segmentLocalFSDirectory = new SegmentLocalFSDirectory(_segmentDirectory, existingSegmentMetadata, ReadMode.mmap); SegmentDirectory.Writer writer = segmentLocalFSDirectory.createWriter(); IndexLoadingConfig indexLoadingConfig = new IndexLoadingConfig(null, _tableConfig); Random rand = new Random(); String col1; do { col1 = _noDictionaryColumns.get(rand.nextInt(_noDictionaryColumns.size())); } while (FORWARD_INDEX_DISABLED_RAW_COLUMNS.contains(col1)); indexLoadingConfig.removeNoDictionaryColumns(col1); String col2; do { col2 = _noDictionaryColumns.get(rand.nextInt(_noDictionaryColumns.size())); } while (FORWARD_INDEX_DISABLED_RAW_COLUMNS.contains(col2) || col2.equals(col1)); indexLoadingConfig.removeNoDictionaryColumns(col2); ForwardIndexHandler fwdIndexHandler = new ForwardIndexHandler(segmentLocalFSDirectory, indexLoadingConfig, _schema); fwdIndexHandler.updateIndices(writer); fwdIndexHandler.postUpdateIndicesCleanup(writer); // Tear down before validation. Because columns.psf and index map cleanup happens at segmentDirectory.close() segmentLocalFSDirectory.close(); // Col1 validation. ColumnMetadata metadata = existingSegmentMetadata.getColumnMetadataFor(col1); testIndexExists(col1, StandardIndexes.forward()); testIndexExists(col1, StandardIndexes.dictionary()); validateIndexMap(col1, true, false); validateForwardIndex(col1, null, metadata.isSorted()); // In column metadata, nothing other than hasDictionary and dictionaryElementSize should change. int dictionaryElementSize = 0; FieldSpec.DataType dataType = metadata.getDataType(); if (dataType == FieldSpec.DataType.STRING || dataType == FieldSpec.DataType.BYTES) { // This value is based on the rows in createTestData().
dictionaryElementSize = 7; } else if (dataType == FieldSpec.DataType.BIG_DECIMAL) { dictionaryElementSize = 4; } validateMetadataProperties(col1, true, dictionaryElementSize, metadata.getCardinality(), metadata.getTotalDocs(), dataType, metadata.getFieldType(), metadata.isSorted(), metadata.isSingleValue(), metadata.getMaxNumberOfMultiValues(), metadata.getTotalNumberOfEntries(), metadata.isAutoGenerated(), metadata.getMinValue(), metadata.getMaxValue(), false); // Col2 validation. metadata = existingSegmentMetadata.getColumnMetadataFor(col2); testIndexExists(col2, StandardIndexes.forward()); testIndexExists(col2, StandardIndexes.dictionary()); validateIndexMap(col2, true, false); validateForwardIndex(col2, null, metadata.isSorted()); // In column metadata, nothing other than hasDictionary and dictionaryElementSize should change. dictionaryElementSize = 0; dataType = metadata.getDataType(); if (dataType == FieldSpec.DataType.STRING || dataType == FieldSpec.DataType.BYTES) { // This value is based on the rows in createTestData(). dictionaryElementSize = 7; } else if (dataType == FieldSpec.DataType.BIG_DECIMAL) { dictionaryElementSize = 4; } validateMetadataProperties(col2, true, dictionaryElementSize, metadata.getCardinality(), metadata.getTotalDocs(), dataType, metadata.getFieldType(), metadata.isSorted(), metadata.isSingleValue(), metadata.getMaxNumberOfMultiValues(), metadata.getTotalNumberOfEntries(), metadata.isAutoGenerated(), metadata.getMinValue(), metadata.getMaxValue(), false); }
/**
 * Blocks until a lease is available, then acquires it.
 *
 * <p>Delegates to the internal lock with an unbounded wait ({@code -1} timeout, no unit),
 * then wraps the resulting lock node path in a {@link Lease}.
 *
 * @return the acquired lease; callers must close it to release
 * @throws Exception on ZooKeeper/interruption errors from the underlying lock attempt
 */
public Lease acquire() throws Exception {
    final String lockNodePath = internals.attemptLock(-1, null, null);
    return makeLease(lockNodePath);
}
/**
 * Stress-tests the semaphore with 10 threads contending for a single lease: each thread
 * acquires, holds briefly, and releases. All threads must complete within the 10 second
 * termination window, proving leases are released and handed over correctly.
 */
@Test public void testThreads() throws Exception { final int THREAD_QTY = 10; Timing timing = new Timing(); CuratorFramework client = CuratorFrameworkFactory.newClient( server.getConnectString(), timing.session(), timing.connection(), new RetryOneTime(1)); client.start(); try { final InterProcessSemaphoreV2 semaphore = new InterProcessSemaphoreV2(client, "/test", 1); ExecutorService service = Executors.newFixedThreadPool(THREAD_QTY); for (int i = 0; i < THREAD_QTY; ++i) { service.submit(new Callable<Object>() { @Override public Object call() throws Exception { Lease lease = semaphore.acquire(); try { Thread.sleep(1); } finally { lease.close(); } return null; } }); } service.shutdown(); assertTrue(service.awaitTermination(10, TimeUnit.SECONDS)); } finally { TestCleanState.closeAndTestClean(client); } }
/**
 * Closes this stream by closing the wrapped delegate; any {@link IOException} from the
 * delegate propagates to the caller.
 */
@Override public void close() throws IOException { delegate.close(); }
/**
 * Smoke test: recreates the compression output stream over an in-memory buffer and closes it.
 * Passes if {@code close()} completes without throwing; no further state is asserted.
 */
@Test public void testClose() throws IOException { CompressionProvider provider = outStream.getCompressionProvider(); ByteArrayOutputStream out = new ByteArrayOutputStream(); outStream = new DummyCompressionOS( out, provider ); outStream.close(); }
/**
 * URL-encodes {@code source} according to the charset carried by {@code sourceType}.
 *
 * @deprecated since 13.0, scheduled for removal — use {@code urlEncode(Object, MediaType)}
 * directly instead.
 */
@Deprecated(forRemoval=true, since = "13.0") public static String convertTextToUrlEncoded(Object source, MediaType sourceType) { return urlEncode(source, sourceType); }
/**
 * Verifies that URL-encoding honors the charset on the source media type: a UTF-16
 * text/plain source must match {@link URLEncoder#encode} with "UTF-16".
 */
@Test public void testTextToURLEncodedConversion() throws UnsupportedEncodingException { String source = "They're either a benefit or a hazard. If they're a benefit, it's not my problem."; String result = StandardConversions.convertTextToUrlEncoded(source, TEXT_PLAIN.withCharset(UTF_16)); assertEquals(URLEncoder.encode(source, "UTF-16"), result); }
/**
 * Creates an {@link Application} from an on-disk application package.
 * The trailing {@code false} constructor flag's meaning is defined by the Application
 * constructor (not visible here).
 *
 * @param path       root directory of the application package
 * @param networking networking mode for the application
 */
public static Application fromApplicationPackage(Path path, Networking networking) { return new Application(path, networking, false); }
/**
 * Verifies that an application package declaring Athenz in deployment.xml deploys without
 * error; success is simply the try-with-resources block completing.
 */
@Test void athenz_in_deployment_xml() { try (Application application = Application.fromApplicationPackage(new File("src/test/app-packages/athenz-in-deployment-xml/"), Networking.disable)) { // Deployment succeeded } }
/** Returns the precomputed variance of this distribution (accessor for the cached field). */
@Override public double variance() { return variance; }
/**
 * Verifies the kernel density estimate's variance over the shared sample {@code x}
 * against a reference value, to 1e-6 tolerance.
 */
@Test
public void testVariance() {
    System.out.println("variance");
    KernelDensity density = new KernelDensity(x);
    double expected = 9.404966;
    assertEquals(expected, density.variance(), 1E-6);
}
/**
 * Parses a string into a {@link Boolean}.
 *
 * <p>Empty/absent input yields {@code null}; otherwise the value is whatever
 * {@code Boolean.valueOf} produces (case-insensitive "true" → TRUE, anything else → FALSE).
 *
 * @param source text to convert, may be null or empty
 * @return the parsed Boolean, or {@code null} for empty input
 */
@Override
public Boolean convert(String source) {
    if (!isNotEmpty(source)) {
        return null;
    }
    return valueOf(source);
}
/**
 * Verifies Boolean conversion semantics: case-insensitive "true" variants map to TRUE,
 * any other non-empty text maps to FALSE, and empty/null input maps to null.
 *
 * <p>Fix: the original asserted {@code convert("true")} twice; the duplicate is replaced
 * with an upper-case variant to actually exercise case-insensitivity.
 */
@Test
void testConvert() {
    assertTrue(converter.convert("true"));
    assertTrue(converter.convert("TRUE"));
    assertTrue(converter.convert("True"));
    assertFalse(converter.convert("a"));
    assertNull(converter.convert(""));
    assertNull(converter.convert(null));
}
/**
 * Parses {@code expression} as a single Janino conditional expression and validates it with
 * a {@link ValueExpressionVisitor}, collecting guessed variables and operators into the
 * returned {@link ParseResult}.
 *
 * <p>The result is only populated when the parser consumed the entire input (next token is
 * END_OF_INPUT); otherwise {@code result.ok} stays false.
 *
 * <p>NOTE(review): the catch block intentionally swallows all parse/scan exceptions so that
 * any malformed expression simply yields {@code ok == false} — presumably deliberate, but the
 * lost exception message makes diagnostics harder; confirm before changing.
 */
static ParseResult parse(String expression, NameValidator variableValidator) { ParseResult result = new ParseResult(); try { Parser parser = new Parser(new Scanner("ignore", new StringReader(expression))); Java.Atom atom = parser.parseConditionalExpression(); if (parser.peek().type == TokenType.END_OF_INPUT) { result.guessedVariables = new LinkedHashSet<>(); result.operators = new LinkedHashSet<>(); ValueExpressionVisitor visitor = new ValueExpressionVisitor(result, variableValidator); result.ok = atom.accept(visitor); result.invalidMessage = visitor.invalidMessage; } } catch (Exception ex) { } return result; }
/**
 * Checks which value expressions the parser accepts: plain arithmetic with validated
 * variables and Math.* calls pass; comparisons, method calls on encoded values, and
 * enum references are rejected. Guessed variables must contain exactly the validated names.
 */
@Test public void isValidAndSimpleCondition() { ParseResult result = parse("edge == edge", (arg) -> false); assertFalse(result.ok); result = parse("Math.sqrt(2)", (arg) -> false); assertTrue(result.ok, result.invalidMessage); assertTrue(result.guessedVariables.isEmpty()); result = parse("Math.sqrt(my_speed)", (arg) -> arg.equals("my_speed")); assertTrue(result.ok, result.invalidMessage); assertEquals("[my_speed]", result.guessedVariables.toString()); result = parse("edge.getDistance()", (arg) -> false); assertFalse(result.ok); result = parse("road_class == PRIMARY", (arg) -> false); assertFalse(result.ok); result = parse("toll == Toll.NO", (arg) -> false); assertFalse(result.ok); result = parse("priority * 2", (s) -> s.equals("priority")); assertTrue(result.ok, result.invalidMessage); assertEquals("[priority]", result.guessedVariables.toString()); // LATER but requires accepting also EnumEncodedValue for value expression // result = parse("road_class.ordinal()*2", validVariable); // assertTrue(result.ok, result.invalidMessage); // assertTrue(parse("Math.sqrt(road_class.ordinal())", validVariable).ok); }
/**
 * Two-argument form of the FEEL {@code substring} function: delegates to the three-argument
 * overload with a null length (substring to end of string).
 */
public FEELFnResult<String> invoke(@ParameterName("string") String string, @ParameterName("start position") Number start) { return invoke(string, start, null); }
/**
 * Verifies the two-argument substring overload rejects null inputs: a null string, a null
 * start position, or both must each produce an InvalidParametersEvent error result.
 */
@Test void invokeNull2ParamsMethod() { FunctionTestUtil.assertResultError(substringFunction.invoke((String) null, null), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(substringFunction.invoke("test", null), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(substringFunction.invoke(null, 0), InvalidParametersEvent.class); }
/**
 * Parses ls command-line flags and derives the display/ordering state.
 *
 * <p>Notable interactions: {@code -d} (directory mode) disables recursion regardless of
 * {@code -R}; size ordering ({@code -S}) is ignored when mtime ordering ({@code -t}) is set.
 * With no path arguments, the current directory is listed. Finally the order comparator is
 * (re)built from the chosen flags.
 */
@Override protected void processOptions(LinkedList<String> args) throws IOException { CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, OPTION_PATHONLY, OPTION_DIRECTORY, OPTION_HUMAN, OPTION_HIDENONPRINTABLE, OPTION_RECURSIVE, OPTION_REVERSE, OPTION_MTIME, OPTION_SIZE, OPTION_ATIME, OPTION_ECPOLICY); cf.parse(args); pathOnly = cf.getOpt(OPTION_PATHONLY); dirRecurse = !cf.getOpt(OPTION_DIRECTORY); setRecursive(cf.getOpt(OPTION_RECURSIVE) && dirRecurse); humanReadable = cf.getOpt(OPTION_HUMAN); hideNonPrintable = cf.getOpt(OPTION_HIDENONPRINTABLE); orderReverse = cf.getOpt(OPTION_REVERSE); orderTime = cf.getOpt(OPTION_MTIME); orderSize = !orderTime && cf.getOpt(OPTION_SIZE); useAtime = cf.getOpt(OPTION_ATIME); displayECPolicy = cf.getOpt(OPTION_ECPOLICY); if (args.isEmpty()) args.add(Path.CUR_DIR); initialiseOrderComparator(); }
/**
 * Verifies {@code ls -S -r}: six files get lengths deliberately out of name order, and the
 * output must list them in ascending size (size order reversed), asserted via the exact
 * println sequence on the mocked output stream.
 */
@Test public void processPathDirOrderLengthReverse() throws IOException { TestFile testfile01 = new TestFile("testDirectory", "testFile01"); TestFile testfile02 = new TestFile("testDirectory", "testFile02"); TestFile testfile03 = new TestFile("testDirectory", "testFile03"); TestFile testfile04 = new TestFile("testDirectory", "testFile04"); TestFile testfile05 = new TestFile("testDirectory", "testFile05"); TestFile testfile06 = new TestFile("testDirectory", "testFile06"); // set file length in different order to file names long length = 1234567890; testfile01.setLength(length + 10); testfile02.setLength(length + 30); testfile03.setLength(length + 20); testfile04.setLength(length + 60); testfile05.setLength(length + 50); testfile06.setLength(length + 40); TestFile testDir = new TestFile("", "testDirectory"); testDir.setIsDir(true); testDir.addContents(testfile01, testfile02, testfile03, testfile04, testfile05, testfile06); LinkedList<PathData> pathData = new LinkedList<PathData>(); pathData.add(testDir.getPathData()); PrintStream out = mock(PrintStream.class); Ls ls = new Ls(); ls.out = out; LinkedList<String> options = new LinkedList<String>(); options.add("-S"); options.add("-r"); ls.processOptions(options); String lineFormat = TestFile.computeLineFormat(pathData); ls.processArguments(pathData); InOrder inOrder = inOrder(out); inOrder.verify(out).println("Found 6 items"); inOrder.verify(out).println(testfile01.formatLineMtime(lineFormat)); inOrder.verify(out).println(testfile03.formatLineMtime(lineFormat)); inOrder.verify(out).println(testfile02.formatLineMtime(lineFormat)); inOrder.verify(out).println(testfile06.formatLineMtime(lineFormat)); inOrder.verify(out).println(testfile05.formatLineMtime(lineFormat)); inOrder.verify(out).println(testfile04.formatLineMtime(lineFormat)); verifyNoMoreInteractions(out); }
/** Returns a snapshot of the current values, delegating to the backing reservoir. */
@Override public Snapshot getSnapshot() { return reservoir.getSnapshot(); }
/** Verifies the histogram passes through the reservoir's snapshot unchanged. */
@Test public void returnsTheSnapshotFromTheReservoir() { final Snapshot snapshot = mock(Snapshot.class); when(reservoir.getSnapshot()).thenReturn(snapshot); assertThat(histogram.getSnapshot()) .isEqualTo(snapshot); }
/**
 * Executes the pipeline on the direct runner.
 *
 * <p>Steps: (1) round-trip the options through JSON to defensively copy them; (2) apply
 * runner-specific pipeline rewrites; (3) build the direct graph and keyed-PValue tracking via
 * two topological traversals; (4) validate display data; (5) spin up a daemon-less metrics
 * committer pool and the parallel executor; (6) start execution from the root transforms.
 * When {@code isBlockOnRun()} is set, waits for completion, unwrapping
 * {@link UserCodeException} into a {@code PipelineExecutionException}. Metrics support is
 * enabled for the duration of the run and restored in the {@code finally}.
 */
@Override public DirectPipelineResult run(Pipeline pipeline) { try { options = MAPPER .readValue(MAPPER.writeValueAsBytes(options), PipelineOptions.class) .as(DirectOptions.class); } catch (IOException e) { throw new IllegalArgumentException( "PipelineOptions specified failed to serialize to JSON.", e); } performRewrites(pipeline); MetricsEnvironment.setMetricsSupported(true); try { DirectGraphVisitor graphVisitor = new DirectGraphVisitor(); pipeline.traverseTopologically(graphVisitor); @SuppressWarnings("rawtypes") KeyedPValueTrackingVisitor keyedPValueVisitor = KeyedPValueTrackingVisitor.create(); pipeline.traverseTopologically(keyedPValueVisitor); DisplayDataValidator.validatePipeline(pipeline); DisplayDataValidator.validateOptions(options); ExecutorService metricsPool = Executors.newCachedThreadPool( new ThreadFactoryBuilder() .setThreadFactory(MoreExecutors.platformThreadFactory()) .setDaemon(false) // otherwise you say you want to leak, please don't! .setNameFormat("direct-metrics-counter-committer") .build()); DirectGraph graph = graphVisitor.getGraph(); EvaluationContext context = EvaluationContext.create( clockSupplier.get(), Enforcement.bundleFactoryFor(enabledEnforcements, graph), graph, keyedPValueVisitor.getKeyedPValues(), metricsPool); TransformEvaluatorRegistry registry = TransformEvaluatorRegistry.javaSdkNativeRegistry(context, options); PipelineExecutor executor = ExecutorServiceParallelExecutor.create( options.getTargetParallelism(), registry, Enforcement.defaultModelEnforcements(enabledEnforcements), context, metricsPool); executor.start(graph, RootProviderRegistry.javaNativeRegistry(context, options)); DirectPipelineResult result = new DirectPipelineResult(executor, context); if (options.isBlockOnRun()) { try { result.waitUntilFinish(); } catch (UserCodeException userException) { throw new PipelineExecutionException(userException.getCause()); } catch (Throwable t) { if (t instanceof RuntimeException) { throw (RuntimeException) t; } throw new
RuntimeException(t); } } return result; } finally { MetricsEnvironment.setMetricsSupported(false); } }
/**
 * Verifies that elements from an unbounded read that cannot be decoded by the output coder
 * fail the pipeline with the coder's error message ("Cannot decode a long").
 */
@Test public void testUnencodableOutputFromUnboundedRead() { Pipeline p = getPipeline(); p.apply(GenerateSequence.from(0)).setCoder(new LongNoDecodeCoder()); thrown.expectMessage("Cannot decode a long"); p.run(); }
/**
 * Returns the latest extension version from {@code versions}, delegating to the three-argument
 * overload with pre-release handling disabled ({@code false}). Results are cached under
 * CACHE_LATEST_EXTENSION_VERSION using the dedicated key generator.
 */
@Cacheable(value = CACHE_LATEST_EXTENSION_VERSION, keyGenerator = GENERATOR_LATEST_EXTENSION_VERSION) public ExtensionVersion getLatest(List<ExtensionVersion> versions, boolean groupedByTargetPlatform) { return getLatest(versions, groupedByTargetPlatform, false); }
/**
 * Verifies target-platform tie-breaking: with equal versions across universal, linux-x64 and
 * win32-arm64, {@code getLatest} must prefer the universal build.
 */
@Test public void testGetLatestTargetPlatformSortUniversal() { var version = "1.0.0"; var universal = new ExtensionVersion(); universal.setTargetPlatform(TargetPlatform.NAME_UNIVERSAL); universal.setVersion(version); var linux = new ExtensionVersion(); linux.setTargetPlatform(TargetPlatform.NAME_LINUX_X64); linux.setVersion(version); var windows = new ExtensionVersion(); windows.setTargetPlatform(TargetPlatform.NAME_WIN32_ARM64); windows.setVersion(version); var latest = versions.getLatest(List.of(windows, linux, universal), false); assertEquals(universal, latest); }
/** Test-only accessor for the "apps failed killed" metric counter's current value. */
@VisibleForTesting public int getAppsFailedKilled() { return numAppsFailedKilled.value(); }
/**
 * Verifies that a failed force-kill against the bad subcluster increments the
 * apps-failed-killed metric by exactly one.
 */
@Test
public void testAppsFailedKilled() {
    final long failedKilledBefore = metrics.getAppsFailedKilled();
    badSubCluster.forceKillApplication();
    Assert.assertEquals(failedKilledBefore + 1, metrics.getAppsFailedKilled());
}
/**
 * Resolves {@code inetHost} from the hosts file and returns the first address matching the
 * requested address-type preference (null semantics defined by {@code firstAddress}).
 */
@Override public InetAddress address(String inetHost, ResolvedAddressTypes resolvedAddressTypes) { return firstAddress(addresses(inetHost, resolvedAddressTypes)); }
/**
 * Verifies that when the hosts file defines both A and AAAA entries for "localhost",
 * IPV4_PREFERRED resolution yields an IPv4 address.
 */
@Test public void shouldPickIpv4WhenBothAreDefinedButIpv4IsPreferred() { HostsFileEntriesProvider.Parser parser = givenHostsParserWith( LOCALHOST_V4_ADDRESSES, LOCALHOST_V6_ADDRESSES ); DefaultHostsFileEntriesResolver resolver = new DefaultHostsFileEntriesResolver(parser, ENTRIES_TTL); InetAddress address = resolver.address("localhost", ResolvedAddressTypes.IPV4_PREFERRED); assertThat("Should pick an IPv4 address", address, instanceOf(Inet4Address.class)); }
/**
 * Generates rewrite tokens for a CREATE TABLE statement: for every column definition that is
 * configured as an encrypt column, emits the replacement tokens produced by
 * {@code getColumnTokens}. The index-based loop is required because token generation needs
 * the column's position within the full definition list.
 */
@Override public Collection<SQLToken> generateSQLTokens(final CreateTableStatementContext sqlStatementContext) { Collection<SQLToken> result = new LinkedList<>(); String tableName = sqlStatementContext.getSqlStatement().getTable().getTableName().getIdentifier().getValue(); EncryptTable encryptTable = encryptRule.getEncryptTable(tableName); List<ColumnDefinitionSegment> columns = new ArrayList<>(sqlStatementContext.getSqlStatement().getColumnDefinitions()); for (int index = 0; index < columns.size(); index++) { ColumnDefinitionSegment each = columns.get(index); String columnName = each.getColumnName().getIdentifier().getValue(); if (encryptTable.isEncryptColumn(columnName)) { result.addAll(getColumnTokens(encryptTable.getEncryptColumn(columnName), each, columns, index)); } } return result; }
/**
 * Verifies token generation for an encrypted CREATE TABLE column: one RemoveToken for the
 * plain column followed by cipher, assisted-query and like columns, each with the expected
 * rendered text and identical start/stop indexes.
 */
@Test void assertGenerateSQLTokens() { Collection<SQLToken> actual = generator.generateSQLTokens(mockCreateTableStatementContext()); assertThat(actual.size(), is(4)); Iterator<SQLToken> actualIterator = actual.iterator(); assertThat(actualIterator.next(), instanceOf(RemoveToken.class)); EncryptColumnToken cipherToken = (EncryptColumnToken) actualIterator.next(); assertThat(cipherToken.toString(), is("cipher_certificate_number VARCHAR(4000)")); assertThat(cipherToken.getStartIndex(), is(79)); assertThat(cipherToken.getStopIndex(), is(78)); EncryptColumnToken assistedToken = (EncryptColumnToken) actualIterator.next(); assertThat(assistedToken.toString(), is(", assisted_certificate_number VARCHAR(4000)")); assertThat(assistedToken.getStartIndex(), is(79)); assertThat(assistedToken.getStopIndex(), is(78)); EncryptColumnToken likeToken = (EncryptColumnToken) actualIterator.next(); assertThat(likeToken.toString(), is(", like_certificate_number VARCHAR(4000)")); assertThat(likeToken.getStartIndex(), is(79)); assertThat(likeToken.getStopIndex(), is(78)); }
/** Convenience overload: reports a too-long frame of {@code length} bytes via the String-based fail. */
private void fail(final ChannelHandlerContext ctx, int length) { fail(ctx, String.valueOf(length)); }
/**
 * Verifies fail-fast over-length handling: the first oversized write throws
 * TooLongFrameException immediately, the remainder of the bad line is discarded, and the
 * decoder then recovers to emit the next complete line ("first\r\n").
 */
@Test public void testTooLongLineWithFailFastAndEmitLastLine() throws Exception { EmbeddedChannel ch = new EmbeddedChannel(new LenientLineBasedFrameDecoder(16, false, true, true)); try { ch.writeInbound(copiedBuffer("12345678901234567", CharsetUtil.US_ASCII)); fail(); } catch (Exception e) { assertThat(e, is(instanceOf(TooLongFrameException.class))); } assertThat(ch.writeInbound(copiedBuffer("890", CharsetUtil.US_ASCII)), is(false)); assertThat(ch.writeInbound(copiedBuffer("123\r\nfirst\r\n", CharsetUtil.US_ASCII)), is(true)); ByteBuf buf = ch.readInbound(); ByteBuf buf2 = copiedBuffer("first\r\n", CharsetUtil.US_ASCII); assertThat(buf, is(buf2)); assertThat(ch.finish(), is(false)); buf.release(); buf2.release(); }
/**
 * Builds a {@link Getter} for a reflective field access, wiring the field's declared type,
 * its value extractor ({@code field::get}) and a factory producing the concrete
 * {@code FieldGetter}. The generic {@code newGetter} decides type inference (e.g. from the
 * modifier/collection contents) — see that method for details.
 *
 * @param object   instance used for type inference of the field value
 * @param parent   parent getter in the extraction chain, may be null
 * @param field    field to read
 * @param modifier suffix such as "[any]" controlling collection/array extraction
 */
public static Getter newFieldGetter(Object object, Getter parent, Field field, String modifier) throws Exception { return newGetter(object, parent, modifier, field.getType(), field::get, (t, et) -> new FieldGetter(parent, field, modifier, t, et)); }
/**
 * Verifies that for a non-empty array field with the "[any]" reducer suffix, the getter's
 * return type is inferred from the actual collection item (InnerObject) rather than the
 * declared array type.
 */
@Test public void newFieldGetter_whenExtractingFromNonEmpty_Array_AndReducerSuffixInNotEmpty_thenInferTypeFromCollectionItem() throws Exception { OuterObject object = new OuterObject("name", new InnerObject("inner")); Getter getter = GetterFactory.newFieldGetter(object, null, innersArrayField, "[any]"); Class<?> returnType = getter.getReturnType(); assertEquals(InnerObject.class, returnType); }
@Override public PartialConfig load(File configRepoCheckoutDirectory, PartialConfigLoadContext context) { File[] allFiles = getFiles(configRepoCheckoutDirectory, context); // if context had changed files list then we could parse only new content PartialConfig[] allFragments = parseFiles(allFiles); PartialConfig partialConfig = new PartialConfig(); collectFragments(allFragments, partialConfig); return partialConfig; }
/**
 * Verifies loading a directory containing a single pipeline-group XML file: the loaded
 * partial config must reproduce the group, its size, and its first pipeline.
 */
@Test public void shouldLoadDirectoryWithOnePipelineGroup() throws Exception{ GoConfigMother mother = new GoConfigMother(); PipelineConfigs group1 = mother.cruiseConfigWithOnePipelineGroup().getGroups().get(0); helper.addFileWithPipelineGroup("group1.gocd.xml", group1); PartialConfig part = xmlPartialProvider.load(tmpFolder, mock(PartialConfigLoadContext.class)); PipelineConfigs groupRead = part.getGroups().get(0); assertThat(groupRead,is(group1)); assertThat(groupRead.size(),is(group1.size())); assertThat(groupRead.get(0),is(group1.get(0))); }
@Udf(description = "Converts the number of days since 1970-01-01 00:00:00 UTC/GMT to a date " + "string using the given format pattern. The format pattern should be in the format" + " expected by java.time.format.DateTimeFormatter") public String formatDate( @UdfParameter( description = "The date to convert") final Date date, @UdfParameter( description = "The format pattern should be in the format expected by" + " java.time.format.DateTimeFormatter.") final String formatPattern) { if (date == null || formatPattern == null) { return null; } try { final DateTimeFormatter formatter = formatters.get(formatPattern); return LocalDate.ofEpochDay(TimeUnit.MILLISECONDS.toDays(date.getTime())).format(formatter); } catch (final ExecutionException | RuntimeException e) { throw new KsqlFunctionException("Failed to format date " + date + " with formatter '" + formatPattern + "': " + e.getMessage(), e); } }
/**
 * Verifies that an invalid format pattern causes formatDate to throw KsqlFunctionException
 * whose message includes the date and the offending pattern.
 */
@Test public void shouldThrowIfFormatInvalid() { // When: final Exception e = assertThrows( KsqlFunctionException.class, () -> udf.formatDate(Date.valueOf("2014-11-09"), "invalid") ); // Then: assertThat(e.getMessage(), containsString("Failed to format date 2014-11-09 with formatter 'invalid'")); }
/** This connection never reports read-only mode; always returns {@code false}. */
@Override public boolean isReadOnly() { return false; }
/** Verifies the connection always reports itself as not read-only. */
@Test void assertIsReadOnly() { assertFalse(connection.isReadOnly()); }
/**
 * Creates a daemon thread with the default "hollow" platform prefix; delegates to the
 * four-argument overload which composes the thread name from prefix, context class and
 * description.
 */
public static Thread daemonThread(Runnable r, Class<?> context, String description) { return daemonThread(r, "hollow", context, description); }
/**
 * Verifies daemonThread naming ("hollow | SimpleClassName | description") and that the
 * created thread is a daemon.
 */
@Test public void described() { Thread thread = daemonThread(() -> {}, getClass(), "howdy"); assertEquals("hollow | ThreadsTest | howdy", thread.getName()); assertTrue(thread.isDaemon()); }
/** Evaluates the HAVING predicate against one result row, delegating to the row matcher. */
public boolean isMatch(Object[] row) { return _rowMatcher.isMatch(row); }
/**
 * Exercises HavingFilterHandler across four scenarios: a simple aggregate predicate
 * (COUNT(*) &gt; 5), a nested AND/OR/IN predicate, a post-aggregation comparison
 * (MAX(m1) &gt; MIN(m2) * 2), and predicates over every supported column data type
 * (INT/LONG/FLOAT/DOUBLE/STRING/BYTES). Each scenario asserts matching and non-matching rows.
 */
@Test public void testHavingFilter() { // Simple having { QueryContext queryContext = QueryContextConverterUtils .getQueryContext("SELECT COUNT(*) FROM testTable GROUP BY d1 HAVING COUNT(*) > 5"); DataSchema dataSchema = new DataSchema(new String[]{"d1", "count(*)"}, new ColumnDataType[]{ColumnDataType.INT, ColumnDataType.LONG}); PostAggregationHandler postAggregationHandler = new PostAggregationHandler(queryContext, dataSchema); HavingFilterHandler havingFilterHandler = new HavingFilterHandler(queryContext.getHavingFilter(), postAggregationHandler, false); assertFalse(havingFilterHandler.isMatch(new Object[]{1, 5L})); assertTrue(havingFilterHandler.isMatch(new Object[]{2, 10L})); assertFalse(havingFilterHandler.isMatch(new Object[]{3, 3L})); } // Nested having { QueryContext queryContext = QueryContextConverterUtils.getQueryContext( "SELECT MAX(m1), MIN(m1) FROM testTable GROUP BY d1 HAVING MAX(m1) IN (15, 20, 25) AND (MIN(m1) > 10 OR MIN" + "(m1) <= 3)"); DataSchema dataSchema = new DataSchema(new String[]{"d1", "max(m1)", "min(m1)"}, new ColumnDataType[]{ColumnDataType.INT, ColumnDataType.DOUBLE, ColumnDataType.DOUBLE}); PostAggregationHandler postAggregationHandler = new PostAggregationHandler(queryContext, dataSchema); HavingFilterHandler havingFilterHandler = new HavingFilterHandler(queryContext.getHavingFilter(), postAggregationHandler, false); assertFalse(havingFilterHandler.isMatch(new Object[]{1, 15.5, 13.0})); assertTrue(havingFilterHandler.isMatch(new Object[]{2, 15.0, 3.0})); assertFalse(havingFilterHandler.isMatch(new Object[]{3, 20.0, 7.5})); } // Having with post-aggregation { QueryContext queryContext = QueryContextConverterUtils .getQueryContext("SELECT MAX(m1), MIN(m2) FROM testTable GROUP BY d1 HAVING MAX(m1) > MIN(m2) * 2"); DataSchema dataSchema = new DataSchema(new String[]{"d1", "max(m1)", "min(m2)"}, new ColumnDataType[]{ColumnDataType.INT, ColumnDataType.DOUBLE, ColumnDataType.DOUBLE}); PostAggregationHandler postAggregationHandler = new
PostAggregationHandler(queryContext, dataSchema); HavingFilterHandler havingFilterHandler = new HavingFilterHandler(queryContext.getHavingFilter(), postAggregationHandler, false); assertFalse(havingFilterHandler.isMatch(new Object[]{1, 15.5, 13.0})); assertTrue(havingFilterHandler.isMatch(new Object[]{2, 15.0, 3.0})); assertFalse(havingFilterHandler.isMatch(new Object[]{3, 20.0, 10.0})); } // Having with all data types { QueryContext queryContext = QueryContextConverterUtils.getQueryContext( "SELECT COUNT(*) FROM testTable GROUP BY d1, d2, d3, d4, d5, d6 HAVING d1 > 10 AND d2 > 10 AND d3 > 10 AND " + "d4 > 10 AND d5 > 10 AND d6 > 10"); DataSchema dataSchema = new DataSchema(new String[]{"d1", "d2", "d3", "d4", "d5", "d6", "count(*)"}, new ColumnDataType[]{ ColumnDataType.INT, ColumnDataType.LONG, ColumnDataType.FLOAT, ColumnDataType.DOUBLE, ColumnDataType.STRING, ColumnDataType.BYTES, ColumnDataType.LONG }); PostAggregationHandler postAggregationHandler = new PostAggregationHandler(queryContext, dataSchema); HavingFilterHandler havingFilterHandler = new HavingFilterHandler(queryContext.getHavingFilter(), postAggregationHandler, false); assertTrue(havingFilterHandler.isMatch(new Object[]{11, 11L, 10.5f, 10.5, "11", new byte[]{17}, 5})); assertFalse(havingFilterHandler.isMatch(new Object[]{10, 11L, 10.5f, 10.5, "11", new byte[]{17}, 5})); assertFalse(havingFilterHandler.isMatch(new Object[]{11, 10L, 10.5f, 10.5, "11", new byte[]{17}, 5})); assertFalse(havingFilterHandler.isMatch(new Object[]{11, 11L, 10.0f, 10.5, "11", new byte[]{17}, 5})); assertFalse(havingFilterHandler.isMatch(new Object[]{11, 11L, 10.5f, 10.0, "11", new byte[]{17}, 5})); assertFalse(havingFilterHandler.isMatch(new Object[]{11, 11L, 10.5f, 10.5, "10", new byte[]{17}, 5})); assertFalse(havingFilterHandler.isMatch(new Object[]{11, 11L, 10.5f, 10.5, "11", new byte[]{16}, 5})); } }
/**
 * Maps each target field of a PMML model to its data type.
 *
 * <p>A mining field counts as a target when its usage type is TARGET or PREDICTED. Insertion
 * order of the mining schema is preserved (LinkedHashMap). Returns an empty map when the
 * model has no mining schema or no mining fields.
 *
 * @param fields all known fields, used to resolve each target's DATA_TYPE
 * @param model  the PMML model whose mining schema is inspected
 * @return ordered map of target field name to its data type (possibly empty, never null)
 */
public static Map<String, DATA_TYPE> getTargetFieldsTypeMap(final List<Field<?>> fields, final Model model) {
    final Map<String, DATA_TYPE> targetTypes = new LinkedHashMap<>();
    if (model.getMiningSchema() == null || model.getMiningSchema().getMiningFields() == null) {
        return targetTypes;
    }
    for (MiningField miningField : model.getMiningSchema().getMiningFields()) {
        final boolean isTarget = MiningField.UsageType.TARGET.equals(miningField.getUsageType())
                || MiningField.UsageType.PREDICTED.equals(miningField.getUsageType());
        if (isTarget) {
            targetTypes.put(miningField.getName(), getDATA_TYPE(fields, miningField.getName()));
        }
    }
    return targetTypes;
}
/**
 * Verifies that a model whose mining schema contains only ACTIVE fields (no TARGET/PREDICTED)
 * produces a non-null, empty target-type map.
 */
@Test void getTargetFieldsTypeMapWithoutTargetFieldsWithoutTargets() { final Model model = new RegressionModel(); final DataDictionary dataDictionary = new DataDictionary(); final MiningSchema miningSchema = new MiningSchema(); IntStream.range(0, 3).forEach(i -> { final DataField dataField = getRandomDataField(); dataDictionary.addDataFields(dataField); final MiningField miningField = getMiningField(dataField.getName(), MiningField.UsageType.ACTIVE); miningSchema.addMiningFields(miningField); }); model.setMiningSchema(miningSchema); Map<String, DATA_TYPE> retrieved = org.kie.pmml.compiler.api.utils.ModelUtils.getTargetFieldsTypeMap(getFieldsFromDataDictionary(dataDictionary), model); assertThat(retrieved).isNotNull(); assertThat(retrieved).isEmpty(); }
/**
 * Returns the absolute paths of files changed relative to {@code targetBranchName}, or
 * {@code null} when the movement-detection variant could not compute a result (e.g. the
 * target ref is unavailable). Delegates to
 * {@code branchChangedFilesWithFileMovementDetection} and flattens to plain paths.
 */
@CheckForNull @Override public Set<Path> branchChangedFiles(String targetBranchName, Path rootBaseDir) { return Optional.ofNullable((branchChangedFilesWithFileMovementDetection(targetBranchName, rootBaseDir))) .map(GitScmProvider::extractAbsoluteFilePaths) .orElse(null); }
/**
 * Verifies that when the local clone has no "master" ref but an "upstream" remote does,
 * branchChangedFiles falls back to the upstream ref and still reports the file committed on
 * b1, without emitting analysis warnings.
 */
@Test public void branchChangedFiles_falls_back_to_upstream_ref() throws IOException, GitAPIException { git.branchCreate().setName("b1").call(); git.checkout().setName("b1").call(); createAndCommitFile("file-b1"); Path worktree2 = temp.newFolder().toPath(); Git.cloneRepository() .setURI(worktree.toString()) .setRemote("upstream") .setDirectory(worktree2.toFile()) .call(); assertThat(newScmProvider().branchChangedFiles("master", worktree2)) .containsOnly(worktree2.resolve("file-b1")); verifyNoInteractions(analysisWarnings); }
/**
 * Returns the effective environment: the stored environment after filtering via
 * {@code cleanEnvironment}, with {@code additionalVariables} layered on top (additional
 * variables win on key collisions). The returned map is unmodifiable.
 */
public Map<String, String> getEnv() {
    final Map<String, String> merged = new HashMap<>(cleanEnvironment(this.env));
    merged.putAll(additionalVariables);
    return Collections.unmodifiableMap(merged);
}
/**
 * Verifies environment filtering: JAVA_HOME is stripped from the effective environment
 * while ordinary variables like USER are retained.
 */
@Test void testFiltering() { final Environment env = new Environment(Map.of("USER", "test", "JAVA_HOME", "/path/to/jre")); Assertions.assertThat(env.getEnv()) .doesNotContainKey("JAVA_HOME") .containsKey("USER"); }
/** This scanner supports any non-null annotated element (fields, methods, classes alike). */
@Override public boolean support(AnnotatedElement annotatedEle) { return ObjectUtil.isNotNull(annotatedEle); }
/**
 * Verifies support() accepts fields, methods and classes, and rejects only null input.
 */
@Test public void supportTest() { final ElementAnnotationScanner scanner = new ElementAnnotationScanner(); assertTrue(scanner.support(ReflectUtil.getField(FieldAnnotationScannerTest.Example.class, "id"))); assertTrue(scanner.support(ReflectUtil.getMethod(FieldAnnotationScannerTest.Example.class, "getId"))); assertFalse(scanner.support(null)); assertTrue(scanner.support(FieldAnnotationScannerTest.Example.class)); }
/**
 * Attempts to create every configured local directory with the given permissions.
 *
 * <p>The directory list is snapshotted under the read lock so creation runs without holding
 * any lock. A directory that fails to create is, under the write lock, removed from the valid
 * list, recorded as an error dir with diagnostic info, and counted as a failure — but the
 * loop continues so remaining directories are still attempted.
 *
 * @return true only if every directory was created (or already usable); false if any failed
 */
boolean createNonExistentDirs(FileContext localFs, FsPermission perm) { boolean failed = false; List<String> localDirectories = null; this.readLock.lock(); try { localDirectories = new ArrayList<>(localDirs); } finally { this.readLock.unlock(); } for (final String dir : localDirectories) { try { createDir(localFs, new Path(dir), perm); } catch (IOException e) { LOG.warn("Unable to create directory " + dir + " error " + e.getMessage() + ", removing from the list of valid directories."); this.writeLock.lock(); try { localDirs.remove(dir); errorDirs.add(dir); directoryErrorInfo.put(dir, new DiskErrorInformation(DiskErrorCause.OTHER, "Cannot create directory : " + dir + ", error " + e.getMessage())); numFailures++; } finally { this.writeLock.unlock(); } failed = true; } } return !failed; }
/**
 * Verifies createNonExistentDirs: missing directories (including nested dirA/dirB) are
 * created with the default umask-derived permission, while a pre-existing directory (dirC,
 * mode 0710) keeps its original permissions untouched.
 */
@Test public void testCreateDirectories() throws IOException { conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077"); String dirA = new File(testDir, "dirA").getPath(); String dirB = new File(dirA, "dirB").getPath(); String dirC = new File(testDir, "dirC").getPath(); Path pathC = new Path(dirC); FsPermission permDirC = new FsPermission((short)0710); localFs.mkdir(pathC, null, true); localFs.setPermission(pathC, permDirC); String[] dirs = { dirA, dirB, dirC }; DirectoryCollection dc = new DirectoryCollection(dirs, conf.getFloat( YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE, YarnConfiguration.DEFAULT_NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE)); FsPermission defaultPerm = FsPermission.getDefault() .applyUMask(new FsPermission((short)FsPermission.DEFAULT_UMASK)); boolean createResult = dc.createNonExistentDirs(localFs, defaultPerm); Assert.assertTrue(createResult); FileStatus status = localFs.getFileStatus(new Path(dirA)); Assert.assertEquals("local dir parent not created with proper permissions", defaultPerm, status.getPermission()); status = localFs.getFileStatus(new Path(dirB)); Assert.assertEquals("local dir not created with proper permissions", defaultPerm, status.getPermission()); status = localFs.getFileStatus(pathC); Assert.assertEquals("existing local directory permissions modified", permDirC, status.getPermission()); }
/**
 * Creates a bundle for the given output collection, wrapping it in an
 * immutability-enforcing decorator when that enforcement applies to the
 * collection within the current graph.
 */
@Override
public <T> UncommittedBundle<T> createBundle(PCollection<T> output) {
    UncommittedBundle<T> bundle = underlying.createBundle(output);
    return Enforcement.IMMUTABILITY.appliesTo(output, graph)
        ? new ImmutabilityEnforcingBundle<>(bundle)
        : bundle;
}
// Verifies that mutating an element after it has been added to a bundle is
// detected: the immutability-enforcing bundle throws on commit.
@Test
public void mutationAfterAddCreateBundleThrows() {
    UncommittedBundle<byte[]> intermediate = factory.createBundle(transformed);
    byte[] array = new byte[] {4, 8, 12};
    WindowedValue<byte[]> windowedArray = WindowedValue.of(
        array,
        new Instant(891L),
        new IntervalWindow(new Instant(0), new Instant(1000)),
        PaneInfo.ON_TIME_AND_ONLY_FIRING);
    intermediate.add(windowedArray);
    // Mutate the array after output — this is the violation under test.
    array[2] = -3;
    thrown.expect(IllegalMutationException.class);
    thrown.expectMessage("Values must not be mutated in any way after being output");
    intermediate.commit(Instant.now());
}
/**
 * Builds the full path from the forward and backward shortest-path-tree
 * entries that met during a bidirectional search.
 */
public static Path extractPath(Graph graph, Weighting weighting, SPTEntry fwdEntry, SPTEntry bwdEntry, double weight) {
    DefaultBidirPathExtractor extractor = new DefaultBidirPathExtractor(graph, weighting);
    return extractor.extract(fwdEntry, bwdEntry, weight);
}
// Verifies that turn costs at the meeting node of the forward and backward
// searches are included in the extracted path's weight, time and distance.
@Test
public void testExtract2() {
    // 1->2->3
    Graph graph = createGraph();
    graph.edge(1, 2).setDistance(10).set(speedEnc, 10, 0);
    graph.edge(2, 3).setDistance(20).set(speedEnc, 10, 0);
    // add some turn costs at node 2 where fwd&bwd searches meet. these costs have to be included in the
    // weight and the time of the path
    TurnCostStorage turnCostStorage = graph.getTurnCostStorage();
    turnCostStorage.set(turnCostEnc, 0, 2, 1, 5);
    SPTEntry fwdEntry = new SPTEntry(0, 2, 0.6, new SPTEntry(1, 0));
    SPTEntry bwdEntry = new SPTEntry(1, 2, 1.2, new SPTEntry(3, 0));
    Path p = DefaultBidirPathExtractor.extractPath(graph, new SpeedWeighting(speedEnc, turnCostEnc, turnCostStorage, Double.POSITIVE_INFINITY), fwdEntry, bwdEntry, 0);
    p.setWeight(5 + 3);
    assertEquals(IntArrayList.from(1, 2, 3), p.calcNodes());
    assertEquals(30, p.getDistance(), 1e-4);
    assertEquals(8, p.getWeight(), 1e-4);
    assertEquals(8000, p.getTime(), 1.e-6);
}
/**
 * Returns the cached instance, refreshing it from the supplier when no
 * instance exists yet or the configured cache duration has elapsed since
 * the last refresh (as measured by the injected clock).
 */
public synchronized T get() {
    long now = clock.millis();
    boolean expired = (now - lastRefreshTimestamp) >= cacheDurationMillis;
    if (cachedInstance == null || expired) {
        // Refresh first; the timestamp is only advanced on success so a
        // throwing supplier leaves the previous state intact.
        cachedInstance = supplier.get();
        lastRefreshTimestamp = now;
    }
    return cachedInstance;
}
// Verifies that the cache serves the same value until its 10ms duration
// elapses, then refreshes exactly once per expiry, using a mocked clock.
@Test
public void testCache() {
    AtomicLong currentTime = new AtomicLong(0);
    Clock clock = mock(Clock.class);
    when(clock.millis()).then(invocation -> currentTime.longValue());
    AtomicInteger currentValue = new AtomicInteger(0);
    Supplier<Integer> cache = new ObjectCache<>(() -> currentValue.getAndIncrement(), 10, TimeUnit.MILLISECONDS, clock);
    cache.get();
    assertEquals(cache.get().intValue(), 0);
    assertEquals(cache.get().intValue(), 0);
    currentTime.set(1);
    // Still the value has not expired
    assertEquals(cache.get().intValue(), 0);
    currentTime.set(10);
    // 10ms elapsed -> refresh produces the next supplier value.
    assertEquals(cache.get().intValue(), 1);
    currentTime.set(15);
    assertEquals(cache.get().intValue(), 1);
    currentTime.set(22);
    assertEquals(cache.get().intValue(), 2);
}
/**
 * Reloads each DHCP-relay-related configuration from the network config
 * service and applies whichever of them are present.
 */
private void updateConfig() {
    DefaultDhcpRelayConfig defaultCfg = cfgService.getConfig(appId, DefaultDhcpRelayConfig.class);
    IndirectDhcpRelayConfig indirectCfg = cfgService.getConfig(appId, IndirectDhcpRelayConfig.class);
    IgnoreDhcpConfig ignoreCfg = cfgService.getConfig(appId, IgnoreDhcpConfig.class);
    HostAutoRelearnConfig autoRelearnCfg = cfgService.getConfig(appId, HostAutoRelearnConfig.class);
    // A null config simply means that config type has not been provided.
    if (defaultCfg != null) {
        updateConfig(defaultCfg);
    }
    if (indirectCfg != null) {
        updateConfig(indirectCfg);
    }
    if (ignoreCfg != null) {
        updateConfig(ignoreCfg);
    }
    if (autoRelearnCfg != null) {
        updateConfig(autoRelearnCfg);
    }
}
// Verifies that ignore-DHCP rules are only installed on known devices:
// DEV_2 resolves to null and must receive no flow objectives, while DEV_1
// gets one objective per DHCP selector and the vlan is recorded as ignored.
@Test
public void testIgnoreUnknownDevice() throws IOException {
    reset(manager.deviceService);
    Device device = createNiceMock(Device.class);
    expect(device.is(Pipeliner.class)).andReturn(true).anyTimes();
    expect(manager.deviceService.getDevice(DEV_1_ID)).andReturn(device).anyTimes();
    // DEV_2 is unknown to the device service.
    expect(manager.deviceService.getDevice(DEV_2_ID)).andReturn(null).anyTimes();
    ObjectMapper om = new ObjectMapper();
    JsonNode json = om.readTree(Resources.getResource(CONFIG_FILE_PATH));
    IgnoreDhcpConfig config = new IgnoreDhcpConfig();
    json = json.path("apps").path(DHCP_RELAY_APP).path(IgnoreDhcpConfig.KEY);
    config.init(APP_ID, IgnoreDhcpConfig.KEY, json, om, null);
    Capture<Objective> capturedFromDev1 = newCapture(CaptureType.ALL);
    flowObjectiveService.apply(eq(DEV_1_ID), capture(capturedFromDev1));
    expectLastCall().times(DHCP_SELECTORS.size());
    replay(flowObjectiveService, manager.deviceService, device);
    manager.updateConfig(config);
    // Simulate successful objective installation callbacks.
    capturedFromDev1.getValues().forEach(obj -> obj.context().ifPresent(ctx -> ctx.onSuccess(obj)));
    assertEquals(1, v4Handler.ignoredVlans.size());
    assertEquals(1, v6Handler.ignoredVlans.size());
}
/** Returns the full name derived from this instance's organization and job. */
@Override
public String getFullName() {
    return getFullName(this.organization, this.job);
}
// Verifies the disable/enable REST round-trip for a workflow pipeline:
// initially enabled, disabled via PUT .../disable, re-enabled via .../enable.
// (Method name carries a long-standing typo "Pipieline"; kept as-is since
// renaming could affect test selection by name.)
@Test
public void testWorkflowPipieline() throws Exception {
    Job job = j.createProject(WorkflowJob.class, "multibranch");
    login();
    Assert.assertEquals(
        get("/organizations/jenkins/pipelines/" + job.getFullName() + "/").get("disabled"),
        false
    );
    put("/organizations/jenkins/pipelines/" + job.getFullName() + "/disable", "{}");
    Assert.assertEquals(
        get("/organizations/jenkins/pipelines/" + job.getFullName() + "/").get("disabled"),
        true
    );
    put("/organizations/jenkins/pipelines/" + job.getFullName() + "/enable", "{}");
    Assert.assertEquals(
        get("/organizations/jenkins/pipelines/" + job.getFullName() + "/").get("disabled"),
        false
    );
}
/**
 * Returns the authenticated attendee's own schedule for the given meeting.
 *
 * @param uuid the meeting identifier from the path
 * @param id   the attendee id resolved from the authentication token
 */
@GetMapping("/api/v1/meetings/{uuid}/attendees/me/schedules")
public MomoApiResponse<AttendeeScheduleResponse> findMySchedule(@PathVariable String uuid, @AuthAttendee long id) {
    return new MomoApiResponse<>(scheduleService.findMySchedule(uuid, id));
}
// Verifies the "my schedule" endpoint: log in to obtain an access-token
// cookie, then fetch the attendee's own schedule and expect HTTP 200.
// DisplayName (Korean): "Looks up one's own schedule by meeting UUID and attendee ID."
@DisplayName("UUID와 참가자 ID로 자신의 스케줄을 조회한다.")
@Test
void findMySchedule() {
    AttendeeLoginRequest loginRequest = new AttendeeLoginRequest(attendee.name(), attendee.password());
    createAttendeeSchedule(attendee);
    // Log in first to obtain the ACCESS_TOKEN cookie used below.
    String token = RestAssured.given().log().all()
        .contentType(ContentType.JSON)
        .body(loginRequest)
        .when().post("/api/v1/meetings/{uuid}/login", meeting.getUuid())
        .then().log().all()
        .statusCode(HttpStatus.OK.value())
        .extract().cookie("ACCESS_TOKEN");
    RestAssured.given().log().all()
        .cookie("ACCESS_TOKEN", token)
        .pathParam("uuid", meeting.getUuid())
        .contentType(ContentType.JSON)
        .when().get("/api/v1/meetings/{uuid}/attendees/me/schedules")
        .then().log().all()
        .statusCode(HttpStatus.OK.value());
}
/**
 * Returns whether the given side input has a value available for the given
 * window, i.e. whether that window has been recorded in this handler's
 * available-windows state.
 */
@Override
public boolean isReady(PCollectionView<?> sideInput, BoundedWindow window) {
    Set<BoundedWindow> availableWindows =
        stateInternals.state(StateNamespaces.global(), availableWindowsTags.get(sideInput)).read();
    if (availableWindows == null) {
        // No state written yet -> nothing is ready.
        return false;
    }
    return availableWindows.contains(window);
}
// Verifies that a side input becomes ready only in the window a value was
// added for: adjacent fixed windows remain not-ready.
@Test
public void testIsReady() {
    long view1WindowSize = 100;
    PCollection<String> pc = Pipeline.create().apply(Create.of("1"));
    PCollectionView<Iterable<String>> view1 =
        pc.apply(Window.into(FixedWindows.of(Duration.millis(view1WindowSize))))
          .apply(View.asIterable());
    // Unused, just to have a non-trivial handler set up
    PCollectionView<Iterable<String>> view2 = pc.apply(View.asIterable());
    SideInputHandler sideInputHandler = new SideInputHandler(
        ImmutableList.of(view1, view2),
        InMemoryStateInternals.<Void>forKey(null));
    // Adjacent fixed windows
    IntervalWindow firstWindow = new IntervalWindow(new Instant(0), new Instant(view1WindowSize));
    IntervalWindow secondWindow = new IntervalWindow(new Instant(view1WindowSize), new Instant(view1WindowSize * 2));
    // side input should not yet be ready in first window
    assertFalse(sideInputHandler.isReady(view1, firstWindow));
    // add a value for view1
    sideInputHandler.addSideInputValue(
        view1,
        valuesInWindow(
            materializeValuesFor(view1.getPipeline().getOptions(), View.asIterable(), "Hello"),
            new Instant(0),
            firstWindow));
    // now side input should be ready in first window
    assertTrue(sideInputHandler.isReady(view1, firstWindow));
    // second window input should still not be ready
    assertFalse(sideInputHandler.isReady(view1, secondWindow));
}
// Returns the node id for the current time.
// Delegates to the time-parameterized overload; that overload's contract is
// asserted here: a positive id or the NODE_ID_OUT_OF_RANGE sentinel.
private int getNodeId() {
    int nodeId = getNodeId(System.nanoTime());
    assert nodeId > 0 || nodeId == NODE_ID_OUT_OF_RANGE : "getNodeId() returned invalid value: " + nodeId;
    return nodeId;
}
// Verifies that the configured nodeIdOffset is added to the member-list join
// version when computing the generator's node id.
@Test
public void test_positiveNodeIdOffset() {
    int nodeIdOffset = 5;
    int memberListJoinVersion = 20;
    initialize(new FlakeIdGeneratorConfig().setNodeIdOffset(nodeIdOffset));
    when(clusterService.getMemberListJoinVersion()).thenReturn(memberListJoinVersion);
    assertEquals((memberListJoinVersion + nodeIdOffset), gen.getNodeId(0));
}
/**
 * Writes the given bytes to a file with the given name on the local
 * filesystem.
 *
 * @param fileName non-null, non-empty target path
 * @param data     non-null bytes to write
 * @return {@code true} on success, {@code false} if the write failed
 */
@Override
public boolean archive(String fileName, byte[] data) {
    checkArgument(!Strings.isNullOrEmpty(fileName));
    checkNotNull(data);
    File targetFile = new File(fileName);
    try {
        logger.atInfo().log("Archiving data to file system with filename '%s'.", fileName);
        Files.asByteSink(targetFile).write(data);
    } catch (IOException e) {
        logger.atWarning().withCause(e).log("Failed archiving data to file '%s'.", fileName);
        return false;
    }
    return true;
}
// Verifies that archiving string data to an existing temp file succeeds and
// the file content round-trips as UTF-8 text.
@Test
public void archive_whenValidTargetFileAndCharSequenceData_archivesGivenDataWithGivenName() throws IOException {
    File tempFile = temporaryFolder.newFile();
    String data = "file data";
    RawFileArchiver rawFileArchiver = new RawFileArchiver();
    assertThat(rawFileArchiver.archive(tempFile.getAbsolutePath(), data)).isTrue();
    assertThat(Files.asCharSource(tempFile, UTF_8).read()).isEqualTo(data);
}
/**
 * Analyzes the given statement.
 * Convenience overload delegating to the two-argument form with the boolean
 * flag set to false — presumably "not a DESCRIBE/explain analysis"; the
 * flag's meaning is defined by the overload, which is not visible here
 * (TODO confirm against the two-argument signature).
 */
public Analysis analyze(Statement statement) {
    return analyze(statement, false);
}
// Verifies analysis of CREATE SCHEMA: plain and WITH-properties forms
// succeed; unresolvable property values and duplicate property names
// (including quoted duplicates) are rejected.
@Test
public void testCreateSchema() {
    analyze("CREATE SCHEMA test");
    analyze("CREATE SCHEMA test WITH (p1 = 'p1')");
    assertFails(MISSING_ATTRIBUTE, ".*'y' cannot be resolved", "CREATE SCHEMA test WITH (p1 = y)");
    assertFails(DUPLICATE_PROPERTY, ".* Duplicate property: p1", "CREATE SCHEMA test WITH (p1 = 'p1', p2 = 'p2', p1 = 'p3')");
    // A quoted identifier naming the same property is still a duplicate.
    assertFails(DUPLICATE_PROPERTY, ".* Duplicate property: p1", "CREATE SCHEMA test WITH (p1 = 'p1', \"p1\" = 'p2')");
}
/**
 * Selects one upstream from the candidate list using the load-balancing
 * algorithm registered under the given name.
 *
 * @param upstreamList candidate upstream servers
 * @param algorithm    SPI name of the load-balancer implementation
 * @param ip           client ip, used by ip-sensitive algorithms
 * @return the chosen upstream
 */
public static Upstream selector(final List<Upstream> upstreamList, final String algorithm, final String ip) {
    final LoadBalancer balancer = ExtensionLoader.getExtensionLoader(LoadBalancer.class).getJoin(algorithm);
    return balancer.select(upstreamList, ip);
}
// Verifies weighted round-robin distribution with weights given out of
// order (70, 10, 20): over 120 selections the weight-10 upstream must be
// picked exactly its proportional share, 120 * 10/100 = 12 times.
@Test
public void loadBalanceUtilsDisOrderedWeightTest() {
    List<Upstream> upstreamList = Stream.of(70, 10, 20)
        .map(weight -> Upstream.builder()
            .url("upstream-" + weight)
            .weight(weight)
            .build())
        .collect(Collectors.toList());
    Map<String, Integer> countMap = new HashMap<>();
    IntStream.range(0, 120).forEach(i -> {
        Upstream result = LoadBalancerFactory.selector(upstreamList, LoadBalanceEnum.ROUND_ROBIN.getName(), "");
        int count = countMap.getOrDefault(result.getUrl(), 0);
        countMap.put(result.getUrl(), ++count);
    });
    assertEquals(12, countMap.get("upstream-10").intValue());
}
// This adapter does not support putIfAbsentAsync; calling it always throws.
@Override
@MethodNotAvailable
public CompletionStage<Boolean> putIfAbsentAsync(K key, V value) {
    throw new MethodNotAvailableException();
}
// Verifies that the unsupported putIfAbsentAsync operation throws
// MethodNotAvailableException.
@Test(expected = MethodNotAvailableException.class)
public void testPutIfAbsentAsync() {
    adapter.putIfAbsentAsync(23, "value");
}
/**
 * Resolves the aggregate (UDAF) function named by {@code functionCall}:
 * looks up the factory matching the call's argument SQL types, maps the
 * non-constant arguments to value-column indices in {@code schema}, and
 * instantiates the function with the resolved init arguments.
 *
 * @throws KsqlException wrapping any resolution failure, including arguments
 *                       that are not resolvable column references
 */
public static KsqlAggregateFunction<?, ?, ?> resolveAggregateFunction(
    final FunctionRegistry functionRegistry,
    final FunctionCall functionCall,
    final LogicalSchema schema,
    final KsqlConfig config
) {
    try {
        final ExpressionTypeManager expressionTypeManager = new ExpressionTypeManager(schema, functionRegistry);
        // Resolve each argument expression to its SQL type so the matching
        // function overload can be located.
        final List<SqlType> args = functionCall.getArguments().stream()
            .map(expressionTypeManager::getExpressionSqlType)
            .collect(Collectors.toList());
        final AggregateFunctionFactory.FunctionSource func = functionRegistry
            .getAggregateFactory(functionCall.getName())
            .getFunction(args);
        final int totalArgs = functionCall.getArguments().size();
        // All non-constant UDAF arguments must be column references
        // (the trailing func.initArgs arguments are constants, hence the limit).
        final List<Integer> argIndices = functionCall.getArguments().stream()
            .limit(totalArgs - func.initArgs)
            .map((arg) -> {
                final Optional<Column> column;
                if (arg instanceof UnqualifiedColumnReferenceExp) {
                    final UnqualifiedColumnReferenceExp colRef = (UnqualifiedColumnReferenceExp) arg;
                    column = schema.findValueColumn(colRef.getColumnName());
                } else {
                    // assume that it is a column reference with no alias
                    column = schema.findValueColumn(ColumnName.of(arg.toString()));
                }
                return column.orElseThrow(
                    () -> new KsqlException("Could not find column for expression: " + arg)
                );
            }).map(Column::index).collect(Collectors.toList());
        return func.source.apply(createAggregateFunctionInitArgs(
            func.initArgs, argIndices, functionCall, config
        ));
    } catch (final Exception e) {
        // Re-wrap with the offending call text for easier diagnosis.
        throw new KsqlException("Failed to create aggregate function: " + functionCall, e);
    }
}
// Verifies that resolution fails with a wrapped KsqlException when a
// column-position argument is a literal rather than a column reference.
@Test
public void shouldThrowIfSecondParamIsColArgAndNotACol() {
    // Given:
    when(functionCall.getArguments()).thenReturn(ImmutableList.of(
        new UnqualifiedColumnReferenceExp(ColumnName.of("FOO")),
        new StringLiteral("Not good!"),
        new StringLiteral("No issue here")
    ));
    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> UdafUtil.resolveAggregateFunction(
            functionRegistry,
            functionCall,
            SCHEMA,
            KsqlConfig.empty()
        )
    );
    // Then:
    assertThat(e.getMessage(), is("Failed to create aggregate function: functionCall"));
}
/** Returns the length of the underlying file in bytes. */
@Override
public long getLength() {
    return mFileSize;
}
// Verifies that the reader reports the file's length.
@Test
public void getLength() {
    // JUnit's assertEquals contract is (expected, actual); the original call
    // had the arguments swapped, which yields misleading failure messages.
    Assert.assertEquals(mFileLen, mPagedFileReader.getLength());
}
/**
 * Routes a database-broadcast statement: adds one identity-mapped route unit
 * (logical name == actual name) for every data source of the broadcast rule.
 */
@Override
public RouteContext route(final RouteContext routeContext, final BroadcastRule broadcastRule) {
    broadcastRule.getDataSourceNames().forEach(dataSourceName ->
        routeContext.getRouteUnits().add(
            new RouteUnit(new RouteMapper(dataSourceName, dataSourceName), Collections.emptyList())));
    return routeContext;
}
// Verifies that the database-broadcast engine produces one route unit per
// data source, in order.
@Test
void assertRoute() {
    BroadcastRule broadcastRule = mock(BroadcastRule.class);
    when(broadcastRule.getDataSourceNames()).thenReturn(Arrays.asList("ds_0", "ds_1"));
    BroadcastDatabaseBroadcastRoutingEngine engine = new BroadcastDatabaseBroadcastRoutingEngine();
    RouteContext routeContext = engine.route(new RouteContext(), broadcastRule);
    assertThat(routeContext.getRouteUnits().size(), is(2));
    Iterator<RouteUnit> iterator = routeContext.getRouteUnits().iterator();
    assertDataSourceRouteMapper(iterator.next(), "ds_0");
    assertDataSourceRouteMapper(iterator.next(), "ds_1");
}
/**
 * Chooses a service instance: empty when there are no candidates, the sole
 * candidate when there is exactly one, otherwise whatever the concrete
 * load-balancing strategy ({@code doChoose}) selects.
 */
@Override
public Optional<ServiceInstance> choose(String serviceName, List<ServiceInstance> instances) {
    if (instances == null || instances.isEmpty()) {
        return Optional.empty();
    }
    ServiceInstance selected = instances.size() == 1
        ? instances.get(0)
        : doChoose(serviceName, instances);
    return Optional.ofNullable(selected);
}
// Verifies the round-robin balancer's choose(): empty input -> empty result;
// single instance -> that instance; multiple instances -> one of them.
@Test
public void choose() {
    final AbstractLoadbalancer roundRobinLoadbalancer = new RoundRobinLoadbalancer();
    final Optional<ServiceInstance> choose = roundRobinLoadbalancer.choose(null, null);
    Assert.assertFalse(choose.isPresent());
    // Test an instance
    final List<ServiceInstance> serviceInstances = Collections.singletonList(build());
    final Optional<ServiceInstance> test = roundRobinLoadbalancer.choose("test", serviceInstances);
    Assert.assertTrue(test.isPresent());
    Assert.assertEquals(test.get(), serviceInstances.get(0));
    // Test multiple instances
    final List<ServiceInstance> serviceInstances1 = Arrays.asList(build(), build());
    final Optional<ServiceInstance> instance = roundRobinLoadbalancer.choose("name", serviceInstances1);
    Assert.assertTrue(instance.isPresent());
    Assert.assertTrue(serviceInstances1.contains(instance.get()));
}
/**
 * Inlines this templated {@code new}-class expression into a javac
 * {@link JCNewClass} tree, inlining each optional part (enclosing
 * expression, type arguments, identifier, arguments, anonymous class body)
 * when present and passing {@code null} where the template had none.
 *
 * @throws CouldNotResolveImportException if a referenced type cannot be imported
 */
@Override
public JCNewClass inline(Inliner inliner) throws CouldNotResolveImportException {
    return inliner
        .maker()
        .NewClass(
            (getEnclosingExpression() == null) ? null : getEnclosingExpression().inline(inliner),
            inliner.<JCExpression>inlineList(getTypeArguments()),
            getIdentifier().inline(inliner),
            inliner.<JCExpression>inlineList(getArguments()),
            (getClassBody() == null) ? null : getClassBody().inline(inliner));
}
// Verifies that a UNewClass template inlines to the expected source form,
// with top-level imports resolved for java.lang.String.
@Test
public void inline() {
    ImportPolicy.bind(context, ImportPolicy.IMPORT_TOP_LEVEL);
    assertInlines(
        "new String(\"123\")",
        UNewClass.create(UClassIdent.create("java.lang.String"), ULiteral.stringLit("123")));
}
/**
 * Interprets the given statement text within the current session by
 * delegating to the shared interpreter helper.
 */
@Override
public InterpreterResult interpret(String st, InterpreterContext context) {
    return helper.interpret(session, st, context);
}
// Verifies DESCRIBE TYPE for a UDT addressed without a keyspace qualifier,
// relying on the preceding USE statement; output is compared against a
// stored HTML fixture after whitespace normalization.
@Test
void should_describe_udt_withing_logged_in_keyspace() {
    // Given
    String query = "USE live_data;\n" + "DESCRIBE TYPE address;";
    final String expected = reformatHtml(readTestResource(
        "/scalate/DescribeType_live_data_address_within_current_keyspace.html"));
    // When
    final InterpreterResult actual = interpreter.interpret(query, intrContext);
    // Then
    assertEquals(Code.SUCCESS, actual.code());
    assertEquals(expected, reformatHtml(actual.message().get(0).getData()));
}
/**
 * Updates the last-active timestamp of every occupant belonging to the given
 * user. Local users are updated via the per-cluster-node occupant map;
 * non-local users via the federated occupant map.
 *
 * @param userJid the (non-null) real JID of the user that showed activity
 */
public void registerActivity(@Nonnull final JID userJid) {
    // Only tracking it for the local cluster node, as those are the only users for which this node will monitor activity anyway
    mutex.writeLock().lock();
    try {
        if (XMPPServer.getInstance().isLocal(userJid)) {
            final Map<JID, Set<Occupant>> localOccupants = localOccupantsByNode.get((XMPPServer.getInstance().getNodeID()));
            if (localOccupants != null) {
                final Set<Occupant> localOccupantsForUser = localOccupants.get(userJid);
                if (localOccupantsForUser != null) {
                    // Touch every room occupancy of this user.
                    localOccupantsForUser.forEach(occupant -> occupant.setLastActive(Instant.now()));
                }
            }
        } else {
            final Set<Occupant> federatedOccupantsForUser = federatedOccupants.get(userJid);
            if (federatedOccupantsForUser != null) {
                federatedOccupantsForUser.forEach(occupant -> occupant.setLastActive(Instant.now()));
            }
        }
    } finally {
        mutex.writeLock().unlock();
    }
}
// Verifies that registerActivity advances the occupant's last-active
// timestamp: after joining and pausing, the recorded instant must be at
// least the pause after the pre-activity reference time.
@Test
public void testRegisterActivity() throws Exception {
    // Setup test fixture.
    final OccupantManager occupantManager = new OccupantManager(mockService);
    final JID roomJID = new JID("room", mockService.getServiceDomain(), null);
    final JID userJID = new JID("johndoe", "example.org", null);
    final String nickname = "John Doe";
    final Duration pause = Duration.ofMillis(10);
    // Execute system under test.
    occupantManager.occupantJoined(roomJID, userJID, nickname);
    final Instant start = Instant.now();
    Thread.sleep(pause.toMillis());
    occupantManager.registerActivity(userJID);
    // Verify results.
    final Instant lastActive = occupantManager.getLocalOccupants().iterator().next().getLastActive();
    assertTrue(Duration.between(start, lastActive).compareTo(pause) >= 0);
}
/**
 * Resolves the temporary component blob identifiers for this recoverable:
 * one identifier per component object id, located in the temporary bucket
 * and named with or without entropy depending on the sink options.
 */
List<GSBlobIdentifier> getComponentBlobIds(GSFileSystemOptions options) {
    String temporaryBucketName = BlobUtils.getTemporaryBucketName(finalBlobIdentifier, options);
    List<GSBlobIdentifier> componentBlobIdentifiers =
        componentObjectIds.stream()
            .map(temporaryObjectId -> new GSBlobIdentifier(
                temporaryBucketName,
                options.isFileSinkEntropyEnabled()
                    ? BlobUtils.getTemporaryObjectNameWithEntropy(finalBlobIdentifier, temporaryObjectId)
                    : BlobUtils.getTemporaryObjectName(finalBlobIdentifier, temporaryObjectId)))
            .collect(Collectors.toList());
    LOGGER.trace("Resolved component blob identifiers for blob {}: {}", finalBlobIdentifier, componentBlobIdentifiers);
    return componentBlobIdentifiers;
}
// Verifies bucket and object-name resolution for component blobs, with and
// without a configured temporary bucket.
@Test
public void shouldGetComponentBlobIds() {
    // configure options, if this test configuration has a temporary bucket name, set it
    Configuration flinkConfig = new Configuration();
    if (temporaryBucketName != null) {
        flinkConfig.set(GSFileSystemOptions.WRITER_TEMPORARY_BUCKET_NAME, temporaryBucketName);
    }
    GSFileSystemOptions options = new GSFileSystemOptions(flinkConfig);
    GSCommitRecoverable commitRecoverable = new GSCommitRecoverable(blobIdentifier, componentObjectIds);
    List<GSBlobIdentifier> componentBlobIdentifiers = commitRecoverable.getComponentBlobIds(options);
    for (int i = 0; i < componentObjectIds.size(); i++) {
        UUID componentObjectId = componentObjectIds.get(i);
        GSBlobIdentifier componentBlobIdentifier = componentBlobIdentifiers.get(i);
        // if a temporary bucket is specified in options, the component blob identifier
        // should be in this bucket; otherwise, it should be in the bucket with the final blob
        assertEquals(
            temporaryBucketName == null ? blobIdentifier.bucketName : temporaryBucketName,
            componentBlobIdentifier.bucketName);
        // make sure the name is what is expected
        String expectedObjectName =
            String.format(
                ".inprogress/%s/%s/%s",
                blobIdentifier.bucketName, blobIdentifier.objectName, componentObjectId);
        assertEquals(expectedObjectName, componentBlobIdentifier.objectName);
    }
}
/**
 * Runs the given task only when {@code source} is a non-empty string.
 * Any exception thrown by the task is caught and logged, never propagated.
 */
public static void stringNotEmptyAndThenExecute(String source, Runnable runnable) {
    if (!StringUtils.isNotEmpty(source)) {
        return;
    }
    try {
        runnable.run();
    } catch (Exception e) {
        LogUtils.NAMING_LOGGER.error("string not empty and then execute cause an exception.", e);
    }
}
// Verifies that the task runs exactly once when the source string is
// non-empty.
@Test
void testStringNotEmptyAndThenExecuteSuccess() {
    String word = "run";
    Runnable task = Mockito.mock(Runnable.class);
    TemplateUtils.stringNotEmptyAndThenExecute(word, task);
    Mockito.verify(task, Mockito.times(1)).run();
}
// Removes the fact mapping at the given position.
// Throws IndexOutOfBoundsException when the index is out of range
// (propagated from List.remove).
void removeFactMappingByIndex(int index) {
    factMappings.remove(index);
}
// Verifies that removing the only fact mapping leaves index 0 invalid:
// a subsequent lookup must throw IndexOutOfBoundsException.
@Test
public void removeFactMappingByIndex() {
    modelDescriptor.addFactMapping(factIdentifier, expressionIdentifier);
    modelDescriptor.removeFactMappingByIndex(0);
    assertThatExceptionOfType(IndexOutOfBoundsException.class).isThrownBy(() -> modelDescriptor.getFactMappingByIndex(0));
}
/**
 * Generates the reduced SAML metadata for the given connection/entity,
 * serializes it to XML (with secure processing enabled) and returns it
 * Base64-encoded. Returns an empty string when marshalling or
 * transformation fails.
 */
public String generateReducedMetadataString(Connection connection, String serviceEntityId) {
    EntitiesDescriptor entitiesDescriptor = generateReducedEntitiesDescriptor(connection, serviceEntityId);
    String xmlString = "";
    try {
        Marshaller out = XMLObjectProviderRegistrySupport.getMarshallerFactory().getMarshaller(entitiesDescriptor);
        out.marshall(entitiesDescriptor);
        Element element = entitiesDescriptor.getDOM();
        TransformerFactory factory = TransformerFactory.newInstance();
        // Guard against XXE / resource-exhaustion attacks during transformation.
        factory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
        Transformer transformer = factory.newTransformer();
        StreamResult result = new StreamResult(new StringWriter());
        DOMSource source = new DOMSource(element);
        transformer.transform(source, result);
        // Encode as UTF-8 explicitly: the no-arg getBytes() uses the platform
        // default charset, making the output non-deterministic across hosts.
        xmlString = Base64.getEncoder().encodeToString(
                result.getWriter().toString().getBytes(java.nio.charset.StandardCharsets.UTF_8));
    } catch (MarshallingException | TransformerException e) {
        // Pass the throwable so the stack trace is retained in the log
        // (logging only getMessage() loses the cause chain).
        LOGGER.error("An error has occurred generating metadata string: {}", e.getMessage(), e);
    }
    return xmlString;
}
@Test void generateReducedMetadataString() throws InitializationException { setupParserPool(); String result = metadataRetrieverServiceMock.generateReducedMetadataString(newConnection(SAML_COMBICONNECT, true, true, true), newMetadataRequest().getServiceEntityId()); assertEquals("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS
SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", result); }
/**
 * Writes one record by splitting the tuple into its key and value and
 * delegating to the wrapped Hadoop RecordWriter.
 */
@Override
public void writeRecord(Tuple2<K, V> record) throws IOException {
    K key = record.f0;
    V value = record.f1;
    this.recordWriter.write(key, value);
}
// Verifies that writeRecord forwards the tuple's key and value to the
// underlying Hadoop RecordWriter exactly once.
@Test
void testWriteRecord() throws Exception {
    OutputFormat<String, Long> dummyOutputFormat = mock(DummyOutputFormat.class);
    DummyRecordWriter recordWriter = mock(DummyRecordWriter.class);
    JobConf jobConf = mock(JobConf.class);
    HadoopOutputFormat<String, Long> outputFormat = new HadoopOutputFormat<>(dummyOutputFormat, jobConf);
    outputFormat.recordWriter = recordWriter;
    outputFormat.writeRecord(new Tuple2<>("key", 1L));
    verify(recordWriter, times(1)).write(anyString(), anyLong());
}
/**
 * Splits {@code content} on whitespace — treating double-quoted spans as
 * single tokens with their quotes stripped — and rejoins the tokens with the
 * given delimiter.
 */
public static String replaceSpacesWithDelimiter(String content, String delimiter) {
    // Tokens are either a run starting with a non-quote character, or a
    // minimal double-quoted span; trailing whitespace is consumed per token.
    Matcher matcher = Pattern.compile("([^\"]\\S*|\".+?\")\\s*").matcher(content);
    StringBuilder joined = new StringBuilder();
    boolean first = true;
    while (matcher.find()) {
        String token = matcher.group(1);
        if (token.startsWith("\"") && token.endsWith("\"")) {
            token = token.replaceAll("^\"|\"$", "");
        }
        if (!first) {
            joined.append(delimiter);
        }
        joined.append(token);
        first = false;
    }
    return joined.toString();
}
// Verifies that unquoted spaces become the delimiter while a quoted span
// stays a single token with its quotes removed.
@Test
public void testReplaceSpacesWithDelimiter() {
    String command = "ls -l \" space\"";
    String expected = "ls,-l, space";
    String actual = ProviderUtils.replaceSpacesWithDelimiter(command, ",");
    Assert.assertEquals("replaceSpaceWithDelimiter produces unexpected result.", expected, actual);
}
/**
 * Builds an AuthorizationDoc from the given permissions DTO. When the DTO
 * allows anyone, user and group restrictions are ignored; otherwise the doc
 * is restricted to the DTO's group and user uuids.
 */
public static AuthorizationDoc fromDto(IndexType indexType, IndexPermissions dto) {
    AuthorizationDoc doc = new AuthorizationDoc(indexType, dto.getEntityUuid());
    return dto.isAllowAnyone()
        ? doc.setAllowAnyone()
        : doc.setRestricted(dto.getGroupUuids(), dto.getUserUuids());
}
// Verifies that allowAnyone wins: with random user/group uuids present, the
// resulting doc exposes auth_allowAnyone=true and the user/group fields are
// absent (their lookup throws IllegalStateException).
@Test
public void fromDto_ignores_userIds_and_groupUuids_if_allowAnyone_is_true() {
    IndexPermissions underTest = new IndexPermissions(randomAlphabetic(3), randomAlphabetic(4));
    IntStream.range(0, 1 + new Random().nextInt(5)).mapToObj(String::valueOf).forEach(underTest::addUserUuid);
    IntStream.range(0, 1 + new Random().nextInt(5)).mapToObj(Integer::toString).forEach(underTest::addGroupUuid);
    underTest.allowAnyone();
    AuthorizationDoc doc = AuthorizationDoc.fromDto(IndexType.main(Index.simple("foo"), "bar"), underTest);
    boolean auth_allowAnyone = doc.getField("auth_allowAnyone");
    assertThat(auth_allowAnyone).isTrue();
    try {
        doc.getField("auth_userIds");
        fail("should have thrown IllegalStateException");
    } catch (IllegalStateException e) {
        assertThat(e).hasMessage("Field auth_userIds not specified in query options");
    }
    try {
        doc.getField("auth_groupUuids");
        fail("should have thrown IllegalStateException");
    } catch (IllegalStateException e) {
        assertThat(e).hasMessage("Field auth_groupUuids not specified in query options");
    }
}
/**
 * Retrieves timeline entities matching the given filters, recording
 * operation-count, result-count and latency metrics around the call.
 *
 * @return the matching entities (possibly empty)
 * @throws YarnException or IOException propagated from the underlying store
 */
public TimelineEntities getEntities(
    String entityType, NameValuePair primaryFilter,
    Collection<NameValuePair> secondaryFilter, Long windowStart,
    Long windowEnd, String fromId, Long fromTs, Long limit,
    EnumSet<Field> fields, UserGroupInformation callerUGI) throws YarnException, IOException {
    long startTime = Time.monotonicNow();
    metrics.incrGetEntitiesOps();
    try {
        TimelineEntities entities = doGetEntities(
            entityType,
            primaryFilter,
            secondaryFilter,
            windowStart,
            windowEnd,
            fromId,
            fromTs,
            limit,
            fields,
            callerUGI);
        metrics.incrGetEntitiesTotal(entities.getEntities().size());
        return entities;
    } finally {
        // Latency is recorded even when doGetEntities throws.
        metrics.addGetEntitiesTime(Time.monotonicNow() - startTime);
    }
}
// With admin ACLs enabled, a non-admin caller should only see the single
// entity it owns; the admin ACLs manager is restored afterwards.
@Test
void testGetEntitiesAclEnabled() throws Exception {
    AdminACLsManager oldAdminACLsManager = aclsManager.setAdminACLsManager(adminACLsManager);
    try {
        TimelineEntities entities = dataManaer.getEntities(
            "ACL_ENTITY_TYPE_1", null, null, null, null, null, null, 1L, null,
            UserGroupInformation.createUserForTesting("owner_1", new String[]{"group1"}));
        assertEquals(1, entities.getEntities().size());
        assertEquals("ACL_ENTITY_ID_11", entities.getEntities().get(0).getEntityId());
    } finally {
        aclsManager.setAdminACLsManager(oldAdminACLsManager);
    }
}
/**
 * Selects the LoggerContext for the current caller: first the thread-local
 * context if one was set, then a context named via JNDI lookup, creating and
 * auto-configuring it on first use, otherwise the default context.
 *
 * NOTE(review): the get-then-put on synchronizedContextMap is not atomic, so
 * two threads racing on the same contextName could each create a context —
 * presumably benign here, but confirm before relying on uniqueness.
 */
public LoggerContext getLoggerContext() {
    String contextName = null;
    Context ctx = null;
    // First check if ThreadLocal has been set already
    LoggerContext lc = threadLocal.get();
    if (lc != null) {
        return lc;
    }
    try {
        // We first try to find the name of our
        // environment's LoggerContext
        ctx = JNDIUtil.getInitialContext();
        contextName = (String) JNDIUtil.lookupString(ctx, JNDI_CONTEXT_NAME);
    } catch (NamingException ne) {
        // We can't log here
    }
    if (contextName == null) {
        // We return the default context
        return defaultContext;
    } else {
        // Let's see if we already know such a context
        LoggerContext loggerContext = synchronizedContextMap.get(contextName);
        if (loggerContext == null) {
            // We have to create a new LoggerContext
            loggerContext = new LoggerContext();
            loggerContext.setName(contextName);
            synchronizedContextMap.put(contextName, loggerContext);
            URL url = findConfigFileURL(ctx, loggerContext);
            if (url != null) {
                configureLoggerContextByURL(loggerContext, url);
            } else {
                try {
                    new ContextInitializer(loggerContext).autoConfig();
                } catch (JoranException je) {
                }
            }
            // logback-292
            if (!StatusUtil.contextHasStatusListener(loggerContext))
                StatusPrinter.printInCaseOfErrorsOrWarnings(loggerContext);
        }
        return loggerContext;
    }
}
// When the JNDI entry maps to null, the selector must fall back to the
// context named "default".
@Test
public void defaultContext() {
    MockInitialContext mic = MockInitialContextFactory.getContext();
    mic.map.put(ClassicConstants.JNDI_CONTEXT_NAME, null);
    ContextJNDISelector selector = (ContextJNDISelector) ContextSelectorStaticBinder.getSingleton()
            .getContextSelector();
    Context context = selector.getLoggerContext();
    assertEquals("default", context.getName());
}
/** Wraps the given servlet response in a Jetty-specific adapter. */
@Override
public ServletResponse getResponse(javax.servlet.ServletResponse servletResponse) {
    JettyResponse adapted = new JettyResponse(servletResponse);
    return adapted;
}
// The helper must wrap any servlet response in a JettyResponse adapter.
@Test
public void shouldGetJettyResponse() {
    ServletResponse response = new JettyServletHelper().getResponse(mock(Response.class));
    assertThat(response instanceof JettyResponse, is(true));
}
/**
 * Returns the resources assigned for the given resource type, or an empty
 * list when nothing has been assigned for that type.
 */
public List<Serializable> getAssignedResources(String resourceType) {
    final AssignedResources assigned = assignedResourcesMap.get(resourceType);
    if (assigned != null) {
        return assigned.getAssignedResources();
    }
    return Collections.emptyList();
}
// Round-trips AssignedResources through its byte serialization and checks
// that the deserialized list equals the original.
@Test
public void testSerializeAssignedResourcesWithSerializationUtils() {
    try {
        byte[] serializedString = testResources.toBytes();
        ResourceMappings.AssignedResources deserialized =
            ResourceMappings.AssignedResources.fromBytes(serializedString);
        Assert.assertEquals(testResources.getAssignedResources(),
            deserialized.getAssignedResources());
    } catch (IOException e) {
        e.printStackTrace();
        Assert.fail(String.format("Serialization of test AssignedResources "
            + "failed with %s", e.getMessage()));
    }
}
public void formatSource(CharSource input, CharSink output) throws FormatterException, IOException { // TODO(cushon): proper support for streaming input/output. Input may // not be feasible (parsing) but output should be easier. output.write(formatSource(input.read())); }
// A blank line inside an otherwise empty class body must be collapsed,
// leaving a one-line empty class.
@Test
public void blankInClassBody() throws FormatterException {
    String input = "package test;\nclass T {\n\n}\n";
    String output = new Formatter().formatSource(input);
    String expect = "package test;\n\nclass T {}\n";
    assertThat(output).isEqualTo(expect);
}
/**
 * Destroys the client buffer identified by {@code bufferId}, then re-checks
 * whether the whole output buffer has now finished flushing.
 */
@Override
public void abort(OutputBufferId bufferId) {
    // Destroying a buffer can trigger callbacks; must not hold our own lock.
    checkState(!Thread.holdsLock(this), "Can not abort while holding a lock on this");
    requireNonNull(bufferId, "bufferId is null");
    getBuffer(bufferId).destroy();
    checkFlushComplete();
}
// Aborting a client buffer closes its queue, drops unacknowledged pages,
// and once all buffers are aborted the whole output buffer is finished.
@Test
public void testAbort() {
    ArbitraryOutputBuffer buffer = createArbitraryBuffer(createInitialEmptyOutputBuffers(ARBITRARY), sizeOfPages(10));
    // fill the buffer
    for (int i = 0; i < 10; i++) {
        addPage(buffer, createPage(i));
    }
    buffer.setNoMorePages();
    // add one output buffer
    OutputBuffers outputBuffers = createInitialEmptyOutputBuffers(ARBITRARY).withBuffer(FIRST, 0);
    buffer.setOutputBuffers(outputBuffers);
    // read a page from the first buffer
    assertBufferResultEquals(TYPES, getBufferResult(buffer, FIRST, 0, sizeOfPages(1), NO_WAIT), bufferResult(0, createPage(0)));
    // abort buffer, and verify page cannot be acknowledged
    buffer.abort(FIRST);
    assertQueueClosed(buffer, 9, FIRST, 0);
    assertBufferResultEquals(TYPES, getBufferResult(buffer, FIRST, 1, sizeOfPages(1), NO_WAIT), emptyResults(TASK_INSTANCE_ID, 0, true));
    outputBuffers = outputBuffers.withBuffer(SECOND, 0).withNoMoreBufferIds();
    buffer.setOutputBuffers(outputBuffers);
    // first page is lost because the first buffer was aborted
    assertBufferResultEquals(TYPES, getBufferResult(buffer, SECOND, 0, sizeOfPages(1), NO_WAIT), bufferResult(0, createPage(1)));
    buffer.abort(SECOND);
    assertQueueClosed(buffer, 0, SECOND, 0);
    assertFinished(buffer);
    assertBufferResultEquals(TYPES, getBufferResult(buffer, SECOND, 1, sizeOfPages(1), NO_WAIT), emptyResults(TASK_INSTANCE_ID, 0, true));
}
/**
 * Reserves {@code count} blocks for {@code file}: reuses cached blocks where
 * available and creates fresh zeroed blocks for the remainder.
 *
 * @throws IOException if the allocation would exceed the disk's capacity
 */
public synchronized void allocate(RegularFile file, int count) throws IOException {
    int totalAfterAllocation = allocatedBlockCount + count;
    if (totalAfterAllocation > maxBlockCount) {
        throw new IOException("out of disk space");
    }
    // Blocks the cache cannot supply must be freshly created.
    int freshBlocks = Math.max(count - blockCache.blockCount(), 0);
    for (int i = 0; i < freshBlocks; i++) {
        file.addBlock(new byte[blockSize]);
    }
    int cachedBlocks = count - freshBlocks;
    if (cachedBlocks != 0) {
        blockCache.transferBlocksTo(file, cachedBlocks);
    }
    allocatedBlockCount = totalAfterAllocation;
}
// Allocating past the disk's 10-block capacity must fail with IOException.
@Test
public void testFullDisk() throws IOException {
    HeapDisk disk = new HeapDisk(4, 10, 4);
    disk.allocate(blocks, 10);
    try {
        disk.allocate(blocks, 1);
        fail();
    } catch (IOException expected) {
    }
}
/**
 * Sends this request synchronously via the configured web3j service and
 * returns the deserialized response of type {@code T}.
 *
 * @throws IOException on transport failure
 */
public T send() throws IOException {
    return web3jService.send(this, responseType);
}
// shh_newIdentity must serialize to the expected JSON-RPC request payload.
@Test
public void testShhNewIdentity() throws Exception {
    web3j.shhNewIdentity().send();
    verifyResult("{\"jsonrpc\":\"2.0\",\"method\":\"shh_newIdentity\",\"params\":[],\"id\":1}");
}
/**
 * Incrementally decodes SPDY frames from {@code buffer}, driving a state
 * machine: first the 8-byte common header, then the frame-type-specific body,
 * notifying {@code delegate} as frames/parts become available. Returns when
 * the buffer does not yet hold enough bytes for the current state; decoding
 * resumes from that state on the next call (streamId/flags/length/numSettings
 * are instance fields carried across calls).
 */
public void decode(ByteBuf buffer) {
    boolean last;
    int statusCode;
    while (true) {
        switch(state) {
        case READ_COMMON_HEADER:
            if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
                return;
            }
            int frameOffset = buffer.readerIndex();
            int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
            int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
            buffer.skipBytes(SPDY_HEADER_SIZE);
            // High bit of the first byte distinguishes control from data frames.
            boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
            int version;
            int type;
            if (control) {
                // Decode control frame common header
                version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
                type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
                streamId = 0; // Default to session Stream-ID
            } else {
                // Decode data frame common header
                version = spdyVersion; // Default to expected version
                type = SPDY_DATA_FRAME;
                streamId = getUnsignedInt(buffer, frameOffset);
            }
            flags = buffer.getByte(flagsOffset);
            length = getUnsignedMedium(buffer, lengthOffset);
            // Check version first then validity
            if (version != spdyVersion) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid SPDY Version");
            } else if (!isValidFrameHeader(streamId, type, flags, length)) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid Frame Error");
            } else {
                state = getNextState(type, length);
            }
            break;
        case READ_DATA_FRAME:
            if (length == 0) {
                state = State.READ_COMMON_HEADER;
                delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
                break;
            }
            // Generate data frames that do not exceed maxChunkSize
            int dataLength = Math.min(maxChunkSize, length);
            // Wait until entire frame is readable
            if (buffer.readableBytes() < dataLength) {
                return;
            }
            ByteBuf data = buffer.alloc().buffer(dataLength);
            data.writeBytes(buffer, dataLength);
            length -= dataLength;
            if (length == 0) {
                state = State.READ_COMMON_HEADER;
            }
            // FIN is only reported on the final chunk of the frame.
            last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
            delegate.readDataFrame(streamId, last, data);
            break;
        case READ_SYN_STREAM_FRAME:
            // Fixed 10-byte body: stream-id, associated-stream-id, priority.
            if (buffer.readableBytes() < 10) {
                return;
            }
            int offset = buffer.readerIndex();
            streamId = getUnsignedInt(buffer, offset);
            int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
            byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
            last = hasFlag(flags, SPDY_FLAG_FIN);
            boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
            buffer.skipBytes(10);
            length -= 10;
            if (streamId == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid SYN_STREAM Frame");
            } else {
                state = State.READ_HEADER_BLOCK;
                delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
            }
            break;
        case READ_SYN_REPLY_FRAME:
            if (buffer.readableBytes() < 4) {
                return;
            }
            streamId = getUnsignedInt(buffer, buffer.readerIndex());
            last = hasFlag(flags, SPDY_FLAG_FIN);
            buffer.skipBytes(4);
            length -= 4;
            if (streamId == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid SYN_REPLY Frame");
            } else {
                state = State.READ_HEADER_BLOCK;
                delegate.readSynReplyFrame(streamId, last);
            }
            break;
        case READ_RST_STREAM_FRAME:
            if (buffer.readableBytes() < 8) {
                return;
            }
            streamId = getUnsignedInt(buffer, buffer.readerIndex());
            statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
            buffer.skipBytes(8);
            if (streamId == 0 || statusCode == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid RST_STREAM Frame");
            } else {
                state = State.READ_COMMON_HEADER;
                delegate.readRstStreamFrame(streamId, statusCode);
            }
            break;
        case READ_SETTINGS_FRAME:
            if (buffer.readableBytes() < 4) {
                return;
            }
            boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
            numSettings = getUnsignedInt(buffer, buffer.readerIndex());
            buffer.skipBytes(4);
            length -= 4;
            // Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
            if ((length & 0x07) != 0 || length >> 3 != numSettings) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid SETTINGS Frame");
            } else {
                state = State.READ_SETTING;
                delegate.readSettingsFrame(clear);
            }
            break;
        case READ_SETTING:
            if (numSettings == 0) {
                state = State.READ_COMMON_HEADER;
                delegate.readSettingsEnd();
                break;
            }
            if (buffer.readableBytes() < 8) {
                return;
            }
            byte settingsFlags = buffer.getByte(buffer.readerIndex());
            int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
            int value = getSignedInt(buffer, buffer.readerIndex() + 4);
            boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
            boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
            buffer.skipBytes(8);
            --numSettings;
            delegate.readSetting(id, value, persistValue, persisted);
            break;
        case READ_PING_FRAME:
            if (buffer.readableBytes() < 4) {
                return;
            }
            int pingId = getSignedInt(buffer, buffer.readerIndex());
            buffer.skipBytes(4);
            state = State.READ_COMMON_HEADER;
            delegate.readPingFrame(pingId);
            break;
        case READ_GOAWAY_FRAME:
            if (buffer.readableBytes() < 8) {
                return;
            }
            int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
            statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
            buffer.skipBytes(8);
            state = State.READ_COMMON_HEADER;
            delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
            break;
        case READ_HEADERS_FRAME:
            if (buffer.readableBytes() < 4) {
                return;
            }
            streamId = getUnsignedInt(buffer, buffer.readerIndex());
            last = hasFlag(flags, SPDY_FLAG_FIN);
            buffer.skipBytes(4);
            length -= 4;
            if (streamId == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid HEADERS Frame");
            } else {
                state = State.READ_HEADER_BLOCK;
                delegate.readHeadersFrame(streamId, last);
            }
            break;
        case READ_WINDOW_UPDATE_FRAME:
            if (buffer.readableBytes() < 8) {
                return;
            }
            streamId = getUnsignedInt(buffer, buffer.readerIndex());
            int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
            buffer.skipBytes(8);
            if (deltaWindowSize == 0) {
                state = State.FRAME_ERROR;
                delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
            } else {
                state = State.READ_COMMON_HEADER;
                delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
            }
            break;
        case READ_HEADER_BLOCK:
            if (length == 0) {
                state = State.READ_COMMON_HEADER;
                delegate.readHeaderBlockEnd();
                break;
            }
            if (!buffer.isReadable()) {
                return;
            }
            // Deliver the compressed header block in whatever chunks arrive.
            int compressedBytes = Math.min(buffer.readableBytes(), length);
            ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
            headerBlock.writeBytes(buffer, compressedBytes);
            length -= compressedBytes;
            delegate.readHeaderBlock(headerBlock);
            break;
        case DISCARD_FRAME:
            int numBytes = Math.min(buffer.readableBytes(), length);
            buffer.skipBytes(numBytes);
            length -= numBytes;
            if (length == 0) {
                state = State.READ_COMMON_HEADER;
                break;
            }
            return;
        case FRAME_ERROR:
            // Once in error, all further input is discarded.
            buffer.skipBytes(buffer.readableBytes());
            return;
        default:
            throw new Error("Shouldn't reach here.");
        }
    }
}
// A SYN_REPLY frame carrying FLAG_FIN must be reported with last == true and
// immediately followed by the end of its (empty) header block.
@Test
public void testLastSpdySynReplyFrame() throws Exception {
    short type = 2;
    byte flags = 0x01; // FLAG_FIN
    int length = 4;
    int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01;
    ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
    encodeControlFrameHeader(buf, type, flags, length);
    buf.writeInt(streamId);
    decoder.decode(buf);
    verify(delegate).readSynReplyFrame(streamId, true);
    verify(delegate).readHeaderBlockEnd();
    assertFalse(buf.isReadable());
    buf.release();
}
/**
 * Updates a coupon template after validating that it exists, that the new
 * total count is not below the amount already taken, and that its product
 * scope settings are valid.
 */
@Override
public void updateCouponTemplate(CouponTemplateUpdateReqVO updateReqVO) {
    // Validate that the template exists
    CouponTemplateDO couponTemplate = validateCouponTemplateExists(updateReqVO.getId());
    // The new total count must not drop below what has already been taken
    if (updateReqVO.getTotalCount() < couponTemplate.getTakeCount()) {
        throw exception(COUPON_TEMPLATE_TOTAL_COUNT_TOO_SMALL, couponTemplate.getTakeCount());
    }
    // Validate the product scope settings
    validateProductScope(updateReqVO.getProductScope(), updateReqVO.getProductScopeValues());
    // Persist the update
    CouponTemplateDO updateObj = CouponTemplateConvert.INSTANCE.convert(updateReqVO);
    couponTemplateMapper.updateById(updateObj);
}
// Updating a non-existent template must raise COUPON_TEMPLATE_NOT_EXISTS.
@Test
public void testUpdateCouponTemplate_notExists() {
    // Prepare arguments
    CouponTemplateUpdateReqVO reqVO = randomPojo(CouponTemplateUpdateReqVO.class);
    // Invoke and assert the expected service exception
    assertServiceException(() -> couponTemplateService.updateCouponTemplate(reqVO), COUPON_TEMPLATE_NOT_EXISTS);
}
/**
 * Extracts the "reference_power" path metric from a connectivity reply and
 * converts it via 10 * log10(power * 1000) — i.e. the metric value is
 * presumably in watts and the result in dBm; TODO confirm against the device API.
 *
 * NOTE(review): when no "reference_power" metric is present, power keeps the
 * -99 sentinel and 10 * log10(-99 * 1000) yields NaN rather than a usable
 * sentinel — confirm callers handle NaN.
 */
protected double getLaunchPower(JsonNode connectivityReply) {
    double power = -99;
    if (connectivityReply.has("result") && connectivityReply.get("result").has("response")) {
        // NOTE(review): elements().next() throws NoSuchElementException when
        // "response" is present but empty — assumed non-empty; verify.
        Iterator<JsonNode> elements = connectivityReply.get("result").get("response")
                .elements().next().get("path-properties").get("path-metric").elements();
        Iterable<JsonNode> iterable = () -> elements;
        List<JsonNode> elementsList = StreamSupport
                .stream(iterable.spliterator(), false)
                .collect(Collectors.toList());
        for (JsonNode node : elementsList) {
            if (node.has("metric-type") && node.get("metric-type").asText().equals("reference_power")) {
                power = node.get("accumulative-value").asDouble();
                break;
            }
        }
    }
    return 10 * log10(power * 1000);
}
// A reference_power of 0.001 W in the canned reply converts to 0.0 dBm.
@Test
public void testGetLaunchPower() throws IOException {
    double power = manager.getLaunchPower(reply);
    assertEquals(0.0, power);
}
/**
 * Builds the standard "request code not supported" failure response for an
 * unrecognized request code. The channel context is unused but kept for
 * signature consistency with sibling handlers.
 */
private RemotingCommand getUnknownCmdResponse(ChannelHandlerContext ctx, RemotingCommand request) {
    final String error = " request type " + request.getCode() + " not supported";
    return RemotingCommand.createResponseCommand(RemotingSysResponseCode.REQUEST_CODE_NOT_SUPPORTED, error);
}
// An unrecognized request code must yield REQUEST_CODE_NOT_SUPPORTED.
@Test
public void testGetUnknownCmdResponse() throws RemotingCommandException {
    RemotingCommand request = RemotingCommand.createRequestCommand(10000, null);
    RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.REQUEST_CODE_NOT_SUPPORTED);
}
/**
 * Moves/renames {@code file} to {@code renamed} via the Storegate move API,
 * overwriting any existing target, and updates the local file-id cache.
 *
 * @return the renamed path carrying the original file's attributes
 * @throws BackgroundException on API errors or I/O failure
 */
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status, final Delete.Callback delete, final ConnectionCallback callback) throws BackgroundException {
    try {
        final StoregateApiClient client = session.getClient();
        final MoveFileRequest move = new MoveFileRequest()
            .name(renamed.getName())
            .parentID(fileid.getFileId(renamed.getParent()))
            .mode(1); // Overwrite
        final HttpEntityEnclosingRequestBase request;
        request = new HttpPost(String.format("%s/v4.2/files/%s/move", client.getBasePath(), fileid.getFileId(file)));
        if(status.getLockId() != null) {
            // Propagate the lock token so a locked file may still be moved.
            request.addHeader("X-Lock-Id", status.getLockId().toString());
        }
        request.setEntity(new StringEntity(new JSON().getContext(move.getClass()).writeValueAsString(move),
            ContentType.create("application/json", StandardCharsets.UTF_8.name())));
        request.addHeader(HTTP.CONTENT_TYPE, MEDIA_TYPE);
        final HttpResponse response = client.getClient().execute(request);
        try {
            switch(response.getStatusLine().getStatusCode()) {
                case HttpStatus.SC_NO_CONTENT:
                    final PathAttributes attr = new PathAttributes(file.attributes());
                    // Invalidate the source mapping, remap the id onto the target.
                    fileid.cache(file, null);
                    fileid.cache(renamed, attr.getFileId());
                    return renamed.withAttributes(attr);
                default:
                    throw new StoregateExceptionMappingService(fileid).map("Cannot rename {0}",
                        new ApiException(response.getStatusLine().getStatusCode(), response.getStatusLine().getReasonPhrase()), file);
            }
        } finally {
            EntityUtils.consume(response.getEntity());
        }
    } catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Cannot rename {0}", e, file);
    }
}
// Moving a file that does not exist on the server must raise NotfoundException.
@Test(expected = NotfoundException.class)
public void testMoveNotFound() throws Exception {
    final Path room = new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()),
        EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path test = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final StoregateIdProvider nodeid = new StoregateIdProvider(session);
    new StoregateMoveFeature(session, nodeid).move(test,
        new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)),
        new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback());
}
/**
 * Computes how many messages to request in the next pull: never more than
 * the configured batch size, and never more than the local consume-request
 * cache can still hold.
 */
int nextPullBatchNums() {
    final int configuredBatch = clientConfig.getRmqPullMessageBatchNums();
    final int cacheRoom = consumeRequestCache.remainingCapacity();
    return configuredBatch <= cacheRoom ? configuredBatch : cacheRoom;
}
// Batch size is capped by remaining cache capacity: 512 when empty, then
// shrinks as consume requests fill the cache.
@Test
public void testNextPullBatchNums() throws Exception {
    assertThat(localMessageCache.nextPullBatchNums()).isEqualTo(512);
    for (int i = 0; i < 513; i++) {
        localMessageCache.submitConsumeRequest(consumeRequest);
    }
    assertThat(localMessageCache.nextPullBatchNums()).isEqualTo(511);
}
/**
 * Looks up the stored offset for a single source partition by delegating to
 * the bulk {@code offsets} lookup and extracting this partition's entry.
 */
@Override
public <V> Map<String, Object> offset(Map<String, V> partition) {
    return offsets(Collections.singletonList(partition)).get(partition);
}
// A partition present in the stored state must return its saved offset map.
@Test
public void should_return_offset_for_existing_partition() {
    Map<String, String> partition = mapOf("part1", "something");
    Map<Map<String, ?>, Map<String, ?>> partitionToOffset = mapOf(partition, mapOf("part1", 123));
    State state = new State(partitionToOffset);
    JetSourceOffsetStorageReader sut = new JetSourceOffsetStorageReader(state);
    Map<String, Object> offset = sut.offset(partition);
    assertThat(offset).isEqualTo(mapOf("part1", 123));
}
/**
 * Rolls back messages currently being consumed in order: moves everything in
 * {@code consumingMsgOrderlyTreeMap} back into {@code msgTreeMap} so the
 * messages will be redelivered, then clears the in-flight map.
 *
 * Fix: if the thread is interrupted while acquiring the write lock, the
 * interrupt status is now restored so callers up the stack can observe it
 * instead of the interruption being silently swallowed.
 */
public void rollback() {
    try {
        this.treeMapLock.writeLock().lockInterruptibly();
        try {
            this.msgTreeMap.putAll(this.consumingMsgOrderlyTreeMap);
            this.consumingMsgOrderlyTreeMap.clear();
        } finally {
            this.treeMapLock.writeLock().unlock();
        }
    } catch (InterruptedException e) {
        log.error("rollback exception", e);
        // Preserve the interrupt status for callers.
        Thread.currentThread().interrupt();
    }
}
// After rollback the in-flight orderly map must be empty (its contents were
// moved back into msgTreeMap); inspected via reflection since it is private.
@Test
public void testRollback() throws IllegalAccessException {
    ProcessQueue processQueue = createProcessQueue();
    processQueue.rollback();
    Field consumingMsgOrderlyTreeMapField = FieldUtils.getDeclaredField(processQueue.getClass(), "consumingMsgOrderlyTreeMap", true);
    TreeMap<Long, MessageExt> consumingMsgOrderlyTreeMap =
        (TreeMap<Long, MessageExt>) consumingMsgOrderlyTreeMapField.get(processQueue);
    assertEquals(0, consumingMsgOrderlyTreeMap.size());
}
/**
 * Static factory for a {@link FlagSet} over the given enum class.
 * Argument validation (e.g. rejecting a null prefix) is performed by the
 * FlagSet constructor — presumably via requireNonNull; see its contract.
 */
public static <E extends Enum<E>> FlagSet<E> createFlagSet(
    final Class<E> enumClass,
    final String prefix,
    final EnumSet<E> flags) {
    return new FlagSet<>(enumClass, prefix, flags);
}
// A null prefix must be rejected with NullPointerException.
@Test
public void testCreateNullPrefix() throws Throwable {
    intercept(NullPointerException.class, () ->
        createFlagSet(SimpleEnum.class, null, SimpleEnum.a));
}
@Override @DataPermission(enable = false) // 发送短信时,无需考虑数据权限 public Long sendSingleSmsToAdmin(String mobile, Long userId, String templateCode, Map<String, Object> templateParams) { // 如果 mobile 为空,则加载用户编号对应的手机号 if (StrUtil.isEmpty(mobile)) { AdminUserDO user = adminUserService.getUser(userId); if (user != null) { mobile = user.getMobile(); } } // 执行发送 return sendSingleSms(mobile, userId, UserTypeEnum.ADMIN.getValue(), templateCode, templateParams); }
// With a null mobile the service must resolve the admin user's phone number,
// create a log entry, and enqueue the send message with the rendered params.
@Test
public void testSendSingleSmsToAdmin() {
    // Prepare arguments
    Long userId = randomLongId();
    String templateCode = randomString();
    Map<String, Object> templateParams = MapUtil.<String, Object>builder().put("code", "1234")
        .put("op", "login").build();
    // Mock adminUserService
    AdminUserDO user = randomPojo(AdminUserDO.class, o -> o.setMobile("15601691300"));
    when(adminUserService.getUser(eq(userId))).thenReturn(user);
    // Mock SmsTemplateService
    SmsTemplateDO template = randomPojo(SmsTemplateDO.class, o -> {
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
        o.setContent("验证码为{code}, 操作为{op}");
        o.setParams(Lists.newArrayList("code", "op"));
    });
    when(smsTemplateService.getSmsTemplateByCodeFromCache(eq(templateCode))).thenReturn(template);
    String content = randomString();
    when(smsTemplateService.formatSmsTemplateContent(eq(template.getContent()), eq(templateParams)))
        .thenReturn(content);
    // Mock SmsChannelService
    SmsChannelDO smsChannel = randomPojo(SmsChannelDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
    when(smsChannelService.getSmsChannel(eq(template.getChannelId()))).thenReturn(smsChannel);
    // Mock SmsLogService
    Long smsLogId = randomLongId();
    when(smsLogService.createSmsLog(eq(user.getMobile()), eq(userId), eq(UserTypeEnum.ADMIN.getValue()), eq(Boolean.TRUE),
        eq(template), eq(content), eq(templateParams))).thenReturn(smsLogId);
    // Invoke
    Long resultSmsLogId = smsSendService.sendSingleSmsToAdmin(null, userId, templateCode, templateParams);
    // Assert the returned log id
    assertEquals(smsLogId, resultSmsLogId);
    // Assert the producer was called with the rendered key/value params
    verify(smsProducer).sendSmsSendMessage(eq(smsLogId), eq(user.getMobile()),
        eq(template.getChannelId()), eq(template.getApiTemplateId()),
        eq(Lists.newArrayList(new KeyValue<>("code", "1234"), new KeyValue<>("op", "login"))));
}
/**
 * Opens an upload stream for {@code file}. When no upload URL is preset on
 * {@code status}, a simple upload resource is created (or updated, if the
 * file exists); otherwise the URL and resource id come from {@code status}
 * (large/chunked upload service). The returned stream PUTs (chunked) or
 * POSTs (simple) the entity and yields a {@link Chunk} on HTTP 200.
 */
@Override
public HttpResponseOutputStream<Chunk> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final String uploadUri;
    final String resourceId;
    if(null == status.getUrl()) {
        if(status.isExists()) {
            // Overwrite existing resource via simple update
            resourceId = fileid.getFileId(file);
            uploadUri = EueUploadHelper.updateResource(session, resourceId, status, UploadType.SIMPLE).getUploadURI();
        } else {
            // Create a new resource under the parent and take its upload URI
            final ResourceCreationResponseEntry uploadResourceCreationResponseEntry = EueUploadHelper
                .createResource(session, fileid.getFileId(file.getParent()), file.getName(),
                    status, UploadType.SIMPLE);
            resourceId = EueResourceIdProvider.getResourceIdFromResourceUri(uploadResourceCreationResponseEntry.getHeaders().getLocation());
            uploadUri = uploadResourceCreationResponseEntry.getEntity().getUploadURI();
        }
    } else {
        // Upload URL pre-negotiated (e.g. by the large upload service)
        uploadUri = status.getUrl();
        resourceId = status.getParameters().get(RESOURCE_ID);
    }
    final HttpResponseOutputStream<Chunk> stream = this.write(file, status, new DelayedHttpEntityCallable<Chunk>(file) {
        @Override
        public Chunk call(final HttpEntity entity) throws BackgroundException {
            try {
                final HttpResponse response;
                final StringBuilder uploadUriWithParameters = new StringBuilder(uploadUri);
                if(!Checksum.NONE.equals(status.getChecksum())) {
                    // Server-side checksum verification (cdash64)
                    uploadUriWithParameters.append(String.format("&x_cdash64=%s",
                        new ChunkListSHA256ChecksumCompute().compute(status.getLength(), Hex.decodeHex(status.getChecksum().hash))));
                }
                if(status.getLength() != -1) {
                    uploadUriWithParameters.append(String.format("&x_size=%d", status.getLength()));
                }
                if(status.isSegment()) {
                    // Chunked upload from large upload service
                    uploadUriWithParameters.append(String.format("&x_offset=%d",
                        new HostPreferences(session.getHost()).getLong("eue.upload.multipart.size") * (status.getPart() - 1)));
                    final HttpPut request = new HttpPut(uploadUriWithParameters.toString());
                    request.setEntity(entity);
                    response = session.getClient().execute(request);
                } else {
                    final HttpPost request = new HttpPost(uploadUriWithParameters.toString());
                    request.setEntity(entity);
                    request.setHeader(HttpHeaders.CONTENT_TYPE, MimeTypeService.DEFAULT_CONTENT_TYPE);
                    response = session.getClient().execute(request);
                }
                try {
                    if(response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) {
                        return new Chunk(resourceId, status.getPart(), status.getLength(), status.getChecksum());
                    }
                    // Buffer the entity so the failure body can be mapped to an exception
                    EntityUtils.updateEntity(response, new BufferedHttpEntity(response.getEntity()));
                    throw new EueExceptionMappingService().map(response);
                } finally {
                    EntityUtils.consume(response.getEntity());
                }
            } catch(HttpResponseException e) {
                throw new DefaultHttpResponseExceptionMappingService().map("Upload {0} failed", e, file);
            } catch(IOException e) {
                throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
            } catch(DecoderException e) {
                throw new ChecksumException(LocaleFactory.localizedString("Checksum failure", "Error"), e);
            }
        }

        @Override
        public long getContentLength() {
            return status.getLength();
        }
    }
    );
    fileid.cache(file, resourceId);
    return stream;
}
// Canceling mid-transfer must leave no file on the server and surface a
// TransferCanceledException from the stream; a subsequent rewrite with a
// fresh status is expected to be cancelable/cleaned up as well.
@Test(expected = TransferStatusCanceledException.class)
public void testWriteCancel() throws Exception {
    final EueResourceIdProvider fileid = new EueResourceIdProvider(session);
    final EueWriteFeature writer = new EueWriteFeature(session, fileid);
    final byte[] content = RandomUtils.nextBytes(32769);
    final Path test = new Path(String.format("{%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file));
    {
        final BytecountStreamListener count = new BytecountStreamListener();
        // Status that cancels itself once 32768 bytes have been sent
        final TransferStatus status = new TransferStatus() {
            @Override
            public void validate() throws ConnectionCanceledException {
                if(count.getSent() >= 32768) {
                    throw new TransferStatusCanceledException();
                }
                super.validate();
            }
        };
        status.setLength(content.length);
        final StatusOutputStream<EueWriteFeature.Chunk> out = writer.write(test, status, new DisabledConnectionCallback());
        assertNotNull(out);
        new StreamCopier(status, status).withListener(count).transfer(new ByteArrayInputStream(content), out);
        assertFalse(new DefaultFindFeature(session).find(test));
        try {
            out.getStatus();
            fail();
        } catch(TransferCanceledException e) {
            //
        }
    }
    // Rewrite
    final TransferStatus status = new TransferStatus();
    status.setLength(content.length);
    final StatusOutputStream<EueWriteFeature.Chunk> out = writer.write(test, status, new DisabledConnectionCallback());
    assertNotNull(out);
    new StreamCopier(status, status).transfer(new ByteArrayInputStream(content), out);
    assertFalse(new DefaultFindFeature(session).find(test));
    new EueDeleteFeature(session, fileid).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Moves files to the Google Drive trash (sets {@code trashed=true}), deletes
 * shared drives outright, permanently deletes files that are already hidden
 * (i.e. already trashed), and skips placeholders. File-id cache entries are
 * invalidated for every processed path.
 */
@Override
public void delete(final Map<Path, TransferStatus> files, final PasswordCallback prompt, final Callback callback) throws BackgroundException {
    for(Path f : files.keySet()) {
        if(f.isPlaceholder()) {
            log.warn(String.format("Ignore placeholder %s", f));
            continue;
        }
        try {
            if(new SimplePathPredicate(DriveHomeFinderService.SHARED_DRIVES_NAME).test(f.getParent())) {
                // Shared drives cannot be trashed; delete them directly.
                session.getClient().teamdrives().delete(fileid.getFileId(f)).execute();
            } else {
                if(f.attributes().isHidden()) {
                    // Already trashed; escalate to a permanent delete.
                    log.warn(String.format("Delete file %s already in trash", f));
                    new DriveDeleteFeature(session, fileid).delete(Collections.singletonList(f), prompt, callback);
                    continue;
                }
                callback.delete(f);
                final File properties = new File();
                properties.setTrashed(true);
                session.getClient().files().update(fileid.getFileId(f), properties)
                    .setSupportsAllDrives(new HostPreferences(session.getHost()).getBoolean("googledrive.teamdrive.enable")).execute();
            }
            fileid.cache(f, null);
        } catch(IOException e) {
            throw new DriveExceptionMappingService(fileid).map("Cannot delete {0}", e, f);
        }
    }
}
// First trash moves the file to the hidden trash (still findable by id);
// a second trash of the now-hidden file must delete it permanently.
@Test
public void testDeleteFromTrash() throws Exception {
    final DriveFileIdProvider fileid = new DriveFileIdProvider(session);
    final Path file = new DriveTouchFeature(session, fileid).touch(
        new Path(DriveHomeFinderService.MYDRIVE_FOLDER, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)),
        new TransferStatus());
    final String fileId = file.attributes().getFileId();
    new DriveTrashFeature(session, fileid).delete(Collections.singletonList(file), new DisabledPasswordCallback(), new Delete.DisabledCallback());
    assertFalse(new DriveFindFeature(session, fileid).find(file));
    assertTrue(new DriveFindFeature(session, fileid).find(file.withAttributes(new PathAttributes().withFileId(fileId))));
    final PathAttributes attributesInTrash = new DriveAttributesFinderFeature(session, fileid)
        .find(file.withAttributes(new PathAttributes().withFileId(fileId)));
    assertTrue(attributesInTrash.isHidden());
    new DriveTrashFeature(session, fileid).delete(Collections.singletonList(file.withAttributes(attributesInTrash)),
        new DisabledPasswordCallback(), new Delete.DisabledCallback());
    assertFalse(new DriveFindFeature(session, fileid).find(file.withAttributes(attributesInTrash)));
}
/**
 * Fails if the string does not fully match the given regex. Produces more
 * helpful messages when the regex is the literal actual string (suggesting
 * isEqualTo) or merely matches a substring (suggesting containsMatch).
 */
public void matches(@Nullable String regex) {
    checkNotNull(regex);
    if (actual == null) {
        failWithActual("expected a string that matches", regex);
        return;
    }
    if (actual.matches(regex)) {
        return;
    }
    if (regex.equals(actual)) {
        failWithoutActual(
            fact("expected to match", regex),
            fact("but was", actual),
            simpleFact("Looks like you want to use .isEqualTo() for an exact equality assertion."));
    } else if (Platform.containsMatch(actual, regex)) {
        failWithoutActual(
            fact("expected to match", regex),
            fact("but was", actual),
            simpleFact("Did you mean to call containsMatch() instead of match()?"));
    } else {
        failWithActual("expected to match", regex);
    }
}
// Sanity check: a full-string regex match succeeds.
@Test
@GwtIncompatible("Pattern")
public void stringMatchesPattern() {
    assertThat("abcaaadev").matches(Pattern.compile(".*aaa.*"));
}
/**
 * Returns the fully-qualified identifier for {@code metricName}, applying
 * no character filtering.
 */
@Override
public String getMetricIdentifier(String metricName) {
    return getMetricIdentifier(metricName, CharacterFilter.NO_OP_FILTER);
}
// Without any configured reporters, identifiers use the default delimiter,
// are not cached per-filter, and invalid reporter indices are tolerated.
@Test
void testScopeGenerationWithoutReporters() throws Exception {
    Configuration config = new Configuration();
    config.set(MetricOptions.SCOPE_NAMING_TM, "A.B.C.D");
    MetricRegistryImpl testRegistry =
        new MetricRegistryImpl(MetricRegistryTestUtils.fromConfiguration(config));
    try {
        TaskManagerMetricGroup group =
            TaskManagerMetricGroup.createTaskManagerMetricGroup(
                testRegistry, "host", new ResourceID("id"));
        assertThat(testRegistry.getReporters())
            .withFailMessage("MetricReporters list should be empty")
            .isEmpty();
        // default delimiter should be used
        assertThat(group.getMetricIdentifier("1", FILTER_C)).isEqualTo("A.B.X.D.1");
        // no caching should occur
        assertThat(group.getMetricIdentifier("1", FILTER_B)).isEqualTo("A.X.C.D.1");
        // invalid reporter indices do not throw errors
        assertThat(group.getMetricIdentifier("1", FILTER_B, -1, '.')).isEqualTo("A.X.C.D.1");
        assertThat(group.getMetricIdentifier("1", FILTER_B, 2, '.')).isEqualTo("A.X.C.D.1");
    } finally {
        testRegistry.closeAsync().get();
    }
}
/**
 * Tests whether this bounding box overlaps the given one, with deliberate slack.
 *
 * <p>NOTE(review): this method is a self-described hack (see FIXME below). It is
 * intentionally permissive — at low zoom it always reports an overlap, and two of
 * the latitude checks compare a field against itself, which makes the latitude
 * match overly generous. The existing tests (see testOverlap2) assert this
 * permissive behavior, so do not "fix" the comparisons without updating them.
 *
 * @param pBoundingBox the other box to test against
 * @param pZoom        current zoom level; below 3 the check is skipped entirely
 * @return true if the boxes are considered overlapping (possibly a false positive)
 */
public boolean overlaps(final BoundingBox pBoundingBox, double pZoom) {
    //FIXME this is a total hack but it works around a number of issues related to vertical map
    //replication and horiztonal replication that can cause polygons to completely disappear when
    //panning
    if (pZoom < 3)
        return true;
    boolean latMatch = false;
    boolean lonMatch = false;
    //vertical wrapping detection: the other box's south edge lies inside our latitude span
    if (pBoundingBox.mLatSouth <= mLatNorth && pBoundingBox.mLatSouth >= mLatSouth)
        latMatch = true;
    //normal case, non overlapping: our west edge falls inside the other box's longitude span
    if (mLonWest >= pBoundingBox.mLonWest && mLonWest <= pBoundingBox.mLonEast)
        lonMatch = true;
    //normal case, non overlapping
    //NOTE(review): second clause uses mLonWest, not mLonEast — possibly intentional
    //for the hack, possibly a copy-paste slip; confirm before changing
    if (mLonEast >= pBoundingBox.mLonWest && mLonWest <= pBoundingBox.mLonEast)
        lonMatch = true;
    //special case for when *this completely surrounds the pBoundbox
    if (mLonWest <= pBoundingBox.mLonWest && mLonEast >= pBoundingBox.mLonEast
            && mLatNorth >= pBoundingBox.mLatNorth && mLatSouth <= pBoundingBox.mLatSouth)
        return true;
    //normal case, non overlapping
    //NOTE(review): "mLatNorth <= mLatSouth" compares our own fields (false for any valid
    //box); likely intended pBoundingBox.mLatNorth — left as-is, tests rely on current behavior
    if (mLatNorth >= pBoundingBox.mLatSouth && mLatNorth <= mLatSouth)
        latMatch = true;
    //normal case, non overlapping
    //NOTE(review): "mLatSouth <= mLatSouth" is trivially true, so latMatch becomes true
    //whenever our south edge is at or above the other box's south edge
    if (mLatSouth >= pBoundingBox.mLatSouth && mLatSouth <= mLatSouth)
        latMatch = true;
    if (mLonWest > mLonEast) {
        //the date line is included in the bounding box
        //we want to match lon from the dateline to the eastern bounds of the box
        //and the dateline to the western bounds of the box
        if (mLonEast <= pBoundingBox.mLonEast && pBoundingBox.mLonWest >= mLonWest)
            lonMatch = true;
        if (mLonWest >= pBoundingBox.mLonEast && mLonEast <= pBoundingBox.mLonEast) {
            lonMatch = true;
            //retract the match when the other box lies entirely west of our west edge ...
            if (pBoundingBox.mLonEast < mLonWest && pBoundingBox.mLonWest < mLonWest)
                lonMatch = false;
            //... or entirely east of our east edge
            if (pBoundingBox.mLonEast > mLonEast && pBoundingBox.mLonWest > mLonEast)
                lonMatch = false;
        }
        if (mLonWest >= pBoundingBox.mLonEast && mLonEast >= pBoundingBox.mLonEast) {
            lonMatch = true;
        }
    }
    return latMatch && lonMatch;
}
@Test
public void testOverlap2() {
    // ________________
    // |      |       |
    // |    *****     |
    // |----*-+-*-----|
    // |    *****     |
    // |      |       |
    // ----------------
    //box is notated as * not too scale
    //test area is notated as ?
    BoundingBox box = new BoundingBox(1, 1, -1, -1);
    // A box always overlaps itself.
    Assert.assertTrue(box.overlaps(box, 4));
    // ________________
    // |    ?????     |
    // |    *****     |
    // |----*-+-*-----|
    // |    *****     |
    // |      |       |
    // ----------------
    //box is notated as * not too scale
    //test area is notated as ?
    //overlap on the norther edge
    BoundingBox item = new BoundingBox(2, 1, 1, -1);
    Assert.assertTrue(box.overlaps(item, 4));
    // Overlap must be symmetric.
    Assert.assertTrue(item.overlaps(box, 4));
    // ________________
    // |              |
    // |   ?*****     |
    // |---?*-+-*-----|
    // |   ?*****     |
    // |      |       |
    // ----------------
    //box is notated as * not too scale
    //test area is notated as ?
    //overlap on the western edge of box
    item = new BoundingBox(1, -1, -1, -2);
    Assert.assertTrue(box.overlaps(item, 4));
    // ________________
    // |              |
    // |    *****?    |
    // |--- *-+-*?----|
    // |    *****?    |
    // |      |       |
    // ----------------
    //box is notated as * not too scale
    //test area is notated as ?
    //overlap on the east edge of box
    item = new BoundingBox(1, 2, -1, 1.0);
    Assert.assertTrue(box.overlaps(item, 4));
    // ________________
    // |              |
    // |    *****     |
    // |--- *-+-*-----|
    // |    *****     |
    // |    ?????     |
    // ----------------
    //box is notated as * not too scale
    //test area is notated as ?
    //overlap on the southern edge of box
    item = new BoundingBox(-1, 1, -2, -1);
    Assert.assertTrue(box.overlaps(item, 4));
    // ________________
    // |              |
    // |    *****     |
    // |--- *-+-*-----|
    // |    *****     |
    // |      |       |
    // |    ?????     |
    // ----------------
    //box is notated as * not too scale
    //test area is notated as ?
    //Geometrically these boxes do NOT overlap (item lies strictly south of box),
    //but overlaps() is deliberately permissive (see the FIXME/hack in
    //BoundingBox.overlaps), so true is the expected — if surprising — result here.
    item = new BoundingBox(-2, 1, -4, -1);
    Assert.assertTrue(box.overlaps(item, 4));
}