focal_method: stringlengths 13 to 60.9k
test_case: stringlengths 25 to 109k
/** Completes the client span once the HTTP response has been received. */
public void handleReceive(HttpClientResponse response, Span span) {
    handleFinish(response, span);
}
/** A null response must be rejected eagerly with a descriptive NPE. */
@Test
void handleReceive_responseRequired() {
    brave.Span currentSpan = mock(brave.Span.class);

    assertThatThrownBy(() -> handler.handleReceive(null, currentSpan))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("response == null");
}
/**
 * Integer overload of power: widens both arguments to Double and delegates to
 * the Double overload. A null base or exponent propagates as null.
 */
@Udf(description = "Returns the INT base raised to the INT exponent.")
public Double power(
    @UdfParameter(value = "base", description = "the base of the power.") final Integer base,
    @UdfParameter(value = "exponent", description = "the exponent of the power.") final Integer exponent
) {
  final Double widenedBase = (base == null) ? null : base.doubleValue();
  final Double widenedExponent = (exponent == null) ? null : exponent.doubleValue();
  return power(widenedBase, widenedExponent);
}
/** Zero raised to a positive exponent is zero for every numeric overload. */
@Test
public void shouldHandleZeroBase() {
    final double tolerance = 0.000000000000001;
    assertThat(udf.power(0, 13), closeTo(0.0, tolerance));
    assertThat(udf.power(0L, 13L), closeTo(0.0, tolerance));
    assertThat(udf.power(0.0, 13.0), closeTo(0.0, tolerance));
}
/**
 * Parses a partial XML snippet (UTF-8) into an instance of the requested type
 * by delegating to the stream-based overload.
 */
public <T> T fromXmlPartial(String partial, Class<T> o) throws Exception {
    return fromXmlPartial(toInputStream(partial, UTF_8), o);
}
/**
 * A git partial containing an ignore filter must parse the filter and keep the
 * default branch when none is specified.
 */
// NOTE(review): the text block below was flattened onto one line by extraction;
// its exact internal whitespace is preserved as found.
@Test void shouldLoadIgnoresFromGitPartial() throws Exception { String gitPartial = """ <git url='file:///tmp/testGitRepo/project1' > <filter> <ignore pattern='x'/> </filter> </git>"""; GitMaterialConfig gitMaterial = xmlLoader.fromXmlPartial(gitPartial, GitMaterialConfig.class); assertThat(gitMaterial.getBranch()).isEqualTo(GitMaterialConfig.DEFAULT_BRANCH); Filter parsedFilter = gitMaterial.filter(); Filter expectedFilter = new Filter(); expectedFilter.add(new IgnoredFiles("x")); assertThat(parsedFilter).isEqualTo(expectedFilter); }
public static String getBaseUrl() { try { var requestAttrs = (ServletRequestAttributes) RequestContextHolder.currentRequestAttributes(); return getBaseUrl(requestAttrs.getRequest()); } catch (IllegalStateException e) { // method is called outside of web request context return ""; } }
/** Without X-Forwarded-* headers the base URL is built straight from the request. */
@Test
public void testWithoutXForwarded() throws Exception {
    doReturn("http").when(request).getScheme();
    doReturn("localhost").when(request).getServerName();
    doReturn(8080).when(request).getServerPort();
    doReturn("/").when(request).getContextPath();

    assertThat(UrlUtil.getBaseUrl(request)).isEqualTo("http://localhost:8080/");
}
/**
 * Registers an extra Tesseract config key/value pair after validating both
 * against the allow-list pattern. Key and value are stored trimmed.
 *
 * @throws IllegalArgumentException if either argument is null or fails validation
 */
public void addOtherTesseractConfig(String key, String value) {
    if (key == null) {
        throw new IllegalArgumentException("key must not be null");
    }
    if (value == null) {
        throw new IllegalArgumentException("value must not be null");
    }
    Matcher keyMatcher = ALLOWABLE_OTHER_PARAMS_PATTERN.matcher(key);
    if (!keyMatcher.find()) {
        throw new IllegalArgumentException("Key contains illegal characters: " + key);
    }
    Matcher valueMatcher = ALLOWABLE_OTHER_PARAMS_PATTERN.matcher(value);
    if (!valueMatcher.find()) {
        throw new IllegalArgumentException("Value contains illegal characters: " + value);
    }
    otherTesseractConfig.put(key.trim(), value.trim());
    userConfigured.add("otherTesseractConfig");
}
/** Keys containing whitespace fall outside the allow-list and must be rejected. */
@Test
public void testBadOtherKey() {
    TesseractOCRConfig config = new TesseractOCRConfig();
    assertThrows(IllegalArgumentException.class,
            () -> config.addOtherTesseractConfig("bad bad", "bad"));
}
/**
 * Parses an expression with default parsing options.
 *
 * @deprecated use {@code createExpression(String, ParsingOptions)} instead
 */
@Deprecated
public Expression createExpression(String expression) {
    return createExpression(expression, new ParsingOptions());
}
/**
 * Regression guard: deeply nested parentheses must parse without the parser
 * backtracking exponentially (this test would hang if it did).
 */
@Test
public void testPossibleExponentialBacktracking() {
    SQL_PARSER.createExpression("(((((((((((((((((((((((((((true)))))))))))))))))))))))))))");
}
@Udf(description = "Splits a string into an array of substrings based on a delimiter.") public List<String> split( @UdfParameter( description = "The string to be split. If NULL, then function returns NULL.") final String string, @UdfParameter( description = "The delimiter to split a string by. If NULL, then function returns NULL.") final String delimiter) { if (string == null || delimiter == null) { return null; } // Java split() accepts regular expressions as a delimiter, but the behavior of this UDF split() // is to accept only literal strings. This method uses Guava Splitter instead, which does not // accept any regex pattern. This is to avoid a confusion to users when splitting by regex // special characters, such as '.' and '|'. try { // Guava Splitter does not accept empty delimiters. Use the Java split() method instead. if (delimiter.isEmpty()) { return Arrays.asList(EMPTY_DELIMITER.split(string)); } else { return Splitter.on(delimiter).splitToList(string); } } catch (final Exception e) { throw new KsqlFunctionException( String.format("Invalid delimiter '%s' in the split() function.", delimiter), e); } }
/** When the delimiter never occurs, split returns the input as its single element. */
@Test
public void shouldReturnOriginalBytesOnNotFoundDelimiter() {
    assertThat(splitUdf.split(EMPTY_BYTES, DOT_BYTES), contains(EMPTY_BYTES));
    assertThat(splitUdf.split(X_DASH_Y_BYTES, DOT_BYTES), contains(X_DASH_Y_BYTES));
}
public OffsetRange[] getNextOffsetRanges(Option<String> lastCheckpointStr, long sourceLimit, HoodieIngestionMetrics metrics) { // Come up with final set of OffsetRanges to read (account for new partitions, limit number of events) long maxEventsToReadFromKafka = getLongWithAltKeys(props, KafkaSourceConfig.MAX_EVENTS_FROM_KAFKA_SOURCE); long numEvents; if (sourceLimit == Long.MAX_VALUE) { numEvents = maxEventsToReadFromKafka; LOG.info("SourceLimit not configured, set numEvents to default value : {}", maxEventsToReadFromKafka); } else { numEvents = sourceLimit; } long minPartitions = getLongWithAltKeys(props, KafkaSourceConfig.KAFKA_SOURCE_MIN_PARTITIONS); LOG.info("getNextOffsetRanges set config {} to {}", KafkaSourceConfig.KAFKA_SOURCE_MIN_PARTITIONS.key(), minPartitions); return getNextOffsetRanges(lastCheckpointStr, numEvents, minPartitions, metrics); }
/**
 * Verifies how KAFKA_SOURCE_MIN_PARTITIONS interacts with a 2-partition topic:
 * unset or below the partition count it has no effect; above it, each topic
 * partition is split into multiple offset ranges.
 */
@Test
public void testGetNextOffsetRangesWithMinPartitionsForMultiPartition() {
    // Publish 1000 records across a 2-partition topic, reading from "earliest".
    HoodieTestDataGenerator dataGenerator = new HoodieTestDataGenerator();
    testUtils.createTopic(testTopicName, 2);
    testUtils.sendMessages(testTopicName, Helpers.jsonifyRecords(dataGenerator.generateInserts("000", 1000)));
    TypedProperties props = getConsumerConfigs("earliest", KAFKA_CHECKPOINT_TYPE_STRING);
    // default no minPartition or minPartition less than TopicPartitions
    KafkaOffsetGen kafkaOffsetGen = new KafkaOffsetGen(props);
    OffsetRange[] nextOffsetRanges = kafkaOffsetGen.getNextOffsetRanges(Option.empty(), 300, metrics);
    // 300 events over 2 partitions -> one range of 150 events per partition.
    assertEquals(2, nextOffsetRanges.length);
    assertEquals(0, nextOffsetRanges[0].partition());
    assertEquals(0, nextOffsetRanges[0].fromOffset());
    assertEquals(150, nextOffsetRanges[0].untilOffset());
    assertEquals(1, nextOffsetRanges[1].partition());
    assertEquals(0, nextOffsetRanges[1].fromOffset());
    assertEquals(150, nextOffsetRanges[1].untilOffset());
    // minPartitions = 1 is below the topic's partition count, so the result is unchanged.
    props.put(KafkaSourceConfig.KAFKA_SOURCE_MIN_PARTITIONS.key(), 1L);
    kafkaOffsetGen = new KafkaOffsetGen(props);
    nextOffsetRanges = kafkaOffsetGen.getNextOffsetRanges(Option.empty(), 300, metrics);
    assertEquals(2, nextOffsetRanges.length);
    assertEquals(0, nextOffsetRanges[0].partition());
    assertEquals(0, nextOffsetRanges[0].fromOffset());
    assertEquals(150, nextOffsetRanges[0].untilOffset());
    assertEquals(1, nextOffsetRanges[1].partition());
    assertEquals(0, nextOffsetRanges[1].fromOffset());
    assertEquals(150, nextOffsetRanges[1].untilOffset());
    // minPartition more than TopicPartitions
    props.put(KafkaSourceConfig.KAFKA_SOURCE_MIN_PARTITIONS.key(), 4L);
    kafkaOffsetGen = new KafkaOffsetGen(props);
    nextOffsetRanges = kafkaOffsetGen.getNextOffsetRanges(Option.empty(), 300, metrics);
    // 4 ranges: each topic partition is split into two 75-event ranges.
    assertEquals(4, nextOffsetRanges.length);
    assertEquals(0, nextOffsetRanges[0].partition());
    assertEquals(0, nextOffsetRanges[0].fromOffset());
    assertEquals(75, nextOffsetRanges[0].untilOffset());
    assertEquals(0, nextOffsetRanges[1].partition());
    assertEquals(75, nextOffsetRanges[1].fromOffset());
    assertEquals(150, nextOffsetRanges[1].untilOffset());
    assertEquals(1, nextOffsetRanges[2].partition());
    assertEquals(0, nextOffsetRanges[2].fromOffset());
    assertEquals(75, nextOffsetRanges[2].untilOffset());
    assertEquals(1, nextOffsetRanges[3].partition());
    assertEquals(75, nextOffsetRanges[3].fromOffset());
    assertEquals(150, nextOffsetRanges[3].untilOffset());
}
/**
 * Routes a record to a partition: statistics wrappers refresh the range
 * delegate and are themselves spread round-robin; records use the delegate
 * when one exists, otherwise fall back to round-robin.
 */
@Override
public int partition(StatisticsOrRecord wrapper, int numPartitions) {
    if (wrapper.hasStatistics()) {
        this.delegatePartitioner = delegatePartitioner(wrapper.statistics());
        return nextRoundRobin(numPartitions);
    }
    if (delegatePartitioner != null) {
        return delegatePartitioner.partition(wrapper.record(), numPartitions);
    }
    int partition = nextRoundRobin(numPartitions);
    LOG.trace("Statistics not available. Round robin to partition {}", partition);
    return partition;
}

// Advances the shared counter and maps it onto [0, numPartitions).
private int nextRoundRobin(int numPartitions) {
    return (int) (roundRobinCounter(numPartitions).getAndIncrement() % numPartitions);
}
@Test public void testRoundRobinRecordsBeforeStatisticsAvailable() { RangePartitioner partitioner = new RangePartitioner(SCHEMA, SORT_ORDER); Set<Integer> results = Sets.newHashSetWithExpectedSize(numPartitions); for (int i = 0; i < numPartitions; ++i) { results.add( partitioner.partition( StatisticsOrRecord.fromRecord(GenericRowData.of(StringData.fromString("a"), 1)), numPartitions)); } // round-robin. every partition should get an assignment assertThat(results).containsExactlyInAnyOrder(0, 1, 2, 3); }
/**
 * Routes a message against all stream rules and returns the matching streams.
 * AND streams are dropped on the first failing rule; OR streams are locked in
 * on the first matching rule. Finally, streams flagged to remove matches from
 * the default stream attempt that removal (counted when it already happened).
 */
public List<Stream> match(Message message) {
    final Set<Stream> result = Sets.newHashSet();
    // Streams whose outcome can no longer change (AND already failed, OR
    // already matched) are skipped via this blacklist of stream ids.
    final Set<String> blackList = Sets.newHashSet();
    for (final Rule rule : rulesList) {
        if (blackList.contains(rule.getStreamId())) {
            continue;
        }
        final StreamRule streamRule = rule.getStreamRule();
        final StreamRuleType streamRuleType = streamRule.getType();
        final Stream.MatchingType matchingType = rule.getMatchingType();
        // Rules that require field presence cannot match when the field is absent.
        if (!ruleTypesNotNeedingFieldPresence.contains(streamRuleType) && !message.hasField(streamRule.getField())) {
            if (matchingType == Stream.MatchingType.AND) {
                result.remove(rule.getStream());
                // blacklist stream because it can't match anymore
                blackList.add(rule.getStreamId());
            }
            continue;
        }
        final Stream stream;
        // REGEX rules are evaluated under a timeout to guard against pathological patterns.
        if (streamRuleType != StreamRuleType.REGEX) {
            stream = rule.match(message);
        } else {
            stream = rule.matchWithTimeOut(message, streamProcessingTimeout, TimeUnit.MILLISECONDS);
        }
        if (stream == null) {
            if (matchingType == Stream.MatchingType.AND) {
                result.remove(rule.getStream());
                // blacklist stream because it can't match anymore
                blackList.add(rule.getStreamId());
            }
        } else {
            result.add(stream);
            if (matchingType == Stream.MatchingType.OR) {
                // blacklist stream because it is already matched
                blackList.add(rule.getStreamId());
            }
        }
    }
    final Stream defaultStream = defaultStreamProvider.get();
    boolean alreadyRemovedDefaultStream = false;
    for (Stream stream : result) {
        if (stream.getRemoveMatchesFromDefaultStream()) {
            // Only the first matching stream actually removes the default
            // stream; later ones just observe that it is already gone.
            if (alreadyRemovedDefaultStream || message.removeStream(defaultStream)) {
                alreadyRemovedDefaultStream = true;
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Successfully removed default stream <{}> from message <{}>", defaultStream.getId(), message.getId());
                }
            } else {
                // A previously executed message processor (or Illuminate) has likely already removed the
                // default stream from the message. Now, the message has matched a stream in the Graylog
                // MessageFilterChain, and the matching stream is also set to remove the default stream.
                // This is usually from user-defined stream rules, and is generally not a problem.
                cannotRemoveDefaultMeter.inc();
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Couldn't remove default stream <{}> from message <{}>", defaultStream.getId(), message.getId());
                }
            }
        }
    }
    return ImmutableList.copyOf(result);
}
/**
 * A stream with two rules that both match yields exactly one match result.
 */
// NOTE(review): the method name says "AndStream" but the mocked matching type
// is OR — confirm whether the name or the stub is stale.
@Test
public void testAndStreamWithMultipleRules() {
    final String dummyField = "dummyField";
    final String dummyValue = "dummyValue";
    final StreamRule streamRule1 = getStreamRuleMock("StreamRule1Id", StreamRuleType.EXACT, dummyField, dummyValue);
    final StreamRule streamRule2 = getStreamRuleMock("StreamRule2Id", StreamRuleType.EXACT, dummyField, dummyValue);
    final Stream stream = mock(Stream.class);
    when(stream.getId()).thenReturn("Stream1Id");
    when(stream.getMatchingType()).thenReturn(Stream.MatchingType.OR);
    when(stream.getStreamRules()).thenReturn(Lists.newArrayList(streamRule1, streamRule2));
    final Message message = mock(Message.class);
    when(message.getField(eq(dummyField))).thenReturn(dummyValue);
    final StreamRouterEngine engine = newEngine(Lists.newArrayList(stream));
    final List<Stream> result = engine.match(message);
    // The stream matches once even though two rules fired.
    assertThat(result).hasSize(1);
    assertThat(result).contains(stream);
}
/** Returns the allocator context's current round-robin directory index. */
int getCurrentDirectoryIndex() {
    return obtainContext(contextCfgItemName).getCurrentDirectoryIndex();
}
/**
 * With two non-existent buffer dirs configured, temp files must be created in
 * them and allocation must rotate round-robin between the two.
 */
@Test (timeout = 30000)
public void testDirsNotExist() throws Exception {
    assumeNotWindows();
    String dir2 = buildBufferDir(ROOT, 2);
    String dir3 = buildBufferDir(ROOT, 3);
    try {
        conf.set(CONTEXT, dir2 + "," + dir3);
        // create the first file, and then figure the round-robin sequence
        createTempFile(SMALL_FILE_SIZE);
        int firstDirIdx = (dirAllocator.getCurrentDirectoryIndex() == 0) ? 2 : 3;
        int secondDirIdx = (firstDirIdx == 2) ? 3 : 2;
        // check if tmp dirs are allocated in a round-robin manner
        validateTempDirCreation(buildBufferDir(ROOT, firstDirIdx));
        validateTempDirCreation(buildBufferDir(ROOT, secondDirIdx));
        validateTempDirCreation(buildBufferDir(ROOT, firstDirIdx));
    } finally {
        rmBufferDirs();
    }
}
/** Returns the configured SAML application id, failing fast when unset. */
String getApplicationId() {
    return configuration.get(APPLICATION_ID)
        .orElseThrow(() -> new IllegalArgumentException("Application ID is missing"));
}
/** The SAML application id setting is read through verbatim. */
@Test
public void return_application_id() {
    settings.setProperty("sonar.auth.saml.applicationId", "MyApp");

    assertThat(underTest.getApplicationId()).isEqualTo("MyApp");
}
/** Removes and returns the head of the queue by delegating to removeFirst(). */
@Override
public V remove() {
    return removeFirst();
}
/** remove() takes elements from the head in FIFO order until the queue is empty. */
@Test
public void testRemove() {
    RQueue<Integer> queue = getQueue();
    for (int value = 1; value <= 4; value++) {
        queue.add(value);
    }

    queue.remove();
    queue.remove();
    assertThat(queue).containsExactly(3, 4);

    queue.remove();
    queue.remove();
    Assertions.assertTrue(queue.isEmpty());
}
/** Reads the next eight bytes as an unsigned 64-bit number, checking availability first. */
public long getNumber8() {
    checkAvailable(8);
    return get(Wire::getUInt64, 8);
}
/** A zero-filled 8-byte frame reads back as the value 0. */
@Test
public void testGetLong() {
    ZNeedle needle = new ZNeedle(new ZFrame(new byte[8]));
    assertThat(needle.getNumber8(), is(0L));
}
public static Map<String, ConnectorConfigField> createConnectorFieldsAsMap( final ConfigDef configDef, final Class<?> configClass, final Set<String> requiredFields, final Map<String, Object> overriddenDefaultValues) { // first we extract deprecated fields final Set<String> deprecatedFields = getDeprecatedFieldsFromConfigClass(configClass); return createConnectorFieldsAsMap(configDef, deprecatedFields, requiredFields, overriddenDefaultValues); }
/**
 * The factory must map every defined field, applying overridden defaults,
 * required flags and deprecated flags to the right entries.
 */
@Test
void testIfCreatesFieldsMapWithDeprecatedFields() {
    final ConfigDef configDef = new ConfigDef()
        .define("test.field.1", ConfigDef.Type.STRING, ConfigDef.Importance.MEDIUM, "docs1")
        .define("test.field.2", ConfigDef.Type.CLASS, ConfigDef.Importance.MEDIUM, "docs2")
        .define("test.field.3", ConfigDef.Type.PASSWORD, ConfigDef.Importance.MEDIUM, "doc3")
        .define("test.field.4", ConfigDef.Type.INT, ConfigDef.Importance.MEDIUM, "doc4");
    final Set<String> deprecatedFields = new HashSet<>(Collections.singletonList("test.field.2"));
    final Set<String> requiredFields = new HashSet<>(Collections.singletonList("test.field.1"));
    final Map<String, Object> overridenFields = Collections.singletonMap("test.field.1", "I am overriden");
    final Map<String, ConnectorConfigField> connectorConfigToField =
        ConnectorConfigFieldsFactory.createConnectorFieldsAsMap(configDef, deprecatedFields, requiredFields, overridenFields);
    assertEquals(4, connectorConfigToField.size());
    // field 1: camel-cased name, overridden default, required, not deprecated
    final ConnectorConfigField connectorConfigField1 = connectorConfigToField.get("test.field.1");
    assertEquals("testField1", connectorConfigField1.getFieldName());
    assertEquals("I am overriden", connectorConfigField1.getDefaultValue());
    assertTrue(connectorConfigField1.isRequired());
    assertFalse(connectorConfigField1.isDeprecated());
    // field 2: optional but flagged deprecated
    final ConnectorConfigField connectorConfigField2 = connectorConfigToField.get("test.field.2");
    assertFalse(connectorConfigField2.isRequired());
    assertTrue(connectorConfigField2.isDeprecated());
}
/**
 * Builds the plain-text email for a built-in quality-profile change
 * notification. Returns null for notification types this formatter does not
 * handle. Profiles are listed sorted by language name, then profile name, each
 * with a changelog deep link and rule-change counters.
 */
@Override
@CheckForNull
public EmailMessage format(Notification notification) {
    if (!BuiltInQPChangeNotification.TYPE.equals(notification.getType())) {
        return null;
    }
    BuiltInQPChangeNotificationBuilder profilesNotification = parse(notification);
    StringBuilder message = new StringBuilder("The following built-in profiles have been updated:\n\n");
    profilesNotification.getProfiles().stream()
        .sorted(Comparator.comparing(Profile::getLanguageName).thenComparing(Profile::getProfileName))
        .forEach(profile -> {
            // Profile header with a deep link to its changelog over the update window.
            message.append("\"")
                .append(profile.getProfileName())
                .append("\" - ")
                .append(profile.getLanguageName())
                .append(": ")
                .append(server.getPublicRootUrl()).append("/profiles/changelog?language=")
                .append(profile.getLanguageKey())
                .append("&name=")
                .append(encode(profile.getProfileName()))
                .append("&since=")
                .append(formatDate(new Date(profile.getStartDate())))
                .append("&to=")
                .append(formatDate(new Date(profile.getEndDate())))
                .append("\n");
            // Counters are only printed when non-zero.
            int newRules = profile.getNewRules();
            if (newRules > 0) {
                message.append(" ").append(newRules).append(" new rule")
                    .append(plural(newRules))
                    .append('\n');
            }
            int updatedRules = profile.getUpdatedRules();
            if (updatedRules > 0) {
                message.append(" ").append(updatedRules).append(" rule")
                    .append(updatedRules > 1 ? "s have been updated" : " has been updated")
                    .append("\n");
            }
            int removedRules = profile.getRemovedRules();
            if (removedRules > 0) {
                message.append(" ").append(removedRules).append(" rule")
                    .append(plural(removedRules))
                    .append(" removed\n");
            }
            message.append("\n");
        });
    message.append("This is a good time to review your quality profiles and update them to benefit from the latest evolutions: ");
    message.append(server.getPublicRootUrl()).append("/profiles");
    // And finally return the email that will be sent
    return new EmailMessage()
        .setMessageId(BuiltInQPChangeNotification.TYPE)
        .setSubject("Built-in quality profiles have been updated")
        .setPlainTextMessage(message.toString());
}
/**
 * Profiles added out of order must appear in the email sorted first by
 * language name, then by profile name.
 */
@Test
public void notification_contains_profiles_sorted_by_language_then_by_profile_name() {
    String languageKey1 = "langkey1_" + randomAlphanumeric(20);
    String languageName1 = "langName1_" + randomAlphanumeric(20);
    String languageKey2 = "langKey2_" + randomAlphanumeric(20);
    String languageName2 = "langName2_" + randomAlphanumeric(20);
    String profileName1 = "profile1_" + randomAlphanumeric(20);
    String profileName2 = "profile2_" + randomAlphanumeric(20);
    String profileName3 = "profile3_" + randomAlphanumeric(20);
    // Deliberately added in non-sorted order to exercise the sorting.
    BuiltInQPChangeNotificationBuilder notification = new BuiltInQPChangeNotificationBuilder()
        .addProfile(Profile.newBuilder().setProfileName(profileName3).setLanguageKey(languageKey2).setLanguageName(languageName2).build())
        .addProfile(Profile.newBuilder().setProfileName(profileName2).setLanguageKey(languageKey1).setLanguageName(languageName1).build())
        .addProfile(Profile.newBuilder().setProfileName(profileName1).setLanguageKey(languageKey2).setLanguageName(languageName2).build());
    EmailMessage emailMessage = underTest.format(notification.build());
    assertThat(emailMessage.getMessage()).containsSubsequence(
        "\"" + profileName2 + "\" - " + languageName1,
        "\"" + profileName1 + "\" - " + languageName2,
        "\"" + profileName3 + "\" - " + languageName2);
}
@Override public void collect(MetricsEmitter metricsEmitter) { for (Map.Entry<MetricKey, KafkaMetric> entry : ledger.getMetrics()) { MetricKey metricKey = entry.getKey(); KafkaMetric metric = entry.getValue(); try { collectMetric(metricsEmitter, metricKey, metric); } catch (Exception e) { // catch and log to continue processing remaining metrics log.error("Error processing Kafka metric {}", metricKey, e); } } }
/**
 * A CumulativeSum sensor collected in delta mode must emit a Sum metric whose
 * data point equals the total of the recorded values.
 */
@Test
public void testMeasurableTotalDeltaMetrics() {
    Sensor sensor = metrics.sensor("test");
    sensor.add(metricName, new CumulativeSum());
    sensor.record(10L);
    sensor.record(5L);

    // Collect metrics.
    testEmitter.onlyDeltaMetrics(true);
    collector.collect(testEmitter);
    List<SinglePointMetric> result = testEmitter.emittedMetrics();

    // Should get exactly 2 Kafka measurables since Metrics always includes a count measurable.
    assertEquals(2, result.size());
    Metric counter = result.stream()
        .flatMap(metrics -> Stream.of(metrics.builder().build()))
        .filter(metric -> metric.getName().equals("test.domain.group1.name1")).findFirst().get();
    assertTrue(counter.hasSum());
    assertEquals(tags, getTags(counter.getSum().getDataPoints(0).getAttributesList()));
    // 10 + 5 recorded above.
    assertEquals(15, counter.getSum().getDataPoints(0).getAsDouble(), 0.0);
}
/** Parses the natural-language date string relative to the current wall-clock time. */
public Result parse(final String string) throws DateNotParsableException {
    final Date referenceDate = new Date();
    return this.parse(string, referenceDate);
}
/** Every phrase in this set must resolve both range ends to midnight. */
@Test
public void testParseAlignToStartOfDay() throws Exception {
    final DateTimeFormatter timeOnly = DateTimeFormat.forPattern("HH:mm:ss");
    for (String phrase : testsThatAlignToStartOfDay) {
        NaturalDateParser.Result result = naturalDateParser.parse(phrase);
        assertNotNull(result.getFrom());
        assertNotNull(result.getTo());
        assertThat(timeOnly.print(result.getFrom())).as("time part of date should equal 00:00:00 in").isEqualTo("00:00:00");
        assertThat(timeOnly.print(result.getTo())).as("time part of date should equal 00:00:00 in").isEqualTo("00:00:00");
    }
}
/**
 * Starts a client span for the outgoing request. A null request is rejected
 * before the sampler ever sees it.
 */
public Span handleSend(HttpClientRequest request) {
    if (request == null) {
        throw new NullPointerException("request == null");
    }
    return handleSend(request, tracer.nextSpan(httpSampler, request));
}
/** A never-sample client sampler produces a noop span without consulting the trace sampler. */
@Test
void handleSend_neverSamplerSpecialCased() {
    Sampler sampler = mock(Sampler.class);
    init(httpTracingBuilder(tracingBuilder().sampler(sampler))
        .clientSampler(SamplerFunctions.neverSample()));

    assertThat(handler.handleSend(request).isNoop()).isTrue();

    verifyNoMoreInteractions(sampler);
}
/**
 * Converts a Paimon InternalRow into a SeaTunnelRow field by field, using the
 * SeaTunnel row type for SQL-type dispatch and the Paimon table schema to look
 * up timestamp precision. Nested rows are converted recursively.
 */
public static SeaTunnelRow convert(
        InternalRow rowData, SeaTunnelRowType seaTunnelRowType, TableSchema tableSchema) {
    Object[] objects = new Object[seaTunnelRowType.getTotalFields()];
    for (int i = 0; i < objects.length; i++) {
        // judge the field is or not equals null
        if (rowData.isNullAt(i)) {
            objects[i] = null;
            continue;
        }
        SeaTunnelDataType<?> fieldType = seaTunnelRowType.getFieldType(i);
        String fieldName = seaTunnelRowType.getFieldName(i);
        switch (fieldType.getSqlType()) {
            case TINYINT:
                objects[i] = rowData.getByte(i);
                break;
            case SMALLINT:
                objects[i] = rowData.getShort(i);
                break;
            case INT:
                objects[i] = rowData.getInt(i);
                break;
            case BIGINT:
                objects[i] = rowData.getLong(i);
                break;
            case FLOAT:
                objects[i] = rowData.getFloat(i);
                break;
            case DOUBLE:
                objects[i] = rowData.getDouble(i);
                break;
            case DECIMAL:
                // Precision and scale come from the declared decimal type.
                Decimal decimal = rowData.getDecimal(
                        i,
                        ((DecimalType) fieldType).getPrecision(),
                        ((DecimalType) fieldType).getScale());
                objects[i] = decimal.toBigDecimal();
                break;
            case STRING:
                objects[i] = rowData.getString(i).toString();
                break;
            case BOOLEAN:
                objects[i] = rowData.getBoolean(i);
                break;
            case BYTES:
                objects[i] = rowData.getBinary(i);
                break;
            case DATE:
                // Paimon stores dates as epoch-day ints.
                int dateInt = rowData.getInt(i);
                objects[i] = DateTimeUtils.toLocalDate(dateInt);
                break;
            case TIMESTAMP:
                // Look up the field's declared precision in the Paimon schema;
                // fall back to the default when the field is not found.
                int precision = TimestampType.DEFAULT_PRECISION;
                Optional<DataField> precisionOptional =
                        tableSchema.fields().stream()
                                .filter(dataField -> dataField.name().equals(fieldName))
                                .findFirst();
                if (precisionOptional.isPresent()) {
                    precision = ((TimestampType) precisionOptional.get().type()).getPrecision();
                }
                Timestamp timestamp = rowData.getTimestamp(i, precision);
                objects[i] = timestamp.toLocalDateTime();
                break;
            case ARRAY:
                InternalArray paimonArray = rowData.getArray(i);
                ArrayType<?, ?> seatunnelArray = (ArrayType<?, ?>) fieldType;
                objects[i] = convertArrayType(
                        fieldName, paimonArray, seatunnelArray.getElementType());
                break;
            case MAP:
                // Zip the key and value arrays into a java.util.Map.
                MapType<?, ?> mapType = (MapType<?, ?>) fieldType;
                InternalMap map = rowData.getMap(i);
                InternalArray keyArray = map.keyArray();
                InternalArray valueArray = map.valueArray();
                SeaTunnelDataType<?> keyType = mapType.getKeyType();
                SeaTunnelDataType<?> valueType = mapType.getValueType();
                Object[] key = (Object[]) convertArrayType(fieldName, keyArray, keyType);
                Object[] value = (Object[]) convertArrayType(fieldName, valueArray, valueType);
                Map<Object, Object> mapData = new HashMap<>();
                for (int j = 0; j < key.length; j++) {
                    mapData.put(key[j], value[j]);
                }
                objects[i] = mapData;
                break;
            case ROW:
                // Recurse into the nested row with its own row type.
                SeaTunnelDataType<?> rowType = seaTunnelRowType.getFieldType(i);
                InternalRow row = rowData.getRow(i, ((SeaTunnelRowType) rowType).getTotalFields());
                objects[i] = convert(row, (SeaTunnelRowType) rowType, tableSchema);
                break;
            default:
                throw CommonError.unsupportedDataType(
                        PaimonConfig.CONNECTOR_IDENTITY,
                        fieldType.getSqlType().toString(),
                        fieldName);
        }
    }
    return new SeaTunnelRow(objects);
}
/**
 * Round-trips a Paimon InternalRow through RowConverter and checks the result.
 * Fix: JUnit's assertEquals takes the EXPECTED value first; the original
 * passed (actual, expected), which yields misleading failure messages.
 */
@Test
public void paimonToSeaTunnel() {
    SeaTunnelRow actual = RowConverter.convert(internalRow, seaTunnelRowType, tableSchema);
    Assertions.assertEquals(seaTunnelRow, actual);
}
/**
 * Splits a string on a single literal separator character, delegating to
 * splitWorker with preserveAllTokens disabled.
 */
public static String[] split(String str, char separatorChar) {
    return splitWorker(str, separatorChar, false);
}
/** split() handles null input, the empty string, and a multi-token string. */
@Test
public void testSplit() {
    Assert.assertNull(EagleEyeCoreUtils.split(null, 'a'));
    Assert.assertArrayEquals(new String[0], EagleEyeCoreUtils.split("", ','));
    String[] expected = {"foo", "bar", "baz"};
    Assert.assertArrayEquals(expected, EagleEyeCoreUtils.split("foo,bar,baz", ','));
}
/** Returns the path properties, reading the shared state under a read lock. */
public Map<String, Map<String, String>> get() {
    try (LockResource ignored = new LockResource(mLock.readLock())) {
        return mState.getProperties();
    }
}
/** A freshly constructed PathProperties exposes no properties. */
@Test
public void empty() {
    PathProperties pathProperties = new PathProperties();
    Assert.assertTrue(pathProperties.get().isEmpty());
}
/**
 * Moves the write position to {@code newPos}, growing the page list as needed
 * and tracking the high-water mark in {@code _written}.
 *
 * Fix: the original newPos == 0 fast path updated _currentPage and
 * _offsetInPage but did NOT reset _currentPageStartOffset, leaving it stale
 * after a seek(0) that followed a seek into a later page. The unified path
 * below computes all three fields consistently for every position.
 *
 * @throws IllegalArgumentException if newPos is negative
 */
@Override
public void seek(long newPos) {
    if (newPos < 0) {
        throw new IllegalArgumentException("New position cannot be negative");
    }
    int pageIdx = (int) (newPos / _pageSize);
    if (pageIdx >= _pages.size()) {
        // Grow the page list until the target page exists; new pages use the
        // same big-endian ordering as the rest of the stream.
        _pages.ensureCapacity(pageIdx + 1);
        while (_pages.size() <= pageIdx) {
            _pages.add(_allocator.allocate().order(ByteOrder.BIG_ENDIAN));
        }
    }
    _currentPage = _pages.get(pageIdx);
    _currentPageStartOffset = pageIdx * (long) _pageSize;
    _offsetInPage = (int) (newPos % _pageSize);
    _written = Math.max(_written, newPos);
}
/** Seeking to a negative offset must be rejected. */
@Test
void seekIntoNegative() {
    assertThrows(IllegalArgumentException.class,
            () -> _pagedPinotOutputStream.seek(-1));
}
/**
 * Fetches shard-level index stats via the low-level REST client, optionally
 * scoped to the given indices (all indices when none are passed).
 */
public IndicesStatsResponse indicesStats(String... indices) {
    return execute(() -> {
        String endpoint = indices.length > 0
            ? "/" + String.join(",", indices) + "/_stats"
            : "/_stats";
        Request request = new Request("GET", endpoint);
        request.addParameter("level", "shards");
        Response response = restHighLevelClient.getLowLevelClient().performRequest(request);
        return IndicesStatsResponse.toIndicesStatsResponse(
            gson.fromJson(EntityUtils.toString(response.getEntity()), JsonObject.class));
    }, () -> computeDetailsAsString(indices));
}
/** indicesStats() issues GET /_stats and parses the JSON payload. */
@Test
public void should_call_indices_stat_api() throws Exception {
    HttpEntity httpEntity = mock(HttpEntity.class);
    when(httpEntity.getContent()).thenReturn(new ByteArrayInputStream(EXAMPLE_INDICES_STATS_JSON.getBytes()));
    Response statsResponse = mock(Response.class);
    when(statsResponse.getEntity()).thenReturn(httpEntity);
    when(restClient.performRequest(argThat(new RawRequestMatcher("GET", "/_stats"))))
        .thenReturn(statsResponse);

    assertThat(underTest.indicesStats()).isNotNull();
}
/** Lists the directory contents using the default '/' delimiter. */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    final String delimiter = String.valueOf(Path.DELIMITER);
    return this.list(directory, listener, delimiter);
}
/**
 * A placeholder directory named "." created in the bucket must show up in the
 * object listing; cleaned up afterwards.
 */
@Test
public void testListPlaceholderDot() throws Exception {
    final Path container = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path placeholder = new GoogleStorageDirectoryFeature(session).mkdir(
        new Path(container, ".", EnumSet.of(Path.Type.directory)), new TransferStatus());
    assertTrue(new GoogleStorageObjectListService(session).list(container, new DisabledListProgressListener()).contains(placeholder));
    // Clean up the placeholder so the bucket is left unchanged.
    new GoogleStorageDeleteFeature(session).delete(Collections.singletonList(placeholder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Loads SCM configuration and view metadata from the plugin and registers it
 * in the metadata store. A null configuration or view is a plugin contract
 * violation and raises a RuntimeException; framework errors are logged and
 * swallowed so one bad plugin cannot abort loading.
 */
void fetchSCMMetaData(GoPluginDescriptor pluginDescriptor) {
    final String pluginId = pluginDescriptor.id();
    try {
        SCMPropertyConfiguration scmConfiguration = scmExtension.getSCMConfiguration(pluginId);
        if (scmConfiguration == null) {
            throw new RuntimeException(format("Plugin[%s] returned null SCM configuration", pluginId));
        }
        SCMView scmView = scmExtension.getSCMView(pluginId);
        if (scmView == null) {
            throw new RuntimeException(format("Plugin[%s] returned null SCM view", pluginId));
        }
        scmMetadataStore.addMetadataFor(pluginId, new SCMConfigurations(scmConfiguration), scmView);
    } catch (GoPluginFrameworkException e) {
        LOGGER.error("Failed to fetch SCM metadata for plugin : {}", pluginId, e);
    }
}
/**
 * A plugin returning a null SCM view must fail the metadata fetch and leave
 * the metadata store empty.
 * Fix: the original try/catch had no fail() call, so the test passed silently
 * when no exception was thrown; assertThatThrownBy enforces the throw.
 */
@Test
public void shouldThrowExceptionWhenNullSCMViewReturned() {
    when(scmExtension.getSCMConfiguration(pluginDescriptor.id())).thenReturn(new SCMPropertyConfiguration());
    when(scmExtension.getSCMView(pluginDescriptor.id())).thenReturn(null);

    assertThatThrownBy(() -> metadataLoader.fetchSCMMetaData(pluginDescriptor))
        .hasMessage("Plugin[plugin-id] returned null SCM view");

    assertThat(SCMMetadataStore.getInstance().getConfigurationMetadata(pluginDescriptor.id())).isNull();
}
/** Entry point for reading: returns a Read transform with default settings. */
public static Read read() {
    return Read.create();
}
/**
 * Display data for a configured Read must surface the table id, row filter,
 * key ranges, and the Bigtable options.
 */
@Test
public void testReadingDisplayData() {
    RowFilter rowFilter =
        RowFilter.newBuilder().setRowKeyRegexFilter(ByteString.copyFromUtf8("foo.*")).build();
    ByteKeyRange keyRange = ByteKeyRange.ALL_KEYS.withEndKey(ByteKey.of(0xab, 0xcd));
    BigtableIO.Read read =
        BigtableIO.read()
            .withBigtableOptions(BIGTABLE_OPTIONS)
            .withTableId("fooTable")
            .withRowFilter(rowFilter)
            .withKeyRange(keyRange);
    DisplayData displayData = DisplayData.from(read);
    assertThat(
        displayData,
        hasDisplayItem(
            allOf(hasKey("tableId"), hasLabel("Bigtable Table Id"), hasValue("fooTable"))));
    assertThat(displayData, hasDisplayItem("rowFilter", rowFilter.toString()));
    assertThat(
        displayData, hasDisplayItem("keyRanges", "[ByteKeyRange{startKey=[], endKey=[abcd]}]"));
    // BigtableIO adds user-agent to options; assert only on key and not value.
    assertThat(displayData, hasDisplayItem("bigtableOptions"));
}
/**
 * Validates that the configured upload directory exists. A null path means
 * "not configured" and is accepted unchanged.
 *
 * @throws JetException if the path is set but does not exist on disk
 */
static void validateUploadDirectoryPath(String uploadDirectoryPath) {
    if (uploadDirectoryPath == null) {
        return;
    }
    Path path = Paths.get(uploadDirectoryPath);
    if (!Files.exists(path)) {
        String errorMessage = String.format("The upload directory path does not exist: %s", path);
        throw new JetException(errorMessage);
    }
}
/** A non-existent upload directory must be rejected with a descriptive JetException. */
@Test
public void testValidateTempDirectoryPath() {
    assertThatThrownBy(() -> JarOnClientValidator.validateUploadDirectoryPath("foo"))
        .isInstanceOf(JetException.class)
        .hasMessageContaining("The upload directory path does not exist: foo");
}
/** Returns the keys of all registered languages, sorted alphabetically. */
public static List<String> getOrderedLanguageKeys(Languages languages) {
    return Arrays.stream(languages.all())
        .map(Language::getKey)
        .sorted()
        .toList();
}
/** No languages yields an empty list; otherwise keys come back alphabetically sorted. */
@Test
public void getOrderedLanguageKeys() {
    assertThat(LanguageParamUtils.getOrderedLanguageKeys(new Languages())).isEmpty();

    Languages registered = new Languages(
        new TestLanguage("java"),
        new TestLanguage("abap"),
        new TestLanguage("js"),
        new TestLanguage("cobol"));
    assertThat(LanguageParamUtils.getOrderedLanguageKeys(registered)).containsExactly("abap", "cobol", "java", "js");
}
/**
 * Sums the known sizes of the given files. Entries with unknown length are
 * skipped, and cancellation is honored between files.
 */
@Override
public Long run(final Session<?> session) throws BackgroundException {
    for (Path file : files) {
        if (this.isCanceled()) {
            throw new ConnectionCanceledException();
        }
        final long size = file.attributes().getSize();
        if (TransferStatus.UNKNOWN_LENGTH != size) {
            total += size;
        }
    }
    return total;
}
@Test public void testRun() throws Exception { final Path d = new Path("/d", EnumSet.of(Path.Type.directory)); d.attributes().setSize(-1L); final ReadSizeWorker worker = new ReadSizeWorker(Collections.singletonList(d)) { @Override public void cleanup(final Long result) { // } }; assertEquals(0L, worker.run(new NullSession(new Host(new TestProtocol()))), 0L); }
/** Exposes the connection properties parsed from the JDBC URL. */
public Properties getProperties() { return properties; }
/** The httpProxy query parameter is copied into the driver properties. */
@Test
void testUriWithHttpProxy() throws SQLException {
    PrestoDriverUri uri = createDriverUri("presto://localhost:8080?httpProxy=localhost:5678");
    assertUriPortScheme(uri, 8080, "http");

    Properties properties = uri.getProperties();
    assertEquals(properties.getProperty(HTTP_PROXY.getKey()), "localhost:5678");
}
/**
 * Renders the applications page: fetches application data and delegates to
 * {@code renderData}. On any fetch failure the error is logged and a short
 * message is rendered instead of the table.
 */
@Override
public void render(Block html) {
    setTitle("Applications");
    try {
        fetchData();
    } catch (YarnException | IOException | InterruptedException fetchFailure) {
        String errorMsg = "Failed to read the applications.";
        LOG.error(errorMsg, fetchFailure);
        // Surface the failure to the user instead of rendering stale/empty data.
        html.p().__(errorMsg).__();
        return;
    }
    renderData(html);
}
// An invalid APP_STATE request parameter must propagate IllegalArgumentException
// from render() rather than being caught and hidden.
@Test(expected = IllegalArgumentException.class)
public void testInvalidAppState() {
    AppsBlock appBlock = new AppsBlock(null, null) {
        // override this so that apps block can fetch app state.
        @Override
        public Map<String, String> moreParams() {
            Map<String, String> map = new HashMap<>();
            map.put(YarnWebParams.APP_STATE, "ACCEPTEDPING");
            return map;
        }

        @Override
        protected void renderData(Block html) {
            // rendering is irrelevant to this test
        }
    };
    // set up the test block to render AppsBlock
    OutputStream outputStream = new ByteArrayOutputStream();
    HtmlBlock.Block block = createBlockToCreateTo(outputStream);
    // If application state is invalid it should throw exception
    // instead of catching it.
    appBlock.render(block);
}
/**
 * This analyzer never produces output fields.
 *
 * @return always {@code false}
 */
@Override
public boolean isOutput() {
    return false;
}
// isOutput is hard-wired to false for this analyzer.
@Test
public void testIsOutput() throws Exception {
    assertFalse( analyzer.isOutput() );
}
@Override @Transactional(rollbackFor = Exception.class) @CacheEvict(value = RedisKeyConstants.PERMISSION_MENU_ID_LIST, allEntries = true) // allEntries 清空所有缓存,因为此时不知道 id 对应的 permission 是多少。直接清理,简单有效 public void deleteMenu(Long id) { // 校验是否还有子菜单 if (menuMapper.selectCountByParentId(id) > 0) { throw exception(MENU_EXISTS_CHILDREN); } // 校验删除的菜单是否存在 if (menuMapper.selectById(id) == null) { throw exception(MENU_NOT_EXISTS); } // 标记删除 menuMapper.deleteById(id); // 删除授予给角色的权限 permissionService.processMenuDeleted(id); }
// Deleting an existing, childless menu removes the row and notifies the permission service.
@Test
public void testDeleteMenu_success() {
    // mock data
    MenuDO menuDO = randomPojo(MenuDO.class);
    menuMapper.insert(menuDO);
    // prepare arguments
    Long id = menuDO.getId();

    // invoke
    menuService.deleteMenu(id);
    // assert: the row is gone and the permission cleanup hook fired
    MenuDO dbMenuDO = menuMapper.selectById(id);
    assertNull(dbMenuDO);
    verify(permissionService).processMenuDeleted(id);
}
/**
 * Returns the {@link DoFnSignature} for the given {@link DoFn} subclass, parsing it on first
 * request and serving subsequent requests from the cache.
 */
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
    return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
// A @StateId parameter typed WatermarkHoldState that references a ValueState spec must be
// rejected, and the error must pinpoint the id, method, parameter index and both types.
@Test
public void testStateParameterWrongStateType() throws Exception {
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("WatermarkHoldState");
    thrown.expectMessage("reference to");
    thrown.expectMessage("supertype");
    thrown.expectMessage("ValueState");
    thrown.expectMessage("my-id");
    thrown.expectMessage("myProcessElement");
    thrown.expectMessage("index 1");
    thrown.expectMessage(not(mentionsTimers()));
    DoFnSignatures.getSignature(
        new DoFn<KV<String, Integer>, Long>() {
          @StateId("my-id")
          private final StateSpec<ValueState<Integer>> myfield = StateSpecs.value(VarIntCoder.of());

          @ProcessElement
          public void myProcessElement(
              ProcessContext context, @StateId("my-id") WatermarkHoldState watermark) {}
        }.getClass());
}
/**
 * Lists every task directory under the state directory, i.e. all child directories whose
 * name matches the task-directory naming pattern.
 */
List<TaskDirectory> listAllTaskDirectories() {
    return listTaskDirectories(candidate ->
        TASK_DIR_PATH_NAME.matcher(candidate.getName()).matches() && candidate.isDirectory());
}
// With persistence disabled no task directories are created, so the listing is empty.
@Test
public void shouldReturnEmptyArrayForNonPersistentApp() throws IOException {
    initializeStateDirectory(false, false);
    assertTrue(directory.listAllTaskDirectories().isEmpty());
}
/**
 * Parses the given text as a SQL boolean literal.
 *
 * @param value the text to parse
 * @return {@code Optional.of(true)} or {@code Optional.of(false)} when the text matches the
 *         corresponding boolean literal, {@link Optional#empty()} otherwise.
 */
public static Optional<Boolean> parseBooleanExact(final String value) {
    // Try both boolean literals in turn; first match wins.
    for (final boolean candidate : new boolean[] {true, false}) {
        if (booleanStringMatches(value, candidate)) {
            return Optional.of(candidate);
        }
    }
    return Optional.empty();
}
// "trU" parses as true — booleanStringMatches apparently accepts case-insensitive
// prefixes of the literal; confirm against its implementation.
@Test
public void shouldParseExactTrueAsTrue() {
    assertThat(SqlBooleans.parseBooleanExact("trU"), is(Optional.of(true)));
}
/**
 * Prepares and executes each parsed statement in order, collecting any resulting entities.
 * Session variables are substituted during preparation only when variable substitution is
 * enabled for the session.
 *
 * @return the entities produced by the executed statements, in statement order
 */
public KsqlEntityList execute(
    final KsqlSecurityContext securityContext,
    final List<ParsedStatement> statements,
    final SessionProperties sessionProperties
) {
    final KsqlEntityList results = new KsqlEntityList();
    for (final ParsedStatement statement : statements) {
        final PreparedStatement<?> prepared = ksqlEngine.prepare(
            statement,
            isVariableSubstitutionEnabled(sessionProperties)
                ? sessionProperties.getSessionVariables()
                : Collections.emptyMap());
        // Statements that yield no entity (e.g. side-effect-only) contribute nothing.
        executeStatement(securityContext, prepared, sessionProperties, results)
            .ifPresent(results::add);
    }
    return results;
}
// CREATE STREAM with HEADERS columns must be rejected while the headers feature flag is off.
@Test
public void shouldThrowOnCreateStreamIfFeatureFlagIsDisabled() {
    // Given
    final StatementExecutor<CreateStream> customExecutor =
        givenReturningExecutor(CreateStream.class, mock(KsqlEntity.class));
    when(ksqlConfig.getBoolean(KsqlConfig.KSQL_HEADERS_COLUMNS_ENABLED)).thenReturn(false);
    givenRequestHandler(ImmutableMap.of(CreateStream.class, customExecutor));

    // When
    final List<ParsedStatement> statements =
        KSQL_PARSER.parse("CREATE STREAM x (c1 ARRAY<STRUCT<`KEY` STRING, `VALUE` BYTES>> HEADERS) "
            + "WITH (value_format='json', kafka_topic='x');");
    final Exception e = assertThrows(
        KsqlException.class,
        () -> handler.execute(securityContext, statements, sessionProperties));

    // Then
    assertThat(e.getMessage(), containsString(
        "Cannot create Stream because schema with headers columns is disabled."));
}
/**
 * Dispatches the given schema to the visitor method registered for its type.
 *
 * @return the visitor's result, cast to the caller's expected type
 * @throws UnsupportedOperationException when no handler is registered for the schema type
 */
@SuppressWarnings("unchecked")
public static <S, F> S visit(final Schema schema, final Visitor<S, F> visitor) {
    final BiFunction<Visitor<?, ?>, Schema, Object> dispatch = HANDLER.get(schema.type());
    if (dispatch == null) {
        throw new UnsupportedOperationException("Unsupported schema type: " + schema.type());
    }
    final Object result = dispatch.apply(visitor, schema);
    return (S) result;
}
// BOOLEAN schemas must be dispatched to visitBoolean with the same schema instance,
// and the visitor's return value must be passed through.
@Test
public void shouldVisitBoolean() {
    // Given:
    final Schema schema = Schema.OPTIONAL_BOOLEAN_SCHEMA;
    when(visitor.visitBoolean(any())).thenReturn("Expected");

    // When:
    final String result = SchemaWalker.visit(schema, visitor);

    // Then:
    verify(visitor).visitBoolean(same(schema));
    assertThat(result, is("Expected"));
}
/**
 * Null-safe comparison of two maps, following the {@link java.util.Map#equals(Object)}
 * contract: two maps are equal when they hold exactly the same key/value mappings.
 *
 * @param map1 first map, may be null
 * @param map2 second map, may be null
 * @return true if both are null, or both are non-null with identical mappings
 */
public static boolean mapEquals(Map<?, ?> map1, Map<?, ?> map2) {
    if (map1 == null && map2 == null) {
        return true;
    }
    if (map1 == null || map2 == null) {
        return false;
    }
    if (map1.size() != map2.size()) {
        return false;
    }
    for (Map.Entry<?, ?> entry : map1.entrySet()) {
        Object key = entry.getKey();
        Object value1 = entry.getValue();
        Object value2 = map2.get(key);
        // A null lookup result is ambiguous: the key may be absent, or explicitly mapped
        // to null. Without this check, {k -> null} would wrongly compare equal to a
        // same-sized map that lacks k entirely.
        if (value2 == null && !map2.containsKey(key)) {
            return false;
        }
        if (!objectEquals(value1, value2)) {
            return false;
        }
    }
    return true;
}
// Null handling, size mismatch and entry-wise comparison of mapEquals.
@Test
void testMapEquals() {
    assertTrue(CollectionUtils.mapEquals(null, null));
    assertFalse(CollectionUtils.mapEquals(null, new HashMap<String, String>()));
    assertFalse(CollectionUtils.mapEquals(new HashMap<String, String>(), null));
    assertTrue(CollectionUtils.mapEquals(
        CollectionUtils.toStringMap("1", "a", "2", "b"),
        CollectionUtils.toStringMap("1", "a", "2", "b")));
    assertFalse(CollectionUtils.mapEquals(
        CollectionUtils.toStringMap("1", "a"),
        CollectionUtils.toStringMap("1", "a", "2", "b")));
}
@Override public void onPartitionsLost(final Collection<TopicPartition> partitions) { log.info("at state {}: partitions {} lost due to missed rebalance.\n" + "\tlost active tasks: {}\n" + "\tlost assigned standby tasks: {}\n", streamThread.state(), partitions, taskManager.activeTaskIds(), taskManager.standbyTaskIds()); final long start = time.milliseconds(); try { // close all active tasks as lost but don't try to commit offsets as we no longer own them taskManager.handleLostAll(); } finally { log.info("partitions lost took {} ms.", time.milliseconds() - start); } }
// Losing partitions must trigger handleLostAll on the task manager.
@Test
public void shouldHandleLostPartitions() {
    streamsRebalanceListener.onPartitionsLost(Collections.singletonList(new TopicPartition("topic", 0)));

    verify(taskManager).handleLostAll();
}
/**
 * Resolves the lock-at-most-for duration for the annotation by combining its numeric and
 * string values with the configured default via {@code getValue}.
 */
Duration getLockAtMostFor(AnnotationData annotation) {
    return getValue(
        annotation.getLockAtMostFor(),
        annotation.getLockAtMostForString(),
        this.defaultLockAtMostFor,
        "lockAtMostForString");
}
// With a no-op resolver, the 100ms value declared on the annotation wins over the default.
@Test
public void shouldLockTimeFromAnnotation() throws NoSuchMethodException {
    noopResolver();
    SpringLockConfigurationExtractor.AnnotationData annotation = getAnnotation("annotatedMethod");
    TemporalAmount lockAtMostFor = extractor.getLockAtMostFor(annotation);
    assertThat(lockAtMostFor).isEqualTo(Duration.of(100, MILLIS));
}
/**
 * Decides whether the content cluster should block feed based on per-node resource
 * exhaustion.
 *
 * @return a FeedBlock describing up to 3 exhaustions (with a count of the rest), or
 *         {@code null} when feed blocking is disabled or no resources are exhausted
 */
public ClusterStateBundle.FeedBlock inferContentClusterFeedBlockOrNull(ContentCluster cluster) {
    if (!feedBlockEnabled) {
        return null;
    }
    var nodeInfos = cluster.getNodeInfos();
    var exhaustions = enumerateNodeResourceExhaustionsAcrossAllNodes(nodeInfos);
    if (exhaustions.isEmpty()) {
        return null;
    }
    // Keep the description readable: list at most 3 exhaustions explicitly.
    int maxDescriptions = 3;
    String description = exhaustions.stream()
        .limit(maxDescriptions)
        .map(NodeResourceExhaustion::toExhaustionAddedDescription)
        .collect(Collectors.joining(", "));
    if (exhaustions.size() > maxDescriptions) {
        description += String.format(" (... and %d more)", exhaustions.size() - maxDescriptions);
    }
    description = decoratedMessage(cluster, description);
    // FIXME we currently will trigger a cluster state recomputation even if the number of
    // exhaustions is greater than what is returned as part of the description. Though at
    // that point, cluster state recomputations will be the least of your worries...!
    return ClusterStateBundle.FeedBlock.blockedWith(description, exhaustions);
}
// Hysteresis edge case: a node already blocked stays blocked while its usage remains within
// the hysteresis window below the limit; a node never blocked is not newly blocked there.
@Test
void retain_node_feed_block_status_when_within_hysteresis_window_under_limit_edge_case() {
    var curFeedBlock = ClusterStateBundle.FeedBlock.blockedWith("foo", setOf(exhaustion(1, "memory", 0.49)));
    var calc = new ResourceExhaustionCalculator(true,
        mapOf(usage("disk", 0.5), usage("memory", 0.5)),
        curFeedBlock, 0.1);
    // Node 1 goes from 0.49 to 0.48, NOT crossing the 0.5 threshold. Should still be blocked.
    // Node 2 is at 0.49 but was not previously blocked and should not be blocked now either.
    var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.3), usage("memory", 0.48)),
        forNode(2, usage("disk", 0.3), usage("memory", 0.49)));
    var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster());
    assertNotNull(feedBlock);
    assertEquals(decorate(cf, "memory on node 1 [storage.1.local] is 48.0% full (the configured limit is 40.0%)"),
        feedBlock.getDescription());
}
/**
 * Starts asynchronous polling for the job result. Delegates to the internal overload with a
 * fresh result future and an initial attempt count of 0.
 *
 * @param jobStatusSupplier supplies the current job status
 * @param jobResultSupplier supplies the final job result once available
 * @param scheduledExecutor executor used to schedule retries
 * @param retryMsTimeout delay in milliseconds between polling attempts
 * @return a future completed with the job result once polling succeeds
 */
@VisibleForTesting
static CompletableFuture<JobResult> pollJobResultAsync(
        final Supplier<CompletableFuture<JobStatus>> jobStatusSupplier,
        final Supplier<CompletableFuture<JobResult>> jobResultSupplier,
        final ScheduledExecutor scheduledExecutor,
        final long retryMsTimeout) {
    return pollJobResultAsync(
            jobStatusSupplier,
            jobResultSupplier,
            scheduledExecutor,
            new CompletableFuture<>(),
            retryMsTimeout,
            0);
}
// Polling must keep retrying until the status supplier reports a terminal state; the number
// of status queries must equal the configured attempt count.
@Test
void testPolling() {
    final int maxAttemptCounter = 3;

    final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
    try {
        final ScheduledExecutor scheduledExecutor = new ScheduledExecutorServiceAdapter(executor);
        final CallCountingJobStatusSupplier jobStatusSupplier =
                new CallCountingJobStatusSupplier(maxAttemptCounter);

        final CompletableFuture<JobResult> result =
                JobStatusPollingUtils.pollJobResultAsync(
                        jobStatusSupplier,
                        () -> CompletableFuture.completedFuture(
                                createSuccessfulJobResult(new JobID(0, 0))),
                        scheduledExecutor,
                        10);

        result.join();

        assertThat(jobStatusSupplier.getAttemptCounter()).isEqualTo(maxAttemptCounter);
    } finally {
        ExecutorUtils.gracefulShutdown(5, TimeUnit.SECONDS, executor);
    }
}
/**
 * Returns the configured key-generate column, or {@link Optional#empty()} when none is set.
 */
public Optional<String> getGenerateKeyColumn() {
    return Optional.ofNullable(generateKeyColumn);
}
// A fully configured sharding table rule must expand inline data nodes across both data
// sources and expose its key-generate column and generator name.
@Test
void assertCreateFullShardingTable() {
    ShardingTableRuleConfiguration shardingTableRuleConfig =
        new ShardingTableRuleConfiguration("LOGIC_TABLE", "ds${0..1}.table_${0..2}");
    shardingTableRuleConfig.setDatabaseShardingStrategy(new NoneShardingStrategyConfiguration());
    shardingTableRuleConfig.setTableShardingStrategy(new NoneShardingStrategyConfiguration());
    shardingTableRuleConfig.setKeyGenerateStrategy(new KeyGenerateStrategyConfiguration("col_1", "increment"));
    ShardingTable actual = new ShardingTable(shardingTableRuleConfig, Arrays.asList("ds0", "ds1"), null);
    assertThat(actual.getLogicTable(), is("LOGIC_TABLE"));
    assertThat(actual.getActualDataNodes().size(), is(6));
    assertTrue(actual.getActualDataNodes().contains(new DataNode("ds0", "table_0")));
    assertTrue(actual.getActualDataNodes().contains(new DataNode("ds0", "table_1")));
    assertTrue(actual.getActualDataNodes().contains(new DataNode("ds0", "table_2")));
    assertTrue(actual.getActualDataNodes().contains(new DataNode("ds1", "table_0")));
    assertTrue(actual.getActualDataNodes().contains(new DataNode("ds1", "table_1")));
    assertTrue(actual.getActualDataNodes().contains(new DataNode("ds1", "table_2")));
    assertTrue(actual.getGenerateKeyColumn().isPresent());
    assertThat(actual.getGenerateKeyColumn().get(), is("col_1"));
    assertThat(actual.getKeyGeneratorName(), is("increment"));
}
/**
 * Determines how the named property is aggregated onto the target object: via an adder
 * method (collection-style) or a setter (single-value style).
 *
 * @param name the property name (first letter is capitalized before method lookup)
 * @return the aggregation type, or NOT_FOUND when neither an adder nor a setter exists
 */
public AggregationType computeAggregationType(String name) {
    // findAdderMethod() capitalizes name's 1st letter before search
    Method addMethod = findAdderMethod(name);
    // if an adder exists, promote its raw aggregation type to the collection variant
    if (addMethod != null) {
        AggregationType type = computeRawAggregationType(addMethod);
        switch (type) {
        case NOT_FOUND:
            return AggregationType.NOT_FOUND;
        case AS_BASIC_PROPERTY:
            return AggregationType.AS_BASIC_PROPERTY_COLLECTION;
        case AS_COMPLEX_PROPERTY:
            return AggregationType.AS_COMPLEX_PROPERTY_COLLECTION;
        case AS_BASIC_PROPERTY_COLLECTION:
        case AS_COMPLEX_PROPERTY_COLLECTION:
            // adders should never themselves yield collection types
            addError("Unexpected AggregationType " + type);
            break;
        }
    }
    // No adder: fall back to a setter-based (single value) aggregation
    Method setterMethod = findSetterMethod(name);
    if (setterMethod != null) {
        return computeRawAggregationType(setterMethod);
    } else {
        // we have failed
        return AggregationType.NOT_FOUND;
    }
}
// Aggregation type inference across property kinds: complex components, basic properties
// (case-insensitive first letter), and adder-backed collection properties.
@Test
public void testCanAggregateComponent() {
    assertEquals(AggregationType.AS_COMPLEX_PROPERTY, setter
        .computeAggregationType("door"));
    assertEquals(AggregationType.AS_BASIC_PROPERTY, setter
        .computeAggregationType("count"));
    assertEquals(AggregationType.AS_BASIC_PROPERTY, setter
        .computeAggregationType("Count"));
    assertEquals(AggregationType.AS_BASIC_PROPERTY, setter
        .computeAggregationType("name"));
    assertEquals(AggregationType.AS_BASIC_PROPERTY, setter
        .computeAggregationType("Name"));
    assertEquals(AggregationType.AS_BASIC_PROPERTY, setter
        .computeAggregationType("Duration"));
    assertEquals(AggregationType.AS_BASIC_PROPERTY, setter
        .computeAggregationType("fs"));
    assertEquals(AggregationType.AS_BASIC_PROPERTY, setter
        .computeAggregationType("open"));
    assertEquals(AggregationType.AS_BASIC_PROPERTY, setter
        .computeAggregationType("Open"));
    assertEquals(AggregationType.AS_COMPLEX_PROPERTY_COLLECTION, setter
        .computeAggregationType("Window"));
    assertEquals(AggregationType.AS_BASIC_PROPERTY_COLLECTION, setter
        .computeAggregationType("adjective"));
    assertEquals(AggregationType.AS_BASIC_PROPERTY, setter
        .computeAggregationType("filterReply"));
    assertEquals(AggregationType.AS_BASIC_PROPERTY, setter
        .computeAggregationType("houseColor"));
    System.out.println();
}
/**
 * Returns the highest recorded value, or 0 when the snapshot is empty.
 * Relies on {@code values} being sorted ascending, so the maximum is the last element.
 */
@Override
public long getMax() {
    return values.length == 0 ? 0 : values[values.length - 1];
}
// The snapshot fixture's largest element is 5; getMax must report it.
@Test
public void calculatesTheMaximumValue() throws Exception {
    assertThat(snapshot.getMax())
        .isEqualTo(5);
}
/**
 * Tests whether the file exists by attempting to read its attributes.
 *
 * @return true when attributes can be resolved, false when the server reports not-found
 * @throws BackgroundException for any failure other than not-found
 */
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
    try {
        // Existence probe: a missing file surfaces as NotfoundException here.
        new DeepboxAttributesFinderFeature(session, fileid).find(file, listener);
    }
    catch(NotfoundException e) {
        return false;
    }
    return true;
}
// A freshly created directory is found; the same path typed as a file is not.
@Test
public void testFindDirectory() throws Exception {
    final DeepboxIdProvider nodeid = new DeepboxIdProvider(session);
    final Path box = new Path("/ORG 4 - DeepBox Desktop App/ORG3:Box1/Documents",
        EnumSet.of(Path.Type.directory, Path.Type.volume));
    final Path folder = new DeepboxDirectoryFeature(session, nodeid).mkdir(
        new Path(box, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)),
        new TransferStatus());
    assertTrue(new DeepboxFindFeature(session, nodeid).find(folder));
    assertFalse(new DeepboxFindFeature(session, nodeid).find(new Path(folder.getAbsolute(), EnumSet.of(Path.Type.file))));
    new DeepboxDeleteFeature(session, nodeid).delete(Collections.singletonList(folder),
        new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Builds and installs a meter for the given request.
 *
 * <p>The cell id is either taken from the request (only legal when user-defined-index mode
 * is on) or allocated by this manager (only legal when that mode is off). The meter is put
 * into PENDING_ADD and handed to the store; {@code onComplete} fires when the store
 * operation finishes.
 *
 * @param request the meter request; must not be null
 * @return the meter in PENDING_ADD state
 * @throws IllegalArgumentException when index presence conflicts with the index mode
 */
@Override
public Meter submit(MeterRequest request) {
    checkNotNull(request, "request cannot be null.");
    MeterCellId cellId;
    if (request.index().isPresent()) {
        checkArgument(userDefinedIndex, "Index cannot be provided when userDefinedIndex mode is disabled");
        // User provides index
        if (request.scope().isGlobal()) {
            cellId = MeterId.meterId(request.index().get());
        } else {
            cellId = PiMeterCellId.ofIndirect(
                PiMeterId.of(request.scope().id()), request.index().get());
        }
    } else {
        checkArgument(!userDefinedIndex, "Index cannot be allocated when userDefinedIndex mode is enabled");
        // Allocate an id
        cellId = allocateMeterId(request.deviceId(), request.scope());
    }
    Meter.Builder mBuilder = DefaultMeter.builder()
        .forDevice(request.deviceId())
        .fromApp(request.appId())
        .withBands(request.bands())
        .withCellId(cellId)
        .withUnit(request.unit());
    if (request.isBurst()) {
        mBuilder.burst();
    }
    if (request.annotations() != null && !request.annotations().keys().isEmpty()) {
        mBuilder.withAnnotations(request.annotations());
    }
    DefaultMeter m = (DefaultMeter) mBuilder.build();
    // Meter installation logic (happy ending case)
    // PENDING -> stats -> ADDED -> future completes
    m.setState(MeterState.PENDING_ADD);
    store.addOrUpdateMeter(m).whenComplete((result, error) ->
        onComplete.accept(request, result, error));
    return m;
}
// Submitting a request that carries no index while user-defined-index mode is enabled
// must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void testWrongAddInUserDefinedIndexMode() {
    initMeterStore();
    testEnableUserDefinedIndex();
    manager.submit(m1Request.add());
}
/**
 * Coerces the given value to a primitive long: {@link Number}s via
 * {@link Number#longValue()}, strings via {@link Long#parseLong(String)}.
 *
 * @throws IllegalArgumentException when the value is of any other type
 * @throws NumberFormatException when a string value is not a valid long
 */
protected long convertLong(Object value) {
    if (value instanceof String) {
        return Long.parseLong((String) value);
    }
    if (value instanceof Number) {
        return ((Number) value).longValue();
    }
    throw new IllegalArgumentException("Cannot convert to long: " + value.getClass().getName());
}
// Strings, floats, doubles, ints and longs all coerce to the same long value.
@Test
public void testLongConversion() {
    Table table = mock(Table.class);
    when(table.schema()).thenReturn(SIMPLE_SCHEMA);
    RecordConverter converter = new RecordConverter(table, config);

    long expectedLong = 123L;

    ImmutableList.of("123", 123.0f, 123.0d, 123, expectedLong)
        .forEach(
            input -> {
              long val = converter.convertLong(input);
              assertThat(val).isEqualTo(expectedLong);
            });
}
/**
 * Returns the text with runs of identical consecutive characters capped at
 * {@code maxConsecutiveLength}; the original string is returned unchanged (same instance)
 * when no run exceeds the cap.
 */
public static String truncateSequencesIfNecessary(String text, int maxConsecutiveLength) {
    final int length = text.length();
    char previous = 0;
    int runLength = 1;
    for (int index = 0; index < length; index++) {
        final char current = text.charAt(index);
        if (current != previous) {
            // Run broken: restart counting from this character.
            previous = current;
            runLength = 1;
            continue;
        }
        if (++runLength > maxConsecutiveLength) {
            // First over-long run found; delegate the actual rewriting.
            return truncateSequences(text, maxConsecutiveLength, index);
        }
    }
    return text;
}
// Identity (same instance) is preserved when no run exceeds the cap; over-long runs are
// collapsed to the cap length wherever they occur.
@Test
public void testTruncation() {
    String a = "abbc";
    assertSame(a, StringUtilities.truncateSequencesIfNecessary(a, 2));
    assertNotSame(a, StringUtilities.truncateSequencesIfNecessary(a, 1));
    assertEquals("abc", StringUtilities.truncateSequencesIfNecessary(a, 1));
    assertEquals("abc", StringUtilities.truncateSequencesIfNecessary("aabbccc", 1));
    assertEquals("abc", StringUtilities.truncateSequencesIfNecessary("abcc", 1));
    assertEquals("abc", StringUtilities.truncateSequencesIfNecessary("aabc", 1));
    assertEquals("abcb", StringUtilities.truncateSequencesIfNecessary("abcb", 1));
    assertEquals("g g g g g g g g g g\n g g g g g g g g g g\n g g g g g g g g g g",
        StringUtilities.truncateSequencesIfNecessary("g g g g g g g g g g\n g g g g g g g g g g\n g g g g g g g g g g", 5));
}
/**
 * Derives the aggregate flow-task status from the per-step overview.
 *
 * @param realTaskMap real (non-placeholder) tasks keyed by step reference name
 * @param summary workflow instance summary (used for confirmation and logging identity)
 * @param overview per-step status counts for the workflow run
 * @param isFinal whether this is the final evaluation of the run
 * @return the terminal task status when all steps are done, or empty while still in progress
 */
@SuppressWarnings({"checkstyle:OperatorWrap"})
public static Optional<Task.Status> checkProgress(
    Map<String, Task> realTaskMap,
    WorkflowSummary summary,
    WorkflowRuntimeOverview overview,
    boolean isFinal) {
  boolean allDone = true;
  boolean isFailed = false; // highest order
  boolean isTimeout = false;
  boolean isStopped = false; // lowest order
  // Only consider steps terminal-and-final when every real task has reached a terminal state.
  boolean allTerminal =
      isFinal && realTaskMap.values().stream().allMatch(task -> task.getStatus().isTerminal());
  for (Map.Entry<StepInstance.Status, WorkflowStepStatusSummary> entry :
      overview.getStepOverview().entrySet()) {
    if (entry.getKey() != StepInstance.Status.NOT_CREATED && entry.getValue().getCnt() > 0) {
      if (!entry.getKey().isTerminal() || (!allTerminal && entry.getKey().isRetryable())) {
        allDone = false;
        break;
      } else if (entry.getKey() == StepInstance.Status.FATALLY_FAILED
          || entry.getKey() == StepInstance.Status.INTERNALLY_FAILED
          || entry.getKey() == StepInstance.Status.USER_FAILED
          || entry.getKey() == StepInstance.Status.PLATFORM_FAILED
          || entry.getKey() == StepInstance.Status.TIMEOUT_FAILED) {
        isFailed = true;
      } else if (entry.getKey() == StepInstance.Status.TIMED_OUT) {
        isTimeout = true;
      } else if (entry.getKey() == StepInstance.Status.STOPPED) {
        isStopped = true;
      }
    }
  }
  if (allDone && overview.existsNotCreatedStep()) {
    allDone = confirmDone(realTaskMap, summary);
  }
  // It's unexpected. Can happen if conductor fails the run before running maestro task logic.
  if (allDone && !isFailed && !isTimeout && !isStopped && !overview.existsCreatedStep()) {
    LOG.warn(
        "There are no created steps in the workflow [{}] and mark it as failed.",
        summary.getIdentity());
    Monitors.error(TaskHelper.class.getName(), "checkProgress");
    isFailed = true;
  }
  LOG.trace(
      "Check task status: done [{}] and with flags: [isFailed: {}], [isTimeout: {}], [isStopped: {}] "
          + "with real task map: [{}] and workflow summary: [{}]",
      allDone,
      isFailed,
      isTimeout,
      isStopped,
      realTaskMap,
      summary);
  if (allDone) {
    // Priority order: failed > timed out > stopped > succeeded.
    if (isFailed) {
      return Optional.of(Task.Status.FAILED);
    } else if (isTimeout) {
      return Optional.of(Task.Status.TIMED_OUT);
    } else if (isStopped) {
      return Optional.of(Task.Status.CANCELED);
    } else {
      // Use this special status to indicate workflow succeeded.
      // So all dummy (NOT_CREATED) tasks will be cancelled.
      return Optional.of(Task.Status.FAILED_WITH_TERMINAL_ERROR);
    }
  }
  return Optional.empty();
}
// All steps SUCCEEDED -> no aggregate status yet (empty); adding a FATALLY_FAILED step
// makes checkProgress report FAILED.
@Test
public void testCheckProgress() throws Exception {
    Task t1 = new Task();
    t1.setTaskType(Constants.MAESTRO_TASK_NAME);
    t1.setSeq(1);
    t1.setReferenceTaskName("job1");
    t1.setStatus(Task.Status.COMPLETED);
    t1.setOutputData(
        Collections.singletonMap(
            Constants.STEP_RUNTIME_SUMMARY_FIELD,
            threeItemMap(
                "runtime_state", Collections.singletonMap("status", "SUCCEEDED"),
                "type", "NOOP",
                "step_id", "job1")));
    Task t2 = new Task();
    t2.setTaskType(Constants.MAESTRO_TASK_NAME);
    t2.setSeq(2);
    t2.setReferenceTaskName("job3");
    t2.setStatus(Task.Status.COMPLETED);
    t2.setOutputData(
        Collections.singletonMap(
            Constants.STEP_RUNTIME_SUMMARY_FIELD,
            threeItemMap(
                "runtime_state", Collections.singletonMap("status", "SUCCEEDED"),
                "type", "NOOP",
                "step_id", "job3")));
    Map<String, Task> realTaskMap = twoItemMap("job1", t1, "job3", t2);
    WorkflowSummary workflowSummary =
        loadObject("fixtures/parameters/sample-wf-summary-params.json", WorkflowSummary.class);
    WorkflowRuntimeOverview overview =
        TaskHelper.computeOverview(
            MAPPER, workflowSummary, new WorkflowRollupOverview(), realTaskMap);
    Optional<Task.Status> actual =
        TaskHelper.checkProgress(realTaskMap, workflowSummary, overview, true);
    Assert.assertFalse(actual.isPresent());

    Task t3 = new Task();
    t3.setTaskType(Constants.MAESTRO_TASK_NAME);
    t3.setSeq(2);
    t3.setReferenceTaskName("job.2");
    t3.setStatus(Task.Status.FAILED);
    t3.setOutputData(
        Collections.singletonMap(
            Constants.STEP_RUNTIME_SUMMARY_FIELD,
            threeItemMap(
                "runtime_state", Collections.singletonMap("status", "FATALLY_FAILED"),
                "type", "NOOP",
                "step_id", "job.2")));
    realTaskMap.put("job.2", t3);
    overview =
        TaskHelper.computeOverview(
            MAPPER, workflowSummary, new WorkflowRollupOverview(), realTaskMap);
    actual = TaskHelper.checkProgress(realTaskMap, workflowSummary, overview, true);
    Assert.assertEquals(Task.Status.FAILED, actual.get());
}
/**
 * Handles a schema-transform discovery request: waits (once) for all downstream expansion
 * services to be ready, then forwards the aggregated discovery response. Any runtime
 * failure during processing is reported back to the client inside the response's error
 * field rather than as a gRPC error.
 */
@Override
public void discoverSchemaTransform(
    ExpansionApi.DiscoverSchemaTransformRequest request,
    StreamObserver<ExpansionApi.DiscoverSchemaTransformResponse> responseObserver) {
  // NOTE(review): checkedAllServices is checked and set without synchronization; concurrent
  // first calls could both perform the readiness wait — confirm single-threaded dispatch.
  if (!checkedAllServices) {
    try {
      waitForAllServicesToBeReady();
    } catch (TimeoutException e) {
      throw new RuntimeException(e);
    }
    checkedAllServices = true;
  }
  try {
    responseObserver.onNext(processDiscover(request));
    responseObserver.onCompleted();
  } catch (RuntimeException exn) {
    // Report processing failures in-band so the client sees a well-formed response.
    responseObserver.onNext(
        ExpansionApi.DiscoverSchemaTransformResponse.newBuilder()
            .setError(Throwables.getStackTraceAsString(exn))
            .build());
    responseObserver.onCompleted();
  }
}
// Discovery must fan out to each downstream service and merge their schema-transform
// configs into a single response.
@Test
public void testObserverMultipleEndpointsReturn() {
    ExpansionServiceClient expansionServiceClient = Mockito.mock(ExpansionServiceClient.class);
    Mockito.when(clientFactory.getExpansionServiceClient(Mockito.any()))
        .thenReturn(expansionServiceClient);
    Mockito.when(expansionServiceClient.discover(Mockito.any()))
        .thenReturn(
            DiscoverSchemaTransformResponse.newBuilder()
                .putSchemaTransformConfigs(
                    "schematransform_key_1", SchemaTransformConfig.newBuilder().build())
                .build())
        .thenReturn(
            DiscoverSchemaTransformResponse.newBuilder()
                .putSchemaTransformConfigs(
                    "schematransform_key_2", SchemaTransformConfig.newBuilder().build())
                .build());

    DiscoverSchemaTransformRequest request = DiscoverSchemaTransformRequest.newBuilder().build();
    StreamObserver<DiscoverSchemaTransformResponse> responseObserver =
        Mockito.mock(StreamObserver.class);
    expansionService.discoverSchemaTransform(request, responseObserver);

    Mockito.verify(expansionServiceClient, Mockito.times(2)).discover(request);
    ArgumentCaptor<DiscoverSchemaTransformResponse> discoverResponseCapture =
        ArgumentCaptor.forClass(DiscoverSchemaTransformResponse.class);
    Mockito.verify(responseObserver).onNext(discoverResponseCapture.capture());
    assertEquals(2, discoverResponseCapture.getValue().getSchemaTransformConfigsCount());
    assertTrue(
        discoverResponseCapture
            .getValue()
            .getSchemaTransformConfigsMap()
            .containsKey("schematransform_key_1"));
    assertTrue(
        discoverResponseCapture
            .getValue()
            .getSchemaTransformConfigsMap()
            .containsKey("schematransform_key_2"));
}
/**
 * CLI command that renders the table's restore instants (optionally including in-flight
 * ones) as a formatted table, honoring the standard limit/sort/order/header options.
 */
@ShellMethod(key = "show restores", value = "List all restore instants")
public String showRestores(
    @ShellOption(value = {"--limit"}, help = "Limit #rows to be displayed", defaultValue = "10") Integer limit,
    @ShellOption(value = {"--sortBy"}, help = "Sorting Field", defaultValue = "") final String sortByField,
    @ShellOption(value = {"--desc"}, help = "Ordering", defaultValue = "false") final boolean descending,
    @ShellOption(value = {"--headeronly"}, help = "Print Header Only", defaultValue = "false") final boolean headerOnly,
    @ShellOption(value = {"--includeInflights"}, help = "Also list restores that are in flight",
        defaultValue = "false") final boolean includeInflights) {

    HoodieActiveTimeline activeTimeline = HoodieCLI.getTableMetaClient().getActiveTimeline();
    List<HoodieInstant> restoreInstants = getRestoreInstants(activeTimeline, includeInflights);

    // One output row per rolled-back instant within each restore.
    final List<Comparable[]> outputRows = new ArrayList<>();
    for (HoodieInstant restoreInstant : restoreInstants) {
        populateOutputFromRestoreInstant(restoreInstant, outputRows, activeTimeline);
    }

    TableHeader header = createResultHeader();
    return HoodiePrintHelper.print(header, new HashMap<>(), sortByField, descending, limit, headerOnly, outputRows);
}
// The command output must match a table rebuilt directly from the completed restore
// instants' metadata (one row per rolled-back instant).
@Test
public void testShowRestores() {
    Object result = shell.evaluate(() -> "show restores");
    assertTrue(ShellEvaluationResultUtil.isSuccess(result));

    // get restored instants
    HoodieActiveTimeline activeTimeline = HoodieCLI.getTableMetaClient().getActiveTimeline();
    Stream<HoodieInstant> restores = activeTimeline.getRestoreTimeline().filterCompletedInstants().getInstantsAsStream();

    List<Comparable[]> rows = new ArrayList<>();
    restores.sorted().forEach(instant -> {
        try {
            HoodieRestoreMetadata metadata = TimelineMetadataUtils
                .deserializeAvroMetadata(activeTimeline.getInstantDetails(instant).get(), HoodieRestoreMetadata.class);
            metadata.getInstantsToRollback().forEach(c -> {
                Comparable[] row = new Comparable[4];
                row[0] = metadata.getStartRestoreTime();
                row[1] = c;
                row[2] = metadata.getTimeTakenInMillis();
                row[3] = HoodieInstant.State.COMPLETED.toString();
                rows.add(row);
            });
        } catch (IOException e) {
            e.printStackTrace();
        }
    });

    TableHeader header = new TableHeader()
        .addTableHeaderField(HoodieTableHeaderFields.HEADER_INSTANT)
        .addTableHeaderField(HoodieTableHeaderFields.HEADER_RESTORE_INSTANT)
        .addTableHeaderField(HoodieTableHeaderFields.HEADER_TIME_TOKEN_MILLIS)
        .addTableHeaderField(HoodieTableHeaderFields.HEADER_RESTORE_STATE);
    String expected = HoodiePrintHelper.print(header, new HashMap<>(), "", false, -1, false, rows);
    expected = removeNonWordAndStripSpace(expected);
    String got = removeNonWordAndStripSpace(result.toString());
    assertEquals(expected, got);
}
/**
 * Writes the serialization section of the config XML: scalar flags, data-serializable and
 * portable factories (both class-name and instance registrations), serializers, and
 * compact serialization.
 */
private static void serialization(XmlGenerator gen, SerializationConfig serialization) {
    gen.open("serialization")
        .node("portable-version", serialization.getPortableVersion())
        .node("use-native-byte-order", serialization.isUseNativeByteOrder())
        .node("byte-order", serialization.getByteOrder())
        .node("enable-compression", serialization.isEnableCompression())
        .node("enable-shared-object", serialization.isEnableSharedObject())
        .node("allow-unsafe", serialization.isAllowUnsafe())
        .node("allow-override-default-serializers", serialization.isAllowOverrideDefaultSerializers())
        .node("check-class-def-errors", serialization.isCheckClassDefErrors());

    // Factories may be registered by class name or by instance; emit both forms.
    Map<Integer, String> dsfClasses = serialization.getDataSerializableFactoryClasses();
    Map<Integer, DataSerializableFactory> dsfImpls = serialization.getDataSerializableFactories();
    if (!dsfClasses.isEmpty() || !dsfImpls.isEmpty()) {
        gen.open("data-serializable-factories");
        for (Map.Entry<Integer, String> entry : dsfClasses.entrySet()) {
            gen.node("data-serializable-factory", entry.getValue(), "factory-id", entry.getKey());
        }
        for (Map.Entry<Integer, DataSerializableFactory> entry : dsfImpls.entrySet()) {
            gen.node("data-serializable-factory", entry.getValue().getClass().getName(), "factory-id", entry.getKey());
        }
        gen.close();
    }

    Map<Integer, String> portableClasses = serialization.getPortableFactoryClasses();
    Map<Integer, PortableFactory> portableImpls = serialization.getPortableFactories();
    if (!portableClasses.isEmpty() || !portableImpls.isEmpty()) {
        gen.open("portable-factories");
        for (Map.Entry<Integer, String> entry : portableClasses.entrySet()) {
            gen.node("portable-factory", entry.getValue(), "factory-id", entry.getKey());
        }
        for (Map.Entry<Integer, PortableFactory> entry : portableImpls.entrySet()) {
            gen.node("portable-factory", entry.getValue().getClass().getName(), "factory-id", entry.getKey());
        }
        gen.close();
    }
    serializers(gen, serialization);
    ConfigXmlGeneratorHelper.compactSerialization(gen, serialization.getCompactSerializationConfig());
    //close serialization
    gen.close();
}
// Round-trips a full serialization config through the XML generator and checks every
// scalar flag, serializer config and factory map survives regeneration.
@Test
public void serialization() {
    SerializationConfig expected = buildSerializationConfig()
        .addSerializerConfig(new SerializerConfig()
            .setClassName(TestSerializer.class.getName()).setTypeClassName(TestType.class.getName()));

    clientConfig.setSerializationConfig(expected);
    SerializationConfig actual = newConfigViaGenerator().getSerializationConfig();

    assertEquals(expected.getPortableVersion(), actual.getPortableVersion());
    assertEquals(expected.isUseNativeByteOrder(), actual.isUseNativeByteOrder());
    assertEquals(expected.getByteOrder(), actual.getByteOrder());
    assertEquals(expected.isEnableCompression(), actual.isEnableCompression());
    assertEquals(expected.isEnableSharedObject(), actual.isEnableSharedObject());
    assertEquals(expected.isAllowUnsafe(), actual.isAllowUnsafe());
    assertEquals(expected.isAllowOverrideDefaultSerializers(), actual.isAllowOverrideDefaultSerializers());
    assertEquals(expected.isCheckClassDefErrors(), actual.isCheckClassDefErrors());
    assertEquals(expected.getGlobalSerializerConfig(), actual.getGlobalSerializerConfig());

    assertCollection(expected.getSerializerConfigs(), actual.getSerializerConfigs());
    assertMap(expected.getDataSerializableFactoryClasses(), actual.getDataSerializableFactoryClasses());
    assertMap(expected.getPortableFactoryClasses(), actual.getPortableFactoryClasses());
}
/**
 * Assigns the segment to server instances using replica-group based assignment: the
 * segment's partition id (derived from the partition column, or 0 when unpartitioned)
 * selects the instance partition to place it in.
 *
 * @return the list of instances the segment is assigned to
 */
@Override
public List<String> assignSegment(String segmentName, Map<String, Map<String, String>> currentAssignment,
    InstancePartitions instancePartitions, InstancePartitionsType instancePartitionsType) {
    int numPartitions = instancePartitions.getNumPartitions();
    checkReplication(instancePartitions, _replication, _tableName);

    int partitionId;
    if (_partitionColumn == null || numPartitions == 1) {
        // No partitioning configured (or a single partition): everything maps to partition 0.
        partitionId = 0;
    } else {
        // Uniformly spray the segment partitions over the instance partitions
        if (_tableConfig.getTableType() == TableType.OFFLINE) {
            partitionId = SegmentAssignmentUtils
                .getOfflineSegmentPartitionId(segmentName, _tableName, _helixManager, _partitionColumn) % numPartitions;
        } else {
            partitionId = SegmentAssignmentUtils
                .getRealtimeSegmentPartitionId(segmentName, _tableName, _helixManager, _partitionColumn) % numPartitions;
        }
    }

    return SegmentAssignmentUtils.assignSegmentWithReplicaGroup(currentAssignment, instancePartitions, partitionId);
}
// Bootstrap rebalance with a partition column: after the rebalance each segment
// must receive the instance set previously held by the segment occupying the
// same (partition, slot) position once segments are sorted alphabetically
// within their partition.
@Test public void testBootstrapTableWithPartition() {
    // Build the initial assignment by assigning all segments in order.
    Map<String, Map<String, String>> currentAssignment = new TreeMap<>();
    for (String segmentName : SEGMENTS) {
        List<String> instancesAssigned = _segmentAssignmentWithPartition
            .assignSegment(segmentName, currentAssignment, _instancePartitionsMapWithPartition);
        currentAssignment
            .put(segmentName, SegmentAssignmentUtils.getInstanceStateMap(instancesAssigned, SegmentStateModel.ONLINE));
    }
    // Bootstrap table should reassign all segments based on their alphabetical order within the partition
    RebalanceConfig rebalanceConfig = new RebalanceConfig();
    rebalanceConfig.setBootstrap(true);
    Map<String, Map<String, String>> newAssignment = _segmentAssignmentWithPartition.rebalanceTable(currentAssignment,
        _instancePartitionsMapWithPartition, null, null, rebalanceConfig);
    assertEquals(newAssignment.size(), NUM_SEGMENTS);
    int numSegmentsPerPartition = NUM_SEGMENTS / NUM_PARTITIONS;
    // Segments are distributed round-robin over partitions (i % NUM_PARTITIONS).
    String[][] partitionIdToSegmentsMap = new String[NUM_PARTITIONS][numSegmentsPerPartition];
    for (int i = 0; i < NUM_SEGMENTS; i++) {
        partitionIdToSegmentsMap[i % NUM_PARTITIONS][i / NUM_PARTITIONS] = SEGMENTS.get(i);
    }
    // Sort each partition's segments alphabetically to model the bootstrap order.
    String[][] partitionIdToSortedSegmentsMap = new String[NUM_PARTITIONS][numSegmentsPerPartition];
    for (int i = 0; i < NUM_PARTITIONS; i++) {
        String[] sortedSegments = new String[numSegmentsPerPartition];
        System.arraycopy(partitionIdToSegmentsMap[i], 0, sortedSegments, 0, numSegmentsPerPartition);
        Arrays.sort(sortedSegments);
        partitionIdToSortedSegmentsMap[i] = sortedSegments;
    }
    // Each sorted segment must inherit the slot of the original segment at that index.
    for (int i = 0; i < NUM_PARTITIONS; i++) {
        for (int j = 0; j < numSegmentsPerPartition; j++) {
            assertEquals(newAssignment.get(partitionIdToSortedSegmentsMap[i][j]),
                currentAssignment.get(partitionIdToSegmentsMap[i][j]));
        }
    }
}
/**
 * Instantiates the {@link PlacementRule} implementation named by {@code ruleStr}.
 *
 * @param ruleStr fully-qualified class name of the rule implementation
 * @param conf configuration handed to the new instance
 * @return a freshly constructed rule
 * @throws ClassNotFoundException if the class is not on the classpath
 */
public static PlacementRule getPlacementRule(String ruleStr, Configuration conf)
    throws ClassNotFoundException {
    final Class<? extends PlacementRule> implementation =
        Class.forName(ruleStr).asSubclass(PlacementRule.class);
    LOG.info("Using PlacementRule implementation - " + implementation);
    return ReflectionUtils.newInstance(implementation, conf);
}
// A rule class name that does not exist on the classpath must surface as a
// ClassNotFoundException rather than being swallowed.
@Test(expected = ClassNotFoundException.class)
public void testGetNonExistRuleText() throws ClassNotFoundException {
    final String missingRuleClass = "my.placement.Rule";
    PlacementFactory.getPlacementRule(missingRuleClass, null);
}
/**
 * Appends {@code value} (ASCII) to the label of an allocated counter, truncating
 * to the remaining label capacity, and publishes the new label length with
 * ordered semantics so concurrent readers never see a partially written label.
 *
 * @return number of characters actually written (0 when the label is full)
 * @throws IllegalArgumentException if the counter id is negative, out of range,
 *         or the counter record is not in the allocated state
 */
public static int appendToLabel(
    final AtomicBuffer metaDataBuffer,
    final int counterId,
    final String value) {
    Objects.requireNonNull(metaDataBuffer);
    if (counterId < 0) {
        throw new IllegalArgumentException("counter id " + counterId + " is negative");
    }
    // Highest valid id given the metadata buffer capacity.
    final int maxCounterId = (metaDataBuffer.capacity() / CountersReader.METADATA_LENGTH) - 1;
    if (counterId > maxCounterId) {
        throw new IllegalArgumentException(
            "counter id " + counterId + " out of range: 0 - maxCounterId=" + maxCounterId);
    }
    final int counterMetaDataOffset = CountersReader.metaDataOffset(counterId);
    // Volatile read so the allocation state is observed after any concurrent allocation.
    final int state = metaDataBuffer.getIntVolatile(counterMetaDataOffset);
    if (CountersReader.RECORD_ALLOCATED != state) {
        throw new IllegalArgumentException("counter id " + counterId + " is not allocated, state: " + state);
    }
    final int existingLabelLength = metaDataBuffer.getInt(counterMetaDataOffset + CountersReader.LABEL_OFFSET);
    final int remainingLabelLength = CountersReader.MAX_LABEL_LENGTH - existingLabelLength;
    // Write the new characters after the existing label text (length prefix is SIZE_OF_INT).
    final int writtenLength = metaDataBuffer.putStringWithoutLengthAscii(
        counterMetaDataOffset + CountersReader.LABEL_OFFSET + SIZE_OF_INT + existingLabelLength,
        value, 0, remainingLabelLength);
    if (writtenLength > 0) {
        // Ordered store of the length publishes the appended characters to readers.
        metaDataBuffer.putIntOrdered(
            counterMetaDataOffset + CountersReader.LABEL_OFFSET, existingLabelLength + writtenLength);
    }
    return writtenLength;
}
// With the label already at MAX_LABEL_LENGTH there is no remaining capacity, so
// the append must report 0 written characters and leave the label unchanged.
@Test
void appendToLabelIsANoOpIfThereIsNoSpaceInTheLabel() {
    final CountersManager countersManager = new CountersManager(
        new UnsafeBuffer(new byte[CountersReader.METADATA_LENGTH]),
        new UnsafeBuffer(ByteBuffer.allocateDirect(CountersReader.COUNTER_LENGTH)),
        StandardCharsets.US_ASCII);
    // Build a label that exactly fills the available label space.
    final String label = Tests.generateStringWithSuffix("", "a", CountersReader.MAX_LABEL_LENGTH);
    final int counterId = countersManager.allocate(label);
    final int length = AeronCounters.appendToLabel(countersManager.metaDataBuffer(), counterId, "test");
    assertEquals(0, length);
    assertEquals(label, countersManager.getCounterLabel(counterId));
}
/**
 * Builds an insertion-ordered tag map from alternating key/value arguments,
 * e.g. {@code getTags("k1", "v1", "k2", "v2")}.
 *
 * @param keyValue alternating keys and values; must have even length
 * @return a LinkedHashMap preserving argument order (stable tag ordering)
 * @throws IllegalArgumentException if an odd number of arguments is supplied
 */
public static Map<String, String> getTags(String... keyValue) {
    if ((keyValue.length % 2) != 0) {
        throw new IllegalArgumentException("keyValue needs to be specified in pairs");
    }
    // LinkedHashMap keeps the caller's tag ordering deterministic.
    Map<String, String> tags = new LinkedHashMap<>(keyValue.length / 2);
    for (int i = 0; i < keyValue.length; i += 2) {
        tags.put(keyValue[i], keyValue[i + 1]);
    }
    return tags;
}
// An odd number of varargs cannot form complete key/value pairs and must be rejected.
@Test
public void testCreatingTagsWithOddNumberOfTags() {
    assertThrows(IllegalArgumentException.class,
        () -> MetricsUtils.getTags("k1", "v1", "k2", "v2", "extra"));
}
/** @return the client this health-check task operates on */
public IpPortBasedClient getClient() {
    return this.client;
}
// The task must always expose a non-null client reference.
@Test
void testGetClient() {
    assertNotNull(healthCheckTaskV2.getClient());
}
/**
 * Returns {@code flags} with the sampling decision recorded: the
 * "sampled set" bit is always turned on, while the "sampled" bit
 * mirrors the {@code sampled} argument.
 */
public static int sampled(boolean sampled, int flags) {
    final int withDecisionRecorded = flags | FLAG_SAMPLED_SET;
    return sampled
        ? (withDecisionRecorded | FLAG_SAMPLED)
        : (withDecisionRecorded & ~FLAG_SAMPLED);
}
// Clearing the sampled flag must leave only the "sampled set" marker behind.
@Test
void set_sampled_false() {
    final int result = sampled(false, FLAG_SAMPLED_SET | FLAG_SAMPLED);
    assertThat(result).isEqualTo(FLAG_SAMPLED_SET);
}
/** @return the number of columns of this matrix */
@Override
public int ncol() {
    return this.n;
}
// The fixture matrix is expected to have exactly three columns.
@Test
public void testNcols() {
    System.out.println("ncol");
    final int expectedColumns = 3;
    assertEquals(expectedColumns, matrix.ncol());
}
/**
 * Maps a JSON-schema "type" declaration to a codegen {@link JType}.
 * Resolution order: object schemas (explicit "object" type or a non-empty
 * "properties" block) → explicit existingJavaType (primitive or class ref) →
 * the scalar types string/number/integer/boolean → array → plain Object.
 * Afterwards "format" or "media" rules may refine the type, but only when no
 * explicit javaType/existingJavaType override is present.
 */
@Override public JType apply(String nodeName, JsonNode node, JsonNode parent, JClassContainer jClassContainer, Schema schema) {
    String propertyTypeName = getTypeName(node);
    JType type;
    if (propertyTypeName.equals("object") || node.has("properties") && node.path("properties").size() > 0) {
        // Treat as an object schema even when "type" is missing but properties exist.
        type = ruleFactory.getObjectRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema);
    } else if (node.has("existingJavaType")) {
        String typeName = node.path("existingJavaType").asText();
        if (isPrimitive(typeName, jClassContainer.owner())) {
            type = primitiveType(typeName, jClassContainer.owner());
        } else {
            type = resolveType(jClassContainer, typeName);
        }
    } else if (propertyTypeName.equals("string")) {
        type = jClassContainer.owner().ref(String.class);
    } else if (propertyTypeName.equals("number")) {
        type = getNumberType(jClassContainer.owner(), ruleFactory.getGenerationConfig());
    } else if (propertyTypeName.equals("integer")) {
        type = getIntegerType(jClassContainer.owner(), node, ruleFactory.getGenerationConfig());
    } else if (propertyTypeName.equals("boolean")) {
        type = unboxIfNecessary(jClassContainer.owner().ref(Boolean.class), ruleFactory.getGenerationConfig());
    } else if (propertyTypeName.equals("array")) {
        type = ruleFactory.getArrayRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema);
    } else {
        // Unknown/absent type falls back to java.lang.Object.
        type = jClassContainer.owner().ref(Object.class);
    }
    // "format"/"media" refinements are skipped when the schema pins an explicit Java type.
    if (!node.has("javaType") && !node.has("existingJavaType") && node.has("format")) {
        type = ruleFactory.getFormatRule().apply(nodeName, node.get("format"), node, type, schema);
    } else if (!node.has("javaType") && !node.has("existingJavaType") && propertyTypeName.equals("string") && node.has("media")) {
        type = ruleFactory.getMediaRule().apply(nodeName, node.get("media"), node, type, schema);
    }
    return type;
}
// existingJavaType wins over the generation config: "int" is honored verbatim
// even though usePrimitives is disabled.
@Test
public void applyGeneratesIntegerUsingJavaTypeIntegerPrimitive() {
    JPackage jpackage = new JCodeModel()._package(getClass().getPackage().getName());
    ObjectNode objectNode = new ObjectMapper().createObjectNode();
    objectNode.put("type", "integer");
    objectNode.put("existingJavaType", "int");
    when(config.isUsePrimitives()).thenReturn(false);
    JType result = rule.apply("fooBar", objectNode, null, jpackage, null);
    assertThat(result.fullName(), is("int"));
}
/**
 * Derives the key serde features for the given schema/format pair.
 * Wrapping decisions only apply when the key consists of a single column.
 */
public static SerdeFeatures buildKeyFeatures(
    final LogicalSchema schema,
    final Format keyFormat
) {
    final boolean singleKeyColumn = schema.key().size() == 1;
    return buildKeyFeatures(keyFormat, singleKeyColumn);
}
@Test public void shouldNotSetUnwrappedKeysIfKeyFormatsSupportsOnlyWrapping() { // When: final SerdeFeatures result = SerdeFeaturesFactory.buildKeyFeatures( SINGLE_FIELD_SCHEMA, PROTOBUF ); // Then: assertThat(result.findAny(SerdeFeatures.WRAPPING_FEATURES), is(Optional.empty())); }
/** Delegates to the metastore client to list all table names in {@code dbName}. */
@Override
public List<String> getAllTableNames(String dbName) {
    return this.client.getAllTableNames(dbName);
}
// Verifies getAllTableNames() delegates to the metastore client for the given database.
@Test
public void testGetAllTableNames() {
    HiveMetaClient client = new MockedHiveMetaClient();
    HiveMetastore metastore = new HiveMetastore(client, "xxx", MetastoreType.HMS);
    // Renamed from "databaseNames": the call returns table names, not databases.
    List<String> tableNames = metastore.getAllTableNames("xxx");
    Assert.assertEquals(Lists.newArrayList("table1", "table2"), tableNames);
}
/** Obtains the named metric using the default {@code COUNT} unit. */
public static Metric metric(String name) {
    return MetricsImpl.metric(name, Unit.COUNT);
}
// Running the same pipeline twice must reset user-defined metrics: the second
// execution reports 5 again rather than an accumulated 10.
@Test
public void metricsAreRestartedIfPipelineIsRunTwice() {
    pipeline.readFrom(TestSources.items(0L, 1L, 2L, 3L, 4L))
        .filter(l -> {
            // Counts every item that flows through the filter.
            Metrics.metric("total").increment();
            return true;
        })
        .writeTo(Sinks.noop());
    Job job = runPipeline(pipeline.toDag());
    new JobMetricsChecker(job).assertSummedMetricValue("total", 5);
    // Second run of the identical DAG must start the metric from zero.
    job = runPipeline(pipeline.toDag());
    new JobMetricsChecker(job).assertSummedMetricValue("total", 5);
}
/**
 * Maps an ActiveMQ destination to its STOMP destination string. Temporary
 * destinations created by this converter keep their original STOMP name; all
 * other destinations get a prefix derived from queue/topic and temporary/permanent.
 */
@Override
public String convertDestination(ProtocolConverter converter, Destination d) {
    if (d == null) {
        return null;
    }
    final ActiveMQDestination amqDestination = (ActiveMQDestination) d;
    // Locally-created temp destinations are returned under their STOMP-visible name.
    final String createdTempName = converter.getCreatedTempDestinationName(amqDestination);
    if (createdTempName != null) {
        return createdTempName;
    }
    final String prefix;
    if (amqDestination.isQueue()) {
        prefix = amqDestination.isTemporary() ? "/remote-temp-queue/" : "/queue/";
    } else {
        prefix = amqDestination.isTemporary() ? "/remote-temp-topic/" : "/topic/";
    }
    return prefix + amqDestination.getPhysicalName();
}
// "/remote-temp-queue/test" must parse to a non-composite temporary queue named "test".
@Test(timeout = 10000)
public void testConvertRemoteTempQueue() throws Exception {
    ActiveMQDestination converted =
        translator.convertDestination(converter, "/remote-temp-queue/test", false);
    assertFalse(converted.isComposite());
    assertEquals("test", converted.getPhysicalName());
    assertEquals(ActiveMQDestination.TEMP_QUEUE_TYPE, converted.getDestinationType());
}
/**
 * Upserts {@code item} into the container under {@code partitionKey}.
 * Both the item and the partition key are validated eagerly, before the
 * reactive container pipeline is composed.
 */
public <T> Mono<CosmosItemResponse<T>> upsertItem(
    final T item,
    final PartitionKey partitionKey,
    final CosmosItemRequestOptions itemRequestOptions) {
    CosmosDbUtils.validateIfParameterIsNotEmpty(item, PARAM_ITEM);
    CosmosDbUtils.validateIfParameterIsNotEmpty(partitionKey, PARAM_PARTITION_KEY);
    return applyToContainer(asyncContainer ->
        asyncContainer.upsertItem(item, partitionKey, itemRequestOptions));
}
// Null or empty items must be rejected up front, before any Cosmos interaction.
@Test
void upsertItem() {
    final CosmosAsyncContainer container = mock(CosmosAsyncContainer.class);
    final CosmosDbContainerOperations operations =
        new CosmosDbContainerOperations(Mono.just(container));
    CosmosDbTestUtils.assertIllegalArgumentException(() -> operations.upsertItem(null, null, null));
    CosmosDbTestUtils.assertIllegalArgumentException(() -> operations.upsertItem("", null, null));
    CosmosDbTestUtils.assertIllegalArgumentException(() -> operations.upsertItem("tes", null, null));
}
/**
 * Splits a response buffer into {@link StreamedRow}s. Rows are
 * newline-delimited JSON; stray newlines are skipped, JSON array framing is
 * stripped by {@code toJsonMsg}, and every deserialized row is post-processed
 * by {@code addHostInfo} before being collected.
 */
public static List<StreamedRow> toRows(
    final Buffer buff,
    final Function<StreamedRow, StreamedRow> addHostInfo
) {
    final List<StreamedRow> rows = new ArrayList<>();
    int begin = 0;
    // Iterate one past the end so a trailing row without a final newline is flushed.
    for (int i = 0; i <= buff.length(); i++) {
        if ((i == buff.length() && (i - begin > 1))
            || (i < buff.length() && buff.getByte(i) == (byte) '\n')) {
            if (begin != i) {
                // Ignore random newlines - the server can send these
                final Buffer sliced = buff.slice(begin, i);
                final Buffer tidied = toJsonMsg(sliced, true);
                if (tidied.length() > 0) {
                    final StreamedRow row = deserialize(tidied, StreamedRow.class);
                    rows.add(addHostInfo.apply(row));
                }
            }
            begin = i + 1;
        }
    }
    return rows;
}
// A buffer containing a header row plus two data rows (with a trailing newline)
// must parse into exactly three StreamedRows with the header metadata intact.
@Test
public void toRows() {
    // When:
    final List<StreamedRow> rows = KsqlTargetUtil.toRows(Buffer.buffer(
        "[{\"header\":{\"queryId\":\"query_id_10\",\"schema\":\"`col1` STRING\"}},\n"
        + "{\"row\":{\"columns\":[\"Row1\"]}},\n"
        + "{\"row\":{\"columns\":[\"Row2\"]}},\n"), Functions.identity());
    // Then:
    assertThat(rows.size(), is(3));
    final StreamedRow row = rows.get(0);
    assertThat(row.getHeader().isPresent(), is(true));
    assertThat(row.getHeader().get().getQueryId().toString(), is("query_id_10"));
    assertThat(row.getHeader().get().getSchema().key(), is(Collections.emptyList()));
    assertThat(row.getHeader().get().getSchema().value().size(), is(1));
    assertThat(row.getHeader().get().getSchema().value().get(0),
        is(Column.of(ColumnName.of("col1"), SqlTypes.STRING, Namespace.VALUE, 0)));
    final StreamedRow row2 = rows.get(1);
    assertThat(row2.getRow().isPresent(), is(true));
    assertThat(row2.getRow().get().getColumns(), is(ImmutableList.of("Row1")));
    final StreamedRow row3 = rows.get(2);
    assertThat(row3.getRow().isPresent(), is(true));
    assertThat(row3.getRow().get().getColumns(), is(ImmutableList.of("Row2")));
}
/**
 * OOM remediation loop: keeps killing containers until the memory cgroup no
 * longer reports an under-OOM condition. Throws if no container could be
 * killed while the cgroup still reports OOM.
 */
@Override
public void run() {
    try {
        // We kill containers until the kernel reports the OOM situation resolved
        // Note: If the kernel has a delay this may kill more than necessary
        while (true) {
            String status = cgroups.getCGroupParam(
                CGroupsHandler.CGroupController.MEMORY, "", CGROUP_PARAM_MEMORY_OOM_CONTROL);
            if (!status.contains(CGroupsHandler.UNDER_OOM)) {
                // OOM condition cleared; stop killing.
                break;
            }
            boolean containerKilled = killContainer();
            if (!containerKilled) {
                // This can happen, if SIGKILL did not clean up
                // non-PGID or containers or containers launched by other users
                // or if a process was put to the root YARN cgroup.
                throw new YarnRuntimeException(
                    "Could not find any containers but CGroups "
                    + "reserved for containers ran out of memory. "
                    + "I am giving up");
            }
        }
    } catch (ResourceHandlerException ex) {
        LOG.warn("Could not fetch OOM status. "
            + "This is expected at shutdown. Exiting.", ex);
    }
}
// With two opportunistic containers (started at times 1 and 2) and one
// guaranteed container all over their limits, resolving OOM must kill exactly
// one container: the opportunistic container launched later (c2, pid 1235).
@Test
public void testKillOneLaterOpportunisticContainerUponOOM() throws Exception {
    ConcurrentHashMap<ContainerId, Container> containers = new ConcurrentHashMap<>();
    int currentContainerId = 0;
    // c1/c2: opportunistic (guaranteed=false), c3: guaranteed.
    Container c1 = createContainer(currentContainerId++, false, 1, true);
    containers.put(c1.getContainerId(), c1);
    Container c2 = createContainer(currentContainerId++, false, 2, true);
    containers.put(c2.getContainerId(), c2);
    Container c3 = createContainer(currentContainerId++, true, 1, true);
    containers.put(c3.getContainerId(), c3);
    ContainerExecutor ex = createContainerExecutor(containers);
    Context context = mock(Context.class);
    when(context.getContainers()).thenReturn(containers);
    when(context.getContainerExecutor()).thenReturn(ex);
    CGroupsHandler cGroupsHandler = mock(CGroupsHandler.class);
    // First poll reports under_oom, second reports it resolved -> one kill round.
    when(cGroupsHandler.getCGroupParam(
        CGroupsHandler.CGroupController.MEMORY, "", CGROUP_PARAM_MEMORY_OOM_CONTROL))
        .thenReturn("under_oom 1")
        .thenReturn("under_oom 0");
    // Each container exposes one pid, then an empty procs file after the kill.
    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
        c1.getContainerId().toString(), CGROUP_PROCS_FILE))
        .thenReturn("1234").thenReturn("");
    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
        c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
        .thenReturn(getMB(9));
    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
        c1.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
        .thenReturn(getMB(9));
    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
        c2.getContainerId().toString(), CGROUP_PROCS_FILE))
        .thenReturn("1235").thenReturn("");
    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
        c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
        .thenReturn(getMB(9));
    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
        c2.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
        .thenReturn(getMB(9));
    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
        c3.getContainerId().toString(), CGROUP_PROCS_FILE))
        .thenReturn("1236").thenReturn("");
    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
        c3.getContainerId().toString(), CGROUP_PARAM_MEMORY_USAGE_BYTES))
        .thenReturn(getMB(9));
    when(cGroupsHandler.getCGroupParam(CGroupsHandler.CGroupController.MEMORY,
        c3.getContainerId().toString(), CGROUP_PARAM_MEMORY_MEMSW_USAGE_BYTES))
        .thenReturn(getMB(9));
    DefaultOOMHandler handler = new DefaultOOMHandler(context, false) {
        @Override
        protected CGroupsHandler getCGroupsHandler() {
            return cGroupsHandler;
        }
    };
    handler.run();
    // Exactly one kill: the later opportunistic container c2 (pid 1235).
    verify(ex, times(1)).signalContainer(
        new ContainerSignalContext.Builder()
            .setPid("1235")
            .setContainer(c2)
            .setSignal(ContainerExecutor.Signal.KILL)
            .build()
    );
    verify(ex, times(1)).signalContainer(any());
}
/**
 * Creates a JSON serializer bound to {@link IOStatisticsSnapshot}.
 * NOTE(review): the two boolean flags map positionally onto JsonSerialization's
 * constructor options — confirm their meaning against that class before changing.
 */
public static JsonSerialization<IOStatisticsSnapshot> serializer() {
    return new JsonSerialization<>(IOStatisticsSnapshot.class, false, true);
}
// A snapshot serialized to JSON must deserialize back into an equivalent instance.
@Test
public void testJsonRoundTrip() throws Throwable {
    JsonSerialization<IOStatisticsSnapshot> json = IOStatisticsSnapshot.serializer();
    String serialized = json.toJson(snapshot);
    LOG.info("serialized form\n{}", serialized);
    IOStatisticsSnapshot roundTripped = json.fromJson(serialized);
    verifyDeserializedInstance(roundTripped);
}
/**
 * Reads from {@code channel} starting at {@code position} until the destination
 * buffer is full or end-of-file is reached. Unlike a single
 * {@link FileChannel#read} call, this loops over short reads. If EOF is hit
 * before the buffer fills, the method returns normally with the buffer only
 * partially filled — callers can detect this via {@code hasRemaining()}.
 *
 * @param channel channel to read from (position argument form; channel position unchanged)
 * @param destinationBuffer buffer to fill
 * @param position absolute file position to start reading at
 * @throws IllegalArgumentException if {@code position} is negative
 * @throws IOException if the underlying read fails
 */
public static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position) throws IOException {
    if (position < 0) {
        throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position);
    }
    long currentPosition = position;
    int bytesRead;
    do {
        bytesRead = channel.read(destinationBuffer, currentPosition);
        if (bytesRead > 0) {
            // Only advance by bytes actually read; previously the EOF marker (-1)
            // was added to the position, leaving it off by one (harmless since the
            // loop exits, but misleading).
            currentPosition += bytesRead;
        }
    } while (bytesRead != -1 && destinationBuffer.hasRemaining());
}
// When the channel signals EOF (-1) on the first read, readFully must return
// normally: the bytes delivered before EOF stay in the buffer and the unused
// capacity remains available.
@Test
public void testReadFullyIfEofIsReached() throws IOException {
    final FileChannel channelMock = mock(FileChannel.class);
    final int bufferSize = 100;
    final String fileChannelContent = "abcdefghkl";
    ByteBuffer buffer = ByteBuffer.allocate(bufferSize);
    // The mocked read fills the buffer with the content but reports EOF.
    when(channelMock.read(any(), anyLong())).then(invocation -> {
        ByteBuffer bufferArg = invocation.getArgument(0);
        bufferArg.put(fileChannelContent.getBytes());
        return -1;
    });
    Utils.readFully(channelMock, buffer, 0L);
    assertEquals("abcdefghkl", new String(buffer.array(), 0, buffer.position()));
    assertEquals(fileChannelContent.length(), buffer.position());
    assertTrue(buffer.hasRemaining());
    verify(channelMock, atLeastOnce()).read(any(), anyLong());
}
/**
 * Integer division rounding toward positive infinity, via the identity
 * ceil(x / y) == -floor(-x / y).
 */
public static int ceilDiv(int x, int y) {
    final int flooredNegation = Math.floorDiv(-x, y);
    return -flooredNegation;
}
// Boundary coverage for ceilDiv: zero dividend, exact multiples, one-over a
// multiple, and Integer.MAX_VALUE as the divisor.
@Test
public void testCeilDiv() {
    Assert.assertEquals(MathUtils.ceilDiv(0, 1024), 0);
    Assert.assertEquals(MathUtils.ceilDiv(1, 1024), 1);
    Assert.assertEquals(MathUtils.ceilDiv(1023, 1024), 1);
    Assert.assertEquals(MathUtils.ceilDiv(1024, 1024), 1);
    Assert.assertEquals(MathUtils.ceilDiv(1025, 1024), 2);
    Assert.assertEquals(MathUtils.ceilDiv(0, Integer.MAX_VALUE), 0);
    Assert.assertEquals(MathUtils.ceilDiv(1, Integer.MAX_VALUE), 1);
    Assert.assertEquals(MathUtils.ceilDiv(Integer.MAX_VALUE - 1, Integer.MAX_VALUE), 1);
    Assert.assertEquals(MathUtils.ceilDiv(Integer.MAX_VALUE, Integer.MAX_VALUE), 1);
}
/** Records an int sample by widening it to a long. */
public void update(int value) {
    final long widened = value;
    update(widened);
}
// An update on the histogram must be forwarded to the backing reservoir.
@Test
public void updatesTheReservoir() throws Exception {
    histogram.update(1);
    verify(reservoir).update(1);
}
/**
 * Returns the abbreviation of the retry-reason category matching the failure,
 * or {@code null} if none matches.
 * NOTE(review): the loop deliberately does not break — a later matching entry in
 * rankedReasonCategories overwrites an earlier one, which implies the list is
 * ordered lowest-to-highest priority. Confirm before reordering or adding a break.
 */
static String getAbbreviation(Exception ex, Integer statusCode, String storageErrorMessage) {
    String result = null;
    for (RetryReasonCategory retryReasonCategory : rankedReasonCategories) {
        final String abbreviation = retryReasonCategory.captureAndGetAbbreviation(ex, statusCode, storageErrorMessage);
        if (abbreviation != null) {
            result = abbreviation;
        }
    }
    return result;
}
// A 503 carrying the account-wide TPS-limit error message must map to the
// TPS-limit-breach abbreviation.
@Test
public void testOperationLimitRetryReason() {
    final String abbreviation = RetryReason.getAbbreviation(
        null, HTTP_UNAVAILABLE, TPS_OVER_ACCOUNT_LIMIT.getErrorMessage());
    Assertions.assertThat(abbreviation).isEqualTo(TPS_LIMIT_BREACH_ABBREVIATION);
}
/**
 * Handles a successful share-group heartbeat response: validates the error
 * code, ignores responses while leaving/left the group, completes an in-flight
 * leave, refreshes the member id and epoch (propagating the id to client
 * telemetry labels on change), and applies any new assignment when the state
 * machine allows it.
 *
 * @throws IllegalArgumentException if the response carries a non-NONE error code
 */
@Override
public void onHeartbeatSuccess(ShareGroupHeartbeatResponseData response) {
    if (response.errorCode() != Errors.NONE.code()) {
        String errorMessage = String.format(
            "Unexpected error in Heartbeat response. Expected no error, but received: %s",
            Errors.forCode(response.errorCode())
        );
        throw new IllegalArgumentException(errorMessage);
    }
    MemberState state = state();
    if (state == MemberState.LEAVING) {
        log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} is "
            + "already leaving the group.", memberId, memberEpoch);
        return;
    }
    if (state == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) {
        log.debug("Member {} with epoch {} received a successful response to the heartbeat "
            + "to leave the group and completed the leave operation. ", memberId, memberEpoch);
        return;
    }
    if (isNotInGroup()) {
        log.debug("Ignoring heartbeat response received from broker. Member {} is in {} state"
            + " so it's not a member of the group. ", memberId, state);
        return;
    }
    // Update the group member id label in the client telemetry reporter if the member id has
    // changed. Initially the member id is empty, and it is updated when the member joins the
    // group. This is done here to avoid updating the label on every heartbeat response. Also
    // check if the member id is null, as the schema defines it as nullable.
    if (response.memberId() != null && !response.memberId().equals(memberId)) {
        clientTelemetryReporter.ifPresent(reporter -> reporter.updateMetricsLabels(
            Collections.singletonMap(ClientTelemetryProvider.GROUP_MEMBER_ID, response.memberId())));
    }
    this.memberId = response.memberId();
    updateMemberEpoch(response.memberEpoch());
    ShareGroupHeartbeatResponseData.Assignment assignment = response.assignment();
    if (assignment != null) {
        if (!state.canHandleNewAssignment()) {
            // New assignment received but member is in a state where it cannot take new
            // assignments (ex. preparing to leave the group)
            log.debug("Ignoring new assignment {} received from server because member is in {} state.",
                assignment, state);
            return;
        }
        // Normalize the wire assignment into topicId -> sorted partition set before applying.
        Map<Uuid, SortedSet<Integer>> newAssignment = new HashMap<>();
        assignment.topicPartitions().forEach(topicPartition ->
            newAssignment.put(topicPartition.topicId(), new TreeSet<>(topicPartition.partitions())));
        processAssignmentReceived(newAssignment);
    }
}
// A rebalance that starts (via a new assignment) but then fails on heartbeat
// must not be counted as completed: rebalanceTotal stays at 0.
@Test
public void testRebalanceMetricsOnFailedRebalance() {
    ShareMembershipManager membershipManager = createMembershipManagerJoiningGroup();
    ShareGroupHeartbeatResponse heartbeatResponse =
        createShareGroupHeartbeatResponse(new ShareGroupHeartbeatResponseData.Assignment());
    membershipManager.onHeartbeatSuccess(heartbeatResponse.data());
    Uuid topicId = Uuid.randomUuid();
    // A fresh assignment kicks off a rebalance.
    receiveAssignment(topicId, Arrays.asList(0, 1), membershipManager);
    // sleep for an arbitrary amount
    time.sleep(2300);
    assertTrue(rebalanceMetricsManager.rebalanceStarted());
    membershipManager.onHeartbeatFailure(false);
    assertEquals(0d, getMetricValue(metrics, rebalanceMetricsManager.rebalanceTotal));
}
/**
 * Deletes a file configuration by id.
 * The master configuration must not be deleted; the cached client is evicted
 * after the row is removed.
 */
@Override
public void deleteFileConfig(Long id) {
    // Validate the config exists.
    FileConfigDO config = validateFileConfigExists(id);
    if (Boolean.TRUE.equals(config.getMaster())) {
        // Refuse to delete the master configuration.
        throw exception(FILE_CONFIG_DELETE_FAIL_MASTER);
    }
    // Delete the row.
    fileConfigMapper.deleteById(id);
    // Evict the cached entry for this config.
    clearCache(id, null);
}
// Deleting a non-existent config id must fail with FILE_CONFIG_NOT_EXISTS.
@Test
public void testDeleteFileConfig_notExists() {
    // Prepare an id that does not exist.
    Long id = randomLongId();
    // Invoke and assert the expected service exception.
    assertServiceException(() -> fileConfigService.deleteFileConfig(id), FILE_CONFIG_NOT_EXISTS);
}
/**
 * Resolves the API super path from the class-level annotation, falling back to
 * the empty string when the annotation is absent or its path is blank.
 */
@Override
protected String buildApiSuperPath(final Class<?> clazz, final ShenyuSofaClient beanShenyuClient) {
    final boolean hasExplicitPath =
        Objects.nonNull(beanShenyuClient) && !StringUtils.isBlank(beanShenyuClient.path());
    return hasExplicitPath ? beanShenyuClient.path() : "";
}
// A blank annotation path must resolve to the empty super path, with path()
// read exactly once.
@Test
public void testBuildApiSuperPathWhenBeanShenyuClientPathIsEmpty() {
    Class<?> targetClass = Class.class;
    given(shenyuSofaClient.path()).willReturn("");
    String superPath = sofaServiceEventListener.buildApiSuperPath(targetClass, shenyuSofaClient);
    verify(shenyuSofaClient, times(1)).path();
    assertEquals("", superPath);
}
/** Collects the key and value schemas registered for {@code topicName}. */
public MultiSchemaInfo getTopicInfo(final String topicName) {
    final Set<SchemaInfo> keySchemas = getTopicSchemas(topicName, IS_KEY_SCHEMA);
    final Set<SchemaInfo> valueSchemas = getTopicSchemas(topicName, IS_VALUE_SCHEMA);
    return new MultiSchemaInfo(keySchemas, valueSchemas);
}
@Test public void shouldThrowIfTopicNameIsNotFound() { // When final Exception e = assertThrows( IllegalArgumentException.class, () -> querySchemas.getTopicInfo("t1")); // Then assertThat(e.getMessage(), is("Unknown topic: t1")); }
/** Renders the configured header columns as a single delimited header line. */
static String headerLine(CSVFormat csvFormat) {
    final String delimiter = String.valueOf(csvFormat.getDelimiter());
    return String.join(delimiter, csvFormat.getHeader());
}
// QuoteMode.ALL_NON_NULL must not affect parsing: quoted cells parse to their
// unquoted values and the configured null string ("N/A") maps to null.
@Test
public void givenQuoteModeAllNonNull_isNoop() {
    CSVFormat csvFormat = csvFormat().withNullString("N/A").withQuoteMode(QuoteMode.ALL_NON_NULL);
    PCollection<String> input =
        pipeline.apply(
            Create.of(
                headerLine(csvFormat),
                "\"a\",\"1\",N/A",
                "\"b\",\"2\",\"2.2\"",
                "\"c\",\"3\",\"3.3\""));
    CsvIOStringToCsvRecord underTest = new CsvIOStringToCsvRecord(csvFormat);
    CsvIOParseResult<List<String>> result = input.apply(underTest);
    PAssert.that(result.getOutput())
        .containsInAnyOrder(
            Arrays.asList(
                Arrays.asList("a", "1", null),
                Arrays.asList("b", "2", "2.2"),
                Arrays.asList("c", "3", "3.3")));
    // No parse errors expected.
    PAssert.that(result.getErrors()).empty();
    pipeline.run();
}
/** @return the metadata log directory, or empty when none is configured */
public Optional<String> metadataLogDir() {
    return this.metadataLogDir;
}
// FOO's metadata log directory must be the configured /tmp/dir4.
@Test
public void testMetadataLogDirForFoo() {
    Optional<String> logDir = FOO.metadataLogDir();
    assertEquals(Optional.of("/tmp/dir4"), logDir);
}
/**
 * Finds the email recipients subscribed to {@code dispatcherKey} notifications
 * on the given project, filtered down to subscribers actually authorized on it.
 */
@Override
public Set<EmailRecipient> findSubscribedEmailRecipients(String dispatcherKey, String projectKey,
    SubscriberPermissionsOnProject subscriberPermissionsOnProject) {
    verifyProjectKey(projectKey);
    try (DbSession dbSession = dbClient.openSession(false)) {
        Set<EmailSubscriberDto> subscribers = dbClient.propertiesDao().findEmailSubscribersForNotification(
            dbSession, dispatcherKey, EmailNotificationChannel.class.getSimpleName(), projectKey);
        return keepAuthorizedEmailSubscribers(dbSession, projectKey, subscriberPermissionsOnProject, subscribers);
    }
}
// When every subscriber is a global (non-project) subscriber, authorization
// must be checked once with the global permission and never with the
// project-level permission.
@Test
public void findSubscribedEmailRecipients_does_not_call_db_for_project_permission_filtering_if_there_is_no_project_subscriber() {
    String dispatcherKey = randomAlphabetic(12);
    String globalPermission = randomAlphanumeric(4);
    String projectPermission = randomAlphanumeric(5);
    String projectKey = randomAlphabetic(6);
    // 1..10 global subscribers (second ctor arg true = global).
    Set<EmailSubscriberDto> subscribers = IntStream.range(0, 1 + new Random().nextInt(10))
        .mapToObj(i -> EmailSubscriberDto.create("user" + i, true, "user" + i + "@sonarsource.com"))
        .collect(Collectors.toSet());
    Set<String> logins = subscribers.stream().map(EmailSubscriberDto::getLogin).collect(Collectors.toSet());
    when(propertiesDao.findEmailSubscribersForNotification(dbSession, dispatcherKey, "EmailNotificationChannel", projectKey))
        .thenReturn(subscribers);
    when(authorizationDao.keepAuthorizedLoginsOnEntity(dbSession, logins, projectKey, globalPermission))
        .thenReturn(logins);
    Set<EmailRecipient> emailRecipients = underTest.findSubscribedEmailRecipients(dispatcherKey, projectKey,
        new SubscriberPermissionsOnProject(globalPermission, projectPermission));
    Set<EmailRecipient> expected = subscribers.stream().map(i -> new EmailRecipient(i.getLogin(), i.getEmail())).collect(Collectors.toSet());
    assertThat(emailRecipients)
        .isEqualTo(expected);
    // Exactly one authorization check with the global permission, none with the project permission.
    verify(authorizationDao, times(1)).keepAuthorizedLoginsOnEntity(eq(dbSession), anySet(), anyString(), eq(globalPermission));
    verify(authorizationDao, times(0)).keepAuthorizedLoginsOnEntity(eq(dbSession), anySet(), anyString(), eq(projectPermission));
}
/**
 * Telnet "cd" command: selects a default service for the current session.
 * - no arguments: prints usage help
 * - "/" or "..": clears the session's current default service
 * - otherwise: matches the argument against exported services by simple name,
 *   fully-qualified name, URL path or service key, and stores it on the channel.
 */
@Override
public String execute(CommandContext commandContext, String[] args) {
    Channel channel = commandContext.getRemote();
    if (ArrayUtils.isEmpty(args)) {
        return "Please input service name, eg: \r\ncd XxxService\r\ncd com.xxx.XxxService";
    }
    String message = args[0];
    StringBuilder buf = new StringBuilder();
    if ("/".equals(message) || "..".equals(message)) {
        // Remove the stored default service and report which one was cleared.
        String service = channel.attr(SERVICE_KEY).getAndRemove();
        buf.append("Cancelled default service ").append(service).append('.');
    } else {
        boolean found = false;
        // Accept any of the four identifying forms of an exported service.
        for (Exporter<?> exporter : dubboProtocol.getExporters()) {
            if (message.equals(exporter.getInvoker().getInterface().getSimpleName())
                || message.equals(exporter.getInvoker().getInterface().getName())
                || message.equals(exporter.getInvoker().getUrl().getPath())
                || message.equals(exporter.getInvoker().getUrl().getServiceKey())) {
                found = true;
                break;
            }
        }
        if (found) {
            channel.attr(SERVICE_KEY).set(message);
            buf.append("Used the ")
                .append(message)
                .append(" as default.\r\nYou can cancel default service by command: cd /");
        } else {
            buf.append("No such service ").append(message);
        }
    }
    return buf.toString();
}
// Selecting an exported service by its simple name must set it as the session default.
@Test
void testChangeSimpleName() {
    ExtensionLoader.getExtensionLoader(Protocol.class)
        .getExtension(DubboProtocol.NAME)
        .export(mockInvoker);
    String output = change.execute(mockCommandContext, new String[] {"DemoService"});
    assertEquals("Used the DemoService as default.\r\nYou can cancel default service by command: cd /", output);
}
/**
 * Returns whether the JSON node carries no meaningful content, dispatching on
 * the node kind: arrays, objects, and everything else as text.
 */
protected boolean isNodeEmpty(JsonNode json) {
    if (json.isArray()) {
        return isListEmpty((ArrayNode) json);
    }
    if (json.isObject()) {
        return isObjectEmpty((ObjectNode) json);
    }
    return isEmptyText(json);
}
// An object node holding a non-empty text value must not be considered empty.
@Test
public void isNodeEmpty_objectNodeWithTextNode() {
    ObjectNode node = new ObjectNode(factory);
    node.set("key", new TextNode(VALUE));
    assertThat(expressionEvaluator.isNodeEmpty(node)).isFalse();
}
/** Creates a service under the default group. */
@Override
public void createService(String serviceName) throws NacosException {
    createService(serviceName, Constants.DEFAULT_GROUP);
}
// createService(name) must delegate to the proxy with the default group, the
// default protect threshold, empty metadata and a "none" selector.
@Test
void testCreateService1() throws NacosException {
    //given
    String serviceName = "service1";
    //when
    nacosNamingMaintainService.createService(serviceName);
    //then
    verify(serverProxy, times(1)).createService(argThat(new ArgumentMatcher<Service>() {
        @Override
        public boolean matches(Service service) {
            // Float comparison with tolerance for the default protect threshold.
            return service.getName().equals(serviceName) && service.getGroupName().equals(Constants.DEFAULT_GROUP)
                && Math.abs(service.getProtectThreshold() - Constants.DEFAULT_PROTECT_THRESHOLD) < 0.1f
                && service.getMetadata().size() == 0;
        }
    }), argThat(o -> o instanceof NoneSelector));
}
/**
 * Converts an Avro record schema into a Parquet message type.
 *
 * @throws IllegalArgumentException if the schema is not a record
 */
public MessageType convert(Schema avroSchema) {
    if (!avroSchema.getType().equals(Schema.Type.RECORD)) {
        throw new IllegalArgumentException("Avro schema must be a record.");
    }
    final String messageName = avroSchema.getFullName();
    return new MessageType(messageName, convertFields(avroSchema.getFields(), ""));
}
// DATE must round-trip as an annotated int32; applying the DATE annotation to
// any other primitive must be rejected by the converter.
@Test
public void testDateType() throws Exception {
    Schema date = LogicalTypes.date().addToSchema(Schema.create(INT));
    Schema expected = Schema.createRecord(
        "myrecord", null, null, false, Arrays.asList(new Schema.Field("date", date, null, null)));
    testRoundTripConversion(expected, "message myrecord {\n" + "  required int32 date (DATE);\n" + "}\n");
    // Every non-int32 primitive annotated with DATE must be refused.
    for (PrimitiveTypeName primitive :
        new PrimitiveTypeName[] {INT64, INT96, FLOAT, DOUBLE, BOOLEAN, BINARY, FIXED_LEN_BYTE_ARRAY}) {
        final PrimitiveType type;
        if (primitive == FIXED_LEN_BYTE_ARRAY) {
            // Fixed-length byte arrays need an explicit length.
            type = new PrimitiveType(REQUIRED, primitive, 12, "test", DATE);
        } else {
            type = new PrimitiveType(REQUIRED, primitive, "test", DATE);
        }
        assertThrows(
            "Should not allow TIME_MICROS with " + primitive,
            IllegalArgumentException.class,
            () -> new AvroSchemaConverter().convert(message(type)));
    }
}
/** @return a synchronous map view of all bundles currently owned, keyed by bundle */
public Map<NamespaceBundle, OwnedBundle> getOwnedBundles() {
    return ownedBundlesCache.synchronous().asMap();
}
// A freshly constructed cache must be usable and expose a (possibly empty) bundle map.
@Test
public void testConstructor() {
    OwnershipCache ownershipCache = new OwnershipCache(this.pulsar, nsService);
    assertNotNull(ownershipCache);
    assertNotNull(ownershipCache.getOwnedBundles());
}
/**
 * REST endpoint: creates a note from a JSON {@code NewNoteRequest}.
 * Falls back to the configured default interpreter group when the request does
 * not supply one, then adds any requested paragraphs under the caller's
 * identity and returns the new note id.
 */
@POST
@ZeppelinApi
public Response createNote(String message) throws IOException {
    String user = authenticationService.getPrincipal();
    LOGGER.info("Creating new note by JSON {}", message);
    NewNoteRequest request = GSON.fromJson(message, NewNoteRequest.class);
    String defaultInterpreterGroup = request.getDefaultInterpreterGroup();
    if (StringUtils.isBlank(defaultInterpreterGroup)) {
        // Fall back to the server-wide default interpreter group.
        defaultInterpreterGroup = zConf.getString(ZeppelinConfiguration.ConfVars.ZEPPELIN_INTERPRETER_GROUP_DEFAULT);
    }
    String noteId = notebookService.createNote(
        request.getName(),
        defaultInterpreterGroup,
        request.getAddingEmptyParagraph(),
        getServiceContext(),
        new RestServiceCallback<>());
    return notebook.processNote(noteId, note -> {
        AuthenticationInfo subject = new AuthenticationInfo(authenticationService.getPrincipal());
        if (request.getParagraphs() != null) {
            // Populate the new note with the requested paragraphs.
            for (NewParagraphRequest paragraphRequest : request.getParagraphs()) {
                Paragraph p = note.addNewParagraph(subject);
                initParagraph(p, paragraphRequest, user);
            }
        }
        return new JsonResponse<>(Status.OK, "", note.getId()).build();
    });
}
/**
 * End-to-end check of POST /notebook/job/{noteId}/{paragraphId}: a blank
 * paragraph must run to FINISHED, while a paragraph with invalid code must
 * end in a non-FINISHED state. The note is always removed in the finally
 * block so reruns start clean.
 */
@Test
void testRunParagraphJob() throws Exception {
  LOG.info("Running testRunParagraphJob");
  String note1Id = null;
  try {
    note1Id = notebook.createNote("note1", anonymous);
    Paragraph p = notebook.processNote(note1Id, note1 -> {
      return note1.addNewParagraph(AuthenticationInfo.ANONYMOUS);
    });

    // run blank paragraph: submission is accepted and the job finishes
    CloseableHttpResponse post = httpPost("/notebook/job/" + note1Id + "/" + p.getId(), "");
    assertThat(post, isAllowed());
    Map<String, Object> resp = gson.fromJson(EntityUtils.toString(post.getEntity(), StandardCharsets.UTF_8),
        new TypeToken<Map<String, Object>>() {}.getType());
    assertEquals("OK", resp.get("status"));
    post.close();
    p.waitUntilFinished();
    assertEquals(Job.Status.FINISHED, p.getStatus());

    // run non-blank paragraph: the unterminated string literal is deliberately
    // broken python, so submission still succeeds but the job must NOT finish
    p.setText("%python \n print(\"hello");
    post = httpPost("/notebook/job/" + note1Id + "/" + p.getId(), "");
    assertThat(post, isAllowed());
    resp = gson.fromJson(EntityUtils.toString(post.getEntity(), StandardCharsets.UTF_8),
        new TypeToken<Map<String, Object>>() {}.getType());
    assertEquals("OK", resp.get("status"));
    post.close();
    p.waitUntilFinished();
    assertNotEquals(Job.Status.FINISHED, p.getStatus());
  } finally {
    // cleanup
    if (null != note1Id) {
      notebook.removeNote(note1Id, anonymous);
    }
  }
}
/**
 * Builds a fully configured {@link Containerizer} from the common CLI options:
 * creates the base containerizer, wires up event handlers on the given logger,
 * then applies cache/tag/registry configuration.
 *
 * @throws InvalidImageReferenceException if the target image reference is invalid
 * @throws FileNotFoundException if a referenced credential file is missing
 */
public static Containerizer from(
    CommonCliOptions commonCliOptions, ConsoleLogger logger, CacheDirectories cacheDirectories)
    throws InvalidImageReferenceException, FileNotFoundException {
  Containerizer result = create(commonCliOptions, logger);
  applyHandlers(result, logger);
  applyConfiguration(result, commonCliOptions, cacheDirectories);
  return result;
}
// Verifies that Containerizers.from honors all common CLI flags at once:
// the credential/serialization system properties, insecure-registry allowance,
// both cache directories, and the additional tags.
@Test
public void testApplyConfiguration_withValues()
    throws InvalidImageReferenceException, CacheDirectoryCreationException, FileNotFoundException {
  CommonCliOptions commonCliOptions = CommandLine.populateCommand(
      new CommonCliOptions(),
      "-t=test-image-ref",
      "--send-credentials-over-http",
      "--allow-insecure-registries",
      "--additional-tags=tag1,tag2",
      "--serialize");
  ContainerizerTestProxy containerizer = new ContainerizerTestProxy(
      Containerizers.from(commonCliOptions, consoleLogger, cacheDirectories));
  // Flags that are applied as JVM-wide system properties.
  assertThat(Boolean.getBoolean(JibSystemProperties.SEND_CREDENTIALS_OVER_HTTP)).isTrue();
  assertThat(Boolean.getBoolean(JibSystemProperties.SERIALIZE)).isTrue();
  // Flags applied directly on the containerizer instance.
  assertThat(containerizer.getAllowInsecureRegistries()).isTrue();
  assertThat(containerizer.getBaseImageLayersCacheDirectory()).isEqualTo(baseImageCache);
  assertThat(containerizer.getApplicationsLayersCacheDirectory()).isEqualTo(applicationCache);
  assertThat(containerizer.getAdditionalTags()).isEqualTo(ImmutableSet.of("tag1", "tag2"));
}
/**
 * Converts the given string property to an instance of {@code targetClass}
 * using the converter registered for that exact class.
 *
 * @param property    the raw string value to convert (converter-specific null handling)
 * @param targetClass the desired result type; a converter must be registered for it
 * @return the converted value
 * @throws NullPointerException           if {@code targetClass} is null
 * @throws MissingFormatArgumentException if no converter is registered for
 *                                        {@code targetClass}
 */
@SuppressWarnings("unchecked") // registry invariant: each class maps to a converter producing that class
public <T> T convert(String property, Class<T> targetClass) {
    // Fail fast with a clear message instead of an NPE from getCanonicalName() below.
    if (targetClass == null) {
        throw new NullPointerException("targetClass must not be null");
    }
    final AbstractPropertyConverter<?> converter = converterRegistry.get(targetClass);
    if (converter == null) {
        throw new MissingFormatArgumentException("converter not found, can't convert from String to "
                + targetClass.getCanonicalName());
    }
    return (T) converter.convert(property);
}
// Each canonical "false" spelling accepted by the converter must yield
// Boolean.FALSE.
@Test
void testConvertBooleanFalse() {
    for (String falsy : new String[] {"false", "off", "no", "0"}) {
        assertFalse(compositeConverter.convert(falsy, Boolean.class));
    }
}
/**
 * Dispatches the exchange to the Glance operation named by the resolved
 * operation string (reserve, create, update, upload, get, getAll, delete).
 *
 * @throws IllegalArgumentException if the operation is not supported
 */
@Override
public void process(Exchange exchange) throws Exception {
    final String operation = getOperation(exchange);
    // Equals-based dispatch; like the former switch, a null operation fails fast with an NPE.
    if (operation.equals(GlanceConstants.RESERVE)) {
        doReserve(exchange);
    } else if (operation.equals(OpenstackConstants.CREATE)) {
        doCreate(exchange);
    } else if (operation.equals(OpenstackConstants.UPDATE)) {
        doUpdate(exchange);
    } else if (operation.equals(GlanceConstants.UPLOAD)) {
        doUpload(exchange);
    } else if (operation.equals(OpenstackConstants.GET)) {
        doGet(exchange);
    } else if (operation.equals(OpenstackConstants.GET_ALL)) {
        doGetAll(exchange);
    } else if (operation.equals(OpenstackConstants.DELETE)) {
        doDelete(exchange);
    } else {
        throw new IllegalArgumentException("Unsupported operation " + operation);
    }
}
/**
 * Verifies that a CREATE operation forwards all image headers plus the body
 * stream to the image service and returns the created image. The temp image
 * stream is now opened with try-with-resources (the original leaked the
 * FileInputStream) and the temp file is marked for deletion on exit.
 */
@Test
public void createTest() throws Exception {
    msg.setHeader(OpenstackConstants.OPERATION, OpenstackConstants.CREATE);
    msg.setHeader(OpenstackConstants.NAME, dummyImage.getName());
    msg.setHeader(GlanceConstants.OWNER, dummyImage.getOwner());
    msg.setHeader(GlanceConstants.MIN_DISK, dummyImage.getMinDisk());
    msg.setHeader(GlanceConstants.MIN_RAM, dummyImage.getMinRam());
    msg.setHeader(GlanceConstants.CHECKSUM, dummyImage.getChecksum());
    msg.setHeader(GlanceConstants.DISK_FORMAT, dummyImage.getDiskFormat());
    msg.setHeader(GlanceConstants.CONTAINER_FORMAT, dummyImage.getContainerFormat());

    final File imageFile = File.createTempFile("image", ".iso");
    imageFile.deleteOnExit();
    try (InputStream is = new FileInputStream(imageFile)) {
        msg.setBody(is);

        producer.process(exchange);

        verify(imageService).create(imageCaptor.capture(), payloadCaptor.capture());
        // The captured payload must wrap the very stream we sent.
        assertEquals(is, payloadCaptor.getValue().open());

        final Image result = msg.getBody(Image.class);
        assertNotNull(result.getId());
        assertEqualsImages(dummyImage, result);
    }
}
/**
 * Deprecated overload of {@code flatTransformValues} taking a key-less
 * {@code ValueTransformerSupplier}. Adapts the supplier to the
 * key-aware form so both overloads share a single implementation, then
 * delegates with an empty name.
 *
 * @throws NullPointerException if {@code valueTransformerSupplier} is null
 */
@Override
@Deprecated
public <VR> KStream<K, VR> flatTransformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier<? super V, Iterable<VR>> valueTransformerSupplier,
                                               final String... stateStoreNames) {
    Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null");
    // Wrap the key-less supplier so the key-aware code path can be reused.
    return doFlatTransformValues(
        toValueTransformerWithKeySupplier(valueTransformerSupplier),
        NamedInternal.empty(),
        stateStoreNames);
}
// The deprecated flatTransformValues overload must eagerly reject a null
// supplier with a descriptive NullPointerException (message pinned here).
@Test
@SuppressWarnings("deprecation")
public void shouldNotAllowNullValueTransformerWithKeySupplierOnFlatTransformValues() {
    final NullPointerException exception = assertThrows(
        NullPointerException.class,
        () -> testStream.flatTransformValues((ValueTransformerWithKeySupplier<Object, Object, Iterable<Object>>) null));
    assertThat(exception.getMessage(), equalTo("valueTransformerSupplier can't be null"));
}
/**
 * Splits {@code externalName} on dots that are outside quotes and appends each
 * part to {@code sb} as a dialect-quoted identifier, re-joined with dots.
 * Existing quoting/escaping on each part is stripped before re-quoting.
 */
static void quoteExternalName(StringBuilder sb, String externalName) {
    List<String> parts = splitByNonQuotedDots(externalName);
    boolean first = true;
    for (String part : parts) {
        if (!first) {
            sb.append(".");
        }
        first = false;
        // Normalize the part (unescape, then unquote) before the dialect re-quotes it.
        String cleaned = unquoteIfQuoted(unescapeQuotes(part));
        DIALECT.quoteIdentifier(sb, cleaned);
    }
}
// A fully quoted three-part name (catalog.schema.table) must survive
// quoteExternalName unchanged, each part individually re-quoted.
@Test
public void quoteExternalName_with3Quotes() {
    String externalName = "\"catalog\".\"custom_schema\".\"my_table\"";
    StringBuilder sb = new StringBuilder();
    MappingHelper.quoteExternalName(sb, externalName);
    assertThat(sb.toString()).isEqualTo("\"catalog\".\"custom_schema\".\"my_table\"");
}
/**
 * Serializes this configuration to {@code configFile} and returns a
 * {@link ZKClientConfig} backed by that file.
 *
 * The content is first written to a hidden sibling temp file and then moved
 * into place with {@link StandardCopyOption#ATOMIC_MOVE} so readers never see
 * a partially written file. The temp file is removed if writing or moving
 * fails (the original leaked it on failure).
 */
public ZKClientConfig toConfig(Path configFile) throws IOException, QuorumPeerConfig.ConfigException {
    String configString = toConfigString();
    Files.createDirectories(configFile.getParent());
    Path tempFile = Files.createTempFile(configFile.toAbsolutePath().getParent(), "." + configFile.getFileName(), ".tmp");
    try {
        Files.writeString(tempFile, configString);
        Files.move(tempFile, configFile, StandardCopyOption.ATOMIC_MOVE);
    } catch (IOException e) {
        // Don't leave a stray temp file behind on failure.
        Files.deleteIfExists(tempFile);
        throw e;
    }
    return new ZKClientConfig(configFile.toString());
}
// When built from the system TLS context, the ZooKeeper client config must
// enable secure Netty transport, pin TLSv1.3 with the expected cipher suite,
// require client auth, and plug in Vespa's SSL context supplier.
@Test
void config_when_using_system_tls_context() {
    ZkClientConfigBuilder builder = new ZkClientConfigBuilder(new MockTlsContext());
    ZKClientConfig config = builder.toConfig();
    assertEquals("true", config.getProperty(CLIENT_SECURE_PROPERTY));
    assertEquals("org.apache.zookeeper.ClientCnxnSocketNetty", config.getProperty(CLIENT_CONNECTION_SOCKET));
    assertEquals("TLSv1.3", config.getProperty(SSL_ENABLED_PROTOCOLS_PROPERTY));
    assertEquals("TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", config.getProperty(SSL_ENABLED_CIPHERSUITES_PROPERTY));
    assertEquals("NEED", config.getProperty(SSL_CLIENTAUTH_PROPERTY));
    assertEquals(VespaSslContextProvider.class.getName(), config.getProperty(SSL_CONTEXT_SUPPLIER_CLASS_PROPERTY));
}
/**
 * Returns true when either the topic is a system topic or the group is a
 * system group; false only when both are user-defined.
 */
public static boolean isSystem(String topic, String group) {
    if (TopicValidator.isSystemTopic(topic)) {
        return true;
    }
    return isSystemGroup(group);
}
// isSystem must return true when EITHER side matches: a system topic paired
// with an ordinary group, or an ordinary topic paired with a group carrying
// the CID_RMQ_SYS prefix.
@Test
public void testIsSystem_SystemTopicOrSystemGroup_ReturnsTrue() {
    String topic = "FooTopic";
    String group = "FooGroup";
    String systemTopic = TopicValidator.RMQ_SYS_TRANS_HALF_TOPIC;
    String systemGroup = MixAll.CID_RMQ_SYS_PREFIX + group;
    boolean resultTopic = BrokerMetricsManager.isSystem(systemTopic, group);
    assertThat(resultTopic).isTrue();
    boolean resultGroup = BrokerMetricsManager.isSystem(topic, systemGroup);
    assertThat(resultGroup).isTrue();
}
/**
 * UDTF that expands the given column list into all cube combinations.
 * A null input yields an empty result rather than an error.
 */
@Udtf
public <T> List<List<T>> cube(final List<T> columns) {
    return columns == null ? Collections.emptyList() : createAllCombinations(columns);
}
// cube treats nulls as ordinary values: two identical (null) columns collapse
// to a single combination [null, null] instead of the usual power set.
@Test
public void shouldHandleAllNulls() {
    // Given:
    final Object[] allNull = {null, null};

    // When:
    final List<List<Object>> result = cubeUdtf.cube(Arrays.asList(allNull));

    // Then:
    assertThat(result.size(), is(1));
    assertThat(result.get(0), is(Arrays.asList(null, null)));
}
/**
 * Returns {@code a - b}, saturating at {@link Long#MAX_VALUE} or
 * {@link Long#MIN_VALUE} instead of wrapping on overflow.
 *
 * @param a the minuend
 * @param b the subtrahend
 * @return the clamped difference
 */
public static long subtractClamped(long a, long b) {
    final long result = a - b;
    // Subtraction overflowed iff a and b have opposite signs AND the result's
    // sign differs from a's (same bitwise check Math.subtractExact uses).
    if (((a ^ b) & (a ^ result)) < 0) {
        return a >= 0 ? Long.MAX_VALUE : Long.MIN_VALUE;
    }
    return result;
}
// Pins subtractClamped behavior on three fronts: exact results when no
// overflow occurs, saturation at MAX_VALUE on positive overflow, and
// saturation at MIN_VALUE on negative overflow.
@Test
public void when_subtractClamped_then_doesNotOverflow() {
    // no overflow
    assertEquals(0, subtractClamped(0, 0));
    assertEquals(1, subtractClamped(1, 0));
    assertEquals(-1, subtractClamped(-1, 0));
    assertEquals(0, subtractClamped(Long.MAX_VALUE, Long.MAX_VALUE));
    assertEquals(0, subtractClamped(Long.MIN_VALUE, Long.MIN_VALUE));

    // overflow over MAX_VALUE
    assertEquals(Long.MAX_VALUE, subtractClamped(Long.MAX_VALUE, -1));
    assertEquals(Long.MAX_VALUE, subtractClamped(Long.MAX_VALUE, Long.MIN_VALUE));

    // overflow over MIN_VALUE
    assertEquals(Long.MIN_VALUE, subtractClamped(Long.MIN_VALUE, 1));
    assertEquals(Long.MIN_VALUE, subtractClamped(Long.MIN_VALUE, Long.MAX_VALUE));
}
/**
 * Bootstraps a standalone ApplicationHistoryServer: installs the YARN uncaught
 * exception handler, logs the startup banner, registers a composite-service
 * shutdown hook, then inits and starts the server from a YarnConfiguration
 * parsed from {@code args}. Any startup failure is logged and terminates the
 * JVM with status -1 via ExitUtil (so tests can intercept it); on that path
 * the returned reference may be a server that never started.
 */
static ApplicationHistoryServer launchAppHistoryServer(String[] args) {
  Thread
      .setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
  StringUtils.startupShutdownMessage(ApplicationHistoryServer.class, args, LOG);
  ApplicationHistoryServer appHistoryServer = null;
  try {
    appHistoryServer = new ApplicationHistoryServer();
    // Ensure the service stack is stopped cleanly on JVM shutdown.
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(appHistoryServer),
        SHUTDOWN_HOOK_PRIORITY);
    YarnConfiguration conf = new YarnConfiguration();
    // Apply generic hadoop options (e.g. -D overrides) from the command line.
    new GenericOptionsParser(conf, args);
    appHistoryServer.init(conf);
    appHistoryServer.start();
  } catch (Throwable t) {
    LOG.error("Error starting ApplicationHistoryServer", t);
    ExitUtil.terminate(-1, "Error starting ApplicationHistoryServer");
  }
  return appHistoryServer;
}
// Launches a real ApplicationHistoryServer via the static entry point and
// verifies startup does not try to terminate the JVM: ExitUtil is disabled,
// so a terminate call surfaces as ExitException and fails the test.
@Test
@Timeout(60000)
void testLaunch() throws Exception {
  ExitUtil.disableSystemExit();
  ApplicationHistoryServer historyServer = null;
  try {
    // Not able to modify the config of this test case,
    // but others have been customized to avoid conflicts
    historyServer = ApplicationHistoryServer.launchAppHistoryServer(new String[0]);
  } catch (ExitUtil.ExitException e) {
    assertEquals(0, e.status);
    ExitUtil.resetFirstExitException();
    fail();
  } finally {
    // Always stop the server so the port is freed for other tests.
    if (historyServer != null) {
      historyServer.stop();
    }
  }
}
/**
 * Returns true while any request is still outstanding: either already in
 * flight on the network client, or queued locally awaiting send.
 */
public boolean hasAnyPendingRequests() {
    if (client.hasInFlightRequests()) {
        return true;
    }
    return !unsentRequests.isEmpty();
}
// Tracks hasAnyPendingRequests through a request's full lifecycle:
// queued locally (unsent) -> in flight on the wire -> completed with a
// response. It must report true for the first two phases and false after.
@Test
public void testHasAnyPendingRequests() throws Exception {
    try (NetworkClientDelegate networkClientDelegate = newNetworkClientDelegate()) {
        NetworkClientDelegate.UnsentRequest unsentRequest = newUnsentFindCoordinatorRequest();
        networkClientDelegate.add(unsentRequest);

        // Phase 1 — unsent: pending because the local queue is non-empty.
        assertTrue(networkClientDelegate.hasAnyPendingRequests());
        assertFalse(networkClientDelegate.unsentRequests().isEmpty());
        assertFalse(client.hasInFlightRequests());

        networkClientDelegate.poll(0, time.milliseconds());

        // Phase 2 — in-flight: queue drained but the request is on the wire.
        assertTrue(networkClientDelegate.hasAnyPendingRequests());
        assertTrue(networkClientDelegate.unsentRequests().isEmpty());
        assertTrue(client.hasInFlightRequests());

        client.respond(FindCoordinatorResponse.prepareResponse(Errors.NONE, GROUP_ID, mockNode()));
        networkClientDelegate.poll(0, time.milliseconds());

        // Phase 3 — response received: nothing pending anywhere.
        assertFalse(networkClientDelegate.hasAnyPendingRequests());
        assertTrue(networkClientDelegate.unsentRequests().isEmpty());
        assertFalse(client.hasInFlightRequests());
    }
}
/**
 * Returns the GROUP BY expressions in declaration order. The backing list is
 * an ImmutableList, so exposing the internal reference cannot leak mutability
 * (hence the SpotBugs EI_EXPOSE_REP suppression).
 */
@SuppressFBWarnings(
    value = "EI_EXPOSE_REP",
    justification = "groupingExpressions is ImmutableList"
)
public List<Expression> getGroupingExpressions() {
    return groupingExpressions;
}
// The accessor must return the grouping expressions in exactly the order
// they were supplied — GROUP BY ordering is semantically significant.
@Test
public void shouldMaintainGroupByOrder() {
    // Given:
    final List<Expression> original = ImmutableList.of(exp1, exp2);
    final GroupBy groupBy = new GroupBy(Optional.empty(), original);

    // When:
    final List<Expression> result = groupBy.getGroupingExpressions();

    // Then:
    assertThat(result, is(original));
}