focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
// Runs callable on workerExecutor; on success posts (result, null), on any Throwable posts (null, t)
// to handler via executorToNotify. The handler itself is never invoked on the worker thread.
public <T> void notifyReadyAsync(Callable<T> callable, BiConsumer<T, Throwable> handler) { workerExecutor.execute( () -> { try { T result = callable.call(); executorToNotify.execute(() -> handler.accept(result, null)); } catch (Throwable t) { executorToNotify.execute(() -> handler.accept(null, t)); } }); }
// Verifies a handler throwing while processing a callable failure surfaces via the uncaught-exception handler.
@Test public void testExceptionInHandlerWhenHandlingException() throws InterruptedException { Exception exception1 = new Exception("Expected exception."); RuntimeException exception2 = new RuntimeException("Expected exception."); CountDownLatch latch = new CountDownLatch(1); notifier.notifyReadyAsync( () -> { throw exception1; }, (v, e) -> { assertEquals(exception1, e); assertNull(v); latch.countDown(); throw exception2; }); latch.await(); // The uncaught exception handler may fire after the executor has shutdown. // We need to wait on the countdown latch here. exceptionInHandlerLatch.await(10000L, TimeUnit.MILLISECONDS); assertEquals(exception2, exceptionInHandler); }
// Blocking-pop with timeout; a negative timeout short-circuits to an already-completed null future
// instead of issuing the BLPOP command.
@Override public RFuture<V> pollAsync(long timeout, TimeUnit unit) { if (timeout < 0) { return new CompletableFutureWrapper<>((V) null); } return commandExecutor.writeAsync(getRawName(), codec, RedisCommands.BLPOP_VALUE, getRawName(), toSeconds(timeout, unit)); }
// Cancelling queued pollAsync futures must not poison the single pooled connection: adds still succeed after.
@Test public void testPollAsyncCancel() { Config config = createConfig(); config.useSingleServer().setConnectionMinimumIdleSize(1).setConnectionPoolSize(1); RedissonClient redisson = Redisson.create(config); RBlockingQueue<Integer> queue1 = getQueue(redisson); for (int i = 0; i < 10; i++) { RFuture<Integer> f = queue1.pollAsync(1, TimeUnit.SECONDS); f.cancel(true); } assertThat(queue1.add(1)).isTrue(); assertThat(queue1.add(2)).isTrue(); assertThat(queue1.size()).isEqualTo(2); redisson.shutdown(); }
/**
 * Builds a {@code MethodDeclaration} with the given name and one parameter per
 * (name, type) entry in {@code parameterNameTypeMap}, preserving the map's iteration order.
 */
public static MethodDeclaration getMethodDeclaration(final String methodName, final Map<String, ClassOrInterfaceType> parameterNameTypeMap) {
    final MethodDeclaration toReturn = getMethodDeclaration(methodName);
    // Note: these are method parameters, not generic type parameters.
    final NodeList<Parameter> parameters = new NodeList<>();
    for (Map.Entry<String, ClassOrInterfaceType> entry : parameterNameTypeMap.entrySet()) {
        final Parameter parameter = new Parameter();
        parameter.setName(entry.getKey());
        parameter.setType(entry.getValue());
        parameters.add(parameter);
    }
    toReturn.setParameters(parameters);
    return toReturn;
}
// Verifies name and the full parameter list (including a generic List<KiePMMLNameValue> type) are carried over.
@Test void getParamMethodDeclaration() { String methodName = "METHOD_NAME"; final Map<String, ClassOrInterfaceType> parameterNameTypeMap = new HashMap<>(); parameterNameTypeMap.put("stringParam", parseClassOrInterfaceType(String.class.getName())); parameterNameTypeMap.put("kiePMMLNameValueParam", parseClassOrInterfaceType(KiePMMLNameValue.class.getName())); parameterNameTypeMap.put("listParam", new ClassOrInterfaceType(null, new SimpleName(List.class.getName()), NodeList.nodeList(parseClassOrInterfaceType(KiePMMLNameValue.class.getName())))); MethodDeclaration retrieved = CommonCodegenUtils.getMethodDeclaration(methodName, parameterNameTypeMap); commonValidateMethodDeclaration(retrieved, methodName); commonValidateMethodDeclarationParams(retrieved, parameterNameTypeMap); }
// Convenience factory: creates a proxy instance of the given PipelineOptions sub-interface with default settings.
public static <T extends PipelineOptions> T as(Class<T> klass) { return new Builder().as(klass); }
// Conversion (not construction) must fail when two interfaces declare contradictory @JsonIgnore values
// for the same property; the expected message pins the exact diagnostic format.
@Test public void testGettersAnnotatedWithInconsistentJsonIgnoreValue() throws Exception { // Initial construction is valid. GetterWithJsonIgnore options = PipelineOptionsFactory.as(GetterWithJsonIgnore.class); expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage( "Property [object] is marked with contradictory annotations. Found [" + "[JsonIgnore(value=false) on org.apache.beam.sdk.options.PipelineOptionsFactoryTest" + "$GetterWithInconsistentJsonIgnoreValue#getObject()], " + "[JsonIgnore(value=true) on org.apache.beam.sdk.options.PipelineOptionsFactoryTest" + "$GetterWithJsonIgnore#getObject()]]."); // When we attempt to convert, we should error at this moment. options.as(GetterWithInconsistentJsonIgnoreValue.class); }
// Runs provider tasks either on the common ForkJoinPool (joining each submitted task) or inline,
// then collects results from the shared session. Parallel mode still blocks until all tasks finish.
@Override public <T> List<SearchResult<T>> search(SearchRequest request, Class<T> typeFilter) { SearchSession<T> session = new SearchSession<>(request, Collections.singleton(typeFilter)); if (request.inParallel()) { ForkJoinPool commonPool = ForkJoinPool.commonPool(); getProviderTasks(request, session).stream().map(commonPool::submit).forEach(ForkJoinTask::join); } else { getProviderTasks(request, session).forEach(Runnable::run); } return session.getResults(); }
// Searching with the Element supertype filter should return both the node and the edge, in that order.
@Test public void testElement() { GraphGenerator generator = GraphGenerator.build().generateTinyGraph(); SearchRequest request = buildRequest(GraphGenerator.FIRST_NODE, generator); List<Element> results = toList(controller.search(request, Element.class)); Assert.assertEquals(2, results.size()); Assert.assertSame(generator.getGraph().getNode(GraphGenerator.FIRST_NODE), results.get(0)); Assert.assertSame(generator.getGraph().getEdge(GraphGenerator.FIRST_EDGE), results.get(1)); }
/**
 * Classifies an ElasticsearchException for a search type: aborted searches and
 * result-window-limit violations get dedicated error types; everything else is generic.
 */
public static SearchTypeError parse(Query query, String searchTypeId, ElasticsearchException ex) {
    if (isSearchTypeAbortedError(ex)) {
        return new SearchTypeAbortedError(query, searchTypeId, ex);
    }
    // Walk the cause chain, bounded to avoid pathological (or cyclic) chains.
    Throwable candidate = ex;
    for (int depth = 0; candidate != null && depth < MAX_DEPTH_OF_EXCEPTION_CAUSE_ANALYSIS; depth++) {
        final Integer resultWindowLimit = parseResultLimit(candidate);
        if (resultWindowLimit != null) {
            return new ResultWindowLimitError(query, searchTypeId, resultWindowLimit);
        }
        candidate = candidate.getCause();
    }
    return new SearchTypeError(query, searchTypeId, ex);
}
// A nested cause chain without a result-window-limit error must fall through to the generic SearchTypeError.
@Test void returnsSearchTypeErrorIfNoResultWindowLimitErrorPresent() { final ElasticsearchException elasticsearchException = new ElasticsearchException( "Something is wrong!", new IllegalStateException( "Oh my!!!", new ElasticsearchException("Your Elasticsearch is on a sick leave. If you want your data, ask Opensearch instead.") )); final SearchTypeError error = SearchTypeErrorParser.parse(query, "searchTypeId", elasticsearchException); assertThat(error) .satisfies(e -> assertEquals("searchTypeId", e.searchTypeId())) .satisfies(e -> assertEquals("test_query", e.queryId())); }
// Assembles a MappingEntry in ADDED state. NOTE(review): buildKey() is invoked twice (id hash + key);
// presumably it is cheap/deterministic — confirm before hoisting into a local.
public MappingEntry build() { Mapping.Builder builder; builder = DefaultMapping.builder() .withId(buildKey().hashCode()) .forDevice(deviceId) .withKey(buildKey()) .withValue(buildValue()); // TODO: we assume that the mapping entry will be always // stored in routers without failure for now, which means // the mapping entry state will always be ADDED rather than // PENDING_ADD // we will revisit this part when LISP driver is finished return new DefaultMappingEntry(builder.build(), MappingEntryState.ADDED); }
// Builds a LispMapNotify with a single record and checks the record round-trips intact.
@Test public void testMapNotifyConversion() { List<LispMapRecord> records = ImmutableList.of(getMapRecord(IP4, UNKNOWN)); NotifyBuilder notifyBuilder = new DefaultNotifyBuilder(); LispMapNotify mapNotify = notifyBuilder .withKeyId(UNIQUE_SHORT) .withAuthKey(AUTH_KEY) .withNonce(UNIQUE_LONG) .withMapRecords(records) .build(); List<LispMapRecord> notifyRecords = mapNotify.getMapRecords(); assertThat(notifyRecords.size(), is(1)); testMapRecordConversion(notifyRecords.get(0)); }
// Conforms each input field to the schema tree, routing unknown fields into the indexable/unindexable
// extras containers. On failure: rethrows unless _continueOnError, in which case the record is marked
// incomplete and returned (partially populated).
@Nullable @Override public GenericRow transform(GenericRow record) { GenericRow outputRecord = new GenericRow(); try { ExtraFieldsContainer extraFieldsContainer = new ExtraFieldsContainer(null != _transformerConfig.getUnindexableExtrasField()); for (Map.Entry<String, Object> recordEntry : record.getFieldToValueMap().entrySet()) { String recordKey = recordEntry.getKey(); Object recordValue = recordEntry.getValue(); processField(_schemaTree, recordKey, recordKey, recordValue, extraFieldsContainer, outputRecord); } putExtrasField(_transformerConfig.getIndexableExtrasField(), _indexableExtrasFieldType, extraFieldsContainer.getIndexableExtras(), outputRecord); putExtrasField(_transformerConfig.getUnindexableExtrasField(), _unindexableExtrasFieldType, extraFieldsContainer.getUnindexableExtras(), outputRecord); } catch (Exception e) { if (!_continueOnError) { throw e; } _logger.debug("Couldn't transform record: {}", record.toString(), e); outputRecord.putValue(GenericRow.INCOMPLETE_RECORD_KEY, true); } return outputRecord; }
// The filter transformer marks the row with $SKIP_RECORD_KEY$; the schema-conforming transformer
// must pass that special key through untouched so ingestion still skips the row.
@Test public void testIgnoringSpecialRowKeys() { // Configure a FilterTransformer and a SchemaConformingTransformer such that the filter will introduce a special // key $(SKIP_RECORD_KEY$) that the SchemaConformingTransformer should ignore IngestionConfig ingestionConfig = new IngestionConfig(); ingestionConfig.setFilterConfig(new FilterConfig("intField = 1")); SchemaConformingTransformerConfig schemaConformingTransformerConfig = new SchemaConformingTransformerConfig(INDEXABLE_EXTRAS_FIELD_NAME, UNINDEXABLE_EXTRAS_FIELD_NAME, UNINDEXABLE_FIELD_SUFFIX, null); ingestionConfig.setSchemaConformingTransformerConfig(schemaConformingTransformerConfig); TableConfig tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName("testTable").setIngestionConfig(ingestionConfig).build(); // Create a series of transformers: FilterTransformer -> SchemaConformingTransformer List<RecordTransformer> transformers = new LinkedList<>(); transformers.add(new FilterTransformer(tableConfig)); Schema schema = createDefaultSchemaBuilder().addSingleValueDimension("intField", DataType.INT).build(); transformers.add(new SchemaConformingTransformer(tableConfig, schema)); CompositeTransformer compositeTransformer = new CompositeTransformer(transformers); Map<String, Object> inputRecordMap = jsonStringToMap("{\"intField\":1}"); GenericRow inputRecord = createRowFromMap(inputRecordMap); GenericRow outputRecord = compositeTransformer.transform(inputRecord); Assert.assertNotNull(outputRecord); // Check that the transformed record has $SKIP_RECORD_KEY$ Assert.assertFalse(IngestionUtils.shouldIngestRow(outputRecord)); }
// Generates the next ObjectId string in the default (non-hyphenated) form.
public static String next() { return next(false); }
// 10,000 consecutively generated ids must all be distinct.
@Test public void distinctTest() { //生成10000个id测试是否重复 HashSet<String> set = new HashSet<>(); for(int i = 0; i < 10000; i++) { set.add(ObjectId.next()); } assertEquals(10000, set.size()); }
/**
 * Reads from the channel at the given absolute position until the buffer is full
 * or end-of-stream is reached. Does not modify the channel's own position.
 *
 * Fix: the original unconditionally executed {@code currentPosition += bytesRead},
 * which subtracted 1 from the position when read() returned -1 at EOF. It was
 * harmless only because the loop exits immediately — a latent bug guarded here.
 *
 * @param channel channel to read from
 * @param destinationBuffer buffer to fill
 * @param position absolute file position to start reading at; must be non-negative
 * @throws IllegalArgumentException if position is negative
 * @throws IOException if the underlying read fails
 */
public static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position) throws IOException {
    if (position < 0) {
        throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position);
    }
    long currentPosition = position;
    int bytesRead;
    do {
        bytesRead = channel.read(destinationBuffer, currentPosition);
        // Only advance for actual data; never fold the -1 EOF sentinel into the position.
        if (bytesRead > 0) {
            currentPosition += bytesRead;
        }
    } while (bytesRead != -1 && destinationBuffer.hasRemaining());
}
// Mocked channel returns random-sized partial reads; readFully must keep looping until the buffer is full.
@Test public void testReadFullyWithPartialFileChannelReads() throws IOException { FileChannel channelMock = mock(FileChannel.class); final int bufferSize = 100; String expectedBufferContent = fileChannelMockExpectReadWithRandomBytes(channelMock, bufferSize); ByteBuffer buffer = ByteBuffer.allocate(bufferSize); Utils.readFully(channelMock, buffer, 0L); assertEquals(expectedBufferContent, new String(buffer.array()), "The buffer should be populated correctly."); assertFalse(buffer.hasRemaining(), "The buffer should be filled"); verify(channelMock, atLeastOnce()).read(any(), anyLong()); }
// Returns the full auth plugin data: part1 followed by part2 (MySQL handshake scramble).
public byte[] getAuthenticationPluginData() { return Bytes.concat(authenticationPluginDataPart1, authenticationPluginDataPart2); }
// Both parts are exposed individually and the combined form equals their concatenation.
@Test void assertGetAuthPluginData() { byte[] actualPart1 = {106, 105, 55, 122, 117, 98, 115, 109}; byte[] actualPart2 = {68, 102, 53, 122, 65, 49, 84, 79, 85, 115, 116, 113}; MySQLAuthenticationPluginData actual = new MySQLAuthenticationPluginData(actualPart1, actualPart2); assertThat(actual.getAuthenticationPluginDataPart1(), is(actualPart1)); assertThat(actual.getAuthenticationPluginDataPart2(), is(actualPart2)); assertThat(actual.getAuthenticationPluginData(), is(Bytes.concat(actualPart1, actualPart2))); }
// Exports the input (with its extractors) as a content-pack entity; a missing input yields empty
// rather than an exception.
@Override public Optional<Entity> exportEntity(EntityDescriptor entityDescriptor, EntityDescriptorIds entityDescriptorIds) { final ModelId modelId = entityDescriptor.id(); try { final Input input = inputService.find(modelId.id()); final InputWithExtractors inputWithExtractors = InputWithExtractors.create(input, inputService.getExtractors(input)); return Optional.of(exportNativeEntity(inputWithExtractors, entityDescriptorIds)); } catch (NotFoundException e) { return Optional.empty(); } }
// Exports a fixture input and checks id/type plus the converted InputEntity payload fields.
@Test @MongoDBFixtures("InputFacadeTest.json") public void exportEntity() { final ModelId id = ModelId.of("5acc84f84b900a4ff290d9a7"); final EntityDescriptor descriptor = EntityDescriptor.create(id, ModelTypes.INPUT_V1); final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor); final Entity entity = facade.exportEntity(descriptor, entityDescriptorIds).orElseThrow(AssertionError::new); assertThat(entity).isInstanceOf(EntityV1.class); assertThat(entity.id()).isEqualTo(ModelId.of(entityDescriptorIds.get(descriptor).orElse(null))); assertThat(entity.type()).isEqualTo(ModelTypes.INPUT_V1); final EntityV1 entityV1 = (EntityV1) entity; final InputEntity inputEntity = objectMapper.convertValue(entityV1.data(), InputEntity.class); assertThat(inputEntity.title()).isEqualTo(ValueReference.of("Local Raw UDP")); assertThat(inputEntity.type()).isEqualTo(ValueReference.of("org.graylog2.inputs.raw.udp.RawUDPInput")); assertThat(inputEntity.global()).isEqualTo(ValueReference.of(false)); assertThat(inputEntity.configuration()) .containsEntry("bind_address", ValueReference.of("127.0.0.1")) .containsEntry("port", ValueReference.of(5555)); }
// In-place stable sort of service-loaded instances by the project's class-name comparator.
public static <T> void sortLoadedClasses(List<T> loaded) { loaded.sort(CompareUtils::compareClassName); }
// Sorting any shuffle (seeded for reproducibility) must restore the expected deterministic order.
@Test public void testSort() throws Exception { //OverrideDetector is moved to index 0 //by the private service loading in DefaultDetector. //This tests that a custom detector always comes first //and then reverse alphabetical order Detector[] detectors = new Detector[]{new MyCustomDetector(), new EmptyDetector(), new FileCommandDetector(), new OverrideDetector(), new ZeroSizeFileDetector()}; List<Detector> expected = Arrays.asList(detectors); List<Detector> shuffled = new ArrayList<>(expected); Random random = new Random(42); for (int i = 0; i < 10; i++) { Collections.shuffle(shuffled, random); ServiceLoaderUtils.sortLoadedClasses(shuffled); assertEquals(expected, shuffled, "failed on iteration " + i); } }
// Packages a browse result as an immutable {url, content} map; content is trimmed by trimContent.
protected Map<String, String> formatResult(String url, String content) { return Map.of( "url", url, "content", trimContent(content) ); }
// Short content must pass through trimming unchanged.
@Test void testFormatResultWithShortContent() { String url = "http://example.com"; String content = "Short content"; Map<String, String> result = rawBrowserAction.formatResult(url, content); assertEquals(url, result.get("url")); assertEquals(content, result.get("content")); }
// Converts a MapTask into a directed network of instruction nodes and generated PCollection output
// nodes, then wires each instruction's declared inputs to those outputs.
// NOTE(review): this method's text was split across two extraction lines mid-comment ("// Connect " /
// "PCollections as inputs..."); preserved byte-for-byte below — reassemble before compiling.
@Override public MutableNetwork<Node, Edge> apply(MapTask mapTask) { List<ParallelInstruction> parallelInstructions = Apiary.listOrEmpty(mapTask.getInstructions()); MutableNetwork<Node, Edge> network = NetworkBuilder.directed() .allowsSelfLoops(false) .allowsParallelEdges(true) .expectedNodeCount(parallelInstructions.size() * 2) .build(); // Add all the instruction nodes and output nodes ParallelInstructionNode[] instructionNodes = new ParallelInstructionNode[parallelInstructions.size()]; InstructionOutputNode[][] outputNodes = new InstructionOutputNode[parallelInstructions.size()][]; for (int i = 0; i < parallelInstructions.size(); ++i) { // InstructionOutputNode's are the source of truth on instruction outputs. // Clear the instruction's outputs to reduce chance for confusion. List<InstructionOutput> outputs = Apiary.listOrEmpty(parallelInstructions.get(i).getOutputs()); outputNodes[i] = new InstructionOutputNode[outputs.size()]; JsonFactory factory = MoreObjects.firstNonNull(mapTask.getFactory(), Transport.getJsonFactory()); ParallelInstruction parallelInstruction = clone(factory, parallelInstructions.get(i)).setOutputs(null); ParallelInstructionNode instructionNode = ParallelInstructionNode.create(parallelInstruction, Nodes.ExecutionLocation.UNKNOWN); instructionNodes[i] = instructionNode; network.addNode(instructionNode); // Connect the instruction node output to the output PCollection node for (int j = 0; j < outputs.size(); ++j) { InstructionOutput instructionOutput = outputs.get(j); InstructionOutputNode outputNode = InstructionOutputNode.create( instructionOutput, "generatedPcollection" + this.idGenerator.getId()); network.addNode(outputNode); if (parallelInstruction.getParDo() != null) { network.addEdge( instructionNode, outputNode, MultiOutputInfoEdge.create( parallelInstruction.getParDo().getMultiOutputInfos().get(j))); } else { network.addEdge(instructionNode, outputNode, DefaultEdge.create()); } outputNodes[i][j] = outputNode; } } // Connect 
PCollections as inputs to instructions for (ParallelInstructionNode instructionNode : instructionNodes) { ParallelInstruction parallelInstruction = instructionNode.getParallelInstruction(); if (parallelInstruction.getFlatten() != null) { for (InstructionInput input : Apiary.listOrEmpty(parallelInstruction.getFlatten().getInputs())) { attachInput(input, network, instructionNode, outputNodes); } } else if (parallelInstruction.getParDo() != null) { attachInput( parallelInstruction.getParDo().getInput(), network, instructionNode, outputNodes); } else if (parallelInstruction.getPartialGroupByKey() != null) { attachInput( parallelInstruction.getPartialGroupByKey().getInput(), network, instructionNode, outputNodes); } else if (parallelInstruction.getRead() != null) { // Reads have no inputs so nothing to do } else if (parallelInstruction.getWrite() != null) { attachInput( parallelInstruction.getWrite().getInput(), network, instructionNode, outputNodes); } else { throw new IllegalArgumentException( String.format( "Unknown type of instruction %s for map task %s", parallelInstruction, mapTask)); } } return network; }
// An empty MapTask yields an empty directed network with parallel edges allowed, self-loops not.
@Test public void testEmptyMapTask() { Network<Node, Edge> network = new MapTaskToNetworkFunction(IdGenerators.decrementingLongs()).apply(new MapTask()); assertTrue(network.isDirected()); assertTrue(network.allowsParallelEdges()); assertFalse(network.allowsSelfLoops()); assertThat(network.nodes(), emptyCollectionOf(Node.class)); }
// Deserializes raw protobuf bytes via UnknownFieldSet (no schema needed) into a textual rendering.
// NOTE(review): the cause is dropped when rethrowing — only e.getMessage() survives; if
// ValidationException accepts a cause, it should be chained. Confirm against its constructors.
@Override public Deserializer deserializer(String topic, Target type) { return new Deserializer() { @SneakyThrows @Override public DeserializeResult deserialize(RecordHeaders headers, byte[] data) { try { UnknownFieldSet unknownFields = UnknownFieldSet.parseFrom(data); return new DeserializeResult(unknownFields.toString(), DeserializeResult.Type.STRING, Map.of()); } catch (Exception e) { throw new ValidationException(e.getMessage()); } } }; }
// Field 1 with value 5 renders as "1: 5\n" in UnknownFieldSet's text format.
@Test void deserializeSimpleMessage() { var deserialized = serde.deserializer(DUMMY_TOPIC, Serde.Target.VALUE) .deserialize(null, getProtobufMessage()); assertThat(deserialized.getResult()).isEqualTo("1: 5\n"); }
// Merges the [start, end) overlap of two RLE allocations under the given operator, delegating to the
// cumulative-map merge overload.
public static RLESparseResourceAllocation merge(ResourceCalculator resCalc, Resource clusterResource, RLESparseResourceAllocation a, RLESparseResourceAllocation b, RLEOperator operator, long start, long end) throws PlanningException { NavigableMap<Long, Resource> cumA = a.getRangeOverlapping(start, end).getCumulative(); NavigableMap<Long, Resource> cumB = b.getRangeOverlapping(start, end).getCumulative(); NavigableMap<Long, Resource> out = merge(resCalc, clusterResource, cumA, cumB, start, end, operator); return new RLESparseResourceAllocation(out, resCalc); }
// Element-wise min of two fixture allocations over [0, 60) must match the expected step function.
@Test public void testMergeMin() throws PlanningException { TreeMap<Long, Resource> a = new TreeMap<>(); TreeMap<Long, Resource> b = new TreeMap<>(); setupArrays(a, b); RLESparseResourceAllocation rleA = new RLESparseResourceAllocation(a, new DefaultResourceCalculator()); RLESparseResourceAllocation rleB = new RLESparseResourceAllocation(b, new DefaultResourceCalculator()); RLESparseResourceAllocation out = RLESparseResourceAllocation.merge(new DefaultResourceCalculator(), Resource.newInstance(100 * 128 * 1024, 100 * 32), rleA, rleB, RLEOperator.min, 0, 60); System.out.println(out); long[] time = { 10, 22, 33, 40, 43, 50, 60 }; int[] alloc = { 5, 10, 15, 20, 10, 0 }; validate(out, time, alloc); }
// Synchronous facade: blocks on the async index-creation future.
@Override public void createIndex(String indexName, IndexOptions options, FieldIndex... fields) { commandExecutor.get(createIndexAsync(indexName, options, fields)); }
// Creating a JSON index with a fully-configured text field must not throw.
@Test public void testFieldText() { IndexOptions indexOptions = IndexOptions.defaults() .on(IndexType.JSON) .prefix(Arrays.asList("items")); FieldIndex[] fields = new FieldIndex[]{ FieldIndex.text("$.name") .noStem() .noIndex() .sortMode(SortMode.NORMALIZED) .as("name") }; RSearch s = redisson.getSearch(); s.createIndex("itemIndex", indexOptions, fields); }
// Parses a feature identifier into a URI, handling (in order): the bare legacy "classpath:" prefix,
// non-standard path separators, Windows drive-letter paths, probable URIs, and finally plain file paths.
public static URI parse(String featureIdentifier) { requireNonNull(featureIdentifier, "featureIdentifier may not be null"); if (featureIdentifier.isEmpty()) { throw new IllegalArgumentException("featureIdentifier may not be empty"); } // Legacy from the Cucumber Eclipse plugin // Older versions of Cucumber allowed it. if (CLASSPATH_SCHEME_PREFIX.equals(featureIdentifier)) { return rootPackageUri(); } if (nonStandardPathSeparatorInUse(featureIdentifier)) { String standardized = replaceNonStandardPathSeparator(featureIdentifier); return parseAssumeFileScheme(standardized); } if (isWindowsOS() && pathContainsWindowsDrivePattern(featureIdentifier)) { return parseAssumeFileScheme(featureIdentifier); } if (probablyURI(featureIdentifier)) { return parseProbableURI(featureIdentifier); } return parseAssumeFileScheme(featureIdentifier); }
@Test void can_parse_absolute_path_form() { URI uri = FeaturePath.parse("/path/to/file.feature"); assertThat(uri.getScheme(), is(is("file"))); // Use File to work out the drive letter on windows. File file = new File("/path/to/file.feature"); assertThat(uri.getSchemeSpecificPart(), is(file.toURI().getSchemeSpecificPart())); }
// Idempotent migration: converts every dashboard to a (View, Search) pair in deterministic id order,
// persists them, then records which dashboard/widget ids were migrated so reruns become no-ops.
// NOTE(review): dashboardIdToViewId is a Set despite its map-like name.
@Override public void upgrade() { if (hasBeenRunSuccessfully()) { LOG.debug("Migration already completed."); return; } final Set<String> dashboardIdToViewId = new HashSet<>(); final Consumer<String> recordMigratedDashboardIds = dashboardIdToViewId::add; final Map<String, Set<String>> widgetIdMigrationMapping = new HashMap<>(); final Consumer<Map<String, Set<String>>> recordMigratedWidgetIds = widgetIdMigrationMapping::putAll; final Map<View, Search> newViews = this.dashboardsService.streamAll() .sorted(Comparator.comparing(Dashboard::id)) .map(dashboard -> migrateDashboard(dashboard, recordMigratedDashboardIds, recordMigratedWidgetIds)) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); writeViews(newViews); final MigrationCompleted migrationCompleted = MigrationCompleted.create(dashboardIdToViewId, widgetIdMigrationMapping); writeMigrationCompleted(migrationCompleted); }
// With no dashboards to migrate, a completion marker is still written — and nothing is saved.
@Test public void writesMigrationCompletedAfterSuccess() { this.migration.upgrade(); final MigrationCompleted migrationCompleted = captureMigrationCompleted(); assertThat(migrationCompleted.migratedDashboardIds()).isEmpty(); assertThat(migrationCompleted.widgetMigrationIds()).isEmpty(); verify(viewService, never()).save(any()); verify(searchService, never()).save(any()); }
/**
 * Wraps a listener with tracing. Idempotent: an already-traced listener is returned as-is,
 * so double-wrapping (and double spans) cannot occur.
 */
public MessageListener messageListener(MessageListener messageListener, boolean addConsumerSpan) {
    return messageListener instanceof TracingMessageListener
        ? messageListener
        : new TracingMessageListener(messageListener, this, addConsumerSpan);
}
// The wrapped listener must record a local span named "on-message" per delivery.
@Test void messageListener_traces() { jmsTracing.messageListener(mock(MessageListener.class), false) .onMessage(message); assertThat(testSpanHandler.takeLocalSpan().name()).isEqualTo("on-message"); }
// Resolves the effective mapping template (merging profile + custom field mappings) and renders it
// with order 0.
Template buildTemplate(IndexSet indexSet, IndexSetConfig indexSetConfig) throws IgnoreIndexTemplate { final IndexSetMappingTemplate indexSetMappingTemplate = getTemplateIndexSetConfig(indexSet, indexSetConfig, profileService); return indexMappingFactory.createIndexMapping(indexSetConfig) .toTemplate(indexSetMappingTemplate, 0L); }
// Individual custom mappings must override the profile's on conflicting fields (f1),
// while non-conflicting profile fields (f3) are still included.
@Test void testUsesCustomMappingsAndProfileWhileBuildingTemplate() { final TestIndexSet testIndexSet = indexSetConfig("test", "test-template-profiles", "custom", "000000000000000000000013", new CustomFieldMappings(List.of( new CustomFieldMapping("f1", "string"), new CustomFieldMapping("f2", "long") ))); doReturn(Optional.of(new IndexFieldTypeProfile( "000000000000000000000013", "test_profile", "Test profile", new CustomFieldMappings(List.of( new CustomFieldMapping("f1", "ip"), new CustomFieldMapping("f3", "ip") ))) )).when(profileService).get("000000000000000000000013"); IndexMappingTemplate indexMappingTemplateMock = mock(IndexMappingTemplate.class); doReturn(indexMappingTemplateMock).when(indexMappingFactory).createIndexMapping(testIndexSet.getConfig()); underTest.buildTemplate(testIndexSet, testIndexSet.getConfig()); verify(indexMappingTemplateMock).toTemplate( new IndexSetMappingTemplate("standard", "test_*", new CustomFieldMappings(List.of( new CustomFieldMapping("f1", "string"), //from individual custom mapping new CustomFieldMapping("f2", "long"), //from individual custom mapping new CustomFieldMapping("f3", "ip") //from profile ))), 0L ); }
// Number of lease-tracked paths; synchronized against concurrent lease mutation.
synchronized long countPath() { return leasesById.size(); }
// countPath tracks adds, ignores duplicates/invalid removals, stays constant on reassign, drops on removal.
@Test public void testCountPath() { LeaseManager lm = new LeaseManager(makeMockFsNameSystem()); lm.addLease("holder1", 1); assertThat(lm.countPath(), is(1L)); lm.addLease("holder2", 2); assertThat(lm.countPath(), is(2L)); lm.addLease("holder2", 2); // Duplicate addition assertThat(lm.countPath(), is(2L)); assertThat(lm.countPath(), is(2L)); // Remove a couple of non-existing leases. countPath should not change. lm.removeLease("holder2", stubInodeFile(3)); lm.removeLease("InvalidLeaseHolder", stubInodeFile(1)); assertThat(lm.countPath(), is(2L)); INodeFile file = stubInodeFile(1); lm.reassignLease(lm.getLease(file), file, "holder2"); assertThat(lm.countPath(), is(2L)); // Count unchanged on reassign lm.removeLease("holder2", stubInodeFile(2)); // Remove existing assertThat(lm.countPath(), is(1L)); }
/**
 * Renders the status message, truncated to 1KB, then component-encoded for safe transport.
 * Truncation happens before encoding so the encoded output stays bounded.
 */
public String toEncodedMessage() {
    return encodeComponent(limitSizeTo1KB(toMessage()));
}
// Both the description and the cause's message must survive into the encoded form.
@Test void toEncodedMessage() { String message = TriRpcStatus.NOT_FOUND .withDescription("desc") .withCause(new IllegalStateException("test")) .toEncodedMessage(); Assertions.assertTrue(message.contains("desc")); Assertions.assertTrue(message.contains("test")); }
// Elapsed time from the track's latest point to the given instant (positive when the instant is later).
Duration timeSince(Instant timeAfterLastPointsTime) { return Duration.between(this.timeOfLatestPoint(), timeAfterLastPointsTime); }
// With a single point, timeSince(point + 1h) is exactly one hour.
@Test public void testTimeSince() { Point<?> firstPoint = Point.builder().time(Instant.now()).latLong(0.0, 0.0).build(); TrackUnderConstruction tip = new TrackUnderConstruction(firstPoint); //the "last point" should be the oldest point added assertEquals( Duration.ofHours(1), tip.timeSince(firstPoint.time().plus(Duration.ofHours(1))) ); }
// Cycles to the next WeatherType (wrapping at the end) and notifies registered observers.
public void timePasses() { var enumValues = WeatherType.values(); currentWeather = enumValues[(currentWeather.ordinal() + 1) % enumValues.length]; LOGGER.info("The weather changed to {}.", currentWeather); notifyObservers(); }
// Observers receive each successive weather type, in order, wrapping around the enum.
@Test void testTimePasses() { final var observer = mock(WeatherObserver.class); final var weather = new Weather(); weather.addObserver(observer); final var inOrder = inOrder(observer); final var weatherTypes = WeatherType.values(); for (var i = 1; i < 20; i++) { weather.timePasses(); inOrder.verify(observer).update(weatherTypes[i % weatherTypes.length]); } verifyNoMoreInteractions(observer); }
public static boolean isBirthday(int year, int month, int day) { // 验证年 int thisYear = DateUtil.thisYear(); if (year < 1900 || year > thisYear) { return false; } // 验证月 if (month < 1 || month > 12) { return false; } // 验证日 if (day < 1 || day > 31) { return false; } // 检查几个特殊月的最大天数 if (day == 31 && (month == 4 || month == 6 || month == 9 || month == 11)) { return false; } if (month == 2) { // 在2月,非闰年最大28,闰年最大29 return day < 29 || (day == 29 && DateUtil.isLeapYear(year)); } return true; }
// Accepts several date string formats; rejects future years, month 13, and Feb 29 in a non-leap year.
@Test public void isBirthdayTest() { final boolean b = Validator.isBirthday("20150101"); assertTrue(b); final boolean b2 = Validator.isBirthday("2015-01-01"); assertTrue(b2); final boolean b3 = Validator.isBirthday("2015.01.01"); assertTrue(b3); final boolean b4 = Validator.isBirthday("2015年01月01日"); assertTrue(b4); final boolean b5 = Validator.isBirthday("2015.01.01"); assertTrue(b5); final boolean b6 = Validator.isBirthday("2018-08-15"); assertTrue(b6); //验证年非法 assertFalse(Validator.isBirthday("2095.05.01")); //验证月非法 assertFalse(Validator.isBirthday("2015.13.01")); //验证日非法 assertFalse(Validator.isBirthday("2015.02.29")); }
/**
 * Deterministically maps a string to a bucket index in [0, upperLimit).
 * A null string always maps to bucket 0.
 *
 * @param string     input to hash; may be null
 * @param upperLimit exclusive upper bound; must be >= 1
 * @return a non-negative bucket index below upperLimit
 * @throws IllegalArgumentException if upperLimit < 1
 */
public static int shakeUp(String string, int upperLimit) {
    if (upperLimit < 1) {
        throw new IllegalArgumentException("upper limit must be greater than 0");
    }
    if (string == null) {
        return 0;
    }
    // Mask off the sign bit so the modulo result is always non-negative.
    final int nonNegativeHash = string.hashCode() & Integer.MAX_VALUE;
    return nonNegativeHash % upperLimit;
}
// upperLimit is validated before the null string is considered, so (null, 0) still throws.
@Test void testShakeUpException() { assertThrows(IllegalArgumentException.class, () -> { UtilsAndCommons.shakeUp(null, 0); }); }
// Looks up, validates (exists + not expired), and single-use-consumes an authorization code by
// deleting it before returning.
@Override public OAuth2CodeDO consumeAuthorizationCode(String code) { OAuth2CodeDO codeDO = oauth2CodeMapper.selectByCode(code); if (codeDO == null) { throw exception(OAUTH2_CODE_NOT_EXISTS); } if (DateUtils.isExpired(codeDO.getExpiresTime())) { throw exception(OAUTH2_CODE_EXPIRE); } oauth2CodeMapper.deleteById(codeDO.getId()); return codeDO; }
// A valid, unexpired code is returned and then removed from storage (single use).
@Test public void testConsumeAuthorizationCode_success() { // 准备参数 String code = "test_code"; // mock 数据 OAuth2CodeDO codeDO = randomPojo(OAuth2CodeDO.class).setCode(code) .setExpiresTime(LocalDateTime.now().plusDays(1)); oauth2CodeMapper.insert(codeDO); // 调用 OAuth2CodeDO result = oauth2CodeService.consumeAuthorizationCode(code); assertPojoEquals(codeDO, result); assertNull(oauth2CodeMapper.selectByCode(code)); }
// Accessor for the topology's terminal node names (nodes with no downstream consumers).
public Set<String> terminalNodes() { return terminalNodes; }
// A processor with no children and a sink are terminal; a processor feeding a sink is not.
@Test public void shouldGetTerminalNodes() { topology.addSource("source-1", "topic-1"); topology.addSource("source-2", "topic-2", "topic-3"); topology.addProcessor("processor-1", new MockApiProcessorSupplier<>(), "source-1"); topology.addProcessor("processor-2", new MockApiProcessorSupplier<>(), "source-1", "source-2"); topology.addSink("sink-1", "topic-3", "processor-1"); final ProcessorTopology processorTopology = topology.getInternalBuilder("X").buildTopology(); assertThat(processorTopology.terminalNodes(), equalTo(mkSet("processor-2", "sink-1"))); }
// Authorizes the caller (OPERATE privilege; AccessDeniedException rewrapped as TException),
// then flattens the module -> class -> MemoryStat tracker into a list of TFeMemoryItem rows.
public static TFeMemoryRes listFeMemoryUsage(TFeMemoryReq request) throws TException { TAuthInfo auth = request.getAuth_info(); UserIdentity currentUser; if (auth.isSetCurrent_user_ident()) { currentUser = UserIdentity.fromThrift(auth.getCurrent_user_ident()); } else { currentUser = UserIdentity.createAnalyzedUserIdentWithIp(auth.getUser(), auth.getUser_ip()); } try { Authorizer.checkSystemAction(currentUser, null, PrivilegeType.OPERATE); } catch (AccessDeniedException e) { throw new TException(e.getMessage(), e); } TFeMemoryRes response = new TFeMemoryRes(); MemoryUsageTracker.MEMORY_USAGE.forEach((moduleName, module) -> { if (module != null) { module.forEach((className, memoryStat) -> { TFeMemoryItem item = new TFeMemoryItem(); item.setModule_name(moduleName); item.setClass_name(className); item.setCurrent_consumption(memoryStat.getCurrentConsumption()); item.setPeak_consumption(memoryStat.getPeakConsumption()); item.setCounter_info(memoryStat.getCounterInfo()); response.addToItems(item); }); } }); return response; }
// With authorization stubbed out and one tracked module, the response renders non-empty.
@Test public void testListFeMemoryUsage() throws TException, AccessDeniedException { TFeMemoryReq req = new TFeMemoryReq(); TAuthInfo auth = new TAuthInfo(); auth.setUser("root"); auth.setUser_ip("127.0.0.1"); req.setAuth_info(auth); AccessControlProvider accessControlProvider = Authorizer.getInstance(); new Expectations(accessControlProvider) { { Authorizer.checkSystemAction((UserIdentity) any, (Set<Long>) any, (PrivilegeType) any); result = null; minTimes = 0; } }; Map<String, Map<String, MemoryStat>> memoryUsage = MemoryUsageTracker.MEMORY_USAGE; MemoryStat memoryStat = new MemoryStat(); memoryUsage.put("test", ImmutableMap.of("test", memoryStat)); var res = SysFeMemoryUsage.listFeMemoryUsage(req); assertTrue(StringUtils.isNotEmpty(res.toString())); }
/**
 * A migration is "new" when no document keyed by its class name exists yet in the
 * migrations collection (i.e. it has never been run against this database).
 */
@Override
protected boolean isNewMigration(NoSqlMigration noSqlMigration) {
    final String className = noSqlMigration.getClassName();
    return migrationCollection.find(eq(toMongoId(Migrations.FIELD_ID), className)).first() == null;
}
// Happy path: both migrations are new before running, runMigrations() is idempotent
// (safe to call twice), and afterwards neither migration is reported as new.
@Test
void testMigrationsHappyPath() {
    MongoDBCreator mongoDBCreator = new MongoDBCreator(mongoClient(), MongoDBStorageProvider.DEFAULT_DB_NAME);

    assertThat(mongoDBCreator.isNewMigration(new NoSqlMigrationByClass(M001_CreateJobCollection.class))).isTrue();
    assertThat(mongoDBCreator.isNewMigration(new NoSqlMigrationByClass(M002_CreateRecurringJobCollection.class))).isTrue();

    assertThatCode(mongoDBCreator::runMigrations).doesNotThrowAnyException();
    assertThatCode(mongoDBCreator::runMigrations).doesNotThrowAnyException();

    assertThat(mongoDBCreator.isNewMigration(new NoSqlMigrationByClass(M001_CreateJobCollection.class))).isFalse();
    assertThat(mongoDBCreator.isNewMigration(new NoSqlMigrationByClass(M002_CreateRecurringJobCollection.class))).isFalse();
}
public static void checkNullOrNonNullNonEmptyEntries( @Nullable Collection<String> values, String propertyName) { if (values == null) { // pass return; } for (String value : values) { Preconditions.checkNotNull( value, "Property '" + propertyName + "' cannot contain null entries"); Preconditions.checkArgument( !value.trim().isEmpty(), "Property '" + propertyName + "' cannot contain empty strings"); } }
// A list of non-null, non-blank strings must validate without throwing.
@Test
public void testCheckNullNonNullNonEmptyEntries_valuesPass() {
    Validator.checkNullOrNonNullNonEmptyEntries(ImmutableList.of("first", "second"), "test");
    // pass
}
/**
 * Looks up a host by id within this virtual network.
 *
 * @param hostId host id to look up; must not be null
 * @return the matching host, or null if no host with that id exists
 * @throws NullPointerException if {@code hostId} is null
 */
@Override
public Host getHost(HostId hostId) {
    checkNotNull(hostId, HOST_NULL);
    // Idiomatic Optional handling: orElse(null) replaces the isPresent()/get() pair.
    // Returning null (not throwing) on a miss preserves the original contract.
    return manager.getVirtualHosts(this.networkId())
            .stream()
            .filter(host -> hostId.equals(host.id()))
            .findFirst()
            .orElse(null);
}
// getHost(null) must fail fast with NullPointerException (checkNotNull guard).
@Test(expected = NullPointerException.class)
public void testGetHostByNullId() {
    VirtualNetwork vnet = setupEmptyVnet();
    HostService hostService = manager.get(vnet.id(), HostService.class);

    hostService.getHost(null);
}
/**
 * For this criterion lower values are better, so the first value wins when it is
 * strictly less than the second.
 */
@Override
public boolean betterThan(Num criterionValue1, Num criterionValue2) {
    final boolean firstIsBetter = criterionValue1.isLessThan(criterionValue2);
    return firstIsBetter;
}
// Lower-is-better semantics: 3 beats 6, while 6 does not beat 2.
@Test
public void betterThan() {
    AnalysisCriterion criterion = getCriterion();
    assertTrue(criterion.betterThan(numOf(3), numOf(6)));
    assertFalse(criterion.betterThan(numOf(6), numOf(2)));
}
/**
 * Reads a bean property via its getter, temporarily forcing accessibility when the
 * getter is not accessible.
 *
 * NOTE(review): the accessible flag is toggled on the shared Method object and reset
 * in finally; concurrent callers on the same Method could race on that flag — confirm
 * whether ReflectUtils returns a cached/shared Method instance.
 * isAccessible() is deprecated since Java 9; presumably kept for runtime compatibility.
 *
 * @param bean  object to read from
 * @param name  property name (resolved to a getter by ReflectUtils)
 * @param clazz expected property type; only used by the caller's cast — the value is
 *              cast unchecked to T here
 * @return the getter's return value
 * @throws Exception if the getter is missing or invocation fails
 */
public static <T> T getProperty(Object bean, String name, Class<T> clazz) throws Exception {
    Method method = ReflectUtils.getPropertyGetterMethod(bean.getClass(), name);
    if (method.isAccessible()) {
        return (T) method.invoke(bean);
    } else {
        try {
            method.setAccessible(true);
            return (T) method.invoke(bean);
        } finally {
            // Restore the flag so the (possibly cached) Method is left as found.
            method.setAccessible(false);
        }
    }
}
// Reads several property types through BeanUtils.getProperty (the clazz argument is
// not used for conversion — passing null still works) and verifies that an unknown
// property name surfaces as an exception.
@Test
public void testGetProperty() throws Exception {
    TestBean config = new TestBean();
    config.setAlias("1111aaaa");
    config.setHeartbeat(2000);
    config.setRegister(false);

    Assert.assertEquals(BeanUtils.getProperty(config, "alias", String.class), "1111aaaa");
    Assert.assertTrue(BeanUtils.getProperty(config, "heartbeat", int.class) == 2000);
    Assert.assertTrue((Integer) BeanUtils.getProperty(config, "heartbeat", null) == 2000);
    Assert.assertFalse(BeanUtils.getProperty(config, "register", boolean.class));

    boolean error = false;
    try {
        BeanUtils.getProperty(config, "xxx", String.class);
    } catch (Exception e) {
        error = true;
    }
    Assert.assertTrue(error);
}
/**
 * Returns events for the default Splunk search command ("search").
 * Synchronized, matching the overload it delegates to.
 */
public synchronized List<SplunkEvent> getEvents() {
    return getEvents("search");
}
// When the job's event stream fails while being read (IOException from the underlying
// InputStream), getEvents must wrap the failure in a SplunkResourceManagerException.
@Test
public void testGetEventsShouldThrowErrorWhenXmlReaderFailsToParseResponse() {
    Job mockJob = clientFactory.getServiceClient(any(ServiceArgs.class)).getJobs().create(anyString());
    when(mockJob.isDone()).thenReturn(true);
    // Stream that always fails: simulates an unreadable/unparseable response body.
    when(mockJob.getEvents())
        .thenReturn(
            new InputStream() {
                @Override
                public int read() throws IOException {
                    throw new IOException();
                }
            });
    assertThrows(SplunkResourceManagerException.class, () -> testManager.getEvents(QUERY));
}
/**
 * Handles a ConsumerGroupHeartbeat: creates/updates the member, bumps the group epoch
 * when subscriptions or topic metadata changed, recomputes the target assignment when
 * the epoch advanced, and reconciles the member's owned partitions towards the target.
 * All state changes are emitted as records appended to the returned CoordinatorResult
 * (the group is only mutated when those records are replayed).
 *
 * @param memberEpoch 0 means join/rejoin (group and member may be created)
 * @param instanceId  null for dynamic members, set for static membership
 * @throws ApiException e.g. GroupIdNotFoundException when memberEpoch > 0 and the
 *                      group does not exist, or the group is full
 */
private CoordinatorResult<ConsumerGroupHeartbeatResponseData, CoordinatorRecord> consumerGroupHeartbeat(
    String groupId,
    String memberId,
    int memberEpoch,
    String instanceId,
    String rackId,
    int rebalanceTimeoutMs,
    String clientId,
    String clientHost,
    List<String> subscribedTopicNames,
    String assignorName,
    List<ConsumerGroupHeartbeatRequestData.TopicPartitions> ownedTopicPartitions
) throws ApiException {
    final long currentTimeMs = time.milliseconds();
    final List<CoordinatorRecord> records = new ArrayList<>();

    // Get or create the consumer group. Creation is only allowed on a join (epoch 0).
    boolean createIfNotExists = memberEpoch == 0;
    final ConsumerGroup group = getOrMaybeCreateConsumerGroup(groupId, createIfNotExists, records);
    throwIfConsumerGroupIsFull(group, memberId);

    // Get or create the member. A joining member without an id gets a server-generated one.
    if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString();
    final ConsumerGroupMember member;
    if (instanceId == null) {
        member = getOrMaybeSubscribeDynamicConsumerGroupMember(
            group,
            memberId,
            memberEpoch,
            ownedTopicPartitions,
            createIfNotExists,
            false
        );
    } else {
        member = getOrMaybeSubscribeStaticConsumerGroupMember(
            group,
            memberId,
            memberEpoch,
            instanceId,
            ownedTopicPartitions,
            createIfNotExists,
            false,
            records
        );
    }

    // 1. Create or update the member. If the member is new or has changed, a ConsumerGroupMemberMetadataValue
    // record is written to the __consumer_offsets partition to persist the change. If the subscriptions have
    // changed, the subscription metadata is updated and persisted by writing a ConsumerGroupPartitionMetadataValue
    // record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have
    // changed, and persisted by writing a ConsumerGroupMetadataValue record to the partition.
    ConsumerGroupMember updatedMember = new ConsumerGroupMember.Builder(member)
        .maybeUpdateInstanceId(Optional.ofNullable(instanceId))
        .maybeUpdateRackId(Optional.ofNullable(rackId))
        .maybeUpdateRebalanceTimeoutMs(ofSentinel(rebalanceTimeoutMs))
        .maybeUpdateServerAssignorName(Optional.ofNullable(assignorName))
        .maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames))
        .setClientId(clientId)
        .setClientHost(clientHost)
        .setClassicMemberMetadata(null)
        .build();

    boolean bumpGroupEpoch = hasMemberSubscriptionChanged(
        groupId,
        member,
        updatedMember,
        records
    );

    int groupEpoch = group.groupEpoch();
    Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata();
    Map<String, Integer> subscribedTopicNamesMap = group.subscribedTopicNames();
    SubscriptionType subscriptionType = group.subscriptionType();

    if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
        // The subscription metadata is updated in two cases:
        // 1) The member has updated its subscriptions;
        // 2) The refresh deadline has been reached.
        subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember);
        subscriptionMetadata = group.computeSubscriptionMetadata(
            subscribedTopicNamesMap,
            metadataImage.topics(),
            metadataImage.cluster()
        );

        // Count the member in if it is not yet part of the group (records not replayed yet).
        int numMembers = group.numMembers();
        if (!group.hasMember(updatedMember.memberId()) && !group.hasStaticMember(updatedMember.instanceId())) {
            numMembers++;
        }

        subscriptionType = ModernGroup.subscriptionType(
            subscribedTopicNamesMap,
            numMembers
        );

        if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
            log.info("[GroupId {}] Computed new subscription metadata: {}.",
                groupId, subscriptionMetadata);
            bumpGroupEpoch = true;
            records.add(newConsumerGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
        }

        if (bumpGroupEpoch) {
            groupEpoch += 1;
            records.add(newConsumerGroupEpochRecord(groupId, groupEpoch));
            log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch);
            metrics.record(CONSUMER_GROUP_REBALANCES_SENSOR_NAME);
        }

        group.setMetadataRefreshDeadline(currentTimeMs + consumerGroupMetadataRefreshIntervalMs, groupEpoch);
    }

    // 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between
    // the existing and the new target assignment is persisted to the partition.
    final int targetAssignmentEpoch;
    final Assignment targetAssignment;

    if (groupEpoch > group.assignmentEpoch()) {
        targetAssignment = updateTargetAssignment(
            group,
            groupEpoch,
            member,
            updatedMember,
            subscriptionMetadata,
            subscriptionType,
            records
        );
        targetAssignmentEpoch = groupEpoch;
    } else {
        targetAssignmentEpoch = group.assignmentEpoch();
        targetAssignment = group.targetAssignment(updatedMember.memberId(), updatedMember.instanceId());
    }

    // 3. Reconcile the member's assignment with the target assignment if the member is not
    // fully reconciled yet.
    updatedMember = maybeReconcile(
        groupId,
        updatedMember,
        group::currentPartitionEpoch,
        targetAssignmentEpoch,
        targetAssignment,
        ownedTopicPartitions,
        records
    );

    scheduleConsumerGroupSessionTimeout(groupId, memberId);

    // Prepare the response.
    ConsumerGroupHeartbeatResponseData response = new ConsumerGroupHeartbeatResponseData()
        .setMemberId(updatedMember.memberId())
        .setMemberEpoch(updatedMember.memberEpoch())
        .setHeartbeatIntervalMs(consumerGroupHeartbeatIntervalMs(groupId));

    // The assignment is only provided in the following cases:
    // 1. The member sent a full request. It does so when joining or rejoining the group with zero
    //    as the member epoch; or on any errors (e.g. timeout). We use all the non-optional fields
    //    (rebalanceTimeoutMs, subscribedTopicNames and ownedTopicPartitions) to detect a full request
    //    as those must be set in a full request.
    // 2. The member's assignment has been updated.
    boolean isFullRequest = memberEpoch == 0 || (rebalanceTimeoutMs != -1 && subscribedTopicNames != null && ownedTopicPartitions != null);
    if (isFullRequest || hasAssignedPartitionsChanged(member, updatedMember)) {
        response.setAssignment(createConsumerGroupResponseAssignment(updatedMember));
    }

    return new CoordinatorResult<>(records, response);
}
// A heartbeat with memberEpoch > 0 disables implicit group creation, so heartbeating
// into a group that was never created must fail with GroupIdNotFoundException.
@Test
public void testUnknownGroupId() {
    String groupId = "fooup";
    // Use a static member id as it makes the test easier.
    String memberId = Uuid.randomUuid().toString();

    MockPartitionAssignor assignor = new MockPartitionAssignor("range");
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .withConsumerGroupAssignors(Collections.singletonList(assignor))
        .build();

    assertThrows(GroupIdNotFoundException.class, () ->
        context.consumerGroupHeartbeat(
            new ConsumerGroupHeartbeatRequestData()
                .setGroupId(groupId)
                .setMemberId(memberId)
                .setMemberEpoch(100) // Epoch must be > 0.
                .setRebalanceTimeoutMs(5000)
                .setSubscribedTopicNames(Arrays.asList("foo", "bar"))
                .setTopicPartitions(Collections.emptyList())));
}
/**
 * Renders the properties as {@code 'key'='value'} pairs, sorted by key and joined
 * with {@code ", "}. Entries whose value is null are skipped.
 *
 * @param props properties to render
 * @return the formatted, key-sorted string (empty for empty properties)
 */
public static String toString(final Properties props) {
    // TreeMap gives deterministic key ordering; StringJoiner handles the separator
    // bookkeeping that was previously done by hand with hasNext(). Using a typed
    // TreeMap<Object, Object> also removes the need for rawtypes/unchecked suppression.
    StringJoiner joiner = new StringJoiner(", ");
    for (Object key : new TreeMap<>(props).keySet()) {
        Object value = props.get(key);
        if (null == value) {
            continue;
        }
        joiner.add(String.format("'%s'='%s'", key, value));
    }
    return joiner.toString();
}
// A single entry renders as 'key'='value' with no trailing separator.
@Test
void assertToStringWithSingleKey() {
    assertThat(PropertiesUtils.toString(PropertiesBuilder.build(new Property("key", "value"))), is("'key'='value'"));
}
/**
 * Resolves the metadata partition name for the given partition type. Functional
 * indexes look their name up in the table's index metadata (which must be present);
 * every other type uses its fixed partition path.
 */
public static String getPartitionNameFromPartitionType(MetadataPartitionType partitionType, HoodieTableMetaClient metaClient, String indexName) {
    if (!MetadataPartitionType.FUNCTIONAL_INDEX.equals(partitionType)) {
        return partitionType.getPartitionPath();
    }
    checkArgument(metaClient.getIndexMetadata().isPresent(), "Index definition is not present");
    return metaClient.getIndexMetadata().get().getIndexDefinitions().get(indexName).getIndexName();
}
// For FUNCTIONAL_INDEX the partition name must come from the index definition stored
// in the meta client's index metadata, not from the partition type's fixed path.
@Test
public void testGetFunctionalIndexPath() {
    MetadataPartitionType partitionType = MetadataPartitionType.FUNCTIONAL_INDEX;
    HoodieTableMetaClient metaClient = mock(HoodieTableMetaClient.class);
    String indexName = "testIndex";
    Map<String, HoodieIndexDefinition> indexDefinitions = new HashMap<>();
    indexDefinitions.put(
        indexName,
        new HoodieIndexDefinition("func_index_testIndex", "column_stats", "lower", Collections.singletonList("name"), null));
    HoodieIndexMetadata indexMetadata = new HoodieIndexMetadata(indexDefinitions);
    when(metaClient.getIndexMetadata()).thenReturn(Option.of(indexMetadata));

    String result = HoodieIndexUtils.getPartitionNameFromPartitionType(partitionType, metaClient, indexName);
    assertEquals("func_index_testIndex", result);
}
/**
 * Removes an explicit default port (80 for http, 443 for https) from the URL.
 * Non-default ports and URLs without an explicit port are left untouched.
 *
 * @return this instance, for chaining
 */
public URLNormalizer removeDefaultPort() {
    URL u = toURL();
    final int port = u.getPort();
    if (("http".equalsIgnoreCase(u.getProtocol()) && port == HttpURL.DEFAULT_HTTP_PORT)
            || ("https".equalsIgnoreCase(u.getProtocol()) && port == HttpURL.DEFAULT_HTTPS_PORT)) {
        // Anchor the replacement to the end of the authority so a ":80"/":443"
        // appearing earlier (e.g. in the user-info part) is never stripped by
        // mistake, which the previous unanchored replaceFirst(":80") could do.
        url = url.replaceFirst("^([^/?#]*//[^/?#]*):" + port + "(?=[/?#]|$)", "$1");
    }
    return this;
}
// Covers: default http/https ports removed (with and without a path), non-default
// port kept, no-port URLs untouched, and a ":80" in the path (not a port) preserved.
@Test
public void testRemoveDefaultPort() {
    s = "http://www.example.com:80/bar.html";
    t = "http://www.example.com/bar.html";
    assertEquals(t, n(s).removeDefaultPort().toString());
    s = "https://www.example.com:443/bar.html";
    t = "https://www.example.com/bar.html";
    assertEquals(t, n(s).removeDefaultPort().toString());
    s = "http://www.example.com/bar.html";
    t = "http://www.example.com/bar.html";
    assertEquals(t, n(s).removeDefaultPort().toString());
    s = "http://www.example.com:7080/bar.html";
    t = "http://www.example.com:7080/bar.html";
    assertEquals(t, n(s).removeDefaultPort().toString());
    s = "http://www.example.com:80";
    t = "http://www.example.com";
    assertEquals(t, n(s).removeDefaultPort().toString());
    s = "http://www.example.com";
    t = "http://www.example.com";
    assertEquals(t, n(s).removeDefaultPort().toString());
    s = "http://www.example.com/bar/:80";
    t = "http://www.example.com/bar/:80";
    assertEquals(t, n(s).removeDefaultPort().toString());
}
/**
 * Checks whether {@code actual} can be used where {@code declared} is expected,
 * without allowing implicit casts (delegates with {@code allowCast = false}).
 */
public static boolean areCompatible(final SqlArgument actual, final ParamType declared) {
    return areCompatible(actual, declared, false);
}
// Even with implicit casting enabled, narrowing or cross-kind conversions
// (BIGINT->INTEGER, DOUBLE->LONG/DECIMAL, INTEGER->INTERVALUNIT) must be rejected.
@Test
public void shouldNotPassInCompatibleSchemasWithImplicitCasting() {
    assertThat(ParamTypes.areCompatible(SqlArgument.of(SqlTypes.BIGINT), ParamTypes.INTEGER, true), is(false));

    assertThat(ParamTypes.areCompatible(SqlArgument.of(SqlTypes.DOUBLE), ParamTypes.LONG, true), is(false));

    assertThat(ParamTypes.areCompatible(SqlArgument.of(SqlTypes.DOUBLE), ParamTypes.DECIMAL, true), is(false));

    assertThat(ParamTypes.areCompatible(SqlArgument.of(SqlTypes.INTEGER), ParamTypes.INTERVALUNIT, true), is(false));
}
// Package-private no-arg constructor: Queue instances are built and populated
// by QueueManager rather than constructed directly.
Queue() {
}
// End-to-end exercise of QueueManager against a generated config file: queue
// hierarchy, properties, ACL checks, state, refresh, JobQueueInfo views and
// configuration dumping.
@Test (timeout=5000)
public void testQueue() throws IOException {
    File f = null;
    try {
        f = writeFile();

        QueueManager manager = new QueueManager(f.getCanonicalPath(), true);
        manager.setSchedulerInfo("first", "queueInfo");
        manager.setSchedulerInfo("second", "queueInfoqueueInfo");
        Queue root = manager.getRoot();
        assertThat(root.getChildren().size()).isEqualTo(2);
        Iterator<Queue> iterator = root.getChildren().iterator();
        Queue firstSubQueue = iterator.next();
        assertEquals("first", firstSubQueue.getName());
        assertEquals(
            firstSubQueue.getAcls().get("mapred.queue.first.acl-submit-job")
                .toString(),
            "Users [user1, user2] and members of the groups [group1, group2] are allowed");
        Queue secondSubQueue = iterator.next();
        assertEquals("second", secondSubQueue.getName());
        assertThat(secondSubQueue.getProperties().getProperty("key"))
            .isEqualTo("value");
        assertThat(secondSubQueue.getProperties().getProperty("key1"))
            .isEqualTo("value1");
        // test status
        assertThat(firstSubQueue.getState().getStateName())
            .isEqualTo("running");
        assertThat(secondSubQueue.getState().getStateName())
            .isEqualTo("stopped");

        Set<String> template = new HashSet<String>();
        template.add("first");
        template.add("second");
        assertEquals(manager.getLeafQueueNames(), template);

        // test user access: user1/group1 may submit to "first" only, and may not administer
        UserGroupInformation mockUGI = mock(UserGroupInformation.class);
        when(mockUGI.getShortUserName()).thenReturn("user1");
        String[] groups = { "group1" };
        when(mockUGI.getGroupNames()).thenReturn(groups);
        assertTrue(manager.hasAccess("first", QueueACL.SUBMIT_JOB, mockUGI));
        assertFalse(manager.hasAccess("second", QueueACL.SUBMIT_JOB, mockUGI));
        assertFalse(manager.hasAccess("first", QueueACL.ADMINISTER_JOBS, mockUGI));
        when(mockUGI.getShortUserName()).thenReturn("user3");
        assertTrue(manager.hasAccess("first", QueueACL.ADMINISTER_JOBS, mockUGI));

        QueueAclsInfo[] qai = manager.getQueueAcls(mockUGI);
        assertThat(qai.length).isEqualTo(1);
        // test refresh queue
        manager.refreshQueues(getConfiguration(), null);
        iterator = root.getChildren().iterator();
        Queue firstSubQueue1 = iterator.next();
        Queue secondSubQueue1 = iterator.next();
        // test equals method: refreshed queues compare equal to the originals
        assertThat(firstSubQueue).isEqualTo(firstSubQueue1);
        assertThat(firstSubQueue1.getState().getStateName())
            .isEqualTo("running");
        assertThat(secondSubQueue1.getState().getStateName())
            .isEqualTo("stopped");

        assertThat(firstSubQueue1.getSchedulingInfo())
            .isEqualTo("queueInfo");
        assertThat(secondSubQueue1.getSchedulingInfo())
            .isEqualTo("queueInfoqueueInfo");

        // test JobQueueInfo
        assertThat(firstSubQueue.getJobQueueInfo().getQueueName())
            .isEqualTo("first");
        assertThat(firstSubQueue.getJobQueueInfo().getState().toString())
            .isEqualTo("running");
        assertThat(firstSubQueue.getJobQueueInfo().getSchedulingInfo())
            .isEqualTo("queueInfo");
        assertThat(secondSubQueue.getJobQueueInfo().getChildren().size())
            .isEqualTo(0);
        // test scheduler info retrieval
        assertThat(manager.getSchedulerInfo("first")).isEqualTo("queueInfo");
        Set<String> queueJobQueueInfos = new HashSet<String>();
        for (JobQueueInfo jobInfo : manager.getJobQueueInfos()) {
            queueJobQueueInfos.add(jobInfo.getQueueName());
        }
        Set<String> rootJobQueueInfos = new HashSet<String>();
        for (Queue queue : root.getChildren()) {
            rootJobQueueInfos.add(queue.getJobQueueInfo().getQueueName());
        }
        assertEquals(queueJobQueueInfos, rootJobQueueInfos);
        // test getJobQueueInfoMapping
        assertThat(manager.getJobQueueInfoMapping().get("first").getQueueName())
            .isEqualTo("first");
        // test dumpConfiguration
        Writer writer = new StringWriter();
        Configuration conf = getConfiguration();
        conf.unset(DeprecatedQueueConfigurationParser.MAPRED_QUEUE_NAMES_KEY);
        QueueManager.dumpConfiguration(writer, f.getAbsolutePath(), conf);
        String result = writer.toString();
        assertTrue(result
            .indexOf("\"name\":\"first\",\"state\":\"running\",\"acl_submit_job\":\"user1,user2 group1,group2\",\"acl_administer_jobs\":\"user3,user4 group3,group4\",\"properties\":[],\"children\":[]") > 0);

        writer = new StringWriter();
        QueueManager.dumpConfiguration(writer, conf);
        result = writer.toString();
        assertTrue(result.contains("{\"queues\":[{\"name\":\"default\",\"state\":\"running\",\"acl_submit_job\":\"*\",\"acl_administer_jobs\":\"*\",\"properties\":[],\"children\":[]},{\"name\":\"q1\",\"state\":\"running\",\"acl_submit_job\":\" \",\"acl_administer_jobs\":\" \",\"properties\":[],\"children\":[{\"name\":\"q1:q2\",\"state\":\"running\",\"acl_submit_job\":\" \",\"acl_administer_jobs\":\" \",\"properties\":["));
        assertTrue(result.contains("{\"key\":\"capacity\",\"value\":\"20\"}"));
        assertTrue(result.contains("{\"key\":\"user-limit\",\"value\":\"30\"}"));
        assertTrue(result.contains("],\"children\":[]}]}]}"));
        // test constructor QueueAclsInfo
        QueueAclsInfo qi = new QueueAclsInfo();
        assertNull(qi.getQueueName());
    } finally {
        if (f != null) {
            f.delete();
        }
    }
}
/**
 * Converts a successful GraphHopper response into a Mapbox-directions-style JSON
 * document: a "routes" array (one entry per alternative path), a "waypoints" array
 * with the snapped coordinates, "code":"Ok" and a random "uuid".
 *
 * @throws IllegalStateException if the response carries errors — callers must use
 *         the dedicated error converter instead
 */
public static ObjectNode convertFromGHResponse(GHResponse ghResponse, TranslationMap translationMap, Locale locale, DistanceConfig distanceConfig) {
    ObjectNode json = JsonNodeFactory.instance.objectNode();

    if (ghResponse.hasErrors())
        throw new IllegalStateException(
            "If the response has errors, you should use the method NavigateResponseConverter#convertFromGHResponseError");

    PointList waypoints = ghResponse.getBest().getWaypoints();

    final ArrayNode routesJson = json.putArray("routes");

    List<ResponsePath> paths = ghResponse.getAll();

    for (int i = 0; i < paths.size(); i++) {
        ResponsePath path = paths.get(i);
        ObjectNode pathJson = routesJson.addObject();

        putRouteInformation(pathJson, path, i, translationMap, locale, distanceConfig);
    }

    final ArrayNode waypointsJson = json.putArray("waypoints");
    for (int i = 0; i < waypoints.size(); i++) {
        ObjectNode waypointJson = waypointsJson.addObject();
        // TODO get names
        waypointJson.put("name", "");
        putLocation(waypoints.getLat(i), waypoints.getLon(i), waypointJson);
    }

    json.put("code", "Ok");
    // TODO: Maybe we need a different format... uuid: "cji4ja4f8004o6xrsta8w4p4h"
    json.put("uuid", UUID.randomUUID().toString().replaceAll("-", ""));

    return json;
}
// A roundabout step must produce a primary banner instruction with type "roundabout",
// the exit text, a turn modifier and the turn angle in "degrees" (within 1 degree).
@Test
public void roundaboutDegreesTest() {
    GHResponse rsp = hopper.route(new GHRequest(42.554851, 1.536198, 42.510071, 1.548128).setProfile(profile));

    ObjectNode json = NavigateResponseConverter.convertFromGHResponse(rsp, trMap, Locale.ENGLISH, distanceConfig);

    JsonNode steps = json.get("routes").get(0).get("legs").get(0).get("steps");

    JsonNode step = steps.get(5);
    JsonNode bannerInstructions = step.get("bannerInstructions");
    JsonNode primary = bannerInstructions.get(0).get("primary");

    assertEquals("roundabout", primary.get("type").asText());
    assertEquals("At roundabout, take exit 2 onto CG-3", primary.get("text").asText());
    assertEquals("right", primary.get("modifier").asText());

    assertEquals(222, primary.get("degrees").asDouble(), 1);
}
public PathData[] getDirectoryContents() throws IOException { checkIfExists(FileTypeRequirement.SHOULD_BE_DIRECTORY); FileStatus[] stats = fs.listStatus(path); PathData[] items = new PathData[stats.length]; for (int i=0; i < stats.length; i++) { // preserve relative paths String child = getStringForChildPath(stats[i].getPath()); items[i] = new PathData(fs, child, stats[i]); } Arrays.sort(items); return items; }
// Listing the relative current directory (".") must return its children with
// relative names ("d1", "d2"), in sorted order.
@Test (timeout = 30000)
public void testCwdContents() throws Exception {
    String dirString = Path.CUR_DIR;
    PathData item = new PathData(dirString, conf);
    PathData[] items = item.getDirectoryContents();
    assertEquals(
        sortedString("d1", "d2"),
        sortedString(items)
    );
}
/**
 * Reads exactly {@code length} bytes starting at {@code position} into
 * {@code buffer} at {@code offset}, issuing as many positional reads as needed.
 *
 * @throws EOFException if the stream ends before {@code length} bytes are read
 * @throws IOException  if an underlying read fails
 */
@Override
public void readFully(long position, byte[] buffer, int offset, int length) throws IOException {
    // A short read just advances the cursor; keep reading until the range is filled.
    for (int filled = 0; filled < length; ) {
        final int n = read(position + filled, buffer, offset + filled, length - filled);
        if (n == -1) {
            throw new EOFException();
        }
        filled += n;
    }
}
// When the cache-tier bytes differ from the source bytes, a validating readFully
// must fail with a VerifyException naming the first mismatching position.
@Test
public void testValidateDataEnabledWithDataMismatch() throws IOException {
    byte[] inputData = new byte[] {1, 2, 3};
    byte[] corruptedData = new byte[] {1, 3, 3}; // differs from inputData at index 1
    FSDataInputStream dataTierInputStream = new TestFSDataInputStream(inputData);
    FSDataInputStream fileInStream = new TestFSDataInputStream(corruptedData);
    CacheValidatingInputStream fileInputStream = new CacheValidatingInputStream(fileInStream, dataTierInputStream);
    byte[] buffer = new byte[3];
    try {
        fileInputStream.readFully(0, buffer, 0, buffer.length);
        fail("Data validation didn't work for mismatched data.");
    } catch (VerifyException ex) {
        assertEquals(ex.getMessage(), "corrupted buffer at position 1");
    }
}
@Override public void doSendMail(MailSendMessage message) { // 1. 创建发送账号 MailAccountDO account = validateMailAccount(message.getAccountId()); MailAccount mailAccount = buildMailAccount(account, message.getNickname()); // 2. 发送邮件 try { String messageId = MailUtil.send(mailAccount, message.getMail(), message.getTitle(), message.getContent(), true); // 3. 更新结果(成功) mailLogService.updateMailSendResult(message.getLogId(), messageId, null); } catch (Exception e) { // 3. 更新结果(异常) mailLogService.updateMailSendResult(message.getLogId(), null, e); } }
// When MailUtil.send throws, doSendMail must swallow the exception and record it on
// the mail log entry (null message id, same exception instance).
@Test
public void testDoSendMail_exception() {
    try (MockedStatic<MailUtil> mailUtilMock = mockStatic(MailUtil.class)) {
        // Prepare the message under test.
        MailSendMessage message = randomPojo(MailSendMessage.class, o -> o.setNickname("芋艿"));
        // Mock the account lookup.
        MailAccountDO account = randomPojo(MailAccountDO.class, o -> o.setMail("7685@qq.com"));
        when(mailAccountService.getMailAccountFromCache(eq(message.getAccountId())))
            .thenReturn(account);
        // Mock the actual send to throw, while verifying the MailAccount was built
        // from the account data and the message nickname.
        Exception e = new NullPointerException("啦啦啦");
        mailUtilMock.when(() -> MailUtil.send(argThat(mailAccount -> {
                assertEquals("芋艿 <7685@qq.com>", mailAccount.getFrom());
                assertTrue(mailAccount.isAuth());
                assertEquals(account.getUsername(), mailAccount.getUser());
                assertEquals(account.getPassword(), mailAccount.getPass());
                assertEquals(account.getHost(), mailAccount.getHost());
                assertEquals(account.getPort(), mailAccount.getPort());
                assertEquals(account.getSslEnable(), mailAccount.isSslEnable());
                return true;
            }), eq(message.getMail()), eq(message.getTitle()), eq(message.getContent()), eq(true)))
            .thenThrow(e);

        // Invoke.
        mailSendService.doSendMail(message);
        // Assert the failure was recorded on the log entry.
        verify(mailLogService).updateMailSendResult(eq(message.getLogId()), isNull(), same(e));
    }
}
/**
 * Determines whether the given path exists on the remote side. The root always
 * exists; anything else exists when its attributes can be resolved.
 */
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if (file.isRoot()) {
        return true;
    }
    try {
        final PathAttributes attributes = new BoxAttributesFinderFeature(session, fileid).find(file, listener);
        return attributes != PathAttributes.EMPTY;
    }
    catch (NotfoundException e) {
        // Missing remote file is an expected outcome, not an error.
        return false;
    }
}
// After creating a file, find() must report it present — but must not match a path
// with the same name declared as a directory (type-sensitive lookup).
@Test
public void testFindFile() throws Exception {
    final BoxFileidProvider fileid = new BoxFileidProvider(session);
    final Path file = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new BoxTouchFeature(session, fileid).touch(file, new TransferStatus());
    assertTrue(new BoxFindFeature(session, fileid).find(file));
    assertFalse(new BoxFindFeature(session, fileid).find(new Path(file.getAbsolute(), EnumSet.of(Path.Type.directory))));
    new BoxDeleteFeature(session, fileid).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Resolves the root project's working directory: the configured value when present
 * (absolute paths used as-is, relative paths resolved against {@code baseDir}),
 * otherwise the default ".sonar" directory under {@code baseDir}.
 */
protected File initRootProjectWorkDir(File baseDir, Map<String, String> rootProperties) {
    final String configured = rootProperties.get(CoreProperties.WORKING_DIRECTORY);
    if (StringUtils.isBlank(configured)) {
        return new File(baseDir, CoreProperties.WORKING_DIRECTORY_DEFAULT_VALUE);
    }
    final File candidate = new File(configured);
    return candidate.isAbsolute() ? candidate : new File(baseDir, candidate.getPath());
}
// With no working-directory property set, the work dir defaults to "<baseDir>/.sonar".
@Test
public void shouldInitRootWorkDir() {
    ProjectReactorBuilder builder = new ProjectReactorBuilder(new ScannerProperties(emptyMap()), mock(AnalysisWarnings.class));
    File baseDir = new File("target/tmp/baseDir");

    File workDir = builder.initRootProjectWorkDir(baseDir, emptyMap());

    assertThat(workDir).isEqualTo(new File(baseDir, ".sonar"));
}
/**
 * Attempts to unblock a term log at {@code blockedOffset} where a publisher stalled
 * mid-write. Three outcomes:
 *  - a negative frame length means the header was claimed but not committed: the
 *    frame is rewritten as padding and UNBLOCKED is returned;
 *  - a zero frame length means no header yet: scan forward for the next committed
 *    frame and, if the gap is still all zero on a re-check, pad the gap (UNBLOCKED),
 *    or pad to the end of the term (UNBLOCKED_TO_END) when the scan reaches capacity;
 *  - otherwise NO_ACTION — either no blockage or a concurrent writer made progress
 *    (the volatile re-reads deliberately bail out on any observed race).
 */
public static Status unblock(
    final UnsafeBuffer logMetaDataBuffer,
    final UnsafeBuffer termBuffer,
    final int blockedOffset,
    final int tailOffset,
    final int termId) {
    Status status = NO_ACTION;
    int frameLength = frameLengthVolatile(termBuffer, blockedOffset);

    if (frameLength < 0) {
        // Claimed-but-uncommitted frame: its absolute length is known, pad it out.
        resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, -frameLength);
        status = UNBLOCKED;
    } else if (0 == frameLength) {
        // No header written yet: find where the next committed frame begins.
        int currentOffset = blockedOffset + FRAME_ALIGNMENT;

        while (currentOffset < tailOffset) {
            frameLength = frameLengthVolatile(termBuffer, currentOffset);

            if (frameLength != 0) {
                // Re-confirm the whole gap is still zeroed before padding it,
                // to avoid clobbering a writer that committed concurrently.
                if (scanBackToConfirmZeroed(termBuffer, currentOffset, blockedOffset)) {
                    final int length = currentOffset - blockedOffset;
                    resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, length);
                    status = UNBLOCKED;
                }

                break;
            }

            currentOffset += FRAME_ALIGNMENT;
        }

        if (currentOffset == termBuffer.capacity()) {
            // Scanned to the end of the term: pad to the end if still unclaimed.
            if (0 == frameLengthVolatile(termBuffer, blockedOffset)) {
                final int length = currentOffset - blockedOffset;
                resetHeader(logMetaDataBuffer, termBuffer, blockedOffset, termId, length);
                status = UNBLOCKED_TO_END;
            }
        }
    }

    return status;
}
// Race simulation: the forward scan first sees a zero frame, but the back-scan
// confirmation observes a concurrently committed frame — the unblocker must then
// take NO_ACTION rather than pad over the writer's progress.
@Test
void shouldNotUnblockGapWithMessageRaceOnSecondMessageIncreasingTailThenInterrupting() {
    final int messageLength = HEADER_LENGTH * 4;
    final int termOffset = 0;
    final int tailOffset = messageLength * 3;

    // First volatile read returns 0 (gap), the re-read returns a committed length.
    when(mockTermBuffer.getIntVolatile(messageLength))
        .thenReturn(0)
        .thenReturn(messageLength);
    when(mockTermBuffer.getIntVolatile(messageLength * 2))
        .thenReturn(messageLength);

    assertEquals(
        NO_ACTION,
        TermUnblocker.unblock(mockLogMetaDataBuffer, mockTermBuffer, termOffset, tailOffset, TERM_ID));
}
/**
 * Iteratively traverses a (possibly recursive) Avro schema graph, invoking the
 * visitor on each schema. Uses an explicit stack instead of recursion, an identity
 * map to visit each schema object once (revisits are routed to visitTerminal), and
 * Supplier entries on the stack to mark post-visit callbacks for non-terminal nodes.
 *
 * @param start   root schema to traverse
 * @param visitor receives visit callbacks and produces the final result
 * @return {@code visitor.get()} after traversal completes or is terminated early
 */
public static <T> T visit(final Schema start, final SchemaVisitor<T> visitor) {
    // Set of Visited Schemas
    IdentityHashMap<Schema, Schema> visited = new IdentityHashMap<>();
    // Stack that contains the Schams to process and afterVisitNonTerminal
    // functions.
    // Deque<Either<Schema, Supplier<SchemaVisitorAction>>>
    // Using either has a cost which we want to avoid...
    Deque<Object> dq = new ArrayDeque<>();
    dq.addLast(start);
    Object current;
    while ((current = dq.pollLast()) != null) {
        if (current instanceof Supplier) {
            // we are executing a non terminal post visit.
            SchemaVisitorAction action = ((Supplier<SchemaVisitorAction>) current).get();
            switch (action) {
            case CONTINUE:
                break;
            case SKIP_SUBTREE:
                // SKIP_SUBTREE is meaningless after the subtree was already visited.
                throw new UnsupportedOperationException();
            case SKIP_SIBLINGS:
                // Drop the remaining sibling schemas (but keep pending post-visits).
                while (dq.getLast() instanceof Schema) {
                    dq.removeLast();
                }
                break;
            case TERMINATE:
                return visitor.get();
            default:
                throw new UnsupportedOperationException("Invalid action " + action);
            }
        } else {
            Schema schema = (Schema) current;
            boolean terminate;
            if (!visited.containsKey(schema)) {
                Schema.Type type = schema.getType();
                switch (type) {
                case ARRAY:
                    terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getElementType()));
                    visited.put(schema, schema);
                    break;
                case RECORD:
                    // Fields are pushed in reverse so they are visited in declaration order.
                    terminate = visitNonTerminal(visitor, schema, dq, () -> schema.getFields().stream().map(Field::schema)
                        .collect(Collectors.toCollection(ArrayDeque::new)).descendingIterator());
                    visited.put(schema, schema);
                    break;
                case UNION:
                    terminate = visitNonTerminal(visitor, schema, dq, schema.getTypes());
                    visited.put(schema, schema);
                    break;
                case MAP:
                    terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getValueType()));
                    visited.put(schema, schema);
                    break;
                case NULL:
                case BOOLEAN:
                case BYTES:
                case DOUBLE:
                case ENUM:
                case FIXED:
                case FLOAT:
                case INT:
                case LONG:
                case STRING:
                    terminate = visitTerminal(visitor, schema, dq);
                    break;
                default:
                    throw new UnsupportedOperationException("Invalid type " + type);
                }
            } else {
                // Already-visited schema (shared or recursive reference): treat as terminal.
                terminate = visitTerminal(visitor, schema, dq);
            }
            if (terminate) {
                return visitor.get();
            }
        }
    }
    return visitor.get();
}
// Cloning via CloningVisitor strips docs (clone has no doc, original keeps its doc)
// while remaining schema-compatible with the original in both directions.
@Test
void textCloning() {
    Schema recSchema = new Schema.Parser().parse(SCHEMA);
    CloningVisitor cv = new CloningVisitor(recSchema);
    Schema trimmed = Schemas.visit(recSchema, cv);
    assertNull(trimmed.getDoc());
    assertNotNull(recSchema.getDoc());

    SchemaCompatibility.SchemaCompatibilityType compat = SchemaCompatibility
        .checkReaderWriterCompatibility(trimmed, recSchema).getType();
    assertEquals(SchemaCompatibility.SchemaCompatibilityType.COMPATIBLE, compat);
    compat = SchemaCompatibility.checkReaderWriterCompatibility(recSchema, trimmed).getType();
    assertEquals(SchemaCompatibility.SchemaCompatibilityType.COMPATIBLE, compat);
    assertNotNull(cv.toString());
}
/**
 * Returns the content-summary header line: the full (quota-including) header when
 * {@code qOption} is set, otherwise the summary-only header.
 */
public static String getHeader(boolean qOption) {
    if (qOption) {
        return ALL_HEADER;
    }
    return SUMMARY_HEADER;
}
// Without the quota option the header contains only the summary columns,
// with exact fixed-width spacing.
@Test
public void testGetHeaderNoQuota() {
    String header = "       DIR_COUNT   FILE_COUNT       CONTENT_SIZE ";
    assertEquals(header, ContentSummary.getHeader(false));
}
/**
 * Loads an OAuth2 client by primary key; returns null when no row matches
 * (straight delegation to the mapper).
 */
@Override
public OAuth2ClientDO getOAuth2Client(Long id) {
    return oauth2ClientMapper.selectById(id);
}
// Round-trip: insert a random client row, then fetch it by id and compare fields.
@Test
public void testGetOAuth2Client() {
    // Seed the database.
    OAuth2ClientDO clientDO = randomPojo(OAuth2ClientDO.class);
    oauth2ClientMapper.insert(clientDO);
    // Prepare the lookup key.
    Long id = clientDO.getId();
    // Invoke and assert.
    OAuth2ClientDO dbClientDO = oauth2ClientService.getOAuth2Client(id);
    assertPojoEquals(clientDO, dbClientDO);
}
/**
 * Builds a Camel Exchange from a Debezium SourceRecord: the change-event payload
 * becomes the message body, while key, source metadata, operation, before-image,
 * timestamp and any DDL statement are exposed as message headers.
 *
 * @param consumer when non-null, the exchange is created via the consumer (so it is
 *                 tied to its lifecycle); otherwise a plain endpoint exchange is used
 */
public Exchange createDbzExchange(DebeziumConsumer consumer, final SourceRecord sourceRecord) {
    final Exchange exchange;
    if (consumer != null) {
        exchange = consumer.createExchange(false);
    } else {
        exchange = super.createExchange();
    }
    final Message message = exchange.getIn();
    final Schema valueSchema = sourceRecord.valueSchema();
    final Object value = sourceRecord.value();

    // extract values from SourceRecord
    final Map<String, Object> sourceMetadata = extractSourceMetadataValueFromValueStruct(valueSchema, value);
    final Object operation = extractValueFromValueStruct(valueSchema, value, Envelope.FieldName.OPERATION);
    final Object before = extractValueFromValueStruct(valueSchema, value, Envelope.FieldName.BEFORE);
    final Object body = extractBodyValueFromValueStruct(valueSchema, value);
    final Object timestamp = extractValueFromValueStruct(valueSchema, value, Envelope.FieldName.TIMESTAMP);
    final Object ddl = extractValueFromValueStruct(valueSchema, value, HistoryRecord.Fields.DDL_STATEMENTS);

    // set message headers
    message.setHeader(DebeziumConstants.HEADER_IDENTIFIER, sourceRecord.topic());
    message.setHeader(DebeziumConstants.HEADER_KEY, sourceRecord.key());
    message.setHeader(DebeziumConstants.HEADER_SOURCE_METADATA, sourceMetadata);
    message.setHeader(DebeziumConstants.HEADER_OPERATION, operation);
    message.setHeader(DebeziumConstants.HEADER_BEFORE, before);
    message.setHeader(DebeziumConstants.HEADER_TIMESTAMP, timestamp);
    message.setHeader(DebeziumConstants.HEADER_DDL_SQL, ddl);
    // Also expose the event timestamp under Camel's standard header name.
    message.setHeader(Exchange.MESSAGE_TIMESTAMP, timestamp);

    message.setBody(body);

    return exchange;
}
@Test void testIfCreatesExchangeFromSourceDdlRecord() { final SourceRecord sourceRecord = createDdlSQLRecord(); final Exchange exchange = debeziumEndpoint.createDbzExchange(null, sourceRecord); final Message inMessage = exchange.getIn(); assertNotNull(exchange); // assert headers assertEquals("dummy", inMessage.getHeader(DebeziumConstants.HEADER_IDENTIFIER)); assertEquals("SET character_set_server=utf8, collation_server=utf8_bin", inMessage.getHeader(DebeziumConstants.HEADER_DDL_SQL)); }
synchronized LRUCacheEntry get(final Bytes key) { if (key == null) { return null; } final LRUNode node = getInternal(key); if (node == null) { return null; } updateLRU(node); return node.entry; }
@Test public void shouldReturnNullIfKeyIsNull() { assertNull(cache.get(null)); }
public LogRecord getLogRecord() { return logRecord; }
@Test public void testGetLogRecord() { assertEquals(logRecord, logEvent.getLogRecord()); }
public static DistCpOptions parse(String[] args) throws IllegalArgumentException { CommandLineParser parser = new CustomParser(); CommandLine command; try { command = parser.parse(cliOptions, args, true); } catch (ParseException e) { throw new IllegalArgumentException("Unable to parse arguments. " + Arrays.toString(args), e); } DistCpOptions.Builder builder = parseSourceAndTargetPaths(command); builder .withAtomicCommit( command.hasOption(DistCpOptionSwitch.ATOMIC_COMMIT.getSwitch())) .withSyncFolder( command.hasOption(DistCpOptionSwitch.SYNC_FOLDERS.getSwitch())) .withDeleteMissing( command.hasOption(DistCpOptionSwitch.DELETE_MISSING.getSwitch())) .withIgnoreFailures( command.hasOption(DistCpOptionSwitch.IGNORE_FAILURES.getSwitch())) .withOverwrite( command.hasOption(DistCpOptionSwitch.OVERWRITE.getSwitch())) .withAppend( command.hasOption(DistCpOptionSwitch.APPEND.getSwitch())) .withSkipCRC( command.hasOption(DistCpOptionSwitch.SKIP_CRC.getSwitch())) .withBlocking( !command.hasOption(DistCpOptionSwitch.BLOCKING.getSwitch())) .withVerboseLog( command.hasOption(DistCpOptionSwitch.VERBOSE_LOG.getSwitch())) .withDirectWrite( command.hasOption(DistCpOptionSwitch.DIRECT_WRITE.getSwitch())) .withUseIterator( command.hasOption(DistCpOptionSwitch.USE_ITERATOR.getSwitch())) .withUpdateRoot( command.hasOption(DistCpOptionSwitch.UPDATE_ROOT.getSwitch())); if (command.hasOption(DistCpOptionSwitch.DIFF.getSwitch())) { String[] snapshots = getVals(command, DistCpOptionSwitch.DIFF.getSwitch()); checkSnapshotsArgs(snapshots); builder.withUseDiff(snapshots[0], snapshots[1]); } if (command.hasOption(DistCpOptionSwitch.RDIFF.getSwitch())) { String[] snapshots = getVals(command, DistCpOptionSwitch.RDIFF.getSwitch()); checkSnapshotsArgs(snapshots); builder.withUseRdiff(snapshots[0], snapshots[1]); } if (command.hasOption(DistCpOptionSwitch.FILTERS.getSwitch())) { builder.withFiltersFile( getVal(command, DistCpOptionSwitch.FILTERS.getSwitch())); } if 
(command.hasOption(DistCpOptionSwitch.LOG_PATH.getSwitch())) { builder.withLogPath( new Path(getVal(command, DistCpOptionSwitch.LOG_PATH.getSwitch()))); } if (command.hasOption(DistCpOptionSwitch.WORK_PATH.getSwitch())) { final String workPath = getVal(command, DistCpOptionSwitch.WORK_PATH.getSwitch()); if (workPath != null && !workPath.isEmpty()) { builder.withAtomicWorkPath(new Path(workPath)); } } if (command.hasOption(DistCpOptionSwitch.TRACK_MISSING.getSwitch())) { builder.withTrackMissing( new Path(getVal( command, DistCpOptionSwitch.TRACK_MISSING.getSwitch()))); } if (command.hasOption(DistCpOptionSwitch.BANDWIDTH.getSwitch())) { try { final Float mapBandwidth = Float.parseFloat( getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch())); builder.withMapBandwidth(mapBandwidth); } catch (NumberFormatException e) { throw new IllegalArgumentException("Bandwidth specified is invalid: " + getVal(command, DistCpOptionSwitch.BANDWIDTH.getSwitch()), e); } } if (command.hasOption( DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch())) { try { final Integer numThreads = Integer.parseInt(getVal(command, DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch())); builder.withNumListstatusThreads(numThreads); } catch (NumberFormatException e) { throw new IllegalArgumentException( "Number of liststatus threads is invalid: " + getVal(command, DistCpOptionSwitch.NUM_LISTSTATUS_THREADS.getSwitch()), e); } } if (command.hasOption(DistCpOptionSwitch.MAX_MAPS.getSwitch())) { try { final Integer maps = Integer.parseInt( getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch())); builder.maxMaps(maps); } catch (NumberFormatException e) { throw new IllegalArgumentException("Number of maps is invalid: " + getVal(command, DistCpOptionSwitch.MAX_MAPS.getSwitch()), e); } } if (command.hasOption(DistCpOptionSwitch.COPY_STRATEGY.getSwitch())) { builder.withCopyStrategy( getVal(command, DistCpOptionSwitch.COPY_STRATEGY.getSwitch())); } if 
(command.hasOption(DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())) { builder.preserve( getVal(command, DistCpOptionSwitch.PRESERVE_STATUS.getSwitch())); } if (command.hasOption(DistCpOptionSwitch.FILE_LIMIT.getSwitch())) { LOG.warn(DistCpOptionSwitch.FILE_LIMIT.getSwitch() + " is a deprecated" + " option. Ignoring."); } if (command.hasOption(DistCpOptionSwitch.SIZE_LIMIT.getSwitch())) { LOG.warn(DistCpOptionSwitch.SIZE_LIMIT.getSwitch() + " is a deprecated" + " option. Ignoring."); } if (command.hasOption(DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch())) { final String chunkSizeStr = getVal(command, DistCpOptionSwitch.BLOCKS_PER_CHUNK.getSwitch().trim()); try { int csize = Integer.parseInt(chunkSizeStr); csize = csize > 0 ? csize : 0; LOG.info("Set distcp blocksPerChunk to " + csize); builder.withBlocksPerChunk(csize); } catch (NumberFormatException e) { throw new IllegalArgumentException("blocksPerChunk is invalid: " + chunkSizeStr, e); } } if (command.hasOption(DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch())) { final String copyBufferSizeStr = getVal(command, DistCpOptionSwitch.COPY_BUFFER_SIZE.getSwitch().trim()); try { int copyBufferSize = Integer.parseInt(copyBufferSizeStr); builder.withCopyBufferSize(copyBufferSize); } catch (NumberFormatException e) { throw new IllegalArgumentException("copyBufferSize is invalid: " + copyBufferSizeStr, e); } } return builder.build(); }
@Test public void testSourceListingAndSourcePath() { try { OptionsParser.parse(new String[] { "-f", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/source/first", "hdfs://localhost:8020/target/"}); Assert.fail("Both source listing & source paths allowed"); } catch (IllegalArgumentException ignore) {} }
public static DumpedPrivateKey fromBase58(@Nullable Network network, String base58) throws AddressFormatException, AddressFormatException.WrongNetwork { byte[] versionAndDataBytes = Base58.decodeChecked(base58); int version = versionAndDataBytes[0] & 0xFF; byte[] bytes = Arrays.copyOfRange(versionAndDataBytes, 1, versionAndDataBytes.length); if (network == null) { for (NetworkParameters p : Networks.get()) if (version == p.getDumpedPrivateKeyHeader()) return new DumpedPrivateKey(p.network(), bytes); throw new AddressFormatException.InvalidPrefix("No network found for version " + version); } else { NetworkParameters params = NetworkParameters.of(network); if (version == params.getDumpedPrivateKeyHeader()) return new DumpedPrivateKey(network, bytes); throw new AddressFormatException.WrongNetwork(version); } }
@Test public void checkNetwork() { DumpedPrivateKey.fromBase58(MAINNET, "5HtUCLMFWNueqN9unpgX2DzjMg6SDNZyKRb8s3LJgpFg5ubuMrk"); }
public static MetricsSource makeSource(Object source) { return new MetricsSourceBuilder(source, DefaultMetricsFactory.getAnnotatedMetricsFactory()).build(); }
@Test(expected=MetricsException.class) public void testBadHybrid() { MetricsAnnotations.makeSource(new BadHybridMetrics()); }
@Override public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) { return payload.getByteBuf().readByte(); }
@Test void assertRead() { when(payload.getByteBuf()).thenReturn(byteBuf); when(byteBuf.readByte()).thenReturn((byte) 1); MySQLTinyBinlogProtocolValue actual = new MySQLTinyBinlogProtocolValue(); assertThat(actual.read(columnDef, payload), is((byte) 1)); }
@Override public boolean add(final Long value) { return add(value.longValue()); }
@Test public void failsWhenOverCapacity() { final LongHashSet set = new LongHashSet(1, 0); set.add(1); assertThrows(IllegalStateException.class, () -> set.add(2)); }
public static NamenodeRole convert(NamenodeRoleProto role) { switch (role) { case NAMENODE: return NamenodeRole.NAMENODE; case BACKUP: return NamenodeRole.BACKUP; case CHECKPOINT: return NamenodeRole.CHECKPOINT; } return null; }
@Test public void testConvertNamespaceInfo() { NamespaceInfo info = new NamespaceInfo(37, "clusterID", "bpID", 2300); NamespaceInfoProto proto = PBHelper.convert(info); NamespaceInfo info2 = PBHelper.convert(proto); compare(info, info2); //Compare the StorageInfo assertEquals(info.getBlockPoolID(), info2.getBlockPoolID()); assertEquals(info.getBuildVersion(), info2.getBuildVersion()); }
@ExecuteOn(TaskExecutors.IO) @Get(uri = "/{executionId}") @Operation(tags = {"Executions"}, summary = "Get an execution") public Execution get( @Parameter(description = "The execution id") @PathVariable String executionId ) { return executionRepository .findById(tenantService.resolveTenant(), executionId) .orElse(null); }
@Test void restartFromLastFailed() throws TimeoutException { final String flowId = "restart_last_failed"; // Run execution until it ends Execution firstExecution = runnerUtils.runOne(null, TESTS_FLOW_NS, flowId, null, null); assertThat(firstExecution.getTaskRunList().get(2).getState().getCurrent(), is(State.Type.FAILED)); assertThat(firstExecution.getState().getCurrent(), is(State.Type.FAILED)); // Update task's command to make second execution successful Optional<Flow> flow = flowRepositoryInterface.findById(null, TESTS_FLOW_NS, flowId); assertThat(flow.isPresent(), is(true)); // Restart execution and wait until it finishes Execution finishedRestartedExecution = runnerUtils.awaitExecution( execution -> execution.getId().equals(firstExecution.getId()) && execution.getTaskRunList().size() == 4 && execution.getState().isTerminated(), () -> { Execution restartedExec = client.toBlocking().retrieve( HttpRequest .POST("/api/v1/executions/" + firstExecution.getId() + "/restart", ImmutableMap.of()), Execution.class ); assertThat(restartedExec, notNullValue()); assertThat(restartedExec.getId(), is(firstExecution.getId())); assertThat(restartedExec.getParentId(), nullValue()); assertThat(restartedExec.getTaskRunList().size(), is(3)); assertThat(restartedExec.getState().getCurrent(), is(State.Type.RESTARTED)); IntStream .range(0, 2) .mapToObj(value -> restartedExec.getTaskRunList().get(value)).forEach(taskRun -> { assertThat(taskRun.getState().getCurrent(), is(State.Type.SUCCESS)); assertThat(taskRun.getAttempts().size(), is(1)); assertThat(restartedExec.getTaskRunList().get(2).getState().getCurrent(), is(State.Type.RESTARTED)); assertThat(restartedExec.getTaskRunList().get(2).getAttempts().size(), is(1)); }); }, Duration.ofSeconds(15) ); assertThat(finishedRestartedExecution, notNullValue()); assertThat(finishedRestartedExecution.getId(), is(firstExecution.getId())); assertThat(finishedRestartedExecution.getParentId(), nullValue()); 
assertThat(finishedRestartedExecution.getTaskRunList().size(), is(4)); assertThat(finishedRestartedExecution.getTaskRunList().getFirst().getAttempts().size(), is(1)); assertThat(finishedRestartedExecution.getTaskRunList().get(1).getAttempts().size(), is(1)); assertThat(finishedRestartedExecution.getTaskRunList().get(2).getAttempts().size(), is(2)); assertThat(finishedRestartedExecution.getTaskRunList().get(3).getAttempts().size(), is(1)); finishedRestartedExecution .getTaskRunList() .stream() .map(TaskRun::getState) .forEach(state -> assertThat(state.getCurrent(), is(State.Type.SUCCESS))); }
public String bsnkActivate(String bsn) throws BsnkException { ProvidePPPPCAOptimizedRequest request = bsnkUtils.createPpPpcaRequest(bsn); List<PolymorphicPseudonymType> response; response = activateBsnkClient.providePPRequest(request); ASN1Sequence signedPip = bsnkUtils.signedPipFromPplist(response); if (!bsnkUtils.verifySignedPip(signedPip)) { try { throw new BsnkException("SignedpipSignatureFault", String.format("Signed pip not verified: '%s'", Base64.getEncoder().encodeToString(signedPip.getEncoded())), null); } catch (IOException ex) { throw new BsnkException("signedPipSignatureEncodeFault", "Signed pip not verified and not not base64 encodeable", ex); } } ASN1Sequence pip = bsnkUtils.retrievePipFromSignedPip(signedPip); try { return Base64.getEncoder().encodeToString(pip.getEncoded()); } catch (IOException ex) { throw new BsnkException("PipEncodeFault", "Couldn't base64 encode pip", ex); } }
@Test public void bsnkActivateTest() throws IOException, BsnkException { String signedPipBase64 = "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"; String pipbase64 = "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS"; ASN1Sequence signedPip = (ASN1Sequence) ASN1Sequence.fromByteArray(Base64.getDecoder().decode(signedPipBase64)); ASN1Sequence pip = (ASN1Sequence) ASN1Sequence.fromByteArray(Base64.getDecoder().decode(pipbase64)); Mockito.when(bsnkUtils.signedPipFromPplist(any())).thenReturn(signedPip); Mockito.when(bsnkUtils.verifySignedPip(any())).thenReturn(true); 
Mockito.when(bsnkUtils.retrievePipFromSignedPip(any())).thenReturn(pip); String result = service.bsnkActivate("PPPPPPPPP"); assertEquals(pipbase64, result); }
@Override public HashSlotCursor12byteKey cursor() { return new CursorIntKey2(); }
@Test(expected = AssertionError.class) @RequireAssertEnabled public void testCursor_key2_withoutAdvance() { HashSlotCursor12byteKey cursor = hsa.cursor(); cursor.key2(); }
@Override public boolean setIfExists(V value) { return get(setIfExistsAsync(value)); }
@Test public void testSetIfExists() throws InterruptedException { RBucket<String> r1 = redisson.getBucket("test1"); assertThat(r1.setIfExists("0")).isFalse(); assertThat(r1.isExists()).isFalse(); r1.set("1"); assertThat(r1.setIfExists("2")).isTrue(); assertThat(r1.get()).isEqualTo("2"); RBucket<String> r2 = redisson.getBucket("test2"); r2.set("1"); assertThat(r2.setIfExists("2", Duration.ofSeconds(1))).isTrue(); assertThat(r2.get()).isEqualTo("2"); Thread.sleep(1000); assertThat(r2.isExists()).isFalse(); }
@Override public void login(final LoginCallback prompt, final CancelCallback cancel) throws BackgroundException { final Credentials credentials = host.getCredentials(); if(host.getProtocol().isPasswordConfigurable()) { final String domain, username; if(credentials.getUsername().contains("\\")) { domain = StringUtils.substringBefore(credentials.getUsername(), "\\"); username = StringUtils.substringAfter(credentials.getUsername(), "\\"); } else { username = credentials.getUsername(); domain = new HostPreferences(host).getProperty("webdav.ntlm.domain"); } for(String scheme : Arrays.asList(AuthSchemes.NTLM, AuthSchemes.SPNEGO)) { client.setCredentials( new AuthScope(AuthScope.ANY_HOST, AuthScope.ANY_PORT, AuthScope.ANY_REALM, scheme), new NTCredentials(username, credentials.getPassword(), preferences.getProperty("webdav.ntlm.workstation"), domain) ); } for(String scheme : Arrays.asList(AuthSchemes.BASIC, AuthSchemes.DIGEST, AuthSchemes.KERBEROS)) { client.setCredentials( new AuthScope(AuthScope.ANY_HOST, AuthScope.ANY_PORT, AuthScope.ANY_REALM, scheme), new UsernamePasswordCredentials(username, credentials.getPassword())); } if(preferences.getBoolean("webdav.basic.preemptive")) { client.enablePreemptiveAuthentication(host.getHostname(), host.getPort(), host.getPort(), Charset.forName(preferences.getProperty("http.credentials.charset")) ); } else { client.disablePreemptiveAuthentication(); } } if(credentials.isPassed()) { if(log.isWarnEnabled()) { log.warn(String.format("Skip verifying credentials with previous successful authentication event for %s", this)); } return; } try { final Path home = new DelegatingHomeFeature(new WorkdirHomeFeature(host), new DefaultPathHomeFeature(host)).find(); final HttpHead head = new HttpHead(new DAVPathEncoder().encode(home)); try { client.execute(head, new MicrosoftIISFeaturesResponseHandler(capabilities)); } catch(SardineException e) { switch(e.getStatusCode()) { case HttpStatus.SC_NOT_FOUND: if(log.isWarnEnabled()) { 
log.warn(String.format("Ignore failure %s", e)); } break; case HttpStatus.SC_NOT_IMPLEMENTED: case HttpStatus.SC_FORBIDDEN: case HttpStatus.SC_UNSUPPORTED_MEDIA_TYPE: case HttpStatus.SC_METHOD_NOT_ALLOWED: if(log.isWarnEnabled()) { log.warn(String.format("Failed HEAD request to %s with %s. Retry with PROPFIND.", host, e.getResponsePhrase())); } cancel.verify(); // Possibly only HEAD requests are not allowed final ListService list = this.getFeature(ListService.class); list.list(home, new DisabledListProgressListener() { @Override public void chunk(final Path parent, final AttributedList<Path> list) throws ListCanceledException { try { cancel.verify(); } catch(ConnectionCanceledException e) { throw new ListCanceledException(list, e); } } }); break; case HttpStatus.SC_BAD_REQUEST: if(preferences.getBoolean("webdav.basic.preemptive")) { if(log.isWarnEnabled()) { log.warn(String.format("Disable preemptive authentication for %s due to failure %s", host, e.getResponsePhrase())); } cancel.verify(); client.disablePreemptiveAuthentication(); client.execute(head, new MicrosoftIISFeaturesResponseHandler(capabilities)); } else { throw new DAVExceptionMappingService().map(e); } break; default: throw new DAVExceptionMappingService().map(e); } } } catch(SardineException e) { throw new DAVExceptionMappingService().map(e); } catch(IOException e) { throw new HttpExceptionMappingService().map(e); } }
@Test(expected = BackgroundException.class) public void testHtmlResponse() throws Exception { final Host host = new Host(new DAVProtocol(), "cyberduck.ch"); final DAVSession session = new DAVSession(host, new DisabledX509TrustManager(), new DefaultX509KeyManager()); try { session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()); session.login(new DisabledLoginCallback(), new DisabledCancelCallback()); new DAVListService(session).list(new DefaultHomeFinderService(session).find(), new DisabledListProgressListener()); } catch(InteroperabilityException | ConflictException e) { assertEquals("Unexpected response (405 Method Not Allowed). Please contact your web hosting service provider for assistance.", e.getDetail()); throw e; } }
public static VersionMessage read(ByteBuffer payload) throws BufferUnderflowException, ProtocolException { int clientVersion = (int) ByteUtils.readUint32(payload); check(clientVersion >= ProtocolVersion.MINIMUM.intValue(), ProtocolException::new); Services localServices = Services.read(payload); Instant time = Instant.ofEpochSecond(ByteUtils.readInt64(payload)); Services receivingServices = Services.read(payload); InetAddress receivingInetAddress = PeerAddress.getByAddress(Buffers.readBytes(payload, 16)); int receivingPort = ByteUtils.readUint16BE(payload); InetSocketAddress receivingAddr = new InetSocketAddress(receivingInetAddress, receivingPort); Buffers.skipBytes(payload, NETADDR_BYTES); // addr_from // uint64 localHostNonce (random data) // We don't care about the localhost nonce. It's used to detect connecting back to yourself in cases where // there are NATs and proxies in the way. However we don't listen for inbound connections so it's // irrelevant. Buffers.skipBytes(payload, 8); // string subVer (currently "") String subVer = Buffers.readLengthPrefixedString(payload); // int bestHeight (size of known block chain). long bestHeight = ByteUtils.readUint32(payload); boolean relayTxesBeforeFilter = clientVersion >= ProtocolVersion.BLOOM_FILTER.intValue() ? payload.get() != 0 : true; return new VersionMessage(clientVersion, localServices, time, receivingServices, receivingAddr, subVer, bestHeight, relayTxesBeforeFilter); }
@Test public void roundTrip_ipv4() throws Exception { VersionMessage ver = new VersionMessage(TESTNET, 1234); ver.time = Instant.ofEpochSecond(23456); ver.subVer = "/bitcoinj/"; ver.localServices = Services.of(1); ver.receivingAddr = new InetSocketAddress(InetAddress.getByName("4.3.2.1"), 8333); byte[] serialized = ver.serialize(); VersionMessage ver2 = VersionMessage.read(ByteBuffer.wrap(serialized)); assertEquals(1234, ver2.bestHeight); assertEquals(Instant.ofEpochSecond(23456), ver2.time); assertEquals("/bitcoinj/", ver2.subVer); assertEquals(ProtocolVersion.CURRENT.intValue(), ver2.clientVersion); assertEquals(1, ver2.localServices.bits()); assertEquals("4.3.2.1", ver2.receivingAddr.getHostName()); assertEquals(8333, ver2.receivingAddr.getPort()); }
void validateMinResourcesRequired(Function.FunctionDetails functionDetails) { if (functionInstanceMinResources != null) { Double minCpu = functionInstanceMinResources.getCpu(); Long minRam = functionInstanceMinResources.getRam(); if (minCpu != null) { if (functionDetails.getResources() == null) { throw new IllegalArgumentException( String.format("Per instance CPU requested is not specified. " + "Must specify CPU requested for function to be at least %s", minCpu)); } else if (functionDetails.getResources().getCpu() < minCpu) { throw new IllegalArgumentException( String.format("Per instance CPU requested, %s, " + "for function is less than the minimum required, %s", functionDetails.getResources().getCpu(), minCpu)); } } if (minRam != null) { if (functionDetails.getResources() == null) { throw new IllegalArgumentException( String.format("Per instance RAM requested is not specified. " + "Must specify RAM requested for function to be at least %s", minRam)); } else if (functionDetails.getResources().getRam() < minRam) { throw new IllegalArgumentException( String.format("Per instance RAM requested, %s, " + "for function is less than the minimum required, %s", functionDetails.getResources().getRam(), minRam)); } } } }
@Test public void testValidateMinResourcesRequired() throws Exception { factory = createKubernetesRuntimeFactory(null, null, null, null, false); FunctionDetails functionDetailsBase = createFunctionDetails(); // min resources are not set try { factory.validateMinResourcesRequired(functionDetailsBase); } catch (Exception e) { fail(); } testMinResource(0.2, 2048L, false, null); testMinResource(0.05, 2048L, true, "Per instance CPU requested, 0.05, for function is less than the minimum required, 0.1"); testMinResource(0.2, 512L, true, "Per instance RAM requested, 512, for function is less than the minimum required, 1024"); testMinResource(0.05, 512L, true, "Per instance CPU requested, 0.05, for function is less than the minimum required, 0.1"); testMinResource(null, null, true, "Per instance CPU requested, 0.0, for function is less than the minimum required, 0.1"); testMinResource(0.2, null, true, "Per instance RAM requested, 0, for function is less than the minimum required, 1024"); testMinResource(0.05, null, true, "Per instance CPU requested, 0.05, for function is less than the minimum required, 0.1"); testMinResource(null, 2048L, true, "Per instance CPU requested, 0.0, for function is less than the minimum required, 0.1"); testMinResource(null, 512L, true, "Per instance CPU requested, 0.0, for function is less than the minimum required, 0.1"); }
@Override public alluxio.grpc.JobInfo toProto() { ByteBuffer result = null; try { result = mResult == null ? null : ByteBuffer.wrap(SerializationUtils.serialize(mResult)); } catch (IOException e) { // TODO(bradley) better error handling LOG.error("Failed to serialize {}", mResult, e); } alluxio.grpc.JobInfo.Builder taskInfoBuilder = alluxio.grpc.JobInfo.newBuilder().setParentId(mJobId).setId(mTaskId) .setStatus(mStatus.toProto()).setErrorMessage(mErrorMessage) .setErrorType(mErrorType).setLastUpdated(mLastUpdated).setWorkerHost(mWorkerHost) .setType(JobType.TASK).setDescription(mDescription); if (result != null) { taskInfoBuilder.setResult(ByteString.copyFrom(result)); } return taskInfoBuilder.build(); }
@Test public void testToProto() throws IOException { TaskInfo taskInfo = createRandom(); assertEquals(taskInfo, new TaskInfo(taskInfo.toProto())); }
public static NotificationDispatcherMetadata newMetadata() { return METADATA; }
@Test public void verify_changeOnMyIssues_notification_dispatcher_key() { NotificationDispatcherMetadata metadata = ChangesOnMyIssueNotificationHandler.newMetadata(); assertThat(metadata.getDispatcherKey()).isEqualTo(CHANGE_ON_MY_ISSUES_DISPATCHER_KEY); }
@Override public byte[] serializeJavaObject(Object obj) { MemoryBuffer buf = getBuffer(); buf.writerIndex(0); serializeJavaObject(buf, obj); byte[] bytes = buf.getBytes(0, buf.writerIndex()); resetBuffer(); return bytes; }
@Test public void testSerializeJavaObject() { Fury fury = Fury.builder().requireClassRegistration(false).withLanguage(Language.JAVA).build(); BeanA beanA = BeanA.createBeanA(2); assertEquals(fury.deserializeJavaObject(fury.serializeJavaObject(beanA), BeanA.class), beanA); assertEquals( fury.deserializeJavaObjectAndClass(fury.serializeJavaObjectAndClass(beanA)), beanA); assertEquals( fury.deserializeJavaObjectAndClass( MemoryBuffer.fromByteArray(fury.serializeJavaObjectAndClass(beanA))), beanA); }
public static int hashToIndex(int hash, int length) { checkPositive("length", length); if (hash == Integer.MIN_VALUE) { return 0; } return abs(hash) % length; }
@Test public void hashToIndex_whenHashNegative() { int result = hashToIndex(-420, 100); assertEquals(20, result); }
public static String toURL(String protocol, String host, int port, String path) { StringBuilder sb = new StringBuilder(); sb.append(protocol).append("://"); sb.append(host).append(':').append(port); if (path.charAt(0) != '/') { sb.append('/'); } sb.append(path); return sb.toString(); }
@Test void testToURL() { String url = NetUtils.toURL("dubbo", "host", 1234, "foo"); assertThat(url, equalTo("dubbo://host:1234/foo")); }
@Override public void addChildren(Deque<Expression> expressions) { addChildren(expressions, 2); }
@Test public void testPrepare() throws IOException { And and = new And(); Expression first = mock(Expression.class); Expression second = mock(Expression.class); Deque<Expression> children = new LinkedList<Expression>(); children.add(second); children.add(first); and.addChildren(children); and.prepare(); verify(first).prepare(); verify(second).prepare(); verifyNoMoreInteractions(first); verifyNoMoreInteractions(second); }
public static Definitions getRootElement(DMNModelInstrumentedBase toNavigate) { if ( toNavigate instanceof Definitions ) { return (Definitions) toNavigate; } else if ( toNavigate.getParent() != null ) { return getRootElement(toNavigate.getParent()); } else { throw new RuntimeException("Failed to get Definitions parent for " + toNavigate); } }
@Test void getRootElement() { String localPart = "reference"; DMNElementReference elementReference = new TDMNElementReference(); String href = String.format("%s#%s", nameSpace, localPart); elementReference.setHref(href); elementReference.setParent(parent); Definitions retrieved = DMNCompilerImpl.getRootElement(elementReference); assertThat(retrieved).isNotNull().isEqualTo(parent); InformationRequirement informationRequirement = new TInformationRequirement(); elementReference.setParent(informationRequirement); assertThrows(RuntimeException.class, () -> DMNCompilerImpl.getRootElement(elementReference)); informationRequirement.setParent(parent); retrieved = DMNCompilerImpl.getRootElement(elementReference); assertThat(retrieved).isNotNull().isEqualTo(parent); }
@Override public void execute(Context context) { Set<String> qpKeys = analysisMetadataHolder.getQProfilesByLanguage().values().stream().map(QualityProfile::getQpKey).collect(Collectors.toSet()); try (DbSession dbSession = dbClient.openSession(false)) { Set<RuleKey> prioritizedRules = dbClient.activeRuleDao().selectPrioritizedRules(dbSession, qpKeys); prioritizedRulesHolder.setPrioritizedRules(prioritizedRules); } }
@Test void execute_whenNoPrioritizedRules_shouldHaveEmptyHolder() { when(dbClient.activeRuleDao()).thenReturn(mock()); when(dbClient.activeRuleDao().selectPrioritizedRules(any(), any())).thenReturn(new HashSet<>()); underTest.execute(mock()); assertThat(prioritizedRulesHolder.getPrioritizedRules()).isEmpty(); }
public static BufferedImage resizeCanvas(final BufferedImage image, final int newWidth, final int newHeight) { final BufferedImage dimg = new BufferedImage(newWidth, newHeight, BufferedImage.TYPE_INT_ARGB); final int centeredX = newWidth / 2 - image.getWidth() / 2; final int centeredY = newHeight / 2 - image.getHeight() / 2; final Graphics2D g2d = dimg.createGraphics(); g2d.drawImage(image, centeredX, centeredY, null); g2d.dispose(); return dimg; }
@Test public void resizeCanvas() { assertTrue(bufferedImagesEqual(centeredPixel(BLACK), ImageUtil.resizeCanvas(oneByOne(BLACK), 3, 3))); assertTrue(bufferedImagesEqual(oneByOne(BLACK), ImageUtil.resizeCanvas(oneByOne(BLACK), 1, 1))); assertTrue(bufferedImagesEqual(oneByOne(BLACK), ImageUtil.resizeCanvas(centeredPixel(BLACK), 1, 1))); BufferedImage expected = new BufferedImage(2, 1, BufferedImage.TYPE_INT_ARGB); expected.setRGB(1, 0, BLACK.getRGB()); assertTrue(bufferedImagesEqual(expected, ImageUtil.resizeCanvas(oneByOne(BLACK), 2, 1))); expected = new BufferedImage(1, 2, BufferedImage.TYPE_INT_ARGB); expected.setRGB(0, 1, BLACK.getRGB()); assertTrue(bufferedImagesEqual(expected, ImageUtil.resizeCanvas(oneByOne(BLACK), 1, 2))); }
static double toDouble(final JsonNode object) { if (object instanceof NumericNode) { return object.doubleValue(); } if (object instanceof TextNode) { try { return Double.parseDouble(object.textValue()); } catch (final NumberFormatException e) { throw failedStringCoercionException(SqlBaseType.DOUBLE); } } throw invalidConversionException(object, SqlBaseType.DOUBLE); }
@Test public void shouldNotIncludeValueInExceptionWhenFailingToDouble() { try { // When: JsonSerdeUtils.toDouble(JsonNodeFactory.instance.textNode("personal info: do not log me")); fail("Invalid test: should throw"); } catch (final Exception e) { assertThat(ExceptionUtils.getStackTrace(e), not(containsString("personal info"))); } }
public JsonNode resolve(URI uri) { if (CLASSPATH_SCHEMES.contains(uri.getScheme())) { return resolveFromClasspath(uri); } try { return objectMapper.readTree(uri.toURL()); } catch (JsonProcessingException e) { throw new IllegalArgumentException("Error parsing document: " + uri, e); } catch (IOException e) { throw new IllegalArgumentException("Unrecognised URI, can't resolve this: " + uri, e); } }
// A file: URI to a schema document resolves to the parsed JSON content,
// not to the URI itself.
@Test
public void fileLinkIsResolvedToContent() throws IOException {
    URI schemaFile = createSchemaFile();
    JsonNode uriContent = resolver.resolve(schemaFile);
    assertThat(uriContent.path("type").asText(), is(equalTo("string")));
}
/**
 * Checks compatibility between a previous and a current snapshot file at the
 * given compatibility level. Delegates to checkCompatibility; the final flag
 * is passed as {@code false} here (its meaning is defined by that method —
 * NOTE(review): confirm against checkCompatibility's signature).
 */
public CompatibilityInfoMap check(String prevSnapshotPath, String currSnapshotPath, CompatibilityLevel compatLevel)
{
    return checkCompatibility(prevSnapshotPath, currSnapshotPath, compatLevel, false);
}
// Verifies that every ReadOnly/CreateOnly annotation change between the two
// validationdemos snapshots is reported with the expected incompatibility
// type (breaks old client / breaks new server / may require client change),
// and that no unexpected incompatibilities are reported.
@Test
public void testIncompatibleRestLiDataAnnotations()
{
    final Collection<CompatibilityInfo> errors = new HashSet<>();
    errors.add(new CompatibilityInfo(Arrays.<Object>asList("", "annotations", "intB"),
        CompatibilityInfo.Type.ANNOTATION_CHANGE_BREAKS_OLD_CLIENT, "Cannot add ReadOnly annotation"));
    errors.add(new CompatibilityInfo(Arrays.<Object>asList("", "annotations", "validationDemoNext/intA"),
        CompatibilityInfo.Type.ANNOTATION_CHANGE_BREAKS_OLD_CLIENT, "Cannot add CreateOnly annotation"));
    // Removing ReadOnly annotation on a required field
    errors.add(new CompatibilityInfo(Arrays.<Object>asList("", "annotations", "validationDemoNext/stringB"),
        CompatibilityInfo.Type.ANNOTATION_CHANGE_BREAKS_NEW_SERVER, "Cannot remove ReadOnly annotation"));
    // Removing ReadOnly annotation on an optional field
    errors.add(new CompatibilityInfo(Arrays.<Object>asList("", "annotations", "intA"),
        CompatibilityInfo.Type.ANNOTATION_CHANGE_MAY_REQUIRE_CLIENT_CODE_CHANGE, "Cannot remove ReadOnly annotation"));
    // Removing CreateOnly annotation on a required field
    errors.add(new CompatibilityInfo(Arrays.<Object>asList("", "annotations", "stringB"),
        CompatibilityInfo.Type.ANNOTATION_CHANGE_MAY_REQUIRE_CLIENT_CODE_CHANGE, "Cannot remove CreateOnly annotation"));
    // Removing CreateOnly annotation on an optional field
    errors.add(new CompatibilityInfo(Arrays.<Object>asList("", "annotations", "intB"),
        CompatibilityInfo.Type.ANNOTATION_CHANGE_MAY_REQUIRE_CLIENT_CODE_CHANGE, "Cannot remove CreateOnly annotation"));

    final RestLiSnapshotCompatibilityChecker checker = new RestLiSnapshotCompatibilityChecker();
    final CompatibilityInfoMap infoMap = checker.check(
        RESOURCES_DIR + FS + "snapshots" + FS + "prev-validationdemos.snapshot.json",
        RESOURCES_DIR + FS + "snapshots" + FS + "curr-validationdemos-fail.snapshot.json",
        CompatibilityLevel.BACKWARDS);

    final Collection<CompatibilityInfo> restSpecIncompatibles = infoMap.getRestSpecIncompatibles();
    for (CompatibilityInfo e : errors)
    {
        Assert.assertTrue(restSpecIncompatibles.contains(e), "Reported restspec incompatibles should contain: " + e.toString());
        restSpecIncompatibles.remove(e);
    }
    // Nothing beyond the expected incompatibilities may be reported.
    Assert.assertTrue(restSpecIncompatibles.isEmpty());
}
/**
 * Converts a PMML {@link OutputField} to its kie-pmml API counterpart.
 * The output field's own data type wins; when absent, the data type of the
 * referenced {@code field} (if any) is used. Allowed values are only
 * available when the referenced field is a {@link DataField}.
 *
 * @param toConvert the PMML output field to convert (must not be null)
 * @param field     the referenced model field, may be null
 * @return the converted kie output field; optional attributes may be null
 */
public static org.kie.pmml.api.models.OutputField convertToKieOutputField(final OutputField toConvert,
                                                                          final Field<?> field) {
    // `x != null ? x : null` is a no-op — read the nullable values directly.
    final String name = toConvert.getName();
    final OP_TYPE opType = toConvert.getOpType() != null ? OP_TYPE.byName(toConvert.getOpType().value()) : null;
    final DATA_TYPE dataFieldDataType = field != null ? DATA_TYPE.byName(field.getDataType().value()) : null;
    // Prefer the output field's declared type, fall back to the referenced field's.
    final DATA_TYPE dataType = toConvert.getDataType() != null ?
            DATA_TYPE.byName(toConvert.getDataType().value()) : dataFieldDataType;
    final String targetField = toConvert.getTargetField();
    final RESULT_FEATURE resultFeature = toConvert.getResultFeature() != null ?
            RESULT_FEATURE.byName(toConvert.getResultFeature().value()) : null;
    // Allowed values exist only on DataField declarations.
    final List<String> allowedValues = field instanceof DataField ?
            convertDataFieldValues(((DataField) field).getValues()) : null;
    return new org.kie.pmml.api.models.OutputField(name, opType, dataType, targetField, resultFeature, allowedValues);
}
// Round-trips a random OutputField through the converter (with a null field
// reference) and checks every attribute maps 1:1; then verifies that null
// opType/targetField stay null after conversion.
@Test
void convertToKieOutputField() {
    final OutputField toConvert = getRandomOutputField();
    org.kie.pmml.api.models.OutputField retrieved =
            org.kie.pmml.compiler.api.utils.ModelUtils.convertToKieOutputField(toConvert, null);
    assertThat(retrieved).isNotNull();
    assertThat(retrieved.getName()).isEqualTo(toConvert.getName());
    OP_TYPE expectedOpType = OP_TYPE.byName(toConvert.getOpType().value());
    assertThat(retrieved.getOpType()).isEqualTo(expectedOpType);
    DATA_TYPE expectedDataType = DATA_TYPE.byName(toConvert.getDataType().value());
    assertThat(retrieved.getDataType()).isEqualTo(expectedDataType);
    assertThat(retrieved.getTargetField()).isEqualTo(toConvert.getTargetField());
    RESULT_FEATURE expectedResultFeature = RESULT_FEATURE.byName(toConvert.getResultFeature().value());
    assertThat(retrieved.getResultFeature()).isEqualTo(expectedResultFeature);
    toConvert.setOpType(null);
    toConvert.setTargetField(null);
    retrieved = org.kie.pmml.compiler.api.utils.ModelUtils.convertToKieOutputField(toConvert, null);
    assertThat(retrieved.getOpType()).isNull();
    assertThat(retrieved.getTargetField()).isNull();
}
/**
 * Expands an Eddystone-URL compressed byte array back into a URL string.
 * The low nibble of the first byte selects the scheme prefix; subsequent
 * bytes are either top-level-domain expansion codes or literal characters.
 * Expansion stops early at a double-zero byte sequence.
 *
 * @param compressedURL Eddystone-encoded URL bytes (must be non-empty)
 * @return the expanded URL string
 */
public static String uncompress(byte[] compressedURL) {
    // A local, single-threaded builder: StringBuilder, not the synchronized StringBuffer.
    StringBuilder url = new StringBuilder();
    switch (compressedURL[0] & 0x0f) {
        case EDDYSTONE_URL_PROTOCOL_HTTP_WWW:
            url.append(URL_PROTOCOL_HTTP_WWW_DOT);
            break;
        case EDDYSTONE_URL_PROTOCOL_HTTPS_WWW:
            url.append(URL_PROTOCOL_HTTPS_WWW_DOT);
            break;
        case EDDYSTONE_URL_PROTOCOL_HTTP:
            url.append(URL_PROTOCOL_HTTP_COLON_SLASH_SLASH);
            break;
        case EDDYSTONE_URL_PROTOCOL_HTTPS:
            url.append(URL_PROTOCOL_HTTPS_COLON_SLASH_SLASH);
            break;
        default:
            break;
    }
    byte lastByte = -1;
    for (int i = 1; i < compressedURL.length; i++) {
        byte b = compressedURL[i];
        // Two consecutive zero bytes terminate the payload.
        if (lastByte == 0 && b == 0) {
            break;
        }
        lastByte = b;
        // Expansion-code bytes map to TLD strings; anything else is a literal char.
        String tld = topLevelDomainForByte(b);
        if (tld != null) {
            url.append(tld);
        } else {
            url.append((char) b);
        }
    }
    return url.toString();
}
// Subdomain case: 0x00 selects the "http://www." prefix, literal bytes spell
// "forums.google", and the trailing 0x07 expands to the ".com" TLD code.
@Test
public void testUncompressWithSubdomains() throws MalformedURLException {
    String testURL = "http://www.forums.google.com";
    byte[] testBytes = {0x00, 'f', 'o', 'r', 'u', 'm', 's', '.', 'g', 'o', 'o', 'g', 'l', 'e', 0x07};
    assertEquals(testURL, UrlBeaconUrlCompressor.uncompress(testBytes));
}
/**
 * Installs and starts the given bundles exactly once. Any later call is a
 * no-op (logged at FINE) — bundles cannot be added after the first load.
 *
 * @param bundlePaths file paths of the bundles to install
 */
public void useBundles(List<String> bundlePaths) {
    if (hasLoadedBundles) {
        // Lazy message supplier: string concatenation only happens if FINE is enabled.
        log.log(Level.FINE, () -> "Platform bundles have already been installed." +
                "\nInstalled bundles: " + installedBundles +
                "\nGiven files: " + bundlePaths);
        return;
    }
    installedBundles = install(bundlePaths);
    BundleStarter.startBundles(installedBundles);
    hasLoadedBundles = true;
}
// The second useBundles call must be ignored: only the first bundle set is installed.
@Test
void bundles_cannot_be_added_by_later_calls() {
    bundleLoader.useBundles(List.of(BUNDLE_1_REF));
    bundleLoader.useBundles(List.of(BUNDLE_2_REF));  // Should be a NOP
    assertEquals(1, osgi.getInstalledBundles().size());
    assertEquals(BUNDLE_1.getSymbolicName(), osgi.getInstalledBundles().get(0).getSymbolicName());
}
/**
 * Parses in-game chat lines for kill counts, personal bests, duel-arena
 * streaks, Hallowed Sepulchre times, collection-log pets, Hunter rumours and
 * Guardians of the Rift counts, persisting what it finds via setKc/setPb.
 * Order matters: a kill-count match returns early; a pb seen before its kc
 * message is buffered in lastPb/lastTeamSize and reconciled here.
 */
@Subscribe
public void onChatMessage(ChatMessage chatMessage)
{
    // Only message types that can carry the patterns below.
    if (chatMessage.getType() != ChatMessageType.TRADE
        && chatMessage.getType() != ChatMessageType.GAMEMESSAGE
        && chatMessage.getType() != ChatMessageType.SPAM
        && chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION)
    {
        return;
    }

    String message = chatMessage.getMessage();
    Matcher matcher = KILLCOUNT_PATTERN.matcher(message);
    if (matcher.find())
    {
        final String boss = matcher.group("boss");
        final int kc = Integer.parseInt(matcher.group("kc"));
        final String pre = matcher.group("pre");
        final String post = matcher.group("post");

        // No surrounding text at all: treat as a reset and clear the stored kc.
        if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post))
        {
            unsetKc(boss);
            return;
        }

        String renamedBoss = KILLCOUNT_RENAMES
            .getOrDefault(boss, boss)
            // The config service doesn't support keys with colons in them
            .replace(":", "");
        // NOTE(review): intentional reference (==) comparison — relies on
        // getOrDefault/String.replace returning the same instance when nothing
        // changed, so this branch runs only if a rename/strip occurred. Fragile;
        // confirm before touching.
        if (boss != renamedBoss)
        {
            // Unset old TOB kc
            unsetKc(boss);
            unsetPb(boss);
            unsetKc(boss.replace(":", "."));
            unsetPb(boss.replace(":", "."));
            // Unset old story mode
            unsetKc("Theatre of Blood Story Mode");
            unsetPb("Theatre of Blood Story Mode");
        }

        setKc(renamedBoss, kc);
        // We either already have the pb, or need to remember the boss for the upcoming pb
        if (lastPb > -1)
        {
            log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb);

            if (renamedBoss.contains("Theatre of Blood"))
            {
                // TOB team size isn't sent in the kill message, but can be computed from varbits
                int tobTeamSize = tobTeamSize();
                lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players");
            }
            else if (renamedBoss.contains("Tombs of Amascut"))
            {
                // TOA team size isn't sent in the kill message, but can be computed from varbits
                int toaTeamSize = toaTeamSize();
                lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players");
            }

            final double pb = getPb(renamedBoss);
            // If a raid with a team size, only update the pb if it is lower than the existing pb
            // so that the pb is the overall lowest of any team size
            if (lastTeamSize == null || pb == 0 || lastPb < pb)
            {
                log.debug("Setting overall pb (old: {})", pb);
                setPb(renamedBoss, lastPb);
            }
            if (lastTeamSize != null)
            {
                log.debug("Setting team size pb: {}", lastTeamSize);
                setPb(renamedBoss + " " + lastTeamSize, lastPb);
            }

            lastPb = -1;
            lastTeamSize = null;
        }
        else
        {
            // Remember the boss/tick so a pb arriving this tick can be attributed.
            lastBossKill = renamedBoss;
            lastBossTime = client.getTickCount();
        }
        return;
    }

    matcher = DUEL_ARENA_WINS_PATTERN.matcher(message);
    if (matcher.find())
    {
        final int oldWins = getKc("Duel Arena Wins");
        final int wins = matcher.group(2).equals("one") ? 1 :
            Integer.parseInt(matcher.group(2).replace(",", ""));
        final String result = matcher.group(1);
        int winningStreak = getKc("Duel Arena Win Streak");
        int losingStreak = getKc("Duel Arena Lose Streak");
        // A win only counts toward the streak if the total actually increased.
        if (result.equals("won") && wins > oldWins)
        {
            losingStreak = 0;
            winningStreak += 1;
        }
        else if (result.equals("were defeated"))
        {
            losingStreak += 1;
            winningStreak = 0;
        }
        else
        {
            log.warn("unrecognized duel streak chat message: {}", message);
        }
        setKc("Duel Arena Wins", wins);
        setKc("Duel Arena Win Streak", winningStreak);
        setKc("Duel Arena Lose Streak", losingStreak);
    }

    matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message);
    if (matcher.find())
    {
        int losses = matcher.group(1).equals("one") ? 1 :
            Integer.parseInt(matcher.group(1).replace(",", ""));
        setKc("Duel Arena Losses", losses);
    }

    // Several pb message formats all funnel into matchPb().
    matcher = KILL_DURATION_PATTERN.matcher(message);
    if (matcher.find())
    {
        matchPb(matcher);
    }

    matcher = NEW_PB_PATTERN.matcher(message);
    if (matcher.find())
    {
        matchPb(matcher);
    }

    matcher = RAIDS_PB_PATTERN.matcher(message);
    if (matcher.find())
    {
        matchPb(matcher);
    }

    matcher = RAIDS_DURATION_PATTERN.matcher(message);
    if (matcher.find())
    {
        matchPb(matcher);
    }

    matcher = HS_PB_PATTERN.matcher(message);
    if (matcher.find())
    {
        int floor = Integer.parseInt(matcher.group("floor"));
        String floortime = matcher.group("floortime");
        String floorpb = matcher.group("floorpb");
        String otime = matcher.group("otime");
        String opb = matcher.group("opb");

        // Prefer the explicit pb group; fall back to the plain time.
        String pb = MoreObjects.firstNonNull(floorpb, floortime);
        setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb));
        if (otime != null)
        {
            pb = MoreObjects.firstNonNull(opb, otime);
            setPb("Hallowed Sepulchre", timeStringToSeconds(pb));
        }
    }

    matcher = HS_KC_FLOOR_PATTERN.matcher(message);
    if (matcher.find())
    {
        int floor = Integer.parseInt(matcher.group(1));
        int kc = Integer.parseInt(matcher.group(2).replaceAll(",", ""));
        setKc("Hallowed Sepulchre Floor " + floor, kc);
    }

    matcher = HS_KC_GHC_PATTERN.matcher(message);
    if (matcher.find())
    {
        int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
        setKc("Hallowed Sepulchre", kc);
    }

    matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message);
    if (matcher.find())
    {
        int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
        setKc("Hunter Rumours", kc);
    }

    // A buffered boss kill only stays valid within the tick it was recorded.
    if (lastBossKill != null && lastBossTime != client.getTickCount())
    {
        lastBossKill = null;
        lastBossTime = -1;
    }

    matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message);
    if (matcher.find())
    {
        String item = matcher.group(1);
        int petId = findPet(item);

        if (petId != -1)
        {
            final List<Integer> petList = new ArrayList<>(getPetList());
            if (!petList.contains(petId))
            {
                log.debug("New pet added: {}/{}", item, petId);
                petList.add(petId);
                setPetList(petList);
            }
        }
    }

    matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message);
    if (matcher.find())
    {
        int kc = Integer.parseInt(matcher.group(1));
        setKc("Guardians of the Rift", kc);
    }
}
// Entry-mode TOB: the pb from the completion message (17:00) and the later kc
// message must both land under the renamed "theatre of blood entry mode" key,
// with a solo team-size pb derived from the single lit orb varbit.
@Test
public void testTheatreOfBloodEntryMode() {
    when(client.getVarbitValue(Varbits.THEATRE_OF_BLOOD_ORB1)).thenReturn(1);

    ChatMessage chatMessage = new ChatMessage(null, GAMEMESSAGE, "",
        "Wave 'The Final Challenge' (Entry Mode) complete!<br>" +
        "Duration: <col=ff0000>2:42</col><br>" +
        "Theatre of Blood completion time: <col=ff0000>17:00</col> (new personal best)", null, 0);
    chatCommandsPlugin.onChatMessage(chatMessage);

    chatMessage = new ChatMessage(null, GAMEMESSAGE, "",
        "Theatre of Blood total completion time: <col=ff0000>24:40.20</col>. Personal best: 20:45.00", null, 0);
    chatCommandsPlugin.onChatMessage(chatMessage);

    chatMessage = new ChatMessage(null, GAMEMESSAGE, "",
        "Your completed Theatre of Blood: Entry Mode count is: <col=ff0000>73</col>.", null, 0);
    chatCommandsPlugin.onChatMessage(chatMessage);

    verify(configManager).setRSProfileConfiguration("killcount", "theatre of blood entry mode", 73);
    verify(configManager).setRSProfileConfiguration("personalbest", "theatre of blood entry mode", 17 * 60.);
    verify(configManager).setRSProfileConfiguration("personalbest", "theatre of blood entry mode solo", 17 * 60.);
}
/**
 * Queries the Flyme (Meizu) OAID content provider and returns the OAID, or
 * null when unavailable. All failures are swallowed and logged — this is a
 * best-effort lookup. The cursor is now always closed, including when the
 * query returns an empty cursor or throws mid-read (the original leaked it
 * on those paths).
 */
@Override
public String getRomOAID() {
    Uri uri = Uri.parse("content://com.meizu.flyme.openidsdk/");
    String oaid = null;
    Cursor cursor = null;
    try {
        cursor = mContext.getContentResolver().query(uri, null, null, new String[]{"oaid"}, null);
        if (cursor != null && cursor.moveToFirst()) {
            int index = cursor.getColumnIndex("value");
            oaid = cursor.getString(index);
            SALog.i(TAG, "OAID query success: " + oaid);
        }
    } catch (Throwable th) {
        SALog.i(TAG, th);
    } finally {
        if (cursor != null) {
            try {
                cursor.close();
            } catch (Throwable th) {
                // Closing must never mask or crash the best-effort lookup.
                SALog.i(TAG, th);
            }
        }
    }
    return oaid;
}
// Smoke test only: constructs the Meizu implementation. The actual OAID
// assertion is commented out because it needs a real Flyme provider.
@Test
public void getRomOAID() {
    MeizuImpl meizu = new MeizuImpl(mApplication);
    // if (meizu.isSupported()) {
    //     Assert.assertNull(meizu.getRomOAID());
    // }
}
/**
 * Extracts a registry {@link ErrorCodes} value from the body of a response
 * exception. If the body is absent, unparseable, contains more than one error
 * entry, lacks a code, or carries an unknown code, the original exception is
 * rethrown unchanged so the caller can handle it generically.
 *
 * @param responseException the registry error response to inspect
 * @return the recognized error code
 * @throws ResponseException the original exception, when no code is extractable
 */
public static ErrorCodes getErrorCode(ResponseException responseException) throws ResponseException {
    // Obtain the error response code.
    String errorContent = responseException.getContent();
    if (errorContent == null) {
        throw responseException;
    }
    try {
        ErrorResponseTemplate errorResponse =
            JsonTemplateMapper.readJson(errorContent, ErrorResponseTemplate.class);
        List<ErrorEntryTemplate> errors = errorResponse.getErrors();
        // There may be multiple error objects
        if (errors.size() == 1) {
            String errorCodeString = errors.get(0).getCode();
            // May not get an error code back.
            if (errorCodeString != null) {
                // throws IllegalArgumentException if unknown error code
                return ErrorCodes.valueOf(errorCodeString);
            }
        }
    } catch (IOException | IllegalArgumentException ex) {
        // Parse exception: either isn't an error object or unknown error code
    }
    // rethrow the original exception
    throw responseException;
}
// A body that parses as JSON but is not an error object must cause the
// ORIGINAL exception instance to be rethrown, not a wrapper.
@Test
public void testGetErrorCode_invalidErrorObject() {
    Mockito.when(responseException.getContent())
        .thenReturn("{\"type\":\"other\",\"message\":\"some other object\"}");
    try {
        ErrorResponseUtil.getErrorCode(responseException);
        Assert.fail();
    } catch (ResponseException ex) {
        Assert.assertSame(responseException, ex);
    }
}
/**
 * Builds the REST path for a namespace's properties endpoint:
 * v1/{prefix}/namespaces/{encoded-ns}/properties. Multi-level namespaces are
 * percent-encoded by RESTUtil (levels joined with an encoded unit separator).
 */
public String namespaceProperties(Namespace ns) {
    return SLASH.join("v1", prefix, "namespaces", RESTUtil.encodeNamespace(ns), "properties");
}
// Multi-part namespace levels are joined with %1F (encoded unit separator),
// both with and without a catalog prefix.
@Test
public void testNamespacePropertiesWithMultipartNamespace() {
    Namespace ns = Namespace.of("n", "s");
    assertThat(withPrefix.namespaceProperties(ns))
        .isEqualTo("v1/ws/catalog/namespaces/n%1Fs/properties");
    assertThat(withoutPrefix.namespaceProperties(ns)).isEqualTo("v1/namespaces/n%1Fs/properties");
}
/**
 * Fetches messages, routing between the tiered (remote) fetcher and the next
 * (local) store. System topics and offsets that should be served locally go
 * straight to {@code next}; otherwise the tiered fetcher is used, with a
 * fallback to the local store when the tiered lookup finds nothing but the
 * offset is still present locally, and on any exception. Records latency,
 * fallback and messages-out metrics along the way.
 */
@Override
public CompletableFuture<GetMessageResult> getMessageAsync(String group, String topic,
    int queueId, long offset, int maxMsgNums, MessageFilter messageFilter) {

    // for system topic, force reading from local store
    if (topicFilter.filterTopic(topic)) {
        return next.getMessageAsync(group, topic, queueId, offset, maxMsgNums, messageFilter);
    }

    if (fetchFromCurrentStore(topic, queueId, offset, maxMsgNums)) {
        log.trace("GetMessageAsync from current store, " +
            "topic: {}, queue: {}, offset: {}, maxCount: {}", topic, queueId, offset, maxMsgNums);
    } else {
        log.trace("GetMessageAsync from remote store, " +
            "topic: {}, queue: {}, offset: {}, maxCount: {}", topic, queueId, offset, maxMsgNums);
        return next.getMessageAsync(group, topic, queueId, offset, maxMsgNums, messageFilter);
    }

    Stopwatch stopwatch = Stopwatch.createStarted();
    return fetcher
        .getMessageAsync(group, topic, queueId, offset, maxMsgNums, messageFilter)
        .thenApply(result -> {
            // Record end-to-end API latency regardless of outcome.
            Attributes latencyAttributes = TieredStoreMetricsManager.newAttributesBuilder()
                .put(TieredStoreMetricsConstant.LABEL_OPERATION,
                    TieredStoreMetricsConstant.OPERATION_API_GET_MESSAGE)
                .put(TieredStoreMetricsConstant.LABEL_TOPIC, topic)
                .put(TieredStoreMetricsConstant.LABEL_GROUP, group)
                .build();
            TieredStoreMetricsManager.apiLatency.record(
                stopwatch.elapsed(TimeUnit.MILLISECONDS), latencyAttributes);

            // Tiered store came up empty: fall back to the local store if it
            // still holds this consume offset.
            if (result.getStatus() == GetMessageStatus.OFFSET_FOUND_NULL ||
                result.getStatus() == GetMessageStatus.NO_MATCHED_LOGIC_QUEUE) {

                if (next.checkInStoreByConsumeOffset(topic, queueId, offset)) {
                    TieredStoreMetricsManager.fallbackTotal.add(1, latencyAttributes);
                    log.debug("GetMessageAsync not found, then back to next store, result: {}, " +
                            "topic: {}, queue: {}, queue offset: {}, offset range: {}-{}",
                        result.getStatus(), topic, queueId, offset,
                        result.getMinOffset(), result.getMaxOffset());
                    return next.getMessage(group, topic, queueId, offset, maxMsgNums, messageFilter);
                }
            }

            // Anything other than these expected statuses is a surprise worth logging.
            if (result.getStatus() != GetMessageStatus.FOUND &&
                result.getStatus() != GetMessageStatus.NO_MESSAGE_IN_QUEUE &&
                result.getStatus() != GetMessageStatus.NO_MATCHED_LOGIC_QUEUE &&
                result.getStatus() != GetMessageStatus.OFFSET_TOO_SMALL &&
                result.getStatus() != GetMessageStatus.OFFSET_OVERFLOW_ONE &&
                result.getStatus() != GetMessageStatus.OFFSET_OVERFLOW_BADLY) {

                log.warn("GetMessageAsync not found and message is not in next store, result: {}, " +
                        "topic: {}, queue: {}, queue offset: {}, offset range: {}-{}",
                    result.getStatus(), topic, queueId, offset,
                    result.getMinOffset(), result.getMaxOffset());
            }

            if (result.getStatus() == GetMessageStatus.FOUND) {
                Attributes messagesOutAttributes = TieredStoreMetricsManager.newAttributesBuilder()
                    .put(TieredStoreMetricsConstant.LABEL_TOPIC, topic)
                    .put(TieredStoreMetricsConstant.LABEL_GROUP, group)
                    .build();
                TieredStoreMetricsManager.messagesOutTotal.add(
                    result.getMessageCount(), messagesOutAttributes);
                if (next.getStoreStatsService() != null) {
                    next.getStoreStatsService().getGetMessageTransferredMsgCount()
                        .add(result.getMessageCount());
                }
            }

            // Fix min or max offset according next store at last
            long minOffsetInQueue = next.getMinOffsetInQueue(topic, queueId);
            if (minOffsetInQueue >= 0 && minOffsetInQueue < result.getMinOffset()) {
                result.setMinOffset(minOffsetInQueue);
            }

            // In general, the local cq offset is slightly greater than the commit offset in read message,
            // so there is no need to update the maximum offset to the local cq offset here,
            // otherwise it will cause repeated consumption after next start offset over commit offset.

            if (storeConfig.isRecordGetMessageResult()) {
                log.info("GetMessageAsync result, {}, group: {}, topic: {}, queueId: {}, offset: {}, count:{}",
                    result, group, topic, queueId, offset, maxMsgNums);
            }

            return result;
        }).exceptionally(e -> {
            // Last-resort fallback: any tiered-store failure degrades to the local store.
            log.error("GetMessageAsync from tiered store failed", e);
            return next.getMessage(group, topic, queueId, offset, maxMsgNums, messageFilter);
        });
}
// Covers routing in getMessageAsync: system topics go straight to the next
// store; normal topics use the fetcher; several non-FOUND statuses are
// returned to the caller unchanged.
@Test
public void testGetMessageAsync() {
    GetMessageResult expect = new GetMessageResult();
    expect.setStatus(GetMessageStatus.FOUND);
    expect.setMinOffset(100L);
    expect.setMaxOffset(200L);

    // topic filter
    Mockito.when(defaultStore.getMessageAsync(anyString(), anyString(), anyInt(), anyLong(), anyInt(), any()))
        .thenReturn(CompletableFuture.completedFuture(expect));
    String groupName = "groupName";
    GetMessageResult result = currentStore.getMessage(
        groupName, TopicValidator.SYSTEM_TOPIC_PREFIX, mq.getQueueId(), 100, 0, null);
    Assert.assertSame(expect, result);

    // fetch from default
    Mockito.when(fetcher.getMessageAsync(anyString(), anyString(), anyInt(), anyLong(), anyInt(), any()))
        .thenReturn(CompletableFuture.completedFuture(expect));
    result = currentStore.getMessage(
        groupName, mq.getTopic(), mq.getQueueId(), 100, 0, null);
    Assert.assertSame(expect, result);

    expect.setStatus(GetMessageStatus.NO_MATCHED_LOGIC_QUEUE);
    Assert.assertSame(expect, currentStore.getMessage(
        groupName, mq.getTopic(), mq.getQueueId(), 0, 0, null));

    expect.setStatus(GetMessageStatus.OFFSET_OVERFLOW_ONE);
    Assert.assertSame(expect, currentStore.getMessage(
        groupName, mq.getTopic(), mq.getQueueId(), 0, 0, null));

    expect.setStatus(GetMessageStatus.OFFSET_OVERFLOW_BADLY);
    Assert.assertSame(expect, currentStore.getMessage(
        groupName, mq.getTopic(), mq.getQueueId(), 0, 0, null));

    expect.setStatus(GetMessageStatus.OFFSET_RESET);
    Assert.assertSame(expect, currentStore.getMessage(
        groupName, mq.getTopic(), mq.getQueueId(), 0, 0, null));
}
/**
 * Counts the keys currently stored in the given cluster hash slot by routing
 * CLUSTER COUNTKEYSINSLOT to the master node that owns the slot, then
 * blocking for the result.
 */
@Override
public Long clusterCountKeysInSlot(int slot) {
    // A slot lives on exactly one master; resolve it and address its entry.
    RedisClusterNode node = clusterGetNodeForSlot(slot);
    InetSocketAddress address = new InetSocketAddress(node.getHost(), node.getPort());
    MasterSlaveEntry entry = executorService.getConnectionManager().getEntry(address);
    RFuture<Long> future = executorService.readAsync(
            entry, StringCodec.INSTANCE, RedisCommands.CLUSTER_COUNTKEYSINSLOT, slot);
    return syncFuture(future);
}
// An empty slot reports zero keys.
@Test
public void testClusterCountKeysInSlot() {
    Long t = connection.clusterCountKeysInSlot(1);
    assertThat(t).isZero();
}
/**
 * SQL {@code >=} operator for BIGINT operands.
 */
@ScalarOperator(GREATER_THAN_OR_EQUAL)
@SqlType(StandardTypes.BOOLEAN)
public static boolean greaterThanOrEqual(@SqlType(StandardTypes.BIGINT) long left, @SqlType(StandardTypes.BIGINT) long right)
{
    // left >= right, expressed as the negation of strict less-than.
    return !(left < right);
}
// BIGINT >= over greater, less and equal operand pairs.
@Test
public void testGreaterThanOrEqual()
{
    assertFunction("100000000037 >= 100000000037", BOOLEAN, true);
    assertFunction("100000000037 >= 100000000017", BOOLEAN, true);
    assertFunction("100000000017 >= 100000000037", BOOLEAN, false);
    assertFunction("100000000017 >= 100000000017", BOOLEAN, true);
}
/**
 * Renders a Pig schema as a field-list string by stripping the outer pair of
 * characters (the braces) that {@code Schema#toString} wraps around it.
 */
static String pigSchemaToString(Schema pigSchema) {
    final String wrapped = pigSchema.toString();
    final int lastIndex = wrapped.length() - 1;
    return wrapped.substring(1, lastIndex);
}
// Merging pig.schema metadata from multiple files unions all field names in
// first-seen order, deduplicating fields shared between the two schemas.
@Test
public void testSchemaEvolution() {
    Map<String, Set<String>> map = new LinkedHashMap<String, Set<String>>();
    map.put(
        "pig.schema",
        new LinkedHashSet<String>(Arrays.asList(
            "a:int, b:int, c:int, d:int, e:int, f:int",
            "aa:int, aaa:int, b:int, c:int, ee:int")));
    Schema result = getPigSchemaFromMultipleFiles(
        new MessageType("file_schema", new PrimitiveType(OPTIONAL, INT32, "a")), map);
    assertEquals("a: int,b: int,c: int,d: int,e: int,f: int,aa: int,aaa: int,ee: int",
        pigSchemaToString(result));
}
/**
 * Registers the data handler bean for the gRPC plugin.
 */
@Bean
public PluginDataHandler grpcPluginDataHandler() {
    return new GrpcPluginDataHandler();
}
// The gRPC plugin data handler must be registered under its bean name.
@Test
public void testGrpcPluginDataHandler() {
    applicationContextRunner.run(context -> {
            PluginDataHandler handler = context.getBean("grpcPluginDataHandler", PluginDataHandler.class);
            assertNotNull(handler);
        }
    );
}
/**
 * Merges user-editable state from the stored issue into the freshly scanned
 * one, so that end-user changes (assignee, severity, effort, resolution,
 * status) survive a new analysis. Each field is delegated to its own
 * resolver, which decides whether the database or the scan value wins.
 */
@VisibleForTesting
void mergeFields(IssueDto dbIssue, DefaultIssue issue) {
    resolveAssignee(dbIssue, issue);
    resolveSeverity(dbIssue, issue);
    resolveEffortToFix(dbIssue, issue);
    resolveResolution(dbIssue, issue);
    resolveStatus(dbIssue, issue);
}
// After merging, scan-owned fields (line, severity) come from the new
// analysis while user-owned fields (assignee, resolution, status) come from
// the stored issue.
@Test
public void should_keep_changes_made_by_user() {
    DefaultIssue issue = new DefaultIssue()
        .setKey("ABCDE")
        .setRuleKey(RuleKey.of("java", "AvoidCycles"))
        .setComponentKey("struts:org.apache.struts.Action")
        .setNew(false);

    // Before starting scan
    issue.setAssigneeUuid(null);
    issue.setCreationDate(DateUtils.parseDate("2012-01-01"));
    issue.setUpdateDate(DateUtils.parseDate("2012-02-02"));

    // Changed by scan
    issue.setLine(200);
    issue.setSeverity(Severity.BLOCKER);
    issue.setManualSeverity(false);
    issue.setAuthorLogin("simon");
    issue.setChecksum("CHECKSUM-ABCDE");
    issue.setResolution(null);
    issue.setStatus(Issue.STATUS_REOPENED);

    // Issue as seen and changed by end-user
    IssueDto dbIssue = new IssueDto()
        .setKee("ABCDE")
        .setRuleUuid("uuid-10")
        .setRuleKey("java", "AvoidCycles")
        .setComponentUuid("100")
        .setComponentKey("struts:org.apache.struts.Action")
        .setLine(10)
        .setResolution(Issue.RESOLUTION_FALSE_POSITIVE)
        .setStatus(Issue.STATUS_RESOLVED)
        .setAssigneeUuid("arthur")
        .setSeverity(Severity.MAJOR)
        .setManualSeverity(false);

    underTest.mergeFields(dbIssue, issue);

    assertThat(issue.key()).isEqualTo("ABCDE");
    assertThat(issue.componentKey()).isEqualTo("struts:org.apache.struts.Action");

    // Scan wins on :
    assertThat(issue.line()).isEqualTo(200);
    assertThat(issue.severity()).isEqualTo(Severity.BLOCKER);
    assertThat(issue.manualSeverity()).isFalse();

    // User wins on :
    assertThat(issue.assignee()).isEqualTo("arthur");
    assertThat(issue.resolution()).isEqualTo(Issue.RESOLUTION_FALSE_POSITIVE);
    assertThat(issue.status()).isEqualTo(Issue.STATUS_RESOLVED);
}
/**
 * Case-insensitive startsWith. Two nulls are considered a match; a single
 * null never matches. Uses {@code regionMatches} to avoid allocating
 * lowercase copies of either string.
 */
public static boolean startsWithIgnoreCase(String text, String prefix) {
    // Null handling: equal only when both are null.
    if (text == null || prefix == null) {
        return text == null && prefix == null;
    }
    if (prefix.length() > text.length()) {
        return false;
    }
    return text.regionMatches(true, 0, prefix, 0, prefix.length());
}
// Exercises null handling (both null matches, single null does not),
// case-insensitive matching in both directions, and prefix-longer-than-text.
@Test
public void testStartsWithIgnoreCase() {
    assertTrue(StringHelper.startsWithIgnoreCase(null, null));
    assertFalse(StringHelper.startsWithIgnoreCase("foo", null));
    assertFalse(StringHelper.startsWithIgnoreCase(null, "bar"));
    assertFalse(StringHelper.startsWithIgnoreCase("HelloWorld", "bar"));
    assertTrue(StringHelper.startsWithIgnoreCase("HelloWorld", "Hello"));
    assertTrue(StringHelper.startsWithIgnoreCase("HelloWorld", "hello"));
    assertFalse(StringHelper.startsWithIgnoreCase("HelloWorld", "Helo"));
    assertTrue(StringHelper.startsWithIgnoreCase("HelloWorld", "HelloWorld"));
    assertTrue(StringHelper.startsWithIgnoreCase("HelloWorld", "helloWORLD"));
    assertTrue(StringHelper.startsWithIgnoreCase("HelloWorld", "HELLO"));
    assertTrue(StringHelper.startsWithIgnoreCase("helloworld", "helloWORLD"));
    assertTrue(StringHelper.startsWithIgnoreCase("HELLOWORLD", "HELLO"));
}
/**
 * Returns the record registered under {@code type}, or empty if none has
 * been created yet. Unlike getRecord, this never creates one as a side effect.
 */
public Optional<DistroRecord> getRecordIfExist(String type) {
    return Optional.ofNullable(distroRecords.get(type));
}
// Empty until getRecord creates the entry; present afterwards.
@Test
void testGetRecordIfExist() {
    Optional<DistroRecord> actual = DistroRecordsHolder.getInstance().getRecordIfExist("testGetRecordIfExist");
    assertFalse(actual.isPresent());
    DistroRecordsHolder.getInstance().getRecord("testGetRecordIfExist");
    actual = DistroRecordsHolder.getInstance().getRecordIfExist("testGetRecordIfExist");
    assertTrue(actual.isPresent());
}
/**
 * Builds a fresh DOS attribute view snapshot for the given file; each call
 * reflects the file's attributes at construction time.
 */
@Override
public DosFileAttributes readAttributes(File file) {
    return new Attributes(file);
}
// DOS flags default to false; setting "dos:hidden" is visible in a fresh
// snapshot while the other flags stay false.
@Test
public void testAttributes() {
    DosFileAttributes attrs = provider.readAttributes(file);
    assertThat(attrs.isHidden()).isFalse();
    assertThat(attrs.isArchive()).isFalse();
    assertThat(attrs.isReadOnly()).isFalse();
    assertThat(attrs.isSystem()).isFalse();

    file.setAttribute("dos", "hidden", true);

    attrs = provider.readAttributes(file);
    assertThat(attrs.isHidden()).isTrue();
    assertThat(attrs.isArchive()).isFalse();
    assertThat(attrs.isReadOnly()).isFalse();
    assertThat(attrs.isSystem()).isFalse();
}
/**
 * Creates an unconfigured {@code Write} transform via its AutoValue builder.
 */
public static Write write() {
    return new AutoValue_SqsIO_Write.Builder().build();
}
// Writes 100 messages through SqsIO.write() and polls the embedded queue
// until all 100 bodies have been received, then checks each body is present.
@Test
public void testWrite() {
    final AmazonSQS client = embeddedSqsRestServer.getClient();
    final String queueUrl = embeddedSqsRestServer.getQueueUrl();

    List<SendMessageRequest> messages = new ArrayList<>();
    for (int i = 0; i < 100; i++) {
        final SendMessageRequest request = new SendMessageRequest(queueUrl, "This is a test " + i);
        messages.add(request);
    }
    pipeline.apply(Create.of(messages)).apply(SqsIO.write());
    pipeline.run().waitUntilFinish();

    List<String> received = new ArrayList<>();
    while (received.size() < 100) {
        final ReceiveMessageResult receiveMessageResult = client.receiveMessage(queueUrl);
        if (receiveMessageResult.getMessages() != null) {
            for (Message message : receiveMessageResult.getMessages()) {
                received.add(message.getBody());
            }
        }
    }
    assertEquals(100, received.size());
    for (int i = 0; i < 100; i++) {
        assertTrue(received.contains("This is a test " + i));
    }
}
/**
 * Handles the transaction-coordinator-to-broker command that ends a
 * transaction on a single subscription. Resolves the topic; when present,
 * verifies the caller is a super user (this endpoint is reserved for TC→broker
 * traffic) and delegates to the subscription's endTxn. A missing subscription
 * or a topic that was never created are treated as success (idempotent end);
 * a topic that exists in storage but not in this broker is an error.
 * All outcomes are reported back via newEndTxnOnSubscriptionResponse.
 */
@Override
protected void handleEndTxnOnSubscription(CommandEndTxnOnSubscription command) {
    checkArgument(state == State.Connected);
    final long requestId = command.getRequestId();
    final long txnidMostBits = command.getTxnidMostBits();
    final long txnidLeastBits = command.getTxnidLeastBits();
    final String topic = command.getSubscription().getTopic();
    final String subName = command.getSubscription().getSubscription();
    final int txnAction = command.getTxnAction().getValue();
    final TxnID txnID = new TxnID(txnidMostBits, txnidLeastBits);
    final long lowWaterMark = command.getTxnidLeastBitsOfLowWatermark();

    if (log.isDebugEnabled()) {
        log.debug("[{}] [{}] handleEndTxnOnSubscription txnId: [{}], txnAction: [{}]",
            topic, subName, new TxnID(txnidMostBits, txnidLeastBits), txnAction);
    }

    CompletableFuture<Optional<Topic>> topicFuture = service.getTopicIfExists(TopicName.get(topic).toString());
    topicFuture.thenAcceptAsync(optionalTopic -> {
        if (optionalTopic.isPresent()) {
            Subscription subscription = optionalTopic.get().getSubscription(subName);
            if (subscription == null) {
                // Subscription already gone: acknowledge the end as successful.
                log.warn("handleEndTxnOnSubscription fail! " +
                        "topic {} subscription {} does not exist. txnId: [{}], txnAction: [{}]",
                    optionalTopic.get().getName(), subName, txnID, TxnAction.valueOf(txnAction));
                writeAndFlush(
                    Commands.newEndTxnOnSubscriptionResponse(requestId, txnidLeastBits, txnidMostBits));
                return;
            }
            // we only accept super user because this endpoint is reserved for tc to broker communication
            isSuperUser()
                .thenCompose(isOwner -> {
                    if (!isOwner) {
                        return failedFutureTxnTcNotAllowed(txnID);
                    }
                    return subscription.endTxn(txnidMostBits, txnidLeastBits, txnAction, lowWaterMark);
                }).whenComplete((ignored, e) -> {
                    if (e != null) {
                        e = FutureUtil.unwrapCompletionException(e);
                        log.error("handleEndTxnOnSubscription fail ! topic: {}, subscription: {}" +
                                "txnId: [{}], txnAction: [{}]", topic, subName,
                            txnID, TxnAction.valueOf(txnAction), e.getCause());
                        writeAndFlush(Commands.newEndTxnOnSubscriptionResponse(
                            requestId, txnidLeastBits, txnidMostBits,
                            BrokerServiceException.getClientErrorCode(e),
                            "Handle end txn on subscription failed: " + e.getMessage()));
                        return;
                    }
                    writeAndFlush(
                        Commands.newEndTxnOnSubscriptionResponse(requestId, txnidLeastBits, txnidMostBits));
                });
        } else {
            // Topic not loaded in this broker: distinguish "exists in storage"
            // (service not ready → error) from "never created" (treat as success).
            getBrokerService().getManagedLedgerFactory()
                .asyncExists(TopicName.get(topic).getPersistenceNamingEncoding())
                .thenAccept((b) -> {
                    if (b) {
                        log.error("handleEndTxnOnSubscription fail! The topic {} does not exist in broker, " +
                                "subscription: {}, txnId: [{}], txnAction: [{}]",
                            topic, subName, txnID, TxnAction.valueOf(txnAction));
                        writeAndFlush(Commands.newEndTxnOnSubscriptionResponse(
                            requestId, txnID.getLeastSigBits(), txnID.getMostSigBits(),
                            ServerError.ServiceNotReady,
                            "The topic " + topic + " does not exist in broker."));
                    } else {
                        log.warn("handleEndTxnOnSubscription fail ! The topic {} has not been created, " +
                                "subscription: {} txnId: [{}], txnAction: [{}]",
                            topic, subName, txnID, TxnAction.valueOf(txnAction));
                        writeAndFlush(Commands.newEndTxnOnSubscriptionResponse(requestId,
                            txnID.getLeastSigBits(), txnID.getMostSigBits()));
                    }
                }).exceptionally(e -> {
                    log.error("handleEndTxnOnSubscription fail ! topic {}, subscription: {}" +
                            "txnId: [{}], txnAction: [{}]", topic, subName,
                        txnID, TxnAction.valueOf(txnAction), e.getCause());
                    writeAndFlush(Commands.newEndTxnOnSubscriptionResponse(
                        requestId, txnID.getLeastSigBits(), txnID.getMostSigBits(),
                        ServerError.ServiceNotReady, e.getMessage()));
                    return null;
                });
        }
    }, ctx.executor()).exceptionally(e -> {
        log.error("handleEndTxnOnSubscription fail ! topic: {}, subscription: {}" +
                "txnId: [{}], txnAction: [{}]", topic, subName,
            txnID, TxnAction.valueOf(txnAction), e.getCause());
        writeAndFlush(Commands.newEndTxnOnSubscriptionResponse(
            requestId, txnidLeastBits, txnidMostBits,
            ServerError.ServiceNotReady,
            "Handle end txn on subscription failed: " + e.getMessage()));
        return null;
    });
}
// checkArgument(state == State.Connected) must reject a connection in the
// Failed state with IllegalArgumentException before any processing happens.
@Test(expectedExceptions = IllegalArgumentException.class)
public void shouldFailHandleEndTxnOnSubscription() throws Exception {
    ServerCnx serverCnx = mock(ServerCnx.class, CALLS_REAL_METHODS);
    Field stateUpdater = ServerCnx.class.getDeclaredField("state");
    stateUpdater.setAccessible(true);
    stateUpdater.set(serverCnx, ServerCnx.State.Failed);
    serverCnx.handleEndTxnOnSubscription(any());
}
/**
 * Parses one filter expression against the given attribute definitions and
 * renders the result as a MongoDB {@code Bson} filter.
 */
public Bson parseSingleExpression(final String filterExpression, final List<EntityAttribute> attributes) {
    final Filter filter = singleFilterParser.parseSingleExpression(filterExpression, attributes);
    return filter.toBson();
}
// Open-ended date ranges: "date><sep>" becomes a gte-only filter and
// "<sep><date>" becomes an lte-only filter on the attribute.
@Test
void parsesFilterExpressionCorrectlyForOpenDateRanges() {
    final String dateString = "2012-12-12 12:12:12";
    final DateTime dateObject = new DateTime(2012, 12, 12, 12, 12, 12, DateTimeZone.UTC);
    final List<EntityAttribute> entityAttributes = List.of(EntityAttribute.builder()
        .id("created_at")
        .title("Creation Date")
        .type(SearchQueryField.Type.DATE)
        .filterable(true)
        .build());
    assertEquals(
        Filters.and(
            Filters.gte("created_at", dateObject.toDate())
        ),
        toTest.parseSingleExpression("created_at:" + dateString + RANGE_VALUES_SEPARATOR,
            entityAttributes
        ));
    assertEquals(
        Filters.and(
            Filters.lte("created_at", dateObject.toDate())
        ),
        toTest.parseSingleExpression("created_at:" + RANGE_VALUES_SEPARATOR + dateString,
            entityAttributes
        ));
}