Dataset schema (focal-method / test-case pairs):
- focal_method — string; lengths range from 13 to 60.9k characters
- test_case — string; lengths range from 25 to 109k characters
/**
 * Decides whether the named class/package must be loaded by an isolated plugin
 * classloader rather than shared from the framework classloader.
 *
 * <p>A name is shared (not isolated) exactly when it matches {@code EXCLUDE}
 * without also matching {@code INCLUDE}; every other name is isolated.
 *
 * @param name fully qualified class name or package prefix
 * @return true when the class should be loaded in isolation
 */
public static boolean shouldLoadInIsolation(String name) {
    // De Morgan of the original !(excluded && !included):
    // isolated when not excluded, or when explicitly re-included.
    return !EXCLUDE.matcher(name).matches() || INCLUDE.matcher(name).matches();
}
/**
 * Verifies that every public Connect API package and class is treated as a
 * framework class, i.e. is never loaded in isolation by a plugin classloader.
 * The list enumerates each 'api' package prefix and every class within it;
 * entries that ARE intentionally isolated are commented out with the reason.
 */
@Test
public void testConnectApiClasses() {
    List<String> apiClasses = Arrays.asList(
        // Enumerate all packages and classes
        "org.apache.kafka.connect.",
        "org.apache.kafka.connect.components.",
        "org.apache.kafka.connect.components.Versioned",
        //"org.apache.kafka.connect.connector.policy.", isolated by default
        "org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy",
        "org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest",
        "org.apache.kafka.connect.connector.policy.ConnectorClientConfigRequest$ClientType",
        "org.apache.kafka.connect.connector.",
        "org.apache.kafka.connect.connector.Connector",
        "org.apache.kafka.connect.connector.ConnectorContext",
        "org.apache.kafka.connect.connector.ConnectRecord",
        "org.apache.kafka.connect.connector.Task",
        "org.apache.kafka.connect.data.",
        "org.apache.kafka.connect.data.ConnectSchema",
        "org.apache.kafka.connect.data.Date",
        "org.apache.kafka.connect.data.Decimal",
        "org.apache.kafka.connect.data.Field",
        "org.apache.kafka.connect.data.Schema",
        "org.apache.kafka.connect.data.SchemaAndValue",
        "org.apache.kafka.connect.data.SchemaBuilder",
        "org.apache.kafka.connect.data.SchemaProjector",
        "org.apache.kafka.connect.data.Struct",
        "org.apache.kafka.connect.data.Time",
        "org.apache.kafka.connect.data.Timestamp",
        "org.apache.kafka.connect.data.Values",
        "org.apache.kafka.connect.errors.",
        "org.apache.kafka.connect.errors.AlreadyExistsException",
        "org.apache.kafka.connect.errors.ConnectException",
        "org.apache.kafka.connect.errors.DataException",
        "org.apache.kafka.connect.errors.IllegalWorkerStateException",
        "org.apache.kafka.connect.errors.NotFoundException",
        "org.apache.kafka.connect.errors.RetriableException",
        "org.apache.kafka.connect.errors.SchemaBuilderException",
        "org.apache.kafka.connect.errors.SchemaProjectorException",
        "org.apache.kafka.connect.header.",
        "org.apache.kafka.connect.header.ConnectHeader",
        "org.apache.kafka.connect.header.ConnectHeaders",
        "org.apache.kafka.connect.header.Header",
        "org.apache.kafka.connect.header.Headers",
        "org.apache.kafka.connect.health.",
        "org.apache.kafka.connect.health.AbstractState",
        "org.apache.kafka.connect.health.ConnectClusterDetails",
        "org.apache.kafka.connect.health.ConnectClusterState",
        "org.apache.kafka.connect.health.ConnectorHealth",
        "org.apache.kafka.connect.health.ConnectorState",
        "org.apache.kafka.connect.health.ConnectorType",
        "org.apache.kafka.connect.health.TaskState",
        "org.apache.kafka.connect.rest.",
        "org.apache.kafka.connect.rest.ConnectRestExtension",
        "org.apache.kafka.connect.rest.ConnectRestExtensionContext",
        "org.apache.kafka.connect.sink.",
        "org.apache.kafka.connect.sink.SinkConnector",
        "org.apache.kafka.connect.sink.SinkRecord",
        "org.apache.kafka.connect.sink.SinkTask",
        "org.apache.kafka.connect.sink.SinkTaskContext",
        "org.apache.kafka.connect.sink.ErrantRecordReporter",
        "org.apache.kafka.connect.source.",
        "org.apache.kafka.connect.source.SourceConnector",
        "org.apache.kafka.connect.source.SourceRecord",
        "org.apache.kafka.connect.source.SourceTask",
        "org.apache.kafka.connect.source.SourceTaskContext",
        "org.apache.kafka.connect.storage.",
        "org.apache.kafka.connect.storage.Converter",
        "org.apache.kafka.connect.storage.ConverterConfig",
        "org.apache.kafka.connect.storage.ConverterType",
        "org.apache.kafka.connect.storage.HeaderConverter",
        "org.apache.kafka.connect.storage.OffsetStorageReader",
        //"org.apache.kafka.connect.storage.SimpleHeaderConverter", explicitly isolated
        //"org.apache.kafka.connect.storage.StringConverter", explicitly isolated
        "org.apache.kafka.connect.storage.StringConverterConfig",
        //"org.apache.kafka.connect.transforms.", isolated by default
        "org.apache.kafka.connect.transforms.Transformation",
        "org.apache.kafka.connect.transforms.predicates.Predicate",
        "org.apache.kafka.connect.util.",
        "org.apache.kafka.connect.util.ConnectorUtils"
    );
    // Classes in the API should never be loaded in isolation.
    for (String clazz : apiClasses) {
        assertFalse(PluginUtils.shouldLoadInIsolation(clazz),
            clazz + " from 'api' is loaded in isolation but should not be");
    }
}
/**
 * Starts a processor span for the given JMS message, continuing any trace
 * context extracted (and cleared) from the message's properties.
 *
 * <p>When no upstream context was present and the span is sampled, the
 * destination queue/topic is tagged so the span can be found by destination.
 *
 * @param message the JMS message being processed
 * @return the span representing the processing of this message
 */
public Span nextSpan(Message message) {
    // Extraction also clears the propagation properties from the message so
    // they are not accidentally forwarded downstream.
    TraceContextOrSamplingFlags extracted =
        extractAndClearTraceIdProperties(processorExtractor, message, message);
    Span result = tracer.nextSpan(extracted); // Processor spans use the normal sampler.

    // When an upstream context was not present, lookup keys are unlikely added
    if (extracted.context() == null && !result.isNoop()) {
        // simplify code by re-using an existing MessagingRequest impl
        tagQueueOrTopic(new MessageConsumerRequest(message, destination(message)), result);
    }
    return result;
}
/**
 * A message with no incoming trace context and a queue destination must
 * produce a local span tagged with exactly {@code jms.queue=foo}.
 */
@Test
void nextSpan_should_tag_queue_when_no_incoming_context() {
    // Message carries only a queue destination named "foo"; no upstream context.
    message.setDestination(createDestination("foo", QUEUE_TYPE));

    // Start and finish so the span is reported to the test span handler.
    jmsTracing.nextSpan(message).start().finish();

    assertThat(testSpanHandler.takeLocalSpan().tags())
        .containsOnly(entry("jms.queue", "foo"));
}
/**
 * Parses an MS Access database into XHTML.
 *
 * <p>Opens the file with Jackcess (read-only, with a crypt codec so encrypted
 * files fail with a clear encryption/wrong-password error rather than a codec
 * error), extracts content via {@code JackcessExtractor}, and always closes
 * the database before ending the XHTML document.
 *
 * @param stream   document stream (spooled to a file for random access)
 * @param handler  SAX handler receiving the XHTML output
 * @param metadata document metadata, also used to look up a password
 * @param context  parse context; may supply a {@code PasswordProvider}
 * @throws EncryptedDocumentException if the password is missing/incorrect
 * @throws UnsupportedFormatException for pre-v97 mdb files (TIKA-3849)
 */
@Override
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
        ParseContext context) throws IOException, SAXException, TikaException {
    TikaInputStream tis = TikaInputStream.get(stream);
    Database db = null;
    XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
    xhtml.startDocument();
    String password = null;
    PasswordProvider passwordProvider = context.get(PasswordProvider.class);
    if (passwordProvider != null) {
        password = passwordProvider.getPassword(metadata);
    }
    try {
        if (password == null) {
            //do this to ensure encryption/wrong password exception vs. more generic
            //"need right codec" error message.
            db = new DatabaseBuilder(tis.getFile()).setCodecProvider(new CryptCodecProvider())
                    .setReadOnly(true).open();
        } else {
            db = new DatabaseBuilder(tis.getFile())
                    .setCodecProvider(new CryptCodecProvider(password)).setReadOnly(true)
                    .open();
        }
        db.setLinkResolver(IGNORE_LINK_RESOLVER);//just in case
        JackcessExtractor ex = new JackcessExtractor(metadata, context, locale);
        ex.parse(db, xhtml);
    } catch (IOException e) {
        //TIKA-3849
        if (e.getMessage() != null && e.getMessage().contains("Unrecognized map type: 75")) {
            throw new UnsupportedFormatException(
                    "Jackcess doesn't process mdb versions before v97");
        }
        throw e;
    } catch (IllegalStateException e) {
        if (e.getMessage() != null && e.getMessage().contains("Incorrect password")) {
            throw new EncryptedDocumentException(e);
        }
        throw e;
    } finally {
        if (db != null) {
            try {
                db.close();
            } catch (IOException e) {
                //swallow = silent close
            }
        }
    }
    xhtml.endDocument();
}
@Test public void testBasic() throws Exception { RecursiveParserWrapper w = new RecursiveParserWrapper(AUTO_DETECT_PARSER); for (String fName : new String[]{"testAccess2.accdb", "testAccess2_2000.mdb", "testAccess2_2002-2003.mdb"}) { InputStream is = null; RecursiveParserWrapperHandler handler = new RecursiveParserWrapperHandler( new BasicContentHandlerFactory(BasicContentHandlerFactory.HANDLER_TYPE.XML, -1)); try { is = this.getResourceAsStream("/test-documents/" + fName); Metadata meta = new Metadata(); ParseContext c = new ParseContext(); w.parse(is, handler, meta, c); } finally { IOUtils.closeQuietly(is); } List<Metadata> list = handler.getMetadataList(); assertEquals(4, list.size()); String mainContent = list.get(0).get(TikaCoreProperties.TIKA_CONTENT); //make sure there's a thead and tbody assertContains("</thead><tbody>", mainContent); //assert table header assertContains("<th>ShortTextField</th>", mainContent); //test date format //java 8 is 6/24/15 ...java 10 is 2015-06-24 assertTrue(mainContent.contains("6/24/15") || mainContent.contains("2015-06-24")); //test that markup is stripped assertContains("over the bold italic dog", mainContent); //test unicode assertContains("\u666E\u6797\u65AF\u987F\u5927\u5B66", mainContent); //test embedded document handling assertContains("Test Document with embedded pdf", list.get(3).get(TikaCoreProperties.TIKA_CONTENT)); } }
/**
 * Projects the given row onto the fields named by the access descriptor.
 *
 * @param input input row to project
 * @param fieldAccessDescriptor resolved descriptor naming the selected fields
 * @param inputSchema schema of {@code input}
 * @param outputSchema schema of the projected result
 * @return a row with {@code outputSchema} containing the selected values
 */
private static Row selectRow(
    Row input,
    FieldAccessDescriptor fieldAccessDescriptor,
    Schema inputSchema,
    Schema outputSchema) {
  // Selecting all fields is the identity projection: return the input as-is.
  if (fieldAccessDescriptor.getAllFields()) {
    return input;
  }
  Row.Builder builder = Row.withSchema(outputSchema);
  selectIntoRow(inputSchema, input, builder, fieldAccessDescriptor);
  return builder.build();
}
/**
 * Selecting through a nullable nested row must yield nulls rather than
 * throwing: a single-field select returns one null value; a wildcard select
 * returns a null for every field of the nested schema.
 */
@Test
public void testSelectNullableNestedRow() {
    // Single nested field out of an all-null row -> one null value.
    FieldAccessDescriptor fieldAccessDescriptor1 =
        FieldAccessDescriptor.withFieldNames("nested.field1").resolve(NESTED_NULLABLE_SCHEMA);
    Row out1 = selectRow(
        NESTED_NULLABLE_SCHEMA, fieldAccessDescriptor1, Row.nullRow(NESTED_NULLABLE_SCHEMA));
    assertNull(out1.getValue(0));

    // Wildcard over the null nested row -> nulls for all four nested fields.
    FieldAccessDescriptor fieldAccessDescriptor2 =
        FieldAccessDescriptor.withFieldNames("nested.*").resolve(NESTED_NULLABLE_SCHEMA);
    Row out2 = selectRow(
        NESTED_NULLABLE_SCHEMA, fieldAccessDescriptor2, Row.nullRow(NESTED_NULLABLE_SCHEMA));
    assertEquals(Collections.nCopies(4, null), out2.getValues());
}
public static String[] splitToSteps(String path, boolean preserveRootAsStep) { if (path == null) { return null; } if (preserveRootAsStep && path.equals(SHARE_ROOT)) { return new String[] { SHARE_ROOT }; } var includeRoot = preserveRootAsStep && path.startsWith(SHARE_ROOT); if (!includeRoot) { path = ensureRelative(path); } // no ambiguity such as "/|\\\\" var pathSteps = path.split("" + PATH_SEPARATOR); if (includeRoot) { pathSteps[0] = SHARE_ROOT; // replace leading "" } return pathSteps; }
/** Splitting "/" while preserving the root must yield the single root step. */
@Test
void splitRootPreservingRootShouldReturnRoot() {
    String[] steps = FilesPath.splitToSteps("/", true);
    assertArrayEquals(new String[] { "/" }, steps);
}
/**
 * Creates a {@code HiveCatalog} from the factory context: validates the
 * supplied options, then wires the catalog name plus the configured default
 * database, Hive/Hadoop conf directories and Hive version.
 *
 * @param context factory context carrying the catalog name and options
 * @return the configured HiveCatalog instance
 */
@Override
public Catalog createCatalog(Context context) {
    final FactoryUtil.CatalogFactoryHelper helper =
            FactoryUtil.createCatalogFactoryHelper(this, context);
    // Rejects unknown/invalid options before the catalog is constructed.
    helper.validate();

    return new HiveCatalog(
            context.getName(),
            helper.getOptions().get(DEFAULT_DATABASE),
            helper.getOptions().get(HIVE_CONF_DIR),
            helper.getOptions().get(HADOOP_CONF_DIR),
            helper.getOptions().get(HIVE_VERSION));
}
/**
 * Creating a catalog with an explicit hadoop-conf-dir must both equal the
 * expected reference catalog and actually load properties from the
 * mapred-site.xml found in that directory.
 */
@Test
public void testCreateHiveCatalogWithHadoopConfDir() throws IOException {
    final String catalogName = "mycatalog";

    // Write a mapred-site.xml into a fresh temp dir so we can verify it is read.
    final String hadoopConfDir = tempFolder.newFolder().getAbsolutePath();
    final File mapredSiteFile = new File(hadoopConfDir, "mapred-site.xml");
    final String mapredKey = "mapred.site.config.key";
    final String mapredVal = "mapred.site.config.val";
    writeProperty(mapredSiteFile, mapredKey, mapredVal);

    final HiveCatalog expectedCatalog =
            HiveTestUtils.createHiveCatalog(catalogName, CONF_DIR.getPath(), hadoopConfDir, null);

    final Map<String, String> options = new HashMap<>();
    options.put(CommonCatalogOptions.CATALOG_TYPE.key(), HiveCatalogFactoryOptions.IDENTIFIER);
    options.put(HiveCatalogFactoryOptions.HIVE_CONF_DIR.key(), CONF_DIR.getPath());
    options.put(HiveCatalogFactoryOptions.HADOOP_CONF_DIR.key(), hadoopConfDir);

    final Catalog actualCatalog =
            FactoryUtil.createCatalog(
                    catalogName, options, null, Thread.currentThread().getContextClassLoader());

    checkEquals(expectedCatalog, (HiveCatalog) actualCatalog);
    // The custom property from mapred-site.xml must be visible in the HiveConf.
    assertThat(((HiveCatalog) actualCatalog).getHiveConf().get(mapredKey)).isEqualTo(mapredVal);
}
@Override public List<ResourceReference> getResourceDependencies( TransMeta transMeta, StepMeta stepInfo ) { List<ResourceReference> references = new ArrayList<ResourceReference>( 5 ); String realFilename = transMeta.environmentSubstitute( fileName ); String realTransname = transMeta.environmentSubstitute( transName ); String realDirectoryPath = transMeta.environmentSubstitute( directoryPath ); ResourceReference reference = new ResourceReference( stepInfo ); references.add( reference ); if ( !Utils.isEmpty( realFilename ) ) { // Add the filename to the references, including a reference to this step // meta data. // reference.getEntries().add( new ResourceEntry( realFilename, ResourceType.ACTIONFILE ) ); } else if ( !Utils.isEmpty( realTransname ) ) { // Add the trans name (including full repository path) to dependencies String realTransformation = realDirectoryPath + "/" + realTransname; reference.getEntries().add( new ResourceEntry( realTransformation, ResourceType.ACTIONFILE ) ); } return references; }
/**
 * With neither a file name nor a transformation name configured, the step
 * still reports exactly one resource reference, but with no entries.
 */
@Test
public void getResourceDependencies() {
  TransMeta transMeta = mock( TransMeta.class );
  StepMeta stepMeta = mock( StepMeta.class );

  List<ResourceReference> references = metaInjectMeta.getResourceDependencies( transMeta, stepMeta );

  assertEquals( 1, references.size() );
  assertEquals( 0, references.get( 0 ).getEntries().size() );
}
/**
 * Visits a menu entry: entries whose "allowed" attribute is explicitly
 * {@code Boolean.FALSE} are skipped; otherwise the entry is added either as a
 * submenu (when it contains one) or as a plain action item.
 *
 * @param entry the menu entry to process
 */
@Override
public void visit(Entry entry) {
    Object allowed = entry.getAttribute("allowed");
    if (Boolean.FALSE.equals(allowed)) {
        return; // explicitly disallowed entries are not rendered
    }
    if (containsSubmenu(entry)) {
        addSubmenu(entry);
    } else {
        addActionItem(entry);
    }
}
/**
 * A group entry that carries an action and a child action entry must be built
 * into the parent menu with the group's action attached to the first item.
 */
@Test
public void createsGroupWithAction() {
    // Parent menu entry backed by a real swing JMenu component.
    Entry parentMenuEntry = new Entry();
    final JMenu parentMenu = new JMenu();
    new EntryAccessor().setComponent(parentMenuEntry, parentMenu);
    parentMenuEntry.addChild(groupEntry);
    groupEntry.addChild(actionEntry);
    // The group itself carries the action under test.
    new EntryAccessor().setAction(groupEntry, action);

    menuActionGroupBuilder.visit(groupEntry);

    final JMenuItem menuItem = getFirstSubMenuItem(groupEntry);
    assertThatMenuItemHasCorrectAction(menuItem);
}
/**
 * Unregisters a broker from a KRaft cluster via an UnregisterBrokerRequest
 * sent to the least-loaded node.
 *
 * <p>Note the REQUEST_TIMED_OUT case deliberately (re)throws from
 * handleResponse instead of completing the future: throwing signals the call
 * machinery to retry, while other errors fail the future immediately.
 *
 * @param brokerId id of the broker to unregister
 * @param options  request options (timeout)
 * @return result whose future completes when the broker is unregistered
 */
@Override
public UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOptions options) {
    final KafkaFutureImpl<Void> future = new KafkaFutureImpl<>();
    final long now = time.milliseconds();
    final Call call = new Call("unregisterBroker",
        calcDeadlineMs(now, options.timeoutMs()),
        new LeastLoadedNodeProvider()) {

        @Override
        UnregisterBrokerRequest.Builder createRequest(int timeoutMs) {
            UnregisterBrokerRequestData data =
                new UnregisterBrokerRequestData().setBrokerId(brokerId);
            return new UnregisterBrokerRequest.Builder(data);
        }

        @Override
        void handleResponse(AbstractResponse abstractResponse) {
            final UnregisterBrokerResponse response =
                (UnregisterBrokerResponse) abstractResponse;
            Errors error = Errors.forCode(response.data().errorCode());
            switch (error) {
                case NONE:
                    future.complete(null);
                    break;
                case REQUEST_TIMED_OUT:
                    // Throwing (rather than completing exceptionally) lets the
                    // call runner retry until the retry budget is exhausted.
                    throw error.exception();
                default:
                    log.error("Unregister broker request for broker ID {} failed: {}",
                        brokerId, error.message());
                    future.completeExceptionally(error.exception());
                    break;
            }
        }

        @Override
        void handleFailure(Throwable throwable) {
            future.completeExceptionally(throwable);
        }
    };
    runnable.call(call, now);
    return new UnregisterBrokerResult(future);
}
/**
 * With retries=1, two consecutive REQUEST_TIMED_OUT responses must exhaust
 * the retry budget and surface the timeout to the caller's future.
 */
@Test
public void testUnregisterBrokerTimeoutMaxRetry() {
    int nodeId = 1;
    try (final AdminClientUnitTestEnv env =
             mockClientEnv(Time.SYSTEM, AdminClientConfig.RETRIES_CONFIG, "1")) {
        env.kafkaClient().setNodeApiVersions(
            NodeApiVersions.create(ApiKeys.UNREGISTER_BROKER.id, (short) 0, (short) 0));
        // One response for the initial attempt, one for the single allowed retry.
        env.kafkaClient().prepareResponse(
            prepareUnregisterBrokerResponse(Errors.REQUEST_TIMED_OUT, 0));
        env.kafkaClient().prepareResponse(
            prepareUnregisterBrokerResponse(Errors.REQUEST_TIMED_OUT, 0));

        UnregisterBrokerResult result = env.adminClient().unregisterBroker(nodeId);

        // Validate response
        assertNotNull(result.all());
        TestUtils.assertFutureThrows(result.all(),
            Errors.REQUEST_TIMED_OUT.exception().getClass());
    }
}
/**
 * Health-checks every Spring-managed DataSource: opens a connection, reads
 * type info from the database metadata, and reports URL/product/version per
 * data source. Returns UNKNOWN when no Spring context or no data sources are
 * available; ERROR when a data source yields no type info or throws.
 *
 * @return aggregated status across all discovered data sources
 */
@Override
public Status check() {
    if (applicationContext == null) {
        // Lazily resolve the Spring context from the application model.
        SpringExtensionInjector springExtensionInjector =
            SpringExtensionInjector.get(applicationModel);
        applicationContext = springExtensionInjector.getContext();
    }
    if (applicationContext == null) {
        return new Status(Status.Level.UNKNOWN);
    }
    Map<String, DataSource> dataSources =
        applicationContext.getBeansOfType(DataSource.class, false, false);
    if (CollectionUtils.isEmptyMap(dataSources)) {
        return new Status(Status.Level.UNKNOWN);
    }
    Status.Level level = Status.Level.OK;
    StringBuilder buf = new StringBuilder();
    for (Map.Entry<String, DataSource> entry : dataSources.entrySet()) {
        DataSource dataSource = entry.getValue();
        if (buf.length() > 0) {
            buf.append(", ");
        }
        buf.append(entry.getKey());
        // try-with-resources guarantees connection/result-set cleanup per source.
        try (Connection connection = dataSource.getConnection()) {
            DatabaseMetaData metaData = connection.getMetaData();
            try (ResultSet resultSet = metaData.getTypeInfo()) {
                // An empty type-info result set indicates an unhealthy source.
                if (!resultSet.next()) {
                    level = Status.Level.ERROR;
                }
            }
            buf.append(metaData.getURL());
            buf.append('(');
            buf.append(metaData.getDatabaseProductName());
            buf.append('-');
            buf.append(metaData.getDatabaseProductVersion());
            buf.append(')');
        } catch (Throwable e) {
            logger.warn(CONFIG_WARN_STATUS_CHECKER, "", "", e.getMessage(), e);
            // First failure aborts the scan and reports the failing source.
            return new Status(level, e.getMessage());
        }
    }
    return new Status(level, buf.toString());
}
/** Without a Spring application context the checker must report UNKNOWN. */
@Test
void testWithoutApplicationContext() {
    Status status = dataSourceStatusChecker.check();
    assertThat(status.getLevel(), is(Status.Level.UNKNOWN));
}
/**
 * Accumulates the given values into the series stored under {@code metricId},
 * creating an all-zero series of matching length on first use.
 *
 * @param metricId id of the metric to accumulate into
 * @param metricValuesToAdd values to add; must not be null and must match the
 *        existing series length when any series already exists
 * @throws IllegalArgumentException on a length mismatch
 */
public void add(short metricId, MetricValues metricValuesToAdd) {
  validateNotNull(metricValuesToAdd, "The metric values to be added cannot be null");
  boolean lengthMismatch =
      !_metricValues.isEmpty() && metricValuesToAdd.length() != length();
  if (lengthMismatch) {
    throw new IllegalArgumentException("The existing metric length is " + length()
        + " which is different from the metric length of " + metricValuesToAdd.length()
        + " that is being added.");
  }
  // Create-on-first-use, then accumulate element-wise into the stored series.
  _metricValues
      .computeIfAbsent(metricId, id -> new MetricValues(metricValuesToAdd.length()))
      .add(metricValuesToAdd);
}
/**
 * Adding an AggregatedMetricValues instance to itself must double every
 * stored value in place (the fixture stores value j at index j, so the
 * result is 2*j at each index).
 */
@Test
public void testAdd() {
    Map<Short, MetricValues> valuesByMetricId = getValuesByMetricId();
    AggregatedMetricValues aggregatedMetricValues =
        new AggregatedMetricValues(valuesByMetricId);
    // Self-add: every series is accumulated onto itself.
    aggregatedMetricValues.add(aggregatedMetricValues);
    for (Map.Entry<Short, MetricValues> entry : valuesByMetricId.entrySet()) {
        MetricValues values = entry.getValue();
        for (int j = 0; j < 10; j++) {
            assertEquals(2 * j, values.get(j), 0.01);
        }
    }
}
public Optional<String> validate(MonitoringInfo monitoringInfo) { if (monitoringInfo.getUrn().isEmpty() || monitoringInfo.getType().isEmpty()) { return Optional.of( String.format( "MonitoringInfo requires both urn %s and type %s to be specified.", monitoringInfo.getUrn(), monitoringInfo.getType())); } // Skip checking unknown MonitoringInfos Map<String, Set<String>> typeToRequiredLabels = REQUIRED_LABELS.get(monitoringInfo.getUrn()); if (typeToRequiredLabels == null) { return Optional.empty(); } Set<String> requiredLabels = typeToRequiredLabels.get(monitoringInfo.getType()); if (requiredLabels == null) { return Optional.empty(); } // TODO(ajamato): Tighten this restriction to use set equality, to catch unused if (!monitoringInfo.getLabelsMap().keySet().containsAll(requiredLabels)) { return Optional.of( String.format( "MonitoringInfo with urn: %s should have labels: %s, actual: %s", monitoringInfo.getUrn(), requiredLabels, monitoringInfo.getLabelsMap())); } return Optional.empty(); }
/**
 * Three well-formed MonitoringInfos — a user counter (with an empty but
 * present NAMESPACE label), a user distribution carrying an extra
 * non-required label, and an element-count metric — must all pass validation.
 */
@Test
public void validateReturnsNoErrorOnValidMonitoringInfo() {
    // User counter: empty label values still count as "present".
    MonitoringInfo testInput =
        MonitoringInfo.newBuilder()
            .setUrn(Urns.USER_SUM_INT64)
            .putLabels(MonitoringInfoConstants.Labels.NAME, "anyCounter")
            .putLabels(MonitoringInfoConstants.Labels.NAMESPACE, "")
            .putLabels(MonitoringInfoConstants.Labels.PTRANSFORM, "anyString")
            .setType(TypeUrns.SUM_INT64_TYPE)
            .build();
    assertFalse(new SpecMonitoringInfoValidator().validate(testInput).isPresent());

    // User distribution: extra labels beyond the required set are tolerated.
    testInput =
        MonitoringInfo.newBuilder()
            .setUrn(Urns.USER_DISTRIBUTION_INT64)
            .putLabels(MonitoringInfoConstants.Labels.NAME, "anyDistribution")
            .putLabels(MonitoringInfoConstants.Labels.NAMESPACE, "namespace")
            .putLabels(MonitoringInfoConstants.Labels.PTRANSFORM, "anyString")
            .setType(TypeUrns.DISTRIBUTION_INT64_TYPE)
            .putLabels("dummy", "value")
            .build();
    assertFalse(new SpecMonitoringInfoValidator().validate(testInput).isPresent());

    // Element count with its required PTRANSFORM/PCOLLECTION labels.
    testInput =
        MonitoringInfo.newBuilder()
            .setUrn(MonitoringInfoConstants.Urns.ELEMENT_COUNT)
            .setType(TypeUrns.SUM_INT64_TYPE)
            .putLabels(MonitoringInfoConstants.Labels.PTRANSFORM, "value")
            .putLabels(MonitoringInfoConstants.Labels.PCOLLECTION, "anotherValue")
            .build();
    assertFalse(new SpecMonitoringInfoValidator().validate(testInput).isPresent());
}
/**
 * Processes a change-stream heartbeat record: claims the record's timestamp
 * on the restriction, bumps the heartbeat metric and advances the watermark.
 *
 * @param partition          partition being processed (used for log context)
 * @param record             the heartbeat record
 * @param tracker            restriction tracker over the timestamp range
 * @param watermarkEstimator manual estimator advanced to the record timestamp
 * @return {@code Optional.of(stop())} when the claim fails (restriction is
 *         exhausted); {@code Optional.empty()} to continue processing
 */
@VisibleForTesting
public Optional<ProcessContinuation> run(
    PartitionMetadata partition,
    HeartbeatRecord record,
    RestrictionTracker<TimestampRange, Timestamp> tracker,
    ManualWatermarkEstimator<Instant> watermarkEstimator) {
  final String token = partition.getPartitionToken();
  LOG.debug("[{}] Processing heartbeat record {}", token, record);

  final Timestamp timestamp = record.getTimestamp();
  final Instant timestampInstant = new Instant(timestamp.toSqlTimestamp().getTime());
  // A failed claim means the restriction no longer covers this timestamp:
  // stop without touching metrics or the watermark.
  if (!tracker.tryClaim(timestamp)) {
    LOG.debug("[{}] Could not claim queryChangeStream({}), stopping", token, timestamp);
    return Optional.of(ProcessContinuation.stop());
  }
  metrics.incHeartbeatRecordCount();
  watermarkEstimator.setWatermark(timestampInstant);

  LOG.debug("[{}] Heartbeat record action completed successfully", token);
  return Optional.empty();
}
/**
 * When the tracker claims the heartbeat timestamp, the action must return
 * empty (continue) and advance the watermark to that timestamp.
 */
@Test
public void testRestrictionClaimed() {
    final String partitionToken = "partitionToken";
    final Timestamp timestamp = Timestamp.ofTimeMicroseconds(10L);

    // Stub a successful claim for this exact timestamp.
    when(tracker.tryClaim(timestamp)).thenReturn(true);
    when(partition.getPartitionToken()).thenReturn(partitionToken);

    final Optional<ProcessContinuation> maybeContinuation =
        action.run(partition, new HeartbeatRecord(timestamp, null), tracker, watermarkEstimator);

    assertEquals(Optional.empty(), maybeContinuation);
    verify(watermarkEstimator).setWatermark(new Instant(timestamp.toSqlTimestamp().getTime()));
}
@Override public int run() throws IOException { Preconditions.checkArgument(sourceFiles != null && !sourceFiles.isEmpty(), "Missing file name"); // Ensure all source files have the columns specified first Map<String, Schema> schemas = new HashMap<>(); for (String sourceFile : sourceFiles) { Schema schema = getAvroSchema(sourceFile); schemas.put(sourceFile, Expressions.filterSchema(schema, columns)); } long totalStartTime = System.currentTimeMillis(); long totalCount = 0; for (String sourceFile : sourceFiles) { long startTime = System.currentTimeMillis(); Iterable<Object> reader = openDataFile(sourceFile, schemas.get(sourceFile)); boolean threw = true; long count = 0; try { for (Object record : reader) { count += 1; } threw = false; } catch (RuntimeException e) { throw new RuntimeException("Failed on record " + count + " in " + sourceFile, e); } finally { if (reader instanceof Closeable) { Closeables.close((Closeable) reader, threw); } } totalCount += count; if (1 < sourceFiles.size()) { long endTime = System.currentTimeMillis(); console.info("Scanned " + count + " records from " + sourceFile + " in " + (endTime - startTime) / 1000.0 + " s"); } } long totalEndTime = System.currentTimeMillis(); console.info("Scanned " + totalCount + " records from " + sourceFiles.size() + " file(s)"); console.info("Time: " + (totalEndTime - totalStartTime) / 1000.0 + " s"); return 0; }
/** Scanning the same parquet file twice (two source entries) must succeed. */
@Test
public void testScanCommandWithMultipleSourceFiles() throws IOException {
    File file = parquetFile();
    String path = file.getAbsolutePath();

    ScanCommand command = new ScanCommand(createLogger());
    command.sourceFiles = Arrays.asList(path, path);
    command.setConf(new Configuration());

    Assert.assertEquals(0, command.run());
}
/**
 * Lists a directory over FTP using the MLSD command.
 *
 * <p>Protocol ordering matters: CWD to the target directory first, then force
 * ASCII transfer type (MLSD replies travel over the data connection), then
 * issue MLSD via the data-connection executor and parse the reply lines.
 *
 * @param directory directory to list
 * @param listener  progress listener forwarded to the reader
 * @return the parsed directory listing
 * @throws BackgroundException on any FTP or I/O failure
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener)
        throws BackgroundException {
    try {
        if(!session.getClient().changeWorkingDirectory(directory.getAbsolute())) {
            throw new FTPException(session.getClient().getReplyCode(),
                    session.getClient().getReplyString());
        }
        if(!session.getClient().setFileType(FTPClient.ASCII_FILE_TYPE)) {
            // Set transfer type for traditional data socket file listings. The data transfer is over the
            // data connection in type ASCII or type EBCDIC.
            throw new FTPException(session.getClient().getReplyCode(),
                    session.getClient().getReplyString());
        }
        final List<String> list = new DataConnectionActionExecutor(session)
                .data(new DataConnectionAction<List<String>>() {
            @Override
            public List<String> execute() throws BackgroundException {
                try {
                    return session.getClient().list(FTPCmd.MLSD);
                }
                catch(IOException e) {
                    throw new FTPExceptionMappingService().map(e);
                }
            }
        });
        return reader.read(directory, list);
    }
    catch(IOException e) {
        throw new FTPExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
}
/** MLSD on a fresh session's working directory must return an empty listing. */
@Test
public void testListEmpty() throws Exception {
    final ListService service = new FTPMlsdListService(session);
    final Path workdir = new FTPWorkdirService(session).find();
    assertTrue(service.list(workdir, new DisabledListProgressListener()).isEmpty());
}
/**
 * Fetches a side input for the given view/window, serving from the side-input
 * cache and loading from Windmill on miss.
 *
 * <p>When the caller asserts the input is KNOWN_READY, a cached-but-not-ready
 * entry is stale and is invalidated before reloading; otherwise the plain
 * get-or-load path is used.
 *
 * @param view                    the side-input view
 * @param sideWindow              window of the side input
 * @param stateFamily             state family for the Windmill read
 * @param state                   caller's knowledge of readiness
 * @param scopedReadStateSupplier scoping for the Windmill read
 * @return the (possibly not-ready) side input
 * @throws RuntimeException wrapping any load failure
 */
public <T> SideInput<T> fetchSideInput(
    PCollectionView<T> view,
    BoundedWindow sideWindow,
    String stateFamily,
    SideInputState state,
    Supplier<Closeable> scopedReadStateSupplier) {
  Callable<SideInput<T>> loadSideInputFromWindmill =
      () -> loadSideInputFromWindmill(view, sideWindow, stateFamily, scopedReadStateSupplier);
  SideInputCache.Key<T> sideInputCacheKey =
      SideInputCache.Key.create(
          getInternalTag(view), sideWindow, getViewFn(view).getTypeDescriptor());

  try {
    if (state == SideInputState.KNOWN_READY) {
      Optional<SideInput<T>> existingCacheEntry = sideInputCache.get(sideInputCacheKey);
      // Not cached yet: load normally.
      if (!existingCacheEntry.isPresent()) {
        return sideInputCache.getOrLoad(sideInputCacheKey, loadSideInputFromWindmill);
      }
      // Cached but marked not ready: stale, since the caller knows it is
      // ready now — drop the entry and load a fresh one.
      if (!existingCacheEntry.get().isReady()) {
        return sideInputCache.invalidateThenLoadNewEntry(
            sideInputCacheKey, loadSideInputFromWindmill);
      }
      return existingCacheEntry.get();
    }

    return sideInputCache.getOrLoad(sideInputCacheKey, loadSideInputFromWindmill);
  } catch (Exception e) {
    LOG.error("Fetch failed: ", e);
    throw new RuntimeException("Exception while fetching side input: ", e);
  }
}
/**
 * Exercises the three fetch states in sequence: first the data is not ready
 * (two UNKNOWN fetches hit the server), then KNOWN_READY loads the real
 * value, then a second KNOWN_READY fetch is served from cache — so the
 * server sees exactly two requests.
 */
@Test
public void testFetchGlobalDataBasic() throws Exception {
    SideInputStateFetcherFactory factory =
        SideInputStateFetcherFactory.fromOptions(
            PipelineOptionsFactory.as(DataflowStreamingPipelineOptions.class));
    SideInputStateFetcher fetcher = factory.createSideInputStateFetcher(server::getSideInputData);

    // Encode the singleton ["data"] the way the runner would.
    ByteStringOutputStream stream = new ByteStringOutputStream();
    ListCoder.of(StringUtf8Coder.of())
        .encode(Collections.singletonList("data"), stream, Coder.Context.OUTER);
    ByteString encodedIterable = stream.toByteString();

    PCollectionView<String> view =
        TestPipeline.create().apply(Create.empty(StringUtf8Coder.of())).apply(View.asSingleton());

    String tag = view.getTagInternal().getId();

    // Test three calls in a row. First, data is not ready, then data is ready,
    // then the data is already cached.
    when(server.getSideInputData(any(Windmill.GlobalDataRequest.class)))
        .thenReturn(
            buildGlobalDataResponse(tag, false, null),
            buildGlobalDataResponse(tag, true, encodedIterable));

    assertFalse(
        fetcher
            .fetchSideInput(
                view, GlobalWindow.INSTANCE, STATE_FAMILY, SideInputState.UNKNOWN,
                readStateSupplier)
            .isReady());
    assertFalse(
        fetcher
            .fetchSideInput(
                view, GlobalWindow.INSTANCE, STATE_FAMILY, SideInputState.UNKNOWN,
                readStateSupplier)
            .isReady());
    assertEquals(
        "data",
        fetcher
            .fetchSideInput(
                view, GlobalWindow.INSTANCE, STATE_FAMILY, SideInputState.KNOWN_READY,
                readStateSupplier)
            .value()
            .orElse(null));
    assertEquals(
        "data",
        fetcher
            .fetchSideInput(
                view, GlobalWindow.INSTANCE, STATE_FAMILY, SideInputState.KNOWN_READY,
                readStateSupplier)
            .value()
            .orElse(null));

    // Only the first not-ready fetch and the first KNOWN_READY reload reach Windmill.
    verify(server, times(2)).getSideInputData(buildGlobalDataRequest(tag));
    verifyNoMoreInteractions(server);
}
/**
 * Intercepts an outbound Jetty HTTP request before it is sent: only wrapped
 * requests ({@code JettyClientWrapper}) are routed through the invoker
 * service for service discovery; on success the original send is skipped
 * ({@code context.skip(null)}), on failure the exception is surfaced.
 *
 * @param context the interception context wrapping the Jetty Request
 * @return the (possibly modified) context
 */
@Override
protected ExecuteContext doBefore(ExecuteContext context) {
    LogUtils.printHttpRequestBeforePoint(context);
    Request request = (Request) context.getObject();
    if (LOGGER.isLoggable(Level.FINE)) {
        // MessageFormat pattern: '' is an escaped single quote.
        LOGGER.log(Level.FINE, "Request''s classloader is {0}, jettyClientWrapper''s classloader is {1}.",
                new Object[]{Request.class.getClassLoader().getClass().getName(),
                        JettyClientWrapper.class.getClassLoader().getClass().getName()});
    }
    // Only requests created through the wrapper participate in routing.
    if (!(request instanceof JettyClientWrapper)) {
        return context;
    }
    String url = request.getScheme() + HttpConstants.HTTP_URL_DOUBLE_SLASH + request.getHost()
            + request.getPath();
    Map<String, String> urlInfo = RequestInterceptorUtils.recoverUrl(url);
    RequestInterceptorUtils.printRequestLog("webClient(jetty)", urlInfo);
    Optional<Object> result = invokerService.invoke(
            invokerContext -> buildInvokerFunc(context, invokerContext, request,
                    urlInfo.get(HttpConstants.HTTP_URI_PATH)),
            ex -> ex,
            urlInfo.get(HttpConstants.HTTP_URI_SERVICE));
    if (result.isPresent()) {
        Object obj = result.get();
        if (obj instanceof Exception) {
            LOGGER.log(Level.SEVERE, "Webclient(jetty) request is error, url is " + url,
                    (Exception) obj);
            context.setThrowableOut((Exception) obj);
            return context;
        }
    }
    // The method returns void
    context.skip(null);
    return context;
}
/**
 * Normal-path interception: a wrapped request to HELLO_URI must be rewritten
 * by the interceptor to the resolved downstream instance (127.0.0.1:8010)
 * while keeping the original path.
 */
@Test
public void test() {
    // Test for normal conditions
    JettyClientWrapper wrapper = Mockito.spy(new JettyClientWrapper(Mockito.mock(HttpClient.class),
            new HttpConversation(), HELLO_URI));
    // Force a resolvable service host on the wrapper before interception.
    ReflectUtils.setFieldValue(wrapper, HttpConstants.HTTP_URI_HOST, "www.domain.com");
    ExecuteContext context = ExecuteContext.forMemberMethod(wrapper, method, arguments, null, null);
    // Stub out the actual network send.
    Mockito.doNothing().when(wrapper).send(Mockito.isA(CompleteListener.class));
    interceptor.doBefore(context);
    Assert.assertEquals("127.0.0.1", wrapper.getHost());
    Assert.assertEquals(8010, wrapper.getPort());
    Assert.assertEquals("/hello", wrapper.getPath());
}
/**
 * Retrieves all issue comments newer than {@code lastCommentId}, oldest
 * first, and advances {@code lastCommentId} to the newest comment seen.
 *
 * @return new comments in ascending id order (may be empty)
 */
@SuppressWarnings("ConstantConditions")
private Queue<Comment> getComments() {
    LOG.debug("Start: Jira NewCommentsConsumer: retrieving issue comments. Last comment id: {}",
            lastCommentId);
    IssueRestClient client = getEndpoint().getClient().getIssueClient();
    // Re-fetch each issue individually: the search result does not include comments.
    LinkedList<Comment> newComments = getIssues().stream()
            .map(issue -> client.getIssue(issue.getKey()).claim())
            .flatMap(issue -> StreamSupport.stream(issue.getComments().spliterator(), false))
            .filter(comment -> comment.getId() > lastCommentId)
            .collect(Collectors.toCollection(LinkedList::new));
    // Deliver oldest comments first.
    Collections.reverse(newComments);
    // Keep the previous watermark when no new comments arrived (orElse).
    lastCommentId = newComments.stream().mapToLong(Comment::getId).max().orElse(lastCommentId);
    LOG.debug("End: Jira NewCommentsConsumer: retrieving issue comments. {} new comments since last run.",
            newComments.size());
    return newComments;
}
/**
 * With one issue carrying 3000 comments and one without any, the consumer
 * must emit all 3000 comments ordered from oldest to newest.
 */
@Test
public void singleIssueCommentsTest() throws Exception {
    Issue issueWithComments = createIssueWithComments(11L, 3000);
    Issue issueWithNoComments = createIssue(51L);
    List<Issue> newIssues = List.of(issueWithComments, issueWithNoComments);
    SearchResult result = new SearchResult(0, 50, 2, newIssues);
    when(searchRestClient.searchJql(any(), any(), any(), any())).thenReturn(Promises.promise(result));
    // Each per-issue fetch resolves to the same stub issue (comments attached).
    newIssues.forEach(issue -> when(issueRestClient.getIssue(eq(issue.getKey())))
            .then(inv -> Promises.promise(issue)));
    //clearInvocations(issueRestClient);
    List<Comment> comments = new ArrayList<>();
    newIssues.forEach(issue -> issue.getComments().forEach(comments::add));
    // reverse the order, from oldest comment to recent
    Collections.reverse(comments);
    // expect 3000 comments
    mockResult.expectedBodiesReceived(comments);
    mockResult.assertIsSatisfied();
}
/**
 * Resolves the class bound to the first generic type parameter of the given
 * class, delegating to the index-based overload with index 0.
 *
 * @param cls class whose generic type argument is resolved
 * @return the class bound to the first generic type parameter
 */
public static Class<?> getGenericClass(Class<?> cls) {
    return getGenericClass(cls, 0);
}
/** Foo1 binds its first generic type parameter to String. */
@Test
void testGetGenericClass() {
    Class<?> resolved = ReflectUtils.getGenericClass(Foo1.class);
    assertThat(resolved, sameInstance(String.class));
}
/**
 * Formats Alluxio state for the given process mode.
 *
 * <p>MASTER: formats the journal, creating a journal for every master
 * service. WORKER: wipes and recreates the worker data directory for every
 * configured tier/path.
 *
 * @param mode        which process's state to format
 * @param alluxioConf configuration used to resolve the worker data directory
 * @throws IOException on filesystem failures
 * @throws RuntimeException on an unrecognized mode
 */
public static void format(Mode mode, AlluxioConfiguration alluxioConf) throws IOException {
    NoopUfsManager noopUfsManager = new NoopUfsManager();
    switch (mode) {
        case MASTER:
            URI journalLocation = JournalUtils.getJournalLocation();
            LOG.info("Formatting master journal: {}", journalLocation);
            JournalSystem journalSystem = new JournalSystem.Builder()
                    .setLocation(journalLocation).build(CommonUtils.ProcessType.MASTER);
            // Each master service gets its own (no-op-backed) journal before formatting.
            for (String masterServiceName : ServiceUtils.getMasterServiceNames()) {
                journalSystem.createJournal(new NoopMaster(masterServiceName, noopUfsManager));
            }
            journalSystem.format();
            break;
        case WORKER:
            String workerDataFolder = Configuration.getString(PropertyKey.WORKER_DATA_FOLDER);
            LOG.info("Formatting worker data folder: {}", workerDataFolder);
            int storageLevels = Configuration.getInt(PropertyKey.WORKER_TIERED_STORE_LEVELS);
            for (int level = 0; level < storageLevels; level++) {
                PropertyKey tierLevelDirPath =
                        PropertyKey.Template.WORKER_TIERED_STORE_LEVEL_DIRS_PATH.format(level);
                // A tier may span multiple comma-separated directories.
                String[] dirPaths = Configuration.getString(tierLevelDirPath).split(",");
                String name = "Data path for tier " + level;
                for (String dirPath : dirPaths) {
                    String dirWorkerDataFolder =
                            CommonUtils.getWorkerDataDirectory(dirPath, alluxioConf);
                    LOG.info("Formatting {}:{}", name, dirWorkerDataFolder);
                    formatWorkerDataFolder(dirWorkerDataFolder);
                }
            }
            break;
        default:
            throw new RuntimeException(String.format("Unrecognized format mode: %s", mode));
    }
}
/**
 * Formatting in WORKER mode must succeed against a single-tier store whose
 * directory is pre-populated with a subdirectory and a file.
 */
@Test
public void formatWorker() throws Exception {
    final int storageLevels = 1;
    final String perms = "rwx------";
    String workerDataFolder;
    final File[] dirs = new File[] { mTemporaryFolder.newFolder("level0") };
    // Seed each tier directory with content so format has something to clear.
    for (File dir : dirs) {
        workerDataFolder = CommonUtils.getWorkerDataDirectory(dir.getPath(), Configuration.global());
        FileUtils.createDir(PathUtils.concatPath(workerDataFolder, "subdir"));
        FileUtils.createFile(PathUtils.concatPath(workerDataFolder, "file"));
    }
    // Temporarily override the tier configuration for the duration of the format.
    try (Closeable r = new ConfigurationRule(new HashMap<PropertyKey, Object>() {
        {
            put(PropertyKey.Template.WORKER_TIERED_STORE_LEVEL_DIRS_PATH.format(0), dirs[0].getPath());
            put(PropertyKey.WORKER_TIERED_STORE_LEVELS, storageLevels);
            put(PropertyKey.WORKER_DATA_FOLDER_PERMISSIONS, perms);
        }
    }, Configuration.modifiableGlobal()).toResource()) {
        Format.format(Format.Mode.WORKER, Configuration.global());
    }
}
/**
 * Parses a FEEL range literal such as {@code "[1..10)"} into a {@link Range}.
 *
 * Validation steps, in order: boundary characters, the {@code ..} separator,
 * non-blank endpoints, endpoint literal parsing, endpoint value evaluation,
 * and type equivalence of the two endpoints. Any failure yields a
 * FEELFnResult error carrying an InvalidParametersEvent.
 *
 * @param from the textual range literal; must not be null or blank
 * @return the parsed Range, or an error result describing the first violation
 */
public FEELFnResult<Range> invoke(@ParameterName("from") String from) {
    // String.isBlank() is already true for the empty string, so one check suffices.
    if (from == null || from.isBlank()) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "cannot be null"));
    }
    // '(' and ']' both denote an open start boundary in FEEL; '[' is closed.
    Range.RangeBoundary startBoundary;
    if (from.startsWith("(") || from.startsWith("]")) {
        startBoundary = RangeBoundary.OPEN;
    } else if (from.startsWith("[")) {
        startBoundary = RangeBoundary.CLOSED;
    } else {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "does not start with a valid character"));
    }
    // Symmetrically, ')' and '[' close the range openly; ']' is closed.
    Range.RangeBoundary endBoundary;
    if (from.endsWith(")") || from.endsWith("[")) {
        endBoundary = RangeBoundary.OPEN;
    } else if (from.endsWith("]")) {
        endBoundary = RangeBoundary.CLOSED;
    } else {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "does not end with a valid character"));
    }
    String[] split = from.split("\\.\\.");
    if (split.length != 2) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "does not include two literals separated by `..` two dots characters"));
    }
    // Strip the boundary characters to obtain the raw endpoint literals.
    String leftString = split[0].substring(1);
    String rightString = split[1].substring(0, split[1].length() - 1);
    if (leftString.isBlank() && rightString.isBlank()) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "at least one endpoint must not be null"));
    }
    BaseNode leftNode = parse(leftString);
    if (!nodeIsAllowed(leftNode)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "left endpoint is not a recognised valid literal"));
    }
    BaseNode rightNode = parse(rightString);
    if (!nodeIsAllowed(rightNode)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "right endpoint is not a recognised valid literal"));
    }
    Object left = leftNode.evaluate(getStubbed());
    if (!nodeValueIsAllowed(left)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "left endpoint is not a valid value " + left.getClass()));
    }
    Object right = rightNode.evaluate(getStubbed());
    if (!nodeValueIsAllowed(right)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "right endpoint is not a valid value " + right.getClass()));
    }
    if (!nodesReturnsSameType(left, right)) {
        return FEELFnResult.ofError(new InvalidParametersEvent(FEELEvent.Severity.ERROR, "from", "endpoints must be of equivalent types"));
    }
    return FEELFnResult.ofResult(new RangeImpl(startBoundary, (Comparable) left, (Comparable) right, endBoundary));
}
/** Ranges whose two endpoints evaluate to different types must produce InvalidParametersEvent errors. */
@Test
void invokeDifferentTypes() {
    List<String> from = Arrays.asList("[1..\"cheese\"]", "[1..date(\"1978-09-12\")]",
        "[1..date(\"1978-09-12\")]", "[1..\"upper case(\"aBc4\")\"]");
    from.forEach(it -> FunctionTestUtil.assertResultError(rangeFunction.invoke(it),
        InvalidParametersEvent.class, it));
}
/**
 * Lazily loads and returns the SonarQube version string.
 *
 * NOTE(review): the lazy initialization is not synchronized — concurrent
 * first calls may invoke loadVersion() more than once. Presumably harmless
 * if loadVersion() is idempotent; confirm.
 */
public static String getSonarqubeVersion() {
    if (sonarqubeVersion == null) {
        loadVersion();
    }
    return sonarqubeVersion;
}
/** The lazily loaded version string must never be empty. */
@Test
public void getSonarQubeVersion_must_not_return_an_empty_string() {
    assertThat(SonarQubeVersionHelper.getSonarqubeVersion()).isNotEmpty();
}
/**
 * Emits the {@code <instance-tracking>} section of the generated XML config:
 * enabled flag, file name, and format pattern.
 */
private static void instanceTrackingConfig(XmlGenerator gen, Config config) {
    final InstanceTrackingConfig tracking = config.getInstanceTrackingConfig();
    final XmlGenerator section = gen.open("instance-tracking", "enabled", tracking.isEnabled());
    section.node("file-name", tracking.getFileName());
    section.node("format-pattern", tracking.getFormatPattern());
    section.close();
}
/**
 * Instance-tracking settings (including $HZ_INSTANCE_TRACKING/$RND placeholders)
 * must survive an XML generate-and-reparse round trip.
 */
@Test
public void testInstanceTrackingConfig() {
    Config config = new Config();
    config.getInstanceTrackingConfig()
        .setEnabled(true)
        .setFileName("/dummy/file")
        .setFormatPattern("dummy-pattern with $HZ_INSTANCE_TRACKING{placeholder} and $RND{placeholder}");
    InstanceTrackingConfig generatedConfig = getNewConfigViaXMLGenerator(config).getInstanceTrackingConfig();
    assertTrue(generatedConfig + " should be compatible with " + config.getInstanceTrackingConfig(),
        new InstanceTrackingConfigChecker().check(config.getInstanceTrackingConfig(), generatedConfig));
}
/**
 * Builds a TaskExecutorResourceSpec from configuration after verifying that
 * every required resource option is set.
 *
 * @throws IllegalConfigurationException when a required option is missing
 */
static TaskExecutorResourceSpec resourceSpecFromConfig(Configuration config) {
    try {
        checkTaskExecutorResourceConfigSet(config);
    } catch (IllegalConfigurationException e) {
        // Re-wrap so the failure names the spec being created; the cause is preserved.
        throw new IllegalConfigurationException("Failed to create TaskExecutorResourceSpec", e);
    }
    return new TaskExecutorResourceSpec(
        new CPUResource(config.get(TaskManagerOptions.CPU_CORES)),
        config.get(TaskManagerOptions.TASK_HEAP_MEMORY),
        config.get(TaskManagerOptions.TASK_OFF_HEAP_MEMORY),
        config.get(TaskManagerOptions.NETWORK_MEMORY_MIN),
        config.get(TaskManagerOptions.MANAGED_MEMORY_SIZE),
        ExternalResourceUtils.getExternalResourcesCollection(config));
}
/**
 * For every no-default resource option, leaving exactly that option unset must
 * make resourceSpecFromConfig fail with IllegalConfigurationException.
 */
@Test
void testResourceSpecFromConfigFailsIfRequiredOptionIsNotSet() {
    TaskExecutorResourceUtils.CONFIG_OPTIONS.stream()
        .filter(option -> !option.hasDefaultValue())
        .forEach(
            option -> {
                assertThatThrownBy(
                    () -> TaskExecutorResourceUtils
                        .resourceSpecFromConfig(
                            setAllRequiredOptionsExceptOne(option)))
                    .isInstanceOf(IllegalConfigurationException.class);
            });
}
/**
 * Drops every database except 'default', then every table inside 'default',
 * after an interactive confirmation unless running dry or invoked with -yes.
 * Dry runs only print what would be dropped.
 *
 * @throws HiveMetaException if any SQL statement fails
 */
@Override
void execute() throws HiveMetaException {
    // Need to confirm unless it's a dry run or specified -yes
    if (!schemaTool.isDryRun() && !this.yes) {
        boolean confirmed = promptToConfirm();
        if (!confirmed) {
            System.out.println("Operation cancelled, exiting.");
            return;
        }
    }
    Connection conn = schemaTool.getConnectionToMetastore(true);
    try {
        try (Statement stmt = conn.createStatement()) {
            final String def = Warehouse.DEFAULT_DATABASE_NAME;
            // List databases
            List<String> databases = new ArrayList<>();
            try (ResultSet rs = stmt.executeQuery("SHOW DATABASES")) {
                while (rs.next()) {
                    databases.add(rs.getString(1));
                }
            }
            // Drop databases
            for (String database : databases) {
                // Don't try to drop 'default' database as it's not allowed
                if (!def.equalsIgnoreCase(database)) {
                    if (schemaTool.isDryRun()) {
                        System.out.println("would drop database " + database);
                    } else {
                        logIfVerbose("dropping database " + database);
                        stmt.execute(String.format("DROP DATABASE `%s` CASCADE", database));
                    }
                }
            }
            // List tables in 'default' database
            List<String> tables = new ArrayList<>();
            try (ResultSet rs = stmt.executeQuery(String.format("SHOW TABLES IN `%s`", def))) {
                while (rs.next()) {
                    tables.add(rs.getString(1));
                }
            }
            // Drop tables in 'default' database
            for (String table : tables) {
                if (schemaTool.isDryRun()) {
                    System.out.println("would drop table " + table);
                } else {
                    logIfVerbose("dropping table " + table);
                    stmt.execute(String.format("DROP TABLE `%s`.`%s`", def, table));
                }
            }
        }
    } catch (SQLException se) {
        throw new HiveMetaException("Failed to drop databases.", se);
    }
    // NOTE(review): 'conn' is never closed here — presumably schemaTool or the
    // caller owns the connection lifecycle; confirm.
}
/** Answering "y" at the prompt must drop both databases and both default-db tables, and nothing else. */
@Test
public void testExecutePromptYes() throws Exception {
    setUpTwoDatabases();
    mockPromptWith("y");
    uut.execute();
    Mockito.verify(stmtMock).execute("DROP DATABASE `mydb` CASCADE");
    Mockito.verify(stmtMock).execute(String.format("DROP TABLE `%s`.`table1`", Warehouse.DEFAULT_DATABASE_NAME));
    Mockito.verify(stmtMock).execute(String.format("DROP TABLE `%s`.`table2`", Warehouse.DEFAULT_DATABASE_NAME));
    // Exactly the three drop statements above were executed.
    Mockito.verify(stmtMock, times(3)).execute(anyString());
}
/**
 * Unsupported: this implementation is immutable.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public void setUpperRightX(float value) {
    throw new UnsupportedOperationException("Immutable class");
}
/** Mutating the immutable rectangle must throw UnsupportedOperationException. */
@Test
void testSetUpperRightX() {
    Assertions.assertThrows(UnsupportedOperationException.class, () -> rect.setUpperRightX(0));
}
/**
 * Asserts on the value of the fact with the given key.
 * Delegates to doFactValue with no explicit index.
 */
public StringSubject factValue(String key) {
    return doFactValue(key, null);
}
/** Requesting a value for a fact that has a key but no value must fail with the expected keys/values. */
@Test
public void factValueIntFailNoValue() {
    Object unused = expectFailureWhenTestingThat(simpleFact("foo")).factValue("foo", 0);
    assertFailureKeys(
        "expected to have a value",
        "for key",
        "and index",
        "but the key was present with no value",
        HOW_TO_TEST_KEYS_WITHOUT_VALUES.key);
    assertFailureValue("for key", "foo");
    assertFailureValue("and index", "0");
}
/**
 * Broadcasts changed metadata to connected websocket clients as a
 * META_DATA-group JSON payload tagged with the event type.
 */
@Override
public void onMetaDataChanged(final List<MetaData> metaDataList, final DataEventTypeEnum eventType) {
    final WebsocketData<MetaData> payload =
            new WebsocketData<>(ConfigGroupEnum.META_DATA.name(), eventType.name(), metaDataList);
    final String json = GsonUtils.getInstance().toJson(payload);
    WebsocketCollector.send(json, eventType);
}
/** A CREATE metadata change must be serialized to the exact JSON sent through WebsocketCollector. */
@Test
public void testOnMetaDataChanged() {
    String message = "{\"groupType\":\"META_DATA\",\"eventType\":\"CREATE\",\"data\":[{\"appName\":\"axiba\","
        + "\"path\":\"/test/execute\",\"rpcType\":\"http\",\"serviceName\":\"execute\",\"methodName\":"
        + "\"execute\",\"parameterTypes\":\"int\",\"rpcExt\":\"{}\",\"enabled\":true}]}";
    MockedStatic.Verification verification = () -> WebsocketCollector.send(message, DataEventTypeEnum.CREATE);
    try (MockedStatic<WebsocketCollector> mockedStatic = mockStatic(WebsocketCollector.class)) {
        mockedStatic.when(verification).thenAnswer((Answer<Void>) invocation -> null);
        websocketDataChangedListener.onMetaDataChanged(metaDataList, DataEventTypeEnum.CREATE);
        mockedStatic.verify(verification);
    }
}
/**
 * Forwards a query to another server and repackages the streamed rows.
 * Errors from the remote are propagated verbatim with their status code.
 */
@Override
public RestResponse<List<StreamedRow>> makeQueryRequest(
    final URI serverEndPoint,
    final String sql,
    final Map<String, ?> configOverrides,
    final Map<String, ?> requestProperties
) {
    // Build a target against the remote server, carrying overrides and timeout.
    final KsqlTarget target = sharedClient
        .target(serverEndPoint)
        .properties(configOverrides)
        .timeout(getQueryTimeout(configOverrides));
    final RestResponse<List<StreamedRow>> response = getTarget(target)
        .postQueryRequest(sql, requestProperties, Optional.empty());
    return response.isErroneous()
        ? RestResponse.erroneous(response.getStatusCode(), response.getErrorMessage())
        : RestResponse.successful(response.getStatusCode(), response.getResponse());
}
/** The pull-forwarding timeout from config must be applied to the remote target. */
@Test
public void shouldSetQueryTimeout() {
    // Given:
    when(ksqlConfig.getLong(KsqlConfig.KSQL_QUERY_PULL_FORWARDING_TIMEOUT_MS_CONFIG))
        .thenReturn(300L);
    // When:
    final RestResponse<List<StreamedRow>> result =
        client.makeQueryRequest(SERVER_ENDPOINT, "Sql", ImmutableMap.of(), ImmutableMap.of());
    // Then:
    verify(target).postQueryRequest("Sql", ImmutableMap.of(), Optional.empty());
    verify(target).timeout(300L);
    assertThat(result.getStatusCode(), is(queryResponse.getStatusCode()));
}
static Optional<SearchPath> fromString(String path) { if (path == null || path.isEmpty()) { return Optional.empty(); } if (path.indexOf(';') >= 0) { return Optional.empty(); // multi-level not supported at this time } try { SearchPath sp = parseElement(path); if (sp.isEmpty()) { return Optional.empty(); } else { return Optional.of(sp); } } catch (NumberFormatException | InvalidSearchPathException e) { throw new InvalidSearchPathException("Invalid search path '" + path + "'", e); } }
/** Wildcard-only and empty specs must all yield an empty Optional. */
@Test
void requreThatWildcardsAreDetected() {
    assertFalse(SearchPath.fromString("").isPresent());
    assertFalse(SearchPath.fromString("*/*").isPresent());
    assertFalse(SearchPath.fromString("/").isPresent());
    assertFalse(SearchPath.fromString("/*").isPresent());
    assertFalse(SearchPath.fromString("//").isPresent());
}
/**
 * Transitions to {@code state} with the given summary and refreshes the
 * update timestamp.
 *
 * @throws IllegalStateException when the transition is not permitted
 * @throws IllegalArgumentException when {@code summary} is null (via validateNotNull)
 */
public void update(State state, String summary) {
    if (!canTransferToState(state)) {
        throw new IllegalStateException("Cannot set the provisioner state from " + _state.toString()
            + " to " + state.toString() + ". The valid target states are "
            + Collections.unmodifiableSet(VALID_TRANSFER.get(_state)));
    }
    _state = state;
    _summary = Utils.validateNotNull(summary, "ProvisionerState summary cannot be null.");
    _updatedMs = System.currentTimeMillis();
}
/** Null summary and an illegal state transition must each throw the corresponding exception. */
@Test
public void testProvisionerStateInvalidUpdateThrowsException() {
    ProvisionerState.State originalState = ProvisionerState.State.IN_PROGRESS;
    String originalSummary = "Test summary.";
    ProvisionerState provisionerState = new ProvisionerState(originalState, originalSummary);
    ProvisionerState.State updatedState = ProvisionerState.State.COMPLETED_WITH_ERROR;
    assertThrows(IllegalArgumentException.class, () -> provisionerState.update(originalState, null));
    assertThrows(IllegalStateException.class, () -> provisionerState.update(updatedState, originalSummary));
}
/**
 * Validates a project ID: non-empty, no longer than MAX_PROJECT_ID_LENGTH,
 * and containing no illegal characters.
 *
 * @throws IllegalArgumentException when any constraint is violated
 */
public static void checkValidProjectId(String idToCheck) {
    final int length = idToCheck.length();
    if (length < MIN_PROJECT_ID_LENGTH) {
        throw new IllegalArgumentException("Project ID " + idToCheck + " cannot be empty.");
    }
    if (length > MAX_PROJECT_ID_LENGTH) {
        throw new IllegalArgumentException(
            "Project ID " + idToCheck + " cannot be longer than " + MAX_PROJECT_ID_LENGTH + " characters.");
    }
    if (ILLEGAL_PROJECT_CHARS.matcher(idToCheck).find()) {
        throw new IllegalArgumentException(
            "Project ID " + idToCheck
                + " is not a valid ID. Only letters, numbers, hyphens, single quotes, colon, dot and"
                + " exclamation points are allowed.");
    }
}
/** An empty project ID must be rejected. */
@Test
public void testCheckValidProjectIdWhenIdIsEmpty() {
    assertThrows(IllegalArgumentException.class, () -> checkValidProjectId(""));
}
/**
 * Returns the effective store name: the supplier's name when a store supplier
 * was provided, otherwise the explicitly configured name.
 */
public String storeName() {
    return storeSupplier != null ? storeSupplier.name() : storeName;
}
/** When an explicit name is given via Materialized.as, storeName() must return it. */
@Test
public void shouldUseProvidedStoreNameWhenSet() {
    final String storeName = "store-name";
    final MaterializedInternal<Object, Object, StateStore> materialized =
        new MaterializedInternal<>(Materialized.as(storeName), nameProvider, prefix);
    assertThat(materialized.storeName(), equalTo(storeName));
}
/**
 * Two PaimonTables are equal when catalog name, database name, table name,
 * and creation time all match.
 */
@Override
public boolean equals(Object o) {
    if (this == o) {
        return true;
    }
    if (o == null || getClass() != o.getClass()) {
        return false;
    }
    final PaimonTable other = (PaimonTable) o;
    return catalogName.equals(other.catalogName)
        && databaseName.equals(other.databaseName)
        && tableName.equals(other.tableName)
        && createTime == other.createTime;
}
/** equals must be reflexive, hold for identical field values, and reject null. */
@Test
public void testEquals(@Mocked FileStoreTable paimonNativeTable) {
    String dbName = "testDB";
    String tableName = "testTable";
    PaimonTable table = new PaimonTable("testCatalog", dbName, tableName, null, paimonNativeTable, 100L);
    PaimonTable table2 = new PaimonTable("testCatalog", dbName, tableName, null, paimonNativeTable, 100L);
    Assert.assertEquals(table, table2);
    Assert.assertEquals(table, table);
    Assert.assertNotEquals(table, null);
}
/**
 * Reads a Prometheus mapping YAML document from the stream and expands each
 * metric mapping into a set of MapperConfig entries.
 *
 * @throws IOException if the stream cannot be read or parsed
 */
public Set<MapperConfig> load(InputStream inputStream) throws IOException {
    final PrometheusMappingConfig config = ymlMapper.readValue(inputStream, PrometheusMappingConfig.class);
    return config.metricMappingConfigs()
        .stream()
        .flatMap(this::mapMetric)
        .collect(Collectors.toSet());
}
/** A mapping without an explicit type must load into the expected default MapperConfig. */
@Test
void defaultType() throws Exception {
    final Map<String, ImmutableList<Serializable>> config = Collections.singletonMap("metric_mappings",
        ImmutableList.of(
            ImmutableMap.of("metric_name", "test1", "match_pattern", "foo.bar")));
    assertThat(configLoader.load(new ByteArrayInputStream(objectMapper.writeValueAsBytes(config))))
        .containsExactlyInAnyOrder(new MapperConfig(
            "foo.bar",
            "gl_test1",
            ImmutableMap.of("node", "5ca1ab1e-0000-4000-a000-000000000000")));
}
/**
 * SQL scalar ST_Boundary: deserializes the geometry, delegates boundary
 * computation to the geometry library, and re-serializes the result.
 */
@Description("Returns the closure of the combinatorial boundary of this Geometry")
@ScalarFunction("ST_Boundary")
@SqlType(GEOMETRY_TYPE_NAME)
public static Slice stBoundary(@SqlType(GEOMETRY_TYPE_NAME) Slice input) {
    return serialize(deserialize(input).getBoundary());
}
/** ST_Boundary across point, line, and polygon inputs must match the documented boundary semantics. */
@Test
public void testSTBoundary() {
    // Points have empty boundaries.
    assertFunction("ST_AsText(ST_Boundary(ST_GeometryFromText('POINT (1 2)')))", VARCHAR, "GEOMETRYCOLLECTION EMPTY");
    assertFunction("ST_AsText(ST_Boundary(ST_GeometryFromText('MULTIPOINT (1 2, 2 4, 3 6, 4 8)')))", VARCHAR, "GEOMETRYCOLLECTION EMPTY");
    // Line boundaries are their endpoints.
    assertFunction("ST_AsText(ST_Boundary(ST_GeometryFromText('LINESTRING EMPTY')))", VARCHAR, "MULTIPOINT EMPTY");
    assertFunction("ST_AsText(ST_Boundary(ST_GeometryFromText('LINESTRING (8 4, 5 7)')))", VARCHAR, "MULTIPOINT ((8 4), (5 7))");
    assertFunction("ST_AsText(ST_Boundary(ST_GeometryFromText('LINESTRING (100 150,50 60, 70 80, 160 170)')))", VARCHAR, "MULTIPOINT ((100 150), (160 170))");
    assertFunction("ST_AsText(ST_Boundary(ST_GeometryFromText('MULTILINESTRING ((1 1, 5 1), (2 4, 4 4))')))", VARCHAR, "MULTIPOINT ((1 1), (2 4), (4 4), (5 1))");
    // Polygon boundaries are their rings.
    assertFunction("ST_AsText(ST_Boundary(ST_GeometryFromText('POLYGON ((1 1, 4 1, 1 4, 1 1))')))", VARCHAR, "LINESTRING (1 1, 1 4, 4 1, 1 1)");
    assertFunction("ST_AsText(ST_Boundary(ST_GeometryFromText('MULTIPOLYGON (((1 1, 1 3, 3 3, 3 1, 1 1)), ((0 0, 0 2, 2 2, 2 0, 0 0)))')))", VARCHAR, "MULTILINESTRING ((1 1, 1 3, 3 3, 3 1, 1 1), (0 0, 0 2, 2 2, 2 0, 0 0))");
}
/**
 * Folds one sample into the running variance accumulator using Welford's
 * online update: M2 += (x - meanBefore) * (x - meanAfter).
 */
@Override
public void update(double sampleValue) {
    final double meanBefore = onlineMeanCalculator.getValue();
    onlineMeanCalculator.update(sampleValue);
    numSamples++;
    final double meanAfter = onlineMeanCalculator.getValue();
    m2 += (sampleValue - meanBefore) * (sampleValue - meanAfter);
}
/** Variance is undefined for a single sample; getValue must throw NotEnoughSampleException. */
@Test
void insufficientSamples() {
    WelfordVarianceCalculator calculator = new WelfordVarianceCalculator();
    calculator.update(1.23d);
    assertThrows(NotEnoughSampleException.class, calculator::getValue);
}
/**
 * Extracts the OSNR value (metric-type "OSNR-0.1nm") for the path whose
 * response-id equals {@code name} from a connectivity reply.
 *
 * @param connectivityReply JSON reply expected to hold result.response paths
 * @param name response-id of the path of interest
 * @return the accumulative OSNR value, or -1 when no matching path/metric exists
 */
protected double getOsnr(JsonNode connectivityReply, String name) {
    double osnr = -1;
    if (connectivityReply.has("result") && connectivityReply.get("result").has("response")) {
        Iterator<JsonNode> paths = connectivityReply.get("result").get("response")
            .elements();
        while (paths.hasNext()) {
            JsonNode path = paths.next();
            if (path.get("response-id").asText().equals(name)) {
                // Iterate the metric nodes directly; no need to materialize the
                // iterator into an intermediate list first.
                Iterator<JsonNode> metrics = path.get("path-properties").get("path-metric").elements();
                while (metrics.hasNext()) {
                    JsonNode node = metrics.next();
                    if (node.has("metric-type") && node.get("metric-type").asText().equals("OSNR-0.1nm")) {
                        osnr = node.get("accumulative-value").asDouble();
                        break;
                    }
                }
                // NOTE(review): a legitimate OSNR of exactly -1 would not stop the
                // outer scan; preserved from the original sentinel design.
                if (osnr != -1) {
                    break;
                }
            }
        }
    }
    return osnr;
}
/** The OSNR for the path with response-id "second" must be read from the canned reply. */
@Test
public void testGetOsnr() throws IOException {
    double osnr = manager.getOsnr(reply, "second");
    assertEquals(23.47, osnr);
}
/**
 * Imports third-party bookmarks from the backing file, at most once.
 *
 * An MD5 checksum of the file is compared with the checksum stored in
 * preferences: on first sight the file is parsed; later, a matching checksum
 * skips the import and a changed checksum is detected but deliberately not
 * re-imported (existing bookmarks would need filtering first).
 *
 * @throws AccessDeniedException propagated from the parent implementation
 */
@Override
public void load() throws AccessDeniedException {
    final Local file = this.getFile();
    if(file.exists()) {
        if(log.isInfoEnabled()) {
            log.info(String.format("Found bookmarks file at %s", file));
        }
        Checksum current = Checksum.NONE;
        if(file.isFile()) {
            try {
                current = ChecksumComputeFactory.get(HashAlgorithm.md5).compute(file.getInputStream(), new TransferStatus());
                if(log.isDebugEnabled()) {
                    log.debug(String.format("Current checksum for %s is %s", file, current));
                }
            }
            catch(BackgroundException e) {
                // Best effort: a failed checksum leaves 'current' at Checksum.NONE.
                log.warn(String.format("Failure obtaining checksum for %s", file));
            }
        }
        if(preferences.getBoolean(this.getConfiguration())) {
            // Previously imported
            final Checksum previous = new Checksum(HashAlgorithm.md5, preferences.getProperty(String.format("%s.checksum", this.getConfiguration())));
            if(log.isDebugEnabled()) {
                log.debug(String.format("Saved previous checksum %s for bookmark %s", previous, file));
            }
            if(StringUtils.isNotBlank(previous.hash)) {
                if(previous.equals(current)) {
                    if(log.isInfoEnabled()) {
                        log.info(String.format("Skip importing bookmarks from %s with previously saved checksum %s", file, previous));
                    }
                }
                else {
                    if(log.isInfoEnabled()) {
                        log.info(String.format("Checksum changed for bookmarks file at %s", file));
                    }
                    // Should filter existing bookmarks. Skip import
                }
            }
            else {
                // Skip flagged
                if(log.isDebugEnabled()) {
                    log.debug(String.format("Skip importing bookmarks from %s", file));
                }
            }
        }
        else {
            // First import
            this.parse(ProtocolFactory.get(), file);
        }
        // Save last checksum
        // NOTE(review): 'current' is initialized to Checksum.NONE and never
        // reassigned to null, so this null check always passes; confirm intent.
        if(current != null) {
            preferences.setProperty(String.format("%s.checksum", this.getConfiguration()), current.hash);
        }
    }
    else {
        if(log.isInfoEnabled()) {
            log.info(String.format("No bookmarks file at %s", file));
        }
    }
    // Flag as imported
    super.load();
}
/**
 * First load must parse the bookmarks file; once the imported flag is set in
 * preferences, a second load must not parse again. Finally, filtering against
 * an existing bookmark collection must empty the imported set.
 */
@Test
public void testLoad() throws Exception {
    final Local source = new Local(System.getProperty("java.io.tmpdir"), new AlphanumericRandomStringService().random());
    LocalTouchFactory.get().touch(source);
    IOUtils.write(RandomUtils.nextBytes(1000), source.getOutputStream(false));
    // Flag flipped by the stubbed parse() so we can observe whether it ran.
    final AtomicBoolean r = new AtomicBoolean();
    final ThirdpartyBookmarkCollection c = new ThirdpartyBookmarkCollection() {
        @Override
        public String getName() {
            return StringUtils.EMPTY;
        }

        @Override
        public Local getFile() {
            return source;
        }

        @Override
        protected void parse(final ProtocolFactory protocols, final Local file) {
            this.add(new Host(new TestProtocol()));
            r.set(true);
        }

        @Override
        public String getBundleIdentifier() {
            return "t";
        }
    };
    c.load();
    assertTrue(r.get());
    assertEquals(0, c.iterator().next().compareTo(new Host(new TestProtocol())));
    r.set(false);
    // Mark as already imported; a subsequent load must skip parsing.
    PreferencesFactory.get().setProperty(c.getConfiguration(), true);
    c.load();
    assertFalse(r.get());
    // Modify bookmarks file
    // IOUtils.write(new RandomStringGenerator.Builder().build().generate(1), source.getOutputStream(true));
    // c.load();
    // assertTrue(r.get());
    AbstractHostCollection bookmarks = new AbstractHostCollection() {
    };
    bookmarks.add(new Host(new TestProtocol()));
    c.filter(bookmarks);
    assertTrue(c.isEmpty());
}
/**
 * Opens a seekable byte channel over this stream.
 * A fresh RedissonByteChannel instance is created on every call.
 */
@Override
public SeekableByteChannel getChannel() {
    return new RedissonByteChannel();
}
/** After seeking to position 3 and reading 3 bytes, the position and bytes read must match. */
@Test
public void testChannelPosition() throws IOException {
    RBinaryStream stream = redisson.getBinaryStream("test");
    SeekableByteChannel c = stream.getChannel();
    c.write(ByteBuffer.wrap(new byte[]{1, 2, 3, 4, 5, 6, 7}));
    c.position(3);
    ByteBuffer b = ByteBuffer.allocate(3);
    c.read(b);
    assertThat(c.position()).isEqualTo(6);
    byte[] bb = new byte[3];
    b.flip();
    b.get(bb);
    assertThat(bb).isEqualTo(new byte[]{4, 5, 6});
}
/** Returns the broker-level OpenTelemetry meter. */
public Meter getBrokerMeter() {
    return brokerMeter;
}
/** With LOG exporter type and custom labels, the metrics manager must expose a non-null broker meter. */
@Test
public void testCreateMetricsManagerLogType() throws CloneNotSupportedException {
    BrokerConfig brokerConfig = new BrokerConfig();
    brokerConfig.setMetricsExporterType(MetricsExporterType.LOG);
    brokerConfig.setMetricsLabel("label1:value1;label2:value2");
    brokerConfig.setMetricsOtelCardinalityLimit(1);
    MessageStoreConfig messageStoreConfig = new MessageStoreConfig();
    // Use a unique temp store path so parallel test runs do not collide.
    String storePathRootDir = System.getProperty("java.io.tmpdir") + File.separator + "store-" + UUID.randomUUID();
    messageStoreConfig.setStorePathRootDir(storePathRootDir);
    NettyServerConfig nettyServerConfig = new NettyServerConfig();
    nettyServerConfig.setListenPort(0);
    BrokerController brokerController =
        new BrokerController(brokerConfig, nettyServerConfig, new NettyClientConfig(), messageStoreConfig);
    brokerController.initialize();
    BrokerMetricsManager metricsManager = new BrokerMetricsManager(brokerController);
    assertThat(metricsManager.getBrokerMeter()).isNotNull();
}
/**
 * Exception path: clears the request data bound to the current thread, logs
 * the failing HTTP request, and passes the context through unchanged.
 */
@Override
public ExecuteContext onThrow(ExecuteContext context) {
    ThreadLocalUtils.removeRequestData();
    LogUtils.printHttpRequestOnThrowPoint(context);
    return context;
}
/** onThrow must clear the thread-local request data. */
@Test
public void testOnThrow() {
    ThreadLocalUtils.setRequestData(new RequestData(Collections.emptyMap(), "", ""));
    interceptor.onThrow(context);
    Assert.assertNull(ThreadLocalUtils.getRequestData());
}
/**
 * Validates {@code jsonText} against {@code schemaText}.
 * Delegates to the three-argument overload with a null namespace/context.
 *
 * @throws IOException if either document cannot be parsed
 */
public static boolean isJsonValid(String schemaText, String jsonText) throws IOException {
    return isJsonValid(schemaText, jsonText, null);
}
/** A car-array JSON document must validate against the cars schema loaded from a resource file. */
@Test
void testValidateJsonSchemaWithReferenceSuccess() {
    boolean valid = true;
    String schemaText = null;
    String jsonText = "[{\"name\": \"307\", \"model\": \"Peugeot 307\", \"year\": 2003},"
        + "{\"name\": \"jean-pierre\", \"model\": \"Peugeot Traveler\", \"year\": 2017}]";
    try {
        // Load schema from file.
        schemaText = FileUtils
            .readFileToString(new File("target/test-classes/io/github/microcks/util/openapi/cars-schema.json"));
        // Validate Json according schema.
        valid = OpenAPISchemaValidator.isJsonValid(schemaText, jsonText);
    } catch (Exception e) {
        e.printStackTrace();
        fail("Exception should not be thrown");
    }
    // Assert Json object is valid.
    assertTrue(valid);
}
/**
 * Copies matching properties from {@code source} into a freshly created
 * instance of {@code tClass}, skipping the named properties.
 *
 * @return the populated instance, or null when {@code source} is null
 */
public static <T> T copyProperties(Object source, Class<T> tClass, String... ignoreProperties) {
    if (source == null) {
        return null;
    }
    final T target = ReflectUtil.newInstanceIfPossible(tClass);
    final CopyOptions options = CopyOptions.create().setIgnoreProperties(ignoreProperties);
    copyProperties(source, target, options);
    return target;
}
/** Copying with an ignored property must transfer the rest and leave the ignored one null. */
@Test
public void copyBeanTest() {
    final Food info = new Food();
    info.setBookID("0");
    info.setCode("123");
    final Food newFood = BeanUtil.copyProperties(info, Food.class, "code");
    assertEquals(info.getBookID(), newFood.getBookID());
    assertNull(newFood.getCode());
}
/**
 * Handles ETH_GET_TRANSACTION_BY_HASH: looks up a transaction by its hash and
 * places it (or null when absent) into the message body. Node errors are
 * surfaced via checkForError and leave the body untouched.
 *
 * @throws IOException if the RPC request fails
 */
@InvokeOnHeader(Web3jConstants.ETH_GET_TRANSACTION_BY_HASH)
void ethGetTransactionByHash(Message message) throws IOException {
    String transactionHash = message.getHeader(
        Web3jConstants.TRANSACTION_HASH, configuration::getTransactionHash, String.class);
    Request<?, EthTransaction> request = web3j.ethGetTransactionByHash(transactionHash);
    setRequestId(message, request);
    EthTransaction response = request.send();
    boolean hasError = checkForError(message, response);
    if (!hasError) {
        // Optional.orElse replaces the manual isPresent()/get() ternary.
        message.setBody(response.getTransaction().orElse(null));
    }
}
/** A present transaction in the RPC response must be delivered as the exchange body. */
@Test
public void ethGetTransactionByHashTest() throws Exception {
    EthTransaction response = Mockito.mock(EthTransaction.class);
    Mockito.when(mockWeb3j.ethGetTransactionByHash(any())).thenReturn(request);
    Mockito.when(request.send()).thenReturn(response);
    Transaction transaction = Mockito.mock(Transaction.class);
    Optional<Transaction> optional = Optional.ofNullable(transaction);
    Mockito.when(response.getTransaction()).thenReturn(optional);
    Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_GET_TRANSACTION_BY_HASH);
    template.send(exchange);
    Transaction body = exchange.getIn().getBody(Transaction.class);
    assertNotNull(body);
}
/**
 * Fetches the search backend version from the root endpoint, filtered to
 * version.number and version.distribution. A missing distribution falls back
 * to ELASTICSEARCH; a missing version number yields an empty Optional.
 */
@Override
public Optional<SearchVersion> version() {
    final Request request = new Request("GET", "/?filter_path=version.number,version.distribution");
    final Optional<JsonNode> resp = Optional.of(jsonApi.perform(request, "Unable to retrieve cluster information"));
    final Optional<String> version = resp.map(r -> r.path("version")).map(r -> r.path("number")).map(JsonNode::textValue);
    final SearchVersion.Distribution distribution = resp.map(r -> r.path("version")).map(r -> r.path("distribution")).map(JsonNode::textValue)
        .map(StringUtils::toUpperCase)
        .map(SearchVersion.Distribution::valueOf)
        .orElse(SearchVersion.Distribution.ELASTICSEARCH);
    return version
        .map(this::parseVersion)
        .map(v -> SearchVersion.create(distribution, v));
}
/** An "opensearch"-distribution response must parse into an OPENSEARCH SearchVersion. */
@Test
void testOpensearchVersionFetching() throws IOException {
    mockResponse("{\"version\" : "
        + " {"
        + " \"distribution\" : \"opensearch\","
        + " \"number\" : \"1.3.1\""
        + " }"
        + "}");
    assertThat(toTest.version())
        .isNotEmpty()
        .contains(SearchVersion.create(SearchVersion.Distribution.OPENSEARCH, Version.parse("1.3.1")));
}
/**
 * Joins identifier parts with '.', escaping special characters and
 * whitespace in each part and backticking parts that require quoting.
 */
static String escapeAndJoin(List<String> parts) {
    final StringBuilder joined = new StringBuilder();
    for (String part : parts) {
        if (joined.length() > 0) {
            joined.append('.');
        }
        String escaped = ZetaSqlIdUtils.escapeSpecialChars(part);
        escaped = ZetaSqlIdUtils.replaceWhitespaces(escaped);
        joined.append(ZetaSqlIdUtils.backtickIfNeeded(escaped));
    }
    return joined.toString();
}
/** A part with backslash, backtick, quotes, and '?' must be escaped and backticked. */
@Test
public void testHandlesSpecialCharsInOnePart() {
    List<String> id = Arrays.asList("a\\ab`bc'cd\"de?e");
    assertEquals("`a\\\\ab\\`bc\\'cd\\\"de\\?e`", ZetaSqlIdUtils.escapeAndJoin(id));
}
/**
 * Copies all resolved connector properties into the job configuration while
 * withholding every credential-bearing key (password, keystore, key, URI).
 *
 * @throws HiveException if a required property is missing
 * @throws IOException on metadata resolution failure
 */
public static void copyConfigurationToJob(Properties props, Map<String, String> jobProps)
    throws HiveException, IOException {
    checkRequiredPropertiesAreDefined(props);
    resolveMetadata(props);
    for (Entry<Object, Object> entry : props.entrySet()) {
        String key = String.valueOf(entry.getKey());
        boolean secret = key.equals(CONFIG_PWD)
            || key.equals(CONFIG_PWD_KEYSTORE)
            || key.equals(CONFIG_PWD_KEY)
            || key.equals(CONFIG_PWD_URI);
        if (secret) {
            continue;
        }
        jobProps.put(key, String.valueOf(entry.getValue()));
    }
}
/** With all required JDBC settings present, every property must be copied into the job map. */
@Test
public void testWithAllRequiredSettingsDefined() throws Exception {
    Properties props = new Properties();
    props.put(JdbcStorageConfig.DATABASE_TYPE.getPropertyName(), DatabaseType.MYSQL.toString());
    props.put(JdbcStorageConfig.JDBC_URL.getPropertyName(), "jdbc://localhost:3306/hive");
    props.put(JdbcStorageConfig.QUERY.getPropertyName(), "SELECT col1,col2,col3 FROM sometable");
    props.put(JdbcStorageConfig.JDBC_DRIVER_CLASS.getPropertyName(), "com.mysql.jdbc.Driver");
    Map<String, String> jobMap = new HashMap<>();
    JdbcStorageConfigManager.copyConfigurationToJob(props, jobMap);
    assertThat(jobMap, is(notNullValue()));
    assertThat(jobMap.size(), is(equalTo(4)));
    assertThat(jobMap.get(JdbcStorageConfig.DATABASE_TYPE.getPropertyName()), is(equalTo("MYSQL")));
    assertThat(jobMap.get(JdbcStorageConfig.JDBC_URL.getPropertyName()), is(equalTo("jdbc://localhost:3306/hive")));
    assertThat(jobMap.get(JdbcStorageConfig.QUERY.getPropertyName()),
        is(equalTo("SELECT col1,col2,col3 FROM sometable")));
}
/**
 * Starts a fuzzy-comparison assertion chain over this float array using the
 * given absolute tolerance.
 */
public FloatArrayAsIterable usingTolerance(double tolerance) {
    return new FloatArrayAsIterable(tolerance(tolerance), iterableSubject());
}
/** With zero tolerance, -0.0f in the array must still match 0.0f. */
@Test
public void usingTolerance_contains_successWithNegativeZero() {
    assertThat(array(1.0f, -0.0f, 3.0f)).usingTolerance(0.0f).contains(0.0f);
}
/**
 * Handles all HTTP GET operations of the HttpFS server. Dispatches on the
 * {@code op} query parameter to the matching {@link FSOperations} command,
 * executes it as the authenticated user, and renders the result as JSON
 * (or as an octet stream for OPEN).
 *
 * @param path path of the target file/directory (made absolute below)
 * @param uriInfo request URI context, used to build redirection URLs
 * @param op the requested operation (from the {@code op} query parameter)
 * @param params parsed request parameters
 * @param request raw servlet request, used for audit logging of the caller
 * @return the HTTP response for the requested operation
 * @throws IOException on filesystem errors or an unsupported GET operation
 * @throws FileSystemAccessException on errors accessing the filesystem service
 */
@GET
@Path("{path:.*}")
@Produces({MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
public Response get(@PathParam("path") String path, @Context UriInfo uriInfo,
    @QueryParam(OperationParam.NAME) OperationParam op,
    @Context Parameters params, @Context HttpServletRequest request)
    throws IOException, FileSystemAccessException {
  // Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode
  if ((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS)
      && (op.value() != HttpFSFileSystem.Operation.LISTSTATUS)
      && accessMode == AccessMode.WRITEONLY) {
    return Response.status(Response.Status.FORBIDDEN).build();
  }
  UserGroupInformation user = HttpUserGroupInformation.get();
  Response response;
  path = makeAbsolute(path);
  // Record operation and caller address for logging context.
  MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
  MDC.put("hostname", request.getRemoteAddr());
  switch (op.value()) {
  case OPEN: {
    // With noredirect=true return the data-node URL as JSON instead of streaming.
    Boolean noRedirect = params.get(NoRedirectParam.NAME, NoRedirectParam.class);
    if (noRedirect) {
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      //Invoking the command directly using an unmanaged FileSystem that is
      // released by the FileSystemReleaseFilter
      final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
      final FileSystem fs = createFileSystem(user);
      InputStream is = null;
      UserGroupInformation ugi = UserGroupInformation
          .createProxyUser(user.getShortUserName(), UserGroupInformation.getLoginUser());
      try {
        is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
          @Override
          public InputStream run() throws Exception {
            return command.execute(fs);
          }
        });
      } catch (InterruptedException ie) {
        // Preserve the interrupt flag; the stream stays null in that case.
        LOG.warn("Open interrupted.", ie);
        Thread.currentThread().interrupt();
      }
      Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
      Long len = params.get(LenParam.NAME, LenParam.class);
      AUDIT_LOG.info("[{}] offset [{}] len [{}]", new Object[] { path, offset, len });
      InputStreamEntity entity = new InputStreamEntity(is, offset, len);
      response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
    }
    break;
  }
  case GETFILESTATUS: {
    FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS: {
    String filter = params.get(FilterParam.NAME, FilterParam.class);
    FSOperations.FSListStatus command = new FSOperations.FSListStatus(path, filter);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] filter [{}]", path, (filter != null) ? filter : "-");
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETHOMEDIRECTORY: {
    // Only valid on the root path.
    enforceRootPath(op.value(), path);
    FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("Home Directory for [{}]", user);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case INSTRUMENTATION: {
    // Only valid on the root path, and restricted to the admin group.
    enforceRootPath(op.value(), path);
    Groups groups = HttpFSServerWebApp.get().get(Groups.class);
    Set<String> userGroups = groups.getGroupsSet(user.getShortUserName());
    if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
      throw new AccessControlException("User not in HttpFSServer admin group");
    }
    Instrumentation instrumentation = HttpFSServerWebApp.get().get(Instrumentation.class);
    Map snapshot = instrumentation.getSnapshot();
    response = Response.ok(snapshot).build();
    break;
  }
  case GETCONTENTSUMMARY: {
    FSOperations.FSContentSummary command = new FSOperations.FSContentSummary(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Content summary for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETQUOTAUSAGE: {
    FSOperations.FSQuotaUsage command = new FSOperations.FSQuotaUsage(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Quota Usage for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILECHECKSUM: {
    FSOperations.FSFileChecksum command = new FSOperations.FSFileChecksum(path);
    // With noredirect=true only the redirection URL is returned; the checksum
    // is computed only on the non-redirect branch.
    Boolean noRedirect = params.get(NoRedirectParam.NAME, NoRedirectParam.class);
    AUDIT_LOG.info("[{}]", path);
    if (noRedirect) {
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      Map json = fsExecute(user, command);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    }
    break;
  }
  case GETFILEBLOCKLOCATIONS: {
    // Defaults: whole file from offset 0; only positive params override them.
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocations command =
        new FSOperations.FSFileBlockLocations(path, offset, len);
    @SuppressWarnings("rawtypes") Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("BlockLocations", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETACLSTATUS: {
    FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("ACL status for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETXATTRS: {
    List<String> xattrNames = params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
    XAttrCodec encoding = params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class);
    FSOperations.FSGetXAttrs command =
        new FSOperations.FSGetXAttrs(path, xattrNames, encoding);
    @SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttrs for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTXATTRS: {
    FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
    @SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttr names for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS_BATCH: {
    // Pagination token: resume listing after the given entry name (UTF-8 bytes).
    String startAfter = params.get(
        HttpFSParametersProvider.StartAfterParam.NAME,
        HttpFSParametersProvider.StartAfterParam.class);
    byte[] token = HttpFSUtils.EMPTY_BYTES;
    if (startAfter != null) {
      token = startAfter.getBytes(StandardCharsets.UTF_8);
    }
    FSOperations.FSListStatusBatch command = new FSOperations
        .FSListStatusBatch(path, token);
    @SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] token [{}]", path, token);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOT: {
    FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETALLSTORAGEPOLICY: {
    FSOperations.FSGetAllStoragePolicies command =
        new FSOperations.FSGetAllStoragePolicies();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTORAGEPOLICY: {
    FSOperations.FSGetStoragePolicy command =
        new FSOperations.FSGetStoragePolicy(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFF: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    FSOperations.FSGetSnapshotDiff command =
        new FSOperations.FSGetSnapshotDiff(path, oldSnapshotName, snapshotName);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFFLISTING: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    String snapshotDiffStartPath = params
        .get(HttpFSParametersProvider.SnapshotDiffStartPathParam.NAME,
            HttpFSParametersProvider.SnapshotDiffStartPathParam.class);
    Integer snapshotDiffIndex = params.get(HttpFSParametersProvider.SnapshotDiffIndexParam.NAME,
        HttpFSParametersProvider.SnapshotDiffIndexParam.class);
    FSOperations.FSGetSnapshotDiffListing command =
        new FSOperations.FSGetSnapshotDiffListing(path, oldSnapshotName,
            snapshotName, snapshotDiffStartPath, snapshotDiffIndex);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTTABLEDIRECTORYLIST: {
    FSOperations.FSGetSnapshottableDirListing command =
        new FSOperations.FSGetSnapshottableDirListing();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTLIST: {
    FSOperations.FSGetSnapshotListing command =
        new FSOperations.FSGetSnapshotListing(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSERVERDEFAULTS: {
    FSOperations.FSGetServerDefaults command =
        new FSOperations.FSGetServerDefaults();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case CHECKACCESS: {
    String mode = params.get(FsActionParam.NAME, FsActionParam.class);
    FsActionParam fsparam = new FsActionParam(mode);
    FSOperations.FSAccess command = new FSOperations.FSAccess(path,
        FsAction.getFsAction(fsparam.value()));
    // Access check has no payload: success is an empty 200, failure raises.
    fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok().build();
    break;
  }
  case GETECPOLICY: {
    FSOperations.FSGetErasureCodingPolicy command =
        new FSOperations.FSGetErasureCodingPolicy(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECPOLICIES: {
    FSOperations.FSGetErasureCodingPolicies command =
        new FSOperations.FSGetErasureCodingPolicies();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECCODECS: {
    FSOperations.FSGetErasureCodingCodecs command =
        new FSOperations.FSGetErasureCodingCodecs();
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GET_BLOCK_LOCATIONS: {
    // Legacy block-locations variant; same offset/len defaulting as above.
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocationsLegacy command =
        new FSOperations.FSFileBlockLocationsLegacy(path, offset, len);
    @SuppressWarnings("rawtypes") Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("LocatedBlocks", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILELINKSTATUS: {
    FSOperations.FSFileLinkStatus command =
        new FSOperations.FSFileLinkStatus(path);
    @SuppressWarnings("rawtypes") Map js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTATUS: {
    FSOperations.FSStatus command = new FSOperations.FSStatus(path);
    @SuppressWarnings("rawtypes") Map js = fsExecute(user, command);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOTS: {
    Boolean allUsers = params.get(AllUsersParam.NAME, AllUsersParam.class);
    FSOperations.FSGetTrashRoots command = new FSOperations.FSGetTrashRoots(allUsers);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("allUsers [{}]", allUsers);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  default: {
    throw new IOException(
        MessageFormat.format("Invalid HTTP GET operation [{0}]", op.value()));
  }
  }
  return response;
}
/**
 * Verifies that CREATE with {@code noredirect=true} returns the write
 * location as JSON (both for the initial request and for the final
 * response after the data is uploaded) instead of issuing an HTTP redirect.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testNoRedirectWithData() throws Exception {
  createHttpFSServer(false, false);
  final String path = "/file";
  final String username = HadoopUsersConfTestHelper.getHadoopUsers()[0];
  // file creation which should not redirect
  URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format(
      "/webhdfs/v1{0}?user.name={1}&op=CREATE&data=true&noredirect=true",
      path, username));
  HttpURLConnection conn = (HttpURLConnection) url.openConnection();
  conn.setRequestMethod(HttpMethod.PUT);
  conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM);
  conn.setDoOutput(true);
  conn.connect();
  // Expect 200 with a JSON body rather than a 307 redirect.
  Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
  JSONObject json = (JSONObject) new JSONParser()
      .parse(new InputStreamReader(conn.getInputStream()));
  // get the location to write
  String location = (String) json.get("Location");
  Assert.assertTrue(location.contains(DataParam.NAME));
  Assert.assertTrue(location.contains("CREATE"));
  // Second request: PUT the actual file content to the returned location.
  url = new URL(location);
  conn = (HttpURLConnection) url.openConnection();
  conn.setRequestMethod(HttpMethod.PUT);
  conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM);
  conn.setDoOutput(true);
  conn.connect();
  final String writeStr = "write some content";
  OutputStream os = conn.getOutputStream();
  os.write(writeStr.getBytes());
  os.close();
  // Verify that file got created
  Assert.assertEquals(HttpURLConnection.HTTP_CREATED, conn.getResponseCode());
  json = (JSONObject) new JSONParser()
      .parse(new InputStreamReader(conn.getInputStream()));
  location = (String) json.get("Location");
  // The final Location must point at the created file's WebHDFS path.
  Assert.assertEquals(TestJettyHelper.getJettyURL() + "/webhdfs/v1" + path, location);
}
/**
 * Replaces an existing item in the container.
 *
 * @param item               the new content of the item; must not be empty
 * @param itemId             id of the item to replace; must not be empty
 * @param partitionKey       partition key of the item; must not be empty
 * @param itemRequestOptions optional request options (not validated here,
 *                           so may be {@code null})
 * @return a {@link Mono} emitting the response for the replaced item
 * @throws IllegalArgumentException if item, itemId or partitionKey is empty
 */
public <T> Mono<CosmosItemResponse<T>> replaceItem(
    final T item, final String itemId, final PartitionKey partitionKey,
    final CosmosItemRequestOptions itemRequestOptions) {
  // Fail fast on missing mandatory arguments before touching the container.
  CosmosDbUtils.validateIfParameterIsNotEmpty(item, PARAM_ITEM);
  CosmosDbUtils.validateIfParameterIsNotEmpty(itemId, PARAM_ITEM_ID);
  CosmosDbUtils.validateIfParameterIsNotEmpty(partitionKey, PARAM_PARTITION_KEY);
  return applyToContainer(container -> container.replaceItem(item, itemId, partitionKey, itemRequestOptions));
}
@Test
void replaceItem() {
  final CosmosDbContainerOperations containerOps =
      new CosmosDbContainerOperations(Mono.just(mock(CosmosAsyncContainer.class)));
  // Each mandatory argument (item, id, partition key) is rejected when missing.
  CosmosDbTestUtils.assertIllegalArgumentException(() -> containerOps.replaceItem(null, null, null, null));
  CosmosDbTestUtils.assertIllegalArgumentException(() -> containerOps.replaceItem("test", null, null, null));
  CosmosDbTestUtils.assertIllegalArgumentException(() -> containerOps.replaceItem("test", "testid", null, null));
  // An empty item string is rejected just like a null one.
  CosmosDbTestUtils.assertIllegalArgumentException(() -> containerOps.replaceItem("", "testid", null, null));
}
/**
 * Serializes the analyze metadata (jobs, native statuses, basic and histogram
 * stats meta) to the output as a single JSON string.
 *
 * @param out destination stream
 * @throws IOException if writing fails
 */
@Override
public void write(DataOutput out) throws IOException {
  // save history
  SerializeData data = new SerializeData();
  data.jobs = getAllNativeAnalyzeJobList();
  // Only native analyze statuses are written here; non-native (external)
  // statuses are filtered out. Collecting to a Set first removes duplicates.
  data.nativeStatus = new ArrayList<>(getAnalyzeStatusMap().values().stream().
      filter(AnalyzeStatus::isNative).
      map(status -> (NativeAnalyzeStatus) status).collect(Collectors.toSet()));
  data.basicStatsMeta = new ArrayList<>(getBasicStatsMetaMap().values());
  data.histogramStatsMeta = new ArrayList<>(getHistogramStatsMetaMap().values());
  // Whole snapshot is persisted as one JSON document.
  String s = GsonUtils.GSON.toJson(data);
  Text.writeString(out, s);
}
/**
 * Round-trips an {@code ExternalAnalyzeStatus} for a Hive table through a
 * pseudo image and checks that the identifying fields survive write/read.
 */
@Test
public void testExternalAnalyzeStatusPersist() throws Exception {
  Table table = connectContext.getGlobalStateMgr().getMetadataMgr().getTable("hive0", "partitioned_db", "t1");
  ExternalAnalyzeStatus analyzeStatus = new ExternalAnalyzeStatus(100, "hive0", "partitioned_db", "t1",
      table.getUUID(),
      ImmutableList.of("c1", "c2"), StatsConstants.AnalyzeType.FULL,
      StatsConstants.ScheduleType.ONCE, Maps.newHashMap(), LocalDateTime.now());
  UtFrameUtils.PseudoImage testImage = new UtFrameUtils.PseudoImage();
  // Serialize then deserialize through the in-memory image streams.
  analyzeStatus.write(testImage.getDataOutputStream());
  ExternalAnalyzeStatus loadAnalyzeStatus = ExternalAnalyzeStatus.read(testImage.getDataInputStream());
  // Catalog/db/table identity and analyze settings must be preserved.
  Assert.assertEquals("hive0", loadAnalyzeStatus.getCatalogName());
  Assert.assertEquals("partitioned_db", loadAnalyzeStatus.getDbName());
  Assert.assertEquals("t1", loadAnalyzeStatus.getTableName());
  Assert.assertEquals(StatsConstants.AnalyzeType.FULL, loadAnalyzeStatus.getType());
  Assert.assertEquals(StatsConstants.ScheduleType.ONCE, loadAnalyzeStatus.getScheduleType());
}
@Override public void open() { super.open(); for (String propertyKey : properties.stringPropertyNames()) { LOGGER.debug("propertyKey: {}", propertyKey); String[] keyValue = propertyKey.split("\\.", 2); if (2 == keyValue.length) { LOGGER.debug("key: {}, value: {}", keyValue[0], keyValue[1]); Properties prefixProperties; if (basePropertiesMap.containsKey(keyValue[0])) { prefixProperties = basePropertiesMap.get(keyValue[0]); } else { prefixProperties = new Properties(); basePropertiesMap.put(keyValue[0].trim(), prefixProperties); } prefixProperties.put(keyValue[1].trim(), getProperty(propertyKey)); } } Set<String> removeKeySet = new HashSet<>(); for (String key : basePropertiesMap.keySet()) { if (!COMMON_KEY.equals(key)) { Properties properties = basePropertiesMap.get(key); if (!properties.containsKey(DRIVER_KEY) || !properties.containsKey(URL_KEY)) { LOGGER.error("{} will be ignored. {}.{} and {}.{} is mandatory.", key, DRIVER_KEY, key, key, URL_KEY); removeKeySet.add(key); } } } for (String key : removeKeySet) { basePropertiesMap.remove(key); } LOGGER.debug("JDBC PropertiesMap: {}", basePropertiesMap); setMaxLineResults(); setMaxRows(); //TODO(zjffdu) Set different sql splitter for different sql dialects. this.sqlSplitter = new SqlSplitter(); }
@Test
void testIncorrectStatementPrecode() throws IOException, InterpreterException {
  // Configure an in-memory H2 connection whose statement precode is not valid SQL.
  Properties interpreterProps = new Properties();
  interpreterProps.setProperty("default.driver", "org.h2.Driver");
  interpreterProps.setProperty("default.url", getJdbcConnection());
  interpreterProps.setProperty("default.user", "");
  interpreterProps.setProperty("default.password", "");
  interpreterProps.setProperty(DEFAULT_STATEMENT_PRECODE, "set incorrect");

  JDBCInterpreter interpreter = new JDBCInterpreter(interpreterProps);
  interpreter.open();

  // Even a trivial query must fail, because the broken precode runs first.
  InterpreterResult result = interpreter.interpret("select 1", context);
  assertEquals(InterpreterResult.Code.ERROR, result.code());
  assertEquals(InterpreterResult.Type.TEXT, result.message().get(0).getType());
  assertTrue(result.message().get(0).getData().contains("Syntax error"), result.toString());
}
/**
 * RPC handler: the resource manager asks this TaskExecutor to allocate a
 * slot for a job. Rejects requests from an unconnected resource manager,
 * persists the allocation, allocates the slot, and — if already connected
 * to the job master — offers the slot immediately.
 *
 * @return a future completed with {@link Acknowledge} on success, or
 *         completed exceptionally on rejection/allocation failure
 */
@Override
public CompletableFuture<Acknowledge> requestSlot(
    final SlotID slotId,
    final JobID jobId,
    final AllocationID allocationId,
    final ResourceProfile resourceProfile,
    final String targetAddress,
    final ResourceManagerId resourceManagerId,
    final Time timeout) {
  // TODO: Filter invalid requests from the resource manager by using the
  // instance/registration Id
  try (MdcCloseable ignored = MdcUtils.withContext(MdcUtils.asContextData(jobId))) {
    log.info(
        "Receive slot request {} for job {} from resource manager with leader id {}.",
        allocationId, jobId, resourceManagerId);
    // Reject requests from a resource manager we are not (or no longer) connected to.
    if (!isConnectedToResourceManager(resourceManagerId)) {
      final String message = String.format(
          "TaskManager is not connected to the resource manager %s.", resourceManagerId);
      log.debug(message);
      return FutureUtils.completedExceptionally(new TaskManagerException(message));
    }
    // Record the allocation so it can be restored after a TaskExecutor restart.
    tryPersistAllocationSnapshot(
        new SlotAllocationSnapshot(slotId, jobId, targetAddress, allocationId, resourceProfile));
    try {
      final boolean isConnected =
          allocateSlotForJob(jobId, slotId, allocationId, resourceProfile, targetAddress);
      // If the job master connection already exists, offer the new slot right away;
      // otherwise the offer happens when the connection is established.
      if (isConnected) {
        offerSlotsToJobManager(jobId);
      }
      return CompletableFuture.completedFuture(Acknowledge.get());
    } catch (SlotAllocationException e) {
      log.debug("Could not allocate slot for allocation id {}.", allocationId, e);
      return FutureUtils.completedExceptionally(e);
    }
  }
}
/**
 * Requests two slots but lets the job master accept only the first offer;
 * verifies that the rejected slot is reported back to the resource manager
 * as available, cannot be used for task submission, and can be re-requested.
 */
@Test
void testSlotAcceptance() throws Exception {
  final InstanceID registrationId = new InstanceID();
  final OneShotLatch taskExecutorIsRegistered = new OneShotLatch();
  final CompletableFuture<Tuple3<InstanceID, SlotID, AllocationID>> availableSlotFuture =
      new CompletableFuture<>();
  final TestingResourceManagerGateway resourceManagerGateway =
      createRmWithTmRegisterAndNotifySlotHooks(
          registrationId, taskExecutorIsRegistered, availableSlotFuture);
  final AllocationID allocationId1 = new AllocationID();
  final AllocationID allocationId2 = new AllocationID();
  // The job master will accept only this offer (slot index 0 / allocationId1).
  final SlotOffer offer1 = new SlotOffer(allocationId1, 0, ResourceProfile.ANY);
  final OneShotLatch offerSlotsLatch = new OneShotLatch();
  final OneShotLatch taskInTerminalState = new OneShotLatch();
  final CompletableFuture<Collection<SlotOffer>> offerResultFuture = new CompletableFuture<>();
  final TestingJobMasterGateway jobMasterGateway =
      createJobMasterWithSlotOfferAndTaskTerminationHooks(
          offerSlotsLatch, taskInTerminalState, offerResultFuture);
  rpc.registerGateway(resourceManagerGateway.getAddress(), resourceManagerGateway);
  rpc.registerGateway(jobMasterGateway.getAddress(), jobMasterGateway);
  final TaskSlotTable<Task> taskSlotTable =
      TaskSlotUtils.createTaskSlotTable(2, EXECUTOR_EXTENSION.getExecutor());
  final TaskManagerServices taskManagerServices =
      createTaskManagerServicesWithTaskSlotTable(taskSlotTable);
  final TestingTaskExecutor taskManager = createTestingTaskExecutor(taskManagerServices);
  try {
    taskManager.start();
    taskManager.waitUntilStarted();
    final TaskExecutorGateway tmGateway =
        taskManager.getSelfGateway(TaskExecutorGateway.class);
    // wait until registered at the RM
    taskExecutorIsRegistered.await();
    // request 2 slots for the given allocation ids
    AllocationID[] allocationIds = new AllocationID[] {allocationId1, allocationId2};
    for (int i = 0; i < allocationIds.length; i++) {
      requestSlot(
          tmGateway,
          jobId,
          allocationIds[i],
          buildSlotID(i),
          ResourceProfile.UNKNOWN,
          jobMasterGateway.getAddress(),
          resourceManagerGateway.getFencingToken());
    }
    // notify job leader to start slot offering
    jobManagerLeaderRetriever.notifyListener(
        jobMasterGateway.getAddress(), jobMasterGateway.getFencingToken().toUUID());
    // wait until slots have been offered
    offerSlotsLatch.await();
    // Accept only offer1; the slot for allocationId2 is thereby rejected.
    offerResultFuture.complete(Collections.singletonList(offer1));
    final Tuple3<InstanceID, SlotID, AllocationID> instanceIDSlotIDAllocationIDTuple3 =
        availableSlotFuture.get();
    // The rejected slot (index 1 / allocationId2) must be reported free to the RM.
    final Tuple3<InstanceID, SlotID, AllocationID> expectedResult =
        Tuple3.of(registrationId, buildSlotID(1), allocationId2);
    assertThat(instanceIDSlotIDAllocationIDTuple3).isEqualTo(expectedResult);
    // the slot 1 can be activate for task submission
    submit(allocationId1, jobMasterGateway, tmGateway, NoOpInvokable.class);
    // wait for the task completion
    taskInTerminalState.await();
    // the slot 2 can NOT be activate for task submission
    assertThatThrownBy(
        () -> submit(allocationId2, jobMasterGateway, tmGateway, NoOpInvokable.class))
        .withFailMessage(
            "It should not be possible to submit task to acquired by JM slot with index 1 (allocationId2)")
        .isInstanceOf(CompletionException.class)
        .hasCauseInstanceOf(TaskSubmissionException.class);
    // the slot 2 is free to request
    requestSlot(
        tmGateway,
        jobId,
        allocationId2,
        buildSlotID(1),
        ResourceProfile.UNKNOWN,
        jobMasterGateway.getAddress(),
        resourceManagerGateway.getFencingToken());
  } finally {
    RpcUtils.terminateRpcEndpoint(taskManager);
  }
}
public static String toString(Object obj) { if (null == obj) { return null; } if (obj instanceof long[]) { return Arrays.toString((long[]) obj); } else if (obj instanceof int[]) { return Arrays.toString((int[]) obj); } else if (obj instanceof short[]) { return Arrays.toString((short[]) obj); } else if (obj instanceof char[]) { return Arrays.toString((char[]) obj); } else if (obj instanceof byte[]) { return Arrays.toString((byte[]) obj); } else if (obj instanceof boolean[]) { return Arrays.toString((boolean[]) obj); } else if (obj instanceof float[]) { return Arrays.toString((float[]) obj); } else if (obj instanceof double[]) { return Arrays.toString((double[]) obj); } else if (ArrayUtil.isArray(obj)) { // 对象数组 try { return Arrays.deepToString((Object[]) obj); } catch (Exception ignore) { //ignore } } return obj.toString(); }
@Test
public void toStingTest() {
  // Each primitive array type is routed to the matching Arrays.toString overload.
  int[] intArray = {1, 3, 56, 6, 7};
  assertEquals("[1, 3, 56, 6, 7]", ArrayUtil.toString(intArray));
  long[] longArray = {1, 3, 56, 6, 7};
  assertEquals("[1, 3, 56, 6, 7]", ArrayUtil.toString(longArray));
  short[] shortArray = {1, 3, 56, 6, 7};
  assertEquals("[1, 3, 56, 6, 7]", ArrayUtil.toString(shortArray));
  double[] doubleArray = {1, 3, 56, 6, 7};
  assertEquals("[1.0, 3.0, 56.0, 6.0, 7.0]", ArrayUtil.toString(doubleArray));
  byte[] byteArray = {1, 3, 56, 6, 7};
  assertEquals("[1, 3, 56, 6, 7]", ArrayUtil.toString(byteArray));
  boolean[] booleanArray = {true, false, true, true, true};
  assertEquals("[true, false, true, true, true]", ArrayUtil.toString(booleanArray));
  float[] floatArray = {1, 3, 56, 6, 7};
  assertEquals("[1.0, 3.0, 56.0, 6.0, 7.0]", ArrayUtil.toString(floatArray));
  char[] charArray = {'a', 'b', '你', '好', '1'};
  assertEquals("[a, b, 你, 好, 1]", ArrayUtil.toString(charArray));
  // Object arrays are rendered via deepToString.
  String[] stringArray = {"aa", "bb", "cc", "dd", "bb", "dd"};
  assertEquals("[aa, bb, cc, dd, bb, dd]", ArrayUtil.toString(stringArray));
}
/**
 * Stores the given key/value pair in the backing store, replacing any
 * previous value mapped to {@code key}.
 */
public void addProperty(String key, String value) {
  store.put(key, value);
}
/**
 * Checks that string-valued properties convert to the requested boxed
 * numeric types and enum constants, ignoring the supplied defaults when a
 * stored value exists.
 */
@Test
void testConversions() {
  // Store values as strings; "long" is deliberately beyond Integer.MAX_VALUE.
  memConfig.addProperty("long", "2147483648");
  memConfig.addProperty("byte", "127");
  memConfig.addProperty("short", "32767");
  memConfig.addProperty("float", "3.14");
  memConfig.addProperty("double", "3.14159265358979323846264338327950");
  memConfig.addProperty("enum", "FIELD");
  Object longObject = memConfig.convert(Long.class, "long", 1L);
  Object byteObject = memConfig.convert(Byte.class, "byte", (byte) 1);
  Object shortObject = memConfig.convert(Short.class, "short", (short) 1);
  Object floatObject = memConfig.convert(Float.class, "float", 3.14F);
  Object doubleObject = memConfig.convert(Double.class, "double", 3.14159265358979323846264338327950);
  JavaBeanAccessor javaBeanAccessor = memConfig.convert(JavaBeanAccessor.class, "enum", JavaBeanAccessor.ALL);
  // Each conversion must produce the exact boxed type and the stored value,
  // not the default passed in.
  Assertions.assertEquals(Long.class, longObject.getClass());
  Assertions.assertEquals(2147483648L, longObject);
  Assertions.assertEquals(Byte.class, byteObject.getClass());
  Assertions.assertEquals((byte) 127, byteObject);
  Assertions.assertEquals(Short.class, shortObject.getClass());
  Assertions.assertEquals((short) 32767, shortObject);
  Assertions.assertEquals(Float.class, floatObject.getClass());
  Assertions.assertEquals(3.14F, floatObject);
  Assertions.assertEquals(Double.class, doubleObject.getClass());
  Assertions.assertEquals(3.14159265358979323846264338327950, doubleObject);
  Assertions.assertEquals(JavaBeanAccessor.class, javaBeanAccessor.getClass());
  Assertions.assertEquals(JavaBeanAccessor.FIELD, javaBeanAccessor);
}
@Override public void updateUserPassword(Long id, UserProfileUpdatePasswordReqVO reqVO) { // 校验旧密码密码 validateOldPassword(id, reqVO.getOldPassword()); // 执行更新 AdminUserDO updateObj = new AdminUserDO().setId(id); updateObj.setPassword(encodePassword(reqVO.getNewPassword())); // 加密密码 userMapper.updateById(updateObj); }
@Test
public void testUpdateUserPassword02_success() {
  // mock data: persist a random user that will have its password updated
  AdminUserDO dbUser = randomAdminUserDO();
  userMapper.insert(dbUser);
  // prepare arguments
  Long userId = dbUser.getId();
  String password = "yudao";
  // mock method: make the encoder produce a predictable "encode:" prefix
  when(passwordEncoder.encode(anyString())).then(
      (Answer<String>) invocationOnMock -> "encode:" + invocationOnMock.getArgument(0));
  // invoke
  userService.updateUserPassword(userId, password);
  // assert: the stored password must be the encoded form of the new password
  AdminUserDO user = userMapper.selectById(userId);
  assertEquals("encode:" + password, user.getPassword());
}
/**
 * Returns {@code true} only while this latch is in the STARTED state AND
 * currently holds leadership. The two flags are read independently, so the
 * result is a best-effort snapshot rather than a single atomic check.
 */
public boolean hasLeadership() {
  return (state.get() == State.STARTED) && hasLeadership.get();
}
/**
 * Expires the leader's ZooKeeper session and verifies that leadership moves
 * cleanly to the second latch: the old leader loses leadership and never
 * regains it afterwards (i.e. no "split brain" with two leaders).
 */
@Test
public void testSessionInterruptionDoNotCauseBrainSplit() throws Exception {
  final String latchPath = "/testSessionInterruptionDoNotCauseBrainSplit";
  final Timing2 timing = new Timing2();
  final BlockingQueue<TestEvent> events0 = new LinkedBlockingQueue<>();
  final BlockingQueue<TestEvent> events1 = new LinkedBlockingQueue<>();
  final List<Closeable> closeableResources = new ArrayList<>();
  try {
    final String id0 = "id0";
    final CuratorFramework client0 =
        createAndStartClient(server.getConnectString(), timing, id0, null);
    closeableResources.add(client0);
    final LeaderLatch latch0 = createAndStartLeaderLatch(client0, latchPath, id0, events0);
    closeableResources.add(latch0);
    // First latch must become leader.
    assertThat(events0.poll(timing.forWaiting().milliseconds(), TimeUnit.MILLISECONDS))
        .isNotNull()
        .isEqualTo(new TestEvent(id0, TestEventType.GAINED_LEADERSHIP));
    final String id1 = "id1";
    final CuratorFramework client1 =
        createAndStartClient(server.getConnectString(), timing, id1, null);
    closeableResources.add(client1);
    final LeaderLatch latch1 = createAndStartLeaderLatch(client1, latchPath, id1, events1);
    closeableResources.add(latch1);
    // wait for the non-leading LeaderLatch (i.e. latch1) instance to be done with its creation
    // this call is time-consuming but necessary because we don't have a handle to detect the end of the reset
    // call
    timing.forWaiting().sleepABit();
    assertTrue(latch0.hasLeadership());
    assertFalse(latch1.hasLeadership());
    // Force the leader's session to expire, triggering a leadership change.
    client0.getZookeeperClient().getZooKeeper().getTestable().injectSessionExpiration();
    assertThat(events1.poll(timing.forWaiting().milliseconds(), TimeUnit.MILLISECONDS))
        .isNotNull()
        .isEqualTo(new TestEvent(id1, TestEventType.GAINED_LEADERSHIP));
    assertThat(events0.poll(timing.forWaiting().milliseconds(), TimeUnit.MILLISECONDS))
        .isNotNull()
        .isEqualTo(new TestEvent(id0, TestEventType.LOST_LEADERSHIP));
    // No leadership gained by old leader after session changed, hence no brain split.
    assertThat(events0.poll(20, TimeUnit.MILLISECONDS))
        .isNotEqualTo(new TestEvent(id0, TestEventType.GAINED_LEADERSHIP));
  } finally {
    // reverse is necessary for closing the LeaderLatch instances before closing the corresponding client
    Collections.reverse(closeableResources);
    closeableResources.forEach(CloseableUtils::closeQuietly);
  }
}
/**
 * Parses MLSD reply lines (RFC 3659 machine listings) into children of the given directory.
 * Throws {@code FTPInvalidListException} when not a single line could be parsed into an entry.
 */
@Override
public AttributedList<Path> read(final Path directory, final List<String> replies) throws FTPInvalidListException {
    final AttributedList<Path> children = new AttributedList<>();
    if(replies.isEmpty()) {
        return children;
    }
    // At least one entry successfully parsed
    boolean success = false;
    for(String line : replies) {
        // Each reply line maps a file name to its fact key/value pairs
        final Map<String, Map<String, String>> file = this.parseFacts(line);
        if(null == file) {
            log.error(String.format("Error parsing line %s", line));
            continue;
        }
        for(Map.Entry<String, Map<String, String>> f : file.entrySet()) {
            final String name = f.getKey();
            // size -- Size in octets
            // modify -- Last modification time
            // create -- Creation time
            // type -- Entry type
            // unique -- Unique id of file/directory
            // perm -- File permissions, whether read, write, execute is allowed for the login id.
            // lang -- Language of the file name per IANA [11] registry.
            // media-type -- MIME media-type of file contents per IANA registry.
            // charset -- Character set per IANA registry (if not UTF-8)
            final Map<String, String> facts = f.getValue();
            if(!facts.containsKey("type")) {
                log.error(String.format("No type fact in line %s", line));
                continue;
            }
            final Path parsed;
            if("dir".equals(facts.get("type").toLowerCase(Locale.ROOT))) {
                parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.directory));
            }
            else if("file".equals(facts.get("type").toLowerCase(Locale.ROOT))) {
                parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file));
            }
            else if(facts.get("type").toLowerCase(Locale.ROOT).matches("os\\.unix=slink:.*")) {
                parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file, Path.Type.symboliclink));
                // Parse symbolic link target in Type=OS.unix=slink:/foobar;Perm=;Unique=keVO1+4G4; foobar
                final String[] type = facts.get("type").split(":");
                if(type.length == 2) {
                    final String target = type[1];
                    if(target.startsWith(String.valueOf(Path.DELIMITER))) {
                        // Absolute link target
                        parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(target), EnumSet.of(Path.Type.file)));
                    }
                    else {
                        // Relative link target resolved against the listed directory
                        parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(String.format("%s/%s", directory.getAbsolute(), target)), EnumSet.of(Path.Type.file)));
                    }
                }
                else {
                    log.warn(String.format("Missing symbolic link target for type %s in line %s", facts.get("type"), line));
                    continue;
                }
            }
            else {
                log.warn(String.format("Ignored type %s in line %s", facts.get("type"), line));
                continue;
            }
            if(!success) {
                // A directory entry named like the listed directory itself suggests the server
                // echoed the requested path instead of listing its children
                if(parsed.isDirectory() && directory.getName().equals(name)) {
                    log.warn(String.format("Possibly bogus response line %s", line));
                }
                else {
                    success = true;
                }
            }
            if(name.equals(".") || name.equals("..")) {
                if(log.isDebugEnabled()) {
                    log.debug(String.format("Skip %s", name));
                }
                continue;
            }
            if(facts.containsKey("size")) {
                parsed.attributes().setSize(Long.parseLong(facts.get("size")));
            }
            if(facts.containsKey("unix.uid")) {
                parsed.attributes().setOwner(facts.get("unix.uid"));
            }
            if(facts.containsKey("unix.owner")) {
                parsed.attributes().setOwner(facts.get("unix.owner"));
            }
            if(facts.containsKey("unix.gid")) {
                parsed.attributes().setGroup(facts.get("unix.gid"));
            }
            if(facts.containsKey("unix.group")) {
                parsed.attributes().setGroup(facts.get("unix.group"));
            }
            if(facts.containsKey("unix.mode")) {
                // Explicit numeric mode takes precedence over the generic perm fact
                parsed.attributes().setPermission(new Permission(facts.get("unix.mode")));
            }
            else if(facts.containsKey("perm")) {
                if(PreferencesFactory.get().getBoolean("ftp.parser.mlsd.perm.enable")) {
                    Permission.Action user = Permission.Action.none;
                    final String flags = facts.get("perm");
                    if(StringUtils.contains(flags, 'r') || StringUtils.contains(flags, 'l')) {
                        // RETR command may be applied to that object
                        // Listing commands, LIST, NLST, and MLSD may be applied
                        user = user.or(Permission.Action.read);
                    }
                    if(StringUtils.contains(flags, 'w') || StringUtils.contains(flags, 'm') || StringUtils.contains(flags, 'c')) {
                        user = user.or(Permission.Action.write);
                    }
                    if(StringUtils.contains(flags, 'e')) {
                        // CWD command naming the object should succeed
                        user = user.or(Permission.Action.execute);
                        if(parsed.isDirectory()) {
                            user = user.or(Permission.Action.read);
                        }
                    }
                    // Only the user triple can be derived from perm; group/other remain unset
                    final Permission permission = new Permission(user, Permission.Action.none, Permission.Action.none);
                    parsed.attributes().setPermission(permission);
                }
            }
            if(facts.containsKey("modify")) {
                // Time values are always represented in UTC
                parsed.attributes().setModificationDate(this.parseTimestamp(facts.get("modify")));
            }
            if(facts.containsKey("create")) {
                // Time values are always represented in UTC
                parsed.attributes().setCreationDate(this.parseTimestamp(facts.get("create")));
            }
            children.add(parsed);
        }
    }
    if(!success) {
        throw new FTPInvalidListException(children);
    }
    return children;
}
@Test public void testParseMlsdMode775() throws Exception { Path path = new Path( "/www", EnumSet.of(Path.Type.directory)); String[] replies = new String[]{ "modify=20090210192929;perm=fle;type=dir;unique=FE03U10006D95;UNIX.group=1001;UNIX.mode=02775;UNIX.owner=2000; tangerine" }; final AttributedList<Path> children = new FTPMlsdListResponseReader() .read(path, Arrays.asList(replies)); assertEquals(1, children.size()); assertEquals("2775", children.get(0).attributes().getPermission().getMode()); }
public void initialize(ProxyConfiguration conf) throws Exception { for (ProxyExtension extension : extensions.values()) { extension.initialize(conf); } }
@Test public void testInitialize() throws Exception { ProxyConfiguration conf = new ProxyConfiguration(); extensions.initialize(conf); verify(extension1, times(1)).initialize(same(conf)); verify(extension2, times(1)).initialize(same(conf)); }
public static String getTieredStoragePath(String basePath) { return String.format("%s/%s", basePath, TIERED_STORAGE_DIR); }
@Test void testGetTieredStoragePath() { String tieredStoragePath = SegmentPartitionFile.getTieredStoragePath(tempFolder.getPath()); assertThat(tieredStoragePath) .isEqualTo(new File(tempFolder.getPath(), TIERED_STORAGE_DIR).getPath()); }
@Override protected boolean hasPortfolioChildProjectsPermission(String permission, String portfolioUuid) { return false; }
@Test public void hasPortfolioChildProjectsPermission() { assertThat(githubWebhookUserSession.hasPortfolioChildProjectsPermission("perm", "project")).isFalse(); }
public String convertInt(int i) { return convert(i); }
@Test public void testSmoke() { FileNamePattern pp = new FileNamePattern("t", context); assertEquals("t", pp.convertInt(3)); pp = new FileNamePattern("foo", context); assertEquals("foo", pp.convertInt(3)); pp = new FileNamePattern("%i foo", context); assertEquals("3 foo", pp.convertInt(3)); pp = new FileNamePattern("foo%i.xixo", context); assertEquals("foo3.xixo", pp.convertInt(3)); pp = new FileNamePattern("foo%i.log", context); assertEquals("foo3.log", pp.convertInt(3)); pp = new FileNamePattern("foo.%i.log", context); assertEquals("foo.3.log", pp.convertInt(3)); pp = new FileNamePattern("foo.%3i.log", context); assertEquals("foo.003.log", pp.convertInt(3)); pp = new FileNamePattern("foo.%1i.log", context); assertEquals("foo.43.log", pp.convertInt(43)); // pp = new FileNamePattern("%i.foo\\%", context); // assertEquals("3.foo%", pp.convertInt(3)); // pp = new FileNamePattern("\\%foo", context); // assertEquals("%foo", pp.convertInt(3)); }
public static OffsetAndMetadata fromRequest( OffsetCommitRequestData.OffsetCommitRequestPartition partition, long currentTimeMs, OptionalLong expireTimestampMs ) { return new OffsetAndMetadata( partition.committedOffset(), ofSentinel(partition.committedLeaderEpoch()), partition.committedMetadata() == null ? OffsetAndMetadata.NO_METADATA : partition.committedMetadata(), partition.commitTimestamp() == OffsetCommitRequest.DEFAULT_TIMESTAMP ? currentTimeMs : partition.commitTimestamp(), expireTimestampMs ); }
@Test public void testFromRequest() { MockTime time = new MockTime(); OffsetCommitRequestData.OffsetCommitRequestPartition partition = new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(100L) .setCommittedLeaderEpoch(-1) .setCommittedMetadata(null) .setCommitTimestamp(-1L); assertEquals( new OffsetAndMetadata( 100L, OptionalInt.empty(), "", time.milliseconds(), OptionalLong.empty() ), OffsetAndMetadata.fromRequest( partition, time.milliseconds(), OptionalLong.empty() ) ); partition .setCommittedLeaderEpoch(10) .setCommittedMetadata("hello") .setCommitTimestamp(1234L); assertEquals( new OffsetAndMetadata( 100L, OptionalInt.of(10), "hello", 1234L, OptionalLong.empty() ), OffsetAndMetadata.fromRequest( partition, time.milliseconds(), OptionalLong.empty() ) ); assertEquals( new OffsetAndMetadata( 100L, OptionalInt.of(10), "hello", 1234L, OptionalLong.of(5678L) ), OffsetAndMetadata.fromRequest( partition, time.milliseconds(), OptionalLong.of(5678L) ) ); }
void handleSegmentWithDeleteSegmentStartedState(Long startOffset, RemoteLogSegmentId remoteLogSegmentId) { // Remove the offset mappings as this segment is getting deleted. offsetToId.remove(startOffset, remoteLogSegmentId); // Add this entry to unreferenced set for the leader epoch as it is being deleted. // This allows any retries of deletion as these are returned from listAllSegments and listSegments(leaderEpoch). unreferencedSegmentIds.add(remoteLogSegmentId); }
@Test void handleSegmentWithDeleteSegmentStartedState() { RemoteLogSegmentId segmentId1 = new RemoteLogSegmentId(tpId, Uuid.randomUuid()); RemoteLogSegmentId segmentId2 = new RemoteLogSegmentId(tpId, Uuid.randomUuid()); epochState.handleSegmentWithCopySegmentFinishedState(10L, segmentId1, 100L); epochState.handleSegmentWithCopySegmentFinishedState(101L, segmentId2, 200L); assertEquals(2, epochState.referencedSegmentIds().size()); epochState.handleSegmentWithDeleteSegmentStartedState(10L, segmentId1); epochState.handleSegmentWithDeleteSegmentStartedState(101L, segmentId2); assertTrue(epochState.referencedSegmentIds().isEmpty()); assertEquals(2, epochState.unreferencedSegmentIds().size()); assertTrue(epochState.unreferencedSegmentIds().containsAll(Arrays.asList(segmentId1, segmentId2))); }
public long addAndGet(long value) { this.value += value; return this.value; }
@Test public void testAddAndGet() { MutableLong mutableLong = MutableLong.valueOf(13); assertEquals(24L, mutableLong.addAndGet(11)); assertEquals(24L, mutableLong.value); }
public AscendingLongIterator iterator() { return new IteratorImpl(storages); }
@Test public void testIteratorAdvanceAtLeastToDistinctPrefixes() { long prefix = ((long) Integer.MAX_VALUE * 2 + 1); set(0); verifyAdvanceAtLeastTo(); set(prefix + 1); verifyAdvanceAtLeastTo(); set(prefix * 3 + 1); verifyAdvanceAtLeastTo(); set(prefix * 4 + 1); verifyAdvanceAtLeastTo(); set(prefix * 5 + 1); verifyAdvanceAtLeastTo(); AscendingLongIterator iterator = actual.iterator(); // try advance to the gap iterator.advanceAtLeastTo(prefix * 2 + 1); verify(iterator, expected.tailSet(prefix * 2 + 1)); }
public static Ip4Address valueOf(int value) { byte[] bytes = ByteBuffer.allocate(INET_BYTE_LENGTH).putInt(value).array(); return new Ip4Address(bytes); }
@Test(expected = IllegalArgumentException.class) public void testInvalidValueOfArrayInvalidOffsetIPv4() { Ip4Address ipAddress; byte[] value; value = new byte[] {11, 22, 33, // Preamble 1, 2, 3, 4, 44, 55}; // Extra bytes ipAddress = Ip4Address.valueOf(value, 6); }
public static DescriptorDigest fromDigest(String digest) throws DigestException { if (!digest.matches(DIGEST_REGEX)) { throw new DigestException("Invalid digest: " + digest); } // Extracts the hash portion of the digest. String hash = digest.substring(DIGEST_PREFIX.length()); return new DescriptorDigest(hash); }
@Test public void testCreateFromDigest_fail() { String badDigest = "sha256:not a valid digest"; try { DescriptorDigest.fromDigest(badDigest); Assert.fail("Invalid digest should have caused digest creation failure."); } catch (DigestException ex) { Assert.assertEquals("Invalid digest: " + badDigest, ex.getMessage()); } }
@Override public ProtobufSystemInfo.Section toProtobuf() { ProtobufSystemInfo.Section.Builder protobuf = ProtobufSystemInfo.Section.newBuilder(); protobuf.setName("System"); setAttribute(protobuf, "Server ID", server.getId()); setAttribute(protobuf, "Edition", sonarRuntime.getEdition().getLabel()); setAttribute(protobuf, NCLOC.getName() ,statisticsSupport.getLinesOfCode()); setAttribute(protobuf, "Container", containerSupport.isRunningInContainer()); setAttribute(protobuf, "High Availability", true); setAttribute(protobuf, "External Users and Groups Provisioning", commonSystemInformation.getManagedInstanceProviderName()); setAttribute(protobuf, "External User Authentication", commonSystemInformation.getExternalUserAuthentication()); addIfNotEmpty(protobuf, "Accepted external identity providers", commonSystemInformation.getEnabledIdentityProviders()); addIfNotEmpty(protobuf, "External identity providers whose users are allowed to sign themselves up", commonSystemInformation.getAllowsToSignUpEnabledIdentityProviders()); setAttribute(protobuf, "Force authentication", commonSystemInformation.getForceAuthentication()); return protobuf.build(); }
@Test public void toProtobuf_whenEnabledIdentityProviders_shouldWriteThem() { when(commonSystemInformation.getEnabledIdentityProviders()).thenReturn(List.of("Bitbucket, GitHub")); ProtobufSystemInfo.Section protobuf = underTest.toProtobuf(); assertThatAttributeIs(protobuf, "Accepted external identity providers", "Bitbucket, GitHub"); }
/**
 * Fetches windowed rows for the given key whose window start and end fall within
 * the given bounds, by issuing an IQv2 {@code WindowKeyQuery} against the state store.
 *
 * @param key the table key to look up
 * @param partition the partition to query
 * @param windowStartBounds bounds on the window start instant
 * @param windowEndBounds bounds on the window end instant
 * @param position optional position bound for consistent reads
 * @return matching rows together with the store position at read time
 */
@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key,
    final int partition,
    final Range<Instant> windowStartBounds,
    final Range<Instant> windowEndBounds,
    final Optional<Position> position
) {
    try {
        // Narrow the store fetch range using both start and end bounds before querying
        final Instant lower = calculateLowerBound(windowStartBounds, windowEndBounds);
        final Instant upper = calculateUpperBound(windowStartBounds, windowEndBounds);
        final WindowKeyQuery<GenericKey, ValueAndTimestamp<GenericRow>> query =
            WindowKeyQuery.withKeyAndWindowStartRange(key, lower, upper);

        StateQueryRequest<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> request =
            inStore(stateStore.getStateStoreName()).withQuery(query);
        if (position.isPresent()) {
            request = request.withPositionBound(PositionBound.at(position.get()));
        }
        final KafkaStreams streams = stateStore.getKafkaStreams();
        final StateQueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> result =
            streams.query(request);
        final QueryResult<WindowStoreIterator<ValueAndTimestamp<GenericRow>>> queryResult =
            result.getPartitionResults().get(partition);

        if (queryResult.isFailure()) {
            throw failedQueryException(queryResult);
        }

        if (queryResult.getResult() == null) {
            // No state for this partition: return an empty iterator but still report the position
            return KsMaterializedQueryResult.rowIteratorWithPosition(
                Collections.emptyIterator(), queryResult.getPosition());
        }

        try (WindowStoreIterator<ValueAndTimestamp<GenericRow>> it = queryResult.getResult()) {
            final Builder<WindowedRow> builder = ImmutableList.builder();

            while (it.hasNext()) {
                final KeyValue<Long, ValueAndTimestamp<GenericRow>> next = it.next();
                final Instant windowStart = Instant.ofEpochMilli(next.key);
                // The store fetch is range-based; re-check the exact bounds here
                if (!windowStartBounds.contains(windowStart)) {
                    continue;
                }

                final Instant windowEnd = windowStart.plus(windowSize);
                if (!windowEndBounds.contains(windowEnd)) {
                    continue;
                }

                final TimeWindow window =
                    new TimeWindow(windowStart.toEpochMilli(), windowEnd.toEpochMilli());

                final WindowedRow row = WindowedRow.of(
                    stateStore.schema(),
                    new Windowed<>(key, window),
                    next.value.value(),
                    next.value.timestamp()
                );

                builder.add(row);
            }

            return KsMaterializedQueryResult.rowIteratorWithPosition(
                builder.build().iterator(), queryResult.getPosition());
        }
    } catch (final NotUpToBoundException | MaterializationException e) {
        // Already meaningful to callers — rethrow unchanged
        throw e;
    } catch (final Exception e) {
        throw new MaterializationException("Failed to get value from materialized table", e);
    }
}
@Test @SuppressWarnings("unchecked") public void shouldThrowIfQueryFails_fethAll() { // Given: final StateQueryResult<?> partitionResult = new StateQueryResult<>(); partitionResult.addResult(PARTITION, QueryResult.forFailure(FailureReason.STORE_EXCEPTION, "Boom")); when(kafkaStreams.query(any(StateQueryRequest.class))).thenReturn(partitionResult); // When: final Exception e = assertThrows( MaterializationException.class, () -> table.get(A_KEY, PARTITION, WINDOW_START_BOUNDS, WINDOW_END_BOUNDS) ); // Then: assertThat(e.getMessage(), containsString( "Boom")); assertThat(e, (instanceOf(MaterializationException.class))); }
static <RequestT, ResponseT> Call<RequestT, ResponseT> of( Caller<RequestT, ResponseT> caller, Coder<ResponseT> responseTCoder) { caller = SerializableUtils.ensureSerializable(caller); return new Call<>( Configuration.<RequestT, ResponseT>builder() .setCaller(caller) .setResponseCoder(responseTCoder) .build()); }
@Test public void givenCallerThrowsUserCodeExecutionException_emitsIntoFailurePCollection() { Result<Response> result = pipeline .apply(Create.of(new Request("a"))) .apply( Call.of( new CallerThrowsUserCodeExecutionException(), NON_DETERMINISTIC_RESPONSE_CODER)); PCollection<ApiIOError> failures = result.getFailures(); PAssert.thatSingleton(countStackTracesOf(failures, UserCodeExecutionException.class)) .isEqualTo(1L); PAssert.thatSingleton(countStackTracesOf(failures, UserCodeQuotaException.class)).isEqualTo(0L); PAssert.thatSingleton(countStackTracesOf(failures, UserCodeTimeoutException.class)) .isEqualTo(0L); pipeline.run(); }
@Override protected void activate() { move(0, 0, -20); playSound("GROUNDDIVE_SOUND", 5); spawnParticles("GROUNDDIVE_PARTICLE", 20); }
@Test void testActivate() throws Exception { var groundDive = new GroundDive(); var logs = tapSystemOutNormalized(groundDive::activate) .split("\n"); final var expectedSize = 3; final var log1 = logs[0].split("--")[1].trim(); final var expectedLog1 = "Move to ( 0.0, 0.0, -20.0 )"; final var log2 = getLogContent(logs[1]); final var expectedLog2 = "Play GROUNDDIVE_SOUND with volume 5"; final var log3 = getLogContent(logs[2]); final var expectedLog3 = "Spawn 20 particle with type GROUNDDIVE_PARTICLE"; assertEquals(logs.length, expectedSize); assertEquals(log1, expectedLog1); assertEquals(log2, expectedLog2); assertEquals(log3, expectedLog3); }
@Override public V remove(K key) { return map.remove(key); }
@Test(expected = MethodNotAvailableException.class) public void testRemoveWithOldValue() { adapter.remove(23, "oldValue"); }
@Override @Transactional(rollbackFor = Exception.class) public void updateJobStatus(Long id, Integer status) throws SchedulerException { // 校验 status if (!containsAny(status, JobStatusEnum.NORMAL.getStatus(), JobStatusEnum.STOP.getStatus())) { throw exception(JOB_CHANGE_STATUS_INVALID); } // 校验存在 JobDO job = validateJobExists(id); // 校验是否已经为当前状态 if (job.getStatus().equals(status)) { throw exception(JOB_CHANGE_STATUS_EQUALS); } // 更新 Job 状态 JobDO updateObj = JobDO.builder().id(id).status(status).build(); jobMapper.updateById(updateObj); // 更新状态 Job 到 Quartz 中 if (JobStatusEnum.NORMAL.getStatus().equals(status)) { // 开启 schedulerManager.resumeJob(job.getHandlerName()); } else { // 暂停 schedulerManager.pauseJob(job.getHandlerName()); } }
@Test public void testUpdateJobStatus_changeStatusEquals() { // mock 数据 JobDO job = randomPojo(JobDO.class, o -> o.setStatus(JobStatusEnum.NORMAL.getStatus())); jobMapper.insert(job); // 调用,并断言异常 assertServiceException(() -> jobService.updateJobStatus(job.getId(), job.getStatus()), JOB_CHANGE_STATUS_EQUALS); }
/**
 * Queries the state of a Spark ETL job — via the yarn CLI when the resource runs on a
 * yarn master, otherwise from the in-process {@code SparkLoadAppHandle} — and reads the
 * dpp result file once the job reaches a terminal (FINISHED/CANCELLED) state.
 */
public EtlStatus getEtlJobStatus(SparkLoadAppHandle handle, String appId, long loadJobId,
        String etlOutputPath, SparkResource resource, BrokerDesc brokerDesc) throws UserException {
    EtlStatus status = new EtlStatus();

    Preconditions.checkState(appId != null && !appId.isEmpty());
    if (resource.isYarnMaster()) {
        // prepare yarn config
        String configDir = resource.prepareYarnConfig();
        // yarn client path
        String yarnClient = resource.getYarnClientPath();
        // command: yarn --config configDir application -status appId
        String yarnStatusCmd = String.format(YARN_STATUS_CMD, yarnClient, configDir, appId);
        LOG.info(yarnStatusCmd);
        String[] envp = {"LC_ALL=" + Config.locale, "JAVA_HOME=" + System.getProperty("java.home")};
        CommandResult result = Util.executeCommand(yarnStatusCmd, envp, EXEC_CMD_TIMEOUT_MS);
        if (result.getReturnCode() != 0) {
            String stderr = result.getStderr();
            // case application not exists
            if (stderr != null && stderr.contains("doesn't exist in RM")) {
                LOG.warn("spark application not found. spark app id: {}, load job id: {}, stderr: {}",
                        appId, loadJobId, stderr);
                status.setState(TEtlState.CANCELLED);
                status.setFailMsg("spark application not found");
                return status;
            }

            LOG.warn("yarn application status failed. spark app id: {}, load job id: {}, timeout: {}"
                    + ", return code: {}, stderr: {}, stdout: {}",
                    appId, loadJobId, EXEC_CMD_TIMEOUT_MS, result.getReturnCode(), stderr, result.getStdout());
            throw new LoadException("yarn application status failed. error: " + stderr);
        }
        // Parse the CLI output into an application report
        ApplicationReport report = new YarnApplicationReport(result.getStdout()).getReport();
        LOG.info("yarn application -status {}. load job id: {}, output: {}, report: {}",
                appId, loadJobId, result.getStdout(), report);
        YarnApplicationState state = report.getYarnApplicationState();
        FinalApplicationStatus faStatus = report.getFinalApplicationStatus();
        status.setState(fromYarnState(state, faStatus));
        if (status.getState() == TEtlState.CANCELLED) {
            // yarn FINISHED but unsuccessful final status -> report the final status;
            // otherwise report the raw yarn state
            if (state == YarnApplicationState.FINISHED) {
                status.setFailMsg("spark app state: " + faStatus.toString());
            } else {
                status.setFailMsg("yarn app state: " + state.toString());
            }
        }
        // prefer the url captured by the handle; fall back to the report's tracking url
        status.setTrackingUrl(handle.getUrl() != null ? handle.getUrl() : report.getTrackingUrl());
        status.setProgress((int) (report.getProgress() * 100));
    } else {
        // state from handle
        if (handle == null) {
            status.setFailMsg("spark app handle is null");
            status.setState(TEtlState.CANCELLED);
            return status;
        }

        State state = handle.getState();
        status.setState(fromSparkState(state));
        if (status.getState() == TEtlState.CANCELLED) {
            status.setFailMsg("spark app state: " + state.toString());
        }
        LOG.info("spark app id: {}, load job id: {}, app state: {}", appId, loadJobId, state);
    }

    if (status.getState() == TEtlState.FINISHED || status.getState() == TEtlState.CANCELLED) {
        // get dpp result
        String dppResultFilePath = EtlJobConfig.getDppResultFilePath(etlOutputPath);
        try {
            byte[] data;
            if (brokerDesc.hasBroker()) {
                data = BrokerUtil.readFile(dppResultFilePath, brokerDesc);
            } else {
                data = HdfsUtil.readFile(dppResultFilePath, brokerDesc);
            }
            String dppResultStr = new String(data, StandardCharsets.UTF_8);
            DppResult dppResult = new Gson().fromJson(dppResultStr, DppResult.class);
            if (dppResult != null) {
                status.setDppResult(dppResult);
                if (status.getState() == TEtlState.CANCELLED && !Strings.isNullOrEmpty(dppResult.failedReason)) {
                    status.setFailMsg(dppResult.failedReason);
                }
            }
        } catch (UserException | JsonSyntaxException e) {
            // best effort: a missing/corrupt result file leaves state/failMsg as computed above
            LOG.warn("read broker file failed. path: {}", dppResultFilePath, e);
        }
    }

    return status;
}
@Test
public void testGetEtlJobStatus(@Mocked BrokerUtil brokerUtil, @Mocked Util util, @Mocked CommandResult commandResult,
        @Mocked SparkYarnConfigFiles sparkYarnConfigFiles, @Mocked SparkLoadAppHandle handle)
        throws IOException, UserException {
    // Stub the yarn CLI output to report running, then failed, then finished
    // (the reports are consumed twice each across the three calls below)
    new Expectations() {
        {
            sparkYarnConfigFiles.prepare();
            sparkYarnConfigFiles.getConfigDir();
            result = "./yarn_config";

            commandResult.getReturnCode();
            result = 0;
            commandResult.getStdout();
            returns(runningReport, runningReport, failedReport, failedReport, finishReport, finishReport);

            handle.getUrl();
            result = trackingUrl;
        }
    };

    new Expectations() {
        {
            Util.executeCommand(anyString, (String[]) any, anyLong);
            minTimes = 0;
            result = commandResult;

            BrokerUtil.readFile(anyString, (BrokerDesc) any);
            result = "{'normal_rows': 10, 'abnormal_rows': 0, 'failed_reason': 'etl job failed'}";
        }
    };

    // Configure the resource for yarn cluster mode so the CLI path is exercised
    SparkResource resource = new SparkResource(resourceName);
    Map<String, String> sparkConfigs = resource.getSparkConfigs();
    sparkConfigs.put("spark.master", "yarn");
    sparkConfigs.put("spark.submit.deployMode", "cluster");
    sparkConfigs.put("spark.hadoop.yarn.resourcemanager.address", "127.0.0.1:9999");
    new Expectations(resource) {
        {
            resource.getYarnClientPath();
            result = Config.yarn_client_path;
        }
    };

    BrokerDesc brokerDesc = new BrokerDesc(broker, Maps.newHashMap());
    SparkEtlJobHandler handler = new SparkEtlJobHandler();

    // running
    EtlStatus status = handler.getEtlJobStatus(handle, appId, loadJobId, etlOutputPath, resource, brokerDesc);
    Assert.assertEquals(TEtlState.RUNNING, status.getState());
    Assert.assertEquals(50, status.getProgress());
    Assert.assertEquals(trackingUrl, status.getTrackingUrl());

    // yarn finished and spark failed: cancelled state with the dpp failure reason
    status = handler.getEtlJobStatus(handle, appId, loadJobId, etlOutputPath, resource, brokerDesc);
    Assert.assertEquals(TEtlState.CANCELLED, status.getState());
    Assert.assertEquals(100, status.getProgress());
    Assert.assertEquals("etl job failed", status.getDppResult().failedReason);

    // finished: dpp row counts are populated from the result file
    status = handler.getEtlJobStatus(handle, appId, loadJobId, etlOutputPath, resource, brokerDesc);
    Assert.assertEquals(TEtlState.FINISHED, status.getState());
    Assert.assertEquals(100, status.getProgress());
    Assert.assertEquals(trackingUrl, status.getTrackingUrl());
    Assert.assertEquals(10, status.getDppResult().normalRows);
    Assert.assertEquals(0, status.getDppResult().abnormalRows);
}
@Subscribe public void onChatMessage(ChatMessage chatMessage) { if (chatMessage.getType() != ChatMessageType.TRADE && chatMessage.getType() != ChatMessageType.GAMEMESSAGE && chatMessage.getType() != ChatMessageType.SPAM && chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION) { return; } String message = chatMessage.getMessage(); Matcher matcher = KILLCOUNT_PATTERN.matcher(message); if (matcher.find()) { final String boss = matcher.group("boss"); final int kc = Integer.parseInt(matcher.group("kc")); final String pre = matcher.group("pre"); final String post = matcher.group("post"); if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post)) { unsetKc(boss); return; } String renamedBoss = KILLCOUNT_RENAMES .getOrDefault(boss, boss) // The config service doesn't support keys with colons in them .replace(":", ""); if (boss != renamedBoss) { // Unset old TOB kc unsetKc(boss); unsetPb(boss); unsetKc(boss.replace(":", ".")); unsetPb(boss.replace(":", ".")); // Unset old story mode unsetKc("Theatre of Blood Story Mode"); unsetPb("Theatre of Blood Story Mode"); } setKc(renamedBoss, kc); // We either already have the pb, or need to remember the boss for the upcoming pb if (lastPb > -1) { log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb); if (renamedBoss.contains("Theatre of Blood")) { // TOB team size isn't sent in the kill message, but can be computed from varbits int tobTeamSize = tobTeamSize(); lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players"); } else if (renamedBoss.contains("Tombs of Amascut")) { // TOA team size isn't sent in the kill message, but can be computed from varbits int toaTeamSize = toaTeamSize(); lastTeamSize = toaTeamSize == 1 ? 
"Solo" : (toaTeamSize + " players"); } final double pb = getPb(renamedBoss); // If a raid with a team size, only update the pb if it is lower than the existing pb // so that the pb is the overall lowest of any team size if (lastTeamSize == null || pb == 0 || lastPb < pb) { log.debug("Setting overall pb (old: {})", pb); setPb(renamedBoss, lastPb); } if (lastTeamSize != null) { log.debug("Setting team size pb: {}", lastTeamSize); setPb(renamedBoss + " " + lastTeamSize, lastPb); } lastPb = -1; lastTeamSize = null; } else { lastBossKill = renamedBoss; lastBossTime = client.getTickCount(); } return; } matcher = DUEL_ARENA_WINS_PATTERN.matcher(message); if (matcher.find()) { final int oldWins = getKc("Duel Arena Wins"); final int wins = matcher.group(2).equals("one") ? 1 : Integer.parseInt(matcher.group(2).replace(",", "")); final String result = matcher.group(1); int winningStreak = getKc("Duel Arena Win Streak"); int losingStreak = getKc("Duel Arena Lose Streak"); if (result.equals("won") && wins > oldWins) { losingStreak = 0; winningStreak += 1; } else if (result.equals("were defeated")) { losingStreak += 1; winningStreak = 0; } else { log.warn("unrecognized duel streak chat message: {}", message); } setKc("Duel Arena Wins", wins); setKc("Duel Arena Win Streak", winningStreak); setKc("Duel Arena Lose Streak", losingStreak); } matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message); if (matcher.find()) { int losses = matcher.group(1).equals("one") ? 
1 : Integer.parseInt(matcher.group(1).replace(",", "")); setKc("Duel Arena Losses", losses); } matcher = KILL_DURATION_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = NEW_PB_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = RAIDS_PB_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = RAIDS_DURATION_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = HS_PB_PATTERN.matcher(message); if (matcher.find()) { int floor = Integer.parseInt(matcher.group("floor")); String floortime = matcher.group("floortime"); String floorpb = matcher.group("floorpb"); String otime = matcher.group("otime"); String opb = matcher.group("opb"); String pb = MoreObjects.firstNonNull(floorpb, floortime); setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb)); if (otime != null) { pb = MoreObjects.firstNonNull(opb, otime); setPb("Hallowed Sepulchre", timeStringToSeconds(pb)); } } matcher = HS_KC_FLOOR_PATTERN.matcher(message); if (matcher.find()) { int floor = Integer.parseInt(matcher.group(1)); int kc = Integer.parseInt(matcher.group(2).replaceAll(",", "")); setKc("Hallowed Sepulchre Floor " + floor, kc); } matcher = HS_KC_GHC_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1).replaceAll(",", "")); setKc("Hallowed Sepulchre", kc); } matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1).replaceAll(",", "")); setKc("Hunter Rumours", kc); } if (lastBossKill != null && lastBossTime != client.getTickCount()) { lastBossKill = null; lastBossTime = -1; } matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message); if (matcher.find()) { String item = matcher.group(1); int petId = findPet(item); if (petId != -1) { final List<Integer> petList = new ArrayList<>(getPetList()); if (!petList.contains(petId)) { log.debug("New pet added: {}/{}", item, petId); petList.add(petId); 
setPetList(petList); } } } matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1)); setKc("Guardians of the Rift", kc); } }
@Test public void testTheatreOfBlood() { when(client.getVarbitValue(Varbits.THEATRE_OF_BLOOD_ORB1)).thenReturn(1); when(client.getVarbitValue(Varbits.THEATRE_OF_BLOOD_ORB2)).thenReturn(15); ChatMessage chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Wave 'The Final Challenge' (Normal Mode) complete!<br>" + "Duration: <col=ff0000>2:42.0</col><br>" + "Theatre of Blood completion time: <col=ff0000>17:00.20</col> (new personal best)", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Theatre of Blood total completion time: <col=ff0000>24:40.20</col>. Personal best: 20:45.00", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your completed Theatre of Blood count is: <col=ff0000>73</col>.", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); verify(configManager).setRSProfileConfiguration("killcount", "theatre of blood", 73); verify(configManager).setRSProfileConfiguration("personalbest", "theatre of blood", 17 * 60 + .2); verify(configManager).setRSProfileConfiguration("personalbest", "theatre of blood 2 players", 17 * 60 + .2); }
@SuppressWarnings("DataFlowIssue") public static CommandExecutor newInstance(final MySQLCommandPacketType commandPacketType, final CommandPacket commandPacket, final ConnectionSession connectionSession) throws SQLException { if (commandPacket instanceof SQLReceivedPacket) { log.debug("Execute packet type: {}, sql: {}", commandPacketType, ((SQLReceivedPacket) commandPacket).getSQL()); } else { log.debug("Execute packet type: {}", commandPacketType); } switch (commandPacketType) { case COM_QUIT: return new MySQLComQuitExecutor(); case COM_INIT_DB: return new MySQLComInitDbExecutor((MySQLComInitDbPacket) commandPacket, connectionSession); case COM_FIELD_LIST: return new MySQLComFieldListPacketExecutor((MySQLComFieldListPacket) commandPacket, connectionSession); case COM_QUERY: return new MySQLComQueryPacketExecutor((MySQLComQueryPacket) commandPacket, connectionSession); case COM_PING: return new MySQLComPingExecutor(connectionSession); case COM_STMT_PREPARE: return new MySQLComStmtPrepareExecutor((MySQLComStmtPreparePacket) commandPacket, connectionSession); case COM_STMT_EXECUTE: return new MySQLComStmtExecuteExecutor((MySQLComStmtExecutePacket) commandPacket, connectionSession); case COM_STMT_SEND_LONG_DATA: return new MySQLComStmtSendLongDataExecutor((MySQLComStmtSendLongDataPacket) commandPacket, connectionSession); case COM_STMT_RESET: return new MySQLComStmtResetExecutor((MySQLComStmtResetPacket) commandPacket, connectionSession); case COM_STMT_CLOSE: return new MySQLComStmtCloseExecutor((MySQLComStmtClosePacket) commandPacket, connectionSession); case COM_SET_OPTION: return new MySQLComSetOptionExecutor((MySQLComSetOptionPacket) commandPacket, connectionSession); case COM_RESET_CONNECTION: return new MySQLComResetConnectionExecutor(connectionSession); default: return new MySQLUnsupportedCommandExecutor(commandPacketType); } }
@Test
void assertNewInstanceWithComPing() throws SQLException {
    // COM_PING must be dispatched to the dedicated ping executor.
    CommandExecutor actual = MySQLCommandExecutorFactory.newInstance(MySQLCommandPacketType.COM_PING, mock(CommandPacket.class), connectionSession);
    assertThat(actual, instanceOf(MySQLComPingExecutor.class));
}
/**
 * FEEL {@code string()} conversion: renders the given value using the FEEL
 * string representation. A {@code null} input yields a successful {@code null}
 * result (not an error), per FEEL semantics.
 */
public FEELFnResult<String> invoke(@ParameterName("from") Object val) {
    return val == null
            ? FEELFnResult.ofResult( null )
            : FEELFnResult.ofResult( TypeUtil.formatValue( val, false ) );
}
@Test
void invokeDurationHours() {
    // string() renders java.time.Duration in FEEL's day/time duration form:
    // 200 hours is normalized to days+hours (P8DT8H), negatives get a leading sign.
    FunctionTestUtil.assertResult(stringFunction.invoke(Duration.ofHours(9)), "PT9H");
    FunctionTestUtil.assertResult(stringFunction.invoke(Duration.ofHours(200)), "P8DT8H");
    FunctionTestUtil.assertResult(stringFunction.invoke(Duration.ofHours(-200)), "-P8DT8H");
}
/**
 * Maps a parsed XML root element to the corresponding XMPP stanza type
 * (iq / message / presence), logging the raw XML on success.
 *
 * @throws UnsupportedStanzaTypeException if the element name is not a known stanza
 */
protected Packet recognizeAndReturnXmppPacket(Element root) throws UnsupportedStanzaTypeException, IllegalArgumentException {
    checkNotNull(root);
    final String stanzaName = root.getName();
    final Packet packet;
    if (stanzaName.equals(XmppConstants.IQ_QNAME)) {
        packet = new IQ(root);
    } else if (stanzaName.equals(XmppConstants.MESSAGE_QNAME)) {
        packet = new Message(root);
    } else if (stanzaName.equals(XmppConstants.PRESENCE_QNAME)) {
        packet = new Presence(root);
    } else {
        throw new UnsupportedStanzaTypeException("Unrecognized XMPP Packet");
    }
    logger.info("XMPP Packet received\n" + root.asXML());
    return packet;
}
@Test
public void testRecognizePacket() throws Exception {
    // Each well-known stanza element must map to its concrete Packet subtype.
    Packet iqPacket = xmppDecoder.recognizeAndReturnXmppPacket(iqElement);
    assertThat(iqPacket, is(instanceOf(IQ.class)));
    Packet messagePacket = xmppDecoder.recognizeAndReturnXmppPacket(messageElement);
    assertThat(messagePacket, is(instanceOf(Message.class)));
    Packet presencePacket = xmppDecoder.recognizeAndReturnXmppPacket(presenceElement);
    assertThat(presencePacket, is(instanceOf(Presence.class)));
    // An unrecognized element must raise UnsupportedStanzaTypeException.
    Element wrongElement = new DefaultElement("test");
    try {
        xmppDecoder.recognizeAndReturnXmppPacket(wrongElement);
        // BUG FIX: previously the test passed silently if no exception was thrown.
        // fail() raises AssertionError (an Error), so it is not swallowed by the catch below.
        org.junit.Assert.fail("Expected UnsupportedStanzaTypeException for unrecognized stanza");
    } catch (Exception e) {
        assertThat(e, is(instanceOf(UnsupportedStanzaTypeException.class)));
    }
}
/**
 * Creates a new PDF document populated from the given text.
 *
 * @param text the text to lay out into the document
 * @return the newly created document; the caller is responsible for closing it
 * @throws IOException if the text cannot be read or the document cannot be populated
 */
public PDDocument createPDFFromText( Reader text ) throws IOException {
    PDDocument doc = new PDDocument();
    try {
        createPDFFromText(doc, text);
        return doc;
    } catch (IOException | RuntimeException e) {
        // BUG FIX: previously the in-progress document leaked if population failed.
        doc.close();
        throw e;
    }
}
@Test
void testLeadingTrailingSpaces() throws IOException {
    // Round-trip: text with leading/trailing spaces and a blank line goes through
    // TextToPDF, then PDFTextStripper must recover the same text (modulo trim()).
    TextToPDF pdfCreator = new TextToPDF();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    String text = "Lorem ipsum dolor sit amet,\n" + "    consectetur adipiscing \n" + "\n" + "elit. sed do eiusmod";
    StringReader reader = new StringReader(text);
    try (PDDocument doc = pdfCreator.createPDFFromText(reader)) {
        doc.save(baos);
    }
    try (PDDocument doc = Loader.loadPDF(baos.toByteArray())) {
        assertEquals(1, doc.getNumberOfPages());
        PDFTextStripper stripper = new PDFTextStripper();
        stripper.setLineSeparator("\n");
        stripper.setParagraphStart("\n");
        assertEquals(text, stripper.getText(doc).trim());
    }
}
public FEELFnResult<TemporalAccessor> invoke(@ParameterName("from") String val) { if ( val == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null")); } try { TemporalAccessor parsed = FEEL_TIME.parse(val); if (parsed.query(TemporalQueries.offset()) != null) { // it is an offset-zoned time, so I can know for certain an OffsetTime OffsetTime asOffSetTime = parsed.query(OffsetTime::from); return FEELFnResult.ofResult(asOffSetTime); } else if (parsed.query(TemporalQueries.zone()) == null) { // if it does not contain any zone information at all, then I know for certain is a local time. LocalTime asLocalTime = parsed.query(LocalTime::from); return FEELFnResult.ofResult(asLocalTime); } else if (parsed.query(TemporalQueries.zone()) != null) { boolean hasSeconds = timeStringWithSeconds(val); LocalTime asLocalTime = parsed.query(LocalTime::from); ZoneId zoneId = parsed.query(TemporalQueries.zone()); ZoneTime zoneTime = ZoneTime.of(asLocalTime, zoneId, hasSeconds); return FEELFnResult.ofResult(zoneTime); } return FEELFnResult.ofResult(parsed); } catch (DateTimeException e) { return manageDateTimeException(e, val); } }
@Test
void invokeTimeUnitsParamsWithOffset() {
    // time(h, m, s, offset): a Duration offset argument becomes the ZoneOffset
    // of the resulting OffsetTime, for both positive and negative offsets.
    FunctionTestUtil.assertResult(timeFunction.invoke(10, 43, 15, Duration.ofHours(1)), OffsetTime.of(10, 43, 15, 0, ZoneOffset.ofHours(1)));
    FunctionTestUtil.assertResult(timeFunction.invoke(10, 43, 15, Duration.ofHours(-1)), OffsetTime.of(10, 43, 15 , 0, ZoneOffset.ofHours(-1)));
}
/**
 * Fetches the JWK set by first resolving the {@code jwks_uri} from the OIDC
 * discovery document and then delegating the fetch to a URL-based source.
 * Wraps I/O failures into {@link RemoteKeySourceException}.
 */
@Override
public JWKSet getJWKSet(JWKSetCacheRefreshEvaluator refreshEvaluator, long currentTime, T context) throws KeySourceException {
    var jwksUrl = discoverJwksUrl();
    // The delegate is short-lived and closed via try-with-resources.
    try (var jwkSetSource = new URLBasedJWKSetSource<>(jwksUrl, new HttpRetriever(httpClient))) {
        // NOTE(review): refreshEvaluator/currentTime are not forwarded — the delegate
        // is always called with (null, 0); presumably caching happens in an outer
        // layer — TODO confirm.
        return jwkSetSource.getJWKSet(null, 0, context);
    } catch (IOException e) {
        throw new RemoteKeySourceException(
                "failed to fetch jwks from discovery document '%s'".formatted(discoveryUrl), e);
    }
}
@Test
void getJWKSet_badCode(WireMockRuntimeInfo wm) {
    // Discovery succeeds but the JWKS endpoint returns 503; the source must
    // surface this as a JWKSetRetrievalException rather than returning a partial set.
    var discoveryUrl = URI.create(wm.getHttpBaseUrl()).resolve(DISCOVERY_PATH);
    var jwksUrl = URI.create(wm.getHttpBaseUrl()).resolve(JWKS_PATH);
    stubFor(get(DISCOVERY_PATH).willReturn(okJson("{\"jwks_uri\": \"%s\"}".formatted(jwksUrl))));
    stubFor(get(JWKS_PATH).willReturn(serviceUnavailable()));
    var sut = new DiscoveryJwkSetSource<>(HttpClient.newHttpClient(), discoveryUrl);
    assertThrows(JWKSetRetrievalException.class, () -> sut.getJWKSet(null, 0, null));
}
/**
 * Merges per-subcluster application reports into a single federated view.
 * Reports with an AM HTTP address are treated as the managing AM; reports
 * without one are UAM (unmanaged AM) fragments that get folded into their AM,
 * or summed together while no AM has been seen yet.
 *
 * @param appsInfo            raw reports from all subclusters
 * @param returnPartialResult if true, leftover UAM-only entries are included
 *                            even when they look like federation-internal apps
 */
public static AppsInfo mergeAppsInfo(ArrayList<AppInfo> appsInfo, boolean returnPartialResult) {
    AppsInfo allApps = new AppsInfo();
    // AppId -> report of the managing AM seen so far.
    Map<String, AppInfo> federationAM = new HashMap<>();
    // AppId -> running sum of UAM reports seen before their AM (if any).
    Map<String, AppInfo> federationUAMSum = new HashMap<>();
    for (AppInfo a : appsInfo) {
        // Check if this AppInfo is an AM
        if (a.getAMHostHttpAddress() != null) {
            // Insert in the list of AM
            federationAM.put(a.getAppId(), a);
            // Check if there are any UAM found before
            if (federationUAMSum.containsKey(a.getAppId())) {
                // Merge the current AM with the found UAM
                mergeAMWithUAM(a, federationUAMSum.get(a.getAppId()));
                // Remove the sum of the UAMs
                federationUAMSum.remove(a.getAppId());
            }
            // This AppInfo is an UAM
        } else {
            if (federationAM.containsKey(a.getAppId())) {
                // Merge the current UAM with its own AM
                mergeAMWithUAM(federationAM.get(a.getAppId()), a);
            } else if (federationUAMSum.containsKey(a.getAppId())) {
                // Merge the current UAM with its own UAM and update the list of UAM
                federationUAMSum.put(a.getAppId(), mergeUAMWithUAM(federationUAMSum.get(a.getAppId()), a));
            } else {
                // Insert in the list of UAM
                federationUAMSum.put(a.getAppId(), a);
            }
        }
    }
    // Check the remaining UAMs are depending or not from federation
    for (AppInfo a : federationUAMSum.values()) {
        if (returnPartialResult || (a.getName() != null && !(a.getName().startsWith(UnmanagedApplicationManager.APP_NAME) || a.getName().startsWith(PARTIAL_REPORT)))) {
            federationAM.put(a.getAppId(), a);
        }
    }
    allApps.addAll(new ArrayList<>(federationAM.values()));
    return allApps;
}
@Test
public void testMergeAppsFinished() {
    // One AM plus two UAM fragments for the same app id: the merge must collapse
    // them into a single report whose resource counters are the sum of all three.
    AppsInfo apps = new AppsInfo();
    String amHost = "http://i_am_the_AM1:1234";
    AppInfo am = new AppInfo();
    am.setAppId(APPID1.toString());
    am.setAMHostHttpAddress(amHost);
    am.setState(YarnApplicationState.FINISHED);
    int value = 1000;
    setAppInfoFinished(am, value);
    apps.add(am);
    // UAM fragments have no AM host address.
    AppInfo uam1 = new AppInfo();
    uam1.setAppId(APPID1.toString());
    apps.add(uam1);
    setAppInfoFinished(uam1, value);
    AppInfo uam2 = new AppInfo();
    uam2.setAppId(APPID1.toString());
    apps.add(uam2);
    setAppInfoFinished(uam2, value);
    // in this case the result does not change if we enable partial result
    AppsInfo result = RouterWebServiceUtil.mergeAppsInfo(apps.getApps(), false);
    Assert.assertNotNull(result);
    Assert.assertEquals(1, result.getApps().size());
    AppInfo app = result.getApps().get(0);
    Assert.assertEquals(APPID1.toString(), app.getAppId());
    Assert.assertEquals(amHost, app.getAMHostHttpAddress());
    // Each counter was `value` on all three reports → merged total is value * 3.
    Assert.assertEquals(value * 3, app.getPreemptedResourceMB());
    Assert.assertEquals(value * 3, app.getPreemptedResourceVCores());
    Assert.assertEquals(value * 3, app.getNumNonAMContainerPreempted());
    Assert.assertEquals(value * 3, app.getNumAMContainerPreempted());
    Assert.assertEquals(value * 3, app.getPreemptedMemorySeconds());
    Assert.assertEquals(value * 3, app.getPreemptedVcoreSeconds());
}
/**
 * Loads a tenant package and validates that it exists and is enabled.
 *
 * @param id the package id
 * @return the valid package
 */
@Override
public TenantPackageDO validTenantPackage(Long id) {
    // A missing row is a business error, not a null return.
    TenantPackageDO pkg = tenantPackageMapper.selectById(id);
    if (pkg == null) {
        throw exception(TENANT_PACKAGE_NOT_EXISTS);
    }
    // Disabled packages cannot be assigned to tenants.
    if (pkg.getStatus().equals(CommonStatusEnum.DISABLE.getStatus())) {
        throw exception(TENANT_PACKAGE_DISABLE, pkg.getName());
    }
    return pkg;
}
@Test
public void testValidTenantPackage_notExists() {
    // Prepare parameters: a random id with no backing row.
    Long id = randomLongId();
    // Invoke and assert the expected business exception.
    assertServiceException(() -> tenantPackageService.validTenantPackage(id), TENANT_PACKAGE_NOT_EXISTS);
}
/**
 * @return {@code true} when the currently bound branch type is SAGA
 */
public static boolean inSagaBranch() {
    return getBranchType() == BranchType.SAGA;
}
@Test
public void testInSagaBranch() {
    RootContext.bind(DEFAULT_XID);
    // No branch type bound yet → not a SAGA branch.
    assertThat(RootContext.inSagaBranch()).isFalse();
    // Binding SAGA flips the predicate; unbinding restores it.
    RootContext.bindBranchType(BranchType.SAGA);
    assertThat(RootContext.inSagaBranch()).isTrue();
    RootContext.unbindBranchType();
    assertThat(RootContext.inSagaBranch()).isFalse();
    RootContext.unbind();
}
/**
 * REST endpoint that forwards a "get wid documents" request into the RDA
 * activation flow ({@code Action.AWAIT_DOCUMENTS}).
 */
@Operation(summary = "get wid documents for rda", tags = { SwaggerConfig.ACTIVATE_RDA , SwaggerConfig.UPGRADE_LOGIN_LEVEL, SwaggerConfig.WIDCHECKER_RAISE_TO_SUB, SwaggerConfig.REQUEST_ACCOUNT_AND_APP}, operationId = "digidRdaDocuments",
    parameters = {@Parameter(ref = "API-V"), @Parameter(ref = "OS-T"), @Parameter(ref = "APP-V"), @Parameter(ref = "OS-V"), @Parameter(ref = "REL-T")})
@PostMapping(value = "rda/documents", produces = "application/json")
@ResponseBody
public AppResponse getWidDocuments(@Valid @RequestBody AppSessionRequest request) throws FlowNotDefinedException, NoSuchAlgorithmException, FlowStateNotDefinedException, IOException, SharedServiceClientException {
    // Thin delegation: all business logic lives in the flow service.
    return service.processAction(ActivationFlowFactory.TYPE, Action.AWAIT_DOCUMENTS, request);
}
@Test
void validateIfCorrectProcessesAreCalledGetWidDocuments() throws FlowNotDefinedException, NoSuchAlgorithmException, IOException, FlowStateNotDefinedException, SharedServiceClientException {
    // The controller must delegate exactly once to the flow service.
    AppSessionRequest request = new AppSessionRequest();
    activationController.getWidDocuments(request);
    verify(flowService, times(1)).processAction(anyString(), any(Action.class), any(AppSessionRequest.class));
}
/**
 * Entry point for reading from Cosmos DB: creates a {@link Read} transform
 * configured to deserialize documents into the given class.
 *
 * @param classType target entity class
 */
public static <T> Read<T> read(Class<T> classType) {
    AutoValue_CosmosIO_Read.Builder<T> builder = new AutoValue_CosmosIO_Read.Builder<>();
    return builder.setClassType(classType).build();
}
@Test
public void testRead() {
    // Read the fixture container and assert the expected document count (4).
    PCollection<Family> output = pipeline.apply(
        CosmosIO.read(Family.class)
            .withContainer(CONTAINER)
            .withDatabase(DATABASE)
            .withCoder(SerializableCoder.of(Family.class)));
    PAssert.thatSingleton(output.apply("Count", Count.globally())).isEqualTo(4L);
    pipeline.run();
}
/**
 * Deletes a file: validates existence, removes the binary from the backing
 * storage client, then deletes the database record.
 *
 * @param id file record id
 * @throws Exception if the storage client fails to delete the binary
 */
@Override
public void deleteFile(Long id) throws Exception {
    // Validate the record exists (throws FILE_NOT_EXISTS otherwise).
    FileDO file = validateFileExists(id);
    // Delete the binary from the configured file storage client.
    FileClient client = fileConfigService.getFileClient(file.getConfigId());
    Assert.notNull(client, "客户端({}) 不能为空", file.getConfigId());
    client.delete(file.getPath());
    // Remove the database record last, after storage deletion succeeded.
    fileMapper.deleteById(id);
}
@Test
public void testDeleteFile_notExists() {
    // Prepare parameters: a random id with no backing record.
    Long id = randomLongId();
    // Invoke and assert the expected business exception.
    assertServiceException(() -> fileService.deleteFile(id), FILE_NOT_EXISTS);
}
/**
 * Writes a single byte by delegating directly to the wrapped stream.
 */
@Override
public void write(int b) throws IOException {
    dataOut.write(b);
}
@Test
public void testWriteForBOffLen() throws Exception {
    // Verifies the (byte[], off, len) overload is forwarded verbatim to the wrapped stream.
    byte[] someInput = new byte[1];
    dataOutputStream.write(someInput, 0, someInput.length);
    verify(mockOutputStream).write(someInput, 0, someInput.length);
}
/**
 * Splits a bounded source on behalf of the service, honoring a bundle-count
 * limit and an API response byte limit.
 *
 * @param numBundlesLimit maximum number of bundles in the response
 * @param apiByteLimit    maximum serialized response size in bytes
 * @throws IllegalArgumentException if the source is not a BoundedSource
 */
static SourceOperationResponse performSplitWithApiLimit(
    SourceSplitRequest request, PipelineOptions options, int numBundlesLimit, long apiByteLimit)
    throws Exception {
    // Compute the desired bundle size given by the service, or default if none was provided.
    long desiredBundleSizeBytes = DEFAULT_DESIRED_BUNDLE_SIZE_BYTES;
    SourceSplitOptions splitOptions = request.getOptions();
    if (splitOptions != null && splitOptions.getDesiredBundleSizeBytes() != null) {
        desiredBundleSizeBytes = splitOptions.getDesiredBundleSizeBytes();
    }
    Source<?> anySource = deserializeFromCloudSource(request.getSource().getSpec());
    // Only bounded sources can be split up-front.
    checkArgument(
        anySource instanceof BoundedSource, "Cannot split a non-Bounded source: %s", anySource);
    return performSplitTyped(
        options, (BoundedSource<?>) anySource, desiredBundleSizeBytes, numBundlesLimit, apiByteLimit);
}
@Test
public void testSplittingProducedResponseUnderLimit() throws Exception {
    // A source producing many sub-sources initially exceeds the API byte limit;
    // the worker should log the overflow and then re-bundle under the limit.
    SourceProducingSubSourcesInSplit source = new SourceProducingSubSourcesInSplit(200, 10_000);
    com.google.api.services.dataflow.model.Source cloudSource = translateIOToCloudSource(source, options);
    SourceSplitRequest splitRequest = new SourceSplitRequest();
    splitRequest.setSource(cloudSource);
    // Capture worker log output to assert on the warnings it emits.
    ExpectedLogs.LogSaver logSaver = new ExpectedLogs.LogSaver();
    LogManager.getLogManager()
        .getLogger("org.apache.beam.runners.dataflow.worker.WorkerCustomSources")
        .addHandler(logSaver);
    WorkerCustomSources.performSplitWithApiLimit(splitRequest, options, 100, 10_000);
    // verify initial split is not valid
    verifyLogged(
        ExpectedLogs.matcher(Level.WARNING, "this is too large for the Google Cloud Dataflow API"),
        logSaver);
    // verify that re-bundle is effective
    verifyLogged(ExpectedLogs.matcher(Level.WARNING, "Re-bundle source"), logSaver);
}
/**
 * Intentionally a no-op: upstream watermark statuses are swallowed here
 * (this operator derives its own watermarks, so incoming idle/active
 * statuses must not be forwarded downstream).
 */
@Override
public void processWatermarkStatus(WatermarkStatus watermarkStatus) throws Exception {}
@Test
void inputStatusesAreNotForwarded() throws Exception {
    // Sending an IDLE status into the operator must produce no output at all,
    // even after enough processing time passes to trigger periodic watermarks.
    OneInputStreamOperatorTestHarness<Long, Long> testHarness =
        createTestHarness(
            WatermarkStrategy.forGenerator((ctx) -> new PeriodicWatermarkGenerator())
                .withTimestampAssigner((ctx) -> new LongExtractor()));
    testHarness.processWatermarkStatus(WatermarkStatus.IDLE);
    testHarness.setProcessingTime(AUTO_WATERMARK_INTERVAL);
    assertThat(testHarness.getOutput()).isEmpty();
}
/**
 * Records a state-machine error and rolls the iterator back {@code ntail}
 * entries so they can be retried/inspected.
 *
 * @param ntail number of entries to roll back; must be positive
 * @param st    status describing the failure; may be null
 * @throws IllegalArgumentException if ntail is not positive
 */
public void setErrorAndRollback(final long ntail, final Status st) {
    Requires.requireTrue(ntail > 0, "Invalid ntail=" + ntail);
    // If we are not positioned on a DATA entry, the current entry was not
    // consumed, so roll back the full ntail; otherwise the current entry
    // counts as one of the ntail, hence ntail - 1.
    if (this.currEntry == null || this.currEntry.getType() != EnumOutter.EntryType.ENTRY_TYPE_DATA) {
        this.currentIndex -= ntail;
    } else {
        this.currentIndex -= ntail - 1;
    }
    if (fsmCommittedIndex >= 0) {
        // can't roll back before fsmCommittedIndex.
        this.currentIndex = Math.max(this.currentIndex, fsmCommittedIndex + 1);
    }
    this.currEntry = null;
    // Record the error on the (lazily created) error holder.
    getOrCreateError().setType(EnumOutter.ErrorType.ERROR_TYPE_STATE_MACHINE);
    getOrCreateError().getStatus().setError(RaftError.ESTATEMACHINE,
        "StateMachine meet critical error when applying one or more tasks since index=%d, %s", this.currentIndex,
        st != null ? st.toString() : "none");
}
// A non-positive ntail must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void testSetErrorAndRollbackInvalid() {
    this.iter.setErrorAndRollback(-1, null);
}
/**
 * Identifies this mapper as the Derby-specific implementation.
 */
@Override
public String getDataSource() {
    return DataSourceConstant.DERBY;
}
@Test
void testGetDataSource() {
    // The Derby-backed mapper must report the Derby data source constant.
    assertEquals(DataSourceConstant.DERBY, configInfoTagMapperByDerby.getDataSource());
}
/**
 * Registers the RocksDB "number-of-background-errors" gauge under the given
 * metric context, backed by the supplied value provider.
 */
public static void addBackgroundErrorsMetric(final StreamsMetricsImpl streamsMetrics,
                                             final RocksDBMetricContext metricContext,
                                             final Gauge<BigInteger> valueProvider) {
    addMutableMetric(
        streamsMetrics,
        metricContext,
        valueProvider,
        NUMBER_OF_BACKGROUND_ERRORS,
        TOTAL_NUMBER_OF_BACKGROUND_ERRORS_DESCRIPTION
    );
}
@Test
public void shouldAddBackgroundErrorsMetric() {
    // Expected metric name/description must match the constants used by
    // RocksDBMetrics.addBackgroundErrorsMetric.
    final String name = "background-errors";
    final String description = "Total number of background errors";
    runAndVerifyMutableMetric(
        name,
        description,
        () -> RocksDBMetrics.addBackgroundErrorsMetric(streamsMetrics, ROCKSDB_METRIC_CONTEXT, VALUE_PROVIDER)
    );
}
/**
 * Builds a checkpoint for a consumer group's committed offset, translating the
 * upstream offset to its downstream equivalent. Returns empty when there is no
 * committed offset or no translation is available yet.
 */
Optional<Checkpoint> checkpoint(String group, TopicPartition topicPartition,
                                OffsetAndMetadata offsetAndMetadata) {
    if (offsetAndMetadata == null) {
        return Optional.empty();
    }
    long upstreamOffset = offsetAndMetadata.offset();
    OptionalLong downstreamOffset =
        offsetSyncStore.translateDownstream(group, topicPartition, upstreamOffset);
    if (!downstreamOffset.isPresent()) {
        return Optional.empty();
    }
    return Optional.of(new Checkpoint(group, renameTopicPartition(topicPartition),
        upstreamOffset, downstreamOffset.getAsLong(), offsetAndMetadata.metadata()));
}
@Test
public void testCheckpoint() {
    long t1UpstreamOffset = 3L;
    long t1DownstreamOffset = 4L;
    long t2UpstreamOffset = 7L;
    long t2DownstreamOffset = 8L;
    OffsetSyncStoreTest.FakeOffsetSyncStore offsetSyncStore = new OffsetSyncStoreTest.FakeOffsetSyncStore();
    offsetSyncStore.start(true);
    MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source1", "target2", new DefaultReplicationPolicy(), offsetSyncStore, Collections.emptySet(), Collections.emptyMap(), new CheckpointStore(Collections.emptyMap()));
    // Seed one sync for a source topic and one for an already-remote topic.
    offsetSyncStore.sync(new TopicPartition("topic1", 2), t1UpstreamOffset, t1DownstreamOffset);
    offsetSyncStore.sync(new TopicPartition("target2.topic5", 6), t2UpstreamOffset, t2DownstreamOffset);
    // Case 1: committed offset (10) is past the sync point (3) → topic is renamed
    // with the source alias and the downstream offset is sync downstream + 1.
    Optional<Checkpoint> optionalCheckpoint1 = mirrorCheckpointTask.checkpoint("group9", new TopicPartition("topic1", 2), new OffsetAndMetadata(10, null));
    assertTrue(optionalCheckpoint1.isPresent());
    Checkpoint checkpoint1 = optionalCheckpoint1.get();
    SourceRecord sourceRecord1 = mirrorCheckpointTask.checkpointRecord(checkpoint1, 123L);
    assertEquals(new TopicPartition("source1.topic1", 2), checkpoint1.topicPartition(), "checkpoint group9 source1.topic1 failed");
    assertEquals("group9", checkpoint1.consumerGroupId(), "checkpoint group9 consumerGroupId failed");
    assertEquals("group9", Checkpoint.unwrapGroup(sourceRecord1.sourcePartition()), "checkpoint group9 sourcePartition failed");
    assertEquals(10, checkpoint1.upstreamOffset(), "checkpoint group9 upstreamOffset failed");
    assertEquals(t1DownstreamOffset + 1, checkpoint1.downstreamOffset(), "checkpoint group9 downstreamOffset failed");
    assertEquals(123L, sourceRecord1.timestamp().longValue(), "checkpoint group9 timestamp failed");
    // Case 2: topic already carries the target prefix → the prefix is stripped on rename.
    Optional<Checkpoint> optionalCheckpoint2 = mirrorCheckpointTask.checkpoint("group11", new TopicPartition("target2.topic5", 6), new OffsetAndMetadata(12, null));
    assertTrue(optionalCheckpoint2.isPresent());
    Checkpoint checkpoint2 = optionalCheckpoint2.get();
    SourceRecord sourceRecord2 = mirrorCheckpointTask.checkpointRecord(checkpoint2, 234L);
    assertEquals(new TopicPartition("topic5", 6), checkpoint2.topicPartition(), "checkpoint group11 topic5 failed");
    assertEquals("group11", checkpoint2.consumerGroupId(), "checkpoint group11 consumerGroupId failed");
    assertEquals("group11", Checkpoint.unwrapGroup(sourceRecord2.sourcePartition()), "checkpoint group11 sourcePartition failed");
    assertEquals(12, checkpoint2.upstreamOffset(), "checkpoint group11 upstreamOffset failed");
    assertEquals(t2DownstreamOffset + 1, checkpoint2.downstreamOffset(), "checkpoint group11 downstreamOffset failed");
    assertEquals(234L, sourceRecord2.timestamp().longValue(), "checkpoint group11 timestamp failed");
    // Case 3: committed offset equals the sync point exactly → downstream offset is
    // used as-is (no +1).
    Optional<Checkpoint> optionalCheckpoint3 = mirrorCheckpointTask.checkpoint("group13", new TopicPartition("target2.topic5", 6), new OffsetAndMetadata(7, null));
    assertTrue(optionalCheckpoint3.isPresent());
    Checkpoint checkpoint3 = optionalCheckpoint3.get();
    SourceRecord sourceRecord3 = mirrorCheckpointTask.checkpointRecord(checkpoint3, 234L);
    assertEquals(new TopicPartition("topic5", 6), checkpoint3.topicPartition(), "checkpoint group13 topic5 failed");
    assertEquals("group13", checkpoint3.consumerGroupId(), "checkpoint group13 consumerGroupId failed");
    assertEquals("group13", Checkpoint.unwrapGroup(sourceRecord3.sourcePartition()), "checkpoint group13 sourcePartition failed");
    assertEquals(t2UpstreamOffset, checkpoint3.upstreamOffset(), "checkpoint group13 upstreamOffset failed");
    assertEquals(t2DownstreamOffset, checkpoint3.downstreamOffset(), "checkpoint group13 downstreamOffset failed");
    assertEquals(234L, sourceRecord3.timestamp().longValue(), "checkpoint group13 timestamp failed");
}
/**
 * Reacts to cluster membership changes by refreshing the RPC clients for all
 * members except this node. Failures are logged and swallowed so membership
 * event dispatch is never interrupted.
 */
@Override
public void onEvent(MembersChangeEvent event) {
    try {
        List<Member> members = serverMemberManager.allMembersWithoutSelf();
        refresh(members);
    } catch (NacosException e) {
        // Best-effort: log and continue; a later event will retry the refresh.
        Loggers.CLUSTER.warn("[serverlist] fail to refresh cluster rpc client, event:{}, msg: {} ", event, e.getMessage());
    }
}
@Test
void testOnEvent() {
    // onEvent catches NacosException internally, so any exception escaping this
    // call is a genuine failure. Previously the test caught Exception, called
    // e.printStackTrace() and fail(e.getMessage()), which loses the stack trace;
    // letting the exception propagate fails the test with full context instead.
    clusterRpcClientProxy.onEvent(MembersChangeEvent.builder().build());
}
/**
 * Transforms a JSON message body into the DynamoDB (Ddb2) headers expected by
 * the component: operation, item/key attribute maps and return-values defaults.
 * No-op when the message already carries ITEM or KEY headers.
 */
@Override
public void transform(Message message, DataType fromType, DataType toType) {
    // Already transformed — nothing to do.
    if (message.getHeaders().containsKey(Ddb2Constants.ITEM) || message.getHeaders().containsKey(Ddb2Constants.KEY)) {
        return;
    }
    JsonNode jsonBody = getBodyAsJsonNode(message);
    // Operation resolution precedence (last writer wins):
    // JSON "operation" field (default PutItem) < exchange property < OPERATION header.
    String operation = Optional.ofNullable(jsonBody.get("operation")).map(JsonNode::asText).orElse(Ddb2Operations.PutItem.name());
    if (message.getExchange().hasProperties() && message.getExchange().getProperty("operation", String.class) != null) {
        operation = message.getExchange().getProperty("operation", String.class);
    }
    if (message.getHeaders().containsKey(Ddb2Constants.OPERATION)) {
        operation = message.getHeader(Ddb2Constants.OPERATION, Ddb2Operations.class).name();
    }
    JsonNode key = jsonBody.get("key");
    JsonNode item = jsonBody.get("item");
    Map<String, Object> keyProps;
    if (key != null) {
        keyProps = dataFormat.getObjectMapper().convertValue(key, new TypeReference<>() {
        });
    } else {
        // No explicit "key" node: treat the whole JSON body as the key.
        keyProps = dataFormat.getObjectMapper().convertValue(jsonBody, new TypeReference<>() {
        });
    }
    Map<String, Object> itemProps;
    if (item != null) {
        itemProps = dataFormat.getObjectMapper().convertValue(item, new TypeReference<>() {
        });
    } else {
        // No explicit "item": fall back to the key properties.
        itemProps = keyProps;
    }
    final Map<String, AttributeValue> keyMap = getAttributeValueMap(keyProps);
    switch (Ddb2Operations.valueOf(operation)) {
        case PutItem:
            message.setHeader(Ddb2Constants.OPERATION, Ddb2Operations.PutItem);
            message.setHeader(Ddb2Constants.ITEM, getAttributeValueMap(itemProps));
            // Defaults only applied if the caller did not set RETURN_VALUES already.
            setHeaderIfNotPresent(Ddb2Constants.RETURN_VALUES, ReturnValue.ALL_OLD.toString(), message);
            break;
        case UpdateItem:
            message.setHeader(Ddb2Constants.OPERATION, Ddb2Operations.UpdateItem);
            message.setHeader(Ddb2Constants.KEY, keyMap);
            message.setHeader(Ddb2Constants.UPDATE_VALUES, getAttributeValueUpdateMap(itemProps));
            setHeaderIfNotPresent(Ddb2Constants.RETURN_VALUES, ReturnValue.ALL_NEW.toString(), message);
            break;
        case DeleteItem:
            message.setHeader(Ddb2Constants.OPERATION, Ddb2Operations.DeleteItem);
            message.setHeader(Ddb2Constants.KEY, keyMap);
            setHeaderIfNotPresent(Ddb2Constants.RETURN_VALUES, ReturnValue.ALL_OLD.toString(), message);
            break;
        default:
            throw new UnsupportedOperationException(String.format("Unsupported operation '%s'", operation));
    }
}
@Test
void shouldFailForWrongBodyType() throws Exception {
    // A non-JSON plain-string body cannot be parsed into a JsonNode, so the
    // transformer must fail with a CamelExecutionException.
    Exchange exchange = new DefaultExchange(camelContext);
    exchange.getMessage().setBody("Hello");
    Assertions.assertThrows(CamelExecutionException.class,
        () -> transformer.transform(exchange.getMessage(), DataType.ANY, new DataType(AWS_2_DDB_APPLICATION_JSON_TRANSFORMER)));
}
/**
 * NOTE(review): returns a fresh default instance rather than calling
 * super.clone(), so no instance state of Tcp is copied — confirm Tcp is
 * stateless (the equals/hashCode-based clone test only holds if two default
 * instances compare equal).
 */
@Override
public Tcp clone() throws CloneNotSupportedException {
    return new Tcp();
}
@Test
void testClone() throws CloneNotSupportedException {
    // A clone of a default Tcp must be equal to the original with the same hash.
    Tcp original = new Tcp();
    Tcp cloned = original.clone();
    assertEquals(original.hashCode(), cloned.hashCode());
    assertEquals(original, cloned);
}
/**
 * Returns a description of this topology.
 * synchronized, matching other methods that mutate internalTopologyBuilder —
 * presumably to give a consistent snapshot under concurrent modification
 * (TODO confirm against the class's locking convention).
 */
public synchronized TopologyDescription describe() {
    return internalTopologyBuilder.describe();
}
@Test
public void sessionWindowNamedMaterializedCountShouldPreserveTopologyStructure() {
    // A session-windowed count materialized into a NAMED in-memory store must
    // keep the simple source -> aggregate topology shape, and the resulting
    // topology must have no persistent local stores.
    final StreamsBuilder builder = new StreamsBuilder();
    builder.stream("input-topic")
        .groupByKey()
        .windowedBy(SessionWindows.with(ofMillis(1)))
        .count(Materialized.<Object, Long, SessionStore<Bytes, byte[]>>as("count-store")
            .withStoreType(Materialized.StoreType.IN_MEMORY));
    final Topology topology = builder.build();
    final TopologyDescription describe = topology.describe();
    assertEquals(
        "Topologies:\n" +
            "   Sub-topology: 0\n" +
            "    Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
            "      --> KSTREAM-AGGREGATE-0000000001\n" +
            "    Processor: KSTREAM-AGGREGATE-0000000001 (stores: [count-store])\n" +
            "      --> none\n" +
            "      <-- KSTREAM-SOURCE-0000000000\n\n",
        describe.toString()
    );
    topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    // In-memory store → hasPersistentLocalStore() must be false.
    assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(false));
}