focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Marks this response as HTTP 204 No Content and returns {@code this} for fluent chaining.
 * No body is written; only the status code is set on the underlying stream.
 */
@Override
public Response noContent() {
    final int noContentStatus = 204; // HTTP 204 No Content
    stream.setStatus(noContentStatus);
    return this;
}
// Verifies noContent() sets HTTP status 204 and never opens the output stream (no body is written).
@Test public void test_noContent() throws Exception { underTest.noContent(); verify(response).setStatus(204); verify(response, never()).getOutputStream(); }
/**
 * Executes a paginated HTTP request and aggregates all pages into a single list.
 * A per-page query parameter with the default page size is appended to the initial
 * URL, then readPage() recursively follows pagination, converting each response
 * body to elements via the supplied mapping function.
 */
public static <E> List<E> executePaginatedRequest(String request, OAuth20Service scribe, OAuth2AccessToken accessToken, Function<String, List<E>> function) {
    final List<E> aggregated = new ArrayList<>();
    final String firstPageUrl = addPerPageQueryParameter(request, DEFAULT_PAGE_SIZE);
    readPage(aggregated, scribe, accessToken, firstPageUrl, function);
    return aggregated;
}
// First response advertises a "next" page via the Link header; the second page returns 404,
// which must surface as an IllegalStateException naming the failing URL, HTTP code and body.
@Test public void fail_to_executed_paginated_request() { mockWebServer.enqueue(new MockResponse() .setHeader("Link", "<" + serverUrl + "/test?per_page=100&page=2>; rel=\"next\", <" + serverUrl + "/test?per_page=100&page=2>; rel=\"last\"") .setBody("A")); mockWebServer.enqueue(new MockResponse().setResponseCode(404).setBody("Error!")); assertThatThrownBy(() -> executePaginatedRequest(serverUrl + "/test", oAuth20Service, auth2AccessToken, Arrays::asList)) .isInstanceOf(IllegalStateException.class) .hasMessage(format("Fail to execute request '%s/test?per_page=100&page=2'. HTTP code: 404, response: Error!", serverUrl)); }
// Manual partition assignment for the async consumer. Validates the partition collection
// (null collection rejected; empty collection is treated as unsubscribe; null/blank topics rejected),
// prunes buffered fetch data for partitions no longer assigned, emits an assignment-change event
// (which triggers autocommit when configured, since no rebalance will follow), and requests a
// metadata update if the user assignment actually changed.
@Override public void assign(Collection<TopicPartition> partitions) { acquireAndEnsureOpen(); try { if (partitions == null) { throw new IllegalArgumentException("Topic partitions collection to assign to cannot be null"); } if (partitions.isEmpty()) { unsubscribe(); return; } for (TopicPartition tp : partitions) { String topic = (tp != null) ? tp.topic() : null; if (isBlank(topic)) throw new IllegalArgumentException("Topic partitions to assign to cannot have null or empty topic"); } // Clear the buffered data which are not a part of newly assigned topics final Set<TopicPartition> currentTopicPartitions = new HashSet<>(); for (TopicPartition tp : subscriptions.assignedPartitions()) { if (partitions.contains(tp)) currentTopicPartitions.add(tp); } fetchBuffer.retainAll(currentTopicPartitions); // assignment change event will trigger autocommit if it is configured and the group id is specified. This is // to make sure offsets of topic partitions the consumer is unsubscribing from are committed since there will // be no following rebalance. // // See the ApplicationEventProcessor.process() method that handles this event for more detail. applicationEventHandler.add(new AssignmentChangeEvent(subscriptions.allConsumed(), time.milliseconds())); log.info("Assigned to partition(s): {}", partitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); if (subscriptions.assignFromUser(new HashSet<>(partitions))) applicationEventHandler.add(new NewTopicsMetadataUpdateRequestEvent()); } finally { release(); } }
// assign(null) must be rejected up front with an IllegalArgumentException.
@Test public void testAssignOnNullTopicPartition() { consumer = newConsumer(); assertThrows(IllegalArgumentException.class, () -> consumer.assign(null)); }
/**
 * Returns the dialect's default ordering for NULL values: NULLs sort before non-NULL values.
 */
@Override
public NullsOrderType getDefaultNullsOrderType() {
    return NullsOrderType.FIRST;
}
// Pins the dialect's default NULL ordering to FIRST.
@Test void assertGetDefaultNullsOrderType() { assertThat(dialectDatabaseMetaData.getDefaultNullsOrderType(), is(NullsOrderType.FIRST)); }
/**
 * Parses a Stream Management &lt;enabled/&gt; element from the parser, which must be
 * positioned at the element's start tag. Missing attributes fall back to defaults:
 * resume=false, max=-1, id/location=null. Advances the parser past the end tag.
 */
public static Enabled enabled(XmlPullParser parser) throws XmlPullParserException, IOException {
    ParserUtils.assertAtStartTag(parser);
    final boolean resume = ParserUtils.getBooleanAttribute(parser, "resume", false);
    final String id = parser.getAttributeValue("", "id");
    final String location = parser.getAttributeValue("", "location");
    final int max = ParserUtils.getIntegerAttribute(parser, "max", -1);
    parser.next();
    ParserUtils.assertAtEndTag(parser);
    return new Enabled(id, resume, location, max);
}
// Builds an <enabled/> nonza with all attributes set and verifies the parser round-trips
// id, location, resume and max.
@Test
public void testParseEnabled() throws Exception {
    String stanzaID = "zid615d9";
    boolean resume = true;
    String location = "test";
    int max = 42;
    String enabledStanza = XMLBuilder.create("enabled")
            .a("xmlns", "urn:xmpp:sm:3")
            .a("id", "zid615d9")
            .a("resume", String.valueOf(resume))
            .a("location", location)
            .a("max", String.valueOf(max))
            .asString(outputProperties);
    StreamManagement.Enabled enabledPacket = ParseStreamManagement.enabled(
            PacketParserUtils.getParserFor(enabledStanza));
    assertNotNull(enabledPacket);
    // Fixed: JUnit's assertEquals takes the expected value first; the original call had the
    // arguments swapped, producing a misleading failure message on mismatch.
    assertEquals(stanzaID, enabledPacket.getId());
    assertEquals(location, enabledPacket.getLocation());
    assertEquals(resume, enabledPacket.isResumeSet());
    assertEquals(max, enabledPacket.getMaxResumptionTime());
}
// Resolves SELECT * for a join node: collects column names from the pre-join data sources of
// both sides (optionally filtered to a single source). When this is the final join of a chain
// and the join key is synthetic (and no specific source was requested), the single synthetic
// key column is prepended so it appears in the projection exactly once.
@SuppressWarnings("UnstableApiUsage") @Override public Stream<ColumnName> resolveSelectStar( final Optional<SourceName> sourceName ) { final Stream<ColumnName> names = Stream.of(left, right) .flatMap(JoinNode::getPreJoinProjectDataSources) .filter(s -> !sourceName.isPresent() || sourceName.equals(s.getSourceName())) .flatMap(s -> s.resolveSelectStar(sourceName)); if (sourceName.isPresent() || !joinKey.isSynthetic() || !finalJoin) { return names; } // if we use a synthetic key, we know there's only a single key element final Column syntheticKey = getOnlyElement(getSchema().key()); return Streams.concat(Stream.of(syntheticKey.name()), names); }
// With a nested (multi-)join and no source alias, SELECT * must include columns from all three
// sources in order, and each source must be asked to resolve its own star projection exactly once.
@Test public void shouldResolveUnaliasedSelectStarWithMultipleJoins() { // Given: final JoinNode inner = new JoinNode(new PlanNodeId("foo"), LEFT, joinKey, true, right, right2, empty(), "KAFKA"); final JoinNode joinNode = new JoinNode(nodeId, LEFT, joinKey, true, left, inner, empty(), "KAFKA"); when(left.resolveSelectStar(any())).thenReturn(Stream.of(ColumnName.of("l"))); when(right.resolveSelectStar(any())).thenReturn(Stream.of(ColumnName.of("r"))); when(right2.resolveSelectStar(any())).thenReturn(Stream.of(ColumnName.of("r2"))); // When: final Stream<ColumnName> result = joinNode.resolveSelectStar(empty()); // Then: final List<ColumnName> columns = result.collect(Collectors.toList()); assertThat(columns, contains(ColumnName.of("l"), ColumnName.of("r"), ColumnName.of("r2"))); verify(left).resolveSelectStar(empty()); verify(right).resolveSelectStar(empty()); verify(right2).resolveSelectStar(empty()); }
@Override public Long createLevel(MemberLevelCreateReqVO createReqVO) { // 校验配置是否有效 validateConfigValid(null, createReqVO.getName(), createReqVO.getLevel(), createReqVO.getExperience()); // 插入 MemberLevelDO level = MemberLevelConvert.INSTANCE.convert(createReqVO); memberLevelMapper.insert(level); // 返回 return level.getId(); }
// Happy path: inserting a random but valid level returns a non-null id and the persisted row
// matches the request VO field-for-field.
@Test public void testCreateLevel_success() { // 准备参数 MemberLevelCreateReqVO reqVO = randomPojo(MemberLevelCreateReqVO.class, o -> { o.setDiscountPercent(randomInt()); o.setIcon(randomURL()); o.setBackgroundUrl(randomURL()); o.setStatus(randomCommonStatus()); }); // 调用 Long levelId = levelService.createLevel(reqVO); // 断言 assertNotNull(levelId); // 校验记录的属性是否正确 MemberLevelDO level = memberlevelMapper.selectById(levelId); assertPojoEquals(reqVO, level); }
/** Returns the shared no-op logger instance; callers may use it where logging should be silent. */
public static Logger empty() {
    return EMPTY_LOGGER;
}
// Loggers.empty() must hand back the no-op EmptyLogger implementation.
@Test public void loggersReturnsEmptyInstance() { Logger logger = Loggers.empty(); assertThat(logger, instanceOf(EmptyLogger.class)); }
/**
 * Synchronously commits the current offsets, bounding the wait by the consumer's
 * configured default API timeout. Delegates to the timed overload.
 */
@Override
public void commitSync() {
    commitSync(Duration.ofMillis(defaultApiTimeoutMs));
}
// With an interceptor configured and auto-commit disabled, a successful commitSync must invoke
// the interceptor's onCommit exactly once.
@Test public void testInterceptorCommitSync() { Properties props = requiredConsumerConfigAndGroupId("test-id"); props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName()); props.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false"); consumer = newConsumer(props); assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get()); completeCommitSyncApplicationEventSuccessfully(); consumer.commitSync(mockTopicPartitionOffset()); assertEquals(1, MockConsumerInterceptor.ON_COMMIT_COUNT.get()); }
/**
 * Checks whether the given lock identity currently holds leadership according to the
 * ConfigMap's leader annotation. The annotation may carry extra information around the
 * identity, so a substring match is used rather than strict equality.
 */
public static boolean hasLeadership(KubernetesConfigMap configMap, String lockIdentity) {
    final String leaderInfo = configMap.getAnnotations().get(LEADER_ANNOTATION_KEY);
    if (leaderInfo == null) {
        return false;
    }
    return leaderInfo.contains(lockIdentity);
}
// Leadership check must succeed even when the annotation embeds the identity inside extra text.
@Test void testAnnotationMatched() { leaderConfigMap .getAnnotations() .put(LEADER_ANNOTATION_KEY, "other information " + lockIdentity); assertThat(KubernetesLeaderElector.hasLeadership(leaderConfigMap, lockIdentity)).isTrue(); }
/**
 * Copies a (possibly segmented) large object by first resolving the source's segment
 * list and delegating to the segment-aware copy overload.
 */
@Override
public Path copy(final Path source, final Path target, final TransferStatus status, final ConnectionCallback callback, final StreamListener listener) throws BackgroundException {
    // segmentService.list() returns the segments backing a segmented object (or just the object itself).
    return copy(source, segmentService.list(source), target, status, callback, listener);
}
// Copies a segmented Swift object to a different bucket and verifies: the copy exists, the copy's
// manifest references the SAME segment objects as the source, deleting the source without its
// segments leaves those shared segments intact, and finally deleting the copy (with segments)
// cleans everything up.
@Test public void testCopyManifestDifferentBucket() throws Exception { final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume)); container.attributes().setRegion("IAD"); final Path originFolder = new Path(container, UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory)); final Path sourceFile = new Path(originFolder, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); final SwiftRegionService regionService = new SwiftRegionService(session); final SwiftSegmentService segmentService = new SwiftSegmentService(session, ".segments-test/"); prepareFile(sourceFile, regionService, segmentService); final SwiftFindFeature findFeature = new SwiftFindFeature(session); assertTrue(findFeature.find(sourceFile)); final List<Path> sourceSegments = segmentService.list(sourceFile); final Path targetBucket = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume)); targetBucket.attributes().setRegion("IAD"); final Path targetFolder = new Path(targetBucket, UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory)); final Path targetFile = new Path(targetFolder, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); final Path copiedFile = new SwiftDefaultCopyFeature(session, regionService) .copy(sourceFile, targetFile, new TransferStatus(), new DisabledConnectionCallback(), new DisabledStreamListener()); // copied file exists assertTrue(findFeature.find(copiedFile)); final List<Path> targetSegments = segmentService.list(targetFile); assertTrue(sourceSegments.containsAll(targetSegments) && targetSegments.containsAll(sourceSegments)); // delete source, without deleting segments new SwiftDeleteFeature(session, segmentService, regionService).delete( Collections.singletonMap(sourceFile, new TransferStatus()), new DisabledPasswordCallback(), new Delete.DisabledCallback(), false); assertFalse(findFeature.find(sourceFile)); assertTrue(targetSegments.stream().allMatch(p -> { try { return 
findFeature.find(p); } catch(BackgroundException e) { return false; } })); new SwiftDeleteFeature(session, segmentService, regionService).delete( Collections.singletonMap(copiedFile, new TransferStatus()), new DisabledPasswordCallback(), new Delete.DisabledCallback(), true); assertFalse(findFeature.find(copiedFile)); }
// Builds the environment map for the YARN application master container: user-specified
// containerized master env vars, Flink classpath/lib/opt locations (relative to the container
// working dir), Flink-on-YARN internal keys (dist jar, app id, client home dir, shipped files,
// staging dir), the submitting Hadoop user name, and finally the YARN classpath entries.
// Note: later puts would override earlier ones, so user-prefixed vars are applied first.
@VisibleForTesting Map<String, String> generateApplicationMasterEnv( final YarnApplicationFileUploader fileUploader, final String classPathStr, final String localFlinkJarStr, final String appIdStr) throws IOException { final Map<String, String> env = new HashMap<>(); // set user specified app master environment variables env.putAll( ConfigurationUtils.getPrefixedKeyValuePairs( ResourceManagerOptions.CONTAINERIZED_MASTER_ENV_PREFIX, this.flinkConfiguration)); // set Flink app class path env.put(ENV_FLINK_CLASSPATH, classPathStr); // Set FLINK_LIB_DIR to `lib` folder under working dir in container env.put(ENV_FLINK_LIB_DIR, Path.CUR_DIR + "/" + ConfigConstants.DEFAULT_FLINK_LIB_DIR); // Set FLINK_OPT_DIR to `opt` folder under working dir in container env.put(ENV_FLINK_OPT_DIR, Path.CUR_DIR + "/" + ConfigConstants.DEFAULT_FLINK_OPT_DIR); // set Flink on YARN internal configuration values env.put(YarnConfigKeys.FLINK_DIST_JAR, localFlinkJarStr); env.put(YarnConfigKeys.ENV_APP_ID, appIdStr); env.put(YarnConfigKeys.ENV_CLIENT_HOME_DIR, fileUploader.getHomeDir().toString()); env.put( YarnConfigKeys.ENV_CLIENT_SHIP_FILES, encodeYarnLocalResourceDescriptorListToString( fileUploader.getEnvShipResourceList())); env.put( YarnConfigKeys.FLINK_YARN_FILES, fileUploader.getApplicationDir().toUri().toString()); // https://github.com/apache/hadoop/blob/trunk/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/markdown/YarnApplicationSecurity.md#identity-on-an-insecure-cluster-hadoop_user_name env.put( YarnConfigKeys.ENV_HADOOP_USER_NAME, UserGroupInformation.getCurrentUser().getUserName()); // set classpath from YARN configuration Utils.setupYarnClassPath(this.yarnConfiguration, env); return env; }
// Verifies the application-master env map contains the expected lib dir, app id, staging dir,
// classpath, dist jar and client home dir entries, and an empty ship-files list for this setup.
@Test public void testGenerateApplicationMasterEnv(@TempDir File flinkHomeDir) throws IOException { final String fakeLocalFlinkJar = "./lib/flink_dist.jar"; final String fakeClassPath = fakeLocalFlinkJar + ":./usrlib/user.jar"; final ApplicationId appId = ApplicationId.newInstance(0, 0); final Map<String, String> masterEnv = getTestMasterEnv( new Configuration(), flinkHomeDir, fakeClassPath, fakeLocalFlinkJar, appId); assertThat(masterEnv) .containsEntry(ConfigConstants.ENV_FLINK_LIB_DIR, "./lib") .containsEntry(YarnConfigKeys.ENV_APP_ID, appId.toString()) .containsEntry( YarnConfigKeys.FLINK_YARN_FILES, YarnApplicationFileUploader.getApplicationDirPath( new Path(flinkHomeDir.getPath()), appId) .toString()) .containsEntry(YarnConfigKeys.ENV_FLINK_CLASSPATH, fakeClassPath); assertThat(masterEnv.get(YarnConfigKeys.ENV_CLIENT_SHIP_FILES)).isEmpty(); assertThat(masterEnv) .containsEntry(YarnConfigKeys.FLINK_DIST_JAR, fakeLocalFlinkJar) .containsEntry(YarnConfigKeys.ENV_CLIENT_HOME_DIR, flinkHomeDir.getPath()); }
/**
 * Replaces the first occurrence of {@code search} in {@code source} with {@code replace},
 * using plain (non-regex) matching. Returns {@code source} unchanged when no match is found.
 *
 * The previous implementation special-cased a match at index 0; that branch was redundant,
 * since {@code source.substring(0, 0)} is the empty string and the general expression
 * produces the identical result.
 */
public static String replaceFirst(String source, String search, String replace) {
    int start = source.indexOf(search);
    if (start == -1) {
        // No occurrence: return the original string untouched.
        return source;
    }
    return source.substring(0, start) + replace + source.substring(start + search.length());
}
// Replacement at the end of the string.
@Test public void testReplace3() { assertEquals("abcxyz", JOrphanUtils.replaceFirst("abcdef", "def", "xyz")); }
/** Exposes the configuration's backing {@link Properties} instance (not a defensive copy). */
public Properties getProperties() {
    return properties;
}
// Undefined keys resolve to null while configured keys resolve to their file-provided value.
@Test public void testHibernateProperties() { assertNull(Configuration.INSTANCE.getProperties().getProperty("hibernate.types.nothing")); assertEquals("def", Configuration.INSTANCE.getProperties().getProperty("hibernate.types.abc")); }
/** Determines the content type for a file path by delegating to the string-based overload. */
public static String from(Path path) {
    final String pathName = path.toString();
    return from(pathName);
}
// An .html path must map to the text/html content type.
@Test void testHtmlContentType() { assertThat(ContentType.from(Path.of("index.html"))).isEqualTo(TEXT_HTML); }
public static Date getNextExecutionDate(Period period) { // calcule de la date de prochaine exécution (le dimanche à minuit) final Calendar calendar = Calendar.getInstance(); calendar.set(Calendar.HOUR_OF_DAY, 0); calendar.set(Calendar.MINUTE, 0); calendar.set(Calendar.SECOND, 0); calendar.set(Calendar.MILLISECOND, 0); switch (period) { case JOUR: calendar.add(Calendar.DAY_OF_YEAR, 1); break; case SEMAINE: calendar.set(Calendar.DAY_OF_WEEK, Calendar.SUNDAY); // pour le cas où on est déjà dimanche, alors on prend dimanche prochain if (calendar.getTimeInMillis() < System.currentTimeMillis()) { // on utilise add et non roll pour ne pas tourner en boucle le 31/12 calendar.add(Calendar.DAY_OF_YEAR, 7); } break; case MOIS: calendar.set(Calendar.DAY_OF_MONTH, 1); // pour le cas où est on déjà le premier du mois, alors on prend le mois prochain if (calendar.getTimeInMillis() < System.currentTimeMillis()) { // on utilise add et non roll pour ne pas tourner en boucle le 31/12 calendar.add(Calendar.MONTH, 1); } break; case ANNEE: throw new IllegalArgumentException(String.valueOf(period)); case TOUT: throw new IllegalArgumentException(String.valueOf(period)); default: throw new IllegalArgumentException(String.valueOf(period)); } return calendar.getTime(); }
// Smoke test: the three schedulable periods all yield a non-null next execution date.
@Test public void testGetNextExecutionDate() { assertNotNull("getNextExecutionDate", MailReport.getNextExecutionDate(Period.JOUR)); assertNotNull("getNextExecutionDate", MailReport.getNextExecutionDate(Period.SEMAINE)); assertNotNull("getNextExecutionDate", MailReport.getNextExecutionDate(Period.MOIS)); }
// For every registered plugin that is a vulnerability detector, attempts to match it against the
// reconnaissance report's services and returns the plugins that matched. Streams::stream unwraps
// each matchAllVulnDetectors result, dropping non-matches.
public ImmutableList<PluginMatchingResult<VulnDetector>> getVulnDetectors( ReconnaissanceReport reconnaissanceReport) { return tsunamiPlugins.entrySet().stream() .filter(entry -> isVulnDetector(entry.getKey())) .map(entry -> matchAllVulnDetectors(entry.getKey(), entry.getValue(), reconnaissanceReport)) .flatMap(Streams::stream) .collect(toImmutableList()); }
// A remote detector filtering on "http" must be matched once, and its matched plugin must see the
// http service plus the unnamed service (unnamed services are not filtered out), but not "https".
@Test public void getVulnDetectors_whenRemoteDetectorServiceNameFilterHasMatchingService_returnsMatchedService() { NetworkService httpService = NetworkService.newBuilder() .setNetworkEndpoint(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 80)) .setTransportProtocol(TransportProtocol.TCP) .setServiceName("http") .build(); NetworkService httpsService = NetworkService.newBuilder() .setNetworkEndpoint(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 443)) .setTransportProtocol(TransportProtocol.TCP) .setServiceName("https") .build(); NetworkService noNameService = NetworkService.newBuilder() .setNetworkEndpoint(NetworkEndpointUtils.forIpAndPort("1.1.1.1", 12345)) .setTransportProtocol(TransportProtocol.TCP) .build(); ReconnaissanceReport fakeReconnaissanceReport = ReconnaissanceReport.newBuilder() .setTargetInfo(TargetInfo.getDefaultInstance()) .addNetworkServices(httpService) .addNetworkServices(httpsService) .addNetworkServices(noNameService) .build(); PluginManager pluginManager = Guice.createInjector( new FakePortScannerBootstrapModule(), new FakeServiceFingerprinterBootstrapModule(), FakeFilteringRemoteDetector.getModule()) .getInstance(PluginManager.class); ImmutableList<PluginMatchingResult<VulnDetector>> vulnDetectors = pluginManager.getVulnDetectors(fakeReconnaissanceReport); assertThat(vulnDetectors).hasSize(1); ImmutableList<MatchedPlugin> matchedResult = ((FakeFilteringRemoteDetector) vulnDetectors.get(0).tsunamiPlugin()).getMatchedPlugins(); assertThat(matchedResult).isNotEmpty(); assertThat(matchedResult.get(0).getPlugin()) .isEqualTo(FakeFilteringRemoteDetector.getHttpServiceDefinition()); assertThat(matchedResult.get(0).getServicesList()).containsExactly(httpService, noNameService); }
// Function metadata is not supported by this adapter; per the pinned test contract this
// deliberately returns null rather than an empty ResultSet.
@Override public ResultSet getFunctions(final String catalog, final String schemaPattern, final String functionNamePattern) { return null; }
// Pins the unsupported-metadata contract: getFunctions always yields null.
@Test void assertGetFunctions() { assertNull(metaData.getFunctions("", "", "")); }
/**
 * Marks the identified plugin invalid with the given reasons. If the registry does not know
 * the plugin, the failure is logged as a warning instead of being propagated to the caller.
 */
@Override
public void reportErrorAndInvalidate(String bundleSymbolicName, List<String> messages) {
    try {
        pluginRegistry.markPluginInvalid(bundleSymbolicName, messages);
    }
    catch (Exception e) {
        // Unknown plugin id: swallow deliberately so a misbehaving plugin cannot break the health service.
        LOGGER.warn("[Plugin Health Service] Plugin with id '{}' tried to report health with message '{}' but Go is unaware of this plugin.", bundleSymbolicName, messages, e);
    }
}
// Reporting an error must delegate to the registry with the same plugin id and reasons.
@Test void shouldMarkPluginAsInvalidWhenServiceReportsAnError() { String bundleSymbolicName = "plugin-id"; String message = "plugin is broken beyond repair"; List<String> reasons = List.of(message); doNothing().when(pluginRegistry).markPluginInvalid(bundleSymbolicName, reasons); serviceDefault.reportErrorAndInvalidate(bundleSymbolicName, reasons); verify(pluginRegistry).markPluginInvalid(bundleSymbolicName, reasons); }
/**
 * Instantiates one ValueExtractor per attribute config and indexes them by attribute name.
 * Duplicate attribute names are rejected with an IllegalArgumentException.
 */
static Map<String, ValueExtractor> instantiateExtractors(List<AttributeConfig> attributeConfigs, ClassLoader classLoader) {
    Map<String, ValueExtractor> extractorsByName = createHashMap(attributeConfigs.size());
    for (AttributeConfig attributeConfig : attributeConfigs) {
        String attributeName = attributeConfig.getName();
        if (extractorsByName.containsKey(attributeName)) {
            throw new IllegalArgumentException("Could not add " + attributeConfig + ". Extractor for this attribute name already added.");
        }
        extractorsByName.put(attributeName, instantiateExtractor(attributeConfig, classLoader));
    }
    return extractorsByName;
}
// An extractor whose constructor is inaccessible must surface as an IllegalArgumentException.
@Test public void instantiate_extractors_accessException() { // GIVEN AttributeConfig string = new AttributeConfig("iq", "com.hazelcast.query.impl.getters.ExtractorHelperTest$AccessExceptionExtractor"); // WHEN assertThatThrownBy(() -> instantiateExtractors(singletonList(string))) .isInstanceOf(IllegalArgumentException.class); }
/**
 * Moves a local file by renaming it on disk. Fails with NotfoundException when the source
 * is missing; when the target already exists it is deleted first so the rename can succeed.
 */
@Override
public Path move(final Path file, final Path renamed, final TransferStatus status, final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException {
    // Fail fast when the source does not exist.
    if(!new LocalFindFeature(session).find(file)) {
        throw new NotfoundException(file.getAbsolute());
    }
    // Clear an existing target so File#renameTo is not blocked by it.
    if(status.isExists()) {
        new LocalDeleteFeature(session).delete(Collections.singletonMap(renamed, status), new DisabledPasswordCallback(), callback);
    }
    final boolean renameSucceeded = session.toPath(file).toFile().renameTo(session.toPath(renamed).toFile());
    if(!renameSucceeded) {
        throw new LocalExceptionMappingService().map("Cannot rename {0}", new NoSuchFileException(file.getName()), file);
    }
    return renamed;
}
// End-to-end local move: after moving, the source must be gone and the target present.
@Test public void testMoveFile() throws Exception { final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname())); session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()); session.login(new DisabledLoginCallback(), new DisabledCancelCallback()); final Path workdir = new LocalHomeFinderFeature().find(); final Path test = new Path(workdir, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); new LocalTouchFeature(session).touch(test, new TransferStatus()); final Path target = new Path(workdir, UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); new LocalMoveFeature(session).move(test, target, new TransferStatus(), new Delete.DisabledCallback(), new DisabledConnectionCallback()); assertFalse(new LocalFindFeature(session).find(test)); assertTrue(new LocalFindFeature(session).find(target)); new LocalDeleteFeature(session).delete(Collections.<Path>singletonList(target), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
/**
 * Builds a managed write transform for the named sink. Sink lookup is case-insensitive;
 * an unsupported sink fails fast with a message listing the supported ones.
 */
public static ManagedTransform write(String sink) {
    String identifier = Preconditions.checkNotNull(
        WRITE_TRANSFORMS.get(sink.toLowerCase()),
        "An unsupported sink was specified: '%s'. Please specify one of the following sinks: %s",
        sink,
        WRITE_TRANSFORMS.keySet());
    return new AutoValue_Managed_ManagedTransform.Builder()
        .setIdentifier(identifier)
        .setSupportedIdentifiers(new ArrayList<>(WRITE_TRANSFORMS.values()))
        .build();
}
// Rewires a write transform to the test provider identifier and runs the shared provider
// checks with an inline config map.
@Test public void testManagedTestProviderWithConfigMap() { Managed.ManagedTransform writeOp = Managed.write(Managed.ICEBERG) .toBuilder() .setIdentifier(TestSchemaTransformProvider.IDENTIFIER) .build() .withSupportedIdentifiers(Arrays.asList(TestSchemaTransformProvider.IDENTIFIER)) .withConfig(ImmutableMap.of("extra_string", "abc", "extra_integer", 123)); runTestProviderTest(writeOp); }
/** Convenience overload: generates the SQL for a single-column primary key. */
public List<String> generate(String tableName, String columnName, boolean isAutoGenerated) throws SQLException {
    return generate(tableName, singleton(columnName), isAutoGenerated);
}
// MS SQL drops a primary key by its named constraint; exactly one ALTER statement is expected.
@Test public void generate_for_ms_sql() throws SQLException { when(dbConstraintFinder.findConstraintName(TABLE_NAME)).thenReturn(Optional.of(CONSTRAINT)); when(db.getDialect()).thenReturn(MS_SQL); List<String> sqls = underTest.generate(TABLE_NAME, PK_COLUMN, true); assertThat(sqls).containsExactly("ALTER TABLE issues DROP CONSTRAINT pk_id"); }
/**
 * Selects a data source from the high-availability pool:
 * null when no pool or no available sources; the sole candidate when exactly one exists;
 * otherwise the explicitly targeted source, falling back to the default-named source
 * (or null) when no target is set.
 */
@Override
public DataSource get() {
    if (highAvailableDataSource == null) {
        return null;
    }
    Map<String, DataSource> availableDataSources = highAvailableDataSource.getAvailableDataSourceMap();
    if (availableDataSources == null || availableDataSources.isEmpty()) {
        return null;
    }
    // Single candidate: nothing to select.
    if (availableDataSources.size() == 1) {
        return availableDataSources.values().iterator().next();
    }
    String targetName = getTarget();
    if (targetName != null) {
        // Explicit target wins; may be null if the target is not available.
        return availableDataSources.get(targetName);
    }
    // No explicit target: use the default-named data source when present.
    DataSource fallback = availableDataSources.get(getDefaultName());
    if (fallback != null) {
        return fallback;
    }
    return null;
}
// With an empty available-data-source map (and with no pool at all) the selector yields null.
@Test public void testEmptyMap() { dataSourceMap.clear(); NamedDataSourceSelector selector = new NamedDataSourceSelector(null); assertNull(selector.get()); selector = new NamedDataSourceSelector(dataSource); assertNull(selector.get()); }
/**
 * Tags the given scoped span based on the input. Null arguments are rejected eagerly
 * with descriptive NullPointerExceptions; no-op spans are skipped entirely so the
 * tag value is never parsed for them.
 */
public final void tag(I input, ScopedSpan span) {
    if (input == null) {
        throw new NullPointerException("input == null");
    }
    if (span == null) {
        throw new NullPointerException("span == null");
    }
    if (span.isNoop()) {
        return; // avoid the parsing cost when nothing will be recorded
    }
    tag(span, input, span.context());
}
// An empty parsed value is still a valid tag: it must be recorded exactly once,
// with the value parsed exactly once.
@Test void tag_span_empty() { when(parseValue.apply(input, context)).thenReturn(""); tag.tag(input, span); verify(span).context(); verify(span).isNoop(); verify(parseValue).apply(input, context); verifyNoMoreInteractions(parseValue); // doesn't parse twice verify(span).tag("key", ""); verifyNoMoreInteractions(span); // doesn't tag twice }
// Drains up to maxPollRecords records from the fetch buffer into a single Fetch. Completed
// fetches are lazily initialized as they reach the head of the buffer; paused partitions are
// set aside and re-queued in the finally block so they can be returned after resume. A
// KafkaException is swallowed only if some records were already collected (they are returned
// first; the error resurfaces on the next poll).
public Fetch<K, V> collectFetch(final FetchBuffer fetchBuffer) { final Fetch<K, V> fetch = Fetch.empty(); final Queue<CompletedFetch> pausedCompletedFetches = new ArrayDeque<>(); int recordsRemaining = fetchConfig.maxPollRecords; try { while (recordsRemaining > 0) { final CompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch(); if (nextInLineFetch == null || nextInLineFetch.isConsumed()) { final CompletedFetch completedFetch = fetchBuffer.peek(); if (completedFetch == null) break; if (!completedFetch.isInitialized()) { try { fetchBuffer.setNextInLineFetch(initialize(completedFetch)); } catch (Exception e) { // Remove a completedFetch upon a parse with exception if (1) it contains no completedFetch, and // (2) there are no fetched completedFetch with actual content preceding this exception. // The first condition ensures that the completedFetches is not stuck with the same completedFetch // in cases such as the TopicAuthorizationException, and the second condition ensures that no // potential data loss due to an exception in a following record. 
if (fetch.isEmpty() && FetchResponse.recordsOrFail(completedFetch.partitionData).sizeInBytes() == 0) fetchBuffer.poll(); throw e; } } else { fetchBuffer.setNextInLineFetch(completedFetch); } fetchBuffer.poll(); } else if (subscriptions.isPaused(nextInLineFetch.partition)) { // when the partition is paused we add the records back to the completedFetches queue instead of draining // them so that they can be returned on a subsequent poll if the partition is resumed at that time log.debug("Skipping fetching records for assigned partition {} because it is paused", nextInLineFetch.partition); pausedCompletedFetches.add(nextInLineFetch); fetchBuffer.setNextInLineFetch(null); } else { final Fetch<K, V> nextFetch = fetchRecords(nextInLineFetch, recordsRemaining); recordsRemaining -= nextFetch.numRecords(); fetch.add(nextFetch); } } } catch (KafkaException e) { if (fetch.isEmpty()) throw e; } finally { // add any polled completed fetches for paused partitions back to the completed fetches queue to be // re-evaluated in the next poll fetchBuffer.addAll(pausedCompletedFetches); } return fetch; }
// Collecting a fetch must return the buffered records and leave the previously set
// preferred read replica untouched.
@Test public void testFetchWithReadReplica() { buildDependencies(); assignAndSeek(topicAPartition0); // Set the preferred read replica and just to be safe, verify it was set. int preferredReadReplicaId = 67; subscriptions.updatePreferredReadReplica(topicAPartition0, preferredReadReplicaId, time::milliseconds); assertNotNull(subscriptions.preferredReadReplica(topicAPartition0, time.milliseconds())); assertEquals(Optional.of(preferredReadReplicaId), subscriptions.preferredReadReplica(topicAPartition0, time.milliseconds())); CompletedFetch completedFetch = completedFetchBuilder.build(); fetchBuffer.add(completedFetch); Fetch<String, String> fetch = fetchCollector.collectFetch(fetchBuffer); // The Fetch and read replica settings should be empty. assertEquals(DEFAULT_RECORD_COUNT, fetch.numRecords()); assertEquals(Optional.of(preferredReadReplicaId), subscriptions.preferredReadReplica(topicAPartition0, time.milliseconds())); }
/** Delegates to the async-execution monitor for the current queue size. */
@Override
public long getQueueSize() {
    return asyncExecutionMonitoring.getQueueSize();
}
// Pure delegation: the wrapper must return whatever the monitor reports, and call it once.
@Test public void getQueueSize_delegates_to_AsyncExecutionMonitoring() { when(asyncExecutionMonitoring.getQueueSize()).thenReturn(12); assertThat(underTest.getQueueSize()).isEqualTo(12); verify(asyncExecutionMonitoring).getQueueSize(); }
// Inspects a built jar for license hygiene and returns the number of severe issues found:
// a missing/invalid META-INF/NOTICE counts one, a missing/invalid META-INF/LICENSE counts one,
// plus one per license file outside META-INF and per file with an incompatible license.
// Empty test jars are exempt. The jar is opened as a zip FileSystem via a jar:file URI and
// closed by try-with-resources.
@VisibleForTesting static int checkJar(Path file) throws Exception { final URI uri = file.toUri(); int numSevereIssues = 0; try (final FileSystem fileSystem = FileSystems.newFileSystem( new URI("jar:file", uri.getHost(), uri.getPath(), uri.getFragment()), Collections.emptyMap())) { if (isTestJarAndEmpty(file, fileSystem.getPath("/"))) { return 0; } if (!noticeFileExistsAndIsValid(fileSystem.getPath("META-INF", "NOTICE"), file)) { numSevereIssues++; } if (!licenseFileExistsAndIsValid(fileSystem.getPath("META-INF", "LICENSE"), file)) { numSevereIssues++; } numSevereIssues += getNumLicenseFilesOutsideMetaInfDirectory(file, fileSystem.getPath("/")); numSevereIssues += getFilesWithIncompatibleLicenses(file, fileSystem.getPath("/")); } return numSevereIssues; }
// A NOTICE-like file in the jar root (outside META-INF) must be counted as exactly one severe issue.
@Test void testRejectedOnNoticeFileInRoot(@TempDir Path tempDir) throws Exception { assertThat( JarFileChecker.checkJar( createJar( tempDir, Entry.fileEntry(VALID_NOTICE_CONTENTS, VALID_NOTICE_PATH), Entry.fileEntry(VALID_LICENSE_CONTENTS, VALID_LICENSE_PATH), Entry.fileEntry( VALID_NOTICE_CONTENTS, Arrays.asList("some_custom_notice"))))) .isEqualTo(1); }
/**
 * Telnet "pwd" command: prints the service currently selected via "cd", or "/" when none.
 * Any argument is rejected with an error message.
 */
@Override
public String execute(CommandContext commandContext, String[] args) {
    // pwd takes no arguments.
    if (args.length > 0) {
        return "Unsupported parameter " + Arrays.toString(args) + " for pwd.";
    }
    String service = commandContext.getRemote().attr(ChangeTelnet.SERVICE_KEY).get();
    // Root when no service has been selected yet.
    if (StringUtils.isEmpty(service)) {
        return "/";
    }
    return service;
}
// pwd with an argument must report the unsupported parameter, echoing the argument array.
@Test void testMessageError() throws RemotingException { defaultAttributeMap.attr(ChangeTelnet.SERVICE_KEY).set(null); String result = pwdTelnet.execute(mockCommandContext, new String[] {"test"}); assertEquals("Unsupported parameter [test] for pwd.", result); }
/**
 * Flattens a geometry into an iterable of non-collection geometries:
 * null and empty collections yield nothing, a non-collection geometry yields itself,
 * and a geometry collection is walked lazily, yielding its leaf geometries.
 */
public static Iterable<OGCGeometry> flattenCollection(OGCGeometry geometry) {
    if (geometry == null) {
        return ImmutableList.of();
    }
    if (!(geometry instanceof OGCConcreteGeometryCollection)) {
        // Already a leaf geometry.
        return ImmutableList.of(geometry);
    }
    OGCConcreteGeometryCollection collection = (OGCConcreteGeometryCollection) geometry;
    if (collection.numGeometries() == 0) {
        return ImmutableList.of();
    }
    // Lazy iteration over (possibly nested) collection members.
    return () -> new GeometryCollectionIterator(geometry);
}
// Non-collection geometries (including empty ones) pass through unchanged; geometry collections,
// possibly nested, flatten to their leaf geometries, with empty collections flattening to nothing.
@Test public void testFlattenCollection() { assertFlattenLeavesUnchanged(OGCGeometry.fromText("POINT EMPTY")); assertFlattenLeavesUnchanged(OGCGeometry.fromText("POINT (1 2)")); assertFlattenLeavesUnchanged(OGCGeometry.fromText("MULTIPOINT EMPTY")); assertFlattenLeavesUnchanged(OGCGeometry.fromText("MULTIPOINT (1 2)")); assertFlattenLeavesUnchanged(OGCGeometry.fromText("MULTIPOINT (1 2, 3 4)")); assertFlattenLeavesUnchanged(OGCGeometry.fromText("LINESTRING EMPTY")); assertFlattenLeavesUnchanged(OGCGeometry.fromText("LINESTRING (1 2, 3 4)")); assertFlattenLeavesUnchanged(OGCGeometry.fromText("MULTILINESTRING EMPTY")); assertFlattenLeavesUnchanged(OGCGeometry.fromText("MULTILINESTRING ((1 2, 3 4))")); assertFlattenLeavesUnchanged(OGCGeometry.fromText("MULTILINESTRING ((1 2, 3 4), (5 6, 7 8))")); assertFlattenLeavesUnchanged(OGCGeometry.fromText("POLYGON EMPTY")); assertFlattenLeavesUnchanged(OGCGeometry.fromText("POLYGON ((0 0, 0 1, 1 1, 0 0))")); assertFlattenLeavesUnchanged(OGCGeometry.fromText("MULTIPOLYGON EMPTY")); assertFlattenLeavesUnchanged(OGCGeometry.fromText("MULTIPOLYGON (((0 0, 0 1, 1 1, 0 0)))")); assertFlattenLeavesUnchanged(OGCGeometry.fromText("MULTIPOLYGON (((0 0, 0 1, 1 1, 0 0)), ((10 10, 10 11, 11 11, 10 10)))")); assertFlattens(OGCGeometry.fromText("GEOMETRYCOLLECTION EMPTY"), ImmutableList.of()); assertFlattens(OGCGeometry.fromText("GEOMETRYCOLLECTION (POINT EMPTY)"), OGCGeometry.fromText("POINT EMPTY")); assertFlattens(OGCGeometry.fromText("GEOMETRYCOLLECTION (POINT (0 1))"), OGCGeometry.fromText("POINT (0 1)")); assertFlattens(OGCGeometry.fromText("GEOMETRYCOLLECTION (GEOMETRYCOLLECTION EMPTY)"), ImmutableList.of()); assertFlattens( OGCGeometry.fromText("GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (0 1), POINT (1 2)))"), ImmutableList.of(OGCGeometry.fromText("POINT (0 1)"), OGCGeometry.fromText("POINT (1 2)"))); }
/**
 * Handles every HTTP GET operation exposed by the HttpFS (WebHDFS-compatible)
 * REST endpoint, dispatching on the {@code op} query parameter.
 *
 * In write-only access mode only GETFILESTATUS and LISTSTATUS are permitted;
 * any other operation is rejected with 403 FORBIDDEN. Each case executes the
 * corresponding {@code FSOperations} command as the authenticated user and
 * writes an audit-log entry.
 *
 * @param path    the file-system path from the URL (made absolute below)
 * @param uriInfo request URI, used to build redirect locations
 * @param op      the WebHDFS operation to perform
 * @param params  typed accessor for the remaining query parameters
 * @param request the raw servlet request (used for the caller's address)
 * @return the JSON (or octet-stream, for OPEN) response for the operation
 * @throws IOException               on an invalid operation or filesystem error
 * @throws FileSystemAccessException if the filesystem cannot be accessed
 */
@GET
@Path("{path:.*}")
@Produces({MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
public Response get(@PathParam("path") String path,
    @Context UriInfo uriInfo,
    @QueryParam(OperationParam.NAME) OperationParam op,
    @Context Parameters params,
    @Context HttpServletRequest request)
    throws IOException, FileSystemAccessException {
  // Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode
  if ((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) &&
      (op.value() != HttpFSFileSystem.Operation.LISTSTATUS) &&
      accessMode == AccessMode.WRITEONLY) {
    return Response.status(Response.Status.FORBIDDEN).build();
  }
  UserGroupInformation user = HttpUserGroupInformation.get();
  Response response;
  path = makeAbsolute(path);
  // Record the operation and caller host in the MDC for audit logging.
  MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
  MDC.put("hostname", request.getRemoteAddr());
  switch (op.value()) {
  case OPEN: {
    Boolean noRedirect = params.get(
        NoRedirectParam.NAME, NoRedirectParam.class);
    if (noRedirect) {
      // Client asked for the data-node location instead of a 307 redirect.
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      //Invoking the command directly using an unmanaged FileSystem that is
      // released by the FileSystemReleaseFilter
      final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
      final FileSystem fs = createFileSystem(user);
      InputStream is = null;
      UserGroupInformation ugi = UserGroupInformation
          .createProxyUser(user.getShortUserName(),
              UserGroupInformation.getLoginUser());
      try {
        is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
          @Override
          public InputStream run() throws Exception {
            return command.execute(fs);
          }
        });
      } catch (InterruptedException ie) {
        LOG.warn("Open interrupted.", ie);
        Thread.currentThread().interrupt();
      }
      Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
      Long len = params.get(LenParam.NAME, LenParam.class);
      AUDIT_LOG.info("[{}] offset [{}] len [{}]",
          new Object[] { path, offset, len });
      // NOTE(review): if the doAs call above was interrupted, "is" remains
      // null and the entity below will fail when consumed — confirm intended.
      InputStreamEntity entity = new InputStreamEntity(is, offset, len);
      response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM)
          .build();
    }
    break;
  }
  case GETFILESTATUS: {
    FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS: {
    String filter = params.get(FilterParam.NAME, FilterParam.class);
    FSOperations.FSListStatus command =
        new FSOperations.FSListStatus(path, filter);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] filter [{}]", path, (filter != null) ? filter : "-");
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETHOMEDIRECTORY: {
    // Only valid on the root path.
    enforceRootPath(op.value(), path);
    FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("Home Directory for [{}]", user);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case INSTRUMENTATION: {
    enforceRootPath(op.value(), path);
    // Server metrics are restricted to members of the configured admin group.
    Groups groups = HttpFSServerWebApp.get().get(Groups.class);
    Set<String> userGroups = groups.getGroupsSet(user.getShortUserName());
    if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
      throw new AccessControlException(
          "User not in HttpFSServer admin group");
    }
    Instrumentation instrumentation =
        HttpFSServerWebApp.get().get(Instrumentation.class);
    Map snapshot = instrumentation.getSnapshot();
    response = Response.ok(snapshot).build();
    break;
  }
  case GETCONTENTSUMMARY: {
    FSOperations.FSContentSummary command =
        new FSOperations.FSContentSummary(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Content summary for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETQUOTAUSAGE: {
    FSOperations.FSQuotaUsage command = new
        FSOperations.FSQuotaUsage(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Quota Usage for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILECHECKSUM: {
    FSOperations.FSFileChecksum command =
        new FSOperations.FSFileChecksum(path);
    Boolean noRedirect = params.get(
        NoRedirectParam.NAME, NoRedirectParam.class);
    AUDIT_LOG.info("[{}]", path);
    if (noRedirect) {
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      Map json = fsExecute(user, command);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    }
    break;
  }
  case GETFILEBLOCKLOCATIONS: {
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    // Missing or non-positive values fall back to "start" / "whole file".
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocations command =
        new FSOperations.FSFileBlockLocations(path, offset, len);
    @SuppressWarnings("rawtypes")
    Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("BlockLocations", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETACLSTATUS: {
    FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("ACL status for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETXATTRS: {
    List<String> xattrNames =
        params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
    XAttrCodec encoding =
        params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class);
    FSOperations.FSGetXAttrs command = new FSOperations.FSGetXAttrs(path, xattrNames,
        encoding);
    @SuppressWarnings("rawtypes")
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttrs for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTXATTRS: {
    FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
    @SuppressWarnings("rawtypes")
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttr names for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS_BATCH: {
    // Paged listing: "startAfter" is the opaque continuation token.
    String startAfter = params.get(
        HttpFSParametersProvider.StartAfterParam.NAME,
        HttpFSParametersProvider.StartAfterParam.class);
    byte[] token = HttpFSUtils.EMPTY_BYTES;
    if (startAfter != null) {
      token = startAfter.getBytes(StandardCharsets.UTF_8);
    }
    FSOperations.FSListStatusBatch command = new FSOperations
        .FSListStatusBatch(path, token);
    @SuppressWarnings("rawtypes")
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] token [{}]", path, token);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOT: {
    FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETALLSTORAGEPOLICY: {
    FSOperations.FSGetAllStoragePolicies command =
        new FSOperations.FSGetAllStoragePolicies();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTORAGEPOLICY: {
    FSOperations.FSGetStoragePolicy command =
        new FSOperations.FSGetStoragePolicy(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFF: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    FSOperations.FSGetSnapshotDiff command =
        new FSOperations.FSGetSnapshotDiff(path, oldSnapshotName,
            snapshotName);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFFLISTING: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    String snapshotDiffStartPath = params
        .get(HttpFSParametersProvider.SnapshotDiffStartPathParam.NAME,
            HttpFSParametersProvider.SnapshotDiffStartPathParam.class);
    Integer snapshotDiffIndex =
        params.get(HttpFSParametersProvider.SnapshotDiffIndexParam.NAME,
            HttpFSParametersProvider.SnapshotDiffIndexParam.class);
    FSOperations.FSGetSnapshotDiffListing command =
        new FSOperations.FSGetSnapshotDiffListing(path, oldSnapshotName,
            snapshotName, snapshotDiffStartPath, snapshotDiffIndex);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTTABLEDIRECTORYLIST: {
    FSOperations.FSGetSnapshottableDirListing command =
        new FSOperations.FSGetSnapshottableDirListing();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTLIST: {
    FSOperations.FSGetSnapshotListing command =
        new FSOperations.FSGetSnapshotListing(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSERVERDEFAULTS: {
    FSOperations.FSGetServerDefaults command =
        new FSOperations.FSGetServerDefaults();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case CHECKACCESS: {
    String mode = params.get(FsActionParam.NAME,
        FsActionParam.class);
    FsActionParam fsparam = new FsActionParam(mode);
    FSOperations.FSAccess command = new FSOperations.FSAccess(path,
        FsAction.getFsAction(fsparam.value()));
    fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    // CHECKACCESS succeeds with an empty 200 body; failure throws above.
    response = Response.ok().build();
    break;
  }
  case GETECPOLICY: {
    FSOperations.FSGetErasureCodingPolicy command =
        new FSOperations.FSGetErasureCodingPolicy(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECPOLICIES: {
    FSOperations.FSGetErasureCodingPolicies command =
        new FSOperations.FSGetErasureCodingPolicies();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECCODECS: {
    FSOperations.FSGetErasureCodingCodecs command =
        new FSOperations.FSGetErasureCodingCodecs();
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GET_BLOCK_LOCATIONS: {
    // Legacy (pre-WebHDFS) block-location format; see GETFILEBLOCKLOCATIONS
    // for the current variant.
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocationsLegacy command =
        new FSOperations.FSFileBlockLocationsLegacy(path, offset, len);
    @SuppressWarnings("rawtypes")
    Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("LocatedBlocks", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILELINKSTATUS: {
    FSOperations.FSFileLinkStatus command =
        new FSOperations.FSFileLinkStatus(path);
    @SuppressWarnings("rawtypes")
    Map js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]",
        path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTATUS: {
    FSOperations.FSStatus command = new FSOperations.FSStatus(path);
    @SuppressWarnings("rawtypes")
    Map js = fsExecute(user, command);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOTS: {
    Boolean allUsers = params.get(AllUsersParam.NAME, AllUsersParam.class);
    FSOperations.FSGetTrashRoots command =
        new FSOperations.FSGetTrashRoots(allUsers);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("allUsers [{}]", allUsers);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  default: {
    throw new IOException(
        MessageFormat.format("Invalid HTTP GET operation [{0}]",
            op.value()));
  }
  }
  return response;
}
@Test
@TestDir
@TestJetty
@TestHdfs
public void testHdfsAccess() throws Exception {
    // Start an HttpFS server without security or special access modes.
    createHttpFSServer(false, false);
    long oldOpsListStatus = metricsGetter.get("LISTSTATUS").call();
    String user = HadoopUsersConfTestHelper.getHadoopUsers()[0];
    // Issue a LISTSTATUS over HTTP as a regular Hadoop user.
    URL url = new URL(TestJettyHelper.getJettyURL(),
        MessageFormat.format("/webhdfs/v1/?user.name={0}&op=liststatus", user));
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    Assert.assertEquals(conn.getResponseCode(), HttpURLConnection.HTTP_OK);
    BufferedReader reader = new BufferedReader(
        new InputStreamReader(conn.getInputStream()));
    reader.readLine();
    reader.close();
    // The LISTSTATUS operation counter should have advanced by exactly one.
    Assert.assertEquals(1 + oldOpsListStatus,
        (long) metricsGetter.get("LISTSTATUS").call());
}
@Override
@CacheEvict(value = RedisKeyConstants.MAIL_ACCOUNT, key = "#id")
public void deleteMailAccount(Long id) {
    // Validate that the mail account exists.
    validateMailAccountExists(id);
    // Refuse deletion while mail templates still reference this account.
    if (mailTemplateService.getMailTemplateCountByAccountId(id) > 0) {
        throw exception(MAIL_ACCOUNT_RELATE_TEMPLATE_EXISTS);
    }
    // Delete the row; the Redis cache entry is evicted via @CacheEvict above.
    mailAccountMapper.deleteById(id);
}
@Test
public void testDeleteMailAccount_notExists() {
    // Prepare a random id that does not exist in the database.
    Long id = randomLongId();
    // Invoke and assert the expected "not exists" service exception.
    assertServiceException(() -> mailAccountService.deleteMailAccount(id), MAIL_ACCOUNT_NOT_EXISTS);
}
/**
 * Scores a single closed position: 1 when it ended with a loss, 0 otherwise.
 */
@Override
public Num calculate(BarSeries series, Position position) {
    if (position.hasLoss()) {
        return series.one();
    }
    return series.zero();
}
@Test
public void calculateWithOneLongPosition() {
    // Buy at 105 (index 1) and sell at 100 (index 3): a losing long position.
    MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);
    Position position = new Position(Trade.buyAt(1, series), Trade.sellAt(3, series));
    // Losing positions score 1 under this criterion.
    assertNumEquals(1, getCriterion().calculate(series, position));
}
/**
 * Creates a transformer that decorates a reactive source with the given
 * retry behaviour.
 *
 * @param retry the Retry instance supplying the retry configuration
 * @param <T>   the type of the items emitted by the decorated source
 * @return a new {@code RetryTransformer} applying {@code retry}
 */
public static <T> RetryTransformer<T> of(Retry retry) {
    return new RetryTransformer<>(retry);
}
@Test
public void retryOnResultFailAfterMaxAttemptsUsingMaybe() throws InterruptedException {
    // Retry whenever the result equals "retry", up to 3 attempts total.
    RetryConfig config = RetryConfig.<String>custom()
        .retryOnResult("retry"::equals)
        .waitDuration(Duration.ofMillis(50))
        .maxAttempts(3).build();
    Retry retry = Retry.of("testName", config);
    given(helloWorldService.returnHelloWorld())
        .willReturn("retry");
    // After max attempts the last ("retry") result is emitted and completes.
    Maybe.fromCallable(helloWorldService::returnHelloWorld)
        .compose(RetryTransformer.of(retry))
        .test()
        .await()
        .assertValueCount(1)
        .assertValue("retry")
        .assertComplete()
        .assertSubscribed();
    // The service must have been invoked exactly maxAttempts times.
    then(helloWorldService).should(times(3)).returnHelloWorld();
}
/**
 * Loads a single notification message by primary key.
 *
 * @param id the message id
 * @return the message row, or null when no row matches
 */
@Override
public NotifyMessageDO getNotifyMessage(Long id) {
    return notifyMessageMapper.selectById(id);
}
@Test
public void testGetNotifyMessage() {
    // Mock data: insert a random message row.
    NotifyMessageDO dbNotifyMessage = randomPojo(NotifyMessageDO.class, o -> o.setTemplateParams(randomTemplateParams()));
    notifyMessageMapper.insert(dbNotifyMessage);
    // Prepare the lookup parameter.
    Long id = dbNotifyMessage.getId();
    // Invoke and assert the loaded row matches the inserted one.
    NotifyMessageDO notifyMessage = notifyMessageService.getNotifyMessage(id);
    assertPojoEquals(dbNotifyMessage, notifyMessage);
}
public static ImmutableList<SbeField> generateFields(Ir ir, IrOptions irOptions) { ImmutableList.Builder<SbeField> fields = ImmutableList.builder(); TokenIterator iterator = getIteratorForMessage(ir, irOptions); while (iterator.hasNext()) { Token token = iterator.next(); switch (token.signal()) { case BEGIN_FIELD: fields.add(processPrimitive(iterator)); break; default: // TODO(https://github.com/apache/beam/issues/21102): Support remaining field types break; } } return fields.build(); }
@Test
public void testGenerateFieldsWithInvalidMessageName() throws Exception {
    Ir ir = getIr(OnlyPrimitives.RESOURCE_PATH);
    // A random UUID cannot match any message name in the schema.
    IrOptions options = IrOptions.builder().setMessageName(UUID.randomUUID().toString()).build();
    assertThrows(
        IllegalArgumentException.class, () -> IrFieldGenerator.generateFields(ir, options));
}
/**
 * Resolves the email address for a subscriber.
 * Anonymous subscribers carry their email inside the subscriber name;
 * registered users are looked up and only a verified email is emitted
 * (an unverified one completes empty).
 */
@Override
public Mono<String> resolve(Subscription.Subscriber subscriber) {
    var identity = UserIdentity.of(subscriber.getName());
    if (identity.isAnonymous()) {
        // Extract the email embedded in the anonymous subscriber name lazily.
        return Mono.fromSupplier(() -> getEmail(subscriber));
    }
    return client.fetch(User.class, subscriber.getName())
        .filter(user -> user.getSpec().isEmailVerified())
        .mapNotNull(user -> user.getSpec().getEmail());
}
@Test
void testResolve() {
    // Anonymous subscriber with an embedded email resolves to that email.
    var subscriber = new Subscription.Subscriber();
    subscriber.setName(AnonymousUserConst.PRINCIPAL + "#test@example.com");
    subscriberEmailResolver.resolve(subscriber)
        .as(StepVerifier::create)
        .expectNext("test@example.com")
        .verifyComplete();
    // Anonymous subscriber with an empty email part errors out.
    subscriber.setName(AnonymousUserConst.PRINCIPAL + "#");
    subscriberEmailResolver.resolve(subscriber)
        .as(StepVerifier::create)
        .verifyErrorMessage("The subscriber does not have an email");
    // Registered user with an unverified email resolves to nothing.
    var user = new User();
    user.setMetadata(new Metadata());
    user.getMetadata().setName("fake-user");
    user.setSpec(new User.UserSpec());
    user.getSpec().setEmail("test@halo.run");
    user.getSpec().setEmailVerified(false);
    when(client.fetch(eq(User.class), eq("fake-user"))).thenReturn(Mono.just(user));
    subscriber.setName("fake-user");
    subscriberEmailResolver.resolve(subscriber)
        .as(StepVerifier::create)
        .verifyComplete();
    // Once the email is verified, it resolves normally.
    user.getSpec().setEmailVerified(true);
    when(client.fetch(eq(User.class), eq("fake-user"))).thenReturn(Mono.just(user));
    subscriber.setName("fake-user");
    subscriberEmailResolver.resolve(subscriber)
        .as(StepVerifier::create)
        .expectNext("test@halo.run")
        .verifyComplete();
}
/**
 * Scans the classpath for resources under the given package, keeping only
 * those whose package name passes the supplied filter.
 *
 * @param packageName   the package to scan; must be a valid package name
 * @param packageFilter predicate over candidate package names; must not be null
 * @return the matching resources found under every classpath root of the package
 */
public List<R> scanForResourcesInPackage(String packageName, Predicate<String> packageFilter) {
    requireValidPackageName(packageName);
    requireNonNull(packageFilter, "packageFilter must not be null");
    BiFunction<Path, Path, Resource> resourceFactory = createPackageResource(packageName);
    // A package may live under several classpath roots (jars, directories).
    List<URI> packageRoots = getUrisForPackage(getClassLoader(), packageName);
    return findResourcesForUris(packageRoots, packageName, packageFilter, resourceFactory);
}
@Test
void scanForResourcesInSubPackage() {
    String basePackageName = "io.cucumber.core.resource";
    List<URI> resources = resourceScanner.scanForResourcesInPackage(basePackageName, aPackage -> true);
    // All resources under the sub-package must be found, including names with
    // percent-encoded spaces.
    assertThat(resources, containsInAnyOrder(
        URI.create("classpath:io/cucumber/core/resource/test/resource.txt"),
        URI.create("classpath:io/cucumber/core/resource/test/other-resource.txt"),
        URI.create("classpath:io/cucumber/core/resource/test/spaces%20in%20name%20resource.txt")));
}
/**
 * Returns an externally accessible URL for this file, if one exists.
 * This default implementation offers none.
 *
 * @return null — no external URL is available by default
 * @throws IOException declared for overriding implementations that may fail
 */
public @CheckForNull URL toExternalURL() throws IOException {
    return null;
}
@Test
public void testExternalUrl() throws Exception {
    // The minimal implementation provides no external URL.
    VirtualFile root = new VirtualFileMinimalImplementation();
    assertThat(root.toExternalURL(), nullValue());
}
/**
 * Adds a data file to this append operation.
 * Null files are rejected; duplicate paths are silently ignored so the same
 * file cannot be appended twice in one operation.
 */
@Override
public FastAppend appendFile(DataFile file) {
    Preconditions.checkNotNull(file, "Invalid data file: null");
    // Set.add returns false for a path we have already seen — skip duplicates.
    boolean firstOccurrence = newFilePaths.add(file.path());
    if (firstOccurrence) {
        this.hasNewFiles = true;
        newFiles.add(file);
        summaryBuilder.addedFile(spec, file);
    }
    return this;
}
@TestTemplate
public void appendNullFile() {
    // Appending a null data file must fail fast with a descriptive NPE.
    assertThatThrownBy(() -> table.newFastAppend().appendFile(null).commit())
        .isInstanceOf(NullPointerException.class)
        .hasMessage("Invalid data file: null");
}
/**
 * Parses a pipeline definition from a YAML file on disk.
 *
 * @param pipelineDefPath      path to the YAML pipeline definition
 * @param globalPipelineConfig configuration merged into the parsed definition
 * @return the parsed pipeline definition
 * @throws Exception if the file cannot be read or the definition is invalid
 */
@Override
public PipelineDef parse(Path pipelineDefPath, Configuration globalPipelineConfig)
        throws Exception {
    // Delegate to the JsonNode-based overload after reading the YAML tree.
    return parse(mapper.readTree(pipelineDefPath.toFile()), globalPipelineConfig);
}
@Test
void testEvaluateDefaultLocalTimeZone() throws Exception {
    URL resource = Resources.getResource("definitions/pipeline-definition-minimized.yaml");
    YamlPipelineDefinitionParser parser = new YamlPipelineDefinitionParser();
    PipelineDef pipelineDef = parser.parse(Paths.get(resource.toURI()), new Configuration());
    // The parser should resolve a concrete zone rather than keep the
    // placeholder default value of PIPELINE_LOCAL_TIME_ZONE.
    assertThat(pipelineDef.getConfig().get(PIPELINE_LOCAL_TIME_ZONE))
        .isNotEqualTo(PIPELINE_LOCAL_TIME_ZONE.defaultValue());
}
/**
 * Looks up an index set by id in the cached index-set configurations.
 *
 * @param indexSetId the id to search for
 * @return the materialized index set, or empty when no cached config matches
 */
@Override
public Optional<IndexSet> get(final String indexSetId) {
    return this.indexSetsCache.get()
            .stream()
            .filter(config -> Objects.equals(config.id(), indexSetId))
            // Materialize the matching config into a live IndexSet instance.
            .map(config -> (IndexSet) mongoIndexSetFactory.create(config))
            .findFirst();
}
@Test
public void indexSetsCacheShouldBeInvalidatedForIndexSetDeletion() {
    // Warm the cache with one config.
    final IndexSetConfig indexSetConfig = mock(IndexSetConfig.class);
    final List<IndexSetConfig> indexSetConfigs = Collections.singletonList(indexSetConfig);
    when(indexSetService.findAll()).thenReturn(indexSetConfigs);
    final List<IndexSetConfig> result = this.indexSetsCache.get();
    assertThat(result)
        .isNotNull()
        .hasSize(1)
        .containsExactly(indexSetConfig);
    // A deletion event must invalidate the cache...
    this.indexSetsCache.handleIndexSetDeletion(mock(IndexSetDeletedEvent.class));
    // ...so the next get() reloads fresh data from the service.
    final IndexSetConfig newIndexSetConfig = mock(IndexSetConfig.class);
    final List<IndexSetConfig> newIndexSetConfigs = Collections.singletonList(newIndexSetConfig);
    when(indexSetService.findAll()).thenReturn(newIndexSetConfigs);
    final List<IndexSetConfig> newResult = this.indexSetsCache.get();
    assertThat(newResult)
        .isNotNull()
        .hasSize(1)
        .containsExactly(newIndexSetConfig);
    // findAll must have been hit twice: once before and once after eviction.
    verify(indexSetService, times(2)).findAll();
}
/**
 * Renders the insert value as a parenthesized, comma-separated list,
 * e.g. {@code (v0, v1, v2)}.
 */
@Override
public final String toString() {
    StringJoiner joiner = new StringJoiner(", ", "(", ")");
    int count = values.size();
    for (int index = 0; index < count; index++) {
        joiner.add(getValue(index));
    }
    return joiner.toString();
}
@Test
void assertSysdateToString() {
    // A single SYSDATE column should render as "(SYSDATE)".
    List<ExpressionSegment> expressionSegments = new ArrayList<>(1);
    expressionSegments.add(new ColumnSegment(0, 6, new IdentifierValue("SYSDATE")));
    InsertValue insertValue = new InsertValue(expressionSegments);
    String actualToString = insertValue.toString();
    String expectedToString = "(SYSDATE)";
    assertThat(actualToString, is(expectedToString));
}
/**
 * Matches the selection criteria against every configured predicate of this
 * selector (user/source/principal regexes, client tags, client info,
 * resource estimates, query type, schema). All configured predicates must
 * pass; the first failing one short-circuits with empty. On success, regex
 * capture groups collected along the way are used to expand the resource
 * group template.
 *
 * @param criteria the query's selection criteria
 * @return the expanded selection context, or empty when any predicate fails
 */
@Override
public Optional<SelectionContext<VariableMap>> match(SelectionCriteria criteria)
{
    // Collects named capture-group values from each matching regex.
    Map<String, String> variables = new HashMap<>();
    if (userRegex.isPresent()) {
        Matcher userMatcher = userRegex.get().matcher(criteria.getUser());
        if (!userMatcher.matches()) {
            return Optional.empty();
        }
        addVariableValues(userRegex.get(), criteria.getUser(), variables);
    }
    if (sourceRegex.isPresent()) {
        String source = criteria.getSource().orElse("");
        if (!sourceRegex.get().matcher(source).matches()) {
            return Optional.empty();
        }
        addVariableValues(sourceRegex.get(), source, variables);
    }
    if (principalRegex.isPresent()) {
        String principal = criteria.getPrincipal().orElse("");
        if (!principalRegex.get().matcher(principal).matches()) {
            return Optional.empty();
        }
        addVariableValues(principalRegex.get(), principal, variables);
    }
    // Configured client tags must all be present on the query.
    if (!clientTags.isEmpty() && !criteria.getTags().containsAll(clientTags)) {
        return Optional.empty();
    }
    if (clientInfoRegex.isPresent() && !clientInfoRegex.get().matcher(criteria.getClientInfo().orElse(EMPTY_CRITERIA_STRING)).matches()) {
        return Optional.empty();
    }
    if (selectorResourceEstimate.isPresent() && !selectorResourceEstimate.get().match(criteria.getResourceEstimates())) {
        return Optional.empty();
    }
    // Query type and schema compare case-insensitively.
    if (queryType.isPresent()) {
        String contextQueryType = criteria.getQueryType().orElse(EMPTY_CRITERIA_STRING);
        if (!queryType.get().equalsIgnoreCase(contextQueryType)) {
            return Optional.empty();
        }
    }
    if (schema.isPresent() && criteria.getSchema().isPresent()) {
        if (criteria.getSchema().get().compareToIgnoreCase(schema.get()) != 0) {
            return Optional.empty();
        }
    }
    // Regex captures take precedence; fall back to the standard variables.
    variables.putIfAbsent(USER_VARIABLE, criteria.getUser());
    // Special handling for source, which is an optional field that is part of the standard variables
    variables.putIfAbsent(SOURCE_VARIABLE, criteria.getSource().orElse(""));
    variables.putIfAbsent(SCHEMA_VARIABLE, criteria.getSchema().orElse(""));
    VariableMap map = new VariableMap(variables);
    ResourceGroupId id = group.expandTemplate(map);
    OptionalInt firstDynamicSegment = group.getFirstDynamicSegment();
    return Optional.of(new SelectionContext<>(id, map, firstDynamicSegment));
}
@Test
public void testSchema() {
    ResourceGroupId resourceGroupId = new ResourceGroupId(new ResourceGroupId("global"), "schema1");
    // Selector constrained only by schema "schema1", expanding ${SCHEMA}.
    StaticSelector selector = new StaticSelector(
        Optional.empty(),
        Optional.empty(),
        Optional.of(ImmutableList.of()),
        Optional.empty(),
        Optional.empty(),
        Optional.empty(),
        Optional.of("schema1"),
        Optional.empty(),
        new ResourceGroupIdTemplate("global.${SCHEMA}"));
    // Matching schema selects the expanded group.
    assertEquals(selector.match(newSelectionCriteria("userA", null, "schema1", ImmutableSet.of(), EMPTY_RESOURCE_ESTIMATES)).map(SelectionContext::getResourceGroupId), Optional.of(resourceGroupId));
    // A different schema fails to match.
    assertEquals(selector.match(newSelectionCriteria("userB", "source", "schema2", ImmutableSet.of(), EMPTY_RESOURCE_ESTIMATES)), Optional.empty());
    // Extra client tags on the query do not prevent the match.
    assertEquals(selector.match(newSelectionCriteria("userA", null, "schema1", ImmutableSet.of("tag1"), EMPTY_RESOURCE_ESTIMATES)).map(SelectionContext::getResourceGroupId), Optional.of(resourceGroupId));
}
/**
 * Determines whether the connection should be kept open after this message.
 * An explicit "Connection: close" always wins; otherwise keep-alive holds
 * when it is the protocol default (HTTP/1.1) or is requested explicitly via
 * "Connection: keep-alive".
 */
public static boolean isKeepAlive(HttpMessage message) {
    if (message.headers().containsValue(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE, true)) {
        return false;
    }
    if (message.protocolVersion().isKeepAliveDefault()) {
        return true;
    }
    return message.headers().containsValue(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE, true);
}
@Test
public void testKeepAliveIfConnectionHeaderMultipleValues() {
    HttpMessage http11Message = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "http:localhost/http_1_1");
    // "close" anywhere in a multi-valued Connection header forces close,
    // regardless of position or case.
    http11Message.headers().set(
        HttpHeaderNames.CONNECTION,
        HttpHeaderValues.UPGRADE + ", " + HttpHeaderValues.CLOSE);
    assertFalse(HttpUtil.isKeepAlive(http11Message));
    http11Message.headers().set(
        HttpHeaderNames.CONNECTION,
        HttpHeaderValues.UPGRADE + ", Close");
    assertFalse(HttpUtil.isKeepAlive(http11Message));
    http11Message.headers().set(
        HttpHeaderNames.CONNECTION,
        HttpHeaderValues.CLOSE + ", " + HttpHeaderValues.UPGRADE);
    assertFalse(HttpUtil.isKeepAlive(http11Message));
    // Without "close", keep-alive survives alongside other tokens.
    http11Message.headers().set(
        HttpHeaderNames.CONNECTION,
        HttpHeaderValues.UPGRADE + ", " + HttpHeaderValues.KEEP_ALIVE);
    assertTrue(HttpUtil.isKeepAlive(http11Message));
}
/**
 * Parses a comma-separated list of key=value scheduler parameters and stores
 * them as the global updates on the given update info. A null argument means
 * no global updates were specified and leaves {@code updateInfo} untouched.
 */
@VisibleForTesting
void globalUpdates(String args, SchedConfUpdateInfo updateInfo) {
    if (args == null) {
        return;
    }
    HashMap<String, String> params = new HashMap<>();
    for (String pair : args.split(SPLIT_BY_SLASH_COMMA)) {
        // Strip escape backslashes before parsing the key=value pair.
        String cleaned = pair.replace("\\", "");
        putKeyValuePair(params, cleaned);
    }
    updateInfo.setGlobalParams(params);
}
@Test(timeout = 10000)
public void testGlobalUpdate() {
    SchedConfUpdateInfo schedUpdateInfo = new SchedConfUpdateInfo();
    // Two comma-separated key=value pairs should both be parsed.
    cli.globalUpdates("schedKey1=schedVal1,schedKey2=schedVal2", schedUpdateInfo);
    Map<String, String> paramValues = new HashMap<>();
    paramValues.put("schedKey1", "schedVal1");
    paramValues.put("schedKey2", "schedVal2");
    validateGlobalParams(schedUpdateInfo, paramValues);
}
/**
 * Returns the launch context for the agent.
 *
 * @return the context map held by this descriptor (returned as-is, not copied)
 */
@Override
public Map<String, String> context() {
    return context;
}
@Test
public void contextShouldContainEnvAndPropertiesAndHostAndPort() throws Exception {
    String hostname = "xx.xx.xx";
    int port = 20;
    AgentBootstrapperArgs bootstrapperArgs = new AgentBootstrapperArgs().setServerUrl(new URL("https://" + hostname + ":" + port + "/go")).setRootCertFile(null).setSslVerificationMode(AgentBootstrapperArgs.SslMode.NONE);
    DefaultAgentLaunchDescriptorImpl launchDescriptor = new DefaultAgentLaunchDescriptorImpl(bootstrapperArgs, new AgentBootstrapper());
    // Every bootstrapper property must surface in the launch context.
    Map context = launchDescriptor.context();
    assertContainsAll(bootstrapperArgs.toProperties(), context);
}
/**
 * Returns a collector that accumulates stream elements into an unmodifiable
 * {@link List}. The result preserves encounter order; mutating calls on it
 * throw {@link UnsupportedOperationException}.
 *
 * @param <T> the type of the collected elements
 * @return a collector producing an unmodifiable list
 */
public static <T> Collector<T, ?, List<T>> toUnmodifiableList() {
    return Collectors.collectingAndThen(
            Collectors.toList(),
            collected -> Collections.unmodifiableList(collected));
}
@Test
public void convertToUnmodifiableProducesFaithfulCopy() {
    // The unmodifiable copy must equal the source element-for-element.
    List<Integer> list = Arrays.asList(1, 2, 3);
    List<Integer> unmodifiable = list.stream().collect(StreamUtils.toUnmodifiableList());
    assertEquals(list, unmodifiable);
}
/**
 * SQL function: returns the Unicode normalization of a string under the
 * requested form (NFD, NFC, NFKD or NFKC). An unrecognized form name raises
 * INVALID_FUNCTION_ARGUMENT.
 */
@Description("transforms the string to normalized form")
@ScalarFunction
@LiteralParameters({"x", "y"})
@SqlType(StandardTypes.VARCHAR)
public static Slice normalize(@SqlType("varchar(x)") Slice slice, @SqlType("varchar(y)") Slice form)
{
    String formName = form.toStringUtf8();
    Normalizer.Form targetForm;
    try {
        targetForm = Normalizer.Form.valueOf(formName);
    }
    catch (IllegalArgumentException e) {
        throw new PrestoException(INVALID_FUNCTION_ARGUMENT, "Normalization form must be one of [NFD, NFC, NFKD, NFKC]");
    }
    String normalized = Normalizer.normalize(slice.toStringUtf8(), targetForm);
    return utf8Slice(normalized);
}
@Test
public void testNormalize() {
    // Decomposing forms split o-umlaut into o + combining diaeresis.
    assertFunction("normalize('sch\u00f6n', NFD)", VARCHAR, "scho\u0308n");
    // Default form (no second argument) leaves composed text unchanged (NFC).
    assertFunction("normalize('sch\u00f6n')", VARCHAR, "sch\u00f6n");
    assertFunction("normalize('sch\u00f6n', NFC)", VARCHAR, "sch\u00f6n");
    assertFunction("normalize('sch\u00f6n', NFKD)", VARCHAR, "scho\u0308n");
    assertFunction("normalize('sch\u00f6n', NFKC)", VARCHAR, "sch\u00f6n");
    // Compatibility forms expand squared/circled and half-width characters.
    assertFunction("normalize('\u3231\u3327\u3326\u2162', NFKC)", VARCHAR, "(\u682a)\u30c8\u30f3\u30c9\u30ebIII");
    assertFunction("normalize('\uff8a\uff9d\uff76\uff78\uff76\uff85', NFKC)", VARCHAR, "\u30cf\u30f3\u30ab\u30af\u30ab\u30ca");
}
/**
 * Identifies the request message type this action handles.
 *
 * @return the {@code GetDataTypes} class literal
 */
@Override
public Class<GetDataTypes> getRequestType() {
    return GetDataTypes.class;
}
@Test
public void testGetRequestType() {
    AuthServiceProviderRegistry registry = mock(AuthServiceProviderRegistry.class);
    DataTypesAction dataTypesAction = new DataTypesAction(registry, new Monitor() {
    });
    // The action must advertise GetDataTypes as its request type.
    Class<GetDataTypes> actual = dataTypesAction.getRequestType();
    assertNotEquals(actual, null);
    assertEquals(actual, GetDataTypes.class);
}
/**
 * Supplies a fresh processor instance for this map operation.
 * A new processor is created per invocation, as required by the
 * {@code ProcessorSupplier} contract.
 */
@Override
public Processor<KIn, VIn, KOut, VOut> get() {
    return new KStreamMapProcessor();
}
@Test
public void testMap() {
    final StreamsBuilder builder = new StreamsBuilder();
    final String topicName = "topic";
    final int[] expectedKeys = new int[] {0, 1, 2, 3};
    final MockApiProcessorSupplier<String, Integer, Void, Void> supplier = new MockApiProcessorSupplier<>();
    final KStream<Integer, String> stream = builder.stream(topicName, Consumed.with(Serdes.Integer(), Serdes.String()));
    // map swaps key and value; timestamps must pass through untouched.
    stream.map((key, value) -> KeyValue.pair(value, key)).process(supplier);
    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        for (final int expectedKey : expectedKeys) {
            final TestInputTopic<Integer, String> inputTopic =
                driver.createInputTopic(topicName, new IntegerSerializer(), new StringSerializer(), Instant.ofEpochMilli(0L), Duration.ZERO);
            // Timestamps decrease (10, 9, 8, 7) to verify they are preserved.
            inputTopic.pipeInput(expectedKey, "V" + expectedKey, 10L - expectedKey);
        }
    }
    final KeyValueTimestamp[] expected = new KeyValueTimestamp[] {new KeyValueTimestamp<>("V0", 0, 10),
        new KeyValueTimestamp<>("V1", 1, 9),
        new KeyValueTimestamp<>("V2", 2, 8),
        new KeyValueTimestamp<>("V3", 3, 7)};
    assertEquals(4, supplier.theCapturedProcessor().processed().size());
    for (int i = 0; i < expected.length; i++) {
        assertEquals(expected[i], supplier.theCapturedProcessor().processed().get(i));
    }
}
/**
 * Sets the Avro compression codec used when writing output files.
 *
 * @param codec the codec to apply; must not be null
 * @throws NullPointerException if {@code codec} is null
 */
public void setCodec(final Codec codec) {
    if (codec == null) {
        throw new NullPointerException("codec can not be null");
    }
    this.codec = codec;
}
@Test
void testCompression() throws Exception {
    // given: one plain and one Snappy-compressed output format over temp files
    final Path outputPath = new Path(File.createTempFile("avro-output-file", "avro").getAbsolutePath());
    final AvroOutputFormat<User> outputFormat = new AvroOutputFormat<>(outputPath, User.class);
    outputFormat.setWriteMode(FileSystem.WriteMode.OVERWRITE);
    final Path compressedOutputPath = new Path(
        File.createTempFile("avro-output-file", "compressed.avro")
            .getAbsolutePath());
    final AvroOutputFormat<User> compressedOutputFormat = new AvroOutputFormat<>(compressedOutputPath, User.class);
    compressedOutputFormat.setWriteMode(FileSystem.WriteMode.OVERWRITE);
    compressedOutputFormat.setCodec(AvroOutputFormat.Codec.SNAPPY);
    // when: the same records are written through both formats
    output(outputFormat);
    output(compressedOutputFormat);
    // then: the compressed file must be strictly smaller
    assertThat(fileSize(outputPath)).isGreaterThan(fileSize(compressedOutputPath));
    // cleanup
    FileSystem fs = FileSystem.getLocalFileSystem();
    fs.delete(outputPath, false);
    fs.delete(compressedOutputPath, false);
}
/**
 * Splits paragraph text into the interpreter name, optional local properties and the
 * script body.
 *
 * <p>Expected shape: {@code %intp(key=value,...) script}. If the leading {@code %intp}
 * marker is absent the whole text is treated as script with an empty interpreter name.
 *
 * @param text raw paragraph text
 * @return interpreter name, script (leading whitespace stripped) and parsed properties
 */
public static ParseResult parse(String text) { Map<String, String> localProperties = new HashMap<>(); String intpText = ""; String scriptText = null; Matcher matcher = REPL_PATTERN.matcher(text); if (matcher.find()) { String headingSpace = matcher.group(1); intpText = matcher.group(2); /* startPos skips heading whitespace, the interpreter name and the '%' sign */ int startPos = headingSpace.length() + intpText.length() + 1; if (startPos < text.length() && text.charAt(startPos) == '(') { startPos = parseLocalProperties(text, startPos, localProperties); } scriptText = text.substring(startPos); } else { intpText = ""; scriptText = text; } return new ParseResult(intpText, removeLeadingWhiteSpaces(scriptText), localProperties); }
// A quoted property value may contain '=' characters; the parser must keep the whole
// quoted string as one value and leave the script text empty.
@Test void testParagraphTextQuotedPropertyValue1() { ParagraphTextParser.ParseResult parseResult = ParagraphTextParser.parse( "%spark.pyspark(pool=\"value with = inside\")"); assertEquals("spark.pyspark", parseResult.getIntpText()); assertEquals(1, parseResult.getLocalProperties().size()); assertEquals("value with = inside", parseResult.getLocalProperties().get("pool")); assertEquals("", parseResult.getScriptText()); }
/**
 * Builds a mapping from each master node to the collection of its replicas.
 *
 * <p>Masters without replicas do not appear as keys; replicas whose master id does not
 * match any known master are skipped, matching the original behavior.
 *
 * @return master node to replica nodes
 */
@Override
public Map<RedisClusterNode, Collection<RedisClusterNode>> clusterGetMasterReplicaMap() {
    // Snapshot the nodes once instead of re-iterating the Iterable a second time.
    Collection<RedisClusterNode> nodes = new ArrayList<>();
    for (RedisClusterNode node : clusterGetNodes()) {
        nodes.add(node);
    }

    // Index masters by id so each replica is matched in O(1) instead of scanning all masters.
    Map<String, RedisClusterNode> mastersById = new HashMap<>();
    for (RedisClusterNode node : nodes) {
        if (node.isMaster()) {
            mastersById.put(node.getId(), node);
        }
    }

    Map<RedisClusterNode, Collection<RedisClusterNode>> result = new HashMap<>();
    for (RedisClusterNode node : nodes) {
        String masterId = node.getMasterId();
        if (masterId == null) {
            continue;
        }
        RedisClusterNode master = mastersById.get(masterId);
        if (master != null) {
            result.computeIfAbsent(master, k -> new ArrayList<>()).add(node);
        }
    }
    return result;
}
// Against the 3-master/3-replica test cluster, expects three masters each mapped to
// exactly one replica.
@Test public void testClusterGetMasterSlaveMap() { Map<RedisClusterNode, Collection<RedisClusterNode>> map = connection.clusterGetMasterReplicaMap(); assertThat(map).hasSize(3); for (Collection<RedisClusterNode> slaves : map.values()) { assertThat(slaves).hasSize(1); } }
/**
 * Validates a converted capacity-scheduler configuration by booting a real
 * CapacityScheduler against the generated capacity-scheduler.xml and yarn-site.xml
 * in {@code outputDir} and immediately stopping it.
 *
 * @param outputDir directory containing the converted configuration files
 * @throws VerificationException if the scheduler fails to start with the converted config
 */
public void validateConvertedConfig(String outputDir) throws Exception { QueueMetrics.clearQueueMetrics(); Path configPath = new Path(outputDir, "capacity-scheduler.xml"); CapacitySchedulerConfiguration csConfig = new CapacitySchedulerConfiguration( new Configuration(false), false); csConfig.addResource(configPath); Path convertedSiteConfigPath = new Path(outputDir, "yarn-site.xml"); Configuration siteConf = new YarnConfiguration( new Configuration(false)); siteConf.addResource(convertedSiteConfigPath); RMContextImpl rmContext = new RMContextImpl(); siteConf.set(YarnConfiguration.FS_BASED_RM_CONF_STORE, outputDir); ConfigurationProvider provider = new FileSystemBasedConfigurationProvider(); provider.init(siteConf); rmContext.setConfigurationProvider(provider); RMNodeLabelsManager mgr = new RMNodeLabelsManager(); mgr.init(siteConf); rmContext.setNodeLabelManager(mgr); try (CapacityScheduler cs = new CapacityScheduler()) { cs.setConf(siteConf); cs.setRMContext(rmContext); cs.serviceInit(csConfig); cs.serviceStart(); LOG.info("Capacity scheduler was successfully started"); cs.serviceStop(); } catch (Exception e) { LOG.error("Could not start Capacity Scheduler", e); throw new VerificationException( "Verification of converted configuration failed", e); } }
// A known-good converted configuration directory must validate without throwing.
@Test public void testValidationPassed() throws Exception { validator.validateConvertedConfig(CONFIG_DIR_PASSES); // expected: no exception }
/**
 * Updates the content of an online-edited resource file.
 * Empty content is rejected up front with RESOURCE_FILE_IS_EMPTY; otherwise the update
 * is delegated to the resource service.
 */
@Operation(summary = "updateResourceContent", description = "UPDATE_RESOURCE_NOTES") @Parameters({ @Parameter(name = "content", description = "CONTENT", required = true, schema = @Schema(implementation = String.class)), @Parameter(name = "fullName", description = "FULL_NAME", required = true, schema = @Schema(implementation = String.class)), @Parameter(name = "tenantCode", description = "TENANT_CODE", required = true, schema = @Schema(implementation = String.class)) }) @PutMapping(value = "/update-content") @ApiException(EDIT_RESOURCE_FILE_ON_LINE_ERROR) public Result updateResourceContent(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "fullName") String fullName, @RequestParam(value = "tenantCode") String tenantCode, @RequestParam(value = "content") String content) { if (StringUtils.isEmpty(content)) { log.error("The resource file contents are not allowed to be empty"); return error(RESOURCE_FILE_IS_EMPTY.getCode(), RESOURCE_FILE_IS_EMPTY.getMsg()); } return resourceService.updateResourceContent(loginUser, fullName, tenantCode, content); }
// Mocks the service to return TENANT_NOT_EXIST and asserts the controller passes the
// status code through the PUT /resources/update-content endpoint unchanged.
@Test public void testUpdateResourceContent() throws Exception { Result mockResult = new Result<>(); mockResult.setCode(Status.TENANT_NOT_EXIST.getCode()); Mockito.when(resourcesService.updateResourceContent(Mockito.any(), Mockito.anyString(), Mockito.anyString(), Mockito.anyString())) .thenReturn(mockResult); MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("id", "1"); paramsMap.add("content", "echo test_1111"); paramsMap.add("fullName", "dolphinscheduler/resourcePath"); paramsMap.add("tenantCode", "123"); MvcResult mvcResult = mockMvc.perform(put("/resources/update-content") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isOk()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assertions.assertEquals(Status.TENANT_NOT_EXIST.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); }
/**
 * Returns the memcache protocol configuration of this network configuration.
 *
 * @return the memcache protocol configuration
 */
public MemcacheProtocolConfig getMemcacheProtocolConfig() {
    return this.memcacheProtocolConfig;
}
@Test
public void testMemcacheProtocolConfig_isNotNullByDefault() {
    // A freshly created network config must always expose a memcache protocol config.
    MemcacheProtocolConfig config = networkConfig.getMemcacheProtocolConfig();
    assertNotNull(config);
}
/**
 * Sets the artifact id of this model's parent.
 *
 * @param parentArtifactId the parent artifact id to store
 */
public void setParentArtifactId(String parentArtifactId) {
    this.parentArtifactId = parentArtifactId;
}
@Test
public void testSetParentArtifactId() {
    // A value set through the setter must be readable back via the getter.
    Model model = new Model();
    model.setParentArtifactId("something");
    assertNotNull(model.getParentArtifactId());
}
/**
 * Returns the name of this object.
 *
 * @return the stored name
 */
public String getName() {
    return this.name;
}
@Test
public void getName() {
    // The wrapper must report the same name as the underlying file.
    String expected = file.getName();
    assertEquals(expected, ss.getName());
}
/**
 * Returns the analyzed instructions of this method, in instruction order.
 *
 * @return the list of analyzed instructions
 */
public List<AnalyzedInstruction> getAnalyzedInstructions() {
    return this.analyzedInstructions.getValues();
}
// On ART (oat version 56), an instance-of followed by if-nez must narrow the register
// type on the taken branch: before the branch the register is Ljava/lang/Object;, on the
// "instance_of" label it must be narrowed to Lmain;.
@Test public void testInstanceOfNarrowingNez_art() throws IOException { MethodImplementationBuilder builder = new MethodImplementationBuilder(2); builder.addInstruction(new BuilderInstruction22c(Opcode.INSTANCE_OF, 0, 1, new ImmutableTypeReference("Lmain;"))); builder.addInstruction(new BuilderInstruction21t(Opcode.IF_NEZ, 0, builder.getLabel("instance_of"))); builder.addInstruction(new BuilderInstruction10x(Opcode.RETURN_VOID)); builder.addLabel("instance_of"); builder.addInstruction(new BuilderInstruction10x(Opcode.RETURN_VOID)); MethodImplementation methodImplementation = builder.getMethodImplementation(); Method method = new ImmutableMethod("Lmain;", "narrowing", Collections.singletonList(new ImmutableMethodParameter("Ljava/lang/Object;", null, null)), "V", AccessFlags.PUBLIC.getValue(), null, null, methodImplementation); ClassDef classDef = new ImmutableClassDef("Lmain;", AccessFlags.PUBLIC.getValue(), "Ljava/lang/Object;", null, null, null, null, Collections.singletonList(method)); DexFile dexFile = new ImmutableDexFile(forArtVersion(56), Collections.singletonList(classDef)); ClassPath classPath = new ClassPath(Lists.newArrayList(new DexClassProvider(dexFile)), true, 56); MethodAnalyzer methodAnalyzer = new MethodAnalyzer(classPath, method, null, false); List<AnalyzedInstruction> analyzedInstructions = methodAnalyzer.getAnalyzedInstructions(); Assert.assertEquals("Ljava/lang/Object;", analyzedInstructions.get(2).getPreInstructionRegisterType(1).type.getType()); Assert.assertEquals("Lmain;", analyzedInstructions.get(3).getPreInstructionRegisterType(1).type.getType()); }
/**
 * Records a change of the issue's rule-description context key relative to a previous value.
 *
 * <p>The issue is first set to {@code previousContextKey} and, only when the current value
 * differs, restored to the current key with {@code changed} flagged. The intermediate
 * set-to-previous call presumably lets the issue track the old value for field diffing —
 * NOTE(review): confirm against DefaultIssue's change-tracking semantics.
 *
 * @param issue issue to update
 * @param previousContextKey the context key previously stored, possibly null
 * @return true if the current key differs from the previous one (issue marked changed)
 */
public boolean setRuleDescriptionContextKey(DefaultIssue issue, @Nullable String previousContextKey) { String currentContextKey = issue.getRuleDescriptionContextKey().orElse(null); issue.setRuleDescriptionContextKey(previousContextKey); if (!Objects.equals(currentContextKey, previousContextKey)) { issue.setRuleDescriptionContextKey(currentContextKey); issue.setChanged(true); return true; } return false; }
@Test
void setRuleDescriptionContextKey_dontSetContextKeyIfBothValuesAreNull() {
    issue.setRuleDescriptionContextKey(null);
    // No update must be reported when both current and previous keys are null.
    boolean changed = underTest.setRuleDescriptionContextKey(issue, null);
    assertThat(changed).isFalse();
    assertThat(issue.getRuleDescriptionContextKey()).isEmpty();
}
/**
 * Routes a remoting request to the broker named in the request's ext fields.
 *
 * <p>SEND_MESSAGE_V2 uses abbreviated ext-field keys, so the broker name lives under a
 * different key for that request code. Requests without a broker name (older clients)
 * are rejected with VERSION_NOT_SUPPORTED. Oneway requests are fired and forgotten;
 * otherwise the response (or error) is written back asynchronously.
 *
 * @return always null; responses are written through the channel context
 */
protected RemotingCommand request(ChannelHandlerContext ctx, RemotingCommand request, ProxyContext context,
    long timeoutMillis) throws Exception {
    // Deduplicated broker-name lookup: only the ext-field key differs per request code.
    String brokerNameField = request.getCode() == RequestCode.SEND_MESSAGE_V2
        ? BROKER_NAME_FIELD_FOR_SEND_MESSAGE_V2
        : BROKER_NAME_FIELD;
    String brokerName = request.getExtFields().get(brokerNameField);
    if (brokerName == null) {
        return RemotingCommand.buildErrorResponse(ResponseCode.VERSION_NOT_SUPPORTED,
            "Request doesn't have field bname");
    }
    if (request.isOnewayRPC()) {
        messagingProcessor.requestOneway(context, brokerName, request, timeoutMillis);
        return null;
    }
    messagingProcessor.request(context, brokerName, request, timeoutMillis)
        .thenAccept(r -> writeResponse(ctx, context, request, r))
        .exceptionally(t -> {
            writeErrResponse(ctx, context, request, t);
            return null;
        });
    return null;
}
// When the processor's future fails with a ProxyException(FORBIDDEN), the activity must
// write an error response with NO_PERMISSION back to the channel and return null.
@Test public void testRequestProxyException() throws Exception { ArgumentCaptor<RemotingCommand> captor = ArgumentCaptor.forClass(RemotingCommand.class); String brokerName = "broker"; String remark = "exception"; CompletableFuture<RemotingCommand> future = new CompletableFuture<>(); future.completeExceptionally(new ProxyException(ProxyExceptionCode.FORBIDDEN, remark)); when(messagingProcessorMock.request(any(), eq(brokerName), any(), anyLong())).thenReturn(future); RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.PULL_MESSAGE, null); request.addExtField(AbstractRemotingActivity.BROKER_NAME_FIELD, brokerName); RemotingCommand remotingCommand = remotingActivity.request(ctx, request, null, 10000); assertThat(remotingCommand).isNull(); verify(ctx, times(1)).writeAndFlush(captor.capture()); assertThat(captor.getValue().getCode()).isEqualTo(ResponseCode.NO_PERMISSION); }
/**
 * Returns the selector carried by this event.
 *
 * @return the event source cast to {@link SelectorDO}
 */
public SelectorDO getSelector() {
    final SelectorDO selector = (SelectorDO) getSource();
    return selector;
}
@Test
void getSelector() {
    // The event must hand back exactly the selector it was created with.
    assertEquals(selectorDO, selectorCreatedEvent.getSelector());
}
/**
 * Lists the given directory using the chunk size configured for this host.
 *
 * @param directory directory to list
 * @param listener receives progress callbacks per chunk
 * @return attributed listing of the directory
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    final int chunksize = new HostPreferences(session.getHost()).getInteger("sds.listing.chunksize");
    return this.list(directory, listener, chunksize);
}
// Integration test against SDS: an empty room lists empty (but the chunk callback still
// fires), files appear with growing counts for both chunked (size 1) and default listings,
// and NFD-normalized unicode filenames are NOT preserved by the server. Cleans up the room.
@Test public void testList() throws Exception { final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session); final Path room = new SDSDirectoryFeature(session, nodeid).mkdir( new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus()); final AtomicBoolean callback = new AtomicBoolean(); assertTrue(new SDSListService(session, nodeid).list(room, new DisabledListProgressListener() { @Override public void chunk(final Path parent, final AttributedList<Path> list) { assertNotSame(AttributedList.EMPTY, list); callback.set(true); } }).isEmpty()); assertTrue(callback.get()); final String filename = String.format("%s%s", new AlphanumericRandomStringService().random(), new NFDNormalizer().normalize("ä")); final Path file = new SDSTouchFeature(session, nodeid).touch(new Path(room, filename, EnumSet.of(Path.Type.file)), new TransferStatus()); final AttributedList<Path> list = new SDSListService(session, nodeid).list(room, new DisabledListProgressListener(), 1); assertEquals(1, (list.size())); assertNotNull(list.find(new DefaultPathPredicate(file))); // Not preserving Unicode normalization assertNotEquals(filename, list.find(new DefaultPathPredicate(file)).getName()); new SDSTouchFeature(session, nodeid).touch(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus()); assertEquals(2, (new SDSListService(session, nodeid).list(room, new DisabledListProgressListener(), 1).size())); assertEquals(2, (new SDSListService(session, nodeid).list(room, new DisabledListProgressListener()).size())); new SDSTouchFeature(session, nodeid).touch(new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus()); assertEquals(3, (new SDSListService(session, nodeid).list(room, new DisabledListProgressListener(), 1).size())); assertEquals(3, (new SDSListService(session, nodeid).list(room, new DisabledListProgressListener()).size())); new SDSDeleteFeature(session, nodeid).delete(Collections.<Path>singletonList(room), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
/**
 * Returns the write-lock side of this read-write lock.
 *
 * @return a write lock backed by the same Redis entry; each call returns a new facade
 */
@Override
public RLock writeLock() {
    return new RedissonWriteLock(this.commandExecutor, this.getName());
}
// Exercises write-lock exclusivity and reentrancy: while held, other threads cannot take
// the read lock; after unlock, read locks succeed; timing via sleeps is intentional to
// let the background thread observe both states.
@Test public void testWriteLock() throws InterruptedException { final RReadWriteLock lock = redisson.getReadWriteLock("lock"); final RLock writeLock = lock.writeLock(); writeLock.lock(); Assertions.assertTrue(lock.writeLock().tryLock()); Thread t = new Thread() { public void run() { Assertions.assertFalse(writeLock.isHeldByCurrentThread()); Assertions.assertTrue(writeLock.isLocked()); Assertions.assertFalse(lock.readLock().tryLock()); Assertions.assertFalse(redisson.getReadWriteLock("lock").readLock().tryLock()); try { Thread.sleep(1000); } catch (InterruptedException e) { // TODO Auto-generated catch block e.printStackTrace(); } Assertions.assertTrue(lock.readLock().tryLock()); Assertions.assertTrue(redisson.getReadWriteLock("lock").readLock().tryLock()); }; }; t.start(); t.join(50); writeLock.unlock(); Assertions.assertTrue(lock.readLock().tryLock()); Assertions.assertTrue(writeLock.isHeldByCurrentThread()); writeLock.unlock(); Thread.sleep(1000); Assertions.assertFalse(lock.writeLock().tryLock()); Assertions.assertFalse(lock.writeLock().isLocked()); Assertions.assertFalse(lock.writeLock().isHeldByCurrentThread()); lock.writeLock().forceUnlock(); }
/**
 * Removes and returns the entry with the smallest timestamp, blocking until the
 * asynchronous operation completes.
 *
 * @return the polled entry
 */
@Override
public TimeSeriesEntry<V, L> pollFirstEntry() {
    return this.get(pollFirstEntryAsync());
}
@Test
public void testPollFirstEntry() {
    RTimeSeries<String, String> series = redisson.getTimeSeries("test");
    series.add(1, "10", "100");
    series.add(2, "20");
    series.add(3, "30");
    // Polling removes and returns the entry with the smallest timestamp (with its label).
    TimeSeriesEntry<String, String> polled = series.pollFirstEntry();
    assertThat(polled).isEqualTo(new TimeSeriesEntry<>(1, "10", "100"));
    assertThat(series.size()).isEqualTo(2);
    // The next-smallest entry becomes the new head.
    TimeSeriesEntry<String, String> head = series.firstEntry();
    assertThat(head).isEqualTo(new TimeSeriesEntry<>(2, "20"));
}
@Override public void triggerOnIndexCreation() { try (DbSession dbSession = dbClient.openSession(false)) { // remove already existing indexing task, if any removeExistingIndexationTasks(dbSession); dbClient.branchDao().updateAllNeedIssueSync(dbSession); List<BranchDto> branchInNeedOfIssueSync = dbClient.branchDao().selectBranchNeedingIssueSync(dbSession); LOG.info("{} branch found in need of issue sync.", branchInNeedOfIssueSync.size()); if (branchInNeedOfIssueSync.isEmpty()) { return; } List<String> projectUuids = branchInNeedOfIssueSync.stream().map(BranchDto::getProjectUuid).distinct().collect(toCollection(ArrayList<String>::new)); LOG.info("{} projects found in need of issue sync.", projectUuids.size()); sortProjectUuids(dbSession, projectUuids); Map<String, List<BranchDto>> branchesByProject = branchInNeedOfIssueSync.stream() .collect(Collectors.groupingBy(BranchDto::getProjectUuid)); List<CeTaskSubmit> tasks = new ArrayList<>(); for (String projectUuid : projectUuids) { List<BranchDto> branches = branchesByProject.get(projectUuid); for (BranchDto branch : branches) { tasks.add(buildTaskSubmit(branch)); } } ceQueue.massSubmit(tasks); dbSession.commit(); } }
// Pre-existing BRANCH_ISSUE_SYNC queue/activity rows (and their characteristics) must be
// purged by triggerOnIndexCreation, while unrelated REPORT tasks survive; the deletion
// steps must be logged.
@Test public void remove_existing_indexation_task() { String reportTaskUuid = persistReportTasks(); CeQueueDto task = new CeQueueDto(); task.setUuid("uuid_2"); task.setTaskType(BRANCH_ISSUE_SYNC); dbClient.ceQueueDao().insert(dbTester.getSession(), task); CeActivityDto activityDto = new CeActivityDto(task); activityDto.setStatus(Status.SUCCESS); dbClient.ceActivityDao().insert(dbTester.getSession(), activityDto); dbTester.commit(); underTest.triggerOnIndexCreation(); assertThat(dbClient.ceQueueDao().selectAllInAscOrder(dbTester.getSession())).extracting("uuid").containsExactly(reportTaskUuid); assertThat(dbClient.ceActivityDao().selectByTaskType(dbTester.getSession(), BRANCH_ISSUE_SYNC)).isEmpty(); assertThat(dbClient.ceActivityDao().selectByTaskType(dbTester.getSession(), REPORT)).hasSize(1); assertThat(dbClient.ceTaskCharacteristicsDao().selectByTaskUuids(dbTester.getSession(), new HashSet<>(List.of("uuid_2")))).isEmpty(); assertThat(logTester.logs(Level.INFO)) .contains( "1 pending indexing task found to be deleted...", "1 completed indexing task found to be deleted...", "Indexing task deletion complete.", "Deleting tasks characteristics...", "Tasks characteristics deletion complete."); }
/**
 * Evaluates a variable expression in the context of a given taskrun.
 * Resolves execution, taskrun, flow and task, then renders the expression; evaluation
 * errors are returned in the EvalResult (error + stack trace) rather than thrown.
 */
@ExecuteOn(TaskExecutors.IO) @Post(uri = "/{executionId}/eval/{taskRunId}", consumes = MediaType.TEXT_PLAIN) @Operation(tags = {"Executions"}, summary = "Evaluate a variable expression for this taskrun") public EvalResult eval( @Parameter(description = "The execution id") @PathVariable String executionId, @Parameter(description = "The taskrun id") @PathVariable String taskRunId, @Body String expression ) throws InternalException { Execution execution = executionRepository .findById(tenantService.resolveTenant(), executionId) .orElseThrow(() -> new NoSuchElementException("Unable to find execution '" + executionId + "'")); TaskRun taskRun = execution .findTaskRunByTaskRunId(taskRunId); Flow flow = flowRepository .findByExecution(execution); Task task = flow.findTaskByTaskId(taskRun.getTaskId()); try { return EvalResult.builder() .result(runContextRender(flow, task, execution, taskRun, expression)) .build(); } catch (IllegalVariableEvaluationException e) { return EvalResult.builder() .error(e.getMessage()) .stackTrace(ExceptionUtils.getStackTrace(e)) .build(); } }
// Covers literal expressions, taskrun context variables, output lookups, and the error
// path: an unknown variable yields a null result plus error message and stack trace.
@Test void eval() throws TimeoutException { Execution execution = runnerUtils.runOne(null, "io.kestra.tests", "each-sequential-nested"); ExecutionController.EvalResult result = this.eval(execution, "my simple string", 0); assertThat(result.getResult(), is("my simple string")); result = this.eval(execution, "{{ taskrun.id }}", 0); assertThat(result.getResult(), is(execution.getTaskRunList().getFirst().getId())); result = this.eval(execution, "{{ outputs['1-1_return'][taskrun.value].value }}", 21); assertThat(result.getResult(), containsString("1-1_return")); result = this.eval(execution, "{{ missing }}", 21); assertThat(result.getResult(), is(nullValue())); assertThat(result.getError(), containsString("Unable to find `missing` used in the expression `{{ missing }}` at line 1")); assertThat(result.getStackTrace(), containsString("Unable to find `missing` used in the expression `{{ missing }}` at line 1")); }
/**
 * Deserializes a JSON object string into a sorted, concurrent map keyed by field name.
 *
 * @param json the JSON object string to parse
 * @return a {@link ConcurrentSkipListMap} view of the JSON object
 */
public ConcurrentNavigableMap<String, Object> toTreeMap(final String json) {
    final TypeToken<ConcurrentSkipListMap<String, Object>> token =
            new TypeToken<ConcurrentSkipListMap<String, Object>>() { };
    return GSON_MAP.fromJson(json, token.getType());
}
// Round-trips a mixed-type JSON object through toTreeMap and verifies every entry,
// deserializing nested objects via fromJson. Also checks the null-input contract.
@Test
public void testToTreeMap() {
    Map<String, Object> map = ImmutableMap.of("id", 123L, "name", "test", "double", 1.0D, "boolean", true, "data", generateTestObject());
    String json = "{\"name\":\"test\",\"id\":123,\"double\":1.0,\"boolean\":true,\"data\":" + EXPECTED_JSON + "}";
    Map<String, Object> parseMap = GsonUtils.getInstance().toTreeMap(json);
    map.forEach((key, value) -> {
        assertTrue(parseMap.containsKey(key));
        Object jsonValue = parseMap.get(key);
        if (jsonValue instanceof JsonElement) {
            assertEquals(value, GsonUtils.getInstance().fromJson((JsonElement) jsonValue, TestObject.class));
        } else {
            assertEquals(value, parseMap.get(key));
        }
    });
    // Fix: this test targets toTreeMap, so the null-input assertion must call
    // toTreeMap (the original asserted toObjectMap(null), a copy-paste slip).
    assertNull(GsonUtils.getInstance().toTreeMap(null));
}
/**
 * Stores (or overwrites) the value for the given field.
 *
 * @param fieldName the field to set
 * @param value the value to associate, may be null
 */
public void putValue(String fieldName, @Nullable Object value) {
    this._fieldToValueMap.put(fieldName, value);
}
@Test
public void testIntValuesEqual() {
    // Two rows holding the same int under the same field name must compare equal.
    GenericRow left = new GenericRow();
    left.putValue("one", 1);
    GenericRow right = new GenericRow();
    right.putValue("one", 1);
    Assert.assertEquals(left, right);
}
/**
 * Returns a stage that completes with the handler's result when the given stage
 * completes exceptionally, and with the original value otherwise.
 *
 * <p>NOTE(review): for dependent stages the throwable handed to the handler may be a
 * {@code CompletionException} wrapping the root cause — callers should unwrap if needed.
 *
 * @param completionStage the stage to protect
 * @param exceptionHandler maps the failure to a fallback value
 * @param <T> the result type
 * @return the recovered stage
 */
public static <T> CompletionStage<T> recover(CompletionStage<T> completionStage,
                                             Function<Throwable, T> exceptionHandler) {
    return completionStage.exceptionally(exceptionHandler);
}
// The typed overload recover(stage, TimeoutException.class, handler) must NOT swallow a
// RuntimeException of a different type: the original exception must propagate as cause.
@Test public void shouldThrowRuntimeException() { RuntimeException exception = new RuntimeException("blub"); CompletableFuture<String> future = new CompletableFuture<>(); future.completeExceptionally(exception); assertThatThrownBy(() -> recover(future, TimeoutException.class, (e) -> "fallback").toCompletableFuture() .get(1, TimeUnit.SECONDS)) .hasCause(exception); }
/**
 * Returns all registered protocols that are currently enabled.
 *
 * @return the enabled protocols
 */
public List<Protocol> find() {
    return this.find(protocol -> protocol.isEnabled());
}
// find() must include default and provider-specific protocols but exclude any protocol
// whose isEnabled() returns false.
@Test public void testGetProtocols() { final TestProtocol defaultProtocol = new TestProtocol(Scheme.ftp); final TestProtocol providerProtocol = new TestProtocol(Scheme.ftp) { @Override public String getProvider() { return "c"; } }; final TestProtocol disabledProtocol = new TestProtocol(Scheme.sftp) { @Override public boolean isEnabled() { return false; } }; final ProtocolFactory f = new ProtocolFactory(new HashSet<>( Arrays.asList(defaultProtocol, providerProtocol, disabledProtocol))); final List<Protocol> protocols = f.find(); assertTrue(protocols.contains(defaultProtocol)); assertTrue(protocols.contains(providerProtocol)); assertFalse(protocols.contains(disabledProtocol)); }
/**
 * Registers the data handler bean for the rewrite plugin.
 *
 * @return a new {@link RewritePluginDataHandler}
 */
@Bean
public PluginDataHandler rewritePluginDataHandler() {
    final RewritePluginDataHandler handler = new RewritePluginDataHandler();
    return handler;
}
// The auto-configuration must expose a bean named "rewritePluginDataHandler" of type
// PluginDataHandler in a fresh application context.
@Test public void testRewritePluginDataHandler() { new ApplicationContextRunner() .withConfiguration(AutoConfigurations.of(RewritePluginConfiguration.class)) .withBean(RewritePluginConfigurationTest.class) .withPropertyValues("debug=true") .run(context -> { PluginDataHandler handler = context.getBean("rewritePluginDataHandler", PluginDataHandler.class); assertNotNull(handler); }); }
/**
 * Runs the tool: initializes, processes the namespace, and maps failures to exit codes.
 * The dispatcher is always shut down, even on success.
 *
 * @return the exit status of the run
 */
private ExitStatus run() {
    try {
        init();
        return new Processor().processNamespace().getExitStatus();
    } catch (IllegalArgumentException e) {
        System.out.println(e + ". Exiting ...");
        // Fix: log this failure too, for parity with the IOException branch below
        // (previously only printed to stdout and never reached the log).
        LOG.error(e + ". Exiting ...");
        return ExitStatus.ILLEGAL_ARGUMENTS;
    } catch (IOException e) {
        System.out.println(e + ". Exiting ...");
        LOG.error(e + ". Exiting ...");
        return ExitStatus.IO_EXCEPTION;
    } finally {
        dispatcher.shutdownNow();
    }
}
// Mover must work per-namespace in a federated cluster sharing datanodes: moving one
// namespace's files to ARCHIVE (COLD) and the other's to DISK (HOT), then waiting until
// each namenode reports the expected storage types for all three replicas.
@Test(timeout = 300000) public void testWithFederateClusterWithinSameNode() throws Exception { final Configuration conf = new HdfsConfiguration(); initConf(conf); final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .numDataNodes(4).storageTypes( new StorageType[] {StorageType.DISK, StorageType.ARCHIVE}).nnTopology(MiniDFSNNTopology .simpleFederatedTopology(2)).build(); DFSTestUtil.setFederatedConfiguration(cluster, conf); try { cluster.waitActive(); final String file = "/test/file"; Path dir = new Path ("/test"); final DistributedFileSystem dfs1 = cluster.getFileSystem(0); final DistributedFileSystem dfs2 = cluster.getFileSystem(1); URI nn1 = dfs1.getUri(); URI nn2 = dfs2.getUri(); setupStoragePoliciesAndPaths(dfs1, dfs2, dir, file); // move to ARCHIVE dfs1.setStoragePolicy(dir, "COLD"); int rc = ToolRunner.run(conf, new Mover.Cli(), new String[] {"-p", nn1 + dir.toString()}); Assert.assertEquals("Movement to ARCHIVE should be successful", 0, rc); //move to DISK dfs2.setStoragePolicy(dir, "HOT"); rc = ToolRunner.run(conf, new Mover.Cli(), new String[] {"-p", nn2 + dir.toString()}); Assert.assertEquals("Movement to DISK should be successful", 0, rc); // Wait till namenode notified about the block location details waitForLocatedBlockWithArchiveStorageType(dfs1, file, 3); waitForLocatedBlockWithDiskStorageType(dfs2, file, 3); } finally { cluster.shutdown(); } }
/**
 * Dispatches outbound HTTP/2 frame objects to the appropriate encoder call.
 *
 * <p>Unknown non-{@code Http2Frame} messages pass through unchanged; {@code Http2Frame}
 * subtypes without a branch here are released and rejected with
 * {@link UnsupportedMessageTypeException}.
 */
@Override public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) { if (msg instanceof Http2DataFrame) { Http2DataFrame dataFrame = (Http2DataFrame) msg; encoder().writeData(ctx, dataFrame.stream().id(), dataFrame.content(), dataFrame.padding(), dataFrame.isEndStream(), promise); } else if (msg instanceof Http2HeadersFrame) { writeHeadersFrame(ctx, (Http2HeadersFrame) msg, promise); } else if (msg instanceof Http2WindowUpdateFrame) { Http2WindowUpdateFrame frame = (Http2WindowUpdateFrame) msg; Http2FrameStream frameStream = frame.stream(); // It is legit to send a WINDOW_UPDATE frame for the connection stream. The parent channel doesn't attempt // to set the Http2FrameStream so we assume if it is null the WINDOW_UPDATE is for the connection stream. try { if (frameStream == null) { increaseInitialConnectionWindow(frame.windowSizeIncrement()); } else { consumeBytes(frameStream.id(), frame.windowSizeIncrement()); } promise.setSuccess(); } catch (Throwable t) { promise.setFailure(t); } } else if (msg instanceof Http2ResetFrame) { Http2ResetFrame rstFrame = (Http2ResetFrame) msg; int id = rstFrame.stream().id(); // Only ever send a reset frame if stream may have existed before as otherwise we may send a RST on a // stream in an invalid state and cause a connection error.
if (connection().streamMayHaveExisted(id)) { encoder().writeRstStream(ctx, rstFrame.stream().id(), rstFrame.errorCode(), promise); } else { /* RST for a stream that never existed: release the frame and fail the promise */ ReferenceCountUtil.release(rstFrame); promise.setFailure(Http2Exception.streamError( rstFrame.stream().id(), Http2Error.PROTOCOL_ERROR, "Stream never existed")); } } else if (msg instanceof Http2PingFrame) { Http2PingFrame frame = (Http2PingFrame) msg; encoder().writePing(ctx, frame.ack(), frame.content(), promise); } else if (msg instanceof Http2SettingsFrame) { encoder().writeSettings(ctx, ((Http2SettingsFrame) msg).settings(), promise); } else if (msg instanceof Http2SettingsAckFrame) { // In the event of manual SETTINGS ACK, it is assumed the encoder will apply the earliest received but not // yet ACKed settings. encoder().writeSettingsAck(ctx, promise); } else if (msg instanceof Http2GoAwayFrame) { writeGoAwayFrame(ctx, (Http2GoAwayFrame) msg, promise); } else if (msg instanceof Http2PushPromiseFrame) { Http2PushPromiseFrame pushPromiseFrame = (Http2PushPromiseFrame) msg; writePushPromise(ctx, pushPromiseFrame, promise); } else if (msg instanceof Http2PriorityFrame) { Http2PriorityFrame priorityFrame = (Http2PriorityFrame) msg; encoder().writePriority(ctx, priorityFrame.stream().id(), priorityFrame.streamDependency(), priorityFrame.weight(), priorityFrame.exclusive(), promise); } else if (msg instanceof Http2UnknownFrame) { Http2UnknownFrame unknownFrame = (Http2UnknownFrame) msg; encoder().writeFrame(ctx, unknownFrame.frameType(), unknownFrame.stream().id(), unknownFrame.flags(), unknownFrame.content(), promise); } else if (!(msg instanceof Http2Frame)) { /* non-HTTP/2 messages pass through to the next handler */ ctx.write(msg, promise); } else { ReferenceCountUtil.release(msg); throw new UnsupportedMessageTypeException(msg, SUPPORTED_MESSAGES); } }
// Writing an unsupported Http2Frame subtype must fail the write future with
// UnsupportedMessageTypeException AND release the frame (refCnt drops to 0).
@Test public void unknownFrameTypeShouldThrowAndBeReleased() throws Exception { class UnknownHttp2Frame extends AbstractReferenceCounted implements Http2Frame { @Override public String name() { return "UNKNOWN"; } @Override protected void deallocate() { } @Override public ReferenceCounted touch(Object hint) { return this; } } UnknownHttp2Frame frame = new UnknownHttp2Frame(); assertEquals(1, frame.refCnt()); ChannelFuture f = channel.write(frame); f.await(); assertTrue(f.isDone()); assertFalse(f.isSuccess()); assertThat(f.cause(), instanceOf(UnsupportedMessageTypeException.class)); assertEquals(0, frame.refCnt()); }
/**
 * Streams the service names known to the register center.
 * The registry is queried lazily on subscription and off the event loop; failures are
 * logged and surfaced as an empty stream.
 *
 * @return the service names, or an empty flux on error
 */
@Override
public Flux<String> getServices() {
    return Flux.defer(() -> Flux.fromIterable(getRegisterCenterService().getServices()))
            .onErrorResume(throwable -> {
                LOGGER.error("Can not acquire services list", throwable);
                return Flux.empty();
            })
            .subscribeOn(Schedulers.boundedElastic());
}
@Test
public void getServices() {
    // The client must surface exactly the registry's service names, in order.
    final List<String> actual = client.getServices().collectList().block();
    Assert.assertNotNull(actual);
    Assert.assertEquals(actual.size(), SERVICES.size());
    for (int i = 0; i < actual.size(); i++) {
        Assert.assertEquals(actual.get(i), SERVICES.get(i));
    }
}
/**
 * Checks whether the failure is unrecoverable, i.e. whether any throwable in the
 * cause chain is classified as {@link ThrowableType#NonRecoverableError}.
 *
 * @param cause the failure to inspect
 * @return true if an unrecoverable error is found in the cause chain
 */
public static boolean isUnrecoverableError(Throwable cause) {
    // Pointless intermediate Optional local removed; the presence check is the answer.
    return ThrowableClassifier.findThrowableOfThrowableType(cause, ThrowableType.NonRecoverableError)
            .isPresent();
}
// Covers: a plain exception is recoverable; SuppressRestartsException is unrecoverable
// both directly and when nested deeper in the cause chain.
@Test void testUnrecoverableErrorCheck() { // normal error assertThat(ExecutionFailureHandler.isUnrecoverableError(new Exception())).isFalse(); // direct unrecoverable error assertThat( ExecutionFailureHandler.isUnrecoverableError( new SuppressRestartsException(new Exception()))) .isTrue(); // nested unrecoverable error assertThat( ExecutionFailureHandler.isUnrecoverableError( new Exception(new SuppressRestartsException(new Exception())))) .isTrue(); }
/**
 * Truncates content for display.
 *
 * @param content the text to truncate, may be null
 * @return an empty string for null input, the content unchanged when it fits within
 *         {@code SHOW_CONTENT_SIZE}, otherwise the leading {@code SHOW_CONTENT_SIZE}
 *         characters followed by "..."
 */
public static String truncateContent(String content) {
    if (content == null) {
        return "";
    }
    if (content.length() <= SHOW_CONTENT_SIZE) {
        return content;
    }
    return content.substring(0, SHOW_CONTENT_SIZE) + "...";
}
@Test
void testTruncateContent() {
    // Content shorter than the display limit must pass through unchanged.
    String shortContent = "aa";
    assertEquals(shortContent, ContentUtils.truncateContent(shortContent));
}
/**
 * Maps the cluster's canary distribution to its JMX integer code.
 *
 * @return 0 for STABLE, 1 for CANARY, -1 for any other (unknown) distribution
 */
@Override public int getCanaryDistributionPolicy() { switch (_clusterInfoItem.getClusterPropertiesItem().getDistribution()) { case STABLE: return 0; case CANARY: return 1; default: return -1; } }
// Data-driven check that each Distribution enum value maps to the expected JMX code via
// a ClusterInfoJmx built around a minimal single-partition accessor.
@Test(dataProvider = "getCanaryDistributionPoliciesTestData") public void testGetCanaryDistributionPolicy(CanaryDistributionProvider.Distribution distribution, int expectedValue) { ClusterInfoJmx clusterInfoJmx = new ClusterInfoJmx( new ClusterInfoItem(_mockedSimpleBalancerState, new ClusterProperties("Foo"), new PartitionAccessor() { @Override public int getMaxPartitionId() { return 0; } @Override public int getPartitionId(URI uri) { return 0; } }, distribution) ); Assert.assertEquals(clusterInfoJmx.getCanaryDistributionPolicy(), expectedValue); }
/**
 * Compares two values with a layered strategy, in strict order:
 * identity, null checks, String equals, standard equals for collections/maps,
 * a user-defined equals() when both objects share a class that overrides it,
 * and finally a JSON round-trip comparison as the fallback for value objects
 * without a meaningful equals().
 *
 * <p>NOTE(review): the JSON fallback means two structurally-identical objects of
 * different classes can compare equal — apparently intentional for JSON column types.
 */
@Override public boolean areEqual(Object one, Object another) { if (one == another) { return true; } if (one == null || another == null) { return false; } if (one instanceof String && another instanceof String) { return one.equals(another); } if ((one instanceof Collection && another instanceof Collection) || (one instanceof Map && another instanceof Map)) { return Objects.equals(one, another); } if (one.getClass().equals(another.getClass())) { Method equalsMethod = ReflectionUtils.getMethodOrNull(one.getClass(), "equals", Object.class); if (equalsMethod != null && !Object.class.equals(equalsMethod.getDeclaringClass())) { return one.equals(another); } } /* fallback: structural comparison via JSON serialization round-trip */ return objectMapperWrapper.toJsonNode(objectMapperWrapper.toString(one)).equals( objectMapperWrapper.toJsonNode(objectMapperWrapper.toString(another)) ); }
// Two FormImpl instances with different field values still compare equal here —
// presumably because the differing field is excluded from serialization, so the JSON
// fallback sees identical structures; verify against FormImpl's mapping if surprising.
@Test public void testAbstractClassImplementationsAreEqual() { JsonTypeDescriptor descriptor = new JsonTypeDescriptor(); FormImpl firstEntity = new FormImpl("value1"); FormImpl secondEntity = new FormImpl("value2"); assertTrue(descriptor.areEqual(firstEntity, secondEntity)); }
/**
 * Executes one replication task and classifies the outcome:
 * 2xx → Success; 503 → Congestion (retry later); other HTTP codes → PermanentError.
 * Thrown exceptions are classified as Congestion (socket read timeouts), TransientError
 * (network connect failures, sampled logging) or PermanentError (everything else).
 */
@Override public ProcessingResult process(ReplicationTask task) { try { EurekaHttpResponse<?> httpResponse = task.execute(); int statusCode = httpResponse.getStatusCode(); Object entity = httpResponse.getEntity(); if (logger.isDebugEnabled()) { logger.debug("Replication task {} completed with status {}, (includes entity {})", task.getTaskName(), statusCode, entity != null); } if (isSuccess(statusCode)) { task.handleSuccess(); } else if (statusCode == 503) { logger.debug("Server busy (503) reply for task {}", task.getTaskName()); return ProcessingResult.Congestion; } else { task.handleFailure(statusCode, entity); return ProcessingResult.PermanentError; } } catch (Throwable e) { if (maybeReadTimeOut(e)) { logger.error("It seems to be a socket read timeout exception, it will retry later. if it continues to happen and some eureka node occupied all the cpu time, you should set property 'eureka.server.peer-node-read-timeout-ms' to a bigger value", e); //read timeout exception is more Congestion then TransientError, return Congestion for longer delay return ProcessingResult.Congestion; } else if (isNetworkConnectException(e)) { logNetworkErrorSample(task, e); return ProcessingResult.TransientError; } else { logger.error("{}: {} Not re-trying this exception because it does not seem to be a network exception", peerId, task.getTaskName(), e); return ProcessingResult.PermanentError; } } return ProcessingResult.Success; }
// Happy-path batch replication: a 200 batch reply must yield ProcessingResult.Success
// and drive the task to its Finished state.
@Test public void testBatchableTaskListExecution() throws Exception { TestableInstanceReplicationTask task = aReplicationTask().build(); replicationClient.withBatchReply(200); replicationClient.withNetworkStatusCode(200); ProcessingResult status = replicationTaskProcessor.process(Collections.<ReplicationTask>singletonList(task)); assertThat(status, is(ProcessingResult.Success)); assertThat(task.getProcessingState(), is(ProcessingState.Finished)); }
/**
 * Creates a {@code TerminateQuery} that targets one specific query.
 *
 * @param location optional source location of the statement in the original SQL.
 * @param queryId the id of the query to terminate.
 * @return a new instance whose query id is always present.
 */
public static TerminateQuery query(final Optional<NodeLocation> location, final QueryId queryId) {
  final Optional<QueryId> id = Optional.of(queryId);
  return new TerminateQuery(location, id);
}
// Equality contract: instances with the same query id are equal regardless of
// location (location is deliberately excluded), while a different id breaks equality.
@SuppressWarnings("UnstableApiUsage") @Test public void shouldImplementHashCodeAndEqualsProperty() { new EqualsTester() .addEqualityGroup( // Note: At the moment location does not take part in equality testing TerminateQuery.query(Optional.of(SOME_LOCATION), SOME_QUERY_ID), TerminateQuery.query(Optional.of(OTHER_LOCATION), SOME_QUERY_ID) ) .addEqualityGroup( TerminateQuery.query(Optional.empty(), new QueryId("diff")) ) .testEquals(); }
/**
 * Groups rows by the schema fields with the given (possibly nested) names.
 *
 * @param fieldNames names of the key fields, e.g. {@code "inner.field1"}.
 * @return a {@code ByFields} transform keyed on those fields.
 */
public static <T> ByFields<T> byFieldNames(String... fieldNames) {
  FieldAccessDescriptor keyDescriptor = FieldAccessDescriptor.withFieldNames(fieldNames);
  return ByFields.of(keyDescriptor);
}
// End-to-end check of grouping by nested field names ("inner.field1", "inner.field2"):
// four Outer rows collapse into two groups keyed by (field1, field2), each carrying
// the matching rows as an iterable value.
@Test @Category(NeedsRunner.class) public void testGroupByNestedKey() throws NoSuchSchemaException { PCollection<Row> grouped = pipeline .apply( Create.of( Outer.of(Basic.of("key1", 1L, "value1")), Outer.of(Basic.of("key1", 1L, "value2")), Outer.of(Basic.of("key2", 2L, "value3")), Outer.of(Basic.of("key2", 2L, "value4")))) .apply(Group.byFieldNames("inner.field1", "inner.field2")); Schema keySchema = Schema.builder().addStringField("field1").addInt64Field("field2").build(); Schema outputSchema = Schema.builder() .addRowField("key", keySchema) .addIterableField("value", FieldType.row(OUTER_SCHEMA)) .build(); SerializableFunction<Outer, Row> toRow = pipeline.getSchemaRegistry().getToRowFunction(Outer.class); List<Row> expected = ImmutableList.of( Row.withSchema(outputSchema) .addValue(Row.withSchema(keySchema).addValues("key1", 1L).build()) .addIterable( ImmutableList.of( toRow.apply(Outer.of(Basic.of("key1", 1L, "value1"))), toRow.apply(Outer.of(Basic.of("key1", 1L, "value2"))))) .build(), Row.withSchema(outputSchema) .addValue(Row.withSchema(keySchema).addValues("key2", 2L).build()) .addIterable( ImmutableList.of( toRow.apply(Outer.of(Basic.of("key2", 2L, "value3"))), toRow.apply(Outer.of(Basic.of("key2", 2L, "value4"))))) .build()); PAssert.that(grouped).satisfies(actual -> containsKIterableVs(expected, actual)); pipeline.run(); }
// App-to-app SAML entry point: validates the Type/RelayState pair, starts an
// authentication process from the incoming request, and returns the parameters
// the app needs to continue authentication. Mapped on both the legacy "entrance"
// and the "idp" v4 paths; selected only when a "Type" request parameter is present.
@Operation(summary = "Receive app to app SAML AuthnRequest") @PostMapping(value = {"/frontchannel/saml/v4/entrance/request_authentication", "/frontchannel/saml/v4/idp/request_authentication"}, produces = "application/json", consumes = "application/x-www-form-urlencoded", params = "Type") @ResponseBody public Map<String, Object> requestAuthenticationApp(HttpServletRequest request, @RequestParam(name = "Type") String requestType, @RequestParam(name = "RelayState") String relayState) throws SamlValidationException, DienstencatalogusException, SharedServiceClientException, ComponentInitializationException, MessageDecodingException, AdException, SamlSessionException { validateRequestType(requestType, relayState); AuthenticationRequest authenticationRequest = authenticationService.startAuthenticationProcess(request); return authenticationAppToAppService.createAuthenticationParameters(relayState, authenticationRequest); }
// Controller wiring test: the returned map is exactly what the app-to-app service
// produced, and both collaborators are invoked exactly once.
@Test public void requestAuthenticationEntranceApp() throws AdException, SamlSessionException, DienstencatalogusException, SharedServiceClientException, SamlValidationException, MessageDecodingException, ComponentInitializationException { Map<String, Object> authenticationParameters = new HashMap<>(); authenticationParameters.put("parameter1", "valueParameter1"); AuthenticationRequest authenticationRequest = new AuthenticationRequest(); when(authenticationServiceMock.startAuthenticationProcess(any(HttpServletRequest.class))).thenReturn(authenticationRequest); when(authenticationAppToAppServiceMock.createAuthenticationParameters(anyString(), any(AuthenticationRequest.class))).thenReturn(authenticationParameters); Map<String, Object> result = authenticationControllerMock.requestAuthenticationApp(request, APP_TO_APP.type, "relayState"); assertNotNull(result); assertEquals(authenticationParameters.size(), result.size()); verify(authenticationServiceMock, times(1)).startAuthenticationProcess(any(HttpServletRequest.class)); verify(authenticationAppToAppServiceMock, times(1)).createAuthenticationParameters(anyString(), any(AuthenticationRequest.class)); }
/**
 * Wraps a JMS {@link XAConnection} so that sessions created from it are traced.
 *
 * @param xaConnection the connection to decorate.
 * @return a tracing decorator around the given connection.
 */
public XAConnection xaConnection(XAConnection xaConnection) {
  final XAConnection traced = TracingXAConnection.create(xaConnection, this);
  return traced;
}
// The decorator contract: any XAConnection passed in comes back wrapped in
// TracingXAConnection.
@Test void xaConnection_wrapsInput() { assertThat(jmsTracing.xaConnection(mock(XAConnection.class))) .isInstanceOf(TracingXAConnection.class); }
// Schedules a periodic service-info refresh task, at most one per service key.
// No-op unless async query subscription is enabled. Uses check-then-act with a
// second check inside the synchronized block (double-check) so concurrent callers
// cannot register duplicate tasks for the same key; the unsynchronized first check
// is only a fast path to skip lock acquisition.
public void scheduleUpdateIfAbsent(String serviceName, String groupName, String clusters) { if (!asyncQuerySubscribeService) { return; } String serviceKey = ServiceInfo.getKey(NamingUtils.getGroupedName(serviceName, groupName), clusters); if (futureMap.get(serviceKey) != null) { return; } synchronized (futureMap) { if (futureMap.get(serviceKey) != null) { return; } ScheduledFuture<?> future = addTask(new UpdateTask(serviceName, groupName, clusters)); futureMap.put(serviceKey, future); } }
// Deduplication check: scheduling the same (service, group, clusters) twice must
// register only one task — the proxy's query is observed exactly once.
@Test void testScheduleUpdateIfAbsentDuplicate() throws InterruptedException, NacosException { info.setCacheMillis(10000L); nacosClientProperties.setProperty(PropertyKeyConst.NAMING_ASYNC_QUERY_SUBSCRIBE_SERVICE, "true"); serviceInfoUpdateService = new ServiceInfoUpdateService(nacosClientProperties, holder, proxy, notifier); serviceInfoUpdateService.scheduleUpdateIfAbsent(serviceName, group, clusters); serviceInfoUpdateService.scheduleUpdateIfAbsent(serviceName, group, clusters); TimeUnit.MILLISECONDS.sleep(1500); // Only once called Mockito.verify(proxy).queryInstancesOfService(serviceName, group, clusters, false); }
/**
 * Resolves the key schema for a topic, delegating to the shared lookup.
 *
 * @param topicName topic whose key schema is wanted, if known.
 * @param schemaId explicit schema registry id to use, if any.
 * @param expectedFormat the serialization format the schema must match.
 * @param serdeFeatures serde features to apply when interpreting the schema.
 * @return the resolution result from the common schema lookup.
 */
@Override
public SchemaResult getKeySchema(
    final Optional<String> topicName,
    final Optional<Integer> schemaId,
    final FormatInfo expectedFormat,
    final SerdeFeatures serdeFeatures
) {
  // The only difference from the value path is the final isKey flag.
  final boolean isKey = true;
  return getSchema(topicName, schemaId, expectedFormat, serdeFeatures, isKey);
}
// When the registry holds a matching key schema, the result carries the expected
// schema id and column list rather than an empty Optional.
@Test public void shouldReturnSchemaFromGetKeySchemaIfFound() { // When: final SchemaResult result = supplier.getKeySchema(Optional.of(TOPIC_NAME), Optional.empty(), expectedFormat, SerdeFeatures.of(SerdeFeature.UNWRAP_SINGLES)); // Then: assertThat(result.schemaAndId, is(not(Optional.empty()))); assertThat(result.schemaAndId.get().id, is(SCHEMA_ID)); assertThat(result.schemaAndId.get().columns, is(ImmutableList.of(column1))); }
// Synchronous facade over the async lock attempt: blocks on the future returned by
// tryLockAsync() via the inherited get(...) helper and returns its boolean result.
// NOTE(review): failure modes (e.g. Redis unavailable) surface as exceptions thrown
// by get(...) — presumably RedisException; confirm against the base class.
@Override public boolean tryLock() { return get(tryLockAsync()); }
// Failure-mode test: stopping the Redis container mid-session must make a later
// tryLock attempt surface a RedisException instead of hanging or succeeding.
@Test public void testRedisFailed() { GenericContainer<?> redis = createRedis(); redis.start(); Config config = createConfig(redis); RedissonClient redisson = Redisson.create(config); Assertions.assertThrows(RedisException.class, () -> { RLock lock = redisson.getSpinLock("myLock"); // kill RedisServer while main thread is sleeping. redis.stop(); Thread.sleep(3000); lock.tryLock(5, 10, TimeUnit.SECONDS); }); redisson.shutdown(); }
/**
 * Routes a SQL statement to its target data nodes.
 *
 * Statements that need every schema (as decided by {@code isNeedAllSchemas}) are
 * routed by {@code AllSQLRouteExecutor}; everything else goes through the
 * rule-driven {@code PartialSQLRouteExecutor}.
 *
 * @param connectionContext current connection context.
 * @param queryContext parsed query context.
 * @param globalRuleMetaData global rule metadata.
 * @param database target logical database.
 * @return the computed route context.
 */
public RouteContext route(final ConnectionContext connectionContext, final QueryContext queryContext, final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database) {
    final SQLRouteExecutor executor;
    if (isNeedAllSchemas(queryContext.getSqlStatementContext().getSqlStatement())) {
        executor = new AllSQLRouteExecutor();
    } else {
        executor = new PartialSQLRouteExecutor(rules, props);
    }
    return executor.route(connectionContext, queryContext, globalRuleMetaData, database);
}
// A rule fixture wired to fail must make routing propagate
// UnsupportedOperationException out of SQLRouteEngine.route.
@Test void assertRouteFailure() { ConnectionContext connectionContext = mock(ConnectionContext.class); when(connectionContext.getCurrentDatabaseName()).thenReturn(Optional.of("logic_schema")); ShardingSphereMetaData metaData = mock(ShardingSphereMetaData.class); RuleMetaData ruleMetaData = new RuleMetaData(Collections.singleton(new RouteRuleFixture())); ShardingSphereDatabase database = new ShardingSphereDatabase("logic_schema", mock(DatabaseType.class), mock(ResourceMetaData.class, RETURNS_DEEP_STUBS), ruleMetaData, Collections.emptyMap()); when(metaData.containsDatabase("logic_schema")).thenReturn(true); when(metaData.getDatabase("logic_schema")).thenReturn(database); QueryContext queryContext = new QueryContext(mock(CommonSQLStatementContext.class), "SELECT 1", Collections.emptyList(), new HintValueContext(), connectionContext, metaData); assertThrows(UnsupportedOperationException.class, () -> new SQLRouteEngine(Collections.singleton(new RouteFailureRuleFixture()), new ConfigurationProperties(new Properties())).route(new ConnectionContext(Collections::emptySet), queryContext, mock(RuleMetaData.class), database)); }
/**
 * Returns the startup instant recorded in the state holder.
 *
 * @return a fresh {@link Date} built from the stored epoch-millis timestamp
 *     (a new instance each call, so callers cannot mutate shared state).
 */
@Override
public Date getStartedAt() {
  final long startedAtEpochMillis = state.getStartedAt();
  return new Date(startedAtEpochMillis);
}
// The Date returned by getStartedAt must carry exactly the epoch millis the state
// holder reports.
@Test public void test_startup_information() { long time = 123_456_789L; when(state.getStartedAt()).thenReturn(time); assertThat(underTest.getStartedAt().getTime()).isEqualTo(time); }
/**
 * Converts every component of a composite value through the matching
 * per-component converter, preserving the sentinel values NULL,
 * NEGATIVE_INFINITY and POSITIVE_INFINITY untouched.
 *
 * @param value the value to convert; must be a {@code CompositeValue}.
 * @return a new {@code CompositeValue} with converted components.
 * @throws IllegalArgumentException if {@code value} is not a composite.
 */
@Override
public Comparable convert(Comparable value) {
    if (!(value instanceof CompositeValue)) {
        throw new IllegalArgumentException("Cannot convert [" + value + "] to composite");
    }
    Comparable[] source = ((CompositeValue) value).getComponents();
    Comparable[] result = new Comparable[source.length];
    for (int index = 0; index < source.length; ++index) {
        Comparable element = source[index];
        // Sentinels carry ordering semantics of their own and must pass through as-is.
        boolean isSpecial = element == NULL || element == NEGATIVE_INFINITY || element == POSITIVE_INFINITY;
        result[index] = isSpecial ? element : converters[index].convert(element);
    }
    return new CompositeValue(result);
}
// Sentinel components (NULL, ±INFINITY) must come back unchanged from conversion.
@Test public void testSpecialValuesArePreserved() { assertEquals(value(NULL), converter(INTEGER_CONVERTER).convert(value(NULL))); assertEquals(value(NEGATIVE_INFINITY), converter(INTEGER_CONVERTER).convert(value(NEGATIVE_INFINITY))); assertEquals(value(POSITIVE_INFINITY), converter(INTEGER_CONVERTER).convert(value(POSITIVE_INFINITY))); }
/**
 * Whether this request arrived over a secure (HTTPS) scheme.
 *
 * @return {@code true} if the request scheme equals "https" ignoring case,
 *     {@code false} otherwise.
 */
public boolean isSecure() {
    // Return the boolean expression directly instead of the
    // if { return true; } return false; anti-pattern. Receiver kept as
    // getScheme() to preserve the original's behavior (NPE on a null scheme).
    return getScheme().equalsIgnoreCase(HTTPS_PREFIX);
}
// http URIs report insecure, https URIs report secure; everything else about the
// request is held constant between the two cases.
@Test void testIsSecure() { { URI uri = URI.create("http://example.yahoo.com/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); DiscFilterRequest request = new DiscFilterRequest(httpReq); assertFalse(request.isSecure()); } { URI uri = URI.create("https://example.yahoo.com/test"); HttpRequest httpReq = newRequest(uri, HttpRequest.Method.GET, HttpRequest.Version.HTTP_1_1); DiscFilterRequest request = new DiscFilterRequest(httpReq); assertTrue(request.isSecure()); } }
// Thread-safe decorator: serializes access to the wrapped partition group by taking
// this object's monitor before delegating. The wrapped call itself is unchanged.
@Override synchronized void setPartitionTime(final TopicPartition partition, final long partitionTime) { wrapped.setPartitionTime(partition, partitionTime); }
// Pure delegation check: the synchronized wrapper forwards partition and timestamp
// to the wrapped group exactly once.
@Test public void testSetPartitionTime() { final TopicPartition partition = new TopicPartition("topic", 0); final long partitionTime = 12345678L; synchronizedPartitionGroup.setPartitionTime(partition, partitionTime); verify(wrapped, times(1)).setPartitionTime(partition, partitionTime); }
// Maps a MySQL column definition to a SeaTunnel Column. Normalizes the raw data
// type first (strips a trailing ZEROFILL, appends " UNSIGNED" when the define is
// flagged unsigned), then switches on the normalized name. Notable cases:
//  - BIT: length <= 1 becomes BOOLEAN, longer bit fields become a byte array of
//    ceil(length / 8) bytes;
//  - TINYINT(1) is treated as BOOLEAN, other TINYINTs as BYTE;
//  - BIGINT UNSIGNED maps to DECIMAL(20, 0) since it exceeds the signed long range;
//  - unsigned FLOAT/DOUBLE/DECIMAL keep a wider or same type with an overflow warning;
//  - DECIMAL precision is capped at DEFAULT_PRECISION (with warning);
//  - text/blob variants get fixed max lengths (2^8-1 .. 2^32-1);
//  - unknown types raise a convertToSeaTunnelTypeError.
@Override public Column convert(BasicTypeDefine typeDefine) { PhysicalColumn.PhysicalColumnBuilder builder = PhysicalColumn.builder() .name(typeDefine.getName()) .sourceType(typeDefine.getColumnType()) .nullable(typeDefine.isNullable()) .defaultValue(typeDefine.getDefaultValue()) .comment(typeDefine.getComment()); String mysqlDataType = typeDefine.getDataType().toUpperCase(); if (mysqlDataType.endsWith("ZEROFILL")) { mysqlDataType = mysqlDataType.substring(0, mysqlDataType.length() - "ZEROFILL".length()).trim(); } if (typeDefine.isUnsigned() && !(mysqlDataType.endsWith(" UNSIGNED"))) { mysqlDataType = mysqlDataType + " UNSIGNED"; } switch (mysqlDataType) { case MYSQL_NULL: builder.dataType(BasicType.VOID_TYPE); break; case MYSQL_BIT: if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) { builder.dataType(BasicType.BOOLEAN_TYPE); } else if (typeDefine.getLength() == 1) { builder.dataType(BasicType.BOOLEAN_TYPE); } else { builder.dataType(PrimitiveByteArrayType.INSTANCE); // BIT(M) -> BYTE(M/8) long byteLength = typeDefine.getLength() / 8; byteLength += typeDefine.getLength() % 8 > 0 ? 1 : 0; builder.columnLength(byteLength); } break; case MYSQL_TINYINT: if (typeDefine.getColumnType().equalsIgnoreCase("tinyint(1)")) { builder.dataType(BasicType.BOOLEAN_TYPE); } else { builder.dataType(BasicType.BYTE_TYPE); } break; case MYSQL_TINYINT_UNSIGNED: case MYSQL_SMALLINT: builder.dataType(BasicType.SHORT_TYPE); break; case MYSQL_SMALLINT_UNSIGNED: case MYSQL_MEDIUMINT: case MYSQL_MEDIUMINT_UNSIGNED: case MYSQL_INT: case MYSQL_INTEGER: case MYSQL_YEAR: builder.dataType(BasicType.INT_TYPE); break; case MYSQL_INT_UNSIGNED: case MYSQL_INTEGER_UNSIGNED: case MYSQL_BIGINT: builder.dataType(BasicType.LONG_TYPE); break; case MYSQL_BIGINT_UNSIGNED: DecimalType intDecimalType = new DecimalType(20, 0); builder.dataType(intDecimalType); builder.columnLength(Long.valueOf(intDecimalType.getPrecision())); builder.scale(intDecimalType.getScale()); break; case MYSQL_FLOAT: builder.dataType(BasicType.FLOAT_TYPE); break; case MYSQL_FLOAT_UNSIGNED: log.warn("{} will probably cause value overflow.", MYSQL_FLOAT_UNSIGNED); builder.dataType(BasicType.FLOAT_TYPE); break; case MYSQL_DOUBLE: builder.dataType(BasicType.DOUBLE_TYPE); break; case MYSQL_DOUBLE_UNSIGNED: log.warn("{} will probably cause value overflow.", MYSQL_DOUBLE_UNSIGNED); builder.dataType(BasicType.DOUBLE_TYPE); break; case MYSQL_DECIMAL: Preconditions.checkArgument(typeDefine.getPrecision() > 0); DecimalType decimalType; if (typeDefine.getPrecision() > DEFAULT_PRECISION) { log.warn("{} will probably cause value overflow.", MYSQL_DECIMAL); decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE); } else { decimalType = new DecimalType( typeDefine.getPrecision().intValue(), typeDefine.getScale() == null ? 0 : typeDefine.getScale().intValue()); } builder.dataType(decimalType); builder.columnLength(Long.valueOf(decimalType.getPrecision())); builder.scale(decimalType.getScale()); break; case MYSQL_DECIMAL_UNSIGNED: Preconditions.checkArgument(typeDefine.getPrecision() > 0); log.warn("{} will probably cause value overflow.", MYSQL_DECIMAL_UNSIGNED); DecimalType decimalUnsignedType = new DecimalType( typeDefine.getPrecision().intValue() + 1, typeDefine.getScale() == null ? 0 : typeDefine.getScale().intValue()); builder.dataType(decimalUnsignedType); builder.columnLength(Long.valueOf(decimalUnsignedType.getPrecision())); builder.scale(decimalUnsignedType.getScale()); break; case MYSQL_ENUM: builder.dataType(BasicType.STRING_TYPE); if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) { builder.columnLength(100L); } else { builder.columnLength(typeDefine.getLength()); } break; case MYSQL_CHAR: case MYSQL_VARCHAR: if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) { builder.columnLength(TypeDefineUtils.charTo4ByteLength(1L)); } else { builder.columnLength(typeDefine.getLength()); } builder.dataType(BasicType.STRING_TYPE); break; case MYSQL_TINYTEXT: builder.dataType(BasicType.STRING_TYPE); builder.columnLength(POWER_2_8 - 1); break; case MYSQL_TEXT: builder.dataType(BasicType.STRING_TYPE); builder.columnLength(POWER_2_16 - 1); break; case MYSQL_MEDIUMTEXT: builder.dataType(BasicType.STRING_TYPE); builder.columnLength(POWER_2_24 - 1); break; case MYSQL_LONGTEXT: builder.dataType(BasicType.STRING_TYPE); builder.columnLength(POWER_2_32 - 1); break; case MYSQL_JSON: builder.dataType(BasicType.STRING_TYPE); break; case MYSQL_BINARY: case MYSQL_VARBINARY: if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) { builder.columnLength(1L); } else { builder.columnLength(typeDefine.getLength()); } builder.dataType(PrimitiveByteArrayType.INSTANCE); break; case MYSQL_TINYBLOB: builder.dataType(PrimitiveByteArrayType.INSTANCE); builder.columnLength(POWER_2_8 - 1); break; case MYSQL_BLOB: builder.dataType(PrimitiveByteArrayType.INSTANCE); builder.columnLength(POWER_2_16 - 1); break; case MYSQL_MEDIUMBLOB: builder.dataType(PrimitiveByteArrayType.INSTANCE); builder.columnLength(POWER_2_24 - 1); break; case MYSQL_LONGBLOB: builder.dataType(PrimitiveByteArrayType.INSTANCE); builder.columnLength(POWER_2_32 - 1); break; case MYSQL_GEOMETRY: builder.dataType(PrimitiveByteArrayType.INSTANCE); break; case MYSQL_DATE: builder.dataType(LocalTimeType.LOCAL_DATE_TYPE); break; case MYSQL_TIME: builder.dataType(LocalTimeType.LOCAL_TIME_TYPE); builder.scale(typeDefine.getScale()); break; case MYSQL_DATETIME: case MYSQL_TIMESTAMP: builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE); builder.scale(typeDefine.getScale()); break; default: throw CommonError.convertToSeaTunnelTypeError( DatabaseIdentifier.MYSQL, mysqlDataType, typeDefine.getName()); } return builder.build(); }
// DATE columns must convert to LOCAL_DATE_TYPE while keeping the original name and
// source type on the resulting column.
@Test public void testConvertDate() { BasicTypeDefine<Object> typeDefine = BasicTypeDefine.builder().name("test").columnType("date").dataType("date").build(); Column column = MySqlTypeConverter.DEFAULT_INSTANCE.convert(typeDefine); Assertions.assertEquals(typeDefine.getName(), column.getName()); Assertions.assertEquals(LocalTimeType.LOCAL_DATE_TYPE, column.getDataType()); Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType()); }
/**
 * Creates an OBS-backed {@link UnderFileSystem} for the given URI path.
 *
 * @param path the UFS URI; must not be null.
 * @param conf configuration that must carry OBS credentials and endpoint.
 * @return the created under file system.
 * @throws RuntimeException wrapping the underlying cause if instance creation
 *     fails, or wrapping an {@link IOException} when credentials are missing.
 */
@Override
public UnderFileSystem create(String path, UnderFileSystemConfiguration conf) {
  Preconditions.checkNotNull(path, "Unable to create UnderFileSystem instance:"
      + " URI path should not be null");
  if (checkOBSCredentials(conf)) {
    try {
      return OBSUnderFileSystem.createInstance(new AlluxioURI(path), conf);
    } catch (Exception e) {
      // Throwables.propagate is deprecated; this pair is its documented
      // replacement and preserves the original behavior (unchecked thrown
      // as-is, checked wrapped in RuntimeException).
      Throwables.throwIfUnchecked(e);
      throw new RuntimeException(e);
    }
  }
  String err = "OBS credentials or endpoint not available, cannot create OBS Under File System.";
  // propagate(...) wrapped the checked IOException in a RuntimeException; keep that.
  throw new RuntimeException(new IOException(err));
}
// With valid credentials in mConf, the factory must hand back a non-null
// OBSUnderFileSystem for the configured path.
@Test public void createInstanceWithPath() { UnderFileSystem ufs = mFactory.create(mObsPath, mConf); Assert.assertNotNull(ufs); Assert.assertTrue(ufs instanceof OBSUnderFileSystem); }
/**
 * Marks this pipeline state as locked by the given pipeline run.
 *
 * @param lockedByPipelineId id of the pipeline instance that takes the lock.
 */
public void lock(long lockedByPipelineId) {
    // The two writes are independent; record the lock flag and its owner.
    locked = true;
    this.lockedByPipelineId = lockedByPipelineId;
}
// Locking one of two otherwise-identical states must break their equality, i.e.
// the lock flag/owner participate in equals().
@Test void shouldNotBeEqualToAnotherPipelineStateIfAllAttributesDoNotMatch() { PipelineState pipelineState1 = new PipelineState("p", new StageIdentifier("p", 1, "1", 1L, "s", "1")); PipelineState pipelineState2 = new PipelineState("p", new StageIdentifier("p", 1, "1", 1L, "s", "1")); pipelineState1.lock(1); assertThat(pipelineState2).isNotEqualTo(pipelineState1); }
public static ImmutableList<String> splitToLowercaseTerms(String identifierName) { if (ONLY_UNDERSCORES.matcher(identifierName).matches()) { // Degenerate case of names which contain only underscore return ImmutableList.of(identifierName); } return TERM_SPLITTER .splitToStream(identifierName) .map(String::toLowerCase) .collect(toImmutableList()); }
// lowerCamelCase input splits at case boundaries and lowercases each term.
@Test public void splitToLowercaseTerms_separatesTerms_withLowerCamelCase() { String identifierName = "camelCaseTerm"; ImmutableList<String> terms = NamingConventions.splitToLowercaseTerms(identifierName); assertThat(terms).containsExactly("camel", "case", "term"); }
// Fails when the subject is the very same instance (reference identity, ==) as the
// given object. Note that values that are merely equals() — or even have identical
// toString() output — are fine; only identity triggers the failure.
public final void isNotSameInstanceAs(@Nullable Object unexpected) { if (actual == unexpected) { /* * We use actualCustomStringRepresentation() because it might be overridden to be better than * actual.toString()/unexpected.toString(). */ failWithoutActual( fact("expected not to be specific instance", actualCustomStringRepresentation())); } }
// "true" (String) and true (Boolean) share a toString but are different instances,
// so the identity assertion must pass.
@Test public void isNotSameInstanceAsWithDifferentTypesAndSameToString() { Object a = "true"; Object b = true; assertThat(a).isNotSameInstanceAs(b); }
// Fetches the full topic list from the name server synchronously. On SUCCESS with a
// non-null body the decoded TopicList is returned; note the SUCCESS case has no
// break — when the body is null it deliberately falls through to default and ends
// up throwing MQClientException with the response code and remark, as does any
// non-success response.
public TopicList getTopicListFromNameServer(final long timeoutMillis) throws RemotingException, MQClientException, InterruptedException { RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.GET_ALL_TOPIC_LIST_FROM_NAMESERVER, null); RemotingCommand response = this.remotingClient.invokeSync(null, request, timeoutMillis); assert response != null; switch (response.getCode()) { case ResponseCode.SUCCESS: { byte[] body = response.getBody(); if (body != null) { return TopicList.decode(body, TopicList.class); } } default: break; } throw new MQClientException(response.getCode(), response.getRemark()); }
// Round-trip decode check: the mocked response body comes back as a TopicList with
// the expected broker address and single topic entry.
@Test public void assertGetTopicListFromNameServer() throws RemotingException, InterruptedException, MQClientException { mockInvokeSync(); TopicList responseBody = new TopicList(); responseBody.setBrokerAddr(defaultBrokerAddr); responseBody.getTopicList().add(defaultTopic); setResponseBody(responseBody); TopicList actual = mqClientAPI.getTopicListFromNameServer(defaultTimeout); assertNotNull(actual); assertEquals(1, actual.getTopicList().size()); assertEquals(defaultBrokerAddr, actual.getBrokerAddr()); }
// Local-DB stub: row-level locking is not implemented here, so every acquisition
// attempt reports failure. Callers must treat false as "lock not obtained".
@Override public boolean acquireLock(List<RowLock> rowLock) { return false; }
// Pins the stub contract: acquireLock on LocalDBLocker always returns false,
// even for an empty lock list.
@Test public void testAcquireLock() { LocalDBLocker locker = new LocalDBLocker(); List<RowLock> rowLocks = new ArrayList<>(); boolean result = locker.acquireLock(rowLocks); // Assert the result of the acquireLock method Assertions.assertFalse(result); }