focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Retrieves the topology of the cluster by issuing the CLUSTER NODES command.
 *
 * @return the nodes currently known to the cluster, decoded as {@code RedisClusterNode}s
 */
@Override
public Iterable<RedisClusterNode> clusterGetNodes() {
    // null key: the command is not key-bound, so any node may serve it.
    return read(null, StringCodec.INSTANCE, CLUSTER_NODES);
}
@Test
public void testClusterGetNodes() {
    // The test cluster is expected to have exactly 6 nodes (masters plus replicas).
    Iterable<RedisClusterNode> nodes = connection.clusterGetNodes();
    assertThat(nodes).hasSize(6);
    for (RedisClusterNode redisClusterNode : nodes) {
        // Every node, regardless of role, must expose its basic identity fields.
        assertThat(redisClusterNode.getLinkState()).isNotNull();
        assertThat(redisClusterNode.getFlags()).isNotEmpty();
        assertThat(redisClusterNode.getHost()).isNotNull();
        assertThat(redisClusterNode.getPort()).isNotNull();
        assertThat(redisClusterNode.getId()).isNotNull();
        assertThat(redisClusterNode.getType()).isNotNull();
        if (redisClusterNode.getType() == NodeType.MASTER) {
            // Masters own hash slots ...
            assertThat(redisClusterNode.getSlotRange().getSlots()).isNotEmpty();
        } else {
            // ... while replicas must reference the master they follow.
            assertThat(redisClusterNode.getMasterId()).isNotNull();
        }
    }
}
/**
 * Looks up the spellbook with the given name and returns the wizards that own it.
 *
 * @param name the spellbook name to search by
 * @return a defensive copy of the spellbook's wizard set
 */
@Override
public List<Wizard> findWizardsWithSpellbook(String name) {
    // Copy into a fresh list so callers cannot mutate the entity's collection.
    return new ArrayList<>(spellbookDao.findByName(name).getWizards());
}
@Test
void testFindWizardsWithSpellbook() {
    // Stub a spellbook with three wizards so the service has something to copy.
    final var bookname = "bookname";
    final var spellbook = mock(Spellbook.class);
    final var wizards = Set.of(
        mock(Wizard.class),
        mock(Wizard.class),
        mock(Wizard.class)
    );
    when(spellbook.getWizards()).thenReturn(wizards);
    final var spellbookDao = mock(SpellbookDao.class);
    when(spellbookDao.findByName(bookname)).thenReturn(spellbook);
    final var wizardDao = mock(WizardDao.class);
    final var spellDao = mock(SpellDao.class);
    final var service = new MagicServiceImpl(wizardDao, spellbookDao, spellDao);
    // Constructing the service must not touch any collaborator.
    verifyNoInteractions(wizardDao, spellbookDao, spellDao, spellbook);
    final var result = service.findWizardsWithSpellbook(bookname);
    // The lookup must go through the spellbook DAO and read its wizards exactly once.
    verify(spellbookDao).findByName(bookname);
    verify(spellbook).getWizards();
    assertNotNull(result);
    assertEquals(3, result.size());
    verifyNoMoreInteractions(wizardDao, spellbookDao, spellDao);
}
/**
 * Dispatches the evaluated rule to every registered ArchUnit extension.
 *
 * @param evaluatedRule the rule evaluation result to hand to each extension
 */
public void dispatch(EvaluatedRule evaluatedRule) {
    // Per-extension handling (including enablement checks) lives in the two-arg overload.
    extensionLoader.getAll().forEach(extension -> dispatch(evaluatedRule, extension));
}
@Test
public void only_dispatches_to_enabled_extensions() {
    // One disabled and one enabled extension registered side by side.
    TestExtension extensionOne = newExtensionWithEnabled("one", false);
    TestExtension extensionTwo = newExtensionWithEnabled("two", true);
    when(extensionLoader.getAll()).thenReturn(ImmutableSet.of(extensionOne, extensionTwo));
    logTestRule.watch(ArchUnitExtensions.class, Level.DEBUG);
    extensions.dispatch(evaluatedRule);
    // Only the enabled extension must receive the dispatch.
    assertThat(extensionOne.wasNeverCalled()).as("Extension 'one' was never called").isTrue();
    assertThat(extensionTwo.wasNeverCalled()).as("Extension 'two' was never called").isFalse();
    // Skipping a disabled extension must be observable at DEBUG level with a hint on how to enable it.
    logTestRule.assertLogMessage(Level.DEBUG, "Extension 'one' is disabled, skipping... (to enable this extension, configure extension.one.enabled=true)");
}
/**
 * Returns the version declared by this model's parent POM.
 *
 * @return the parent version, possibly {@code null} if never set
 */
public String getParentVersion() {
    return parentVersion;
}
@Test
public void testGetParentVersion() {
    // Round-trip the setter and getter with an empty string.
    Model instance = new Model();
    instance.setParentVersion("");
    String expResult = "";
    String result = instance.getParentVersion();
    assertEquals(expResult, result);
}
@SuppressWarnings("unused") // Part of required API. public void execute( final ConfiguredStatement<InsertValues> statement, final SessionProperties sessionProperties, final KsqlExecutionContext executionContext, final ServiceContext serviceContext ) { final InsertValues insertValues = statement.getStatement(); final MetaStore metaStore = executionContext.getMetaStore(); final KsqlConfig config = statement.getSessionConfig().getConfig(true); final DataSource dataSource = getDataSource(config, metaStore, insertValues); validateInsert(insertValues.getColumns(), dataSource); final ProducerRecord<byte[], byte[]> record = buildRecord(statement, metaStore, dataSource, serviceContext); try { producer.sendRecord(record, serviceContext, config.getProducerClientConfigProps()); } catch (final TopicAuthorizationException e) { // TopicAuthorizationException does not give much detailed information about why it failed, // except which topics are denied. Here we just add the ACL to make the error message // consistent with other authorization error messages. final Exception rootCause = new KsqlTopicAuthorizationException( AclOperation.WRITE, e.unauthorizedTopics() ); throw new KsqlException(createInsertFailedExceptionMessage(insertValues), rootCause); } catch (final ClusterAuthorizationException e) { // ClusterAuthorizationException is thrown when using idempotent producers // and either a topic write permission or a cluster-level idempotent write // permission (only applicable for broker versions no later than 2.8) is // missing. In this case, we include additional context to help the user // distinguish this type of failure from other permissions exceptions // such as the ones thrown above when TopicAuthorizationException is caught. 
throw new KsqlException( createInsertFailedExceptionMessage(insertValues), createClusterAuthorizationExceptionRootCause(dataSource) ); } catch (final KafkaException e) { if (e.getCause() != null && e.getCause() instanceof ClusterAuthorizationException) { // The error message thrown when an idempotent producer is missing permissions // is (nondeterministically) inconsistent: it is either a raw ClusterAuthorizationException, // as checked for above, or a ClusterAuthorizationException wrapped inside a KafkaException. // ksqlDB handles these two the same way, accordingly. // See https://issues.apache.org/jira/browse/KAFKA-14138 for more. throw new KsqlException( createInsertFailedExceptionMessage(insertValues), createClusterAuthorizationExceptionRootCause(dataSource) ); } else { throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e); } } catch (final Exception e) { throw new KsqlException(createInsertFailedExceptionMessage(insertValues), e); } }
@Test
public void shouldThrowOnTablesWithNoKeyFieldAndNoRowKeyProvided() {
    // Given: a source table and an insert that supplies only value columns, no primary key.
    givenSourceTableWithSchema(SerdeFeatures.of(), SerdeFeatures.of());
    final ConfiguredStatement<InsertValues> statement = givenInsertValues(
        ImmutableList.of(COL0, COL1),
        ImmutableList.of(
            new StringLiteral("str"),
            new LongLiteral(2L))
    );
    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> executor.execute(statement, mock(SessionProperties.class), engine, serviceContext)
    );
    // Then: tables require an explicit value for their primary key column(s).
    assertThat(e.getMessage(), containsString(
        "Failed to insert values into 'TOPIC'. Value for primary key column(s) k0 is required for tables"));
}
/**
 * Attempts to set an attribute on the given {@link TransformerFactory},
 * tolerating implementations that do not support it.
 *
 * @param transformerFactory the factory to configure
 * @param flag cleared when the attribute cannot be set; when already false the call is a no-op
 * @param name the attribute name to set
 * @param value the attribute value
 */
static void bestEffortSetAttribute(TransformerFactory transformerFactory, AtomicBoolean flag, String name, Object value) {
    // A previous failure already marked this attribute as unsupported — skip.
    if (!flag.get()) {
        return;
    }
    try {
        transformerFactory.setAttribute(name, value);
    } catch (Throwable t) {
        // Deliberate best effort: remember the failure and continue instead of propagating.
        flag.set(false);
        LOG.debug("Issue setting TransformerFactory attribute {}: {}", name, t.toString());
    }
}
@Test
public void testBestEffortSetAttribute() throws Exception {
    TransformerFactory factory = TransformerFactory.newInstance();
    // An unsupported attribute must clear the flag rather than throw.
    AtomicBoolean flag1 = new AtomicBoolean(true);
    XMLUtils.bestEffortSetAttribute(factory, flag1, "unsupportedAttribute false", "abc");
    Assert.assertFalse("unexpected attribute results in return of false?", flag1.get());
    // A supported attribute leaves the flag set.
    AtomicBoolean flag2 = new AtomicBoolean(true);
    XMLUtils.bestEffortSetAttribute(factory, flag2, XMLConstants.ACCESS_EXTERNAL_DTD, "");
    Assert.assertTrue("expected attribute results in return of true?", flag2.get());
    // A flag that starts false short-circuits the call entirely and stays false.
    AtomicBoolean flag3 = new AtomicBoolean(false);
    XMLUtils.bestEffortSetAttribute(factory, flag3, XMLConstants.ACCESS_EXTERNAL_DTD, "");
    Assert.assertFalse("expected attribute results in return of false if input flag is false?", flag3.get());
}
/**
 * Sets a property value, attributing it to the {@code RUNTIME} source.
 *
 * @param key the property key to set
 * @param value the value to assign
 */
public void set(PropertyKey key, Object value) {
    // Convenience overload: runtime-sourced by default.
    set(key, value, Source.RUNTIME);
}
@Test
public void getMalformedEnum() {
    // Assigning an enum of the wrong type to this key must be rejected at set time.
    mThrown.expect(IllegalArgumentException.class);
    mConfiguration.set(PropertyKey.USER_FILE_READ_TYPE_DEFAULT, TestEnum.VALUE);
}
/**
 * Finds the queue offset of the first message at or after the given timestamp.
 *
 * @param topic the topic to search
 * @param queueId the queue within the topic
 * @param timestamp the store timestamp to look up
 * @return the matching offset, delegating with the LOWER boundary by default
 */
@Override
public long getOffsetInQueueByTime(String topic, int queueId, long timestamp) {
    return getOffsetInQueueByTime(topic, queueId, timestamp, BoundaryType.LOWER);
}
@Test
public void testGetOffsetInQueueByTime() {
    // Force tiered storage so the fetcher (not the default store) answers lookups.
    Properties properties = new Properties();
    properties.setProperty("tieredStorageLevel", "FORCE");
    configuration.update(properties);
    Mockito.when(fetcher.getOffsetInQueueByTime(anyString(), anyInt(), anyLong(), eq(BoundaryType.LOWER))).thenReturn(1L);
    Mockito.when(defaultStore.getOffsetInQueueByTime(anyString(), anyInt(), anyLong())).thenReturn(2L);
    Mockito.when(defaultStore.getEarliestMessageTime()).thenReturn(100L);
    // The fetcher's answer (1) wins over the default store's (2) in FORCE mode.
    Assert.assertEquals(1L, currentStore.getOffsetInQueueByTime(mq.getTopic(), mq.getQueueId(), 1000, BoundaryType.LOWER));
    Assert.assertEquals(1L, currentStore.getOffsetInQueueByTime(mq.getTopic(), mq.getQueueId(), 0, BoundaryType.LOWER));
    // A fetcher miss (-1) is propagated through both the 3-arg and 4-arg overloads.
    Mockito.when(fetcher.getOffsetInQueueByTime(anyString(), anyInt(), anyLong(), eq(BoundaryType.LOWER))).thenReturn(-1L);
    Assert.assertEquals(-1L, currentStore.getOffsetInQueueByTime(mq.getTopic(), mq.getQueueId(), 0));
    Assert.assertEquals(-1L, currentStore.getOffsetInQueueByTime(mq.getTopic(), mq.getQueueId(), 0, BoundaryType.LOWER));
}
/**
 * Renders the given call site reference to its textual form.
 *
 * @param callSiteReference the call site to format
 * @return the formatted call site text
 * @throws AssertionError if the underlying writer unexpectedly throws an
 *     {@link IOException} (a {@link StringWriter} target should never do so)
 */
public String getCallSite(CallSiteReference callSiteReference) {
    StringWriter writer = new StringWriter();
    try {
        getWriter(writer).writeCallSite(callSiteReference);
    } catch (IOException e) {
        // Preserve the cause so an impossible-in-practice failure is still diagnosable.
        throw new AssertionError("Unexpected IOException", e);
    }
    return writer.toString();
}
@Test
public void testGetCallSite() throws IOException {
    // The test formatter is stubbed to emit a fixed string for any call site.
    TestDexFormatter formatter = new TestDexFormatter();
    Assert.assertEquals(
        "call site",
        formatter.getCallSite(mock(CallSiteReference.class)));
}
/**
 * Loads a class using a component-first strategy with configurable exceptions.
 *
 * <p>Resolution order: (1) already-loaded classes, (2) classes explicitly marked
 * component-first, (3) classes explicitly marked owner-first, (4) component-only.
 * On failure, the error message is enriched when the class's package prefix is
 * known to belong to an optional module.
 *
 * @param name the binary name of the class
 * @param resolve whether to resolve the class after loading
 * @return the loaded class
 * @throws ClassNotFoundException if the class cannot be found by any strategy
 */
@Override
protected Class<?> loadClass(final String name, final boolean resolve) throws ClassNotFoundException {
    // Per-name lock: required for parallel-capable class loaders.
    synchronized (getClassLoadingLock(name)) {
        try {
            final Class<?> loadedClass = findLoadedClass(name);
            if (loadedClass != null) {
                return resolveIfNeeded(resolve, loadedClass);
            }
            if (isComponentFirstClass(name)) {
                return loadClassFromComponentFirst(name, resolve);
            }
            if (isOwnerFirstClass(name)) {
                return loadClassFromOwnerFirst(name, resolve);
            }
            // making this behavior configurable (component-only/component-first/owner-first)
            // would allow this class to subsume the FlinkUserCodeClassLoader (with an added
            // exception handler)
            return loadClassFromComponentOnly(name, resolve);
        } catch (ClassNotFoundException e) {
            // If we know the package of this class
            Optional<String> foundAssociatedModule =
                knownPackagePrefixesModuleAssociation.entrySet().stream()
                    .filter(entry -> name.startsWith(entry.getKey()))
                    .map(Map.Entry::getValue)
                    .findFirst();
            if (foundAssociatedModule.isPresent()) {
                // Rewrap with a hint about the module that would provide this package.
                throw new ClassNotFoundException(
                    String.format(
                        "Class '%s' not found. Perhaps you forgot to add the module '%s' to the classpath?",
                        name, foundAssociatedModule.get()),
                    e);
            }
            throw e;
        }
    }
}
@Test
void testOwnerFirstClassFoundIgnoresComponent() throws Exception {
    // The owner loader serves the class; the component has no URLs at all.
    TestUrlClassLoader owner = new TestUrlClassLoader(CLASS_TO_LOAD.getName(), CLASS_RETURNED_BY_OWNER);
    final ComponentClassLoader componentClassLoader =
        new ComponentClassLoader(
            new URL[0],
            owner,
            new String[] {CLASS_TO_LOAD.getName()}, // owner-first pattern
            new String[0],
            Collections.emptyMap());
    final Class<?> loadedClass = componentClassLoader.loadClass(CLASS_TO_LOAD.getName());
    // Identity check: the exact class object from the owner must be returned.
    assertThat(loadedClass).isSameAs(CLASS_RETURNED_BY_OWNER);
}
/**
 * Resolves the container app root, falling back to a sensible default when unset.
 *
 * @param rawConfiguration supplies the user-configured app root (may be empty)
 * @param projectProperties used to pick the WAR (Jetty) vs plain Java default
 * @return the app root as an absolute Unix path
 * @throws InvalidAppRootException if the configured value is not an absolute Unix path
 */
@VisibleForTesting
static AbsoluteUnixPath getAppRootChecked(
    RawConfiguration rawConfiguration, ProjectProperties projectProperties)
    throws InvalidAppRootException {
  String appRoot = rawConfiguration.getAppRoot();
  if (appRoot.isEmpty()) {
    // WAR projects deploy under Jetty's webapp root; everything else uses the generic default.
    appRoot = projectProperties.isWarProject()
        ? DEFAULT_JETTY_APP_ROOT
        : JavaContainerBuilder.DEFAULT_APP_ROOT;
  }
  try {
    return AbsoluteUnixPath.get(appRoot);
  } catch (IllegalArgumentException ex) {
    // NOTE(review): appRoot is passed twice — presumably (message, invalid value);
    // confirm against InvalidAppRootException's constructor.
    throw new InvalidAppRootException(appRoot, appRoot, ex);
  }
}
@Test
public void testGetAppRootChecked_errorOnNonAbsolutePath() {
    // A relative app root must be rejected with the offending path in the message.
    when(rawConfiguration.getAppRoot()).thenReturn("relative/path");
    Exception exception =
        assertThrows(
            InvalidAppRootException.class,
            () -> PluginConfigurationProcessor.getAppRootChecked(
                rawConfiguration, projectProperties));
    assertThat(exception).hasMessageThat().isEqualTo("relative/path");
}
/**
 * Creates a {@code WriteFiles} transform writing to the given sink with default settings:
 * runner-determined sharding, unwindowed writes, throwing bad-record handling.
 *
 * @param sink the file-based sink to write to; must not be null
 * @return a new {@code WriteFiles} builder-backed transform
 */
public static <UserT, DestinationT, OutputT> WriteFiles<UserT, DestinationT, OutputT> to(
    FileBasedSink<UserT, DestinationT, OutputT> sink) {
  checkArgument(sink != null, "sink can not be null");
  return new AutoValue_WriteFiles.Builder<UserT, DestinationT, OutputT>()
      .setSink(sink)
      // null sharding settings mean the runner decides.
      .setComputeNumShards(null)
      .setNumShardsProvider(null)
      .setWindowedWrites(false)
      .setWithAutoSharding(false)
      .setMaxNumWritersPerBundle(DEFAULT_MAX_NUM_WRITERS_PER_BUNDLE)
      // Side inputs come from the sink's dynamic destinations.
      .setSideInputs(sink.getDynamicDestinations().getSideInputs())
      .setSkipIfEmpty(false)
      .setBadRecordErrorHandler(new DefaultErrorHandler<>())
      .setBadRecordRouter(BadRecordRouter.THROWING_ROUTER)
      .build();
}
@Test
public void testDisplayData() {
    // Build a sink that contributes a custom display-data item ("foo" -> "bar").
    DynamicDestinations<String, Void, String> dynamicDestinations =
        DynamicFileDestinations.constant(
            DefaultFilenamePolicy.fromParams(
                new Params()
                    .withBaseFilename(
                        getBaseOutputDirectory()
                            .resolve("file", StandardResolveOptions.RESOLVE_FILE))
                    .withShardTemplate("-SS-of-NN")));
    SimpleSink<Void> sink =
        new SimpleSink<Void>(
            getBaseOutputDirectory(), dynamicDestinations, Compression.UNCOMPRESSED) {
          @Override
          public void populateDisplayData(DisplayData.Builder builder) {
            builder.add(DisplayData.item("foo", "bar"));
          }
        };
    WriteFiles<String, ?, String> write = WriteFiles.to(sink);
    DisplayData displayData = DisplayData.from(write);
    // The transform must surface both the sink's identity and its own display data.
    assertThat(displayData, hasDisplayItem("sink", sink.getClass()));
    assertThat(displayData, includesDisplayDataFor("sink", sink));
}
/**
 * Acquires an exclusive file lock on the well-known lock file inside {@code dir}.
 *
 * @param dir the directory to lock
 * @param logger receives a FINE-level message on successful acquisition
 * @return a handle bundling the directory, channel, and acquired lock
 */
public static DirectoryLock lockForDirectory(File dir, ILogger logger) {
    final File lockFile = new File(dir, FILE_NAME);
    final FileChannel lockChannel = openChannel(lockFile);
    final FileLock acquired = acquireLock(lockFile, lockChannel);
    if (logger.isFineEnabled()) {
        logger.fine("Acquired lock on " + lockFile.getAbsolutePath());
    }
    return new DirectoryLock(dir, lockChannel, acquired, logger);
}
@Test
public void test_lockForDirectory_forNonExistingDir() {
    // A random UUID directory name is guaranteed not to exist, so opening the
    // lock file must fail with FileNotFoundException wrapped in HazelcastException.
    assertThatThrownBy(() -> lockForDirectory(new File(UuidUtil.newUnsecureUuidString()), logger))
        .isInstanceOf(HazelcastException.class)
        .hasCauseInstanceOf(FileNotFoundException.class);
}
/**
 * Verifies raw signature bytes against the data, wrapping them as an EC signature.
 *
 * @param data the signed data
 * @param signature the raw signature bytes to decode as {@code EcSignature}
 * @param digest the message digest used during signing
 */
@Override
public void verify(byte[] data, byte[] signature, MessageDigest digest) {
    verify(data, new EcSignature(signature), digest);
}
@Test
public void shouldValidateSignatureWithoutSignature() {
    // No explicit assertion: the test passes as long as verify(D, Q) completes
    // without throwing.
    verify(D, Q);
}
/**
 * Runs the given migration set under a per-set distributed lock.
 *
 * @param set the migrations to apply; its id determines the lock path
 * @return a stage completing when the migrations have run (or failing on error/timeout)
 */
public CompletionStage<Void> migrate(MigrationSet set) {
    // One lock per migration-set id so concurrent migrators of the same set serialize.
    String setLockPath = ZKPaths.makePath(lockPath, set.id());
    InterProcessLock lock = new InterProcessSemaphoreMutex(client.unwrap(), setLockPath);
    return lockAsync(lock, lockMax.toMillis(), TimeUnit.MILLISECONDS, executor)
        .thenCompose(ignored -> runMigrationInLock(lock, set));
}
@Test
public void testTransactionForBadOps() throws Exception {
    // op2 targets /a/b/c/d whose parents don't exist, so the transaction must fail.
    CuratorOp op1 = client.transactionOp().create().forPath("/test2", "something".getBytes());
    CuratorOp op2 = client.transactionOp().create().forPath("/a/b/c/d");
    Migration migration = () -> Arrays.asList(op1, op2);
    MigrationSet migrationSet = MigrationSet.build("1", Collections.singletonList(migration));
    try {
        complete(manager.migrate(migrationSet));
        fail("Should throw");
    } catch (Throwable e) {
        // The root cause must be ZooKeeper's NoNodeException for the missing parent.
        assertTrue(Throwables.getRootCause(e) instanceof KeeperException.NoNodeException);
    }
    // Transactions are atomic: the valid op must have been rolled back too.
    assertNull(client.unwrap().checkExists().forPath("/test"));
}
/**
 * Pages brokerage users, optionally restricted to descendants of a bind user.
 *
 * @param pageReqVO query conditions, including optional bind-user id and level
 * @return the matching page, or an empty page when the bind-user filter matches nobody
 */
@Override
public PageResult<BrokerageUserDO> getBrokerageUserPage(BrokerageUserPageReqVO pageReqVO) {
    List<Long> childIds = getChildUserIdsByLevel(pageReqVO.getBindUserId(), pageReqVO.getLevel());
    // When a bind-user-id filter is present but no descendant users were found,
    // return an empty page directly instead of querying.
    if (pageReqVO.getBindUserId() != null && CollUtil.isEmpty(childIds)) {
        return PageResult.empty();
    }
    return brokerageUserMapper.selectPage(pageReqVO, childIds);
}
@Test @Disabled // TODO 请修改 null 为需要的值,然后删除 @Disabled 注解 public void testGetBrokerageUserPage() { // mock 数据 BrokerageUserDO dbBrokerageUser = randomPojo(BrokerageUserDO.class, o -> { // 等会查询到 o.setBindUserId(null); o.setBrokerageEnabled(null); o.setCreateTime(null); }); brokerageUserMapper.insert(dbBrokerageUser); // 测试 brokerageUserId 不匹配 brokerageUserMapper.insert(cloneIgnoreId(dbBrokerageUser, o -> o.setBindUserId(null))); // 测试 brokerageEnabled 不匹配 brokerageUserMapper.insert(cloneIgnoreId(dbBrokerageUser, o -> o.setBrokerageEnabled(null))); // 测试 createTime 不匹配 brokerageUserMapper.insert(cloneIgnoreId(dbBrokerageUser, o -> o.setCreateTime(null))); // 准备参数 BrokerageUserPageReqVO reqVO = new BrokerageUserPageReqVO(); reqVO.setBindUserId(null); reqVO.setBrokerageEnabled(null); reqVO.setCreateTime(buildBetweenTime(2023, 2, 1, 2023, 2, 28)); // 调用 PageResult<BrokerageUserDO> pageResult = brokerageUserService.getBrokerageUserPage(reqVO); // 断言 assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(dbBrokerageUser, pageResult.getList().get(0)); }
/**
 * REST endpoint resolving a single SAML metadata file for the requested entity.
 *
 * @param request the validated metadata request body
 * @return the resolved metadata response from the retriever service
 */
@Operation(summary = "Get single metadatafile")
@PostMapping(value = "metadata", consumes = "application/json", produces = "application/json")
public SamlMetadataResponse resolveMetadata(@Valid @RequestBody SamlMetadataRequest request) {
    return metadataRetrieverService.resolveSamlMetadata(request);
}
/**
 * Verifies the controller delegates metadata resolution to the service and
 * passes the service's response through unchanged.
 */
@Test
public void resolveSamlMetadata() {
    SamlMetadataRequest request = new SamlMetadataRequest();
    SamlMetadataResponse response = new SamlMetadataResponse();
    response.setRequestStatus("OK");
    when(metadataRetrieverServiceMock.resolveSamlMetadata(any(SamlMetadataRequest.class))).thenReturn(response);
    SamlMetadataResponse result = controllerMock.resolveMetadata(request);
    verify(metadataRetrieverServiceMock, times(1)).resolveSamlMetadata(any(SamlMetadataRequest.class));
    // JUnit convention is assertEquals(expected, actual); the stubbed response is the expectation.
    assertEquals(response.getRequestStatus(), result.getRequestStatus());
}
/**
 * Returns the maximum table name length supported.
 *
 * @return 0, which by JDBC {@code DatabaseMetaData} convention means no known limit
 */
@Override
public int getMaxTableNameLength() {
    return 0;
}
@Test
void assertGetMaxTableNameLength() {
    // 0 signals "no limit / unknown" per the JDBC metadata convention.
    assertThat(metaData.getMaxTableNameLength(), is(0));
}
/**
 * Asserts that the actual multimap contains exactly the entries of the expected one.
 *
 * <p>On mismatch, fails with a message listing missing and/or unexpected entries,
 * adding type information when entries from both sides share a toString form.
 *
 * @param expectedMultimap the entries the subject must contain exactly
 * @return an {@code Ordered} for an optional follow-up in-order check
 */
@CanIgnoreReturnValue
public final Ordered containsExactlyEntriesIn(Multimap<?, ?> expectedMultimap) {
  checkNotNull(expectedMultimap, "expectedMultimap");
  checkNotNull(actual);
  // Entries expected but absent, and entries present but unexpected.
  ListMultimap<?, ?> missing = difference(expectedMultimap, actual);
  ListMultimap<?, ?> extra = difference(actual, expectedMultimap);

  // TODO(kak): Possible enhancement: Include "[1 copy]" if the element does appear in
  // the subject but not enough times. Similarly for unexpected extra items.
  if (!missing.isEmpty()) {
    if (!extra.isEmpty()) {
      // Both sides differ: disambiguate with type info when toStrings collide.
      boolean addTypeInfo = hasMatchingToStringPair(missing.entries(), extra.entries());
      // Note: The usage of countDuplicatesAndAddTypeInfo() below causes entries no longer to be
      // grouped by key in the 'missing' and 'unexpected items' parts of the message (we still
      // show the actual and expected multimaps in the standard format).
      String missingDisplay =
          addTypeInfo
              ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(missing).entries())
              : countDuplicatesMultimap(annotateEmptyStringsMultimap(missing));
      String extraDisplay =
          addTypeInfo
              ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(extra).entries())
              : countDuplicatesMultimap(annotateEmptyStringsMultimap(extra));
      failWithActual(
          fact("missing", missingDisplay),
          fact("unexpected", extraDisplay),
          simpleFact("---"),
          fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
      return ALREADY_FAILED;
    } else {
      // Only missing entries.
      failWithActual(
          fact("missing", countDuplicatesMultimap(annotateEmptyStringsMultimap(missing))),
          simpleFact("---"),
          fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
      return ALREADY_FAILED;
    }
  } else if (!extra.isEmpty()) {
    // Only unexpected entries.
    failWithActual(
        fact("unexpected", countDuplicatesMultimap(annotateEmptyStringsMultimap(extra))),
        simpleFact("---"),
        fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
    return ALREADY_FAILED;
  }
  // Entries match; hand back an object that can additionally verify ordering.
  return new MultimapInOrder(/* allowUnexpected = */ false, expectedMultimap);
}
@Test
public void containsExactlyEntriesIn_heterogeneousMultimap_failsWithSameToString()
    throws Exception {
  // Integer vs Long keys print identically, so the failure message must carry type info.
  expectFailureWhenTestingThat(ImmutableMultimap.of(1, "a", 1, "b", 2L, "c"))
      .containsExactlyEntriesIn(ImmutableMultimap.of(1L, "a", 1L, "b", 2, "c"));
  assertFailureKeys("missing", "unexpected", "---", "expected", "but was");
  assertFailureValue(
      "missing",
      "[1=a (Map.Entry<java.lang.Long, java.lang.String>), "
          + "1=b (Map.Entry<java.lang.Long, java.lang.String>), "
          + "2=c (Map.Entry<java.lang.Integer, java.lang.String>)]");
  assertFailureValue(
      "unexpected",
      "[1=a (Map.Entry<java.lang.Integer, java.lang.String>), "
          + "1=b (Map.Entry<java.lang.Integer, java.lang.String>), "
          + "2=c (Map.Entry<java.lang.Long, java.lang.String>)]");
}
/**
 * Reconciles the thread's owned tasks with a new active/standby assignment.
 *
 * <p>Existing tasks are updated, recycled (active <-> standby), or closed; tasks
 * new to this client are created. Failures from closing/recycling and from the
 * state updater are collected and rethrown together at the end.
 *
 * @param activeTasks newly assigned active tasks with their input partitions
 * @param standbyTasks newly assigned standby tasks with their input partitions
 */
public void handleAssignment(final Map<TaskId, Set<TopicPartition>> activeTasks,
                             final Map<TaskId, Set<TopicPartition>> standbyTasks) {
    log.info("Handle new assignment with:\n" +
                 "\tNew active tasks: {}\n" +
                 "\tNew standby tasks: {}\n" +
                 "\tExisting active tasks: {}\n" +
                 "\tExisting standby tasks: {}",
             activeTasks.keySet(), standbyTasks.keySet(), activeTaskIds(), standbyTaskIds());

    topologyMetadata.addSubscribedTopicsFromAssignment(
        activeTasks.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()),
        logPrefix
    );

    final Map<TaskId, Set<TopicPartition>> activeTasksToCreate = new HashMap<>(activeTasks);
    final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate = new HashMap<>(standbyTasks);
    final Map<Task, Set<TopicPartition>> tasksToRecycle = new HashMap<>();
    // Sorted by task id for deterministic close order.
    final Set<Task> tasksToCloseClean = new TreeSet<>(Comparator.comparing(Task::id));

    // Lock only the currently-owned tasks that also appear in the new assignment.
    final Set<TaskId> tasksToLock =
        tasks.allTaskIds().stream()
            .filter(x -> activeTasksToCreate.containsKey(x) || standbyTasksToCreate.containsKey(x))
            .collect(Collectors.toSet());
    maybeLockTasks(tasksToLock);

    // first put aside those unrecognized tasks because of unknown named-topologies
    tasks.clearPendingTasksToCreate();
    tasks.addPendingActiveTasksToCreate(pendingTasksToCreate(activeTasksToCreate));
    tasks.addPendingStandbyTasksToCreate(pendingTasksToCreate(standbyTasksToCreate));

    // first rectify all existing tasks:
    // 1. for tasks that are already owned, just update input partitions / resume and skip re-creating them
    // 2. for tasks that have changed active/standby status, just recycle and skip re-creating them
    // 3. otherwise, close them since they are no longer owned
    final Map<TaskId, RuntimeException> failedTasks = new LinkedHashMap<>();
    if (stateUpdater == null) {
        handleTasksWithoutStateUpdater(activeTasksToCreate, standbyTasksToCreate, tasksToRecycle, tasksToCloseClean);
    } else {
        handleTasksWithStateUpdater(
            activeTasksToCreate,
            standbyTasksToCreate,
            tasksToRecycle,
            tasksToCloseClean,
            failedTasks
        );
        failedTasks.putAll(collectExceptionsAndFailedTasksFromStateUpdater());
    }

    final Map<TaskId, RuntimeException> taskCloseExceptions = closeAndRecycleTasks(tasksToRecycle, tasksToCloseClean);

    maybeUnlockTasks(tasksToLock);

    failedTasks.putAll(taskCloseExceptions);
    // Rethrows collected failures (if any) before creating new tasks.
    maybeThrowTaskExceptions(failedTasks);

    createNewTasks(activeTasksToCreate, standbyTasksToCreate);
}
@Test
public void shouldRemoveUnusedStandbyTaskFromStateUpdaterAndCloseCleanly() {
    // A running standby task sits in the state updater but is absent from the new (empty) assignment.
    final StandbyTask standbyTaskToClose = standbyTask(taskId02, taskId02ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId02Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManager(ProcessingMode.AT_LEAST_ONCE, tasks, true);
    when(stateUpdater.getTasks()).thenReturn(mkSet(standbyTaskToClose));
    final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
    when(stateUpdater.remove(standbyTaskToClose.id())).thenReturn(future);
    future.complete(new StateUpdater.RemovedTaskResult(standbyTaskToClose));
    taskManager.handleAssignment(Collections.emptyMap(), Collections.emptyMap());
    // The orphaned standby must be suspended and closed cleanly; no new tasks are created.
    verify(standbyTaskToClose).suspend();
    verify(standbyTaskToClose).closeClean();
    verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
    verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
/**
 * Builds the {@code AmazonInfo} from the accumulated metadata.
 *
 * @return a new {@code AmazonInfo} named after the Amazon data center
 */
public AmazonInfo build() {
    return new AmazonInfo(Name.Amazon.name(), metadata);
}
@Test
public void payloadWithNameAfterMetadata() throws IOException {
    // Field order in JSON must not matter: "name" appears after "metadata" here.
    String json = "{" +
        "  \"@class\": \"com.netflix.appinfo.AmazonInfo\"," +
        "  \"metadata\": {" +
        "    \"instance-id\": \"i-12345\"" +
        "  }," +
        "  \"name\": \"Amazon\"" +
        "}";
    AmazonInfo info = newMapper().readValue(json, AmazonInfo.class);
    AmazonInfo expected = AmazonInfo.Builder.newBuilder()
        .addMetadata(AmazonInfo.MetaDataKey.instanceId, "i-12345")
        .build();
    Assert.assertEquals(expected, nonCompact(info));
}
/**
 * Splits paragraph text into interpreter name, inline local properties, and script body.
 *
 * <p>Recognizes the {@code %interpreter(key=value,...)} prefix; text without a
 * prefix is treated entirely as script.
 *
 * @param text the raw paragraph text
 * @return the parsed interpreter name, script text (leading whitespace removed), and properties
 */
public static ParseResult parse(String text) {
    final Map<String, String> localProperties = new HashMap<>();
    final Matcher matcher = REPL_PATTERN.matcher(text);
    if (!matcher.find()) {
        // No interpreter directive: the whole text is the script body.
        return new ParseResult("", removeLeadingWhiteSpaces(text), localProperties);
    }
    final String headingSpace = matcher.group(1);
    final String intpText = matcher.group(2);
    // Position just past "%<interpreter>".
    int startPos = headingSpace.length() + intpText.length() + 1;
    if (startPos < text.length() && text.charAt(startPos) == '(') {
        // Inline properties follow the interpreter name, e.g. %spark(key=value).
        startPos = parseLocalProperties(text, startPos, localProperties);
    }
    return new ParseResult(intpText, removeLeadingWhiteSpaces(text.substring(startPos)), localProperties);
}
@Test
void testParagraphInterpreterWithoutProperties() {
    // Empty parentheses: interpreter recognized, no properties, script follows.
    ParagraphTextParser.ParseResult parseResult = ParagraphTextParser.parse("%spark() sc.version");
    assertEquals("spark", parseResult.getIntpText());
    assertEquals(0, parseResult.getLocalProperties().size());
    assertEquals("sc.version", parseResult.getScriptText());
}
/**
 * Retrieves the cluster topology via the CLUSTER NODES command.
 *
 * @return the cluster's nodes decoded as {@code RedisClusterNode}s
 */
@Override
public Iterable<RedisClusterNode> clusterGetNodes() {
    // The command is not bound to a key, hence the null key argument.
    return read(null, StringCodec.INSTANCE, CLUSTER_NODES);
}
@Test
public void testClusterGetNodes() {
    // Expect the full 6-node test cluster.
    Iterable<RedisClusterNode> nodes = connection.clusterGetNodes();
    assertThat(nodes).hasSize(6);
    for (RedisClusterNode redisClusterNode : nodes) {
        // Core identity fields must be populated on every node.
        assertThat(redisClusterNode.getLinkState()).isNotNull();
        assertThat(redisClusterNode.getFlags()).isNotEmpty();
        assertThat(redisClusterNode.getHost()).isNotNull();
        assertThat(redisClusterNode.getPort()).isNotNull();
        assertThat(redisClusterNode.getId()).isNotNull();
        assertThat(redisClusterNode.getType()).isNotNull();
        if (redisClusterNode.getType() == NodeType.MASTER) {
            // Masters carry slot ranges; replicas carry their master's id.
            assertThat(redisClusterNode.getSlotRange().getSlots()).isNotEmpty();
        } else {
            assertThat(redisClusterNode.getMasterId()).isNotNull();
        }
    }
}
/**
 * Registers the CORS filter for API, management, and API-docs endpoints.
 *
 * <p>The filter is effectively disabled when no allowed origins or origin
 * patterns are configured: no path mappings are registered in that case.
 *
 * @return the CORS filter backed by the configured mappings
 */
@Bean
public CorsFilter corsFilter() {
    UrlBasedCorsConfigurationSource source = new UrlBasedCorsConfigurationSource();
    CorsConfiguration config = jHipsterProperties.getCors();
    if (!CollectionUtils.isEmpty(config.getAllowedOrigins()) || !CollectionUtils.isEmpty(config.getAllowedOriginPatterns())) {
        log.debug("Registering CORS filter");
        source.registerCorsConfiguration("/api/**", config);
        source.registerCorsConfiguration("/management/**", config);
        source.registerCorsConfiguration("/v3/api-docs", config);
        source.registerCorsConfiguration("/swagger-ui/**", config);
    }
    return new CorsFilter(source);
}
@Test
void shouldCorsFilterDeactivatedForEmptyAllowedOrigins() throws Exception {
    // With no allowed origins configured, no CORS headers should appear on responses.
    props.getCors().setAllowedOrigins(new ArrayList<>());
    MockMvc mockMvc = MockMvcBuilders.standaloneSetup(new WebConfigurerTestController()).addFilters(webConfigurer.corsFilter()).build();
    mockMvc
        .perform(get("/api/test-cors").header(HttpHeaders.ORIGIN, "other.domain.com"))
        .andExpect(status().isOk())
        .andExpect(header().doesNotExist(HttpHeaders.ACCESS_CONTROL_ALLOW_ORIGIN));
}
/**
 * Static factory for the no-op mapping action.
 *
 * @return a new {@code NoMappingAction} instance
 */
public static NoMappingAction noAction() {
    return new NoMappingAction();
}
@Test
public void testNoActionMethod() {
    // The factory must produce an action of type NO_ACTION backed by NoMappingAction.
    MappingAction mappingAction = MappingActions.noAction();
    checkAndConvert(mappingAction, MappingAction.Type.NO_ACTION, NoMappingAction.class);
}
public static boolean regionMatches(final CharSequence cs, final boolean ignoreCase, final int thisStart, final CharSequence substring, final int start, final int length) { if (cs instanceof String && substring instanceof String) { return ((String) cs).regionMatches(ignoreCase, thisStart, (String) substring, start, length); } int index1 = thisStart; int index2 = start; int tmpLen = length; while (tmpLen-- > 0) { final char c1 = cs.charAt(index1++); final char c2 = substring.charAt(index2++); if (c1 == c2) { continue; } if (!ignoreCase) { return false; } // The same check as in String.regionMatches(): if (Character.toUpperCase(c1) != Character.toUpperCase(c2) && Character.toLowerCase(c1) != Character .toLowerCase(c2)) { return false; } } return true; }
@Test
void testRegionMatchesNotEqualsCaseInsensitiveForNonString() {
    // StringBuilder forces the char-by-char path; "abc" vs "Cab" differ even case-insensitively.
    assertFalse(StringUtils.regionMatches(new StringBuilder("abc"), true, 0, "xCab", 1, 3));
}
/**
 * Abbreviates a fully-qualified class name by shortening every package segment
 * to its first character, e.g. {@code cn.hutool.core.util.StrUtil} becomes
 * {@code c.h.c.u.StrUtil}.
 *
 * @param className the fully-qualified class name
 * @return the abbreviated name, or the input unchanged when it has no package
 */
public static String getShortClassName(String className) {
    final List<String> packages = StrUtil.split(className, CharUtil.DOT);
    // Nothing to abbreviate for null split results or simple (package-less) names.
    if (null == packages || packages.size() < 2) {
        return className;
    }
    final int lastIndex = packages.size() - 1;
    final StringBuilder shortName = StrUtil.builder();
    for (int i = 0; i < lastIndex; i++) {
        if (i > 0) {
            shortName.append(CharUtil.DOT);
        }
        // First character of each package segment.
        shortName.append(packages.get(i).charAt(0));
    }
    // The class name itself is kept in full.
    shortName.append(CharUtil.DOT).append(packages.get(lastIndex));
    return shortName.toString();
}
@Test
public void getShortClassNameTest() {
    // Each package segment collapses to one letter; the class name stays intact.
    String className = "cn.hutool.core.util.StrUtil";
    String result = ClassUtil.getShortClassName(className);
    assertEquals("c.h.c.u.StrUtil", result);
}
/**
 * Handles GET for a post's previous/current/next navigation by post name.
 *
 * @param request the server request carrying the {@code name} path variable
 * @return 200 with the navigation JSON; errors with NotFoundException when the
 *     cursor result has no current post
 */
private Mono<ServerResponse> getPostNavigationByName(ServerRequest request) {
    final var name = request.pathVariable("name");
    return postFinder.cursor(name)
        .doOnNext(result -> {
            // A cursor without a current post means the named post does not exist.
            if (result.getCurrent() == null) {
                throw new NotFoundException("Post not found");
            }
        })
        .flatMap(result -> ServerResponse.ok().contentType(MediaType.APPLICATION_JSON)
            .bodyValue(result)
        );
}
@Test
public void testGetPostNavigationByName() {
    // Stub a navigation whose current post is named "test".
    Metadata metadata = new Metadata();
    metadata.setName("test");
    NavigationPostVo navigation = NavigationPostVo.builder()
        .current(PostVo.builder().metadata(metadata).build())
        .build();
    when(postFinder.cursor(anyString()))
        .thenReturn(Mono.just(navigation));
    // The endpoint must return 200 JSON exposing the current post's name.
    webClient.get().uri("/posts/{name}/navigation", "test")
        .exchange()
        .expectStatus().isOk()
        .expectHeader().contentType(MediaType.APPLICATION_JSON)
        .expectBody()
        .jsonPath("$.current.metadata.name").isEqualTo("test");
    verify(postFinder).cursor(anyString());
}
/**
 * Resolves the outbound ports configured for the given endpoint.
 *
 * <p>With advanced networking enabled, per-endpoint configuration is consulted;
 * otherwise the legacy single network configuration applies.
 *
 * @param endpointQualifier identifies the endpoint when advanced networking is on
 * @return the resolved collection of outbound ports
 */
@Override
public Collection<Integer> getOutboundPorts(EndpointQualifier endpointQualifier) {
    final AdvancedNetworkConfig advancedNetworkConfig = node.getConfig().getAdvancedNetworkConfig();
    if (!advancedNetworkConfig.isEnabled()) {
        // Legacy single-endpoint setup: ports come from the plain network config.
        final NetworkConfig networkConfig = node.getConfig().getNetworkConfig();
        return AddressUtil.getOutboundPorts(networkConfig.getOutboundPorts(),
                networkConfig.getOutboundPortDefinitions());
    }
    // Advanced networking: each endpoint may carry its own outbound port configuration.
    final EndpointConfig endpointConfig = advancedNetworkConfig.getEndpointConfigs().get(endpointQualifier);
    final Collection<Integer> ports =
            endpointConfig != null ? endpointConfig.getOutboundPorts() : Collections.emptyList();
    final Collection<String> portDefinitions =
            endpointConfig != null ? endpointConfig.getOutboundPortDefinitions() : Collections.emptyList();
    return AddressUtil.getOutboundPorts(ports, portDefinitions);
}
@Test
public void testGetOutboundPorts_acceptsSemicolonAsASeparator() {
    // Port definitions may be separated by ';' as well as ','.
    networkConfig.addOutboundPortDefinition("29000;29001");
    Collection<Integer> outboundPorts = serverContext.getOutboundPorts(MEMBER);
    assertThat(outboundPorts).hasSize(2);
    assertThat(outboundPorts).containsExactlyInAnyOrder(29000, 29001);
}
/**
 * Parses a free-form date string via natural-language parsing in the configured time zone.
 *
 * @param value the text to parse; null/empty yields null
 * @return the first parsed date as a {@code DateTime} in {@code timeZone},
 *     or {@code null} if nothing could be parsed
 */
@Override
@Nullable
public Object convert(String value) {
    if (value == null || value.isEmpty()) {
        return null;
    }
    // Natty's parser interprets dates relative to the supplied time zone.
    final Parser parser = new Parser(timeZone.toTimeZone());
    final List<DateGroup> r = parser.parse(value);
    if (r.isEmpty() || r.get(0).getDates().isEmpty()) {
        return null;
    }
    // Only the first date of the first group is used.
    return new DateTime(r.get(0).getDates().get(0), timeZone);
}
@Test
public void convertUsesEtcUTCIfTimeZoneSettingIsEmpty() throws Exception {
    // An empty time_zone setting must fall back to Etc/UTC rather than fail.
    Converter c = new FlexibleDateConverter(ImmutableMap.<String, Object>of("time_zone", ""));
    final DateTime dateOnly = (DateTime) c.convert("2014-3-12");
    assertThat(dateOnly.getZone()).isEqualTo(DateTimeZone.forID("Etc/UTC"));
}
public void assignUsers( List<Object> usersToAssign ) { List<UIRepositoryObjectAcl> previousVal = new ArrayList<UIRepositoryObjectAcl>(); previousVal.addAll( getSelectedUserList() ); List<UIRepositoryObjectAcl> assignList = new ArrayList<UIRepositoryObjectAcl>(); for ( Object user : usersToAssign ) { if ( user instanceof String ) assignList.add( assignUser( (String) user ) ); } this.firePropertyChange( "selectedUserList", null, getSelectedUserList() ); //$NON-NLS-1$ setSelectedAssignedUsers( assignList ); setSelectedAvailableUsers( new ArrayList<String>() ); }
// End-to-end assign/unassign scenario: USER1 is pre-assigned via the ACL store, USER2 is
// assigned through the model, the pending assignment is committed, and then both users
// are unassigned again, verifying the available/assigned/committed lists at each step.
// (Line breaks restored around the embedded comments; tokens unchanged.)
@Test public void testAssignUsers() {
  UIRepositoryObjectAcl selectedUserAcl = new UIRepositoryObjectAcl( createUserAce( USER1 ) );
  repositoryObjectAcls.addAcl( selectedUserAcl );
  repositoryObjectAclModel.setAclsList( defaultUserNameList, null );
  List<Object> objectUserList = Arrays.asList( new Object[] { USER2 } );
  repositoryObjectAclModel.assignUsers( objectUserList );
  assertStringListMatches( Arrays.asList( new String[] { USER3 } ), repositoryObjectAclModel.getAvailableUserList() );
  assertNameToAclListMatches( Arrays.asList( new String[] { USER2 } ), repositoryObjectAclModel .getSelectedAssignedUsers() );
  assertNameToAclListMatches( Arrays.asList( new String[] { USER2 } ), repositoryObjectAclModel.getAclsToAdd() );
  repositoryObjectAclModel.updateSelectedAcls();
  assertNameToAclListMatches( Arrays.asList( new String[] { USER1, USER2 } ), repositoryObjectAclModel .getSelectedAcls().getAcls() );
  // For some reason, updateSelectedAcls does not clear aclsToAdd. After the update USER2 is still present in
  // the aclsToAdd list. This probably is not an issue because the interface reloads. For now, I will clear
  // manually now so I can exercise some unassign code.
  repositoryObjectAclModel.getAclsToAdd().clear();
  // Unassign the pending USER2 and the pre-assigned USER1
  UIRepositoryObjectAcl user2Acl = repositoryObjectAclModel.getSelectedUser( 1 );
  repositoryObjectAclModel.unassign( Arrays.asList( new Object[] { user2Acl, selectedUserAcl } ) );
  assertEquals( 0, repositoryObjectAclModel.getSelectedAssignedUsers().size() );
  assertStringListMatches( defaultUserNameList, repositoryObjectAclModel.getAvailableUserList() );
  repositoryObjectAclModel.updateSelectedAcls();
  assertEquals( 0, repositoryObjectAclModel.getSelectedAcls().getAcls().size() );
}
// OAuth2 callback entry point: resolves the identity provider from the callback path.
// When resolution fails, resolveProviderOrHandleResponse writes the error response
// itself and returns null, so this method only proceeds when a provider was found.
@Override public void doFilter(HttpRequest request, HttpResponse response, FilterChain chain) { IdentityProvider provider = resolveProviderOrHandleResponse(request, response, CALLBACK_PATH); if (provider != null) { handleProvider(request, response, provider); } }
// Verifies that when the identity provider throws during the callback, the filter
// redirects to the "unauthorized" page prefixed with the servlet context path.
@Test public void redirect_with_context_when_failing_because_of_Exception() throws Exception { when(request.getContextPath()).thenReturn("/sonarqube"); FailWithIllegalStateException identityProvider = new FailWithIllegalStateException(); when(request.getRequestURI()).thenReturn("/oauth2/callback/" + identityProvider.getKey()); identityProviderRepository.addIdentityProvider(identityProvider); underTest.doFilter(request, response, chain); verify(response).sendRedirect("/sonarqube/sessions/unauthorized"); }
// Returns the language identifier field recorded in this header.
public long getLangId() { return lang_id; }
// Verifies the parsed ITSF header exposes the expected language id.
@Test public void getLangId() { assertEquals(TestParameters.VP_LANGUAGE_ID, chmItsfHeader.getLangId()); }
// Static factory: creates a fresh ServerConfigChangeEvent instance per call.
public static ServerConfigChangeEvent newEvent() { return new ServerConfigChangeEvent(); }
// Verifies the factory returns a ServerConfigChangeEvent (usable through the Event supertype).
@Test void test() { Event event = ServerConfigChangeEvent.newEvent(); assertTrue(event instanceof ServerConfigChangeEvent); }
// Runs the configured formulas over the whole report component tree: a path-aware
// crawler visits every component from the root, and the formula executor computes and
// aggregates measures bottom-up into the measure repository.
public void execute() { new PathAwareCrawler<>( FormulaExecutorComponentVisitor.newBuilder(metricRepository, measureRepository).buildFor(formulas)) .visit(treeRootHolder.getReportTreeRoot()); }
// Verifies duplicated-line counts per file and their aggregation: directory = 10+9 = 19
// (FILE_4 is outside the directory), root = 19+7 = 26, untouched FILE_3 = 0.
@Test public void compute_and_aggregate_duplicated_lines() { addDuplicatedBlock(FILE_1_REF, 10); addDuplicatedBlock(FILE_2_REF, 9); addDuplicatedBlock(FILE_4_REF, 7); underTest.execute(); assertRawMeasureValue(FILE_1_REF, DUPLICATED_LINES_KEY, 10); assertRawMeasureValue(FILE_2_REF, DUPLICATED_LINES_KEY, 9); assertRawMeasureValue(FILE_3_REF, DUPLICATED_LINES_KEY, 0); assertRawMeasureValue(FILE_4_REF, DUPLICATED_LINES_KEY, 7); assertRawMeasureValue(DIRECTORY_REF, DUPLICATED_LINES_KEY, 19); assertRawMeasureValue(ROOT_REF, DUPLICATED_LINES_KEY, 26); }
// Narrows a JsonNode to a primitive boolean. Only an actual JSON boolean node is
// accepted; any other node type raises the shared invalid-conversion exception
// targeting SqlBaseType.BOOLEAN.
static boolean toBoolean(final JsonNode object) { if (object instanceof BooleanNode) { return object.booleanValue(); } throw invalidConversionException(object, SqlBaseType.BOOLEAN); }
// Verifies a JSON boolean TRUE node converts to Java true.
@Test public void shouldConvertToBooleanCorrectly() { final Boolean b = JsonSerdeUtils.toBoolean(BooleanNode.TRUE); assertThat(b, equalTo(true)); }
// Convenience overload: builds the API version URL from the coordinates carried by an
// ExtensionJson (namespace, name, target platform, version), delegating to the 5-arg variant.
public static String createApiVersionUrl(String baseUrl, ExtensionJson json) { return createApiVersionUrl(baseUrl, json.namespace, json.name, json.targetPlatform, json.version); }
// Verifies the URL layout: base + /api/{namespace}/{name}/{target}/{version}.
@Test public void testCreateApiVersionUrl() throws Exception { var baseUrl = "http://localhost/"; assertThat(UrlUtil.createApiVersionUrl(baseUrl, "foo", "bar", "universal", "1.0.0")) .isEqualTo("http://localhost/api/foo/bar/universal/1.0.0"); }
// Schedules the command for delayed execution. The scheduled-executor only performs the
// hand-off: after the delay it submits the command to the caller-supplied executor, so
// the command runs on that executor's threads, not the scheduler's. If the scheduler is
// already shut down, a disabled (no-op) future is returned. A rejected/failed hand-off
// is logged at WARNING and rethrown so the returned future reflects the failure.
@Override public Future<?> schedule(Executor executor, Runnable command, long delay, TimeUnit unit) { requireNonNull(executor); requireNonNull(command); requireNonNull(unit); if (scheduledExecutorService.isShutdown()) { return DisabledFuture.INSTANCE; } return scheduledExecutorService.schedule(() -> { try { executor.execute(command); } catch (Throwable t) { logger.log(Level.WARNING, "Exception thrown when submitting scheduled task", t); throw t; } }, delay, unit); }
// Data-provider-driven check: every Scheduler implementation eventually executes a
// scheduled task (observed via an AtomicBoolean flag) and returns a non-null future.
@Test(dataProvider = "runnableSchedulers") public void scheduler(Scheduler scheduler) { var executed = new AtomicBoolean(); Runnable task = () -> executed.set(true); var future = scheduler.schedule(executor, task, 1L, TimeUnit.NANOSECONDS); assertThat(future).isNotNull(); await().untilTrue(executed); }
// Decodes a hex-encoded Ethereum transaction. The leading type byte (inspected by
// getTransactionType) selects the typed decoder (EIP-1559, EIP-4844, EIP-2930);
// anything else is treated as a legacy RLP transaction.
public static RawTransaction decode(final String hexTransaction) { final byte[] transaction = Numeric.hexStringToByteArray(hexTransaction); TransactionType transactionType = getTransactionType(transaction); switch (transactionType) { case EIP1559: return decodeEIP1559Transaction(transaction); case EIP4844: return decodeEIP4844Transaction(transaction); case EIP2930: return decodeEIP2930Transaction(transaction); default: return decodeLegacyTransaction(transaction); } }
// Round-trip check for EIP-2930 transactions: encode then decode, and verify every
// field (chain id, nonce, gas price/limit, to, value, data, access list) survives.
@Test public void testDecoding2930() { final RawTransaction rawTransaction = createEip2930RawTransaction(); final Transaction2930 transaction2930 = (Transaction2930) rawTransaction.getTransaction(); final byte[] encodedMessage = TransactionEncoder.encode(rawTransaction); final String hexMessage = Numeric.toHexString(encodedMessage); final RawTransaction result = TransactionDecoder.decode(hexMessage); assertTrue(result.getTransaction() instanceof Transaction2930); final Transaction2930 resultTransaction2930 = (Transaction2930) result.getTransaction(); assertNotNull(result); assertEquals(transaction2930.getChainId(), resultTransaction2930.getChainId()); assertEquals(transaction2930.getNonce(), resultTransaction2930.getNonce()); assertEquals(transaction2930.getGasPrice(), resultTransaction2930.getGasPrice()); assertEquals(transaction2930.getGasLimit(), resultTransaction2930.getGasLimit()); assertEquals(transaction2930.getTo(), resultTransaction2930.getTo()); assertEquals(transaction2930.getValue(), resultTransaction2930.getValue()); assertEquals(transaction2930.getData(), resultTransaction2930.getData()); assertIterableEquals( transaction2930.getAccessList(), resultTransaction2930.getAccessList()); }
// Sets the Avro compression codec by name. A value containing a variable reference is
// stored verbatim for later substitution; otherwise the value is matched
// (case-insensitively, via parseFromToString) against the CompressionType enum,
// defaulting to NONE for unrecognised names.
public void setCompressionType( String value ) { compressionType = StringUtil.isVariable( value ) ? value : parseFromToString( value, CompressionType.values(), CompressionType.NONE ).name(); }
// Verifies codec-name matching is case-insensitive (snappy/Snappy/SNAPPY, deflate
// variants) and that unknown names such as "DEFLATE124" fall back to NONE.
@Test public void setCompressionType() { metaBase.setCompressionType( "snappy" ); Assert.assertEquals( metaBase.getCompressionType(), AvroOutputMetaBase.CompressionType.SNAPPY.toString() ); metaBase.setCompressionType( "Snappy" ); Assert.assertEquals( metaBase.getCompressionType(), AvroOutputMetaBase.CompressionType.SNAPPY.toString() ); metaBase.setCompressionType( "SNAPPY" ); Assert.assertEquals( metaBase.getCompressionType(), AvroOutputMetaBase.CompressionType.SNAPPY.toString() ); metaBase.setCompressionType( "deflate" ); Assert.assertEquals( metaBase.getCompressionType(), AvroOutputMetaBase.CompressionType.DEFLATE.toString() ); metaBase.setCompressionType( "Deflate" ); Assert.assertEquals( metaBase.getCompressionType(), AvroOutputMetaBase.CompressionType.DEFLATE.toString() ); metaBase.setCompressionType( "DEFLATE" ); Assert.assertEquals( metaBase.getCompressionType(), AvroOutputMetaBase.CompressionType.DEFLATE.toString() ); metaBase.setCompressionType( "DEFLATE124" ); Assert.assertEquals( metaBase.getCompressionType(), AvroOutputMetaBase.CompressionType.NONE.toString() ); metaBase.setCompressionType( "None" ); Assert.assertEquals( metaBase.getCompressionType(), AvroOutputMetaBase.CompressionType.NONE.toString() ); metaBase.setCompressionType( "NONE" ); Assert.assertEquals( metaBase.getCompressionType(), AvroOutputMetaBase.CompressionType.NONE.toString() ); }
/**
 * Returns the remote-executor queue size, overridable via the
 * "remote.executor.queue.size" system property. Missing, non-numeric, or
 * non-positive values fall back to {@code REMOTE_EXECUTOR_QUEUE_SIZE}.
 *
 * <p>Fix: the previous {@code NumberUtils.isDigits} guard accepted all-digit
 * strings larger than {@code Integer.MAX_VALUE}, so {@code Integer.parseInt}
 * could throw an uncaught {@code NumberFormatException}; such overflowing values
 * are now treated as invalid and yield the default. Behavior for all previously
 * valid inputs is unchanged (a leading '+' sign is additionally tolerated by
 * {@code Integer.parseInt}).
 */
public static int getRemoteExecutorQueueSize() {
    String queueSizeString = System.getProperty("remote.executor.queue.size");
    if (queueSizeString == null || queueSizeString.isEmpty()) {
        return REMOTE_EXECUTOR_QUEUE_SIZE;
    }
    try {
        int size = Integer.parseInt(queueSizeString);
        return size > 0 ? size : REMOTE_EXECUTOR_QUEUE_SIZE;
    } catch (NumberFormatException e) {
        // Covers non-numeric strings and digit strings exceeding the int range.
        return REMOTE_EXECUTOR_QUEUE_SIZE;
    }
}
// Verifies the queue-size property handling: default is 2^14, a positive override ("10")
// is honoured, and a non-positive override ("-1") falls back to the default.
@Test void testGetRemoteExecutorQueueSize() { int defaultExpectVal = 1 << 14; int defaultVal = RemoteUtils.getRemoteExecutorQueueSize(); assertEquals(defaultExpectVal, defaultVal); System.setProperty("remote.executor.queue.size", "10"); int val1 = RemoteUtils.getRemoteExecutorQueueSize(); assertEquals(10, val1); System.setProperty("remote.executor.queue.size", "-1"); int val2 = RemoteUtils.getRemoteExecutorQueueSize(); assertEquals(defaultExpectVal, val2); }
// Publishes the accumulated metadata delta on commit, depending on transaction state:
// - STARTED/CONTINUED transaction: publish nothing (the transaction is still open);
// - ABORTED transaction: publish an EMPTY delta so provenance still advances;
// - ENDED or no transaction: publish the accumulated delta.
// The manifest carries provenance (last offset/epoch/log time), leader/epoch, and the
// batch count, byte count, and elapsed-time statistics gathered while loading.
public void maybeFlushBatches(LeaderAndEpoch leaderAndEpoch) { MetadataProvenance provenance = new MetadataProvenance(lastOffset, lastEpoch, lastContainedLogTimeMs); LogDeltaManifest manifest = LogDeltaManifest.newBuilder() .provenance(provenance) .leaderAndEpoch(leaderAndEpoch) .numBatches(numBatches) .elapsedNs(totalBatchElapsedNs) .numBytes(numBytes) .build(); switch (transactionState) { case STARTED_TRANSACTION: case CONTINUED_TRANSACTION: log.debug("handleCommit: not publishing since a transaction starting at {} is still in progress. " + "{} batch(es) processed so far.", image.offset(), numBatches); break; case ABORTED_TRANSACTION: log.debug("handleCommit: publishing empty delta between {} and {} from {} batch(es) " + "since a transaction was aborted", image.offset(), manifest.provenance().lastContainedOffset(), manifest.numBatches()); applyDeltaAndUpdate(new MetadataDelta.Builder().setImage(image).build(), manifest); break; case ENDED_TRANSACTION: case NO_TRANSACTION: if (log.isDebugEnabled()) { log.debug("handleCommit: Generated a metadata delta between {} and {} from {} batch(es) in {} us.", image.offset(), manifest.provenance().lastContainedOffset(), manifest.numBatches(), NANOSECONDS.toMicros(manifest.elapsedNs())); } applyDeltaAndUpdate(delta, manifest); break; } }
// Loads a single batch interleaving no-op records with two complete topic transactions
// and a third transaction wrapping a non-transactional topic batch, then verifies the
// number of published updates and the image contents before and after the final flush.
// (Line breaks restored around the embedded comment; tokens unchanged.)
@Test public void testMultipleTransactionsInOneBatchesWithNoOp() {
  List<ApiMessageAndVersion> batchRecords = new ArrayList<>();
  batchRecords.addAll(noOpRecords(1));
  batchRecords.addAll(TOPIC_TXN_BATCH_1);
  batchRecords.addAll(noOpRecords(1));
  batchRecords.addAll(TOPIC_TXN_BATCH_2);
  // A batch with non-transactional records between two transactions causes a delta to get published
  batchRecords.addAll(noOpRecords(1));
  batchRecords.addAll(TXN_BEGIN_SINGLETON);
  batchRecords.addAll(noOpRecords(1));
  batchRecords.addAll(TOPIC_NO_TXN_BATCH);
  batchRecords.addAll(noOpRecords(1));
  batchRecords.addAll(TXN_END_SINGLETON);
  batchRecords.addAll(noOpRecords(1));
  MockMetadataUpdater updater = new MockMetadataUpdater();
  MockFaultHandler faultHandler = new MockFaultHandler("testMultipleTransactionsInOneBatches");
  MetadataBatchLoader batchLoader = loadSingleBatch(updater, faultHandler, batchRecords);
  assertEquals(2, updater.updates);
  assertEquals(0, updater.latestManifest.numBytes());
  assertEquals(18, updater.latestImage.provenance().lastContainedOffset());
  assertEquals(42, updater.latestImage.provenance().lastContainedEpoch());
  assertNotNull(updater.latestImage.topics().getTopic("foo"));
  assertNull(updater.latestImage.topics().getTopic("bar"));
  batchLoader.maybeFlushBatches(LEADER_AND_EPOCH);
  assertEquals(3, updater.updates);
  assertEquals(100, updater.latestManifest.numBytes());
  assertEquals(26, updater.latestImage.provenance().lastContainedOffset());
  assertEquals(42, updater.latestImage.provenance().lastContainedEpoch());
  assertNotNull(updater.latestImage.topics().getTopic("foo"));
  assertNotNull(updater.latestImage.topics().getTopic("bar"));
}
// Returns the fixed identifier of this component: "entity-listener".
@Override public String getId() { return "entity-listener"; }
// Reactive round-trip: inserts an entity (checking the create-listener fired once),
// updates it with setNull("age"), then verifies the reloaded age is absent (empty Mono).
@Test public void testPrepareModifySetNull() { EventTestEntity entity = EventTestEntity.of("prepare-setNull", 20); reactiveRepository .insert(entity) .as(StepVerifier::create) .expectNext(1) .verifyComplete(); Assert.assertEquals(listener.created.getAndSet(0), 1); reactiveRepository .createUpdate() .set("name", "prepare-setNull-set") .setNull("age") .where("id", entity.getId()) .execute() .as(StepVerifier::create) .expectNextCount(1) .verifyComplete(); reactiveRepository .findById(entity.getId()) .mapNotNull(EventTestEntity::getAge) .as(StepVerifier::create) .expectComplete() .verify(); }
// Thin delegation: paged tenant-package query is executed entirely by the mapper.
@Override public PageResult<TenantPackageDO> getTenantPackagePage(TenantPackagePageReqVO pageReqVO) { return tenantPackageMapper.selectPage(pageReqVO); }
// Verifies the paged query: one record matches all four filters (name/status/remark/
// createTime) while four near-miss records each break exactly one filter.
// (Comments translated to English; runtime strings and tokens unchanged.)
@Test public void testGetTenantPackagePage() {
  // mock data
  TenantPackageDO dbTenantPackage = randomPojo(TenantPackageDO.class, o -> { // this one will match the query below
    o.setName("芋道源码");
    o.setStatus(CommonStatusEnum.ENABLE.getStatus());
    o.setRemark("源码解析");
    o.setCreateTime(buildTime(2022, 10, 10));
  });
  tenantPackageMapper.insert(dbTenantPackage);
  // name does not match
  tenantPackageMapper.insert(cloneIgnoreId(dbTenantPackage, o -> o.setName("源码")));
  // status does not match
  tenantPackageMapper.insert(cloneIgnoreId(dbTenantPackage, o -> o.setStatus(CommonStatusEnum.DISABLE.getStatus())));
  // remark does not match
  tenantPackageMapper.insert(cloneIgnoreId(dbTenantPackage, o -> o.setRemark("解析")));
  // createTime does not match
  tenantPackageMapper.insert(cloneIgnoreId(dbTenantPackage, o -> o.setCreateTime(buildTime(2022, 11, 11))));
  // prepare the request parameters
  TenantPackagePageReqVO reqVO = new TenantPackagePageReqVO();
  reqVO.setName("芋道");
  reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus());
  reqVO.setRemark("源码");
  reqVO.setCreateTime(buildBetweenTime(2022, 10, 9, 2022, 10, 11));
  // invoke
  PageResult<TenantPackageDO> pageResult = tenantPackageService.getTenantPackagePage(reqVO);
  // assert
  assertEquals(1, pageResult.getTotal());
  assertEquals(1, pageResult.getList().size());
  assertPojoEquals(dbTenantPackage, pageResult.getList().get(0));
}
// Builds the "Search State" system-info section from a live Elasticsearch cluster-stats
// call, exposing the cluster health status and node count as attributes.
@Override public ProtobufSystemInfo.Section toProtobuf() { ProtobufSystemInfo.Section.Builder protobuf = ProtobufSystemInfo.Section.newBuilder(); protobuf.setName("Search State"); ClusterStatsResponse stats = esClient.clusterStats(); setAttribute(protobuf, "State", stats.getHealthStatus().name()); setAttribute(protobuf, "Nodes", stats.getNodeCount()); return protobuf.build(); }
// Verifies the section is titled "Search State".
@Test public void test_name() { assertThat(underTest.toProtobuf().getName()).isEqualTo("Search State"); }
// Rate-limiting servlet filter: requests not classified as a rate-limited target pass
// straight through. For rate-limited targets that exceed the limit, statistics are
// always incremented; the request is actually rejected with 503 only when the rate
// limiter is enabled in server config (otherwise it is counted but still served).
@Override public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { Target target = getTarget(request); if (target == Target.Other) { chain.doFilter(request, response); return; } HttpServletRequest httpRequest = (HttpServletRequest) request; if (isRateLimited(httpRequest, target)) { incrementStats(target); if (serverConfig.isRateLimiterEnabled()) { ((HttpServletResponse) response).setStatus(HttpServletResponse.SC_SERVICE_UNAVAILABLE); return; } } chain.doFilter(request, response); }
// With the rate limiter disabled, verifies that exceeding the window only increments
// the RATE_LIMITED_CANDIDATES counter — the request is still served, never 503'd.
// (Line breaks restored around the embedded comments; tokens unchanged.)
@Test public void testCustomClientThrottlingCandidatesCounter() throws Exception {
  ConfigurationManager.getConfigInstance().setProperty("eureka.rateLimiter.enabled", false);
  // Custom clients will go up to the window limit
  whenRequest(FULL_FETCH, CUSTOM_CLIENT);
  filter.doFilter(request, response, filterChain);
  filter.doFilter(request, response, filterChain);
  verify(filterChain, times(2)).doFilter(request, response);
  // Now we hit the limit
  long rateLimiterCounter = EurekaMonitors.RATE_LIMITED_CANDIDATES.getCount();
  filter.doFilter(request, response, filterChain);
  assertEquals("Expected rate limiter counter increase", rateLimiterCounter + 1, EurekaMonitors.RATE_LIMITED_CANDIDATES.getCount());
  // We just test the counter
  verify(response, times(0)).setStatus(HttpServletResponse.SC_SERVICE_UNAVAILABLE);
}
/**
 * Sorts the given list in place using {@code SIMPLE_NAME_COMPARATOR} and returns the
 * same list (for call chaining). A null or empty list is returned untouched.
 *
 * <p>Idiom cleanup: {@code size() > 0} replaced with {@code !isEmpty()}, and
 * {@code Collections.sort(list, c)} with the equivalent {@code List.sort(c)}.
 *
 * <p>NOTE(review): callers pass lists containing null elements, so the comparator is
 * expected to tolerate nulls — confirm against SIMPLE_NAME_COMPARATOR's definition.
 *
 * @param list the list to sort in place; may be null
 * @return the same list instance (or null if the input was null)
 */
public static List<String> sortSimpleName(List<String> list) {
    if (list != null && !list.isEmpty()) {
        list.sort(SIMPLE_NAME_COMPARATOR);
    }
    return list;
}
// Verifies null elements are ordered first by the simple-name sort.
@Test void testSortSimpleName() { List<String> list = new ArrayList<String>(); list.add("aaa.z"); list.add("b"); list.add(null); list.add("zzz.a"); list.add("c"); list.add(null); List<String> sorted = CollectionUtils.sortSimpleName(list); assertNull(sorted.get(0)); assertNull(sorted.get(1)); }
@Override public HttpResponse get() throws InterruptedException, ExecutionException { try { final Object result = process(0, null); if (result instanceof Throwable) { throw new ExecutionException((Throwable) result); } return (HttpResponse) result; } finally { isDone = true; } }
// Verifies that a stored ExecutionException is rethrown by the timed get variant.
@Test(expected = ExecutionException.class) public void errGetTimeoutExecution() throws ExecutionException, InterruptedException, TimeoutException { get(new ExecutionException(new Exception("wrong")), true); }
// Async POST with a JSON body: forces the Content-Type header to application/json
// (overwriting any caller-provided value) before delegating to the generic executor.
public <T> void postJson(String url, Header header, Query query, String body, Type responseType, Callback<T> callback) { execute(url, HttpMethod.POST, new RequestHttpEntity(header.setContentType(MediaType.APPLICATION_JSON), query, body), responseType, callback); }
// Verifies postJson overwrites a pre-set XML Content-Type with application/json and
// issues a POST through the underlying request client.
@Test void testPostJson() throws Exception { Header header = Header.newInstance().setContentType(MediaType.APPLICATION_XML); restTemplate.postJson(TEST_URL, header, "body", String.class, mockCallback); verify(requestClient).execute(any(), eq("POST"), any(), any(), eq(mockCallback)); assertEquals(MediaType.APPLICATION_JSON, header.getValue(HttpHeaderConsts.CONTENT_TYPE)); }
// Parses a textual MAC address into raw bytes. A 17-char string is MAC-48/EUI-48
// (6 bytes), a 23-char string is EUI-64 (8 bytes); anything else is rejected. The
// separator is taken from index 2 and validated, then every subsequent group must use
// that same separator — mixing ':' and '-' throws IllegalArgumentException. Each hex
// pair is decoded via StringUtil.decodeHexByte; the final byte has no trailing separator.
public static byte[] parseMAC(String value) { final byte[] machineId; final char separator; switch (value.length()) { case 17: separator = value.charAt(2); validateMacSeparator(separator); machineId = new byte[EUI48_MAC_ADDRESS_LENGTH]; break; case 23: separator = value.charAt(2); validateMacSeparator(separator); machineId = new byte[EUI64_MAC_ADDRESS_LENGTH]; break; default: throw new IllegalArgumentException("value is not supported [MAC-48, EUI-48, EUI-64]"); } final int end = machineId.length - 1; int j = 0; for (int i = 0; i < end; ++i, j += 3) { final int sIndex = j + 2; machineId[i] = StringUtil.decodeHexByte(value, j); if (value.charAt(sIndex) != separator) { throw new IllegalArgumentException("expected separator '" + separator + " but got '" + value.charAt(sIndex) + "' at index: " + sIndex); } } machineId[end] = StringUtil.decodeHexByte(value, j); return machineId; }
// Verifies an EUI-64 address mixing '-' and ':' separators is rejected.
@Test public void testParseMacInvalidEUI64MixedSeparatorA() { assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() { parseMAC("00-AA-11-FF-FE-BB-22:CC"); } }); }
@Override public void execute(final ConnectionSession connectionSession) { String databaseName = sqlStatement.getFromDatabase().map(schema -> schema.getDatabase().getIdentifier().getValue()).orElseGet(connectionSession::getUsedDatabaseName); queryResultMetaData = createQueryResultMetaData(databaseName); mergedResult = new TransparentMergedResult(getQueryResult(databaseName)); }
// Verifies SHOW TABLES LIKE 't_account%' returns exactly the three matching tables,
// in order, through a single-column result set.
@Test void assertShowTablesExecutorWithLikeFilter() throws SQLException { MySQLShowTablesStatement showTablesStatement = new MySQLShowTablesStatement(); ShowFilterSegment showFilterSegment = mock(ShowFilterSegment.class); when(showFilterSegment.getLike()).thenReturn(Optional.of(new ShowLikeSegment(0, 10, "t_account%"))); showTablesStatement.setFilter(showFilterSegment); ShowTablesExecutor executor = new ShowTablesExecutor(showTablesStatement, TypedSPILoader.getService(DatabaseType.class, "MySQL")); Map<String, ShardingSphereDatabase> databases = getDatabases(); ContextManager contextManager = mockContextManager(databases); when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager); executor.execute(mockConnectionSession()); assertThat(executor.getQueryResultMetaData().getColumnCount(), is(1)); executor.getMergedResult().next(); assertThat(executor.getMergedResult().getValue(1, Object.class), is("t_account")); executor.getMergedResult().next(); assertThat(executor.getMergedResult().getValue(1, Object.class), is("t_account_bak")); executor.getMergedResult().next(); assertThat(executor.getMergedResult().getValue(1, Object.class), is("t_account_detail")); assertFalse(executor.getMergedResult().next()); }
// Thin delegation: converts the plugin's JSON check-connection response into a Result.
@Override public Result responseMessageForCheckConnectionToSCM(String responseBody) { return jsonResultMessageHandler.toResult(responseBody); }
// Verifies a "failure" JSON response is converted into a failed Result carrying both messages.
@Test public void shouldBuildFailureResultFromCheckSCMConnectionResponse() throws Exception { String responseBody = "{\"status\":\"failure\",messages=[\"message-one\",\"message-two\"]}"; Result result = messageHandler.responseMessageForCheckConnectionToSCM(responseBody); assertFailureResult(result, List.of("message-one", "message-two")); }
// Static factory: builds the AutoValue-generated UAssign node for "variable = expression".
public static UAssign create(UExpression variable, UExpression expression) { return new AutoValue_UAssign(variable, expression); }
// Verifies UAssign equality semantics: nodes are equal only when both the target
// variable and the assigned expression match.
@Test public void equality() { new EqualsTester() .addEqualityGroup(UAssign.create(UFreeIdent.create("foo"), ULiteral.intLit(5))) .addEqualityGroup(UAssign.create(UFreeIdent.create("bar"), ULiteral.intLit(5))) .addEqualityGroup(UAssign.create(UFreeIdent.create("foo"), ULiteral.intLit(20))) .testEquals(); }
// Static factory: starts a new configuration Builder with default settings.
public static Builder custom() { return new Builder(); }
// Verifies a zero slow-call duration threshold is rejected at build time.
@Test(expected = IllegalArgumentException.class) public void zeroSlowCallDurationThresholdShouldFail() { custom().slowCallDurationThreshold(Duration.ofMillis(0)).build(); }
/**
 * Null-safe string equality. A null first argument never matches anything —
 * including another null (unlike {@code Objects.equals}, which treats two nulls
 * as equal); this asymmetric behavior is preserved deliberately.
 *
 * @param str1 left operand, may be null (always yields false)
 * @param str2 right operand, may be null
 * @return true only when str1 is non-null and equals str2
 */
public static boolean equals(String str1, String str2) {
    return str1 != null && str1.equals(str2);
}
// Verifies basic non-null equality and inequality.
@Test public void testEquals(){ Assert.assertTrue(StringKit.equals("a", "a")); Assert.assertFalse(StringKit.equals("a", "b")); }
/**
 * Builds one {@code DescribedGroup} per requested group id, each stamped with the
 * same error code — used to fail an entire DescribeGroups request uniformly.
 *
 * @param groupIds the group ids to describe
 * @param error the error to attach to every described group
 * @return a list with one error-bearing DescribedGroup per input id, in order
 */
public static List<DescribeGroupsResponseData.DescribedGroup> getErrorDescribedGroupList(
    List<String> groupIds,
    Errors error
) {
    return groupIds.stream()
        .map(groupId -> {
            DescribeGroupsResponseData.DescribedGroup describedGroup =
                new DescribeGroupsResponseData.DescribedGroup();
            describedGroup.setGroupId(groupId);
            describedGroup.setErrorCode(error.code());
            return describedGroup;
        })
        .collect(Collectors.toList());
}
// Verifies each of the three group ids yields a DescribedGroup carrying the
// COORDINATOR_LOAD_IN_PROGRESS error code, preserving input order.
@Test public void testGetErrorDescribedGroupList() { List<DescribeGroupsResponseData.DescribedGroup> expectedDescribedGroupList = Arrays.asList( new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-id-1") .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()), new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-id-2") .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()), new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-id-3") .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()) ); List<DescribeGroupsResponseData.DescribedGroup> describedGroupList = getErrorDescribedGroupList( Arrays.asList("group-id-1", "group-id-2", "group-id-3"), Errors.COORDINATOR_LOAD_IN_PROGRESS ); assertEquals(expectedDescribedGroupList, describedGroupList); }
// Records a message only if its exact text has not been seen before; duplicates are
// silently dropped. New messages are timestamped with the injected system clock.
// NOTE(review): no empty/null validation is visible here — the companion test expects
// an IllegalArgumentException for "" which presumably originates in the Message
// constructor; confirm before relying on it.
@Override public void addUnique(String text) { if (this.seen.add(text)) { this.messages.add(new Message(text, system2.now())); } }
// Verifies an empty message is rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class) public void addUnique_fails_with_IAE_when_message_is_empty() { underTest.addUnique(""); }
// Updates a social client record. (Comments translated to English; tokens unchanged.)
@Override public void updateSocialClient(SocialClientSaveReqVO updateReqVO) {
  // Validate that the record exists
  validateSocialClientExists(updateReqVO.getId());
  // Validate uniqueness of (userType, socialType) excluding this record
  validateSocialClientUnique(updateReqVO.getId(), updateReqVO.getUserType(), updateReqVO.getSocialType());
  // Update
  SocialClientDO updateObj = BeanUtils.toBean(updateReqVO, SocialClientDO.class);
  socialClientMapper.updateById(updateObj);
}
// Happy-path update: inserts an existing record, updates it with a request keyed to the
// same id, then reloads and compares field-by-field.
// (Comments translated to English; tokens unchanged.)
@Test public void testUpdateSocialClient_success() {
  // mock data
  SocialClientDO dbSocialClient = randomPojo(SocialClientDO.class);
  socialClientMapper.insert(dbSocialClient);// insert an existing record first
  // prepare the request parameters
  SocialClientSaveReqVO reqVO = randomPojo(SocialClientSaveReqVO.class, o -> {
    o.setId(dbSocialClient.getId()); // set the id of the record to update
    o.setSocialType(randomEle(SocialTypeEnum.values()).getType()) .setUserType(randomEle(UserTypeEnum.values()).getValue()) .setStatus(randomCommonStatus());
  });
  // invoke
  socialClientService.updateSocialClient(reqVO);
  // verify the update took effect
  SocialClientDO socialClient = socialClientMapper.selectById(reqVO.getId()); // fetch the latest state
  assertPojoEquals(reqVO, socialClient);
}
/**
 * Validates pipeline options and constructs a DataflowRunner.
 *
 * <p>Checks performed, in order: required options (appName; region when targeting the
 * Dataflow service endpoint), worker-pool settings, gcpTempLocation / stagingLocation /
 * saveProfilesToGcs path validity, existence of explicitly listed filesToStage (or
 * classpath auto-detection when unset), jobName shape (lowercased to meet service
 * rules), project id format, non-negative worker harness thread count, JFR-recording
 * Java-version support, streaming GCS buffer default, and finally the user-agent
 * string derived from the runner name/version and JRE version.
 *
 * <p>(Reconstructed from collapsed source lines: a string literal split across the
 * original line break was rejoined and multi-line comments restored; tokens unchanged.)
 *
 * @throws IllegalArgumentException when any validation fails
 */
public static DataflowRunner fromOptions(PipelineOptions options) {
  DataflowPipelineOptions dataflowOptions = PipelineOptionsValidator.validate(DataflowPipelineOptions.class, options);
  // Collect every missing required option so the user sees them all in one error.
  ArrayList<String> missing = new ArrayList<>();
  if (dataflowOptions.getAppName() == null) { missing.add("appName"); }
  if (Strings.isNullOrEmpty(dataflowOptions.getRegion()) && isServiceEndpoint(dataflowOptions.getDataflowEndpoint())) { missing.add("region"); }
  if (missing.size() > 0) {
    throw new IllegalArgumentException( "Missing required pipeline options: " + Joiner.on(',').join(missing));
  }
  validateWorkerSettings( PipelineOptionsValidator.validate(DataflowPipelineWorkerPoolOptions.class, options));
  PathValidator validator = dataflowOptions.getPathValidator();
  String gcpTempLocation;
  try {
    gcpTempLocation = dataflowOptions.getGcpTempLocation();
  } catch (Exception e) {
    throw new IllegalArgumentException( "DataflowRunner requires gcpTempLocation, " + "but failed to retrieve a value from PipelineOptions", e);
  }
  validator.validateOutputFilePrefixSupported(gcpTempLocation);
  String stagingLocation;
  try {
    stagingLocation = dataflowOptions.getStagingLocation();
  } catch (Exception e) {
    throw new IllegalArgumentException( "DataflowRunner requires stagingLocation, " + "but failed to retrieve a value from PipelineOptions", e);
  }
  validator.validateOutputFilePrefixSupported(stagingLocation);
  if (!isNullOrEmpty(dataflowOptions.getSaveProfilesToGcs())) {
    validator.validateOutputFilePrefixSupported(dataflowOptions.getSaveProfilesToGcs());
  }
  if (dataflowOptions.getFilesToStage() != null) {
    // The user specifically requested these files, so fail now if they do not exist.
    // (automatically detected classpath elements are permitted to not exist, so later
    // staging will not fail on nonexistent files)
    dataflowOptions.getFilesToStage().stream()
        .forEach(
            stagedFileSpec -> {
              File localFile;
              if (stagedFileSpec.contains("=")) {
                // "name=path" form: stage the file at 'path' under the given name.
                String[] components = stagedFileSpec.split("=", 2);
                localFile = new File(components[1]);
              } else {
                localFile = new File(stagedFileSpec);
              }
              if (!localFile.exists()) {
                // should be FileNotFoundException, but for build-time backwards compatibility
                // cannot add checked exception
                throw new RuntimeException( String.format("Non-existent files specified in filesToStage: %s", localFile));
              }
            });
  } else {
    dataflowOptions.setFilesToStage( detectClassPathResourcesToStage(DataflowRunner.class.getClassLoader(), options));
    if (dataflowOptions.getFilesToStage().isEmpty()) {
      throw new IllegalArgumentException("No files to stage has been found.");
    } else {
      LOG.info( "PipelineOptions.filesToStage was not specified. " + "Defaulting to files from the classpath: will stage {} files. " + "Enable logging at DEBUG level to see which files will be staged.", dataflowOptions.getFilesToStage().size());
      LOG.debug("Classpath elements: {}", dataflowOptions.getFilesToStage());
    }
  }
  // Verify jobName according to service requirements, truncating converting to lowercase if
  // necessary.
  String jobName = dataflowOptions.getJobName().toLowerCase();
  checkArgument( jobName.matches("[a-z]([-a-z0-9]*[a-z0-9])?"), "JobName invalid; the name must consist of only the characters " + "[-a-z0-9], starting with a letter and ending with a letter " + "or number");
  if (!jobName.equals(dataflowOptions.getJobName())) {
    LOG.info( "PipelineOptions.jobName did not match the service requirements. " + "Using {} instead of {}.", jobName, dataflowOptions.getJobName());
  }
  dataflowOptions.setJobName(jobName);
  // Verify project
  String project = dataflowOptions.getProject();
  if (project.matches("[0-9]*")) {
    throw new IllegalArgumentException( "Project ID '" + project + "' invalid. Please make sure you specified the Project ID, not project number.");
  } else if (!project.matches(PROJECT_ID_REGEXP)) {
    throw new IllegalArgumentException( "Project ID '" + project + "' invalid. Please make sure you specified the Project ID, not project" + " description.");
  }
  DataflowPipelineDebugOptions debugOptions = dataflowOptions.as(DataflowPipelineDebugOptions.class);
  // Verify the number of worker threads is a valid value
  if (debugOptions.getNumberOfWorkerHarnessThreads() < 0) {
    throw new IllegalArgumentException( "Number of worker harness threads '" + debugOptions.getNumberOfWorkerHarnessThreads() + "' invalid. Please make sure the value is non-negative.");
  }
  // Verify that if recordJfrOnGcThrashing is set, the pipeline is at least on java 11
  if (dataflowOptions.getRecordJfrOnGcThrashing() && Environments.getJavaVersion() == Environments.JavaVersion.java8) {
    throw new IllegalArgumentException( "recordJfrOnGcThrashing is only supported on java 9 and up.");
  }
  if (dataflowOptions.isStreaming() && dataflowOptions.getGcsUploadBufferSizeBytes() == null) {
    dataflowOptions.setGcsUploadBufferSizeBytes(GCS_UPLOAD_BUFFER_SIZE_BYTES_DEFAULT);
  }
  // Adding the Java version to the SDK name for user's and support convenience.
  String agentJavaVer = "(JRE 8 environment)";
  if (Environments.getJavaVersion() != Environments.JavaVersion.java8) {
    agentJavaVer = String.format("(JRE %s environment)", Environments.getJavaVersion().specification());
  }
  DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo();
  String userAgentName = dataflowRunnerInfo.getName();
  Preconditions.checkArgument( !userAgentName.equals(""), "Dataflow runner's `name` property cannot be empty.");
  String userAgentVersion = dataflowRunnerInfo.getVersion();
  Preconditions.checkArgument( !userAgentVersion.equals(""), "Dataflow runner's `version` property cannot be empty.");
  String userAgent = String.format("%s/%s%s", userAgentName, userAgentVersion, agentJavaVer).replace(" ", "_");
  dataflowOptions.setUserAgent(userAgent);
  return new DataflowRunner(dataflowOptions);
}
// Verifies that when filesToStage is unset, fromOptions auto-detects classpath
// resources and leaves a non-empty staging list on the options.
@Test public void runWithDefaultFilesToStage() throws Exception { DataflowPipelineOptions options = buildPipelineOptions(); options.setFilesToStage(null); DataflowRunner.fromOptions(options); assertFalse(options.getFilesToStage().isEmpty()); }
// Two bundles are equal when they belong to the same namespace AND cover exactly the
// same key range (both endpoints and both bound types must match). Any non-bundle
// argument (including null) yields false.
// NOTE(review): hashCode() is not visible in this chunk — it must hash the same
// fields (nsname + range endpoints/bound types) to keep the equals/hashCode contract;
// confirm against the full class.
@Override public boolean equals(Object other) { if (other instanceof NamespaceBundle) { NamespaceBundle obj = (NamespaceBundle) other; return Objects.equals(this.nsname, obj.nsname) && (Objects.equals(this.keyRange.lowerEndpoint(), obj.keyRange.lowerEndpoint()) && Objects.equals(this.keyRange.lowerBoundType(), obj.keyRange.lowerBoundType()) && Objects.equals(this.keyRange.upperEndpoint(), obj.keyRange.upperEndpoint()) && Objects.equals(this.keyRange.upperBoundType(), obj.keyRange.upperBoundType())); } return false; }
// Verifies bundle equality: same namespace + same range are equal; a different range
// or a different namespace breaks equality.
@Test public void testEquals() throws Exception { NamespaceBundle bundle = factory.getBundle(NamespaceName.get("pulsar/use/ns1"), Range.range(0l, BoundType.CLOSED, 0x40000000L, BoundType.OPEN)); NamespaceBundle bundle2 = factory.getBundle(NamespaceName.get("pulsar/use/ns1"), Range.range(0x20000000l, BoundType.CLOSED, 0x40000000L, BoundType.OPEN)); assertNotEquals(bundle2, bundle); NamespaceBundle bundle0 = factory.getBundle(NamespaceName.get("pulsar/use/ns1"), Range.range(0l, BoundType.CLOSED, 0x40000000L, BoundType.OPEN)); assertEquals(bundle, bundle0); NamespaceBundle otherBundle = factory.getBundle(NamespaceName.get("pulsar/use/ns2"), Range.range(0l, BoundType.CLOSED, 0x40000000L, BoundType.OPEN)); assertNotEquals(bundle, otherBundle); }
/**
 * Converts a MySQL column type definition into a SeaTunnel {@link Column}.
 *
 * <p>Normalizes the raw data type first (strips a trailing ZEROFILL marker,
 * appends " UNSIGNED" when flagged), then maps it case-by-case onto SeaTunnel
 * types, recording column length and scale where the source type carries them.
 * Unsigned types that cannot widen safely (FLOAT/DOUBLE/DECIMAL UNSIGNED) log
 * an overflow warning. Unknown types raise a conversion error.
 *
 * @param typeDefine column definition read from MySQL metadata
 * @return the converted SeaTunnel column
 */
@Override
public Column convert(BasicTypeDefine typeDefine) {
    PhysicalColumn.PhysicalColumnBuilder builder =
            PhysicalColumn.builder()
                    .name(typeDefine.getName())
                    .sourceType(typeDefine.getColumnType())
                    .nullable(typeDefine.isNullable())
                    .defaultValue(typeDefine.getDefaultValue())
                    .comment(typeDefine.getComment());
    String mysqlDataType = typeDefine.getDataType().toUpperCase();
    // e.g. "INT ZEROFILL" -> "INT"; ZEROFILL does not affect the mapped type
    if (mysqlDataType.endsWith("ZEROFILL")) {
        mysqlDataType = mysqlDataType.substring(0, mysqlDataType.length() - "ZEROFILL".length()).trim();
    }
    if (typeDefine.isUnsigned() && !(mysqlDataType.endsWith(" UNSIGNED"))) {
        mysqlDataType = mysqlDataType + " UNSIGNED";
    }
    switch (mysqlDataType) {
        case MYSQL_NULL:
            builder.dataType(BasicType.VOID_TYPE);
            break;
        case MYSQL_BIT:
            // BIT with no/1-bit length behaves as a boolean; wider BIT(M) is raw bytes
            if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
                builder.dataType(BasicType.BOOLEAN_TYPE);
            } else if (typeDefine.getLength() == 1) {
                builder.dataType(BasicType.BOOLEAN_TYPE);
            } else {
                builder.dataType(PrimitiveByteArrayType.INSTANCE);
                // BIT(M) -> BYTE(M/8)
                long byteLength = typeDefine.getLength() / 8;
                byteLength += typeDefine.getLength() % 8 > 0 ? 1 : 0;
                builder.columnLength(byteLength);
            }
            break;
        case MYSQL_TINYINT:
            // MySQL convention: tinyint(1) is commonly used as a boolean column
            if (typeDefine.getColumnType().equalsIgnoreCase("tinyint(1)")) {
                builder.dataType(BasicType.BOOLEAN_TYPE);
            } else {
                builder.dataType(BasicType.BYTE_TYPE);
            }
            break;
        case MYSQL_TINYINT_UNSIGNED:
        case MYSQL_SMALLINT:
            builder.dataType(BasicType.SHORT_TYPE);
            break;
        case MYSQL_SMALLINT_UNSIGNED:
        case MYSQL_MEDIUMINT:
        case MYSQL_MEDIUMINT_UNSIGNED:
        case MYSQL_INT:
        case MYSQL_INTEGER:
        case MYSQL_YEAR:
            builder.dataType(BasicType.INT_TYPE);
            break;
        case MYSQL_INT_UNSIGNED:
        case MYSQL_INTEGER_UNSIGNED:
        case MYSQL_BIGINT:
            builder.dataType(BasicType.LONG_TYPE);
            break;
        case MYSQL_BIGINT_UNSIGNED:
            // BIGINT UNSIGNED exceeds Java long; represent as DECIMAL(20, 0)
            DecimalType intDecimalType = new DecimalType(20, 0);
            builder.dataType(intDecimalType);
            builder.columnLength(Long.valueOf(intDecimalType.getPrecision()));
            builder.scale(intDecimalType.getScale());
            break;
        case MYSQL_FLOAT:
            builder.dataType(BasicType.FLOAT_TYPE);
            break;
        case MYSQL_FLOAT_UNSIGNED:
            log.warn("{} will probably cause value overflow.", MYSQL_FLOAT_UNSIGNED);
            builder.dataType(BasicType.FLOAT_TYPE);
            break;
        case MYSQL_DOUBLE:
            builder.dataType(BasicType.DOUBLE_TYPE);
            break;
        case MYSQL_DOUBLE_UNSIGNED:
            log.warn("{} will probably cause value overflow.", MYSQL_DOUBLE_UNSIGNED);
            builder.dataType(BasicType.DOUBLE_TYPE);
            break;
        case MYSQL_DECIMAL:
            Preconditions.checkArgument(typeDefine.getPrecision() > 0);
            DecimalType decimalType;
            // Clamp precision to the supported maximum; values beyond it may overflow
            if (typeDefine.getPrecision() > DEFAULT_PRECISION) {
                log.warn("{} will probably cause value overflow.", MYSQL_DECIMAL);
                decimalType = new DecimalType(DEFAULT_PRECISION, DEFAULT_SCALE);
            } else {
                decimalType =
                        new DecimalType(
                                typeDefine.getPrecision().intValue(),
                                typeDefine.getScale() == null ? 0 : typeDefine.getScale().intValue());
            }
            builder.dataType(decimalType);
            builder.columnLength(Long.valueOf(decimalType.getPrecision()));
            builder.scale(decimalType.getScale());
            break;
        case MYSQL_DECIMAL_UNSIGNED:
            Preconditions.checkArgument(typeDefine.getPrecision() > 0);
            log.warn("{} will probably cause value overflow.", MYSQL_DECIMAL_UNSIGNED);
            // One extra digit of precision to accommodate the unsigned range
            DecimalType decimalUnsignedType =
                    new DecimalType(
                            typeDefine.getPrecision().intValue() + 1,
                            typeDefine.getScale() == null ? 0 : typeDefine.getScale().intValue());
            builder.dataType(decimalUnsignedType);
            builder.columnLength(Long.valueOf(decimalUnsignedType.getPrecision()));
            builder.scale(decimalUnsignedType.getScale());
            break;
        case MYSQL_ENUM:
            builder.dataType(BasicType.STRING_TYPE);
            if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
                builder.columnLength(100L);
            } else {
                builder.columnLength(typeDefine.getLength());
            }
            break;
        case MYSQL_CHAR:
        case MYSQL_VARCHAR:
            if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
                builder.columnLength(TypeDefineUtils.charTo4ByteLength(1L));
            } else {
                builder.columnLength(typeDefine.getLength());
            }
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case MYSQL_TINYTEXT:
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(POWER_2_8 - 1);
            break;
        case MYSQL_TEXT:
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(POWER_2_16 - 1);
            break;
        case MYSQL_MEDIUMTEXT:
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(POWER_2_24 - 1);
            break;
        case MYSQL_LONGTEXT:
            builder.dataType(BasicType.STRING_TYPE);
            builder.columnLength(POWER_2_32 - 1);
            break;
        case MYSQL_JSON:
            builder.dataType(BasicType.STRING_TYPE);
            break;
        case MYSQL_BINARY:
        case MYSQL_VARBINARY:
            if (typeDefine.getLength() == null || typeDefine.getLength() <= 0) {
                builder.columnLength(1L);
            } else {
                builder.columnLength(typeDefine.getLength());
            }
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            break;
        case MYSQL_TINYBLOB:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(POWER_2_8 - 1);
            break;
        case MYSQL_BLOB:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(POWER_2_16 - 1);
            break;
        case MYSQL_MEDIUMBLOB:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(POWER_2_24 - 1);
            break;
        case MYSQL_LONGBLOB:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            builder.columnLength(POWER_2_32 - 1);
            break;
        case MYSQL_GEOMETRY:
            builder.dataType(PrimitiveByteArrayType.INSTANCE);
            break;
        case MYSQL_DATE:
            builder.dataType(LocalTimeType.LOCAL_DATE_TYPE);
            break;
        case MYSQL_TIME:
            builder.dataType(LocalTimeType.LOCAL_TIME_TYPE);
            builder.scale(typeDefine.getScale());
            break;
        case MYSQL_DATETIME:
        case MYSQL_TIMESTAMP:
            builder.dataType(LocalTimeType.LOCAL_DATE_TIME_TYPE);
            builder.scale(typeDefine.getScale());
            break;
        default:
            throw CommonError.convertToSeaTunnelTypeError(
                    DatabaseIdentifier.MYSQL, mysqlDataType, typeDefine.getName());
    }
    return builder.build();
}
// TIME columns must map to LOCAL_TIME_TYPE, preserving name, source type and
// the fractional-second scale.
@Test
public void testConvertTime() {
    BasicTypeDefine<Object> typeDefine =
            BasicTypeDefine.builder()
                    .name("test")
                    .columnType("time")
                    .dataType("time")
                    .scale(3)
                    .build();
    Column column = MySqlTypeConverter.DEFAULT_INSTANCE.convert(typeDefine);
    Assertions.assertEquals(typeDefine.getName(), column.getName());
    Assertions.assertEquals(LocalTimeType.LOCAL_TIME_TYPE, column.getDataType());
    Assertions.assertEquals(typeDefine.getScale(), column.getScale());
    Assertions.assertEquals(typeDefine.getColumnType(), column.getSourceType());
}
public boolean eval(ContentFile<?> file) { // TODO: detect the case where a column is missing from the file using file's max field id. return new MetricsEvalVisitor().eval(file); }
/**
 * Strict equality only holds when every row in the file is known to carry the
 * value: out-of-range constants and partial matches must all be rejected, while
 * a column whose min == max == constant must match.
 */
@Test
public void testIntegerEq() {
    boolean matches = new StrictMetricsEvaluator(SCHEMA, equal("id", INT_MIN_VALUE - 25)).eval(FILE);
    assertThat(matches).as("Should not match: all values != 5").isFalse();

    matches = new StrictMetricsEvaluator(SCHEMA, equal("id", INT_MIN_VALUE)).eval(FILE);
    assertThat(matches).as("Should not match: some values != 30").isFalse();

    matches = new StrictMetricsEvaluator(SCHEMA, equal("id", INT_MAX_VALUE - 4)).eval(FILE);
    assertThat(matches).as("Should not match: some values != 75").isFalse();

    matches = new StrictMetricsEvaluator(SCHEMA, equal("id", INT_MAX_VALUE)).eval(FILE);
    assertThat(matches).as("Should not match: some values != 79").isFalse();

    matches = new StrictMetricsEvaluator(SCHEMA, equal("id", INT_MAX_VALUE + 1)).eval(FILE);
    assertThat(matches).as("Should not match: some values != 80").isFalse();

    matches = new StrictMetricsEvaluator(SCHEMA, equal("always_5", INT_MIN_VALUE - 25)).eval(FILE);
    assertThat(matches).as("Should match: all values == 5").isTrue();
}
/**
 * Incremental SPDY frame decoder: consumes as much of {@code buffer} as the
 * current state allows, notifying {@code delegate} for each decoded frame or
 * frame error, and returns when more bytes are needed.
 *
 * <p>{@code state}, {@code streamId}, {@code flags}, {@code length} and
 * {@code numSettings} are decoder fields carried across invocations so a frame
 * may arrive split over multiple buffers. Invalid headers/frames move the
 * decoder into FRAME_ERROR, which discards all remaining input.
 */
public void decode(ByteBuf buffer) {
    boolean last;
    int statusCode;
    while (true) {
        switch(state) {
            case READ_COMMON_HEADER:
                if (buffer.readableBytes() < SPDY_HEADER_SIZE) {
                    return;
                }
                int frameOffset = buffer.readerIndex();
                int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET;
                int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET;
                buffer.skipBytes(SPDY_HEADER_SIZE);
                // High bit of the first byte distinguishes control from data frames
                boolean control = (buffer.getByte(frameOffset) & 0x80) != 0;
                int version;
                int type;
                if (control) {
                    // Decode control frame common header
                    version = getUnsignedShort(buffer, frameOffset) & 0x7FFF;
                    type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET);
                    streamId = 0; // Default to session Stream-ID
                } else {
                    // Decode data frame common header
                    version = spdyVersion; // Default to expected version
                    type = SPDY_DATA_FRAME;
                    streamId = getUnsignedInt(buffer, frameOffset);
                }
                flags = buffer.getByte(flagsOffset);
                length = getUnsignedMedium(buffer, lengthOffset);
                // Check version first then validity
                if (version != spdyVersion) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SPDY Version");
                } else if (!isValidFrameHeader(streamId, type, flags, length)) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid Frame Error");
                } else {
                    state = getNextState(type, length);
                }
                break;
            case READ_DATA_FRAME:
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0));
                    break;
                }
                // Generate data frames that do not exceed maxChunkSize
                int dataLength = Math.min(maxChunkSize, length);
                // Wait until entire frame is readable
                if (buffer.readableBytes() < dataLength) {
                    return;
                }
                ByteBuf data = buffer.alloc().buffer(dataLength);
                data.writeBytes(buffer, dataLength);
                length -= dataLength;
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                }
                // FIN is only reported with the final chunk of the frame
                last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN);
                delegate.readDataFrame(streamId, last, data);
                break;
            case READ_SYN_STREAM_FRAME:
                if (buffer.readableBytes() < 10) {
                    return;
                }
                int offset = buffer.readerIndex();
                streamId = getUnsignedInt(buffer, offset);
                int associatedToStreamId = getUnsignedInt(buffer, offset + 4);
                // Priority is the top 3 bits of the 9th byte
                byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07);
                last = hasFlag(flags, SPDY_FLAG_FIN);
                boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL);
                buffer.skipBytes(10);
                length -= 10;
                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SYN_STREAM Frame");
                } else {
                    state = State.READ_HEADER_BLOCK;
                    delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional);
                }
                break;
            case READ_SYN_REPLY_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                last = hasFlag(flags, SPDY_FLAG_FIN);
                buffer.skipBytes(4);
                length -= 4;
                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SYN_REPLY Frame");
                } else {
                    state = State.READ_HEADER_BLOCK;
                    delegate.readSynReplyFrame(streamId, last);
                }
                break;
            case READ_RST_STREAM_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);
                if (streamId == 0 || statusCode == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid RST_STREAM Frame");
                } else {
                    state = State.READ_COMMON_HEADER;
                    delegate.readRstStreamFrame(streamId, statusCode);
                }
                break;
            case READ_SETTINGS_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR);
                numSettings = getUnsignedInt(buffer, buffer.readerIndex());
                buffer.skipBytes(4);
                length -= 4;
                // Validate frame length against number of entries. Each ID/Value entry is 8 bytes.
                if ((length & 0x07) != 0 || length >> 3 != numSettings) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid SETTINGS Frame");
                } else {
                    state = State.READ_SETTING;
                    delegate.readSettingsFrame(clear);
                }
                break;
            case READ_SETTING:
                if (numSettings == 0) {
                    state = State.READ_COMMON_HEADER;
                    delegate.readSettingsEnd();
                    break;
                }
                if (buffer.readableBytes() < 8) {
                    return;
                }
                byte settingsFlags = buffer.getByte(buffer.readerIndex());
                int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1);
                int value = getSignedInt(buffer, buffer.readerIndex() + 4);
                boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE);
                boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED);
                buffer.skipBytes(8);
                --numSettings;
                delegate.readSetting(id, value, persistValue, persisted);
                break;
            case READ_PING_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                int pingId = getSignedInt(buffer, buffer.readerIndex());
                buffer.skipBytes(4);
                state = State.READ_COMMON_HEADER;
                delegate.readPingFrame(pingId);
                break;
            case READ_GOAWAY_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex());
                statusCode = getSignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);
                state = State.READ_COMMON_HEADER;
                delegate.readGoAwayFrame(lastGoodStreamId, statusCode);
                break;
            case READ_HEADERS_FRAME:
                if (buffer.readableBytes() < 4) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                last = hasFlag(flags, SPDY_FLAG_FIN);
                buffer.skipBytes(4);
                length -= 4;
                if (streamId == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid HEADERS Frame");
                } else {
                    state = State.READ_HEADER_BLOCK;
                    delegate.readHeadersFrame(streamId, last);
                }
                break;
            case READ_WINDOW_UPDATE_FRAME:
                if (buffer.readableBytes() < 8) {
                    return;
                }
                streamId = getUnsignedInt(buffer, buffer.readerIndex());
                int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4);
                buffer.skipBytes(8);
                if (deltaWindowSize == 0) {
                    state = State.FRAME_ERROR;
                    delegate.readFrameError("Invalid WINDOW_UPDATE Frame");
                } else {
                    state = State.READ_COMMON_HEADER;
                    delegate.readWindowUpdateFrame(streamId, deltaWindowSize);
                }
                break;
            case READ_HEADER_BLOCK:
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    delegate.readHeaderBlockEnd();
                    break;
                }
                if (!buffer.isReadable()) {
                    return;
                }
                // Forward the compressed header block in whatever chunks arrive
                int compressedBytes = Math.min(buffer.readableBytes(), length);
                ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes);
                headerBlock.writeBytes(buffer, compressedBytes);
                length -= compressedBytes;
                delegate.readHeaderBlock(headerBlock);
                break;
            case DISCARD_FRAME:
                int numBytes = Math.min(buffer.readableBytes(), length);
                buffer.skipBytes(numBytes);
                length -= numBytes;
                if (length == 0) {
                    state = State.READ_COMMON_HEADER;
                    break;
                }
                return;
            case FRAME_ERROR:
                buffer.skipBytes(buffer.readableBytes());
                return;
            default:
                throw new Error("Shouldn't reach here.");
        }
    }
}
// Undefined flag bits on a GOAWAY frame must be ignored: the frame is still
// decoded and delivered, and the whole buffer consumed.
@Test
public void testUnknownSpdyGoAwayFrameFlags() throws Exception {
    short type = 7;
    byte flags = (byte) 0xFF; // undefined flags
    int length = 8;
    int lastGoodStreamId = RANDOM.nextInt() & 0x7FFFFFFF;
    int statusCode = RANDOM.nextInt() | 0x01;
    ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length);
    encodeControlFrameHeader(buf, type, flags, length);
    buf.writeInt(lastGoodStreamId);
    buf.writeInt(statusCode);
    decoder.decode(buf);
    verify(delegate).readGoAwayFrame(lastGoodStreamId, statusCode);
    assertFalse(buf.isReadable());
    buf.release();
}
/**
 * Produces the hour-bucket copy of this metric: a fresh instance carrying the
 * same entity/service ids and labeled value, keyed by the hour time bucket.
 */
@Override
public Metrics toHour() {
    final MinLabeledFunction hourMetrics = (MinLabeledFunction) createNew();
    hourMetrics.setTimeBucket(toTimeBucketInHour());
    hourMetrics.setEntityId(getEntityId());
    hourMetrics.setServiceId(getServiceId());
    hourMetrics.getValue().copyFrom(getValue());
    return hourMetrics;
}
// The hour down-sample must carry the min-labeled value computed from the
// accepted minute samples.
@Test
public void testToHour() {
    function.accept(MeterEntity.newService("service-test", Layer.GENERAL), HTTP_CODE_COUNT_1);
    function.accept(MeterEntity.newService("service-test", Layer.GENERAL), HTTP_CODE_COUNT_2);
    function.calculate();
    final MinLabeledFunction hourFunction = (MinLabeledFunction) function.toHour();
    hourFunction.calculate();
    assertThat(hourFunction.getValue()).isEqualTo(HTTP_CODE_COUNT_3);
}
/**
 * Generated keys are returned only when a database is configured, it supports
 * auto-generated keys, and the step option is enabled.
 */
public boolean isReturningGeneratedKeys() {
    DatabaseMeta databaseMeta = getDatabaseMeta();
    return databaseMeta != null
        && databaseMeta.supportsAutoGeneratedKeys()
        && returningGeneratedKeys;
}
// Both the step option and the database capability must be true; each one
// alone (or neither) must report false. Stubbing order matters here.
@Test
public void testIsReturningGeneratedKeys() throws Exception {
    TableOutputMeta tableOutputMeta = new TableOutputMeta(), tableOutputMetaSpy = spy( tableOutputMeta );
    DatabaseMeta databaseMeta = mock( DatabaseMeta.class );
    doReturn( true ).when( databaseMeta ).supportsAutoGeneratedKeys();
    doReturn( databaseMeta ).when( tableOutputMetaSpy ).getDatabaseMeta();
    tableOutputMetaSpy.setReturningGeneratedKeys( true );
    assertTrue( tableOutputMetaSpy.isReturningGeneratedKeys() );
    doReturn( false ).when( databaseMeta ).supportsAutoGeneratedKeys();
    assertFalse( tableOutputMetaSpy.isReturningGeneratedKeys() );
    tableOutputMetaSpy.setReturningGeneratedKeys( true );
    assertFalse( tableOutputMetaSpy.isReturningGeneratedKeys() );
    tableOutputMetaSpy.setReturningGeneratedKeys( false );
    assertFalse( tableOutputMetaSpy.isReturningGeneratedKeys() );
}
/**
 * Unregisters every MBean this reporter has published. Registration state is
 * guarded by LOCK, so unregistration happens under the same monitor.
 */
public void close() {
    synchronized (LOCK) {
        this.mbeans.values().forEach(this::unregister);
    }
}
// The metrics-context namespace ("kafka.server") must become the JMX domain of
// the MBeans the reporter registers.
@Test
public void testJmxPrefix() throws Exception {
    JmxReporter reporter = new JmxReporter();
    MetricsContext metricsContext = new KafkaMetricsContext("kafka.server");
    MetricConfig metricConfig = new MetricConfig();
    Metrics metrics = new Metrics(metricConfig,
            new ArrayList<>(Collections.singletonList(reporter)), Time.SYSTEM, metricsContext);
    MBeanServer server = ManagementFactory.getPlatformMBeanServer();
    try {
        Sensor sensor = metrics.sensor("kafka.requests");
        sensor.add(metrics.metricName("pack.bean1.avg", "grp1"), new Avg());
        assertEquals("kafka.server",
                server.getObjectInstance(new ObjectName("kafka.server:type=grp1")).getObjectName().getDomain());
    } finally {
        // Closing also unregisters the MBeans so the test leaves no residue
        metrics.close();
    }
}
/**
 * Matches when the generic concurrent state change is allowed, the stored job
 * is exactly one version ahead of the local copy, and the local job has a
 * FailedState in its history.
 */
@Override
public boolean matches(Job localJob, Job storageProviderJob) {
    if (!AllowedConcurrentStateChange.super.matches(localJob, storageProviderJob)) {
        return false;
    }
    boolean oneVersionBehind = localJob.getVersion() == storageProviderJob.getVersion() - 1;
    boolean hasFailedBefore = localJob.getLastJobStateOfType(FailedState.class).isPresent();
    return oneVersionBehind && hasFailedBefore;
}
// NOTE(review): the method name mentions an enqueued/scheduled job, but the
// fixture builds a job in progress moving to SucceededState — confirm the
// intended scenario matches the name.
@Test
void ifJobHasEnqueuedStateAndWasScheduledNormallyItWillNotMatch() {
    final Job jobInProgress = aJobInProgress().build();
    final Job succeededJob = aCopyOf(jobInProgress).withState(new SucceededState(ofMillis(10), ofMillis(6))).build();
    boolean matchesAllowedStateChange = allowedStateChange.matches(jobInProgress, succeededJob);
    assertThat(matchesAllowedStateChange).isFalse();
}
/**
 * Returns the maximum heap size in bytes from the cached heap
 * {@code MemoryUsage} (per the JMX contract, -1 when undefined).
 */
public static long heapMemoryMax() {
    return heapMemoryUsage.getMax();
}
// On a running JVM the reported maximum heap size must be non-zero.
@Test
public void heapMemoryMax() {
    // Variable name kept from original; it holds the max, not the used amount.
    long memoryUsed = MemoryUtil.heapMemoryMax();
    Assert.assertNotEquals(0, memoryUsed);
}
/**
 * Synchronously sends a message, first applying the configured namespace to its
 * topic. Plain messages go through the batching accumulator when auto-batch is
 * enabled; batches (and everything when auto-batch is off) are sent directly.
 */
@Override
public SendResult send(Message msg)
    throws MQClientException, RemotingException, MQBrokerException, InterruptedException {
    msg.setTopic(withNamespace(msg.getTopic()));
    boolean useAccumulator = this.getAutoBatch() && !(msg instanceof MessageBatch);
    if (!useAccumulator) {
        return sendDirect(msg, null, null);
    }
    return sendByAccumulator(msg, null, null);
}
// A large-body message must still send successfully and propagate the broker's
// result fields (status, offset msg id, queue offset).
@Test
public void testSendMessageSync_WithBodyCompressed() throws RemotingException, InterruptedException, MQBrokerException, MQClientException {
    when(mQClientAPIImpl.getTopicRouteInfoFromNameServer(anyString(), anyLong())).thenReturn(createTopicRoute());
    SendResult sendResult = producer.send(bigMessage);
    assertThat(sendResult.getSendStatus()).isEqualTo(SendStatus.SEND_OK);
    assertThat(sendResult.getOffsetMsgId()).isEqualTo("123");
    assertThat(sendResult.getQueueOffset()).isEqualTo(456L);
}
/**
 * Fails unless the subject contains exactly the entries of the expected
 * multimap. The returned {@link Ordered} lets callers additionally assert entry
 * order via {@code inOrder()}. Failure messages list missing and unexpected
 * entries, adding type information when their string forms collide.
 */
@CanIgnoreReturnValue
public final Ordered containsExactlyEntriesIn(Multimap<?, ?> expectedMultimap) {
    checkNotNull(expectedMultimap, "expectedMultimap");
    checkNotNull(actual);
    ListMultimap<?, ?> missing = difference(expectedMultimap, actual);
    ListMultimap<?, ?> extra = difference(actual, expectedMultimap);
    // TODO(kak): Possible enhancement: Include "[1 copy]" if the element does appear in
    // the subject but not enough times. Similarly for unexpected extra items.
    if (!missing.isEmpty()) {
        if (!extra.isEmpty()) {
            boolean addTypeInfo = hasMatchingToStringPair(missing.entries(), extra.entries());
            // Note: The usage of countDuplicatesAndAddTypeInfo() below causes entries no longer to be
            // grouped by key in the 'missing' and 'unexpected items' parts of the message (we still
            // show the actual and expected multimaps in the standard format).
            String missingDisplay =
                addTypeInfo
                    ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(missing).entries())
                    : countDuplicatesMultimap(annotateEmptyStringsMultimap(missing));
            String extraDisplay =
                addTypeInfo
                    ? countDuplicatesAndAddTypeInfo(annotateEmptyStringsMultimap(extra).entries())
                    : countDuplicatesMultimap(annotateEmptyStringsMultimap(extra));
            failWithActual(
                fact("missing", missingDisplay),
                fact("unexpected", extraDisplay),
                simpleFact("---"),
                fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
            return ALREADY_FAILED;
        } else {
            failWithActual(
                fact("missing", countDuplicatesMultimap(annotateEmptyStringsMultimap(missing))),
                simpleFact("---"),
                fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
            return ALREADY_FAILED;
        }
    } else if (!extra.isEmpty()) {
        failWithActual(
            fact("unexpected", countDuplicatesMultimap(annotateEmptyStringsMultimap(extra))),
            simpleFact("---"),
            fact("expected", annotateEmptyStringsMultimap(expectedMultimap)));
        return ALREADY_FAILED;
    }
    return new MultimapInOrder(/* allowUnexpected = */ false, expectedMultimap);
}
// A ListMultimap and a SetMultimap holding the same entries in the same order
// must satisfy containsExactlyEntriesIn(...).inOrder() across multimap types.
@Test
public void containsExactlyInOrderDifferentTypes() {
    ImmutableListMultimap<Integer, String> listMultimap =
        ImmutableListMultimap.of(3, "one", 3, "six", 3, "two", 4, "five", 4, "four");
    ImmutableSetMultimap<Integer, String> setMultimap = ImmutableSetMultimap.copyOf(listMultimap);
    assertThat(listMultimap).containsExactlyEntriesIn(setMultimap).inOrder();
}
/**
 * Loads, prepares and starts every module listed in the configuration.
 *
 * <p>Modules and providers are discovered via {@link ServiceLoader}. Each
 * configured module is prepared with its own configuration section; any
 * configured name that was never discovered aborts the whole init with
 * {@link ModuleNotFoundException}. Finally the bootstrap flow starts providers
 * and fires the after-completed notifications.
 */
public void init(ApplicationConfiguration applicationConfiguration)
    throws ModuleNotFoundException, ProviderNotFoundException, ServiceNotProvidedException,
           CycleDependencyException, ModuleConfigException, ModuleStartException {
    String[] moduleNames = applicationConfiguration.moduleList();
    ServiceLoader<ModuleDefine> moduleServiceLoader = ServiceLoader.load(ModuleDefine.class);
    ServiceLoader<ModuleProvider> moduleProviderLoader = ServiceLoader.load(ModuleProvider.class);
    HashSet<String> moduleSet = new HashSet<>(Arrays.asList(moduleNames));
    for (ModuleDefine module : moduleServiceLoader) {
        if (moduleSet.contains(module.name())) {
            module.prepare(
                this,
                applicationConfiguration.getModuleConfiguration(module.name()),
                moduleProviderLoader,
                bootingParameters
            );
            loadedModules.put(module.name(), module);
            moduleSet.remove(module.name());
        }
    }
    // Finish prepare stage
    isInPrepareStage = false;
    if (moduleSet.size() > 0) {
        // Whatever remains was configured but never found on the classpath.
        throw new ModuleNotFoundException(moduleSet.toString() + " missing.");
    }
    BootstrapFlow bootstrapFlow = new BootstrapFlow(loadedModules);
    bootstrapFlow.start(this);
    bootstrapFlow.notifyAfterCompleted();
}
// After init(), a configured module must be findable and its provider's
// services resolvable.
@Test
public void testInit() throws ServiceNotProvidedException, ModuleNotFoundException,
        ProviderNotFoundException, DuplicateProviderException, ModuleConfigException, ModuleStartException {
    ApplicationConfiguration configuration = new ApplicationConfiguration();
    configuration.addModule("Test").addProviderConfiguration("TestModule-Provider", new Properties());
    configuration.addModule("BaseA").addProviderConfiguration("P-A", new Properties());
    configuration.addModule("BaseB").addProviderConfiguration("P-B", new Properties());
    ModuleManager manager = new ModuleManager("Test");
    manager.init(configuration);
    BaseModuleA.ServiceABusiness1 serviceABusiness1 = manager.find("BaseA")
        .provider()
        .getService(BaseModuleA.ServiceABusiness1.class);
    Assertions.assertTrue(serviceABusiness1 != null);
}
/**
 * Checks for an incomplete S3 multipart upload for the file. When one exists,
 * reports an append possible at the offset equal to the total size of the parts
 * already uploaded; otherwise (or when listing fails with access-denied /
 * interoperability errors, which are logged and ignored) reports override.
 */
@Override
public Write.Append append(final Path file, final TransferStatus status) throws BackgroundException {
    try {
        final S3DefaultMultipartService multipartService = new S3DefaultMultipartService(session);
        final List<MultipartUpload> upload = multipartService.find(file);
        if(!upload.isEmpty()) {
            // Sum the sizes of the parts already uploaded for the first pending upload
            Long size = 0L;
            for(MultipartPart completed : multipartService.list(upload.iterator().next())) {
                size += completed.getSize();
            }
            return new Write.Append(true).withStatus(status).withOffset(size);
        }
    }
    catch(AccessDeniedException | InteroperabilityException e) {
        log.warn(String.format("Ignore failure listing incomplete multipart uploads. %s", e));
    }
    return Write.override;
}
// With no pending multipart upload for the path, append() must report that no
// append is possible.
@Test
public void testAppendBelowLimit() throws Exception {
    final S3AccessControlListFeature acl = new S3AccessControlListFeature(session);
    final S3MultipartUploadService feature = new S3MultipartUploadService(session,
            new S3WriteFeature(session, acl), acl, 5 * 1024L * 1024L, 5);
    final Write.Append append = feature.append(new Path("/p", EnumSet.of(Path.Type.file)),
            new TransferStatus().withLength(0L));
    assertFalse(append.append);
}
/**
 * Resolves the metric name for a message: the metric-name header
 * ({@code MetricsConstants.HEADER_METRIC_NAME}) when present, otherwise the
 * supplied default.
 */
public String getMetricsName(Message in, String defaultValue) {
    return getStringHeader(in, MetricsConstants.HEADER_METRIC_NAME, defaultValue);
}
// The header value must win over the supplied default, and the header must be
// read exactly once.
@Test
public void testGetMetricsName() {
    when(in.getHeader(HEADER_METRIC_NAME, String.class)).thenReturn("A");
    assertThat(okProducer.getMetricsName(in, "value"), is("A"));
    inOrder.verify(in, times(1)).getHeader(HEADER_METRIC_NAME, String.class);
    inOrder.verifyNoMoreInteractions();
}
/**
 * Tracks the component's raw issues against the reference branch input, using
 * non-closed tracking only.
 */
public Tracking<DefaultIssue, DefaultIssue> track(Component component, Input<DefaultIssue> rawInput) {
    return tracker.trackNonClosed(rawInput, referenceBranchInputFactory.create(component));
}
// track() must delegate to the tracker with the input the factory creates for
// the component, and return the tracker's result unchanged.
@Test
public void testTracking() {
    Input<DefaultIssue> rawInput = mock(Input.class);
    Input<DefaultIssue> mergeInput = mock(Input.class);
    NonClosedTracking<DefaultIssue, DefaultIssue> result = mock(NonClosedTracking.class);
    when(mergeInputFactory.create(component)).thenReturn(mergeInput);
    when(tracker.trackNonClosed(rawInput, mergeInput)).thenReturn(result);
    assertThat(underTest.track(component, rawInput)).isEqualTo(result);
}
/**
 * Runs the base source-connector validation, then layers on the herder's
 * exactly-once-support and transaction-boundary checks. The same validated map
 * is mutated in place by the extra checks and returned.
 */
@Override
protected Map<String, ConfigValue> validateSourceConnectorConfig(
        SourceConnector connector, ConfigDef configDef, Map<String, String> config) {
    Map<String, ConfigValue> validated = super.validateSourceConnectorConfig(connector, configDef, config);
    validateSourceConnectorExactlyOnceSupport(config, validated, connector);
    validateSourceConnectorTransactionBoundary(config, validated, connector);
    return validated;
}
// An unparseable transaction.boundary value must yield exactly one readable
// validation error rather than throwing from validation.
@Test
public void testConnectorTransactionBoundaryValidationHandlesInvalidValuesGracefully() {
    herder = exactlyOnceHerder();
    Map<String, String> config = new HashMap<>();
    config.put(SourceConnectorConfig.TRANSACTION_BOUNDARY_CONFIG, "CONNECTOR.toString()");
    SourceConnector connectorMock = mock(SourceConnector.class);
    Map<String, ConfigValue> validatedConfigs = herder.validateSourceConnectorConfig(
        connectorMock, SourceConnectorConfig.configDef(), config);
    List<String> errors = validatedConfigs.get(SourceConnectorConfig.TRANSACTION_BOUNDARY_CONFIG).errorMessages();
    assertFalse(errors.isEmpty());
    assertTrue(
        errors.get(0).contains("String must be one of (case insensitive): "),
        "Error message did not contain expected text: " + errors.get(0));
    assertEquals(1, errors.size());
}
/**
 * Divides this resource value by the given divisor, truncating (RoundingMode.DOWN)
 * at MAX_VALUE_SCALE decimal places, and wraps the quotient in a new instance.
 */
public T divide(BigDecimal by) {
    final BigDecimal quotient = value.divide(by, MAX_VALUE_SCALE, RoundingMode.DOWN);
    return create(quotient);
}
/** 0.04 / 0.1 must yield a resource of exactly 0.4 (truncating division). */
@Test
void testDivide() {
    final BigDecimal divisor = BigDecimal.valueOf(0.1);
    final Resource resource = new TestResource(0.04);
    assertTestResourceValueEquals(0.4, resource.divide(divisor));
}
@Override public void doFilter(ServletRequest req, ServletResponse res, FilterChain chain) throws IOException, ServletException { HttpServletRequest request = (HttpServletRequest)req; HttpServletResponse response = (HttpServletResponse)res; // Do not allow framing; OF-997 response.setHeader("X-Frame-Options", JiveGlobals.getProperty("adminConsole.frame-options", "SAMEORIGIN")); // Reset the defaultLoginPage variable String loginPage = defaultLoginPage; if (loginPage == null) { loginPage = request.getContextPath() + (AuthFactory.isOneTimeAccessTokenEnabled() ? "/loginToken.jsp" : "/login.jsp" ); } // Get the page we're on: String url = request.getRequestURI().substring(1); if (url.startsWith("plugins/")) { url = url.substring("plugins/".length()); } // See if it's contained in the exclude list. If so, skip filter execution boolean doExclude = false; for (String exclude : excludes) { if (testURLPassesExclude(url, exclude)) { doExclude = true; break; } } if (!doExclude || IP_ACCESS_IGNORE_EXCLUDES.getValue()) { if (!passesBlocklist(req) || !passesAllowList(req)) { response.sendError(HttpServletResponse.SC_FORBIDDEN); return; } } if (!doExclude) { WebManager manager = new WebManager(); manager.init(request, response, request.getSession(), context); boolean haveOneTimeToken = manager.getAuthToken() instanceof AuthToken.OneTimeAuthToken; User loggedUser = manager.getUser(); boolean loggedAdmin = loggedUser == null ? false : adminManager.isUserAdmin(loggedUser.getUsername(), true); if (!haveOneTimeToken && !loggedAdmin && !authUserFromRequest(request)) { response.sendRedirect(getRedirectURL(request, loginPage, null)); return; } } chain.doFilter(req, res); }
// A request whose remote address falls inside an allow-listed IP range must
// pass the filter without an error and reach the rest of the chain.
@Test
public void nonExcludedUrlWillNotErrorWhenRangeOnAllowlist() throws Exception {
    AuthCheckFilter.SERVLET_REQUEST_AUTHENTICATOR.setValue(AdminUserServletAuthenticatorClass.class);
    final AuthCheckFilter filter = new AuthCheckFilter(adminManager, loginLimitManager);
    // Build a /24-style range ("a.b.c.0-a.b.c.255") around the request's address
    final String range = remoteAddr.substring(0, remoteAddr.lastIndexOf('.')) + ".0-"
            + remoteAddr.substring(0, remoteAddr.lastIndexOf('.')) + ".255";
    AuthCheckFilter.IP_ACCESS_ALLOWLIST.setValue(Collections.singleton(range));
    filter.doFilter(request, response, filterChain);
    verify(response, never()).sendError(anyInt());
    verify(filterChain, atLeastOnce()).doFilter(any(), any());
}
/**
 * Completes the future exceptionally with the given exception and notifies
 * failure listeners. The future may complete only once: a second attempt throws
 * IllegalStateException, and a null exception throws IllegalArgumentException.
 * The completion latch is counted down in all cases (finally) so waiters are
 * always released.
 */
public void raise(RuntimeException e) {
    try {
        if (e == null)
            throw new IllegalArgumentException("The exception passed to raise must not be null");
        if (!result.compareAndSet(INCOMPLETE_SENTINEL, e))
            throw new IllegalStateException("Invalid attempt to complete a request future which is already complete");
        fireFailure();
    } finally {
        completedLatch.countDown();
    }
}
/** Reading value() from an exceptionally-completed future must throw. */
@Test
public void invokeValueAfterFailure() {
    RequestFuture<Void> failedFuture = new RequestFuture<>();
    failedFuture.raise(new RuntimeException());
    assertThrows(IllegalStateException.class, failedFuture::value);
}
/**
 * Authenticates a user by name and raw password, returning a NacosUser that
 * carries a freshly issued JWT.
 *
 * @throws AccessException when either credential is blank, the user is unknown,
 *         or the password does not match (same message in all cases, so callers
 *         cannot distinguish which check failed)
 */
@Override
public NacosUser authenticate(String username, String rawPassword) throws AccessException {
    if (StringUtils.isBlank(username) || StringUtils.isBlank(rawPassword)) {
        throw new AccessException("user not found!");
    }
    NacosUserDetails userDetails = (NacosUserDetails) userDetailsService.loadUserByUsername(username);
    boolean credentialsValid =
            userDetails != null && PasswordEncoderUtil.matches(rawPassword, userDetails.getPassword());
    if (!credentialsValid) {
        throw new AccessException("user not found!");
    }
    return new NacosUser(userDetails.getUsername(), jwtTokenManager.createToken(username));
}
// A user unknown to the user-details service must be rejected with AccessException.
@Test
void testAuthenticate4() {
    when(userDetailsService.loadUserByUsername(anyString())).thenReturn(null);
    assertThrows(AccessException.class, () -> {
        abstractAuthenticationManager.authenticate("nacos", "test");
    });
}
/**
 * Maps a JCA padding string to a padding model node. Null yields empty; names
 * containing "OAEP" (case-insensitive) are delegated to
 * {@link JcaOAEPPaddingMapper}; all other names go through the plain map.
 */
@Nonnull
@Override
public Optional<? extends Padding> parse(
        @Nullable String str, @Nonnull DetectionLocation detectionLocation) {
    if (str == null) {
        return Optional.empty();
    }
    if (!str.toUpperCase().contains("OAEP")) {
        return map(str, detectionLocation);
    }
    return new JcaOAEPPaddingMapper().parse(str, detectionLocation);
}
// A padding string containing "OAEP" must parse into an OAEP node.
@Test
void oaep() {
    DetectionLocation testDetectionLocation =
            new DetectionLocation("testfile", 1, 1, List.of("test"), () -> "SSL");
    JcaPaddingMapper jcaPaddingMapper = new JcaPaddingMapper();
    Optional<? extends INode> asset =
            jcaPaddingMapper.parse("OAEPWithMD5AndMGF1Padding", testDetectionLocation);
    assertThat(asset).isPresent();
    assertThat(asset.get()).isInstanceOf(OAEP.class);
}
/**
 * Shuts down the connection manager exactly once (guarded by the isAlive CAS;
 * later calls are no-ops): stops the executor and waits for termination, closes
 * every active connection, stops networking, clears listeners, and destroys the
 * current cluster discovery context.
 */
public synchronized void shutdown() {
    if (!isAlive.compareAndSet(true, false)) {
        return;
    }
    executor.shutdownNow();
    ClientExecutionServiceImpl.awaitExecutorTermination("cluster", executor, logger);
    for (ClientConnection connection : activeConnections.values()) {
        connection.close("Hazelcast client is shutting down", null);
    }
    stopNetworking();
    connectionListeners.clear();
    clusterDiscoveryService.current().destroy();
}
// With skip.member.list.during.reconnection enabled, a SINGLE_MEMBER-routing
// client must not fail over to other members after its configured member stops;
// it should instead exhaust its retry budget and shut itself down.
@Test
public void testSkipMemberListDuringReconnection() {
    HazelcastInstance instance = factory.newHazelcastInstance(smallInstanceConfigWithoutJetAndMetrics());
    Address address = instance.getCluster().getLocalMember().getAddress();
    String addressString = address.getHost() + ":" + address.getPort();
    ClientConfig config = new ClientConfig();
    config.setProperty("hazelcast.client.internal.skip.member.list.during.reconnection", "true");
    config.getNetworkConfig().getClusterRoutingConfig().setRoutingMode(RoutingMode.SINGLE_MEMBER);
    config.getNetworkConfig().addAddress(addressString);
    config.getConnectionStrategyConfig().getConnectionRetryConfig().setClusterConnectTimeoutMillis(3_000);
    // There are two members, and the SINGLE_MEMBER routing client is connecting
    // to one of them. (the address of the `instance` defined above)
    HazelcastInstance client = factory.newHazelcastClient(config);
    assertEquals(2, client.getCluster().getMembers().size());
    instance.shutdown();
    // We shut down the `instance` the client is connected to but
    // there is still a member running. If the client was to try to
    // connect to members from the member list, it would succeed
    // and the assertion below would never be true.
    assertTrueEventually(() -> assertFalse(client.getLifecycleService().isRunning()));
}
/**
 * Builds a compact, RS256-signed JWT (a JWS) from the given claims using the
 * private key configured in the security config ({@code jwtConfig}).
 *
 * @param claims the JWT claims to serialize and sign
 * @return the compact JWS serialization: base64url Header.Payload.Signature
 * @throws JoseException if signing or serialization fails
 * @deprecated callers should use the overload that accepts an explicit kid and key
 */
@Deprecated
public static String getJwt(JwtClaims claims) throws JoseException {
    String jwt;
    RSAPrivateKey privateKey = (RSAPrivateKey) getPrivateKey(
        jwtConfig.getKey().getFilename(),jwtConfig.getKey().getPassword(), jwtConfig.getKey().getKeyName());

    // A JWT is a JWS and/or a JWE with JSON claims as the payload.
    // In this example it is a JWS nested inside a JWE
    // So we first create a JsonWebSignature object.
    JsonWebSignature jws = new JsonWebSignature();

    // The payload of the JWS is JSON content of the JWT Claims
    jws.setPayload(claims.toJson());

    // The JWT is signed using the sender's private key
    jws.setKey(privateKey);

    // Get provider from security config file, it should be two digit
    // And the provider id will set as prefix for keyid in the token header, for example: 05100
    // If no provider id is configured, no prefix is added (the kid is used as-is).
    String provider_id = "";
    if (jwtConfig.getProviderId() != null) {
        provider_id = jwtConfig.getProviderId();
        // Left-pad a single digit to two; truncate anything longer than two digits.
        if (provider_id.length() == 1) {
            provider_id = "0" + provider_id;
        } else if (provider_id.length() > 2) {
            logger.error("provider_id defined in the security.yml file is invalid; the length should be 2");
            provider_id = provider_id.substring(0, 2);
        }
    }
    jws.setKeyIdHeaderValue(provider_id + jwtConfig.getKey().getKid());

    // Set the signature algorithm on the JWT/JWS that will integrity protect the claims
    jws.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256);

    // Sign the JWS and produce the compact serialization, which will be the inner JWT/JWS
    // representation, which is a string consisting of three dot ('.') separated
    // base64url-encoded parts in the form Header.Payload.Signature
    jwt = jws.getCompactSerialization();
    return jwt;
}
@Test
public void longlivedCcLocalPortal() throws Exception {
    // Client-credentials claims with portal read/write scopes and a ~10-year expiry.
    final JwtClaims claims = ClaimsUtil.getTestCcClaims(
            "f7d42348-c647-4efb-a52d-4c5787421e73", Arrays.asList("portal.r", "portal.w"));
    claims.setExpirationTimeMinutesInTheFuture(5256000);

    final String token = JwtIssuer.getJwt(claims, long_kid,
            KeyUtil.deserializePrivateKey(long_key, KeyUtil.RSA));
    System.out.println("***Long lived token for portal lightapi***: " + token);
}
/** Fails unless the (non-null) subject iterable contains no elements. */
public final void isEmpty() {
    if (Iterables.isEmpty(checkNotNull(actual))) {
        return;
    }
    failWithActual(simpleFact("expected to be empty"));
}
@Test
public void iterableIsEmptyWithFailure() {
    // A three-element list (including a null) must fail the isEmpty() assertion.
    List<Integer> nonEmptyList = asList(1, null, 3);
    expectFailureWhenTestingThat(nonEmptyList).isEmpty();
    assertFailureKeys("expected to be empty", "but was");
}
/**
 * Extracts the path relative to this mount point from the given URI.
 *
 * @param uri the request URI to match against the mount target
 * @return the remainder of the URI after the mount prefix, or empty when the
 *         URI does not start with the target or is exactly the target
 */
public Optional<String> extract(final String uri) {
    // BUG FIX: the previous implementation used uri.replaceFirst(this.target, ""),
    // which interprets the mount target as a REGEX. A target containing
    // metacharacters (e.g. ".", "+", "(") could match the wrong span or throw.
    // Since the startsWith guard already ensures a literal prefix match,
    // plain substring stripping is both correct and faster.
    if (uri.startsWith(this.target) && uri.length() != this.target.length()) {
        return Optional.of(uri.substring(this.target.length()));
    }
    return empty();
}
@Test
public void should_return_null_if_no_relative_path_found() {
    // A URI carrying nothing beyond the mount point yields no relative path.
    final MountTo mountTo = new MountTo("/dir");
    final Optional<String> extracted = mountTo.extract("/dir/");
    assertThat(extracted.isPresent(), is(false));
}
/**
 * Registers a RocksDB metrics recorder for periodic triggering.
 * Registering the same recorder (same store/task) twice is a programming error.
 *
 * @param metricsRecorder the recorder to register
 * @throws IllegalStateException if a recorder with the same name was already added
 */
public void addMetricsRecorder(final RocksDBMetricsRecorder metricsRecorder) {
    final String key = metricsRecorderName(metricsRecorder);
    if (metricsRecordersToTrigger.containsKey(key)) {
        throw new IllegalStateException("RocksDB metrics recorder for store \""
            + metricsRecorder.storeName() + "\" of task "
            + metricsRecorder.taskId().toString()
            + " has already been added. This is a bug in Kafka Streams.");
    }
    metricsRecordersToTrigger.put(key, metricsRecorder);
}
@Test
public void shouldThrowIfRecorderToAddHasBeenAlreadyAdded() {
    // First registration succeeds.
    when(recorder1.storeName()).thenReturn(STORE_NAME1);
    when(recorder1.taskId()).thenReturn(TASK_ID1);
    recordingTrigger.addMetricsRecorder(recorder1);

    // Registering the very same recorder again must be rejected.
    assertThrows(
        IllegalStateException.class,
        () -> recordingTrigger.addMetricsRecorder(recorder1));
}
/**
 * Deserializes a partial config XML snippet into an instance of the given class.
 *
 * @param partial the XML fragment to parse
 * @param o the target config class, e.g. {@code PipelineConfig.class}
 * @return the populated configuration object
 * @throws Exception if the XML cannot be parsed or mapped onto the class
 */
public <T> T fromXmlPartial(String partial, Class<T> o) throws Exception {
    // Delegate to the stream-based overload; UTF-8 matches the config encoding.
    return fromXmlPartial(toInputStream(partial, UTF_8), o);
}
@Test
void shouldLoadPipelineWithMultipleMaterials() throws Exception {
    // Pipeline fragment with three SVN materials, each checked out to its own folder.
    String pipelineXmlPartial =
        """
            <pipeline name="pipeline">
              <materials>
                <svn url="/hgrepo1" dest="folder1" />
                <svn url="/hgrepo2" dest="folder2" />
                <svn url="/hgrepo3" dest="folder3" />
              </materials>
              <stage name="mingle">
                <jobs>
                  <job name="functional">
                    <artifacts>
                      <log src="artifact1.xml" dest="cruise-output" />
                    </artifacts>
                   </job>
                </jobs>
              </stage>
            </pipeline>
            """;
    PipelineConfig pipeline = xmlLoader.fromXmlPartial(pipelineXmlPartial, PipelineConfig.class);
    // All three materials must be parsed, and dest must map to the material folder.
    assertThat(pipeline.materialConfigs().size()).isEqualTo(3);
    ScmMaterialConfig material = (ScmMaterialConfig) pipeline.materialConfigs().get(0);
    assertThat(material.getFolder()).isEqualTo("folder1");
}
/**
 * Returns the canonical empty {@link StringSetData}.
 * A shared singleton — callers must treat it as immutable.
 */
public static StringSetData empty() {
    return EmptyStringSetData.INSTANCE;
}
@Test
public void testEmpty() {
    // The canonical empty instance must expose an empty backing set.
    assertTrue(StringSetData.empty().stringSet().isEmpty());
}
/**
 * Translates FairScheduler site-level settings in {@code conf} into their
 * CapacityScheduler equivalents, writing the results into {@code yarnSiteConfig}.
 * Covers: async scheduling, preemption, auto queue deletion, multi-assign,
 * locality thresholds, size-based weight, DRF and the auto-refresh policy.
 *
 * @param conf source FairScheduler configuration
 * @param yarnSiteConfig destination yarn-site configuration (mutated)
 * @param drfUsed whether dominant-resource fairness was in use on the FS side
 * @param enableAsyncScheduler force-enable CS async scheduling
 * @param userPercentage whether the conversion runs in user-percentage mode
 * @param preemptionMode how preemption policies should be emitted when FS preemption is off
 */
@SuppressWarnings({"deprecation", "checkstyle:linelength"})
public void convertSiteProperties(Configuration conf,
    Configuration yarnSiteConfig, boolean drfUsed,
    boolean enableAsyncScheduler, boolean userPercentage,
    FSConfigToCSConfigConverterParams.PreemptionMode preemptionMode) {
    yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER,
        CapacityScheduler.class.getCanonicalName());

    // FS continuous scheduling maps onto CS asynchronous scheduling with the same interval.
    if (conf.getBoolean(
        FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED,
        FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_ENABLED)) {
      yarnSiteConfig.setBoolean(
          CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
      int interval = conf.getInt(
          FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS,
          FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_SLEEP_MS);
      yarnSiteConfig.setInt(PREFIX +
          "schedule-asynchronously.scheduling-interval-ms", interval);
    }

    // This should be always true to trigger cs auto
    // refresh queue.
    yarnSiteConfig.setBoolean(
        YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);

    if (conf.getBoolean(FairSchedulerConfiguration.PREEMPTION,
        FairSchedulerConfiguration.DEFAULT_PREEMPTION)) {
      preemptionEnabled = true;
      // Append the proportional preemption policy to the monitor-policy list.
      String policies = addMonitorPolicy(ProportionalCapacityPreemptionPolicy.
          class.getCanonicalName(), yarnSiteConfig);
      yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
          policies);

      int waitTimeBeforeKill = conf.getInt(
          FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL,
          FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_KILL);
      yarnSiteConfig.setInt(
          CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
          waitTimeBeforeKill);

      long waitBeforeNextStarvationCheck = conf.getLong(
          FairSchedulerConfiguration.WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS,
          FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS);
      yarnSiteConfig.setLong(
          CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
          waitBeforeNextStarvationCheck);
    } else {
      // With FS preemption off, NO_POLICY explicitly clears any preset monitor policies.
      if (preemptionMode ==
          FSConfigToCSConfigConverterParams.PreemptionMode.NO_POLICY) {
        yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, "");
      }
    }

    // For auto created queue's auto deletion.
    if (!userPercentage) {
      String policies = addMonitorPolicy(AutoCreatedQueueDeletionPolicy.
          class.getCanonicalName(), yarnSiteConfig);
      yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
          policies);

      // Set the expired for deletion interval to 10s, consistent with fs.
      yarnSiteConfig.setInt(CapacitySchedulerConfiguration.
          AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, 10);
    }

    if (conf.getBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE,
        FairSchedulerConfiguration.DEFAULT_ASSIGN_MULTIPLE)) {
      yarnSiteConfig.setBoolean(
          CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, true);
    } else {
      yarnSiteConfig.setBoolean(
          CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, false);
    }

    // Make auto cs conf refresh enabled.
    yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
        addMonitorPolicy(QueueConfigurationAutoRefreshPolicy
            .class.getCanonicalName(), yarnSiteConfig));

    // Only emit values that differ from the FS defaults, so converted configs stay minimal.
    int maxAssign = conf.getInt(FairSchedulerConfiguration.MAX_ASSIGN,
        FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN);
    if (maxAssign != FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN) {
      yarnSiteConfig.setInt(
          CapacitySchedulerConfiguration.MAX_ASSIGN_PER_HEARTBEAT,
          maxAssign);
    }

    float localityThresholdNode = conf.getFloat(
        FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE,
        FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE);
    if (localityThresholdNode !=
        FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE) {
      yarnSiteConfig.setFloat(CapacitySchedulerConfiguration.NODE_LOCALITY_DELAY,
          localityThresholdNode);
    }

    float localityThresholdRack = conf.getFloat(
        FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK,
        FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK);
    if (localityThresholdRack !=
        FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK) {
      yarnSiteConfig.setFloat(
          CapacitySchedulerConfiguration.RACK_LOCALITY_ADDITIONAL_DELAY,
          localityThresholdRack);
    }

    if (conf.getBoolean(FairSchedulerConfiguration.SIZE_BASED_WEIGHT,
        FairSchedulerConfiguration.DEFAULT_SIZE_BASED_WEIGHT)) {
      sizeBasedWeight = true;
    }

    if (drfUsed) {
      yarnSiteConfig.set(
          CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
          DominantResourceCalculator.class.getCanonicalName());
    }

    if (enableAsyncScheduler) {
      yarnSiteConfig.setBoolean(CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
    }
}
@Test
public void testSiteLocalityThresholdConversion() {
    // Non-default FS locality thresholds must be carried over verbatim.
    yarnConfig.set(FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE, "123.123");
    yarnConfig.set(FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK, "321.321");

    converter.convertSiteProperties(yarnConfig, yarnConvertedConfig, false, false, false, null);

    String nodeDelay = yarnConvertedConfig.get(CapacitySchedulerConfiguration.NODE_LOCALITY_DELAY);
    String rackDelay = yarnConvertedConfig.get(CapacitySchedulerConfiguration.RACK_LOCALITY_ADDITIONAL_DELAY);
    assertEquals("Locality threshold node", "123.123", nodeDelay);
    assertEquals("Locality threshold rack", "321.321", rackDelay);
}
public static boolean isInvalidStanzaSentPriorToResourceBinding(final Packet stanza, final ClientSession session) { // Openfire sets 'authenticated' only after resource binding. if (session.getStatus() == Session.Status.AUTHENTICATED) { return false; } // Beware, the 'to' address in the stanza will have been overwritten by the final JID intendedRecipient = stanza.getTo(); final JID serverDomain = new JID(XMPPServer.getInstance().getServerInfo().getXMPPDomain()); // If there's no 'to' address, then the stanza is implicitly addressed at the user itself. if (intendedRecipient == null) { return false; } // TODO: after authentication (but prior to resource binding), it should be possible to verify that the // intended recipient's bare JID corresponds with the authorized user. Openfire currently does not have an API // that can be used to obtain the authorized username, prior to resource binding. if (intendedRecipient.equals(serverDomain)) { return false; } return true; }
@Test public void testIsInvalid_noToAddress_authenticated() throws Exception { // Setup test fixture. final Packet stanza = new Message(); final LocalClientSession session = mock(LocalClientSession.class, withSettings().strictness(Strictness.LENIENT)); when(session.getStatus()).thenReturn(Session.Status.AUTHENTICATED); // Openfire sets 'AUTHENTICATED' only after resource binding has been done. // Execute system under test. final boolean result = SessionPacketRouter.isInvalidStanzaSentPriorToResourceBinding(stanza, session); // Verify results. assertFalse(result); }
/**
 * Records the configured timing marks, then forwards the successful result
 * to the wrapped callback.
 *
 * @param result the result to propagate
 */
@Override
public void onSuccess(T result) {
    // Close/begin the timing keys before the delegate observes the result.
    markTimings();
    _callback.onSuccess(result);
}
/**
 * Verifies that nested begin/end timing keys recorded through a TimingCallback
 * produce start times and durations consistent with their registration order:
 * M wraps L which is followed by H (M begins first, H begins last; M's span
 * contains both L's and H's).
 */
@Test
public void testOrdering() {
    final RequestContext requestContext = new RequestContext();
    final Callback<Long> callback = new Callback<Long>() {
        @Override
        public void onSuccess(Long result) {
            Map<TimingKey, TimingContextUtil.TimingContext> timings = TimingContextUtil.getTimingsMap(requestContext);

            // Ensure all keys are present
            Assert.assertTrue(timings.containsKey(KEY_H));
            Assert.assertTrue(timings.containsKey(KEY_M));
            Assert.assertTrue(timings.containsKey(KEY_L));

            // Ensure timing start times/durations are consistent based on their ordering in the callback
            TimingContextUtil.TimingContext contextH = timings.get(KEY_H);
            TimingContextUtil.TimingContext contextM = timings.get(KEY_M);
            TimingContextUtil.TimingContext contextL = timings.get(KEY_L);
            Assert.assertTrue(contextM.getStartTimeNano() < contextL.getStartTimeNano());
            Assert.assertTrue(contextL.getStartTimeNano() < contextH.getStartTimeNano());
            Assert.assertTrue(contextL.getDurationNano() < contextM.getDurationNano());
            Assert.assertTrue(contextH.getDurationNano() < contextM.getDurationNano());
        }

        @Override
        public void onError(Throwable e) {}
    };

    // Begin order: M, L, H; end order: L, H, M — so M's interval encloses the others.
    final Callback<Long> timingCallback = new TimingCallback.Builder<>(callback, requestContext)
        .addBeginTimingKey(KEY_M)
        .addBeginTimingKey(KEY_L)
        .addEndTimingKey(KEY_L)
        .addBeginTimingKey(KEY_H)
        .addEndTimingKey(KEY_H)
        .addEndTimingKey(KEY_M)
        .build();

    // Triggers the timing marks and runs the assertions in onSuccess above.
    timingCallback.onSuccess(1L);
}
/**
 * Audits which release a client instance has picked up: lazily creates the
 * instance record, then creates or refreshes the per-(instance, config app,
 * namespace) InstanceConfig row. Two in-memory caches short-circuit the
 * common case where nothing has changed since the last audit.
 */
void doAudit(InstanceConfigAuditModel auditModel) {
    // Resolve (or lazily create and cache) the numeric instance id.
    String instanceCacheKey = assembleInstanceKey(auditModel.getAppId(), auditModel
        .getClusterName(), auditModel.getIp(), auditModel.getDataCenter());
    Long instanceId = instanceCache.getIfPresent(instanceCacheKey);
    if (instanceId == null) {
      instanceId = prepareInstanceId(auditModel);
      instanceCache.put(instanceCacheKey, instanceId);
    }

    //load instance config release key from cache, and check if release key is the same
    String instanceConfigCacheKey = assembleInstanceConfigKey(instanceId, auditModel
        .getConfigAppId(), auditModel.getConfigNamespace());
    String cacheReleaseKey = instanceConfigReleaseKeyCache.getIfPresent(instanceConfigCacheKey);

    //if release key is the same, then skip audit
    if (cacheReleaseKey != null && Objects.equals(cacheReleaseKey, auditModel.getReleaseKey())) {
      return;
    }

    instanceConfigReleaseKeyCache.put(instanceConfigCacheKey, auditModel.getReleaseKey());

    //if release key is not the same or cannot find in cache, then do audit
    InstanceConfig instanceConfig = instanceService.findInstanceConfig(instanceId, auditModel
        .getConfigAppId(), auditModel.getConfigNamespace());

    if (instanceConfig != null) {
      if (!Objects.equals(instanceConfig.getReleaseKey(), auditModel.getReleaseKey())) {
        // Release changed: record the new release and its delivery time.
        instanceConfig.setConfigClusterName(auditModel.getConfigClusterName());
        instanceConfig.setReleaseKey(auditModel.getReleaseKey());
        instanceConfig.setReleaseDeliveryTime(auditModel.getOfferTime());
      } else if (offerTimeAndLastModifiedTimeCloseEnough(auditModel.getOfferTime(),
          instanceConfig.getDataChangeLastModifiedTime())) {
        //when releaseKey is the same, optimize to reduce writes if the record was updated not long ago
        return;
      }
      //we need to update no matter the release key is the same or not, to ensure the
      //last modified time is updated each day
      instanceConfig.setDataChangeLastModifiedTime(auditModel.getOfferTime());
      instanceService.updateInstanceConfig(instanceConfig);
      return;
    }

    // First audit for this (instance, config app, namespace): insert a new record.
    instanceConfig = new InstanceConfig();
    instanceConfig.setInstanceId(instanceId);
    instanceConfig.setConfigAppId(auditModel.getConfigAppId());
    instanceConfig.setConfigClusterName(auditModel.getConfigClusterName());
    instanceConfig.setConfigNamespaceName(auditModel.getConfigNamespace());
    instanceConfig.setReleaseKey(auditModel.getReleaseKey());
    instanceConfig.setReleaseDeliveryTime(auditModel.getOfferTime());
    instanceConfig.setDataChangeCreatedTime(auditModel.getOfferTime());

    try {
      instanceService.createInstanceConfig(instanceConfig);
    } catch (DataIntegrityViolationException ex) {
      //concurrent insertion, safe to ignore
    }
}
@Test
public void testDoAudit() throws Exception {
    final long instanceId = 1;
    final Instance instance = mock(Instance.class);
    when(instance.getId()).thenReturn(instanceId);
    when(instanceService.createInstance(any(Instance.class))).thenReturn(instance);

    instanceConfigAuditUtil.doAudit(someAuditModel);

    // On a cold cache the audit looks up and creates the instance,
    // then looks up and creates its instance config.
    verify(instanceService, times(1)).findInstance(someAppId, someClusterName, someDataCenter, someIp);
    verify(instanceService, times(1)).createInstance(any(Instance.class));
    verify(instanceService, times(1)).findInstanceConfig(instanceId, someConfigAppId, someConfigNamespace);
    verify(instanceService, times(1)).createInstanceConfig(any(InstanceConfig.class));
}
/**
 * Runs the primary function, decorated with a fallback when a matching
 * fallback method can be resolved from the (possibly SpEL-templated) name.
 *
 * @param proceedingJoinPoint the intercepted join point
 * @param method the intercepted method
 * @param fallbackMethodValue configured fallback method name (may be a SpEL expression)
 * @param primaryFunction the primary invocation
 * @return the primary (or fallback-decorated) result
 * @throws Throwable whatever the invocation throws
 */
public Object execute(ProceedingJoinPoint proceedingJoinPoint, Method method, String fallbackMethodValue,
    CheckedSupplier<Object> primaryFunction) throws Throwable {
    final String fallbackMethodName =
        spelResolver.resolve(method, proceedingJoinPoint.getArgs(), fallbackMethodValue);

    FallbackMethod fallbackMethod = null;
    if (StringUtils.hasLength(fallbackMethodName)) {
        try {
            fallbackMethod = FallbackMethod.create(fallbackMethodName, method,
                proceedingJoinPoint.getArgs(), proceedingJoinPoint.getTarget(), proceedingJoinPoint.getThis());
        } catch (NoSuchMethodException ex) {
            // No matching fallback: log and fall through to the plain primary call.
            logger.warn("No fallback method match found", ex);
        }
    }

    return fallbackMethod == null
        ? primaryFunction.get()
        : fallbackDecorators.decorate(fallbackMethod, primaryFunction).get();
}
@Test
public void testPrimaryMethodExecutionWithFallbackNotFound() throws Throwable {
    final Method method = this.getClass().getMethod("getName", String.class);
    final CheckedSupplier<Object> primaryFunction = () -> getName("Name");
    final String fallbackMethodValue = "incorrectFallbackMethodName";

    when(proceedingJoinPoint.getArgs()).thenReturn(new Object[]{});
    when(proceedingJoinPoint.getTarget()).thenReturn(this);
    when(spelResolver.resolve(method, proceedingJoinPoint.getArgs(), fallbackMethodValue)).thenReturn(fallbackMethodValue);
    when(fallbackDecorators.decorate(any(), eq(primaryFunction))).thenReturn(primaryFunction);

    final Object result = fallbackExecutor.execute(proceedingJoinPoint, method, fallbackMethodValue, primaryFunction);

    // The unresolvable fallback is skipped: the primary result comes through undecorated.
    assertThat(result).isEqualTo("Name");
    verify(spelResolver, times(1)).resolve(method, proceedingJoinPoint.getArgs(), fallbackMethodValue);
    verify(fallbackDecorators, never()).decorate(any(), any());
}
/**
 * Builds a compact, RS256-signed JWT (a JWS) from the given claims using the
 * private key configured in the security config ({@code jwtConfig}).
 *
 * @param claims the JWT claims to serialize and sign
 * @return the compact JWS serialization: base64url Header.Payload.Signature
 * @throws JoseException if signing or serialization fails
 * @deprecated callers should use the overload that accepts an explicit kid and key
 */
@Deprecated
public static String getJwt(JwtClaims claims) throws JoseException {
    String jwt;
    RSAPrivateKey privateKey = (RSAPrivateKey) getPrivateKey(
        jwtConfig.getKey().getFilename(),jwtConfig.getKey().getPassword(), jwtConfig.getKey().getKeyName());

    // A JWT is a JWS and/or a JWE with JSON claims as the payload.
    // In this example it is a JWS nested inside a JWE
    // So we first create a JsonWebSignature object.
    JsonWebSignature jws = new JsonWebSignature();

    // The payload of the JWS is JSON content of the JWT Claims
    jws.setPayload(claims.toJson());

    // The JWT is signed using the sender's private key
    jws.setKey(privateKey);

    // Get provider from security config file, it should be two digit
    // And the provider id will set as prefix for keyid in the token header, for example: 05100
    // If no provider id is configured, no prefix is added (the kid is used as-is).
    String provider_id = "";
    if (jwtConfig.getProviderId() != null) {
        provider_id = jwtConfig.getProviderId();
        // Left-pad a single digit to two; truncate anything longer than two digits.
        if (provider_id.length() == 1) {
            provider_id = "0" + provider_id;
        } else if (provider_id.length() > 2) {
            logger.error("provider_id defined in the security.yml file is invalid; the length should be 2");
            provider_id = provider_id.substring(0, 2);
        }
    }
    jws.setKeyIdHeaderValue(provider_id + jwtConfig.getKey().getKid());

    // Set the signature algorithm on the JWT/JWS that will integrity protect the claims
    jws.setAlgorithmHeaderValue(AlgorithmIdentifiers.RSA_USING_SHA256);

    // Sign the JWS and produce the compact serialization, which will be the inner JWT/JWS
    // representation, which is a string consisting of three dot ('.') separated
    // base64url-encoded parts in the form Header.Payload.Signature
    jwt = jws.getCompactSerialization();
    return jwt;
}
@Test
public void sidecarExampleBootstrap() throws Exception {
    // Service-scoped client-credentials claims with a ~10-year expiry.
    final JwtClaims claims = ClaimsUtil.getTestCcClaimsScopeService(
            "f7d42348-c647-4efb-a52d-4c5787421e72", "portal.r portal.w", "example-service");
    claims.setExpirationTimeMinutesInTheFuture(5256000);

    final String token = JwtIssuer.getJwt(claims, long_kid,
            KeyUtil.deserializePrivateKey(long_key, KeyUtil.RSA));
    System.out.println("***Reference Long lived Bootstrap token for config server and controller: " + token);
}
/**
 * Returns the value adjacent to {@code value} for the given orderable type:
 * the predecessor when {@code isPrevious} is true, the successor otherwise.
 * Types without discrete-step support yield {@link Optional#empty()}.
 *
 * @throws IllegalStateException if the type is not orderable
 */
public static Optional<Object> getAdjacentValue(Type type, Object value, boolean isPrevious) {
    if (!type.isOrderable()) {
        throw new IllegalStateException("Type is not orderable: " + type);
    }
    requireNonNull(value, "value is null");

    // Dispatch on the concrete type. The candidates below are mutually
    // exclusive, so the order of the checks does not affect the result.
    if (type.equals(TINYINT)) {
        return getTinyIntAdjacentValue(value, isPrevious);
    }
    else if (type.equals(SMALLINT)) {
        return getSmallIntAdjacentValue(value, isPrevious);
    }
    else if (type.equals(INTEGER) || type.equals(DATE)) {
        return getIntegerAdjacentValue(value, isPrevious);
    }
    else if (type.equals(BIGINT) || type instanceof TimestampType) {
        return getBigintAdjacentValue(value, isPrevious);
    }
    else if (type.equals(REAL)) {
        return getRealAdjacentValue(value, isPrevious);
    }
    else if (type.equals(DOUBLE)) {
        return getDoubleAdjacentValue(value, isPrevious);
    }
    return Optional.empty();
}
@Test
public void testPreviousValueForBigint() {
    long min = Long.MIN_VALUE;
    long max = Long.MAX_VALUE;

    // The minimum has no predecessor.
    assertThat(getAdjacentValue(BIGINT, min, true)).isEqualTo(Optional.empty());

    // Every other value steps down by exactly one, including at the extremes.
    assertThat(getAdjacentValue(BIGINT, min + 1, true)).isEqualTo(Optional.of(min));
    assertThat(getAdjacentValue(BIGINT, 1234L, true)).isEqualTo(Optional.of(1233L));
    assertThat(getAdjacentValue(BIGINT, max - 1, true)).isEqualTo(Optional.of(max - 2));
    assertThat(getAdjacentValue(BIGINT, max, true)).isEqualTo(Optional.of(max - 1));
}
/**
 * Creates the {@link ParDoFn} that executes a user DoFn, reusing a per-step
 * instance pool and special-casing two marker DoFn wrappers that instruct the
 * worker to execute differently (batch stateful execution and streaming
 * side-input materialization).
 *
 * @throws Exception if the DoFn cannot be deserialized or contexts cannot be built
 */
@Override
public ParDoFn create(
    PipelineOptions options,
    CloudObject cloudUserFn,
    @Nullable List<SideInputInfo> sideInputInfos,
    TupleTag<?> mainOutputTag,
    Map<TupleTag<?>, Integer> outputTupleTagsToReceiverIndices,
    DataflowExecutionContext<?> executionContext,
    DataflowOperationContext operationContext)
    throws Exception {
  // One DoFn instance pool per system step name, so repeated create() calls
  // within the same step share deserialized DoFn instances.
  DoFnInstanceManager instanceManager =
      fnCache.get(
          operationContext.nameContext().systemName(),
          () ->
              DoFnInstanceManagers.cloningPool(doFnExtractor.getDoFnInfo(cloudUserFn), options));

  DoFnInfo<?, ?> doFnInfo = instanceManager.peek();

  DataflowExecutionContext.DataflowStepContext stepContext =
      executionContext.getStepContext(operationContext);

  Iterable<PCollectionView<?>> sideInputViews = doFnInfo.getSideInputViews();
  SideInputReader sideInputReader =
      executionContext.getSideInputReader(sideInputInfos, sideInputViews, operationContext);

  if (doFnInfo.getDoFn() instanceof BatchStatefulParDoOverrides.BatchStatefulDoFn) {
    // HACK: BatchStatefulDoFn is a class from DataflowRunner's overrides
    // that just instructs the worker to execute it differently. This will
    // be replaced by metadata in the Runner API payload
    BatchStatefulParDoOverrides.BatchStatefulDoFn fn =
        (BatchStatefulParDoOverrides.BatchStatefulDoFn) doFnInfo.getDoFn();
    DoFn underlyingFn = fn.getUnderlyingDoFn();

    // Wrap a single-instance SimpleParDoFn so the batch runner can ungroup
    // the grouped-by-key input before invoking the stateful DoFn.
    return new BatchModeUngroupingParDoFn(
        (BatchModeExecutionContext.StepContext) stepContext,
        new SimpleParDoFn(
            options,
            DoFnInstanceManagers.singleInstance(doFnInfo.withFn(underlyingFn)),
            sideInputReader,
            doFnInfo.getMainOutput(),
            outputTupleTagsToReceiverIndices,
            stepContext,
            operationContext,
            doFnInfo.getDoFnSchemaInformation(),
            doFnInfo.getSideInputMapping(),
            runnerFactory));

  } else if (doFnInfo.getDoFn() instanceof StreamingPCollectionViewWriterFn) {
    // HACK: StreamingPCollectionViewWriterFn is a class from
    // DataflowPipelineTranslator. Using the class as an indicator is a migration path
    // to simply having an indicator string.
    checkArgument(
        stepContext instanceof StreamingModeExecutionContext.StreamingModeStepContext,
        "stepContext must be a StreamingModeStepContext to use StreamingPCollectionViewWriterFn");

    DataflowRunner.StreamingPCollectionViewWriterFn<Object> writerFn =
        (StreamingPCollectionViewWriterFn<Object>) doFnInfo.getDoFn();

    return new StreamingPCollectionViewWriterParDoFn(
        (StreamingModeExecutionContext.StreamingModeStepContext) stepContext,
        writerFn.getView().getTagInternal(),
        writerFn.getDataCoder(),
        (Coder<BoundedWindow>) doFnInfo.getWindowingStrategy().getWindowFn().windowCoder());
  } else {
    // Common case: a plain user DoFn executed through the pooled instance manager.
    return new SimpleParDoFn(
        options,
        instanceManager,
        sideInputReader,
        doFnInfo.getMainOutput(),
        outputTupleTagsToReceiverIndices,
        stepContext,
        operationContext,
        doFnInfo.getDoFnSchemaInformation(),
        doFnInfo.getSideInputMapping(),
        runnerFactory);
  }
}
/**
 * Verifies that two create() calls for the same step share the pooled DoFn:
 * the first call executes a (cloned) instance to completion, and the second
 * call reuses that same instance without re-running setup.
 */
@Test
public void testFactoryReuseInStep() throws Exception {
    PipelineOptions options = PipelineOptionsFactory.create();
    CounterSet counters = new CounterSet();
    TestDoFn initialFn = new TestDoFn(Collections.<TupleTag<String>>emptyList());
    CloudObject cloudObject = getCloudObject(initialFn);
    TestOperationContext operationContext = TestOperationContext.create(counters);
    ParDoFn parDoFn = factory.create(
        options,
        cloudObject,
        null,
        MAIN_OUTPUT,
        ImmutableMap.<TupleTag<?>, Integer>of(MAIN_OUTPUT, 0),
        BatchModeExecutionContext.forTesting(options, "testStage"),
        operationContext);

    // Run one bundle through the first ParDoFn.
    Receiver rcvr = new OutputReceiver();
    parDoFn.startBundle(rcvr);
    parDoFn.processElement(WindowedValue.valueInGlobalWindow("foo"));

    // The executed fn is a pooled clone, not the serialized original.
    TestDoFn fn = (TestDoFn) ((SimpleParDoFn) parDoFn).getDoFnInfo().getDoFn();
    assertThat(fn, not(theInstance(initialFn)));

    parDoFn.finishBundle();
    assertThat(fn.state, equalTo(TestDoFn.State.FINISHED));

    // The fn should be reused for the second call to create
    ParDoFn secondParDoFn = factory.create(
        options,
        cloudObject,
        null,
        MAIN_OUTPUT,
        ImmutableMap.<TupleTag<?>, Integer>of(MAIN_OUTPUT, 0),
        BatchModeExecutionContext.forTesting(options, "testStage"),
        operationContext);

    // The fn should still be finished from the last call; it should not be set up again
    assertThat(fn.state, equalTo(TestDoFn.State.FINISHED));

    secondParDoFn.startBundle(rcvr);
    secondParDoFn.processElement(WindowedValue.valueInGlobalWindow("spam"));
    TestDoFn reobtainedFn = (TestDoFn) ((SimpleParDoFn) secondParDoFn).getDoFnInfo().getDoFn();
    secondParDoFn.finishBundle();
    assertThat(reobtainedFn.state, equalTo(TestDoFn.State.FINISHED));

    // Both create() calls handed out the very same pooled DoFn instance.
    assertThat(fn, theInstance(reobtainedFn));
}
/**
 * Picks the NAK feedback delay generator: the multicast generator for
 * multicast semantics, a channel-specific static generator when the channel
 * declares its own NAK delay, otherwise the context-wide unicast generator.
 */
private static FeedbackDelayGenerator resolveDelayGenerator(
    final Context ctx,
    final UdpChannel channel,
    final boolean isMulticastSemantics)
{
    if (isMulticastSemantics)
    {
        return ctx.multicastFeedbackDelayGenerator();
    }

    final Long nakDelayNs = channel.nakDelayNs();
    if (null == nakDelayNs)
    {
        return ctx.unicastFeedbackDelayGenerator();
    }

    // The retry delay scales the channel's NAK delay by the configured ratio.
    return new StaticDelayGenerator(nakDelayNs, nakDelayNs * ctx.nakUnicastRetryDelayRatio());
}
@Test
void shouldInferFeedbackGeneratorBasedOnUnicastAddress()
{
    final MediaDriver.Context context = new MediaDriver.Context()
        .multicastFeedbackDelayGenerator(new OptimalMulticastDelayGenerator(10, 10))
        .unicastFeedbackDelayGenerator(new StaticDelayGenerator(10));

    final UdpChannel channel = UdpChannel.parse("aeron:udp?endpoint=192.168.0.1:24326");

    final FeedbackDelayGenerator generator =
        DriverConductor.resolveDelayGenerator(context, channel, InferableBoolean.INFER, (short)0);

    // A plain unicast endpoint must select the unicast generator.
    assertSame(context.unicastFeedbackDelayGenerator(), generator);
}
/**
 * Removes the given role from this security configuration.
 *
 * @param role the role to delete
 * @return {@code true} if the role was present and removed
 */
public boolean deleteRole(Role role) {
    // NOTE(review): delegates straight to the roles collection; any
    // "role does not exist" error observed by callers presumably originates
    // inside rolesConfig.remove — confirm there.
    return rolesConfig.remove(role);
}
/**
 * Deleting a role that was never configured must raise an exception whose
 * message mentions that the role does not exist.
 */
@Test
public void shouldBombIfDeletingARoleWhichDoesNotExist() throws Exception {
    try {
        SecurityConfig securityConfig = security(passwordFileAuthConfig(), admins());
        securityConfig.deleteRole(new RoleConfig(new CaseInsensitiveString("role99")));
        fail("Should have blown up with an exception on the previous line as deleting role99 should blow up");
    } catch (RuntimeException e) {
        // The needle has no regex metacharacters, so a plain substring check
        // replaces the previous Pattern.compile(...).matcher(...).find() dance.
        assertTrue(e.getMessage().contains("does not exist"));
    }
}
/**
 * Runs the CLI with the given arguments and returns the exit code.
 * Exposed only for tests; production code should go through the main entry point.
 *
 * @param args raw command-line arguments
 * @return process exit code produced by the commander
 */
@VisibleForTesting
int execute(String[] args) {
    return commander.execute(args);
}
@Test
public void testGenerateDocs() throws Exception {
    final PrintStream originalOut = System.out;
    try {
        // Capture everything the gen-doc command prints to stdout.
        final ByteArrayOutputStream capturedOut = new ByteArrayOutputStream();
        System.setOut(new PrintStream(capturedOut));

        new TokensCliUtils().execute(new String[]{"gen-doc"});

        // Every token sub-command must appear in the generated documentation.
        final String generatedDoc = capturedOut.toString();
        final String[] commandClasses = {
            TokensCliUtils.CommandCreateSecretKey.class.getName(),
            TokensCliUtils.CommandCreateKeyPair.class.getName(),
            TokensCliUtils.CommandCreateToken.class.getName(),
            TokensCliUtils.CommandShowToken.class.getName(),
            TokensCliUtils.CommandValidateToken.class.getName()
        };
        for (String commandClass : commandClasses) {
            assertInnerClass(commandClass, generatedDoc);
        }
    } finally {
        // Always restore stdout, even if an assertion failed.
        System.setOut(originalOut);
    }
}
/**
 * Builds one {@link KiePMMLRegressionTable} per PMML regression table, keyed
 * by the table's target category (empty string when none), preserving the
 * original table order.
 *
 * @param compilationDTO carries the source regression tables and compilation context
 * @return insertion-ordered map of target category to compiled table
 */
public static LinkedHashMap<String, KiePMMLRegressionTable> getRegressionTables(final RegressionCompilationDTO compilationDTO) {
    logger.trace("getRegressionTables {}", compilationDTO.getRegressionTables());
    final LinkedHashMap<String, KiePMMLRegressionTable> tablesByCategory = new LinkedHashMap<>();
    compilationDTO.getRegressionTables().forEach(regressionTable -> {
        final Object targetCategory = regressionTable.getTargetCategory();
        final String key = targetCategory == null ? "" : targetCategory.toString();
        tablesByCategory.put(key, getRegressionTable(regressionTable, compilationDTO));
    });
    return tablesByCategory;
}
/**
 * Builds a minimal PMML RegressionModel with two regression tables and a
 * categorical target, then verifies that one KiePMMLRegressionTable is
 * produced per source table, keyed by target category.
 */
@Test
void getRegressionTables() {
    // Two tables with distinct intercepts/target categories.
    regressionTable = getRegressionTable(3.5, "professional");
    RegressionTable regressionTable2 = getRegressionTable(3.9, "hobby");
    RegressionModel regressionModel = new RegressionModel();
    regressionModel.setNormalizationMethod(RegressionModel.NormalizationMethod.CAUCHIT);
    regressionModel.addRegressionTables(regressionTable, regressionTable2);
    regressionModel.setModelName(getGeneratedClassName("RegressionModel"));

    // Categorical target field wired through DataDictionary and MiningSchema.
    String targetField = "targetField";
    DataField dataField = new DataField();
    dataField.setName(targetField);
    dataField.setOpType(OpType.CATEGORICAL);
    DataDictionary dataDictionary = new DataDictionary();
    dataDictionary.addDataFields(dataField);
    MiningField miningField = new MiningField();
    miningField.setUsageType(MiningField.UsageType.TARGET);
    miningField.setName(dataField.getName());
    MiningSchema miningSchema = new MiningSchema();
    miningSchema.addMiningFields(miningField);
    regressionModel.setMiningSchema(miningSchema);

    // Assemble the PMML document and the compilation DTOs under test.
    PMML pmml = new PMML();
    pmml.setDataDictionary(dataDictionary);
    pmml.addModels(regressionModel);
    final CommonCompilationDTO<RegressionModel> source =
        CommonCompilationDTO.fromGeneratedPackageNameAndFields(PACKAGE_NAME, pmml, regressionModel,
            new PMMLCompilationContextMock(), "FILENAME");
    final RegressionCompilationDTO compilationDTO =
        RegressionCompilationDTO.fromCompilationDTORegressionTablesAndNormalizationMethod(source,
            regressionModel.getRegressionTables(),
            regressionModel.getNormalizationMethod());

    Map<String, KiePMMLRegressionTable> retrieved =
        KiePMMLRegressionTableFactory.getRegressionTables(compilationDTO);

    // One compiled table per source table, keyed by target category.
    assertThat(retrieved).isNotNull();
    assertThat(retrieved).hasSameSizeAs(regressionModel.getRegressionTables());
    regressionModel.getRegressionTables().forEach(regrTabl -> {
        assertThat(retrieved).containsKey(regrTabl.getTargetCategory().toString());
        commonEvaluateRegressionTable(retrieved.get(regrTabl.getTargetCategory().toString()), regrTabl);
    });
}
/**
 * Fails if the subject is null, expressed as the standard
 * not-equal check against {@code null}.
 */
public void isNotNull() {
    standardIsNotEqualTo(null);
}
@Test
public void isNotNullWhenSubjectForbidsIsEqualTo() {
    // isNotNull() must work even on subjects whose isEqualTo is forbidden,
    // because the null check bypasses the equality path.
    Object subject = new Object();
    assertAbout(objectsForbiddingEqualityCheck()).that(subject).isNotNull();
}