focal_method
string (lengths 13 to 60.9k characters)
test_case
string (lengths 25 to 109k characters)
@Override public Long createGroup(MemberGroupCreateReqVO createReqVO) { // 插入 MemberGroupDO group = MemberGroupConvert.INSTANCE.convert(createReqVO); memberGroupMapper.insert(group); // 返回 return group.getId(); }
@Test public void testCreateGroup_success() { // 准备参数 MemberGroupCreateReqVO reqVO = randomPojo(MemberGroupCreateReqVO.class, o -> o.setStatus(randomCommonStatus())); // 调用 Long groupId = groupService.createGroup(reqVO); // 断言 assertNotNull(groupId); // 校验记录的属性是否正确 MemberGroupDO group = groupMapper.selectById(groupId); assertPojoEquals(reqVO, group); }
/**
 * Runs the given command synchronously and returns its output.
 *
 * @param cmd the command and its arguments
 * @return whatever {@code ShellCommand.run()} produces for this command
 * @throws IOException if executing the command fails
 */
public static String execCommand(String... cmd) throws IOException {
    ShellCommand command = new ShellCommand(cmd);
    return command.run();
}
@Test public void execCommand() throws Exception { String testString = "alluxio"; // Execute echo for testing command execution. String result = ShellUtils.execCommand("bash", "-c", "echo " + testString); assertEquals(testString + "\n", result); }
@Override
public void unregister(URL url) {
    // Fail fast on a null argument.
    if (url == null) {
        throw new IllegalArgumentException("unregister url == null");
    }
    // Only URLs with a concrete port are logged — presumably port 0 marks
    // placeholder/any-port URLs; confirm against registry conventions.
    boolean hasConcretePort = url.getPort() != 0;
    if (hasConcretePort && logger.isInfoEnabled()) {
        logger.info("Unregister: " + url);
    }
    registered.remove(url);
}
@Test void testUnregister() { // test one unregister URL url = new ServiceConfigURL("dubbo", "192.168.0.1", 2200); abstractRegistry.register(url); abstractRegistry.unregister(url); MatcherAssert.assertThat( false, Matchers.equalTo(abstractRegistry.getRegistered().contains(url))); // test multiple unregisters for (URL u : abstractRegistry.getRegistered()) { abstractRegistry.unregister(u); } List<URL> urlList = getList(); for (URL urlSub : urlList) { abstractRegistry.register(urlSub); } for (URL urlSub : urlList) { abstractRegistry.unregister(urlSub); } MatcherAssert.assertThat( 0, Matchers.equalTo(abstractRegistry.getRegistered().size())); }
/**
 * Resolves the probability-map function expression for a normalization method.
 * Rejects methods listed as unsupported before delegating.
 */
static Expression getProbabilityMapFunctionExpression(final RegressionModel.NormalizationMethod normalizationMethod,
                                                      final boolean isBinary) {
    // Guard clause: unsupported methods have no translation.
    if (UNSUPPORTED_NORMALIZATION_METHODS.contains(normalizationMethod)) {
        throw new KiePMMLInternalException(String.format("Unsupported NormalizationMethod %s", normalizationMethod));
    }
    return getProbabilityMapFunctionSupportedExpression(normalizationMethod, isBinary);
}
@Test
void getProbabilityMapFunctionExpressionWithSupportedMethods() {
    // Every supported method must produce the expression captured in the fixture file.
    SUPPORTED_NORMALIZATION_METHODS.forEach(method -> {
        Expression retrieved =
                KiePMMLClassificationTableFactory.getProbabilityMapFunctionExpression(method, false);
        try {
            String text = getFileContent(TEST_01_SOURCE);
            Expression expected =
                    JavaParserUtils.parseExpression(String.format(text, method.name()));
            assertThat(JavaParserUtils.equalsNode(expected, retrieved)).isTrue();
        } catch (IOException e) {
            fail(e.getMessage());
        }
    });
}
@Override
@ManagedOperation(description = "Clear the store")
// Flushes the entire Redis database backing this store.
// NOTE(review): getConnectionFactory() may return null per Spring Data's API —
// confirm the factory is always configured before relying on this chain.
public void clear() {
    redisTemplate.getConnectionFactory().getConnection().flushDb();
}
@Test
public void shouldClearRepository() {
    // Clearing the repository must flush the underlying Redis DB.
    idempotentRepository.clear();
    verify(redisConnection).flushDb();
}
static Tuple<String, Class> splitClassAndName(String classLabel) { String[] stringNameSplit = classLabel.split("@"); // If i don't have a @, then no name is provided, use the class as the name. if (stringNameSplit.length == 1) { try { return new Tuple<>(classLabel, Class.forName(classLabel)); } catch (ClassNotFoundException e) { throw new RuntimeException("Configured class: " + classLabel + " has not been found"); } } else if (stringNameSplit.length > 1) { // Found a @, use that as the name, and try { return new Tuple<>(stringNameSplit[1], Class.forName(stringNameSplit[0])); } catch (ClassNotFoundException e) { throw new RuntimeException("Configured class: " + stringNameSplit[0] + " has not been found. Declared label was: " + stringNameSplit[1]); } } throw new RuntimeException("Invalid format provided for class label: " + classLabel); }
@Test
public void validClassNameWithoutAt_split_returnsCorrect() throws Exception {
    // Without an '@' the class name must be reused as the label name.
    Tuple<String, Class> parsed =
            Handler.splitClassAndName("com.networknt.handler.sample.SampleHttpHandler1");
    Assert.assertEquals("com.networknt.handler.sample.SampleHttpHandler1", parsed.first);
    Assert.assertEquals(
            Class.forName("com.networknt.handler.sample.SampleHttpHandler1"), parsed.second);
}
/**
 * Applies the given field-type profile to each listed index set, optionally
 * cycling (rotating) each updated index set immediately.
 *
 * @param indexSetsIds      ids of the index sets to update
 * @param profileId         profile to apply (validated up front)
 * @param rotateImmediately when true, rotate each successfully updated index set
 */
public void setProfile(final Set<String> indexSetsIds, final String profileId, final boolean rotateImmediately) {
    // Validate the profile and the target index sets before touching anything.
    checkProfile(profileId);
    checkAllIndicesSupportProfileChange(indexSetsIds);

    for (String indexSetId : indexSetsIds) {
        try {
            indexSetService.get(indexSetId).ifPresent(config -> {
                var updatedConfig = setProfileForIndexSet(profileId, config);
                if (rotateImmediately) {
                    updatedConfig.ifPresent(this::cycleIndexSet);
                }
            });
        } catch (Exception ex) {
            // Log with the failing id, then propagate so the caller sees the failure.
            LOG.error("Failed to update field type in index set : " + indexSetId, ex);
            throw ex;
        }
    }
}
@Test
void testSetsProfileIfItIsCorrect() {
    doReturn(Optional.of(existingIndexSet)).when(indexSetService).get("existing_index_set");
    final String profileId = "000000000000000000000007";
    // Stub a valid profile so the set call passes validation.
    IndexFieldTypeProfile profile = new IndexFieldTypeProfile(
            profileId,
            "Nice profile!",
            "Nice profile!",
            new CustomFieldMappings(List.of(new CustomFieldMapping("bubamara", "ip"))));
    doReturn(Optional.of(profile)).when(profileService).get(profileId);

    toTest.setProfile(Set.of(existingIndexSet.id()), profileId, false);

    // The config must be saved with the profile attached, and no rotation triggered.
    verify(mongoIndexSetService).save(
            existingIndexSet.toBuilder()
                    .fieldTypeProfile(profileId)
                    .build());
    verifyNoInteractions(existingMongoIndexSet);
}
@Override
// Dimensionality of this distribution's samples: always a single value.
public int length() {
    return 1;
}
@Test
public void testLength() {
    System.out.println("length");
    GeometricDistribution instance = new GeometricDistribution(0.3);
    // Drawing a sample must not change the reported dimensionality.
    instance.rand();
    assertEquals(1, instance.length());
}
@Override
// Delegates to executeForBranch for the root of the current component tree;
// the step context is not used by this implementation.
public void execute(ComputationStep.Context context) {
    executeForBranch(treeRootHolder.getRoot());
}
@Test
public void no_more_used_event_if_qp_is_removed() {
    // Register a profile as REMOVED and check a single "stop using" event is emitted.
    QualityProfile qp = qp(QP_NAME_1, LANGUAGE_KEY_1, new Date());
    qProfileStatusRepository.register(qp.getQpKey(), REMOVED);
    mockQualityProfileMeasures(treeRootHolder.getRoot(), arrayOf(qp), null);
    Language language = mockLanguageInRepository(LANGUAGE_KEY_1);

    underTest.execute(new TestComputationStepContext());

    verify(eventRepository).add(eventArgumentCaptor.capture());
    verifyNoMoreInteractions(eventRepository);
    verifyEvent(
            eventArgumentCaptor.getValue(),
            "Stop using \"" + qp.getQpName() + "\" (" + language.getName() + ")",
            null,
            null);
}
public static QuickTextPagerView createQuickTextView( Context context, ViewGroup parent, QuickKeyHistoryRecords quickKeyHistoryRecords, DefaultSkinTonePrefTracker defaultSkinTonePrefTracker, DefaultGenderPrefTracker defaultGenderPrefTracker) { LayoutInflater inflater = LayoutInflater.from(context); QuickTextPagerView rootView = (QuickTextPagerView) inflater.inflate(R.layout.quick_text_popup_root_view, parent, false); // hard setting the height - this should be the same height as the standard keyboard ViewGroup.LayoutParams params = rootView.getLayoutParams(); params.height = parent.getHeight(); rootView.setLayoutParams(params); rootView.setQuickKeyHistoryRecords(quickKeyHistoryRecords); rootView.setEmojiVariantsPrefTrackers(defaultSkinTonePrefTracker, defaultGenderPrefTracker); return rootView; }
@Test
public void testCreateQuickTextView() throws Exception {
    LinearLayout parentLayout = new LinearLayout(getApplicationContext());
    // Build the view against a fresh parent; width comes from the inflated layout.
    QuickTextPagerView view =
            QuickTextViewFactory.createQuickTextView(
                    getApplicationContext(),
                    parentLayout,
                    new QuickKeyHistoryRecords(prefs(getApplicationContext())),
                    mock(DefaultSkinTonePrefTracker.class),
                    mock(DefaultGenderPrefTracker.class));
    Assert.assertNotNull(view);
    Assert.assertEquals(ViewGroup.LayoutParams.MATCH_PARENT, view.getLayoutParams().width);
}
/**
 * Deletes the segment directory on its last known tier (or the default data
 * dir when no tier path was recorded), then clears the local tier bookkeeping.
 */
@Override
public void delete(SegmentDirectoryLoaderContext segmentLoaderContext) throws Exception {
    final String segmentName = segmentLoaderContext.getSegmentName();
    final String[] lastTierPath = getSegmentTierPersistedLocally(segmentName, segmentLoaderContext);
    // Index 1 holds the persisted data-dir path; fall back to the default dir if absent.
    final File lastDataDir = (lastTierPath[1] != null)
            ? new File(lastTierPath[1])
            : getDefaultDataDir(segmentLoaderContext);
    if (lastDataDir.exists()) {
        FileUtils.deleteQuietly(lastDataDir);
        LOGGER.info("Deleted segment directory {} on last known tier: {}", lastDataDir,
                TierConfigUtils.normalizeTierName(lastTierPath[0]));
    }
    deleteSegmentTierPersistedLocally(segmentName, segmentLoaderContext);
}
@Test public void testDeleteSegmentOnDefaultTier() throws Exception { TierBasedSegmentDirectoryLoader loader = new TierBasedSegmentDirectoryLoader(); SegmentDirectoryLoaderContext loaderCtx = new SegmentDirectoryLoaderContext.Builder().setSegmentName("seg01") .setTableDataDir(TEMP_DIR.getAbsolutePath() + "/" + TABLE_NAME_WITH_TYPE).build(); // When segDir is on the default tier. File tableDataDir = new File(TEMP_DIR.getAbsolutePath(), TABLE_NAME_WITH_TYPE); File segDataDir = new File(tableDataDir, "seg01"); FileUtils.touch(segDataDir); loader.delete(loaderCtx); assertFalse(segDataDir.exists()); }
@Override
// Handles END_TRANSACTION requests from producers: validates broker role and
// transaction state, then commits or rolls back the prepared half-message.
// Returns null to deliberately send no response (pending/unknown states).
public RemotingCommand processRequest(ChannelHandlerContext ctx, RemotingCommand request) throws RemotingCommandException {
    final RemotingCommand response = RemotingCommand.createResponseCommand(null);
    final EndTransactionRequestHeader requestHeader =
        (EndTransactionRequestHeader) request.decodeCommandCustomHeader(EndTransactionRequestHeader.class);
    LOGGER.debug("Transaction request:{}", requestHeader);
    // Slaves never finalize transactions; reject outright.
    if (BrokerRole.SLAVE == brokerController.getMessageStoreConfig().getBrokerRole()) {
        response.setCode(ResponseCode.SLAVE_NOT_AVAILABLE);
        LOGGER.warn("Message store is slave mode, so end transaction is forbidden. ");
        return response;
    }
    if (requestHeader.getFromTransactionCheck()) {
        // Request originated from a broker-initiated transaction-state check.
        switch (requestHeader.getCommitOrRollback()) {
            case MessageSysFlag.TRANSACTION_NOT_TYPE: {
                // Still pending: no response is sent back.
                LOGGER.warn("Check producer[{}] transaction state, but it's pending status."
                    + "RequestHeader: {} Remark: {}",
                    RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
                    requestHeader.toString(),
                    request.getRemark());
                return null;
            }
            case MessageSysFlag.TRANSACTION_COMMIT_TYPE: {
                LOGGER.warn("Check producer[{}] transaction state, the producer commit the message."
                    + "RequestHeader: {} Remark: {}",
                    RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
                    requestHeader.toString(),
                    request.getRemark());
                break;
            }
            case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE: {
                LOGGER.warn("Check producer[{}] transaction state, the producer rollback the message."
                    + "RequestHeader: {} Remark: {}",
                    RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
                    requestHeader.toString(),
                    request.getRemark());
                break;
            }
            default:
                return null;
        }
    } else {
        // Request originated from the producer's own end-transaction call.
        switch (requestHeader.getCommitOrRollback()) {
            case MessageSysFlag.TRANSACTION_NOT_TYPE: {
                LOGGER.warn("The producer[{}] end transaction in sending message, and it's pending status."
                    + "RequestHeader: {} Remark: {}",
                    RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
                    requestHeader.toString(),
                    request.getRemark());
                return null;
            }
            case MessageSysFlag.TRANSACTION_COMMIT_TYPE: {
                break;
            }
            case MessageSysFlag.TRANSACTION_ROLLBACK_TYPE: {
                LOGGER.warn("The producer[{}] end transaction in sending message, rollback the message."
                    + "RequestHeader: {} Remark: {}",
                    RemotingHelper.parseChannelRemoteAddr(ctx.channel()),
                    requestHeader.toString(),
                    request.getRemark());
                break;
            }
            default:
                return null;
        }
    }
    OperationResult result = new OperationResult();
    if (MessageSysFlag.TRANSACTION_COMMIT_TYPE == requestHeader.getCommitOrRollback()) {
        result = this.brokerController.getTransactionalMessageService().commitMessage(requestHeader);
        if (result.getResponseCode() == ResponseCode.SUCCESS) {
            // Commits arriving before the immunity window expires are refused.
            if (rejectCommitOrRollback(requestHeader, result.getPrepareMessage())) {
                response.setCode(ResponseCode.ILLEGAL_OPERATION);
                LOGGER.warn("Message commit fail [producer end]. currentTimeMillis - bornTime > checkImmunityTime, msgId={},commitLogOffset={}, wait check",
                    requestHeader.getMsgId(), requestHeader.getCommitLogOffset());
                return response;
            }
            RemotingCommand res = checkPrepareMessage(result.getPrepareMessage(), requestHeader);
            if (res.getCode() == ResponseCode.SUCCESS) {
                // Rebuild the real message from the half-message and clear its prepared flag.
                MessageExtBrokerInner msgInner = endMessageTransaction(result.getPrepareMessage());
                msgInner.setSysFlag(MessageSysFlag.resetTransactionValue(msgInner.getSysFlag(), requestHeader.getCommitOrRollback()));
                msgInner.setQueueOffset(requestHeader.getTranStateTableOffset());
                msgInner.setPreparedTransactionOffset(requestHeader.getCommitLogOffset());
                msgInner.setStoreTimestamp(result.getPrepareMessage().getStoreTimestamp());
                MessageAccessor.clearProperty(msgInner, MessageConst.PROPERTY_TRANSACTION_PREPARED);
                RemotingCommand sendResult = sendFinalMessage(msgInner);
                if (sendResult.getCode() == ResponseCode.SUCCESS) {
                    this.brokerController.getTransactionalMessageService().deletePrepareMessage(result.getPrepareMessage());
                    // successful committed, then total num of half-messages minus 1
                    this.brokerController.getTransactionalMessageService().getTransactionMetrics().addAndGet(msgInner.getTopic(), -1);
                    BrokerMetricsManager.commitMessagesTotal.add(1, BrokerMetricsManager.newAttributesBuilder()
                        .put(LABEL_TOPIC, msgInner.getTopic())
                        .build());
                    // record the commit latency.
                    Long commitLatency = (System.currentTimeMillis() - result.getPrepareMessage().getBornTimestamp()) / 1000;
                    BrokerMetricsManager.transactionFinishLatency.record(commitLatency, BrokerMetricsManager.newAttributesBuilder()
                        .put(LABEL_TOPIC, msgInner.getTopic())
                        .build());
                }
                return sendResult;
            }
            return res;
        }
    } else if (MessageSysFlag.TRANSACTION_ROLLBACK_TYPE == requestHeader.getCommitOrRollback()) {
        result = this.brokerController.getTransactionalMessageService().rollbackMessage(requestHeader);
        if (result.getResponseCode() == ResponseCode.SUCCESS) {
            if (rejectCommitOrRollback(requestHeader, result.getPrepareMessage())) {
                response.setCode(ResponseCode.ILLEGAL_OPERATION);
                LOGGER.warn("Message rollback fail [producer end]. currentTimeMillis - bornTime > checkImmunityTime, msgId={},commitLogOffset={}, wait check",
                    requestHeader.getMsgId(), requestHeader.getCommitLogOffset());
                return response;
            }
            RemotingCommand res = checkPrepareMessage(result.getPrepareMessage(), requestHeader);
            if (res.getCode() == ResponseCode.SUCCESS) {
                // Rollback only needs to drop the half-message; nothing is re-sent.
                this.brokerController.getTransactionalMessageService().deletePrepareMessage(result.getPrepareMessage());
                // roll back, then total num of half-messages minus 1
                this.brokerController.getTransactionalMessageService().getTransactionMetrics().addAndGet(result.getPrepareMessage().getProperty(MessageConst.PROPERTY_REAL_TOPIC), -1);
                BrokerMetricsManager.rollBackMessagesTotal.add(1, BrokerMetricsManager.newAttributesBuilder()
                    .put(LABEL_TOPIC, result.getPrepareMessage().getProperty(MessageConst.PROPERTY_REAL_TOPIC))
                    .build());
            }
            return res;
        }
    }
    response.setCode(result.getResponseCode());
    response.setRemark(result.getResponseRemark());
    return response;
}
@Test
public void testProcessRequest_NotType() throws RemotingCommandException {
    // A TRANSACTION_NOT_TYPE request from a transaction check must yield no response.
    final RemotingCommand request =
            createEndTransactionMsgCommand(MessageSysFlag.TRANSACTION_NOT_TYPE, true);
    final RemotingCommand response = endTransactionProcessor.processRequest(handlerContext, request);
    assertThat(response).isNull();
}
@Override
public boolean acquirePermission(final int permits) {
    // Snapshot the configured timeout, then reserve permits with back-off.
    final long timeoutInNanos = state.get().config.getTimeoutDuration().toNanos();
    final State updatedState = updateStateWithBackOff(permits, timeoutInNanos);
    // Block the caller if the reservation lies in the future, within the timeout.
    final boolean acquired = waitForPermissionIfNecessary(timeoutInNanos, updatedState.nanosToWait);
    publishRateLimiterAcquisitionEvent(acquired, permits);
    return acquired;
}
@Test
// Verifies that interrupting a thread parked on acquirePermission() makes it
// return false with the interrupt flag set, without releasing its reservation.
public void waitingThreadIsInterrupted() throws Exception {
    setup(Duration.ofNanos(CYCLE_IN_NANOS));
    setTimeOnNanos(CYCLE_IN_NANOS);
    // Consume the only permit of this cycle.
    boolean permission = rateLimiter.acquirePermission();
    then(permission).isTrue();
    then(metrics.getAvailablePermissions()).isZero();
    then(metrics.getNanosToWait()).isEqualTo(CYCLE_IN_NANOS);
    then(metrics.getNumberOfWaitingThreads()).isZero();
    AtomicReference<Boolean> reservedPermission = new AtomicReference<>(null);
    AtomicBoolean wasInterrupted = new AtomicBoolean(false);
    // Second acquirer must park until the next cycle.
    Thread caller = new Thread(
        () -> {
            reservedPermission.set(rateLimiter.acquirePermission());
            wasInterrupted.set(Thread.currentThread().isInterrupted());
        }
    );
    caller.setDaemon(true);
    caller.start();
    awaitImpatiently()
        .atMost(5, SECONDS)
        .until(caller::getState, equalTo(Thread.State.TIMED_WAITING));
    // The waiter borrowed against the next cycle: permissions go negative.
    then(metrics.getAvailablePermissions()).isEqualTo(-1);
    then(metrics.getNanosToWait()).isEqualTo(CYCLE_IN_NANOS * 2);
    then(metrics.getNumberOfWaitingThreads()).isEqualTo(1);
    caller.interrupt();
    awaitImpatiently()
        .atMost(5, SECONDS)
        .until(reservedPermission::get, equalTo(false));
    // Interrupt flag must survive, and the reservation stays booked.
    then(wasInterrupted.get()).isTrue();
    then(metrics.getAvailablePermissions()).isEqualTo(-1);
    then(metrics.getNanosToWait()).isEqualTo(CYCLE_IN_NANOS * 2);
    then(metrics.getNumberOfWaitingThreads()).isZero();
}
/**
 * One duty-cycle pass: timers, client commands, queued commands, stream
 * positions, name resolution, and end-of-life resource cleanup.
 *
 * @return total number of work items processed this pass
 */
public int doWork() {
    final long nowNs = nanoClock.nanoTime();
    trackTime(nowNs);

    int workCount = processTimers(nowNs);
    // Do not read new client commands while an async command is still in flight.
    if (!asyncClientCommandInFlight) {
        workCount += clientCommandAdapter.receive();
    }
    workCount += drainCommandQueue();
    workCount += trackStreamPositions(workCount, nowNs);
    workCount += nameResolver.doWork(cachedEpochClock.time());
    workCount += freeEndOfLifeResources(ctx.resourceFreeLimit());
    return workCount;
}
@Test
void shouldErrorWhenConflictingDefaultReliableSubscriptionAdded() {
    // First subscription pins reliable=false on the channel.
    driverProxy.addSubscription(CHANNEL_4000 + "|reliable=false", STREAM_ID_1);
    driverConductor.doWork();
    // A default-reliability subscription on the same channel/stream must be rejected.
    final long conflictingId = driverProxy.addSubscription(CHANNEL_4000, STREAM_ID_1);
    driverConductor.doWork();
    verify(mockClientProxy).onError(eq(conflictingId), any(ErrorCode.class), anyString());
}
// Accessor for the raw header value whose meaning is undocumented —
// presumably the field at offset 0x24 of the ITSP header (name suggests; confirm).
public int getUnknown_0024() {
    return unknown_0024;
}
@Test
// The parsed header field must match the expected fixture value.
public void testGetUnknown_0024() {
    assertEquals(TestParameters.VP_ITSP_UNKNOWN_0024, chmItspHeader.getUnknown_0024());
}
@Override
// Synchronous wrapper: blocks on the async geo-add and returns its result —
// presumably the number of newly added elements (Redis GEOADD semantics; confirm).
public long add(double longitude, double latitude, V member) {
    return get(addAsync(longitude, latitude, member));
}
@Test
public void testAddEntries() {
    RGeo<String> geo = redisson.getGeo("test");
    // Adding two distinct entries must report exactly two additions.
    long added = geo.add(
            new GeoEntry(3.11, 9.10321, "city1"),
            new GeoEntry(81.1231, 38.65478, "city2"));
    assertThat(added).isEqualTo(2);
}
@Override
// Thin delegation: fetches every Maintenance Domain from the distributed store.
public Collection<MaintenanceDomain> getAllMaintenanceDomain() {
    log.debug("Retrieving all MDs from distributed store");
    return store.getAllMaintenanceDomain();
}
@Test
public void testGetAllMaintenanceDomain() {
    Collection<MaintenanceDomain> domains = service.getAllMaintenanceDomain();
    assertEquals(1, domains.size());

    MaintenanceDomain domain = domains.iterator().next();
    assertEquals(1, domain.mdNumericId());
    assertEquals(2, domain.maintenanceAssociationList().size());
    // Each MA name in the fixture is expected to end with its numeric id.
    domain.maintenanceAssociationList().iterator().forEachRemaining(ma ->
            assertTrue(ma.maId().maName().endsWith(String.valueOf(ma.maNumericId())))
    );
}
/**
 * Asserts that the actual value is not an element of the given iterable.
 *
 * @param iterable candidates the subject must not equal; must itself be non-null
 */
public void isNotIn(@Nullable Iterable<?> iterable) {
    checkNotNull(iterable);
    boolean contained = Iterables.contains(iterable, actual);
    if (contained) {
        failWithActual("expected not to be any of", iterable);
    }
}
@Test
public void isNotInNullFailure() {
    // A null subject contained in the iterable must fail the assertion.
    expectFailure
            .whenTesting()
            .that((String) null)
            .isNotIn(oneShotIterable("a", "b", (String) null));
}
// Convenience overload: delegates with the default CSV mapper factory and a
// schema provider that ignores the mapper and always returns the given schema.
public static <T> CsvReaderFormat<T> forSchema(
        CsvSchema schema, TypeInformation<T> typeInformation) {
    return forSchema(JacksonMapperFactory::createCsvMapper, ignored -> schema, typeInformation);
}
@Test void testForSchemaWithMapperSerializabilityWithUnserializableMapper() throws IOException, ClassNotFoundException { final CsvReaderFormat<Pojo> format = CsvReaderFormat.forSchema( () -> { final CsvMapper csvMapper = new CsvMapper(); // this module is not serializable csvMapper.registerModule(new JavaTimeModule()); return csvMapper; }, mapper -> mapper.schemaFor(Pojo.class), TypeInformation.of(Pojo.class)); final byte[] bytes = InstantiationUtil.serializeObject(format); InstantiationUtil.deserializeObject(bytes, CsvReaderFormatTest.class.getClassLoader()); }
/**
 * Builds a compound-assignment template node, rejecting tree kinds that are
 * not compound assignment operators.
 */
public static UAssignOp create(UExpression variable, Kind operator, UExpression expression) {
    boolean isCompoundAssignment = TAG.containsKey(operator);
    checkArgument(
        isCompoundAssignment,
        "Tree kind %s does not represent a compound assignment operator",
        operator);
    return new AutoValue_UAssignOp(variable, operator, expression);
}
@Test
public void equality() {
    // Nodes differing in either operand must land in distinct equality groups.
    new EqualsTester()
        .addEqualityGroup(
            UAssignOp.create(UFreeIdent.create("x"), Kind.PLUS_ASSIGNMENT, UFreeIdent.create("y")))
        .addEqualityGroup(
            UAssignOp.create(UFreeIdent.create("x"), Kind.PLUS_ASSIGNMENT, ULiteral.intLit(2)))
        .addEqualityGroup(
            UAssignOp.create(UFreeIdent.create("y"), Kind.PLUS_ASSIGNMENT, ULiteral.intLit(2)))
        .testEquals();
}
// Converts the accumulated raw meters into sample families and feeds them to
// every registered MetricConvert; conversion failures are logged, not rethrown.
public void process() {
    // Check agent information
    // NOTE(review): StringUtils vs StringUtil below are two different utility
    // classes — presumably both intentionally imported; confirm.
    if (StringUtils.isEmpty(service) || StringUtil.isEmpty(serviceInstance) || timestamp == null) {
        return;
    }
    // Get all meter builders.
    final List<MetricConvert> converts = processService.converts();
    if (CollectionUtils.isEmpty(converts)) {
        return;
    }
    try {
        // Build one SampleFamily per meter name from all its accumulated samples.
        converts.forEach(convert -> convert.toMeter(meters.entrySet().stream().collect(toImmutableMap(
            Map.Entry::getKey,
            v -> SampleFamilyBuilder.newBuilder(
                v.getValue().stream().map(s -> s.build(service, serviceInstance, timestamp)).toArray(Sample[]::new)
            ).defaultHistogramBucketUnit(TimeUnit.MILLISECONDS).build()
        ))));
    } catch (Exception e) {
        // Best-effort: a bad batch must not break meter processing as a whole.
        log.warn("Process meters failure.", e);
    }
}
@Test public void testProcess() { AtomicReference<AvgHistogramFunction> data = new AtomicReference<>(); doAnswer(invocationOnMock -> { if (AvgHistogramFunction.class.isAssignableFrom(invocationOnMock.getArgument(0).getClass())) { data.set(invocationOnMock.getArgument(0)); } return null; }).when(meterSystem).doStreamingCalculation(any()); processor.read(MeterData.newBuilder() .setService(service) .setServiceInstance(serviceInstance) .setTimestamp(System.currentTimeMillis()) .setHistogram(MeterHistogram.newBuilder() .setName("test_histogram") .addValues(MeterBucketValue.newBuilder().setIsNegativeInfinity(true).setCount(10).build()) .addValues(MeterBucketValue.newBuilder().setBucket(0).setCount(20).build()) .addValues(MeterBucketValue.newBuilder().setBucket(10).setCount(10).build()) .build()) .build()); processor.process(); // verify data final AvgHistogramFunction func = data.get(); final DataTable summation = new DataTable(); summation.put(Bucket.INFINITE_NEGATIVE, 10L); summation.put("0", 20L); summation.put("10", 10L); Assertions.assertEquals(summation, func.getSummation()); final DataTable count = new DataTable(); count.put(Bucket.INFINITE_NEGATIVE, 1L); count.put("0", 1L); count.put("10", 1L); Assertions.assertEquals(count, func.getCount()); }
/**
 * Records "&lt;unix-seconds&gt; &lt;filename&gt;\n" into the log-file meta file.
 *
 * @param filename log file now being written; must not contain a newline
 * @return true when the metadata was written, false on I/O failure
 * @throws IllegalArgumentException if filename contains a newline
 */
public static boolean nowLoggingTo(String filename) {
    if (filename.contains("\n")) {
        throw new IllegalArgumentException("Cannot use filename with newline: "+filename);
    }
    long nowSeconds = System.currentTimeMillis() / 1000;
    byte[] data = ("" + nowSeconds + " " + filename + "\n").getBytes(UTF_8);
    try (OutputStream out = metaFile()) {
        out.write(data);
    } catch (java.io.IOException e) {
        // Best effort: report and carry on rather than break logging setup.
        System.err.println("Saving meta-data about logfile "+filename+" failed: "+e);
        return false;
    }
    return true;
}
@Test
public void canSave() {
    System.err.println("VH: "+System.getenv("VESPA_HOME"));
    // Start from a clean DB directory and extra-logs directory.
    File dbDir = new File(getDefaults().underVespaHome(LogFileDb.DBDIR));
    assertTrue(!dbDir.exists() || IOUtils.recursiveDeleteDir(dbDir));
    System.err.println("using directory: "+dbDir);
    File extraDir = new File(getDefaults().underVespaHome("logs/extra"));
    assertTrue(!extraDir.exists() || IOUtils.recursiveDeleteDir(extraDir));

    // Two different filenames must both be recordable.
    String logName = getDefaults().underVespaHome("logs/extra/foo-bar.log");
    assertTrue(LogFileDb.nowLoggingTo(logName));
    logName = getDefaults().underVespaHome("logs/extra/stamped-1.log");
    assertTrue(LogFileDb.nowLoggingTo(logName));
}
// Shows the LEAVE confirmation dialog via the dialog controller.
public void searchForAddOns() {
    mDialogController.showDialog(LEAVE);
}
@Test
public void testCancelHappyPath() {
    Application context = ApplicationProvider.getApplicationContext();
    ShadowApplication shadowApplication = Shadows.shadowOf(context);
    final AddOnStoreSearchController underTest = new AddOnStoreSearchController(context, "add on");

    underTest.searchForAddOns();

    // The leave dialog must be shown with its expected title.
    var leaveDialog = GeneralDialogTestUtil.getLatestShownDialog();
    Assert.assertEquals(
            "Leaving AnySoftKeyboard", GeneralDialogTestUtil.getTitleFromDialog(leaveDialog));

    // Pressing the negative button dismisses the dialog and starts no activity.
    var negativeButton = leaveDialog.getButton(DialogInterface.BUTTON_NEGATIVE);
    Shadows.shadowOf(negativeButton).getOnClickListener().onClick(negativeButton);
    Assert.assertSame(
            GeneralDialogTestUtil.NO_DIALOG, GeneralDialogTestUtil.getLatestShownDialog());
    Assert.assertNull(shadowApplication.getNextStartedActivity());
}
/**
 * Registers a configuration key, rejecting duplicate names and recording any
 * new group the key introduces.
 *
 * @return this ConfigDef, for chaining
 * @throws ConfigException if a key with the same name was already defined
 */
public ConfigDef define(ConfigKey key) {
    if (configKeys.containsKey(key.name)) {
        throw new ConfigException("Configuration " + key.name + " is defined twice.");
    }
    boolean introducesNewGroup = key.group != null && !groups.contains(key.group);
    if (introducesNewGroup) {
        groups.add(key.group);
    }
    configKeys.put(key.name, key);
    return this;
}
@Test
public void testNoExceptionIsThrownWhenListSizeEqualsTheLimit() {
    // A default list exactly at the size limit must validate cleanly.
    final List<String> defaults = asList("a", "b", "c");
    assertDoesNotThrow(() -> new ConfigDef().define(
            "lst",
            Type.LIST,
            defaults,
            ListSize.atMostOfSize(defaults.size()),
            Importance.HIGH,
            "lst doc"));
}
// Delegates to the strategy-aware overload; tasks are grouped per subtopology
// in TreeMap key order so the resulting list is deterministic.
static <T> RackAwareGraphConstructor<T> create(final AssignmentConfigs assignmentConfigs,
    final Map<Subtopology, Set<TaskId>> tasksForTopicGroup) {
    return create(assignmentConfigs.rackAwareAssignmentStrategy(),
        new ArrayList<>(new TreeMap<>(tasksForTopicGroup).values()));
}
@Test
public void shouldReturnBalanceSubtopologyConstructor() {
    // The balance-subtopology strategy must map to its dedicated constructor type.
    final AssignmentConfigs config = new AssignorConfiguration(
            new StreamsConfig(configProps(StreamsConfig.RACK_AWARE_ASSIGNMENT_STRATEGY_BALANCE_SUBTOPOLOGY))
                    .originals())
            .assignmentConfigs();
    final RackAwareGraphConstructor constructor =
            RackAwareGraphConstructorFactory.create(config, mkMap());
    assertThat(constructor, instanceOf(BalanceSubtopologyGraphConstructor.class));
}
@Override
public CiConfiguration loadConfiguration() {
    // Read the commit SHA exported by the CI environment.
    final String revision = system.envVariable(PROPERTY_COMMIT);
    if (isEmpty(revision)) {
        LoggerFactory.getLogger(getClass()).warn("Missing environment variable " + PROPERTY_COMMIT);
    }
    return new CiConfigurationImpl(revision, getName());
}
@Test
public void configuration_of_branch() {
    // The SCM revision must be read from CIRRUS_CHANGE_IN_REPO.
    setEnvVariable("CIRRUS_CHANGE_IN_REPO", "abd12fc");
    assertThat(underTest.loadConfiguration().getScmRevision()).hasValue("abd12fc");
}
@VisibleForTesting
// Builds a diagnostic snapshot of a database's read-write lock: exclusive
// owner OR shared holders (mode, grant flag, start/hold times, holder threads)
// plus the current waiter queue serialized as JSON.
protected static TFeLocksItem resolveLockInfo(Database db) {
    var lock = db.getRwLock();
    TFeLocksItem lockItem = new TFeLocksItem();
    lockItem.setLock_type("DATABASE");
    lockItem.setLock_object(db.getFullName());
    Thread owner = lock.getOwner();
    Set<Thread> sharedLockThreads = lock.getSharedLockThreads();
    long currentTime = System.currentTimeMillis();
    if (owner != null) {
        // An exclusive owner wins: report EXCLUSIVE even if readers are recorded.
        lockItem.setLock_mode("EXCLUSIVE");
        lockItem.setGranted(true);
        JsonObject ownerInfo = new JsonObject();
        ownerInfo.addProperty("threadId", owner.getId());
        ownerInfo.addProperty("threadName", owner.getName());
        lockItem.setThread_info(ownerInfo.toString());
        // wait start
        long lockStartTime = lock.getExclusiveLockStartTimeMs();
        lockItem.setStart_time(lockStartTime);
        lockItem.setHold_time_ms(currentTime - lockStartTime);
    } else if (CollectionUtils.isNotEmpty(sharedLockThreads)) {
        lockItem.setLock_mode("SHARED");
        lockItem.setGranted(true);
        // lock start — earliest positive acquisition time among all readers.
        long lockStart = SetUtils.emptyIfNull(sharedLockThreads).stream()
            .map(lock::getSharedLockStartTimeMs)
            .filter(x -> x > 0)
            .min(Comparator.naturalOrder()).orElse(0L);
        lockItem.setStart_time(lockStart);
        lockItem.setHold_time_ms(currentTime - lockStart);
        // thread info
        JsonArray sharedLockInfo = new JsonArray();
        for (Thread thread : SetUtils.emptyIfNull(sharedLockThreads)) {
            JsonObject lockInfo = new JsonObject();
            lockInfo.addProperty("threadId", thread.getId());
            lockInfo.addProperty("threadName", thread.getName());
            sharedLockInfo.add(lockInfo);
        }
        lockItem.setThread_info(sharedLockInfo.toString());
    } else {
        // Nobody holds the lock right now.
        lockItem.setGranted(false);
    }
    // waiters
    lockItem.setWaiter_list(LockChecker.getLockWaiterInfoJsonArray(lock.getQueuedThreads()).toString());
    return lockItem;
}
@Test
// Exercises resolveLockInfo against all three lock states: unlocked,
// exclusively held (with a queued waiter), and shared (with a queued waiter).
public void testResolveLockItem() throws InterruptedException {
    Config.lock_manager_enabled = false;
    Database db = new Database(1, "test_lock");
    // empty lock
    {
        TFeLocksItem item = SysFeLocks.resolveLockInfo(db);
        assertEquals("TFeLocksItem(lock_type:DATABASE, lock_object:test_lock, granted:false, waiter_list:[])",
            item.toString());
    }
    // exclusive owner
    {
        Locker locker = new Locker();
        locker.lockDatabase(db, LockType.WRITE);
        TFeLocksItem item = SysFeLocks.resolveLockInfo(db);
        assertEquals("EXCLUSIVE", item.getLock_mode());
        assertTrue(item.isGranted());
        assertTrue(item.getStart_time() > 0);
        assertTrue(item.getHold_time_ms() >= 0);
        assertEquals("[]", item.getWaiter_list());

        // add a waiter
        Thread waiter = new Thread(() -> {
            locker.lockDatabase(db, LockType.WRITE);
            locker.unLockDatabase(db, LockType.WRITE);
        }, "waiter");
        waiter.start();
        // Poll until the waiter has actually parked on the lock.
        while (waiter.getState() != State.WAITING) {
            Thread.sleep(1000);
        }
        item = SysFeLocks.resolveLockInfo(db);
        assertEquals(String.format("[{\"threadId\":%d,\"threadName\":\"%s\"}]",
            waiter.getId(), waiter.getName()), item.getWaiter_list());
        locker.unLockDatabase(db, LockType.WRITE);
    }
    // shared lock
    {
        Locker locker = new Locker();
        locker.lockDatabase(db, LockType.READ);
        TFeLocksItem item = SysFeLocks.resolveLockInfo(db);
        assertEquals("SHARED", item.getLock_mode());
        assertTrue(item.isGranted());
        assertTrue(item.getStart_time() > 0);
        assertTrue(item.getHold_time_ms() >= 0);
        assertEquals("[]", item.getWaiter_list());

        // add a waiter
        Thread waiter = new Thread(() -> {
            locker.lockDatabase(db, LockType.WRITE);
            locker.unLockDatabase(db, LockType.WRITE);
        }, "waiter");
        waiter.start();
        // 1. start waiter (blocked)
        // 2. waiter acquired the write lock
        // 3. two threads share the lock
        // 4. two threads release the lock
        while (waiter.getState() != State.WAITING) {
            Thread.sleep(1000);
        }
        item = SysFeLocks.resolveLockInfo(db);
        assertEquals(String.format("[{\"threadId\":%d,\"threadName\":\"%s\"}]",
            waiter.getId(), waiter.getName()), item.getWaiter_list());
        locker.unLockDatabase(db, LockType.READ);
    }
    Config.lock_manager_enabled = true;
}
/**
 * Shrinks this log segment to {@code targetSize} bytes.
 *
 * @param targetSize desired size; must be within [0, current size]
 * @return number of bytes removed
 * @throws KafkaException if targetSize is negative or exceeds the current size
 * @throws IOException on channel failures
 */
public int truncateTo(int targetSize) throws IOException {
    final int originalSize = sizeInBytes();
    final boolean invalidTarget = targetSize > originalSize || targetSize < 0;
    if (invalidTarget) {
        throw new KafkaException("Attempt to truncate log segment " + file + " to " + targetSize + " bytes failed, " +
            " size of this log segment is " + originalSize + " bytes.");
    }
    // Only touch the channel when it is actually longer than the target.
    if (targetSize < (int) channel.size()) {
        channel.truncate(targetSize);
        size.set(targetSize);
    }
    return originalSize - targetSize;
}
@Test public void testTruncateNotCalledIfSizeIsBiggerThanTargetSize() throws IOException { FileChannel channelMock = mock(FileChannel.class); when(channelMock.size()).thenReturn(42L); FileRecords fileRecords = new FileRecords(tempFile(), channelMock, 0, Integer.MAX_VALUE, false); try { fileRecords.truncateTo(43); fail("Should throw KafkaException"); } catch (KafkaException e) { // expected } verify(channelMock, atLeastOnce()).size(); }
/**
 * Returns the list's contents as a {@code long[]}. When the backing array has
 * no spare capacity it is returned directly (no copy); otherwise a trimmed
 * copy is produced.
 */
public static long[] toLongArray(LongArrayList longArrayList) {
    long[] backing = longArrayList.elements();
    if (backing.length == longArrayList.size()) {
        // Exactly full: hand out the backing array without copying.
        return backing;
    }
    return longArrayList.toLongArray();
}
@Test public void testToLongArray() { // Test empty list LongArrayList longArrayList = new LongArrayList(); long[] longArray = ArrayListUtils.toLongArray(longArrayList); assertEquals(longArray.length, 0); // Test list with one element longArrayList.add(1L); longArray = ArrayListUtils.toLongArray(longArrayList); assertEquals(longArray.length, 1); assertEquals(longArray[0], 1L); // Test list with multiple elements longArrayList.add(2L); longArrayList.add(3L); longArray = ArrayListUtils.toLongArray(longArrayList); assertEquals(longArray.length, 3); assertEquals(longArray[0], 1L); assertEquals(longArray[1], 2L); assertEquals(longArray[2], 3L); }
/**
 * Creates a temporary file under {@code dir} with the given name prefix and suffix.
 *
 * @param dir    directory the file is created in; must already exist
 * @param prefix filename prefix
 * @param suffix filename suffix (e.g. an extension)
 * @return the created file
 * @throws IOException if the file cannot be created
 */
public static File createTmpFile(String dir, String prefix, String suffix) throws IOException {
    final java.nio.file.Path parentDir = Paths.get(dir);
    return Files.createTempFile(parentDir, prefix, suffix).toFile();
}
@Test
void testCreateTmpFile() throws IOException {
    File tmpFile = null;
    try {
        tmpFile = DiskUtils.createTmpFile("nacos1", ".ut");
        // The generated name must carry the requested prefix and suffix.
        assertTrue(tmpFile.getName().startsWith("nacos1"));
        assertTrue(tmpFile.getName().endsWith(".ut"));
    } finally {
        // Clean up the temp file when the JVM exits.
        if (tmpFile != null) {
            tmpFile.deleteOnExit();
        }
    }
}
/**
 * Translates HTTP/1.x message headers into HTTP/2 pseudo-headers plus regular headers.
 * For requests it derives :path, :scheme, :authority and :method; for responses :status.
 *
 * @param in              the HTTP/1.x message (request or response)
 * @param validateHeaders whether the resulting Http2Headers validates entries
 * @return the converted header set
 */
public static Http2Headers toHttp2Headers(HttpMessage in, boolean validateHeaders) {
    HttpHeaders inHeaders = in.headers();
    final Http2Headers out = new DefaultHttp2Headers(validateHeaders, inHeaders.size());
    if (in instanceof HttpRequest) {
        HttpRequest request = (HttpRequest) in;
        String host = inHeaders.getAsString(HttpHeaderNames.HOST);
        if (isOriginForm(request.uri()) || isAsteriskForm(request.uri())) {
            // origin-form ("/path") or asterisk-form ("*"): the URI is the path as-is.
            out.path(new AsciiString(request.uri()));
            setHttp2Scheme(inHeaders, out);
        } else {
            // absolute-form: parse the URI to extract path, authority and scheme.
            URI requestTargetUri = URI.create(request.uri());
            out.path(toHttp2Path(requestTargetUri));
            // Take from the request-line if HOST header was empty
            host = isNullOrEmpty(host) ? requestTargetUri.getAuthority() : host;
            setHttp2Scheme(inHeaders, requestTargetUri, out);
        }
        setHttp2Authority(host, out);
        out.method(request.method().asciiName());
    } else if (in instanceof HttpResponse) {
        HttpResponse response = (HttpResponse) in;
        out.status(response.status().codeAsText());
    }

    // Add the HTTP headers which have not been consumed above
    toHttp2Headers(inHeaders, out);
    return out;
}
@Test
public void cookieTailSemicolon() {
    // A cookie value ending in ';' must be carried over verbatim and not split
    // into separate crumbs during the HTTP/1 -> HTTP/2 header conversion.
    final HttpHeaders inHeaders = new DefaultHttpHeaders();
    inHeaders.add(COOKIE, "one=foo;");
    final Http2Headers out = new DefaultHttp2Headers();
    HttpConversionUtil.toHttp2Headers(inHeaders, out);
    assertEquals("one=foo;", out.get(COOKIE)); // not split
}
/**
 * Trims the record according to the given projection mask, delegating to the
 * DataMap-based overload with the template's underlying data and schema.
 *
 * @param recordTemplate record whose backing data is trimmed in place
 * @param override       projection mask (semantics per the DataMap overload)
 * @param failOnMismatch whether a mask/data mismatch raises an error
 */
public static void trimRecordTemplate(RecordTemplate recordTemplate, MaskTree override, final boolean failOnMismatch) {
    trimRecordTemplate(recordTemplate.data(), recordTemplate.schema(), override, failOnMismatch);
}
@Test (expectedExceptions = UnsupportedOperationException.class)
public void testReadOnly() {
    // Trimming a record whose backing DataMap is read-only must fail, since
    // trimming mutates the data in place.
    RecordBar bar = new RecordBar();
    bar.setLocation("mountain view");
    bar.data().put("SF", "CA");
    bar.data().makeReadOnly();
    RestUtils.trimRecordTemplate(bar, false);
}
/**
 * Creates a meter instruction for the given meter id.
 *
 * @param meterId id of the meter to apply; must not be null
 * @return a new MeterInstruction wrapping the id
 * @throws NullPointerException if {@code meterId} is null
 */
public static MeterInstruction meterTraffic(final MeterId meterId) {
    checkNotNull(meterId, "meter id cannot be null");
    return new MeterInstruction(meterId);
}
@Test
public void testMeterTrafficMethod() {
    // The factory must produce a METER instruction carrying the given meter id.
    final Instruction instruction = Instructions.meterTraffic(meterId1);
    final Instructions.MeterInstruction meterInstruction =
            checkAndConvert(instruction,
                    Instruction.Type.METER,
                    Instructions.MeterInstruction.class);
    assertThat(meterInstruction.meterId(), is(meterId1));
}
/**
 * Returns the connection properties parsed from the driver URI.
 * Note: exposes the internal Properties object directly.
 */
public Properties getProperties() {
    return properties;
}
@Test
public void testUriWithQueryInterceptors() throws SQLException {
    // The queryInterceptors URI parameter must surface under the
    // QUERY_INTERCEPTORS key in the parsed connection properties.
    String queryInterceptor = TestForUriQueryInterceptor.class.getName();
    PrestoDriverUri parameters = createDriverUri("presto://localhost:8080?queryInterceptors=" + queryInterceptor);
    Properties properties = parameters.getProperties();
    assertEquals(properties.getProperty(QUERY_INTERCEPTORS.getKey()), queryInterceptor);
}
/**
 * Sets a (possibly nested) property on a bean via a BeanPath expression,
 * e.g. {@code "codeList[0].name"}.
 *
 * @param bean       target bean or container
 * @param expression BeanPath expression locating the property
 * @param value      value to assign
 */
public static void setProperty(Object bean, String expression, Object value) {
    BeanPath.create(expression).set(bean, value);
}
@Test
public void setPropertiesTest() {
    // A nested path on an empty map must create the intermediate list and map.
    final Map<String, Object> resultMap = MapUtil.newHashMap();
    BeanUtil.setProperty(resultMap, "codeList[0].name", "张三");
    assertEquals("{codeList=[{name=张三}]}", resultMap.toString());
}
/**
 * Boxed-Integer overload; unboxes and delegates to the primitive add.
 * Throws NullPointerException if {@code value} is null (due to unboxing).
 */
public boolean add(final Integer value) {
    return add(value.intValue());
}
@Test
void addingAnElementTwiceDoesNothing() {
    // Set semantics: first insert reports a change, duplicate insert does not.
    assertTrue(testSet.add(1));
    assertFalse(testSet.add(1));
}
/**
 * Learns a bag-of-words vocabulary from the given text columns.
 * Word occurrences are counted across all columns, sorted by descending
 * frequency, and the top {@code k} words become the features.
 *
 * @param data      data frame containing the text columns
 * @param tokenizer splits a document into words
 * @param k         maximum vocabulary size
 * @param columns   names of the text columns to scan
 * @return the fitted model (constructed with the binary flag set to false)
 */
public static BagOfWords fit(DataFrame data, Function<String, String[]> tokenizer, int k, String... columns) {
    HashMap<String, Integer> words = new HashMap<>();
    for (var column : columns) {
        for (var text : data.column(column).toStringArray()) {
            for (var word : tokenizer.apply(text)) {
                words.merge(word, 1, Integer::sum);
            }
        }
    }

    String[] features = new String[words.size()];
    int[] count = new int[words.size()];
    int i = 0;
    // Iterate entries once instead of keySet + get (avoids a second lookup per word).
    // Counts are negated so that an ascending sort yields descending frequency.
    for (var entry : words.entrySet()) {
        features[i] = entry.getKey();
        count[i++] = -entry.getValue();
    }
    QuickSort.sort(count, features);

    return new BagOfWords(columns, tokenizer, Arrays.copyOf(features, Math.min(k, features.length)), false);
}
@Test
public void testFit() throws Exception {
    // Previously all exceptions were caught and printed, so any I/O or parse
    // failure made the test pass vacuously. Let failures propagate instead.
    System.out.println("fit");
    DataFrame data = Read.arff(Paths.getTestData("weka/string.arff"));
    System.out.println(data);
    BagOfWords bag = BagOfWords.fit(data, tokenizer, 10, "LCSH");
    DataFrame df = bag.apply(data);
    System.out.println(df);
    // The transform keeps all rows and produces exactly k feature columns.
    assertEquals(data.nrow(), df.nrow());
    assertEquals(10, df.ncol());
    assertEquals(10, bag.features().length);
    assertEquals("--", bag.features()[0]);
    assertEquals("Union", bag.features()[9]);
    for (int i = 0; i < 10; i++) {
        var feature = bag.features()[i];
        System.out.println(feature);
    }
}
/**
 * Decodes a protobuf payload into the given row via the generated static
 * decode method (invoked reflectively).
 *
 * @param payload     serialized protobuf bytes
 * @param destination row to populate; the method's return value replaces it
 * @return the populated row
 * @throws RuntimeException wrapping any reflective invocation failure
 */
@Nullable
@Override
public GenericRow decode(byte[] payload, GenericRow destination) {
    try {
        destination = (GenericRow) _decodeMethod.invoke(null, payload, destination);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
    return destination;
}
@Test
public void testComplexClass() throws Exception {
    // Decode a message containing nested/complex fields and verify the decoded
    // row matches both the protobuf view and the original input record.
    ProtoBufCodeGenMessageDecoder messageDecoder = setupDecoder("complex_types.jar",
        "org.apache.pinot.plugin.inputformat.protobuf.ComplexTypes$TestMessage", getSourceFieldsForComplexType());
    Map<String, Object> inputRecord = createComplexTypeRecord();
    GenericRow destination = new GenericRow();
    messageDecoder.decode(getComplexTypeObject(inputRecord).toByteArray(), destination);
    ComplexTypes.TestMessage msg = getComplexTypeObject(inputRecord);
    // Every entry of the nested map must round-trip field by field.
    msg.getComplexMapMap().forEach((k, v) -> {
        v.getAllFields().forEach((fieldDescriptor, value) -> {
            assertEquals(
                ((Map<String, Object>) ((Map<String, Object>) destination.getValue("complex_map"))
                    .get(k)).get(fieldDescriptor.getName()), value);
        });
    });
    // All configured source fields must be present in the decoded row.
    for (String col : getSourceFieldsForComplexType()) {
        assertNotNull(destination.getValue(col));
    }
    // Scalar "field" columns must equal the input values exactly.
    for (String col : getSourceFieldsForComplexType()) {
        if (col.contains("field")) {
            assertEquals(destination.getValue(col), inputRecord.get(col));
        }
    }
}
/**
 * Maps a protobuf NamenodeRoleProto to the corresponding NamenodeRole.
 *
 * @param role protobuf role value
 * @return the matching NamenodeRole, or null for an unrecognized value
 */
public static NamenodeRole convert(NamenodeRoleProto role) {
    switch (role) {
    case NAMENODE:
        return NamenodeRole.NAMENODE;
    case BACKUP:
        return NamenodeRole.BACKUP;
    case CHECKPOINT:
        return NamenodeRole.CHECKPOINT;
    }
    return null;
}
@Test
public void testConvertBlocksWithLocations() {
    // Exercise the proto round trip with the flag both off and on.
    // (Enhanced for-loop replaces the index loop over the boolean fixture array.)
    for (boolean flag : new boolean[]{false, true}) {
        BlockWithLocations[] list = new BlockWithLocations[]{
            getBlockWithLocations(1, flag),
            getBlockWithLocations(2, flag)};
        BlocksWithLocations locs = new BlocksWithLocations(list);
        BlocksWithLocationsProto locsProto = PBHelper.convert(locs);
        BlocksWithLocations locs2 = PBHelper.convert(locsProto);
        BlockWithLocations[] blocks = locs.getBlocks();
        BlockWithLocations[] blocks2 = locs2.getBlocks();
        assertEquals(blocks.length, blocks2.length);
        for (int j = 0; j < blocks.length; j++) {
            compare(blocks[j], blocks2[j]);
        }
    }
}
/**
 * Aggregates with default (unspecified) serdes by delegating to the
 * Materialized-accepting overload.
 */
@Override
public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer) {
    return aggregate(initializer, Materialized.with(null, null));
}
@Test
public void shouldNotHaveNullNamedOnAggregate() {
    // Passing a null Named argument must be rejected eagerly.
    assertThrows(NullPointerException.class, () -> windowedCogroupedStream.aggregate(MockInitializer.STRING_INIT,
        null, Materialized.as("test")));
}
@ApiOperation(value = "Delete Tenant (deleteTenant)",
        notes = "Deletes the tenant, it's customers, rule chains, devices and all other related entities. Referencing non-existing tenant Id will cause an error." + SYSTEM_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAuthority('SYS_ADMIN')")
@RequestMapping(value = "/tenant/{tenantId}", method = RequestMethod.DELETE)
@ResponseStatus(value = HttpStatus.OK)
public void deleteTenant(@Parameter(description = TENANT_ID_PARAM_DESCRIPTION) @PathVariable(TENANT_ID) String strTenantId) throws Exception {
    // Validate presence of the path variable before parsing it as a UUID.
    checkParameter(TENANT_ID, strTenantId);
    TenantId tenantId = TenantId.fromUUID(toUUID(strTenantId));
    // Also enforces that the tenant exists and the caller may DELETE it.
    Tenant tenant = checkTenantId(tenantId, Operation.DELETE);
    tbTenantService.delete(tenant);
}
@Test
public void testDeleteTenant() throws Exception {
    // After deletion the tenant must no longer be retrievable (404 with message).
    loginSysAdmin();
    Tenant tenant = new Tenant();
    tenant.setTitle("My tenant");
    Tenant savedTenant = saveTenant(tenant);
    String tenantIdStr = savedTenant.getId().getId().toString();
    deleteTenant(savedTenant.getId());
    doGet("/api/tenant/" + tenantIdStr)
        .andExpect(status().isNotFound())
        .andExpect(statusReason(containsString(msgErrorNoFound("Tenant", tenantIdStr))));
}
/**
 * Writes a single byte at the current position, growing the buffer chunk-wise.
 * Advances the write pointer and extends the logical size when writing past
 * the current end.
 *
 * @param b the byte to write (low 8 bits used)
 * @throws IOException if the buffer has been closed
 */
@Override
public void write(int b) throws IOException {
    checkClosed();
    // Allocate a new chunk when the current one is full.
    if (chunkSize - currentBufferPointer <= 0) {
        expandBuffer();
    }
    currentBuffer.put((byte) b);
    currentBufferPointer++;
    pointer++;
    // Writing beyond the previous end grows the logical size.
    if (pointer > size) {
        size = pointer;
    }
}
@Test
void testEOFBugInSeek() throws IOException {
    // Regression test: repeatedly write two chunks, seek back to 0 and then to
    // the end; the position accounting must stay consistent across chunks.
    try (RandomAccess randomAccessRwedWrite = new RandomAccessReadWriteBuffer()) {
        byte[] bytes = new byte[RandomAccessReadBuffer.DEFAULT_CHUNK_SIZE_4KB];
        for (int i = 0; i < NUM_ITERATIONS; i++) {
            long p0 = randomAccessRwedWrite.getPosition();
            randomAccessRwedWrite.write(bytes);
            long p1 = randomAccessRwedWrite.getPosition();
            assertEquals(RandomAccessReadBuffer.DEFAULT_CHUNK_SIZE_4KB, p1 - p0);
            randomAccessRwedWrite.write(bytes);
            long p2 = randomAccessRwedWrite.getPosition();
            assertEquals(RandomAccessReadBuffer.DEFAULT_CHUNK_SIZE_4KB, p2 - p1);
            randomAccessRwedWrite.seek(0);
            randomAccessRwedWrite.seek(i * 2 * RandomAccessReadBuffer.DEFAULT_CHUNK_SIZE_4KB);
        }
    }
}
/**
 * Returns this extension's constant identifier.
 */
@Override
@NonNull
public String getId() {
    return ID;
}
@Test
public void shouldFailOnValidation2() throws Exception {
    // Creating a git pipeline without an scmConfig must return 400 with a
    // MISSING error on the scmConfig field, and no item must be created.
    User user = login();
    this.jwtToken = getJwtToken(j.jenkins, user.getId(), user.getId());
    Map<String,Object> resp = post("/organizations/" + getOrgName() + "/pipelines/",
        MapsHelper.of("name", "demo",
            "$class", "io.jenkins.blueocean.blueocean_git_pipeline.GitPipelineCreateRequest"
        ), 400);
    assertEquals(resp.get("code"), 400);
    List<Map> errors = (List<Map>) resp.get("errors");
    assertEquals("scmConfig", errors.get(0).get("field"));
    assertEquals("MISSING", errors.get(0).get("code"));
    assertNull(getOrgRoot().getItem("demo"));
}
/**
 * Dispatches a websocket config payload to the handler registered for its group.
 *
 * @param type      config group selecting the registered handler
 * @param json      raw config payload
 * @param eventType event name (e.g. CREATE/UPDATE/DELETE)
 * @throws NullPointerException with a descriptive message if no handler is
 *         registered for {@code type} (previously a bare NPE)
 */
public void executor(final ConfigGroupEnum type, final String json, final String eventType) {
    java.util.Objects.requireNonNull(ENUM_MAP.get(type),
            "No data handler registered for config group: " + type)
        .handle(json, eventType);
}
@Test
public void testPluginDeleteExecutor() {
    // A PLUGIN/DELETE event must unsubscribe every plugin parsed from the payload.
    String json = getJson();
    websocketDataHandler.executor(ConfigGroupEnum.PLUGIN, json, DataEventTypeEnum.DELETE.name());
    List<PluginData> pluginDataList = new PluginDataHandler(pluginDataSubscriber).convert(json);
    pluginDataList.forEach(verify(pluginDataSubscriber)::unSubscribe);
}
/**
 * Returns the NETCONF session identifier assigned at session establishment.
 */
@Override
public String getSessionId() {
    return sessionID;
}
@Test
public void testLockRequestWithChunkedFraming() {
    // Session must have an id and accept a lock request over chunked framing.
    log.info("Starting lock async");
    assertNotNull("Incorrect sessionId", session3.getSessionId());
    try {
        assertTrue("NETCONF lock request failed", session3.lock());
    } catch (NetconfException e) {
        e.printStackTrace();
        fail("NETCONF lock test failed: " + e.getMessage());
    }
    log.info("Finishing lock async");
}
/**
 * Issues an asynchronous HTTP GET and delivers the deserialized response to
 * the callback.
 *
 * @param url          target URL
 * @param header       request headers
 * @param query        query parameters
 * @param responseType type the response body is deserialized into
 * @param callback     receives the result or failure
 */
public <T> void get(String url, Header header, Query query, Type responseType, Callback<T> callback) {
    execute(url, HttpMethod.GET, new RequestHttpEntity(header, query), responseType, callback);
}
@Test
void testGet() throws Exception {
    // The facade must delegate to the underlying client with method "GET"
    // and pass the callback through unchanged.
    restTemplate.get(TEST_URL, Header.EMPTY, Query.EMPTY, String.class, mockCallback);
    verify(requestClient).execute(any(), eq("GET"), any(), any(), eq(mockCallback));
}
/**
 * Builds a flame graph from the sample, including threads in every state.
 *
 * @param sample thread info statistics to visualize
 * @return the flame graph covering all thread states
 */
public static VertexFlameGraph createFullFlameGraphFrom(VertexThreadInfoStats sample) {
    // "Full" means no thread-state filtering: include every Thread.State.
    return createFlameGraphFromSample(sample, EnumSet.allOf(Thread.State.class));
}
@Test
void testLambdaClassNamesCleanUp() {
    // The generated flame graph must contain cleaned-up lambda frames; the test
    // guards itself by failing if no lambda frames were present at all.
    Map<ExecutionAttemptID, Collection<ThreadInfoSample>> samplesBySubtask = generateSamples();
    VertexThreadInfoStats sample = new VertexThreadInfoStats(0, 0, 0, samplesBySubtask);
    VertexFlameGraph graph = VertexFlameGraphFactory.createFullFlameGraphFrom(sample);
    int encounteredLambdas = verifyRecursively(graph.getRoot());
    if (encounteredLambdas == 0) {
        fail("No lambdas encountered in the test, cleanup functionality was not tested");
    }
}
/**
 * Stops a deployed app: looks up its entry in Solr by id, marks the service
 * spec STOPPED and submits the stop request to YARN.
 * Returns 400 if the service spec cannot be serialized to JSON.
 */
@Path("stop/{id}")
@POST
@Produces(MediaType.APPLICATION_JSON)
public Response stopApp(@PathParam("id") String id) {
    AppCatalogSolrClient sc = new AppCatalogSolrClient();
    AppEntry app = sc.findAppEntry(id);
    Service yarnApp = app.getYarnfile();
    yarnApp.setState(ServiceState.STOPPED);
    try {
        YarnServiceClient yc = new YarnServiceClient();
        yc.stopApp(yarnApp);
    } catch (JsonProcessingException e) {
        return Response.status(Status.BAD_REQUEST).build();
    }
    return Response.ok().build();
}
@Test
void testStopApp() throws Exception {
    // NOTE(review): this mocks the controller under test and stubs stopApp(),
    // so it only verifies the stub, not the real controller logic — consider
    // testing the real AppDetailsController with mocked collaborators instead.
    String id = "application 1";
    AppDetailsController ac = Mockito.mock(AppDetailsController.class);
    Service yarnfile = new Service();
    Component comp = new Component();
    Container c = new Container();
    c.setId("container-1");
    List<Container> containers = new ArrayList<Container>();
    containers.add(c);
    comp.setContainers(containers);
    yarnfile.addComponent(comp);
    Response expected = Response.ok().build();
    when(ac.stopApp(id)).thenReturn(Response.ok().build());
    final Response actual = ac.stopApp(id);
    assertEquals(expected.getStatus(), actual.getStatus());
}
/**
 * Validates request headers and updates the resource context, delegating to
 * the overload with a fresh (empty) RequestContext.
 *
 * @param headers                  incoming request headers
 * @param customMimeTypesSupported additional MIME types the resource supports
 * @param resourceContext          context updated with the negotiated response type
 */
public static void validateRequestHeadersAndUpdateResourceContext(final Map<String, String> headers,
    final Set<String> customMimeTypesSupported, ServerResourceContext resourceContext) {
    validateRequestHeadersAndUpdateResourceContext(headers, customMimeTypesSupported, resourceContext,
        new RequestContext());
}
@Test()
public void testValidateRequestHeadersWithValidAcceptHeaderAndNoMatch() throws Exception {
    // A syntactically valid Accept header with no supported match must yield
    // 406 NOT ACCEPTABLE and leave the response MIME type unset.
    Map<String, String> headers = new HashMap<>();
    headers.put("Accept", "text/html");
    ServerResourceContext resourceContext = new ResourceContextImpl();
    try {
        RestUtils.validateRequestHeadersAndUpdateResourceContext(headers, Collections.emptySet(), resourceContext);
        Assert.fail();
    } catch (RestLiServiceException e) {
        Assert.assertEquals(e.getStatus(), HttpStatus.S_406_NOT_ACCEPTABLE);
        Assert.assertEquals(e.getMessage(),
            "None of the types in the request's 'Accept' header are supported. "
                + "Supported MIME types are: "
                + RestConstants.SUPPORTED_MIME_TYPES + "[]");
        Assert.assertEquals(resourceContext.getResponseMimeType(), null);
    }
}
/**
 * Deserializes the stream into the given type, using a cached per-type reader
 * when available and clearing the global string-interning cache afterwards.
 *
 * @param type         target type
 * @param entityStream source stream
 * @return the deserialized value
 * @throws IOException on read or mapping failure
 */
public <T> T readValue(Class<T> type, InputStream entityStream) throws IOException {
    // Prefer a pre-registered reader for the type; otherwise build one on demand.
    ObjectReader reader = DeserializerStringCache.init(
        Optional.ofNullable(objectReaderByClass.get(type)).map(Supplier::get).orElseGet(()->mapper.readerFor(type))
    );
    try {
        return reader.readValue(entityStream);
    } finally {
        // Always release the cache, even if deserialization fails.
        DeserializerStringCache.clear(reader, CacheScope.GLOBAL_SCOPE);
    }
}
@Test public void testApplicationsXStreamEncodeJacksonDecode() throws Exception { Applications original = APPLICATIONS; // Encode ByteArrayOutputStream captureStream = new ByteArrayOutputStream(); new EntityBodyConverter().write(original, captureStream, MediaType.APPLICATION_JSON_TYPE); byte[] encoded = captureStream.toByteArray(); String encodedString = new String(encoded); // Decode InputStream source = new ByteArrayInputStream(encoded); Applications decoded = codec.readValue(Applications.class, source); assertTrue(EurekaEntityComparators.equal(decoded, original)); }
/**
 * Slices the next serialized buffer out of the given memory, advancing the
 * ByteBuffer's position past it. Returns null when the memory is exhausted.
 *
 * @param memory memory containing zero or more [header|payload] records
 * @return the next buffer, or null if no data remains
 */
@Nullable
static Buffer sliceNextBuffer(ByteBuffer memory) {
    final int remaining = memory.remaining();

    // we only check the correct case where data is exhausted
    // all other cases can only occur if our write logic is wrong and will already throw
    // buffer underflow exceptions which will cause the read to fail.
    if (remaining == 0) {
        return null;
    }

    final BufferHeader header = parseBufferHeader(memory);

    // Temporarily narrow the limit to the payload end so slice() covers exactly
    // this buffer's bytes, then restore position/limit for the next read.
    memory.limit(memory.position() + header.getLength());
    ByteBuffer buf = memory.slice();
    memory.position(memory.limit());
    memory.limit(memory.capacity());

    MemorySegment memorySegment = MemorySegmentFactory.wrapOffHeapMemory(buf);
    return new NetworkBuffer(
        memorySegment,
        FreeingBufferRecycler.INSTANCE,
        header.getDataType(),
        header.isCompressed(),
        header.getLength());
}
@Test
void readFromEmptyByteBuffer() {
    // A buffer with no remaining bytes must yield null (end of data), not throw.
    final ByteBuffer memory = ByteBuffer.allocateDirect(100);
    memory.position(memory.limit());
    final Buffer result = BufferReaderWriterUtil.sliceNextBuffer(memory);
    assertThat(result).isNull();
}
/**
 * Merges sharded DQL query results. A single result with no aggregation
 * rewrite streams through unchanged; otherwise the results are merged
 * according to the select statement and then decorated (e.g. pagination).
 *
 * @return the merged (and possibly decorated) result
 * @throws SQLException on result access failure
 */
@Override
public MergedResult merge(final List<QueryResult> queryResults, final SQLStatementContext sqlStatementContext,
                          final ShardingSphereDatabase database, final ConnectionContext connectionContext) throws SQLException {
    // Fast path: one shard and no aggregation rewrite needed.
    if (1 == queryResults.size() && !isNeedAggregateRewrite(sqlStatementContext)) {
        return new IteratorStreamMergedResult(queryResults);
    }
    Map<String, Integer> columnLabelIndexMap = getColumnLabelIndexMap(queryResults.get(0));
    SelectStatementContext selectStatementContext = (SelectStatementContext) sqlStatementContext;
    selectStatementContext.setIndexes(columnLabelIndexMap);
    MergedResult mergedResult = build(queryResults, selectStatementContext, columnLabelIndexMap, database);
    return decorate(queryResults, selectStatementContext, mergedResult);
}
@Test
void assertBuildOrderByStreamMergedResultWithSQLServerLimit() {
    // An ORDER BY plus TOP/ROW_NUMBER limit on SQLServer must produce a
    // TopAndRowNumber decorator wrapping an order-by stream merge.
    final ShardingDQLResultMerger resultMerger = new ShardingDQLResultMerger(TypedSPILoader.getService(DatabaseType.class, "SQLServer"));
    ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
    when(database.getSchema(DefaultDatabase.LOGIC_NAME)).thenReturn(mock(ShardingSphereSchema.class));
    SQLServerSelectStatement selectStatement = (SQLServerSelectStatement) buildSelectStatement(new SQLServerSelectStatement());
    selectStatement.setOrderBy(new OrderBySegment(0, 0, Collections.singletonList(new IndexOrderByItemSegment(0, 0, 1, OrderDirection.DESC, NullsOrderType.FIRST))));
    selectStatement.setProjections(new ProjectionsSegment(0, 0));
    selectStatement.setLimit(new LimitSegment(0, 0, new NumberLiteralRowNumberValueSegment(0, 0, 1L, true), null));
    SelectStatementContext selectStatementContext = new SelectStatementContext(createShardingSphereMetaData(database),
        Collections.emptyList(), selectStatement, DefaultDatabase.LOGIC_NAME, Collections.emptyList());
    MergedResult actual = resultMerger.merge(createQueryResults(), selectStatementContext, createSQLServerDatabase(), mock(ConnectionContext.class));
    assertThat(actual, instanceOf(TopAndRowNumberDecoratorMergedResult.class));
    assertThat(((TopAndRowNumberDecoratorMergedResult) actual).getMergedResult(), instanceOf(OrderByStreamMergedResult.class));
}
/**
 * Records the URI properties reported by one of the two load balancers for a
 * cluster and recomputes the per-cluster and aggregate match statistics.
 *
 * @param clusterName cluster the properties belong to
 * @param property    reported properties; null removes that LB's entry
 * @param fromNewLb   true if reported by the new LB, false for the old LB
 */
public synchronized void reportData(String clusterName, UriProperties property, boolean fromNewLb) {
    ClusterMatchRecord cluster = _clusters.computeIfAbsent(clusterName, k -> new ClusterMatchRecord());

    if (fromNewLb) {
        cluster._newLb = property;
    } else {
        cluster._oldLb = property;
    }
    // Back out this cluster's previous contribution before recomputing.
    _totalUris -= cluster._uris;
    _matchedUris -= cluster._matched;

    LOG.debug("Updated URI properties for cluster {}:\nOld LB: {}\nNew LB: {}", clusterName, cluster._oldLb, cluster._newLb);

    // Both sides gone: drop the cluster entirely.
    if (cluster._oldLb == null && cluster._newLb == null) {
        _clusters.remove(clusterName);
        updateJmxMetrics(clusterName, null);
        return;
    }

    cluster._matched = 0;
    // Only one side present: count its URIs, nothing can match yet.
    if (cluster._oldLb == null || cluster._newLb == null) {
        LOG.debug("Added new URI properties for {} for {} LB.", clusterName, fromNewLb ? "New" : "Old");
        cluster._uris = (cluster._oldLb == null) ? cluster._newLb.Uris().size() : cluster._oldLb.Uris().size();
        _totalUris += cluster._uris;
        updateJmxMetrics(clusterName, cluster);
        return;
    }

    cluster._uris = cluster._oldLb.Uris().size();
    Set<URI> newLbUris = new HashSet<>(cluster._newLb.Uris());
    // Compare each URI present on both sides; count property-level matches.
    for (URI uri : cluster._oldLb.Uris()) {
        if (!newLbUris.remove(uri)) {
            continue;
        }

        if (compareURI(uri, cluster._oldLb, cluster._newLb)) {
            cluster._matched++;
        }
    }
    // add the remaining unmatched URIs in newLbUris to the uri count
    cluster._uris += newLbUris.size();

    if (cluster._matched != cluster._uris) {
        infoOrDebugIfLimited(
            "Mismatched uri properties for cluster {} (match score: {}, total uris: {}):\nOld LB: {}\nNew LB: {}",
            clusterName, (double) cluster._matched / (double) cluster._uris, cluster._uris, cluster._oldLb, cluster._newLb);
    } else {
        LOG.debug("Matched uri properties for cluster {} (matched {} out of {} URIs)", clusterName, cluster._matched, cluster._uris);
    }

    _totalUris += cluster._uris;
    _matchedUris += cluster._matched;
    updateJmxMetrics(clusterName, cluster);
}
@Test
public void testReportData() {
    // Walks the monitor through a sequence of old/new LB reports across two
    // clusters and checks the evolving match score after each step.
    UriPropertiesDualReadMonitorTestFixture fixture = new UriPropertiesDualReadMonitorTestFixture();
    UriPropertiesDualReadMonitor monitor = fixture.getMonitor();

    // new lb has uri 1
    monitor.reportData(CLUSTER_1, URI_PROPERTIES_1, true);
    verifyJmxMetricParams(fixture, CLUSTER_1, new ClusterMatchRecord(null, URI_PROPERTIES_1, 1, 0), 0.0);

    // old lb has uri 2
    monitor.reportData(CLUSTER_1, URI_PROPERTIES_2, false);
    verifyJmxMetricParams(fixture, CLUSTER_1, new ClusterMatchRecord(URI_PROPERTIES_2, URI_PROPERTIES_1, 2, 0), 0.0);

    // old lb updated with both uri 1 and 2
    monitor.reportData(CLUSTER_1, URI_PROPERTIES_URI_1_AND_2, false);
    verifyJmxMetricParams(fixture, CLUSTER_1, new ClusterMatchRecord(URI_PROPERTIES_URI_1_AND_2, URI_PROPERTIES_1, 2, 1), 0.5);

    // new lb updated with both uri 1 and 2
    monitor.reportData(CLUSTER_1, URI_PROPERTIES_URI_1_AND_2, true);
    verifyJmxMetricParams(fixture, CLUSTER_1,
        new ClusterMatchRecord(URI_PROPERTIES_URI_1_AND_2, URI_PROPERTIES_URI_1_AND_2, 2, 2), 1.0);

    // add data for cluster 2, old lb with uri 3 and 4
    monitor.reportData(CLUSTER_2, URI_PROPERTIES_URI_3_AND_4, false);
    assertEquals(monitor.getTotalUris(), 4);
    assertEquals(monitor.getMatchedUris(), 2);
    verifyJmxMetricParams(fixture, CLUSTER_2, new ClusterMatchRecord(URI_PROPERTIES_URI_3_AND_4, null, 2, 0), 0.5);

    // new lb updated with uri 3 with different uri specific properties and uri 4 with different weight
    monitor.reportData(CLUSTER_2, URI_PROPERTIES_URI_3_DIFF_SPECIFIC_PROPERTIES_AND_4_DIFF_WEIGHT, true);
    assertEquals(monitor.getTotalUris(), 4);
    assertEquals(monitor.getMatchedUris(), 2);
    verifyJmxMetricParams(fixture, CLUSTER_2,
        new ClusterMatchRecord(URI_PROPERTIES_URI_3_AND_4, URI_PROPERTIES_URI_3_DIFF_SPECIFIC_PROPERTIES_AND_4_DIFF_WEIGHT, 2, 0),
        0.5);

    // old lb updated with uri 3 with still different uri specific properties and uri 4 with same weight as new lb
    monitor.reportData(CLUSTER_2, URI_PROPERTIES_URI_3_ANOTHER_DIFF_SPECIFIC_PROPERTIES_AND_4_DIFF_WEIGHT, false);
    assertEquals(monitor.getTotalUris(), 4);
    assertEquals(monitor.getMatchedUris(), 3);
    verifyJmxMetricParams(fixture, CLUSTER_2,
        new ClusterMatchRecord(URI_PROPERTIES_URI_3_ANOTHER_DIFF_SPECIFIC_PROPERTIES_AND_4_DIFF_WEIGHT,
            URI_PROPERTIES_URI_3_DIFF_SPECIFIC_PROPERTIES_AND_4_DIFF_WEIGHT, 2, 1),
        0.75);

    // delete both lbs data for cluster 2
    monitor.reportData(CLUSTER_2, null, true);
    monitor.reportData(CLUSTER_2, null, false);
    verifyJmxMetricParams(fixture, CLUSTER_2, null, 1.0);
}
/**
 * Blocks System.exit calls made from within UDF execution; otherwise defers
 * to the parent security manager.
 *
 * @param status the requested exit status
 * @throws SecurityException if invoked while a UDF is executing
 */
@Override
public void checkExit(final int status) {
    if (inUdfExecution()) {
        throw new SecurityException("A UDF attempted to call System.exit");
    }
    super.checkExit(status);
}
@Test
public void shouldAllowExit() {
    // Outside of UDF execution, checkExit must not throw.
    ExtensionSecurityManager.INSTANCE.checkExit(0);
}
/**
 * Tokenizes the pattern string by running a small state machine over its
 * characters, then flushes any state left at end-of-stream.
 *
 * @return the token list
 * @throws ScanException if the pattern ends inside a format modifier or option
 */
List<Token> tokenize() throws ScanException {
    List<Token> tokenList = new ArrayList<Token>();
    StringBuffer buf = new StringBuffer();

    // Dispatch each character to the handler for the current scanner state.
    while (pointer < patternLength) {
        char c = pattern.charAt(pointer);
        pointer++;

        switch (state) {
        case LITERAL_STATE:
            handleLiteralState(c, tokenList, buf);
            break;
        case FORMAT_MODIFIER_STATE:
            handleFormatModifierState(c, tokenList, buf);
            break;
        case OPTION_STATE:
            processOption(c, tokenList, buf);
            break;
        case KEYWORD_STATE:
            handleKeywordState(c, tokenList, buf);
            break;
        case RIGHT_PARENTHESIS_STATE:
            handleRightParenthesisState(c, tokenList, buf);
            break;
        default:
        }
    }

    // EOS: emit whatever the final state left pending, or fail if the pattern
    // was cut off mid-modifier/mid-option.
    switch (state) {
    case LITERAL_STATE:
        addValuedToken(Token.LITERAL, buf, tokenList);
        break;
    case KEYWORD_STATE:
        tokenList.add(new Token(Token.SIMPLE_KEYWORD, buf.toString()));
        break;
    case RIGHT_PARENTHESIS_STATE:
        tokenList.add(Token.RIGHT_PARENTHESIS_TOKEN);
        break;

    case FORMAT_MODIFIER_STATE:
    case OPTION_STATE:
        throw new ScanException("Unexpected end of pattern string");
    }

    return tokenList;
}
@Test
public void testOptions() throws ScanException {
    // Keyword options: single option, comma-separated, quoted with embedded
    // comma/space, and single-quoted containing a closing brace.
    {
        List<Token> tl = new TokenStream("%x{t}").tokenize();
        List<Token> witness = new ArrayList<Token>();
        witness.add(Token.PERCENT_TOKEN);
        witness.add(new Token(Token.SIMPLE_KEYWORD, "x"));
        List<String> ol = new ArrayList<String>();
        ol.add("t");
        witness.add(new Token(Token.OPTION, ol));
        assertEquals(witness, tl);
    }
    {
        List<Token> tl = new TokenStream("%x{t,y}").tokenize();
        List<Token> witness = new ArrayList<Token>();
        witness.add(Token.PERCENT_TOKEN);
        witness.add(new Token(Token.SIMPLE_KEYWORD, "x"));
        List<String> ol = new ArrayList<String>();
        ol.add("t");
        ol.add("y");
        witness.add(new Token(Token.OPTION, ol));
        assertEquals(witness, tl);
    }
    {
        List<Token> tl = new TokenStream("%x{\"hello world.\", \"12y  \"}").tokenize();
        List<Token> witness = new ArrayList<Token>();
        witness.add(Token.PERCENT_TOKEN);
        witness.add(new Token(Token.SIMPLE_KEYWORD, "x"));
        List<String> ol = new ArrayList<String>();
        ol.add("hello world.");
        ol.add("12y  ");
        witness.add(new Token(Token.OPTION, ol));
        assertEquals(witness, tl);
    }
    {
        List<Token> tl = new TokenStream("%x{'opt}'}").tokenize();
        List<Token> witness = new ArrayList<Token>();
        witness.add(Token.PERCENT_TOKEN);
        witness.add(new Token(Token.SIMPLE_KEYWORD, "x"));
        List<String> ol = new ArrayList<String>();
        ol.add("opt}");
        witness.add(new Token(Token.OPTION, ol));
        assertEquals(witness, tl);
    }
}
/**
 * Probes the data source by acquiring and immediately releasing a connection.
 * Acquisition failure is rethrown as RuntimeException; a failure while closing
 * is only logged (deliberately best-effort).
 *
 * @param ds the pool to probe
 */
public static void checkDataSourceConnection(HikariDataSource ds) {
    java.sql.Connection connection = null;
    try {
        connection = ds.getConnection();
    } catch (Exception e) {
        throw new RuntimeException(e);
    } finally {
        if (connection != null) {
            try {
                connection.close();
            } catch (Exception e) {
                // close failures are logged, not propagated
                LOGGER.error(e.getMessage(), e);
            }
        }
    }
}
@Test
void testCheckConnectionThrowException() throws SQLException {
    // Fixed: mock setup and verify() used to live inside the assertThrows
    // lambda, making verify() unreachable once the exception was thrown.
    HikariDataSource ds = mock(HikariDataSource.class);
    when(ds.getConnection()).thenThrow(new RuntimeException());
    assertThrows(RuntimeException.class, () -> ConnectionCheckUtil.checkDataSourceConnection(ds));
    verify(ds).getConnection();
}
/**
 * Strips every double-quote character from the given string.
 *
 * @param string input string, may be null
 * @return the input without any {@code "} characters, or null if input was null
 */
public static String removeDoubleQuotes(String string) {
    if (string == null) {
        return null;
    }
    return string.replace("\"", "");
}
@Test
void removeDoubleQuotes() {
    // Surrounding quotes must be stripped, leaving the inner text intact.
    String text = "Some text";
    String textWithQuotes = "\"Some text\"";
    assertEquals(text, Numeric.removeDoubleQuotes(textWithQuotes));
}
/**
 * Checks whether this resource set could hold the other one, ignoring shared
 * memory. CPU is checked here; the remaining resources and memory are
 * delegated to the CPU-agnostic variant.
 *
 * @return true if CPU is sufficient and the delegate check also passes
 */
public boolean couldHoldIgnoringSharedMemory(NormalizedResources other, double thisTotalMemoryMb, double otherTotalMemoryMb) {
    if (this.cpu < other.getTotalCpu()) {
        return false;
    }
    return couldHoldIgnoringSharedMemoryAndCpu(other, thisTotalMemoryMb, otherTotalMemoryMb);
}
@Test
public void testCouldHoldWithEnoughResources() {
    // Identical resource sets with equal memory must be holdable.
    Map<String, Double> allResources = new HashMap<>();
    allResources.put(Constants.COMMON_CPU_RESOURCE_NAME, 2.0);
    allResources.put(gpuResourceName, 2.0);
    NormalizedResources resources = new NormalizedResources(normalize(allResources));
    NormalizedResources resourcesToCheck = new NormalizedResources(normalize(allResources));
    boolean couldHold = resources.couldHoldIgnoringSharedMemory(resourcesToCheck, 100, 100);
    assertThat(couldHold, is(true));
}
/**
 * Looks up a protocol by identifier with no provider hint; delegates to the
 * two-argument overload.
 */
public Protocol forName(final String identifier) {
    return this.forName(identifier, null);
}
@Test
public void testFindProtocolWithProviderInIdentifier() {
    // Lookup must succeed both with the bare identifier and with the
    // "identifier-provider" compound form.
    final TestProtocol dav = new TestProtocol(Scheme.dav) {
        @Override
        public String getIdentifier() {
            return "dav";
        }

        @Override
        public String getProvider() {
            return "provider";
        }
    };
    final ProtocolFactory f = new ProtocolFactory(new LinkedHashSet<>(Collections.singletonList(dav)));
    assertEquals(dav, f.forName("dav"));
    assertEquals(dav, f.forName("dav-provider"));
}
/**
 * Transactionally replaces the bucket value, returning the previous one.
 * If the local transactional state is already populated, it is swapped in
 * memory; otherwise the current value is fetched asynchronously first.
 * NULL is a sentinel for an explicitly absent value.
 *
 * @param newValue  value to set (null maps to the NULL sentinel)
 * @param operation the operation recorded for commit
 * @return future completing with the previous value (or null)
 */
@SuppressWarnings("unchecked")
private RFuture<V> getAndSet(V newValue, TransactionalOperation operation) {
    checkState();
    return executeLocked(() -> {
        if (state != null) {
            // State already known locally: translate the NULL sentinel back to null.
            Object prevValue = Optional.of(state).filter(s -> s != NULL).orElse(null);
            operations.add(operation);
            state = Optional.ofNullable((Object) newValue).orElse(NULL);
            return CompletableFuture.completedFuture((V) prevValue);
        }
        // No local state yet: read the current value, then record the swap.
        return getAsync().thenApply(res -> {
            state = Optional.ofNullable((Object) newValue).orElse(NULL);
            operations.add(operation);
            return res;
        });
    });
}
@Test
public void testGetAndSet() {
    // Within a transaction, getAndSet must return the prior value each time,
    // and the final value must be visible outside only after commit.
    RBucket<String> b = redisson.getBucket("test");
    b.set("123");
    RTransaction transaction = redisson.createTransaction(TransactionOptions.defaults());
    RBucket<String> bucket = transaction.getBucket("test");
    assertThat(bucket.getAndSet("0")).isEqualTo("123");
    assertThat(bucket.get()).isEqualTo("0");
    assertThat(bucket.getAndSet("324")).isEqualTo("0");
    transaction.commit();
    assertThat(redisson.getKeys().count()).isEqualTo(1);
    assertThat(b.get()).isEqualTo("324");
}
/**
 * Lists a WebDAV collection, fetching resources in chunks and converting each
 * to a Path with attributes. The directory's own entry is skipped; if the
 * server reports the requested path as a file, a NotfoundException is raised.
 *
 * @param directory collection to list
 * @param listener  notified with partial chunks as they arrive
 * @return the children of the directory
 * @throws NotfoundException if {@code directory} resolves to a file
 * @throws BackgroundException on protocol or I/O failure
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    try {
        final AttributedList<Path> children = new AttributedList<>();
        // Process the PROPFIND results in configurable chunk sizes.
        for(List<DavResource> list : ListUtils.partition(this.list(directory),
                new HostPreferences(session.getHost()).getInteger("webdav.listing.chunksize"))) {
            for(final DavResource resource : list) {
                if(new SimplePathPredicate(new Path(resource.getHref().getPath(), EnumSet.of(Path.Type.directory))).test(directory)) {
                    log.warn(String.format("Ignore resource %s", resource));
                    // Do not include self
                    if(resource.isDirectory()) {
                        continue;
                    }
                    // Requested a directory but the server returned a file.
                    throw new NotfoundException(directory.getAbsolute());
                }
                final PathAttributes attr = attributes.toAttributes(resource);
                final Path file = new Path(directory, PathNormalizer.name(resource.getHref().getPath()),
                    resource.isDirectory() ? EnumSet.of(Path.Type.directory) : EnumSet.of(Path.Type.file), attr);
                children.add(file);
                listener.chunk(directory, children);
            }
        }
        return children;
    }
    catch(SardineException e) {
        throw new DAVExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
    catch(IOException e) {
        throw new HttpExceptionMappingService().map(e, directory);
    }
}
@Test
public void testListFileException() throws Exception {
    // Listing a path that is actually a file must raise NotfoundException.
    final Path test = new DAVTouchFeature(session).touch(new Path(new DefaultHomeFinderService(session).find(),
        new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    assertThrows(NotfoundException.class, () -> new DAVListService(session).list(
        new Path(test.getAbsolute(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new DisabledListProgressListener()));
    new DAVDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Converts a Flink filter expression into the equivalent Iceberg expression,
 * when a mapping exists. Returns empty for unsupported expressions so the
 * caller can fall back to unfiltered scanning.
 *
 * @param flinkExpression resolved Flink expression (must be a CallExpression)
 * @return the Iceberg expression, or empty if not convertible
 */
public static Optional<Expression> convert(
    org.apache.flink.table.expressions.Expression flinkExpression) {
  if (!(flinkExpression instanceof CallExpression)) {
    return Optional.empty();
  }

  CallExpression call = (CallExpression) flinkExpression;
  Operation op = FILTERS.get(call.getFunctionDefinition());
  if (op != null) {
    switch (op) {
      case IS_NULL:
        return onlyChildAs(call, FieldReferenceExpression.class)
            .map(FieldReferenceExpression::getName)
            .map(Expressions::isNull);

      case NOT_NULL:
        return onlyChildAs(call, FieldReferenceExpression.class)
            .map(FieldReferenceExpression::getName)
            .map(Expressions::notNull);

      // Comparisons pass a mirrored operator for the "literal op field" ordering.
      case LT:
        return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call);

      case LT_EQ:
        return convertFieldAndLiteral(
            Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call);

      case GT:
        return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call);

      case GT_EQ:
        return convertFieldAndLiteral(
            Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call);

      // Equality against NaN maps to the dedicated isNaN/notNaN predicates.
      case EQ:
        return convertFieldAndLiteral(
            (ref, lit) -> {
              if (NaNUtil.isNaN(lit)) {
                return Expressions.isNaN(ref);
              } else {
                return Expressions.equal(ref, lit);
              }
            },
            call);

      case NOT_EQ:
        return convertFieldAndLiteral(
            (ref, lit) -> {
              if (NaNUtil.isNaN(lit)) {
                return Expressions.notNaN(ref);
              } else {
                return Expressions.notEqual(ref, lit);
              }
            },
            call);

      case NOT:
        return onlyChildAs(call, CallExpression.class)
            .flatMap(FlinkFilters::convert)
            .map(Expressions::not);

      case AND:
        return convertLogicExpression(Expressions::and, call);

      case OR:
        return convertLogicExpression(Expressions::or, call);

      case STARTS_WITH:
        return convertLike(call);
    }
  }

  return Optional.empty();
}
@Test
public void testIsNull() {
    // field.isNull() must convert to Iceberg's isNull("field1") predicate.
    Expression expr = resolve(Expressions.$("field1").isNull());
    Optional<org.apache.iceberg.expressions.Expression> actual = FlinkFilters.convert(expr);
    assertThat(actual).isPresent();
    UnboundPredicate<Object> expected = org.apache.iceberg.expressions.Expressions.isNull("field1");
    assertPredicatesMatch(expected, actual.get());
}
/**
 * Reflectively invokes a (possibly non-public) constructor of {@code clazz}.
 *
 * <p>Runtime exceptions and errors thrown by the constructor itself are rethrown
 * unwrapped; every other failure is wrapped in a {@link RuntimeException}.
 *
 * @param clazz the class to instantiate
 * @param classParameters the constructor's parameter types and argument values
 * @return the newly constructed instance
 */
public static <R> R callConstructor(
    Class<? extends R> clazz, ClassParameter<?>... classParameters) {
  perfStatsCollector.incrementCount("ReflectionHelpers.callConstructor-" + clazz.getName());
  final Class<?>[] paramTypes = ClassParameter.getClasses(classParameters);
  final Object[] args = ClassParameter.getValues(classParameters);
  try {
    Constructor<? extends R> ctor = clazz.getDeclaredConstructor(paramTypes);
    ctor.setAccessible(true);
    return ctor.newInstance(args);
  } catch (InstantiationException e) {
    throw new RuntimeException("error instantiating " + clazz.getName(), e);
  } catch (InvocationTargetException e) {
    // Surface the constructor's own unchecked throwables without wrapping.
    Throwable target = e.getTargetException();
    if (target instanceof RuntimeException) {
      throw (RuntimeException) target;
    }
    if (target instanceof Error) {
      throw (Error) target;
    }
    throw new RuntimeException(target);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
@Test
public void callConstructorReflectively_rethrowsError() {
  // ThrowsError's constructor throws TestError; callConstructor must rethrow
  // that Error unwrapped rather than wrapping it in a RuntimeException.
  try {
    ReflectionHelpers.callConstructor(ThrowsError.class);
    fail("Expected exception not thrown");
  } catch (RuntimeException e) {
    // Seeing a RuntimeException means the Error was (incorrectly) wrapped.
    throw new RuntimeException("Incorrect exception thrown", e);
  } catch (TestError e) {
    // Expected path: the original Error surfaced as-is.
  }
}
/**
 * Wraps the raw network buffer into a {@link MySQLPacketPayload} so MySQL packet
 * types can be read from and written to it using the given charset.
 */
@Override
public MySQLPacketPayload createPacketPayload(final ByteBuf message, final Charset charset) {
    return new MySQLPacketPayload(message, charset);
}
@Test
void assertCreatePacketPayload() {
    // The payload must expose exactly the buffer it was created from.
    MySQLPacketPayload payload = new MySQLPacketCodecEngine().createPacketPayload(byteBuf, StandardCharsets.UTF_8);
    assertThat(payload.getByteBuf(), is(byteBuf));
}
/**
 * Resolves a reflective {@link Type} into Jackson's {@link JavaType}, translating
 * any resolution failure into an {@link InvalidDataTableTypeException}.
 */
static JavaType constructType(Type type) {
    try {
        return constructTypeInner(type);
    } catch (Exception cause) {
        // Wrap so callers see a data-table-specific exception with the offending type.
        throw new InvalidDataTableTypeException(type, cause);
    }
}
@Test
void should_provide_canonical_representation_of_list_wild_card_number() {
    // The constructed JavaType must round-trip the original type's name.
    JavaType constructed = TypeFactory.constructType(LIST_OF_WILD_CARD_NUMBER);
    String canonicalName = constructed.getTypeName();
    assertThat(canonicalName, is(LIST_OF_WILD_CARD_NUMBER.getTypeName()));
}
/**
 * JAX-RS handler for all HTTPFS GET operations on {@code path}.
 *
 * <p>Dispatches on the {@code op} query parameter to the matching
 * {@code FSOperations} command, executes it as the authenticated (proxy) user,
 * and renders the result as JSON or, for OPEN, as a streamed octet body.
 * In write-only access mode every operation except GETFILESTATUS and LISTSTATUS
 * is rejected with 403.
 *
 * @param path the filesystem path extracted from the request URL
 * @param uriInfo request URI, used to build redirect locations
 * @param op the requested operation
 * @param params parsed operation parameters
 * @param request raw servlet request (for audit/MDC metadata)
 * @return the operation response
 * @throws IOException on filesystem errors or an unknown GET operation
 * @throws FileSystemAccessException on filesystem access failures
 */
@GET
@Path("{path:.*}")
@Produces({MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
public Response get(@PathParam("path") String path, @Context UriInfo uriInfo,
    @QueryParam(OperationParam.NAME) OperationParam op,
    @Context Parameters params, @Context HttpServletRequest request)
    throws IOException, FileSystemAccessException {
  // Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode
  if((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) &&
      (op.value() != HttpFSFileSystem.Operation.LISTSTATUS) &&
      accessMode == AccessMode.WRITEONLY) {
    return Response.status(Response.Status.FORBIDDEN).build();
  }
  UserGroupInformation user = HttpUserGroupInformation.get();
  Response response;
  path = makeAbsolute(path);
  // Record operation name and caller address for audit logging context.
  MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
  MDC.put("hostname", request.getRemoteAddr());
  switch (op.value()) {
  case OPEN: {
    // With noRedirect the client gets the data location as JSON instead of a 307.
    Boolean noRedirect = params.get(
        NoRedirectParam.NAME, NoRedirectParam.class);
    if (noRedirect) {
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      //Invoking the command directly using an unmanaged FileSystem that is
      // released by the FileSystemReleaseFilter
      final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
      final FileSystem fs = createFileSystem(user);
      InputStream is = null;
      // Open the stream as a proxy user on behalf of the remote caller.
      UserGroupInformation ugi = UserGroupInformation
          .createProxyUser(user.getShortUserName(),
              UserGroupInformation.getLoginUser());
      try {
        is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
          @Override
          public InputStream run() throws Exception {
            return command.execute(fs);
          }
        });
      } catch (InterruptedException ie) {
        LOG.warn("Open interrupted.", ie);
        Thread.currentThread().interrupt();
      }
      Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
      Long len = params.get(LenParam.NAME, LenParam.class);
      AUDIT_LOG.info("[{}] offset [{}] len [{}]",
          new Object[] { path, offset, len });
      InputStreamEntity entity = new InputStreamEntity(is, offset, len);
      response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM)
          .build();
    }
    break;
  }
  case GETFILESTATUS: {
    FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS: {
    String filter = params.get(FilterParam.NAME, FilterParam.class);
    FSOperations.FSListStatus command =
        new FSOperations.FSListStatus(path, filter);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] filter [{}]", path, (filter != null) ? filter : "-");
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETHOMEDIRECTORY: {
    enforceRootPath(op.value(), path);
    FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("Home Directory for [{}]", user);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case INSTRUMENTATION: {
    // Metrics snapshot is admin-only: caller must belong to the admin group.
    enforceRootPath(op.value(), path);
    Groups groups = HttpFSServerWebApp.get().get(Groups.class);
    Set<String> userGroups = groups.getGroupsSet(user.getShortUserName());
    if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
      throw new AccessControlException(
          "User not in HttpFSServer admin group");
    }
    Instrumentation instrumentation =
        HttpFSServerWebApp.get().get(Instrumentation.class);
    Map snapshot = instrumentation.getSnapshot();
    response = Response.ok(snapshot).build();
    break;
  }
  case GETCONTENTSUMMARY: {
    FSOperations.FSContentSummary command =
        new FSOperations.FSContentSummary(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Content summary for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETQUOTAUSAGE: {
    FSOperations.FSQuotaUsage command = new
        FSOperations.FSQuotaUsage(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Quota Usage for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILECHECKSUM: {
    FSOperations.FSFileChecksum command =
        new FSOperations.FSFileChecksum(path);
    Boolean noRedirect = params.get(
        NoRedirectParam.NAME, NoRedirectParam.class);
    AUDIT_LOG.info("[{}]", path);
    if (noRedirect) {
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      Map json = fsExecute(user, command);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    }
    break;
  }
  case GETFILEBLOCKLOCATIONS: {
    // Missing or non-positive offset/len default to the whole file.
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocations command =
        new FSOperations.FSFileBlockLocations(path, offset, len);
    @SuppressWarnings("rawtypes") Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("BlockLocations", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETACLSTATUS: {
    FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("ACL status for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETXATTRS: {
    List<String> xattrNames =
        params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
    XAttrCodec encoding =
        params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class);
    FSOperations.FSGetXAttrs command =
        new FSOperations.FSGetXAttrs(path, xattrNames, encoding);
    @SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttrs for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTXATTRS: {
    FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
    @SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttr names for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS_BATCH: {
    // startAfter is the pagination cursor; empty token means start from the beginning.
    String startAfter = params.get(
        HttpFSParametersProvider.StartAfterParam.NAME,
        HttpFSParametersProvider.StartAfterParam.class);
    byte[] token = HttpFSUtils.EMPTY_BYTES;
    if (startAfter != null) {
      token = startAfter.getBytes(StandardCharsets.UTF_8);
    }
    FSOperations.FSListStatusBatch command = new FSOperations
        .FSListStatusBatch(path, token);
    @SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] token [{}]", path, token);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOT: {
    FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETALLSTORAGEPOLICY: {
    FSOperations.FSGetAllStoragePolicies command =
        new FSOperations.FSGetAllStoragePolicies();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTORAGEPOLICY: {
    FSOperations.FSGetStoragePolicy command =
        new FSOperations.FSGetStoragePolicy(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFF: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    FSOperations.FSGetSnapshotDiff command =
        new FSOperations.FSGetSnapshotDiff(path, oldSnapshotName,
            snapshotName);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFFLISTING: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    String snapshotDiffStartPath = params
        .get(HttpFSParametersProvider.SnapshotDiffStartPathParam.NAME,
            HttpFSParametersProvider.SnapshotDiffStartPathParam.class);
    Integer snapshotDiffIndex =
        params.get(HttpFSParametersProvider.SnapshotDiffIndexParam.NAME,
            HttpFSParametersProvider.SnapshotDiffIndexParam.class);
    FSOperations.FSGetSnapshotDiffListing command =
        new FSOperations.FSGetSnapshotDiffListing(path, oldSnapshotName,
            snapshotName, snapshotDiffStartPath, snapshotDiffIndex);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTTABLEDIRECTORYLIST: {
    FSOperations.FSGetSnapshottableDirListing command =
        new FSOperations.FSGetSnapshottableDirListing();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTLIST: {
    FSOperations.FSGetSnapshotListing command =
        new FSOperations.FSGetSnapshotListing(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSERVERDEFAULTS: {
    FSOperations.FSGetServerDefaults command =
        new FSOperations.FSGetServerDefaults();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case CHECKACCESS: {
    String mode = params.get(FsActionParam.NAME,
        FsActionParam.class);
    FsActionParam fsparam = new FsActionParam(mode);
    FSOperations.FSAccess command = new FSOperations.FSAccess(path,
        FsAction.getFsAction(fsparam.value()));
    fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok().build();
    break;
  }
  case GETECPOLICY: {
    FSOperations.FSGetErasureCodingPolicy command =
        new FSOperations.FSGetErasureCodingPolicy(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECPOLICIES: {
    FSOperations.FSGetErasureCodingPolicies command =
        new FSOperations.FSGetErasureCodingPolicies();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECCODECS: {
    FSOperations.FSGetErasureCodingCodecs command =
        new FSOperations.FSGetErasureCodingCodecs();
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GET_BLOCK_LOCATIONS: {
    // Legacy variant of GETFILEBLOCKLOCATIONS; same offset/len defaulting.
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocationsLegacy command =
        new FSOperations.FSFileBlockLocationsLegacy(path, offset, len);
    @SuppressWarnings("rawtypes") Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("LocatedBlocks", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILELINKSTATUS: {
    FSOperations.FSFileLinkStatus command =
        new FSOperations.FSFileLinkStatus(path);
    @SuppressWarnings("rawtypes") Map js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTATUS: {
    FSOperations.FSStatus command = new FSOperations.FSStatus(path);
    @SuppressWarnings("rawtypes") Map js = fsExecute(user, command);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOTS: {
    Boolean allUsers = params.get(AllUsersParam.NAME, AllUsersParam.class);
    FSOperations.FSGetTrashRoots command =
        new FSOperations.FSGetTrashRoots(allUsers);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("allUsers [{}]", allUsers);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  default: {
    throw new IOException(
        MessageFormat.format("Invalid HTTP GET operation [{0}]",
            op.value()));
  }
  }
  return response;
}
/**
 * End-to-end check that default ACLs on a directory are set, reported, and
 * removed correctly through the HttpFS REST API, and that the aclBit flag in
 * GETFILESTATUS tracks the presence of ACL entries.
 */
@Test
@TestDir
@TestJetty
@TestHdfs
public void testDirAcls() throws Exception {
  final String defUser1 = "default:user:glarch:r-x";
  final String defSpec1 = "aclspec=" + defUser1;
  final String dir = "/aclDirTest";
  String statusJson;
  List<String> aclEntries;

  createHttpFSServer(false, false);

  FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf());
  fs.mkdirs(new Path(dir));

  /* getfilestatus and liststatus don't have 'aclBit' in their reply */
  statusJson = getStatus(dir, "GETFILESTATUS");
  Assert.assertEquals(-1, statusJson.indexOf("aclBit"));

  /* No ACLs, either */
  statusJson = getStatus(dir, "GETACLSTATUS");
  aclEntries = getAclEntries(statusJson);
  Assert.assertTrue(aclEntries.size() == 0);

  /* Give it a default ACL and verify */
  putCmd(dir, "SETACL", defSpec1);
  statusJson = getStatus(dir, "GETFILESTATUS");
  Assert.assertNotEquals(-1, statusJson.indexOf("aclBit"));
  statusJson = getStatus(dir, "GETACLSTATUS");
  aclEntries = getAclEntries(statusJson);
  Assert.assertTrue(aclEntries.size() == 5);
  /* 4 Entries are default:(user|group|mask|other):perm */
  Assert.assertTrue(aclEntries.contains(defUser1));

  /* Remove the default ACL and re-verify */
  putCmd(dir, "REMOVEDEFAULTACL", null);
  statusJson = getStatus(dir, "GETFILESTATUS");
  Assert.assertEquals(-1, statusJson.indexOf("aclBit"));
  statusJson = getStatus(dir, "GETACLSTATUS");
  aclEntries = getAclEntries(statusJson);
  Assert.assertTrue(aclEntries.size() == 0);
}
/**
 * Encrypts {@code text} with the Base64-encoded AES {@code key} and returns the
 * result as Base64 of {@code IV || ciphertext}.
 *
 * @param key Base64-encoded AES key material
 * @param text plaintext to encrypt
 * @return the Base64-encoded IV-prefixed ciphertext, or empty on any crypto/encoding failure
 */
public static Optional<String> encrypt(String key, String text) {
    try {
        Cipher cipher = Cipher.getInstance(AES_PADDING);
        byte[] keyBytes = Base64.getDecoder().decode(key.getBytes(DEFAULT_ENCODE));
        SecretKey secretKey = new SecretKeySpec(keyBytes, ALGORITHM);
        cipher.init(Cipher.ENCRYPT_MODE, secretKey);
        byte[] plainBytes = text.getBytes(DEFAULT_ENCODE);
        byte[] encryptBytes = cipher.doFinal(plainBytes);
        // Size the output from the actual ciphertext length. The previous
        // "plaintext length + LENGTH" sizing only coincides with the real
        // ciphertext size for tag-appending modes (e.g. GCM); for padding
        // modes it over-allocates and appends garbage zero bytes.
        byte[] bytes = new byte[IV_LENGTH + encryptBytes.length];
        System.arraycopy(cipher.getIV(), 0, bytes, 0, IV_LENGTH);
        System.arraycopy(encryptBytes, 0, bytes, IV_LENGTH, encryptBytes.length);
        return Optional.of(new String(Base64.getEncoder().encode(bytes), DEFAULT_ENCODE));
    } catch (IOException | GeneralSecurityException e) {
        // Best-effort API: callers treat empty as "encryption failed".
        return Optional.empty();
    }
}
@Test
void encrypt() {
    // A freshly generated key must round-trip TEXT through encrypt/decrypt.
    Optional<String> keyOptional = AesUtil.generateKey();
    Assertions.assertTrue(keyOptional.isPresent());
    String aesKey = keyOptional.get();

    Optional<String> cipherText = AesUtil.encrypt(aesKey, TEXT);
    Assertions.assertTrue(cipherText.isPresent());

    Optional<String> plainText = AesUtil.decrypt(aesKey, cipherText.get());
    Assertions.assertTrue(plainText.isPresent());
    Assertions.assertEquals(plainText.get(), TEXT);
}
/**
 * Parses forwarded/non-forwarded/read-field annotation strings into {@code result}
 * for a single-input function, delegating to the full overload with the trailing
 * boolean set to {@code false}.
 *
 * <p>NOTE(review): the final {@code false} flag presumably disables lenient
 * handling of incompatible types — confirm against the full overload's signature.
 */
public static void getSemanticPropsSingleFromString(
        SingleInputSemanticProperties result,
        String[] forwarded,
        String[] nonForwarded,
        String[] readSet,
        TypeInformation<?> inType,
        TypeInformation<?> outType) {
    getSemanticPropsSingleFromString(
            result, forwarded, nonForwarded, readSet, inType, outType, false);
}
@Test
void testForwardedWildCardInvalidTypes2() {
    // Wildcard forwarding requires matching input/output types;
    // tuple3 -> tuple5 must be rejected.
    String[] wildcardForward = {"*"};
    SingleInputSemanticProperties props = new SingleInputSemanticProperties();
    assertThatThrownBy(
                    () ->
                            SemanticPropUtil.getSemanticPropsSingleFromString(
                                    props,
                                    wildcardForward,
                                    null,
                                    null,
                                    threeIntTupleType,
                                    fiveIntTupleType))
            .isInstanceOf(InvalidSemanticAnnotationException.class);
}
/**
 * Exposes the singleton's active metrics reporters, for test verification only.
 */
@VisibleForTesting
static List<Reporter> getReporters() {
  return self.reporters;
}
/**
 * Verifies that all four reporter types are instantiated when configured via the
 * legacy Hive property name rather than the metastore-specific one.
 */
@Test
public void allReportersOldHiveConfig() throws Exception {
  String jsonFile = System.getProperty("java.io.tmpdir") + System.getProperty("file.separator") +
      "TestMetricsOutput.json";
  Configuration conf = MetastoreConf.newMetastoreConf();
  // Use the old Hive key name to exercise the backward-compatibility path.
  conf.set(MetastoreConf.ConfVars.HIVE_METRICS_REPORTER.getHiveName(),
      "JSON_FILE,JMX,CONSOLE,HADOOP2");
  MetastoreConf.setVar(conf, MetastoreConf.ConfVars.METRICS_JSON_FILE_LOCATION, jsonFile);

  initializeMetrics(conf);

  // One reporter per configured type.
  Assert.assertEquals(4, Metrics.getReporters().size());
}
/**
 * Returns the first readable mount point from {@code entries} whose option set
 * contains {@code controller}, or {@code null} if none qualifies.
 * Unreadable candidates are logged and skipped.
 */
@VisibleForTesting
String findControllerInMtab(String controller,
    Map<String, Set<String>> entries) {
  for (Map.Entry<String, Set<String>> mount : entries.entrySet()) {
    if (!mount.getValue().contains(controller)) {
      continue;
    }
    String mountPoint = mount.getKey();
    if (new File(mountPoint).canRead()) {
      return mountPoint;
    }
    // Candidate hosts the controller but is not readable by this process.
    LOG.warn(String.format(
        "Skipping inaccessible cgroup mount point %s", mountPoint));
  }
  return null;
}
/**
 * Verifies that findControllerInMtab skips non-existent (unreadable) mount
 * points and returns the first readable one hosting the requested controller,
 * even when an unreadable candidate appears earlier in iteration order.
 */
@Test
public void testSelectCgroup() {
  File cpu = new File(cgroupDir, "cpu");
  File cpuNoExist = new File(cgroupDir, "cpuNoExist");
  File memory = new File(cgroupDir, "memory");
  try {
    CgroupsLCEResourcesHandler handler = new CgroupsLCEResourcesHandler();
    // LinkedHashMap keeps insertion order, so the unreadable "cpu" entry is
    // encountered before the readable one.
    Map<String, Set<String>> cgroups = new LinkedHashMap<>();

    Assert.assertTrue("temp dir should be created", cpu.mkdirs());
    Assert.assertTrue("temp dir should be created", memory.mkdirs());
    Assert.assertFalse("temp dir should not be created", cpuNoExist.exists());

    cgroups.put(
        memory.getAbsolutePath(), Collections.singleton("memory"));
    cgroups.put(
        cpuNoExist.getAbsolutePath(), Collections.singleton("cpu"));
    cgroups.put(cpu.getAbsolutePath(), Collections.singleton("cpu"));
    String selectedCPU = handler.findControllerInMtab("cpu", cgroups);
    Assert.assertEquals("Wrong CPU mount point selected",
        cpu.getAbsolutePath(), selectedCPU);
  } finally {
    // Clean up the temp dirs regardless of assertion outcome.
    FileUtils.deleteQuietly(cpu);
    FileUtils.deleteQuietly(memory);
  }
}
/**
 * Probes the GCP metadata endpoint at {@code url} with a short timeout and a
 * single retry; considered available when the response body is non-empty.
 */
static boolean isEndpointAvailable(String url) {
    String responseBody = RestClient.create(url, 1)
            .withRequestTimeoutSeconds(1)
            .withRetries(1)
            .withHeader("Metadata-Flavor", "Google")
            .get()
            .getBody();
    return !responseBody.isEmpty();
}
/**
 * A 200 response with a non-empty body must make the endpoint count as available.
 */
@Test
public void isEndpointAvailable() {
    // given
    String endpoint = "/some-endpoint";
    String url = String.format("http://localhost:%d%s", wireMockRule.port(), endpoint);
    stubFor(get(urlEqualTo(endpoint)).willReturn(aResponse().withStatus(HttpURLConnection.HTTP_OK).withBody("some-body")));

    // when
    boolean isAvailable = GcpDiscoveryStrategyFactory.isEndpointAvailable(url);

    // then
    assertTrue(isAvailable);
}
/**
 * Handles a receiver status message and returns the updated sender limit.
 *
 * <p>Delegates to {@code processStatusMessage} with the trailing flag set to
 * {@code true}. NOTE(review): the flag's meaning is not visible here — it
 * presumably distinguishes regular SMs from another message kind; confirm
 * against processStatusMessage.
 */
public long onStatusMessage(
    final StatusMessageFlyweight flyweight,
    final InetSocketAddress receiverAddress,
    final long senderLimit,
    final int initialTermId,
    final int positionBitsToShift,
    final long timeNs)
{
    return processStatusMessage(flyweight, senderLimit, initialTermId, positionBitsToShift, timeNs, true);
}
/**
 * Receivers lagging more than one window behind the leading position must not
 * drag the min-flow-control limit down; a receiver exactly one window behind
 * still participates and sets the limit.
 */
@Test
void shouldNotIncludeReceiverMoreThanWindowSizeBehindMinPosition()
{
    final UdpChannel udpChannel = UdpChannel.parse(
        "aeron:udp?endpoint=224.20.30.39:24326|interface=localhost|fc=min,g:/2");
    flowControl.initialize(
        newContext(), countersManager, udpChannel, 0, 0, 0, 0, 0);

    final int senderLimit = 5000;
    final int termOffset0 = WINDOW_LENGTH * 2;
    // termOffset1 is one byte past the window behind termOffset0 -> excluded.
    final int termOffset1 = termOffset0 - (WINDOW_LENGTH + 1);
    // termOffset2 is exactly one window behind -> included.
    final int termOffset2 = termOffset0 - (WINDOW_LENGTH);

    assertEquals(senderLimit, onStatusMessage(flowControl, 1, termOffset0, senderLimit));
    assertEquals(senderLimit, onStatusMessage(flowControl, 2, termOffset1, senderLimit));
    assertEquals(termOffset2 + WINDOW_LENGTH, onStatusMessage(flowControl, 3, termOffset2, senderLimit));
}
/**
 * Serializes the status of every known job to a JSON array string.
 *
 * <p>Running jobs come first (taken live from their masters); finished jobs
 * follow, excluding any id that is also currently running so each job appears
 * exactly once.
 *
 * @return JSON array of job status entries
 * @throws SeaTunnelEngineException if JSON serialization fails
 */
public String listAllJob() {
    final List<JobState> runningStates =
            runningJobMasterMap.values().stream()
                    .map(master -> toJobStateMapper(master, true))
                    .collect(Collectors.toList());
    final Set<Long> runningIds =
            runningStates.stream().map(JobState::getJobId).collect(Collectors.toSet());

    final List<JobStatusData> status =
            Stream.concat(
                            runningStates.stream(),
                            finishedJobStateImap.values().stream()
                                    .filter(state -> !runningIds.contains(state.getJobId())))
                    .map(
                            state ->
                                    new JobStatusData(
                                            state.getJobId(),
                                            state.getJobName(),
                                            state.getJobStatus(),
                                            state.getSubmitTime(),
                                            state.getFinishTime()))
                    .collect(Collectors.toList());

    try {
        return objectMapper.writeValueAsString(status);
    } catch (JsonProcessingException e) {
        logger.severe("Failed to list all job", e);
        throw new SeaTunnelEngineException(e);
    }
}
/**
 * Tracks two jobs through listAllJob(): each must first appear with RUNNING
 * status and later with FINISHED, and a finished job must remain listed while a
 * second job runs.
 */
@Test
public void testlistJobState() throws Exception {
    startJob(JOB_1, "fake_to_console.conf");
    // waiting for JOB_1 status turn to RUNNING
    await().atMost(60000, TimeUnit.MILLISECONDS)
            .untilAsserted(
                    () ->
                            Assertions.assertTrue(
                                    server.getCoordinatorService()
                                            .getJobHistoryService()
                                            .listAllJob()
                                            .contains(
                                                    String.format(
                                                            "\"jobId\":%s,\"jobName\":\"Test\",\"jobStatus\":\"RUNNING\"",
                                                            JOB_1))));
    // waiting for JOB_1 status turn to FINISHED
    await().atMost(60000, TimeUnit.MILLISECONDS)
            .untilAsserted(
                    () ->
                            Assertions.assertTrue(
                                    server.getCoordinatorService()
                                            .getJobHistoryService()
                                            .listAllJob()
                                            .contains(
                                                    String.format(
                                                            "\"jobId\":%s,\"jobName\":\"Test\",\"jobStatus\":\"FINISHED\"",
                                                            JOB_1))));

    startJob(JOB_2, "fake_to_console.conf");
    // waiting for JOB_2 status turn to FINISHED and JOB_2 status turn to RUNNING
    await().atMost(60000, TimeUnit.MILLISECONDS)
            .untilAsserted(
                    () ->
                            Assertions.assertTrue(
                                    server.getCoordinatorService()
                                                    .getJobHistoryService()
                                                    .listAllJob()
                                                    .contains(
                                                            String.format(
                                                                    "\"jobId\":%s,\"jobName\":\"Test\",\"jobStatus\":\"FINISHED\"",
                                                                    JOB_1))
                                            && server.getCoordinatorService()
                                                    .getJobHistoryService()
                                                    .listAllJob()
                                                    .contains(
                                                            String.format(
                                                                    "\"jobId\":%s,\"jobName\":\"Test\",\"jobStatus\":\"RUNNING\"",
                                                                    JOB_2))));
}
/**
 * Fetches a page of results relative to the current position.
 *
 * <p>FETCH_NEXT re-requests the current token (the token advance happens inside
 * the token-based overload); FETCH_PRIOR re-fetches the previous page. For
 * FETCH_NEXT with an empty buffer this blocks until the result store has data,
 * so a NOT_READY response is never returned for that orientation.
 *
 * @param orientation whether to fetch the next or the prior page
 * @param maxFetchSize maximum number of rows to return
 * @return the fetched result set
 */
public synchronized ResultSet fetchResults(FetchOrientation orientation, int maxFetchSize) {
    long token;
    switch (orientation) {
        case FETCH_NEXT:
            token = currentToken;
            break;
        case FETCH_PRIOR:
            token = currentToken - 1;
            break;
        default:
            throw new UnsupportedOperationException(
                    String.format("Unknown fetch orientation: %s.", orientation));
    }

    if (orientation == FetchOrientation.FETCH_NEXT && bufferedResults.isEmpty()) {
        // make sure data is available in the buffer
        resultStore.waitUntilHasData();
    }

    return fetchResults(token, maxFetchSize);
}
/**
 * When the underlying iterator fails mid-stream, repeatedly fetching must
 * eventually surface the iterator's error (with its original message in the
 * cause chain) rather than hanging or returning partial success silently.
 */
@Test
void testFetchFailedResult() {
    String message = "Artificial Exception";
    ResultFetcher fetcher =
            buildResultFetcher(
                    Arrays.asList(TestIterator.createErrorIterator(message), data.iterator()),
                    data.size());

    assertThatThrownBy(
                    () -> {
                        Long token = 0L;
                        while (token != null) {
                            // Use loop to fetch results from the ErrorIterator
                            token =
                                    fetcher.fetchResults(token, Integer.MAX_VALUE)
                                            .getNextToken();
                        }
                    })
            .satisfies(FlinkAssertions.anyCauseMatches(message));
}
/**
 * Converts a single conjunct expression into an Elasticsearch query builder by
 * dispatching through this visitor.
 */
public QueryBuilders.QueryBuilder convert(Expr conjunct) {
    return visit(conjunct);
}
/**
 * esquery() must pass a single-key raw ES query through verbatim, and reject a
 * payload with more than one top-level key.
 */
@Test
public void testTranslateRawQuery() {
    SlotRef serviceSlotRef = mockSlotRef("service", Type.STRING);

    // normal test: well-formed single-root DSL passes through unchanged
    String normalValue = "{\"term\":{\"service\":{\"value\":\"starrocks\"}}}";
    StringLiteral normalValueLiteral = new StringLiteral(normalValue);
    List<Expr> params = new ArrayList<>();
    params.add(serviceSlotRef);
    params.add(normalValueLiteral);
    FunctionCallExpr normalESQueryExpr = new FunctionCallExpr("esquery", params);
    Assert.assertEquals(normalValue, queryConverter.convert(normalESQueryExpr).toString());

    // illegal test: extra top-level key must be rejected
    String illegalValue = "{\"term\":{\"service\":{\"value\":\"starrocks\"}},\"k\":3}";
    StringLiteral illegalValueLiteral = new StringLiteral(illegalValue);
    List<Expr> illegalParams = new ArrayList<>();
    illegalParams.add(serviceSlotRef);
    illegalParams.add(illegalValueLiteral);
    FunctionCallExpr illegalESQueryExpr = new FunctionCallExpr("esquery", illegalParams);
    ExceptionChecker.expectThrows(StarRocksConnectorException.class,
            () -> queryConverter.convert(illegalESQueryExpr));
}
/**
 * Triggers a thread-info sampling request against the given task executors.
 *
 * <p>Registers a pending request (under {@code lock}) and forwards the sampling
 * parameters to every gateway. The returned future completes when all responses
 * arrive, or fails once the overall timeout (expected sampling duration plus
 * the configured request timeout) elapses.
 *
 * @param executionsWithGateways executions to sample, grouped per task executor gateway
 * @param numSamples number of samples to take per task (>= 1)
 * @param delayBetweenSamples delay between consecutive samples
 * @param maxStackTraceDepth maximum stack depth to capture (>= 0)
 * @return future with the aggregated thread info statistics
 */
public CompletableFuture<VertexThreadInfoStats> triggerThreadInfoRequest(
        Map<ImmutableSet<ExecutionAttemptID>, CompletableFuture<TaskExecutorThreadInfoGateway>>
                executionsWithGateways,
        int numSamples,
        Duration delayBetweenSamples,
        int maxStackTraceDepth) {
    checkNotNull(executionsWithGateways, "Tasks to sample");
    checkArgument(executionsWithGateways.size() > 0, "No tasks to sample");
    checkArgument(numSamples >= 1, "No number of samples");
    checkArgument(maxStackTraceDepth >= 0, "Negative maximum stack trace depth");

    // Execution IDs of running tasks grouped by the task manager
    Collection<ImmutableSet<ExecutionAttemptID>> runningSubtasksIds =
            executionsWithGateways.keySet();

    synchronized (lock) {
        if (isShutDown) {
            return FutureUtils.completedExceptionally(new IllegalStateException("Shut down"));
        }

        final int requestId = requestIdCounter++;
        log.debug("Triggering thread info request {}", requestId);

        final PendingThreadInfoRequest pending =
                new PendingThreadInfoRequest(requestId, runningSubtasksIds);

        // requestTimeout is treated as the time on top of the expected sampling duration.
        // Discard the request if it takes too long. We don't send cancel
        // messages to the task managers, but only wait for the responses
        // and then ignore them.
        long expectedDuration = numSamples * delayBetweenSamples.toMillis();
        Time timeout = Time.milliseconds(expectedDuration + requestTimeout.toMillis());

        // Add the pending request before scheduling the discard task to
        // prevent races with removing it again.
        pendingRequests.put(requestId, pending);

        ThreadInfoSamplesRequest requestParams =
                new ThreadInfoSamplesRequest(
                        requestId, numSamples, delayBetweenSamples, maxStackTraceDepth);

        requestThreadInfo(executionsWithGateways, requestParams, timeout);

        return pending.getStatsFuture();
    }
}
/**
 * When one task executor never responds, the whole request must fail with the
 * timeout message even though another executor completed successfully.
 */
@Test
void testThreadInfoRequestTimeout() throws Exception {
    Map<ImmutableSet<ExecutionAttemptID>, CompletableFuture<TaskExecutorThreadInfoGateway>>
            executionWithGateways =
                    createMockSubtaskWithGateways(
                            CompletionType.SUCCESSFULLY, CompletionType.TIMEOUT);

    CompletableFuture<VertexThreadInfoStats> requestFuture =
            coordinator.triggerThreadInfoRequest(
                    executionWithGateways,
                    DEFAULT_NUMBER_OF_SAMPLES,
                    DEFAULT_DELAY_BETWEEN_SAMPLES,
                    DEFAULT_MAX_STACK_TRACE_DEPTH);

    try {
        assertThatThrownBy(requestFuture::get, "The request must be failed.")
                .satisfies(anyCauseMatches(REQUEST_TIMEOUT_MESSAGE));
    } finally {
        // Always release the coordinator's resources, pass or fail.
        coordinator.shutDown();
    }
}
/**
 * Replicates an instance cancellation (de-registration) to the peer node.
 *
 * <p>The task is enqueued on the batching dispatcher with an expiry of
 * {@code maxProcessingDelayMs} from now; a 404 from the peer is only logged,
 * since it means the peer never had the entry.
 *
 * @param appName application the instance belongs to
 * @param id the instance id to cancel
 */
public void cancel(final String appName, final String id) throws Exception {
    long expiryTime = System.currentTimeMillis() + maxProcessingDelayMs;
    batchingDispatcher.process(
            taskId("cancel", appName, id),
            new InstanceReplicationTask(targetHost, Action.Cancel, appName, id) {
                @Override
                public EurekaHttpResponse<Void> execute() {
                    return replicationClient.cancel(appName, id);
                }

                @Override
                public void handleFailure(int statusCode, Object responseEntity) throws Throwable {
                    super.handleFailure(statusCode, responseEntity);
                    // 404 is benign: the peer had no record of this instance.
                    if (statusCode == 404) {
                        logger.warn("{}: missing entry.", getTaskName());
                    }
                }
            },
            expiryTime
    );
}
@Test
public void testCancelBatchReplication() throws Exception {
    // Issue a cancel and verify it is replicated as a single batched Cancel action.
    createPeerEurekaNode().cancel(instanceInfo.getAppName(), instanceInfo.getId());

    ReplicationInstance batched = expectSingleBatchRequest();
    assertThat(batched.getAction(), is(equalTo(Action.Cancel)));
}
/**
 * Builds a {@link CatalogLoader} from catalog properties.
 *
 * <p>{@code catalog-impl} takes a custom implementation class and is mutually
 * exclusive with {@code catalog-type}; otherwise the type (defaulting to hive)
 * selects one of the built-in hive/hadoop/rest loaders.
 *
 * @param name catalog name
 * @param properties catalog configuration properties
 * @param hadoopConf base Hadoop configuration
 * @return the configured catalog loader
 */
static CatalogLoader createCatalogLoader(
    String name, Map<String, String> properties, Configuration hadoopConf) {
  String catalogImpl = properties.get(CatalogProperties.CATALOG_IMPL);
  if (catalogImpl != null) {
    String catalogType = properties.get(ICEBERG_CATALOG_TYPE);
    // Setting both is ambiguous, so it is rejected outright.
    Preconditions.checkArgument(
        catalogType == null,
        "Cannot create catalog %s, both catalog-type and catalog-impl are set: catalog-type=%s, catalog-impl=%s",
        name,
        catalogType,
        catalogImpl);
    return CatalogLoader.custom(name, properties, hadoopConf, catalogImpl);
  }

  String catalogType = properties.getOrDefault(ICEBERG_CATALOG_TYPE, ICEBERG_CATALOG_TYPE_HIVE);
  switch (catalogType.toLowerCase(Locale.ENGLISH)) {
    case ICEBERG_CATALOG_TYPE_HIVE:
      // The values of properties 'uri', 'warehouse', 'hive-conf-dir' are allowed to be null, in
      // that case it will
      // fallback to parse those values from hadoop configuration which is loaded from classpath.
      String hiveConfDir = properties.get(HIVE_CONF_DIR);
      String hadoopConfDir = properties.get(HADOOP_CONF_DIR);
      Configuration newHadoopConf = mergeHiveConf(hadoopConf, hiveConfDir, hadoopConfDir);
      return CatalogLoader.hive(name, newHadoopConf, properties);

    case ICEBERG_CATALOG_TYPE_HADOOP:
      return CatalogLoader.hadoop(name, hadoopConf, properties);

    case ICEBERG_CATALOG_TYPE_REST:
      return CatalogLoader.rest(name, hadoopConf, properties);

    default:
      throw new UnsupportedOperationException(
          "Unknown catalog-type: " + catalogType + " (Must be 'hive', 'hadoop' or 'rest')");
  }
}
/**
 * Supplying both catalog-impl and catalog-type must be rejected with a clear
 * IllegalArgumentException rather than silently preferring one of them.
 */
@Test
public void testCreateCatalogCustomWithHiveCatalogTypeSet() {
  String catalogName = "customCatalog";
  props.put(CatalogProperties.CATALOG_IMPL, CustomHadoopCatalog.class.getName());
  props.put(
      FlinkCatalogFactory.ICEBERG_CATALOG_TYPE, FlinkCatalogFactory.ICEBERG_CATALOG_TYPE_HIVE);
  assertThatThrownBy(
          () -> FlinkCatalogFactory.createCatalogLoader(catalogName, props, new Configuration()))
      .isInstanceOf(IllegalArgumentException.class)
      .hasMessageStartingWith(
          "Cannot create catalog customCatalog, both catalog-type and catalog-impl are set");
}
/**
 * Marks a subscriber record as canceled: stamps both the accessed-at and
 * canceled-at timestamps with {@code canceledAt} and removes the stored
 * subscription id in a single DynamoDB update.
 *
 * @param user the subscriber's user id (length-checked)
 * @param canceledAt cancellation instant, stored at epoch-second precision
 * @return future completing when the update has been applied
 */
public CompletableFuture<Void> canceledAt(byte[] user, Instant canceledAt) {
  checkUserLength(user);

  UpdateItemRequest request = UpdateItemRequest.builder()
      .tableName(table)
      .key(Map.of(KEY_USER, b(user)))
      .returnValues(ReturnValue.NONE)
      .updateExpression("SET "
          + "#accessed_at = :accessed_at, "
          + "#canceled_at = :canceled_at "
          + "REMOVE #subscription_id")
      .expressionAttributeNames(Map.of(
          "#accessed_at", KEY_ACCESSED_AT,
          "#canceled_at", KEY_CANCELED_AT,
          "#subscription_id", KEY_SUBSCRIPTION_ID))
      .expressionAttributeValues(Map.of(
          ":accessed_at", n(canceledAt.getEpochSecond()),
          ":canceled_at", n(canceledAt.getEpochSecond())))
      .build();

  return client.updateItem(request).thenApply(updateItemResponse -> null);
}
/**
 * After canceledAt(), the stored record must carry the cancellation instant in
 * both timestamps and no longer hold a subscription id.
 */
@Test
void testCanceledAt() {
  Instant canceled = Instant.ofEpochSecond(NOW_EPOCH_SECONDS + 42);
  assertThat(subscriptions.create(user, password, created)).succeedsWithin(DEFAULT_TIMEOUT);
  assertThat(subscriptions.canceledAt(user, canceled)).succeedsWithin(DEFAULT_TIMEOUT);
  assertThat(subscriptions.get(user, password)).succeedsWithin(DEFAULT_TIMEOUT).satisfies(getResult -> {
    assertThat(getResult).isNotNull();
    assertThat(getResult.type).isEqualTo(FOUND);
    assertThat(getResult.record).isNotNull().satisfies(record -> {
      assertThat(record.accessedAt).isEqualTo(canceled);
      assertThat(record.canceledAt).isEqualTo(canceled);
      assertThat(record.subscriptionId).isNull();
    });
  });
}
/**
 * Resolves the single transfer item for a CLI invocation.
 *
 * <p>With two option values (remote plus explicit local target) the work is delegated
 * to the dedicated download/upload item finders. With only the remote argument given,
 * uploads and synchronize yield no items (nothing local to read), while downloads fall
 * back to a local file in the current working directory derived via the prefixer.
 */
@Override
public Set<TransferItem> find(final CommandLine input, final TerminalAction action, final Path remote) {
    if(input.getOptionValues(action.name()).length == 2) {
        switch(action) {
            case download:
                return new DownloadTransferItemFinder().find(input, action, remote);
            case upload:
            case synchronize:
                return new UploadTransferItemFinder().find(input, action, remote);
        }
    }
    else {
        switch(action) {
            case upload:
            case synchronize:
                // No local file argument supplied; nothing to transfer upstream.
                return Collections.emptySet();
        }
    }
    // Relative to current working directory using prefix finder.
    return Collections.singleton(
            new TransferItem(remote, LocalFactory.get(prefixer.normalize(remote.getName())))
    );
}
// With only a remote URI given for --download, the local target should be derived
// from the current working directory plus the remote filename.
@Test
public void testNoLocalInOptionsDownload() throws Exception {
    final CommandLineParser parser = new PosixParser();
    final CommandLine input = parser.parse(TerminalOptionsBuilder.options(), new String[]{"--download", "rackspace://cdn.cyberduck.ch/remote"});
    final Set<TransferItem> found = new SingleTransferItemFinder().find(input, TerminalAction.download, new Path("/cdn.cyberduck.ch/remote", EnumSet.of(Path.Type.file)));
    assertFalse(found.isEmpty());
    assertEquals(new TransferItem(new Path("/cdn.cyberduck.ch/remote", EnumSet.of(Path.Type.file)), LocalFactory.get(System.getProperty("user.dir") + "/remote")),
            found.iterator().next());
}
/**
 * Renders this object as {@code SimpleClassName{name}} for debugging output.
 */
@Override
public String toString() {
    final String simpleName = getClass().getSimpleName();
    final String displayName = getName();
    return simpleName + '{' + displayName + '}';
}
// A lower-bounded wildcard should render as "? super <bound>" and its string form
// must not contain "extends".
@Test
public void toString_lower_bounded() {
    @SuppressWarnings("unused")
    class LowerBounded<T extends List<? super String>> {
    }
    JavaWildcardType wildcardType = importWildcardTypeOf(LowerBounded.class);

    assertThat(wildcardType.toString())
            .contains(JavaWildcardType.class.getSimpleName())
            .contains("? super java.lang.String")
            .doesNotContain("extends");
}
/**
 * REST entry point for submitting an application through the Router.
 *
 * <p>Flow: validate the submission context and application id up front; then retry
 * the actual submission across active sub-clusters, at most
 * min(activeSubClusterCount, numSubmitRetries) attempts, collecting failed
 * sub-clusters in a blacklist between attempts. Validation failures map to
 * 400 BAD_REQUEST; submission failures map to 503 SERVICE_UNAVAILABLE.
 * Metrics and audit log entries are recorded on every success and failure path.
 *
 * @param newApp application submission context from the REST request
 * @param hsr    originating HTTP request, forwarded to the per-attempt submit call
 * @return 200 response from a sub-cluster on success, otherwise 400/503 with a message
 */
@Override
public Response submitApplication(ApplicationSubmissionContextInfo newApp,
    HttpServletRequest hsr) throws AuthorizationException, IOException, InterruptedException {

  long startTime = clock.getTime();

  // We verify the parameters to ensure that newApp is not empty and
  // that the format of applicationId is correct.
  if (newApp == null || newApp.getApplicationId() == null) {
    routerMetrics.incrAppsFailedSubmitted();
    String errMsg = "Missing ApplicationSubmissionContextInfo or "
        + "applicationSubmissionContext information.";
    RouterAuditLogger.logFailure(getUser().getShortUserName(), SUBMIT_NEW_APP, UNKNOWN,
        TARGET_WEB_SERVICE, errMsg);
    return Response.status(Status.BAD_REQUEST).entity(errMsg).build();
  }

  try {
    String applicationId = newApp.getApplicationId();
    RouterServerUtil.validateApplicationId(applicationId);
  } catch (IllegalArgumentException e) {
    routerMetrics.incrAppsFailedSubmitted();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), SUBMIT_NEW_APP, UNKNOWN,
        TARGET_WEB_SERVICE, e.getMessage());
    return Response.status(Status.BAD_REQUEST).entity(e.getLocalizedMessage()).build();
  }

  // Sub-clusters that fail an attempt are blacklisted for subsequent retries.
  List<SubClusterId> blackList = new ArrayList<>();
  try {
    int activeSubClustersCount = federationFacade.getActiveSubClustersCount();
    // No point retrying more times than there are sub-clusters to try.
    int actualRetryNums = Math.min(activeSubClustersCount, numSubmitRetries);
    Response response = ((FederationActionRetry<Response>) (retryCount) ->
        invokeSubmitApplication(newApp, blackList, hsr, retryCount)).
        runWithRetries(actualRetryNums, submitIntervalTime);
    if (response != null) {
      long stopTime = clock.getTime();
      routerMetrics.succeededAppsSubmitted(stopTime - startTime);
      return response;
    }
  } catch (Exception e) {
    routerMetrics.incrAppsFailedSubmitted();
    RouterAuditLogger.logFailure(getUser().getShortUserName(), SUBMIT_NEW_APP, UNKNOWN,
        TARGET_WEB_SERVICE, e.getMessage());
    return Response.status(Status.SERVICE_UNAVAILABLE).entity(e.getLocalizedMessage()).build();
  }

  // All retries exhausted without a response: report unavailability.
  routerMetrics.incrAppsFailedSubmitted();
  String errMsg = String.format("Application %s with appId %s failed to be submitted.",
      newApp.getApplicationName(), newApp.getApplicationId());
  LOG.error(errMsg);
  RouterAuditLogger.logFailure(getUser().getShortUserName(), SUBMIT_NEW_APP, UNKNOWN,
      TARGET_WEB_SERVICE, errMsg);
  return Response.status(Status.SERVICE_UNAVAILABLE).entity(errMsg).build();
}
// Null or empty submission contexts must be rejected with 400 BAD_REQUEST before
// any sub-cluster is contacted.
@Test
public void testSubmitApplicationEmptyRequest()
    throws IOException, InterruptedException {

  // ApplicationSubmissionContextInfo null
  Response response = interceptor.submitApplication(null, null);
  Assert.assertEquals(BAD_REQUEST, response.getStatus());

  // ApplicationSubmissionContextInfo empty
  response = interceptor
      .submitApplication(new ApplicationSubmissionContextInfo(), null);
  Assert.assertEquals(BAD_REQUEST, response.getStatus());

  ApplicationSubmissionContextInfo context = new ApplicationSubmissionContextInfo();
  response = interceptor.submitApplication(context, null);
  Assert.assertEquals(BAD_REQUEST, response.getStatus());
}
/**
 * Lists every service interface the core module provides to other modules.
 * Core infrastructure services are added explicitly, followed by grouped
 * registrations via the add* helper methods (server, receiver, inside, cache,
 * query, profile, OAL, management, eBPF profiling), then trailing services.
 * Ordering reflects registration grouping only.
 */
@Override
public Class[] services() {
    List<Class> classes = new ArrayList<>();
    classes.add(ConfigService.class);
    classes.add(ServerStatusService.class);
    classes.add(DownSamplingConfigService.class);
    classes.add(NamingControl.class);
    classes.add(IComponentLibraryCatalogService.class);
    classes.add(HierarchyDefinitionService.class);

    classes.add(IWorkerInstanceGetter.class);
    classes.add(IWorkerInstanceSetter.class);

    classes.add(MeterSystem.class);

    addServerInterface(classes);
    addReceiverInterface(classes);
    addInsideService(classes);
    addCacheService(classes);
    addQueryService(classes);
    addProfileService(classes);
    addOALService(classes);
    addManagementService(classes);
    addEBPFProfilingService(classes);

    classes.add(CommandService.class);
    classes.add(HierarchyService.class);

    return classes.toArray(new Class[]{});
}
// Guards the number of services exposed by CoreModule; the expected count (44)
// must be updated deliberately whenever a service is added or removed.
@Test
public void testOpenServiceList() {
    CoreModule coreModule = new CoreModule();
    Assertions.assertEquals(44, coreModule.services().length);
}
/**
 * Expects a log entry at {@code level} with the given tag whose message matches
 * {@code messageMatcher}; delegates with no expected throwable.
 */
public void expectLogMessage(int level, String tag, Matcher<String> messageMatcher) {
    expectLog(level, tag, messageMatcher, null);
}
// An expected error log that actually occurs must not fail the rule.
@Test
public void testExpectErrorLogDoesNotFail() {
    Log.e("Mytag", "What's up");
    rule.expectLogMessage(Log.ERROR, "Mytag", "What's up");
}
/**
 * Determines this host's outward-facing IP address by "connecting" a UDP socket to a
 * well-known external address (no packets are sent) and reading the local address the
 * OS selects for that route. Honors {@code java.net.preferIPv6Addresses}; if the
 * preferred family has no route, falls back to the other family. If the socket still
 * reports a wildcard address, resolves the local host address directly.
 *
 * <p>Fix: the {@link DatagramSocket} was never closed, leaking a file descriptor on
 * every call. It is now managed by try-with-resources so it is released on all paths.
 *
 * @return textual IP address of this host's preferred outbound interface
 * @throws SocketException      if the UDP socket cannot be created
 * @throws UnknownHostException if the fallback local-host resolution fails
 */
public static String getHostAddress() throws SocketException, UnknownHostException {
    boolean isIPv6Preferred = Boolean.parseBoolean(System.getProperty("java.net.preferIPv6Addresses"));
    try (DatagramSocket ds = new DatagramSocket()) {
        try {
            ds.connect(isIPv6Preferred ? Inet6Address.getByName(DUMMY_OUT_IPV6)
                : Inet4Address.getByName(DUMMY_OUT_IPV4), HTTP_PORT);
        } catch (java.io.UncheckedIOException e) {
            // connect() throws when the preferred address family has no route; try the other.
            LOGGER.warn(e.getMessage());
            if (isIPv6Preferred) {
                LOGGER.warn("No IPv6 route available on host, falling back to IPv4");
                ds.connect(Inet4Address.getByName(DUMMY_OUT_IPV4), HTTP_PORT);
            } else {
                LOGGER.warn("No IPv4 route available on host, falling back to IPv6");
                ds.connect(Inet6Address.getByName(DUMMY_OUT_IPV6), HTTP_PORT);
            }
        }
        InetAddress localAddress = ds.getLocalAddress();
        if (localAddress.isAnyLocalAddress()) {
            // Wildcard means connect() could not pin an interface; resolve directly instead.
            localAddress = isIPv6Preferred ? getLocalIPv6Address() : InetAddress.getLocalHost();
        }
        return localAddress.getHostAddress();
    }
}
// In a dual-stack environment without preferIPv6Addresses set, the IPv4 address of
// the connected socket should be returned; exactly one socket is constructed and
// connected once.
@Test(description = "Test getHostAddress with no preferIPv6Addresses in dual stack environment")
public void testGetHostAddressDualStackEnv() {
    InetAddress mockInetAddress = mock(InetAddress.class);
    when(mockInetAddress.isAnyLocalAddress()).thenReturn(false);
    when(mockInetAddress.getHostAddress()).thenReturn(LOCAL_ADDRESS_IPV4);
    try (MockedConstruction<DatagramSocket> mockedConstructionDatagramSocket = mockConstruction(DatagramSocket.class,
        initDatagramSocket(mockInetAddress, NetworkEnv.DUAL_STACK))) {
      String hostAddress = NetUtils.getHostAddress();
      DatagramSocket mockDatagramSocket = mockedConstructionDatagramSocket.constructed().get(0);
      assertEquals(LOCAL_ADDRESS_IPV4, hostAddress);
      assertEquals(1, mockedConstructionDatagramSocket.constructed().size());
      verify(mockDatagramSocket, times(1)).connect(any(), anyInt());
    } catch (SocketException | UnknownHostException e) {
      Assert.fail("Should not throw: " + e.getMessage());
    }
}
/**
 * Parses the markdown resource into documents using the configured visitor.
 *
 * <p>Fix: the reader previously used the platform default charset, making parsing of
 * non-ASCII markdown dependent on the host configuration. The resource is now read
 * explicitly as UTF-8 (the conventional encoding for markdown resources) so results
 * are deterministic across platforms.
 *
 * @return documents extracted from the markdown content
 * @throws RuntimeException wrapping any IOException raised while reading the resource
 */
@Override
public List<Document> get() {
    try (var input = markdownResource.getInputStream()) {
        var reader = new InputStreamReader(input, java.nio.charset.StandardCharsets.UTF_8);
        Node node = parser.parseReader(reader);
        DocumentVisitor documentVisitor = new DocumentVisitor(config);
        // The visitor walks the AST and accumulates Document instances.
        node.accept(documentVisitor);
        return documentVisitor.getDocuments();
    }
    catch (IOException e) {
        throw new RuntimeException(e);
    }
}
// Soft and hard line breaks in a simple markdown file should be flattened into a
// single document with concatenated text and no metadata.
@Test
void testSimpleMarkdownDocumentWithHardAndSoftLineBreaks() {
    MarkdownDocumentReader reader = new MarkdownDocumentReader("classpath:/simple.md");

    List<Document> documents = reader.get();

    assertThat(documents).hasSize(1);

    Document documentsFirst = documents.get(0);

    assertThat(documentsFirst.getMetadata()).isEmpty();
    assertThat(documentsFirst.getContent()).isEqualTo(
            "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec tincidunt velit non bibendum gravida. Cras accumsan tincidunt ornare. Donec hendrerit consequat tellus blandit accumsan. Aenean aliquam metus at arcu elementum dignissim.Nullam nisi dui, egestas nec sem nec, interdum lobortis enim. Pellentesque odio orci, faucibus eu luctus nec, venenatis et magna. Vestibulum nec eros non felis fermentum posuere eget ac risus.Aenean eu leo eu nibh tristique posuere quis quis massa. Nullam lacinia luctus sem ut vehicula.");
}
/**
 * Returns how long to wait before attempting another connection to {@code node},
 * as tracked by the connection state machine.
 */
@Override
public long connectionDelay(Node node, long now) {
    final String nodeId = node.idString();
    return connectionStates.connectionDelay(nodeId, now);
}
// A connected node with exponential backoff disabled should report an effectively
// infinite reconnect delay (Long.MAX_VALUE).
@Test
public void testConnectionDelayConnectedWithNoExponentialBackoff() {
    awaitReady(clientWithNoExponentialBackoff, node);
    long now = time.milliseconds();
    long delay = clientWithNoExponentialBackoff.connectionDelay(node, now);
    assertEquals(Long.MAX_VALUE, delay);
}
/**
 * Validates a URL string: it must parse as a URI and pass scheme, authority, path,
 * query and fragment checks in that order. The {@code file:} scheme is special-cased:
 * an empty authority is accepted outright, while an authority containing a colon
 * (e.g. a Windows drive letter) is rejected.
 *
 * @param value URL to validate; {@code null} is invalid
 * @return {@code true} if the value is a valid URL
 */
public boolean isValid(String value) {
    if (value == null) {
        return false;
    }

    URI uri; // ensure value is a valid URI
    try {
        uri = new URI(value);
    } catch (URISyntaxException e) {
        return false;
    }
    // OK, perform additional validation
    String scheme = uri.getScheme();
    if (!isValidScheme(scheme)) {
        return false;
    }
    String authority = uri.getRawAuthority();
    if ("file".equals(scheme) && (authority == null || "".equals(authority))) {
        // Special case - file: allows an empty authority
        return true; // this is a local file - nothing more to do here
    } else if ("file".equals(scheme) && authority != null && authority.contains(":")) {
        // file: with a colon in the authority (drive-letter style) is not a valid URL
        return false;
    } else {
        // Validate the authority
        if (!isValidAuthority(authority)) {
            return false;
        }
    }
    if (!isValidPath(uri.getRawPath())) {
        return false;
    }
    if (!isValidQuery(uri.getRawQuery())) {
        return false;
    }
    if (!isValidFragment(uri.getRawFragment())) {
        return false;
    }
    return true;
}
// VALIDATOR-363: dots embedded in path segments are legal, but bare "." / ".."
// path segments (path traversal) must be rejected.
@Test
public void testValidator363() {
    UrlValidator urlValidator = new UrlValidator();
    assertTrue(urlValidator.isValid("http://www.example.org/a/b/hello..world"));
    assertTrue(urlValidator.isValid("http://www.example.org/a/hello..world"));
    assertTrue(urlValidator.isValid("http://www.example.org/hello.world/"));
    assertTrue(urlValidator.isValid("http://www.example.org/hello..world/"));
    assertTrue(urlValidator.isValid("http://www.example.org/hello.world"));
    assertTrue(urlValidator.isValid("http://www.example.org/hello..world"));
    assertTrue(urlValidator.isValid("http://www.example.org/..world"));
    assertTrue(urlValidator.isValid("http://www.example.org/.../world"));
    assertFalse(urlValidator.isValid("http://www.example.org/../world"));
    assertFalse(urlValidator.isValid("http://www.example.org/.."));
    assertFalse(urlValidator.isValid("http://www.example.org/../"));
    assertFalse(urlValidator.isValid("http://www.example.org/./.."));
    assertFalse(urlValidator.isValid("http://www.example.org/././.."));
    assertTrue(urlValidator.isValid("http://www.example.org/..."));
    assertTrue(urlValidator.isValid("http://www.example.org/.../"));
    assertTrue(urlValidator.isValid("http://www.example.org/.../.."));
}
/**
 * Scores a closed position: one if it made a profit, zero otherwise.
 */
@Override
public Num calculate(BarSeries series, Position position) {
    if (position.hasProfit()) {
        return series.one();
    }
    return series.zero();
}
// With no closed positions in the trading record the criterion should evaluate to zero.
@Test
public void calculateWithNoPositions() {
    MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);

    assertNumEquals(0, getCriterion().calculate(series, new BaseTradingRecord()));
}
/**
 * Re-encodes {@code str} from {@code inputEncoding} to {@code outputEncoding}.
 * Supported encodings (case-insensitive): hex, utf8, ascii, base64.
 *
 * @return the re-encoded string, or {@code null} if any argument is {@code null}
 * @throws KsqlFunctionException if the encoding pair is not supported
 */
@Udf(description = "Returns a new string encoded using the outputEncoding ")
public String encode(
    @UdfParameter(
        description = "The source string. If null, then function returns null.") final String str,
    @UdfParameter(
        description = "The input encoding."
            + " If null, then function returns null.") final String inputEncoding,
    @UdfParameter(
        description = "The output encoding."
            + " If null, then function returns null.") final String outputEncoding) {
  if (str == null || inputEncoding == null || outputEncoding == null) {
    return null;
  }

  // Encoder lookup key is the concatenation of the lowercased input and output encodings.
  final String encodedString = inputEncoding.toLowerCase() + outputEncoding.toLowerCase();

  final Encode.Encoder encoder = ENCODER_MAP.get(encodedString);
  if (encoder == null) {
    throw new KsqlFunctionException("Supported input and output encodings are: "
        + "hex, utf8, ascii and base64");
  }
  return encoder.apply(str);
}
// ascii->utf8 keeps 7-bit characters unchanged and replaces non-ASCII characters
// with '?'.
@Test
public void shouldEncodeAsciiToUtf8() {
  assertThat(udf.encode("Example!", "ascii", "utf8"), is("Example!"));
  assertThat(udf.encode("Plant trees", "ascii", "utf8"), is("Plant trees"));
  assertThat(udf.encode("1 + 1 = 1", "ascii", "utf8"), is("1 + 1 = 1"));
  assertThat(udf.encode("Ελλάδα", "ascii", "utf8"), is("??????"));
  assertThat(udf.encode("Übermensch", "ascii", "utf8"), is("?bermensch"));
}
/**
 * Reports the number of entries currently held by the backing map.
 */
@Override
public int size() {
    final int entryCount = map.size();
    return entryCount;
}
// size() must reflect the number of entries in the backing map.
@Test
public void testSize() {
    map.put(23, "foo");
    map.put(42, "bar");

    assertEquals(2, adapter.size());
}
/**
 * Fails if the subject is NaN (or null). Infinities and all finite values pass.
 */
public final void isNotNaN() {
  if (actual == null) {
    // null gets a dedicated failure message rather than being compared against NaN
    failWithActual(simpleFact("expected a float other than NaN"));
  } else {
    isNotEqualTo(NaN);
  }
}
// All finite values and both infinities are "not NaN".
@Test
public void isNotNaN() {
    assertThat(1.23f).isNotNaN();
    assertThat(Float.MAX_VALUE).isNotNaN();
    assertThat(-1.0 * Float.MIN_VALUE).isNotNaN();
    assertThat(Float.POSITIVE_INFINITY).isNotNaN();
    assertThat(Float.NEGATIVE_INFINITY).isNotNaN();
}
/**
 * Creates a match criterion on the IPv4 destination address prefix.
 *
 * @param ip destination prefix to match
 * @return an IPV4_DST criterion carrying the prefix
 */
public static Criterion matchIPDst(IpPrefix ip) {
    IPCriterion dstCriterion = new IPCriterion(ip, Type.IPV4_DST);
    return dstCriterion;
}
// matchIPDst should produce an IPV4_DST IPCriterion carrying the given prefix.
@Test
public void testMatchIPDstMethod() {
    Criterion matchIPDst = Criteria.matchIPDst(ip1);
    IPCriterion ipCriterion =
            checkAndConvert(matchIPDst,
                            Criterion.Type.IPV4_DST,
                            IPCriterion.class);
    assertThat(ipCriterion.ip(), is(equalTo(ip1)));
}
/**
 * Resolves a {@code CatalogLoader} from Flink catalog options. An explicit
 * {@code catalog-impl} takes precedence (and may not be combined with
 * {@code catalog-type}); otherwise {@code catalog-type} chooses hive (default),
 * hadoop, or rest.
 *
 * @param name       catalog name used for loader construction and error messages
 * @param properties Flink catalog options
 * @param hadoopConf base Hadoop configuration
 * @throws UnsupportedOperationException for an unrecognized catalog-type
 */
static CatalogLoader createCatalogLoader(
    String name, Map<String, String> properties, Configuration hadoopConf) {
  String catalogImpl = properties.get(CatalogProperties.CATALOG_IMPL);
  if (catalogImpl != null) {
    String catalogType = properties.get(ICEBERG_CATALOG_TYPE);
    // Both options set is a user error: fail with both values echoed.
    Preconditions.checkArgument(
        catalogType == null,
        "Cannot create catalog %s, both catalog-type and catalog-impl are set: catalog-type=%s, catalog-impl=%s",
        name,
        catalogType,
        catalogImpl);
    return CatalogLoader.custom(name, properties, hadoopConf, catalogImpl);
  }

  String catalogType = properties.getOrDefault(ICEBERG_CATALOG_TYPE, ICEBERG_CATALOG_TYPE_HIVE);
  switch (catalogType.toLowerCase(Locale.ENGLISH)) {
    case ICEBERG_CATALOG_TYPE_HIVE:
      // The values of properties 'uri', 'warehouse', 'hive-conf-dir' are allowed to be null, in
      // that case it will fall back to parsing those values from the hadoop configuration
      // which is loaded from the classpath.
      String hiveConfDir = properties.get(HIVE_CONF_DIR);
      String hadoopConfDir = properties.get(HADOOP_CONF_DIR);
      Configuration newHadoopConf = mergeHiveConf(hadoopConf, hiveConfDir, hadoopConfDir);
      return CatalogLoader.hive(name, newHadoopConf, properties);

    case ICEBERG_CATALOG_TYPE_HADOOP:
      return CatalogLoader.hadoop(name, hadoopConf, properties);

    case ICEBERG_CATALOG_TYPE_REST:
      return CatalogLoader.rest(name, hadoopConf, properties);

    default:
      throw new UnsupportedOperationException(
          "Unknown catalog-type: " + catalogType + " (Must be 'hive', 'hadoop' or 'rest')");
  }
}
// An unrecognized catalog-type must fail with UnsupportedOperationException.
@Test
public void testLoadCatalogUnknown() {
    String catalogName = "unknownCatalog";
    props.put(FlinkCatalogFactory.ICEBERG_CATALOG_TYPE, "fooType");

    assertThatThrownBy(
            () -> FlinkCatalogFactory.createCatalogLoader(catalogName, props, new Configuration()))
        .isInstanceOf(UnsupportedOperationException.class)
        .hasMessageStartingWith("Unknown catalog-type: fooType");
}
/**
 * Returns a matcher that matches requests whose RPC method name equals {@code method}.
 *
 * <p>NOTE(review): an empty method is reported via {@link NullPointerException} — this
 * looks like the library's established argument-check convention rather than a bug;
 * callers may rely on the exception type, so it is preserved.
 *
 * @throws NullPointerException if {@code method} is null or empty
 */
public static <Req extends RpcRequest> Matcher<Req> methodEquals(String method) {
  if (method == null) throw new NullPointerException("method == null");
  if (method.isEmpty()) throw new NullPointerException("method is empty");
  return new RpcMethodEquals<Req>(method);
}
// The matcher should match a request reporting the same method name.
@Test
void methodEquals_matched() {
    when(request.method()).thenReturn("Check");

    assertThat(methodEquals("Check").matches(request)).isTrue();
}
/**
 * Verifies scenario expectations against the DMN evaluation result stored in the
 * request context. For each expected output, the decision result matching the fact
 * identifier's name is looked up; each expected fact mapping value is then evaluated
 * and recorded into the runner data as a {@code ScenarioResult}.
 *
 * @throws ScenarioException     if no decision result exists for an expected decision name
 * @throws IllegalStateException if an expected value has no corresponding fact mapping
 *                               (indicates a corrupted scenario model)
 */
@Override
protected void verifyConditions(ScesimModelDescriptor scesimModelDescriptor,
                                ScenarioRunnerData scenarioRunnerData,
                                ExpressionEvaluatorFactory expressionEvaluatorFactory,
                                Map<String, Object> requestContext) {
    DMNResult dmnResult = (DMNResult) requestContext.get(DMNScenarioExecutableBuilder.DMN_RESULT);
    List<DMNMessage> dmnMessages = dmnResult.getMessages();

    for (ScenarioExpect output : scenarioRunnerData.getExpects()) {
        FactIdentifier factIdentifier = output.getFactIdentifier();
        String decisionName = factIdentifier.getName();
        DMNDecisionResult decisionResult = dmnResult.getDecisionResultByName(decisionName);
        if (decisionResult == null) {
            throw new ScenarioException("DMN execution has not generated a decision result with name " + decisionName);
        }

        for (FactMappingValue expectedResult : output.getExpectedResult()) {
            ExpressionIdentifier expressionIdentifier = expectedResult.getExpressionIdentifier();

            FactMapping factMapping = scesimModelDescriptor.getFactMapping(factIdentifier, expressionIdentifier)
                    .orElseThrow(() -> new IllegalStateException("Wrong expression, this should not happen"));

            ExpressionEvaluator expressionEvaluator = expressionEvaluatorFactory.getOrCreate(expectedResult);

            // fillResult evaluates lazily via the supplier and captures errors on the result.
            ScenarioResult scenarioResult = fillResult(expectedResult,
                    () -> getSingleFactValueResult(factMapping, expectedResult, decisionResult, dmnMessages, expressionEvaluator),
                    expressionEvaluator);

            scenarioRunnerData.addResult(scenarioResult);
        }
    }
}
// A successful decision whose result payload has the wrong shape (a plain string
// where a complex structure is expected) must surface as a ScenarioException.
@Test
public void verifyConditions_decisionResultIsNotNullButMalformed() {
    // test 3 - now result is not null but data structure is wrong (expected steps but data is a simple string)
    ScenarioRunnerData scenarioRunnerData = new ScenarioRunnerData();
    scenarioRunnerData.addExpect(new ScenarioExpect(personFactIdentifier, List.of(firstNameExpectedValue)));
    when(dmnResultMock.getDecisionResultByName(anyString())).thenReturn(dmnDecisionResultMock);
    when(dmnDecisionResultMock.getEvaluationStatus()).thenReturn(DecisionEvaluationStatus.SUCCEEDED);
    when(dmnDecisionResultMock.getResult()).thenReturn("");

    assertThatThrownBy(() -> runnerHelper.verifyConditions(simulation.getScesimModelDescriptor(),
                                                           scenarioRunnerData,
                                                           expressionEvaluatorFactory,
                                                           requestContextMock))
            .isInstanceOf(ScenarioException.class)
            .hasMessage("Wrong resultRaw structure because it is not a complex type as expected");
}
/**
 * Squared distance from point (pFromX, pFromY) to the segment [A, B]:
 * first computes where the point projects onto the segment (clamped projection
 * factor), then measures the squared distance to that projection.
 */
public static double getSquaredDistanceToSegment(
        final double pFromX, final double pFromY,
        final double pAX, final double pAY,
        final double pBX, final double pBY
) {
    final double projectionFactor =
            getProjectionFactorToSegment(pFromX, pFromY, pAX, pAY, pBX, pBY);
    return getSquaredDistanceToProjection(
            pFromX, pFromY, pAX, pAY, pBX, pBY, projectionFactor);
}
// Exercises point-to-segment squared distance: degenerate segments, endpoints,
// and perpendicular/oblique projections onto horizontal segments.
@Test
public void test_getSquareDistanceToSegment() {
    final int xA = 100;
    final int yA = 200;
    Assert.assertEquals(0, Distance.getSquaredDistanceToSegment(xA, yA, xA, yA, xA, yA), mDelta);
    Assert.assertEquals(10 * 10, Distance.getSquaredDistanceToSegment(xA, yA, xA + 10, yA, xA + 10, yA), mDelta);
    Assert.assertEquals(20 * 20, Distance.getSquaredDistanceToSegment(xA, yA, xA, yA + 20, xA, yA + 20), mDelta);
    Assert.assertEquals(20 * 20, Distance.getSquaredDistanceToSegment(xA, yA + 20, xA, yA, xA + 100, yA), mDelta);
    Assert.assertEquals(10 * 10 + 30 * 30, Distance.getSquaredDistanceToSegment(xA - 10, yA - 30, xA, yA, xA + 100, yA), mDelta);
    Assert.assertEquals(100 * 100 + 70 * 70, Distance.getSquaredDistanceToSegment(xA + 200, yA - 70, xA, yA, xA + 100, yA), mDelta);
    Assert.assertEquals(7000 * 7000, Distance.getSquaredDistanceToSegment(xA + 200, yA - 7000, xA, yA, xA + 200, yA), mDelta);
    Assert.assertEquals(7000 * 7000, Distance.getSquaredDistanceToSegment(xA + 200, yA - 7000, xA, yA, xA + 1000, yA), mDelta);
}
/**
 * BIGINT multiplication with overflow checking. Overflow surfaces as a
 * {@code PrestoException(NUMERIC_VALUE_OUT_OF_RANGE)} carrying both operands
 * instead of silently wrapping.
 */
@ScalarOperator(MULTIPLY)
@SqlType(StandardTypes.BIGINT)
public static long multiply(@SqlType(StandardTypes.BIGINT) long left, @SqlType(StandardTypes.BIGINT) long right)
{
    try {
        return Math.multiplyExact(left, right);
    }
    catch (ArithmeticException e) {
        throw new PrestoException(NUMERIC_VALUE_OUT_OF_RANGE, format("bigint multiplication overflow: %s * %s", left, right), e);
    }
}
// Spot-checks 64-bit multiplication results within the non-overflowing range.
@Test
public void testMultiply()
{
    assertFunction("100000000037 * 37", BIGINT, 100000000037L * 37L);
    assertFunction("37 * 100000000017", BIGINT, 37 * 100000000017L);
    assertFunction("100000000017 * 37", BIGINT, 100000000017L * 37L);
    assertFunction("100000000017 * 10000017", BIGINT, 100000000017L * 10000017L);
}
/**
 * Looks up a destination filter rule by ID, scoped to the given stream.
 *
 * <p>Bug fix: the previous implementation executed the stream-scoped query but
 * discarded its result and returned the rule by ID alone, so the stream filter was
 * never applied and a rule belonging to a different stream could be returned. The
 * method now returns the first document matching BOTH the stream ID and the rule ID.
 * (Assumes {@code collection} is a typed MongoCollection of the DTO — confirm against
 * the class's field declarations.)
 *
 * @param streamId stream the rule must belong to
 * @param id       rule ID
 * @return the matching rule, or empty if no rule with this ID exists on the stream
 */
public Optional<StreamDestinationFilterRuleDTO> findByIdForStream(String streamId, String id) {
    return Optional.ofNullable(
            collection.find(and(eq(FIELD_STREAM_ID, streamId), idEq(id))).first());
}
// Fetching a filter rule by ID should return the fixture entry with all fields and
// its rule conditions populated.
@Test
@MongoDBFixtures("StreamDestinationFilterServiceTest-2024-07-01-1.json")
void findByIdForStream() {
    assertThat(service.findByIdForStream("foo", "54e3deadbeefdeadbeef0000")).get().satisfies(dto -> {
        assertThat(dto.title()).isEqualTo("Test Filter 1");
        assertThat(dto.streamId()).isEqualTo("54e3deadbeefdeadbeef1000");
        assertThat(dto.destinationType()).isEqualTo("indexer");
        assertThat(dto.status()).isEqualTo(StreamDestinationFilterRuleDTO.Status.ENABLED);
        assertThat(dto.rule()).satisfies(rule -> {
            assertThat(rule.operator()).isEqualTo(RuleBuilderStep.Operator.OR);
            assertThat(rule.conditions()).hasSize(2);
            assertThat(rule.actions()).isNull();
        });
    });
}
/**
 * Returns the checkbox answer recorded for the given question.
 *
 * @throws MissingCheckboxAnswerForQuestionException if no answer exists for the question id
 */
public CheckboxAnswer getAnswerByQuestionId(long questionId) {
    final boolean answered = checkboxAnswers.containsKey(questionId);
    if (!answered) {
        throw new MissingCheckboxAnswerForQuestionException(questionId);
    }
    return checkboxAnswers.get(questionId);
}
// Looks up the checkbox answer for question 1 and verifies its selected option ids.
@Test
void 질문_ID로_선택형_답변을_반환한다() {
    // given
    CheckboxAnswers checkboxAnswers = new CheckboxAnswers(List.of(new CheckboxAnswer(1, List.of(1L))));

    // when
    CheckboxAnswer actual = checkboxAnswers.getAnswerByQuestionId(1);

    // then
    assertThat(actual.getSelectedOptionIds())
            .extracting(CheckBoxAnswerSelectedOption::getSelectedOptionId)
            .containsExactly(1L);
}