focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
public static URI parse(String gluePath) { requireNonNull(gluePath, "gluePath may not be null"); if (gluePath.isEmpty()) { return rootPackageUri(); } // Legacy from the Cucumber Eclipse plugin // Older versions of Cucumber allowed it. if (CLASSPATH_SCHEME_PREFIX.equals(gluePath)) { return rootPackageUri(); } if (nonStandardPathSeparatorInUse(gluePath)) { String standardized = replaceNonStandardPathSeparator(gluePath); return parseAssumeClasspathScheme(standardized); } if (isProbablyPackage(gluePath)) { String path = resourceNameOfPackageName(gluePath); return parseAssumeClasspathScheme(path); } return parseAssumeClasspathScheme(gluePath); }
@Test
void can_parse_relative_path_form() {
    // A plain relative path is interpreted under the classpath scheme.
    URI parsed = GluePath.parse("com/example/app");
    assertAll(
            () -> assertThat(parsed.getScheme(), is("classpath")),
            () -> assertThat(parsed.getSchemeSpecificPart(), is("/com/example/app")));
}
/**
 * Creates sliding windows with the given time difference and grace period,
 * validating both durations via the shared millisecond-validation helpers.
 */
public static SlidingWindows ofTimeDifferenceAndGrace(final Duration timeDifference, final Duration afterWindowEnd) throws IllegalArgumentException {
    final String diffMsgPrefix = prepareMillisCheckFailMsgPrefix(timeDifference, "timeDifference");
    final long diffMs = validateMillisecondDuration(timeDifference, diffMsgPrefix);

    final String graceMsgPrefix = prepareMillisCheckFailMsgPrefix(afterWindowEnd, "afterWindowEnd");
    final long graceMs = validateMillisecondDuration(afterWindowEnd, graceMsgPrefix);

    return new SlidingWindows(diffMs, graceMs);
}
@Test
public void equalsAndHashcodeShouldNotBeEqualForDifferentGracePeriod() {
    // Same random time difference, but two grace periods drawn from disjoint
    // ranges ([1,10) vs [21,41)) so they can never be equal.
    final long diff = 1L + (long) (Math.random() * (10L - 1L));
    final long smallGrace = 1L + (long) (Math.random() * (10L - 1L));
    final long largeGrace = 21L + (long) (Math.random() * (41L - 21L));
    verifyInEquality(
            SlidingWindows.ofTimeDifferenceAndGrace(ofMillis(diff), ofMillis(smallGrace)),
            SlidingWindows.ofTimeDifferenceAndGrace(ofMillis(diff), ofMillis(largeGrace)));
}
/**
 * Creates a child span of the given trace context.
 *
 * @param parent the context the new span descends from; must not be null
 * @return the new child span
 * @throws NullPointerException if {@code parent} is null
 */
public Span newChild(TraceContext parent) {
    if (parent == null) throw new NullPointerException("parent == null");
    // decorateContext derives the child context from the parent's span id.
    return _toSpan(parent, decorateContext(parent, parent.spanId()));
}
@Test
void newChild() {
    TraceContext root = tracer.newTrace().context();
    // The child shares the parent's trace id, points back at the parent's
    // span id, and is a real (recorded) span.
    assertThat(tracer.newChild(root))
            .satisfies(child -> {
                assertThat(child.context().traceIdString()).isEqualTo(root.traceIdString());
                assertThat(child.context().parentIdString()).isEqualTo(root.spanIdString());
            })
            .isInstanceOf(RealSpan.class);
}
/**
 * Returns the task manager location future for the given vertex, or empty
 * while the vertex is still in the CREATED state (no location available yet).
 */
@Override
public Optional<CompletableFuture<TaskManagerLocation>> getTaskManagerLocation(
        ExecutionVertexID executionVertexId) {
    final ExecutionVertex vertex = getExecutionVertex(executionVertexId);
    return vertex.getExecutionState() == ExecutionState.CREATED
            ? Optional.empty()
            : Optional.of(vertex.getCurrentTaskManagerLocationFuture());
}
@Test
void testGetNonExistingExecutionVertexWillThrowException() throws Exception {
    final JobVertex vertex = ExecutionGraphTestUtils.createNoOpVertex(1);
    final ExecutionGraph graph =
            ExecutionGraphTestUtils.createExecutionGraph(
                    EXECUTOR_EXTENSION.getExecutor(), vertex);
    final ExecutionGraphToInputsLocationsRetrieverAdapter retriever =
            new ExecutionGraphToInputsLocationsRetrieverAdapter(graph);

    // An id that belongs to no vertex of the graph.
    final ExecutionVertexID unknownId = new ExecutionVertexID(new JobVertexID(), 0);

    assertThatThrownBy(
                    () -> retriever.getTaskManagerLocation(unknownId),
                    "Should throw exception if execution vertex doesn't exist!")
            .isInstanceOf(IllegalStateException.class);
}
/**
 * Updates an instance of the given service under the default group.
 *
 * @param serviceName name of the service the instance belongs to
 * @param instance    the instance to update
 * @throws NacosException if the delegated update fails
 */
@Override
public void updateInstance(String serviceName, Instance instance) throws NacosException {
    updateInstance(serviceName, Constants.DEFAULT_GROUP, instance);
}
@Test void testUpdateInstance1() throws NacosException { //given String serviceName = "service1"; String groupName = "group1"; Instance instance = new Instance(); //when nacosNamingMaintainService.updateInstance(serviceName, groupName, instance); //then verify(serverProxy, times(1)).updateInstance(serviceName, groupName, instance); }
/**
 * Visits a menu entry, adding it as a submenu or an action item.
 * Entries whose "allowed" attribute is explicitly FALSE are skipped.
 */
@Override
public void visit(Entry entry) {
    if (Boolean.FALSE.equals(entry.getAttribute("allowed"))) {
        return;
    }
    if (containsSubmenu(entry)) {
        addSubmenu(entry);
    } else {
        addActionItem(entry);
    }
}
// Verifies that hiding a popup menu notifies the popup listener about hidden
// child entries. NOTE(review): relies on Thread.sleep + invokeAndWait to let
// the asynchronous visibility event reach the listener — inherently timing
// sensitive, which is presumably why macOS is skipped; confirm before changing.
@Test
public void whenPopupMenuBecomesInvisible_popupListenerIsCalled() throws Exception {
    if(Compat.isMacOsX())
        return;
    Entry parentMenuEntry = new Entry();
    final JMenu parentMenu = new JMenu();
    new EntryAccessor().setComponent(parentMenuEntry, parentMenu);
    parentMenuEntry.addChild(menuEntry);
    menuEntry.addChild(actionEntry);
    menuActionGroupBuilder.visit(menuEntry);
    JMenu item = (JMenu)new EntryAccessor().getComponent(menuEntry);
    // Toggle visibility to trigger the popup-hidden notification.
    item.getPopupMenu().setVisible(true);
    item.getPopupMenu().setVisible(false);
    // Give the asynchronous event time to fire, then drain the EDT queue with
    // an empty invokeAndWait before verifying.
    Thread.sleep(100);
    SwingUtilities.invokeAndWait(new Runnable() {
        @Override
        public void run() {
        }
    });
    verify(popupListener).childEntriesHidden(menuEntry);
}
/**
 * Static factory: creates a bound pinned at the given position.
 *
 * @param position the position to bound at
 * @return a new {@code PositionBound} wrapping {@code position}
 */
public static PositionBound at(final Position position) {
    return new PositionBound(position);
}
@Test public void shouldNotHash() { final PositionBound bound = PositionBound.at(Position.emptyPosition()); assertThrows(UnsupportedOperationException.class, bound::hashCode); // going overboard... final HashSet<PositionBound> set = new HashSet<>(); assertThrows(UnsupportedOperationException.class, () -> set.add(bound)); final HashMap<PositionBound, Integer> map = new HashMap<>(); assertThrows(UnsupportedOperationException.class, () -> map.put(bound, 5)); }
/**
 * Loads the departments with the given ids; an empty or null id collection
 * yields an empty list without touching the database.
 */
@Override
public List<DeptDO> getDeptList(Collection<Long> ids) {
    return CollUtil.isEmpty(ids)
            ? Collections.emptyList()
            : deptMapper.selectBatchIds(ids);
}
@Test public void testGetDeptList_ids() { // mock 数据 DeptDO deptDO01 = randomPojo(DeptDO.class); deptMapper.insert(deptDO01); DeptDO deptDO02 = randomPojo(DeptDO.class); deptMapper.insert(deptDO02); // 准备参数 List<Long> ids = Arrays.asList(deptDO01.getId(), deptDO02.getId()); // 调用 List<DeptDO> deptDOList = deptService.getDeptList(ids); // 断言 assertEquals(2, deptDOList.size()); assertEquals(deptDO01, deptDOList.get(0)); assertEquals(deptDO02, deptDOList.get(1)); }
/**
 * Returns the number of events currently held, as reported by the
 * underlying event handler.
 *
 * @return the current event count
 */
@Override
public int size() {
    return eventHandler.size();
}
// Verifies that interrupting the queue thread fails pending deferred events
// with InterruptedException and that the cleanup callback still runs.
@Test
public void testInterruptedWithDeferredEvents() throws Exception {
    CompletableFuture<Void> cleanupFuture = new CompletableFuture<>();
    try (KafkaEventQueue queue = new KafkaEventQueue(Time.SYSTEM, logContext,
            "testInterruptedWithDeferredEvents", () -> cleanupFuture.complete(null))) {
        // Capture the queue's worker thread so it can be interrupted below.
        CompletableFuture<Thread> queueThread = new CompletableFuture<>();
        queue.append(() -> queueThread.complete(Thread.currentThread()));
        ExceptionTrapperEvent ieTrapper1 = new ExceptionTrapperEvent();
        ExceptionTrapperEvent ieTrapper2 = new ExceptionTrapperEvent();
        // Two deferred events scheduled hours in the future (intentionally
        // registered out of order) so neither can fire before the interrupt.
        queue.scheduleDeferred("ie2",
                __ -> OptionalLong.of(Time.SYSTEM.nanoseconds() + HOURS.toNanos(2)),
                ieTrapper2);
        queue.scheduleDeferred("ie1",
                __ -> OptionalLong.of(Time.SYSTEM.nanoseconds() + HOURS.toNanos(1)),
                ieTrapper1);
        // Wait until both deferred events are registered before interrupting.
        TestUtils.retryOnExceptionWithTimeout(30000,
                () -> assertEquals(2, queue.size()));
        queueThread.get().interrupt();
        cleanupFuture.get();
        // Both trapped exceptions must be InterruptedException.
        assertEquals(InterruptedException.class, ieTrapper1.exception.get().getClass());
        assertEquals(InterruptedException.class, ieTrapper2.exception.get().getClass());
    }
}
/**
 * Adds the configured Cache-Control header to HTTP responses before
 * continuing the filter chain; non-HTTP responses pass through untouched.
 */
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain)
        throws IOException, ServletException {
    if (response instanceof HttpServletResponse) {
        ((HttpServletResponse) response).setHeader("Cache-Control", CACHE_SETTINGS);
    }
    chain.doFilter(request, response);
}
@Test
void setsACacheHeaderOnTheResponse() throws Exception {
    filter.doFilter(request, response, chain);

    // The header must be written before the chain continues.
    final InOrder ordered = inOrder(response, chain);
    ordered.verify(response).setHeader("Cache-Control", "must-revalidate,no-cache,no-store");
    ordered.verify(chain).doFilter(request, response);
}
/**
 * Maps a replica to a broker set id based on its topic name.
 * Per-topic results are cached; all caches are invalidated whenever the set of
 * broker set ids reported by the resolution helper changes, since the
 * consistent-hash mapping must then be recomputed.
 *
 * @param replica                  the replica whose topic is mapped
 * @param clusterModel             the cluster model (unused here; part of the interface)
 * @param brokerSetResolutionHelper supplies the current broker-set-to-brokers mapping
 * @return the broker set id the replica's topic maps to
 * @throws ReplicaToBrokerSetMappingException if there are no broker sets at all
 */
@Override
public String brokerSetIdForReplica(final Replica replica, final ClusterModel clusterModel,
    final BrokerSetResolutionHelper brokerSetResolutionHelper) throws ReplicaToBrokerSetMappingException {
  String topicName = replica.topicPartition().topic();
  Map<String, Set<Integer>> brokersByBrokerSetId = brokerSetResolutionHelper.brokersByBrokerSetId();
  int numBrokerSets = brokersByBrokerSetId.size();
  if (numBrokerSets == 0) {
    throw new ReplicaToBrokerSetMappingException("Can not map replica to a broker set since there are no broker sets.");
  }
  // If the broker sets haven't changed and the topic mapping is cached, read from cache.
  if (brokersByBrokerSetId.keySet().equals(_brokerSetIdCache) && _brokerSetIdByTopicCache.containsKey(topicName)) {
    return _brokerSetIdByTopicCache.get(topicName);
  }
  // If the broker set ids have changed, clear the caches and re-map topics,
  // since the consistent hash ring depends on the id set.
  if (!brokersByBrokerSetId.keySet().equals(_brokerSetIdCache)) {
    _brokerSetIdByTopicCache.clear();
    _sortedBrokerSetIdsCache.clear();
    _brokerSetIdCache = new HashSet<>(brokersByBrokerSetId.keySet());
  }
  String brokerSetId = brokerSetIdForTopic(topicName, brokersByBrokerSetId);
  _brokerSetIdByTopicCache.put(topicName, brokerSetId);
  return brokerSetId;
}
// Verifies that with a single broker set, every leader replica maps to it.
@Test
public void testSingleBrokerSetMappingPolicy() throws BrokerSetResolutionException, ReplicaToBrokerSetMappingException {
    ClusterModel clusterModel = DeterministicCluster.brokerSetSatisfiable1();
    // One broker set "BS1" containing all six brokers.
    Map<String, Set<Integer>> testSingleBrokerSetMapping = Collections.singletonMap("BS1", Set.of(0, 1, 2, 3, 4, 5));
    BrokerSetResolver brokerSetResolver = EasyMock.createNiceMock(BrokerSetResolver.class);
    EasyMock.expect(brokerSetResolver.brokerIdsByBrokerSetId(BrokerSetResolutionHelper.getRackIdByBrokerIdMapping(clusterModel)))
        .andReturn(testSingleBrokerSetMapping);
    EasyMock.replay(brokerSetResolver);
    BrokerSetResolutionHelper brokerSetResolutionHelper = new BrokerSetResolutionHelper(clusterModel, brokerSetResolver);
    // Every leader replica must resolve to the sole broker set.
    for (Replica replica : clusterModel.leaderReplicas()) {
        assertEquals("BS1",
            TOPIC_NAME_HASH_BROKER_SET_MAPPING_POLICY.brokerSetIdForReplica(replica, clusterModel, brokerSetResolutionHelper));
    }
}
/**
 * Executes the hook by invoking the underlying static method.
 */
@Override
public void execute() {
    invokeMethod();
}
@Test
void can_create_with_no_argument() throws Throwable {
    // A zero-argument static hook method can be wrapped and executed.
    Method noArgs = JavaStaticHookDefinitionTest.class.getMethod("no_arguments");
    new JavaStaticHookDefinition(noArgs, 0, lookup).execute();
    assertTrue(invoked);
}
/**
 * Injects every dependency the object declares interest in via the *Aware
 * marker interfaces, then delegates to the external context (if any).
 */
@Override
public Object initialize(Object obj) {
    if (obj instanceof HazelcastInstanceAware hzAware) {
        hzAware.setHazelcastInstance(instance);
    }
    if (obj instanceof NodeAware nodeAware) {
        nodeAware.setNode(instance.node);
    }
    if (obj instanceof SerializationServiceAware ssAware) {
        ssAware.setSerializationService(instance.node.getSerializationService());
    }
    // Delegate last so the external context sees the fully injected object.
    return hasExternalContext ? externalContext.initialize(obj) : obj;
}
@Test
public void testInitialize() {
    DependencyInjectionUserClass initialized = (DependencyInjectionUserClass)
            serializationService.getManagedContext().initialize(userClass);

    // All three dependencies must have been injected...
    assertEquals(hazelcastInstance, initialized.hazelcastInstance);
    assertEquals(node, initialized.node);
    assertEquals(serializationService, initialized.serializationService);
    // ...and the external user context must have been invoked.
    assertTrue(userContext.wasCalled);
}
/**
 * Validates that a Pubsub message respects the service limits on payload size,
 * attribute count, and attribute key/value sizes, and that the combined encoded
 * size fits within the given publish batch limit.
 *
 * @param message             the message to validate
 * @param maxPublishBatchSize maximum allowed total size (payload plus attributes) in bytes
 * @return the computed total size of the message in bytes
 * @throws SizeLimitExceededException if any individual limit or the total limit is exceeded
 */
static int validatePubsubMessageSize(PubsubMessage message, int maxPublishBatchSize)
    throws SizeLimitExceededException {
  int payloadSize = message.getPayload().length;
  if (payloadSize > PUBSUB_MESSAGE_DATA_MAX_BYTES) {
    throw new SizeLimitExceededException(
        "Pubsub message data field of length "
            + payloadSize
            + " exceeds maximum of "
            + PUBSUB_MESSAGE_DATA_MAX_BYTES
            + " bytes. See https://cloud.google.com/pubsub/quotas#resource_limits");
  }
  int totalSize = payloadSize;

  @Nullable Map<String, String> attributes = message.getAttributeMap();
  if (attributes != null) {
    if (attributes.size() > PUBSUB_MESSAGE_MAX_ATTRIBUTES) {
      throw new SizeLimitExceededException(
          "Pubsub message contains "
              + attributes.size()
              + " attributes which exceeds the maximum of "
              + PUBSUB_MESSAGE_MAX_ATTRIBUTES
              + ". See https://cloud.google.com/pubsub/quotas#resource_limits");
    }

    // Consider attribute encoding overhead, so it doesn't go over the request limits
    totalSize += attributes.size() * PUBSUB_MESSAGE_ATTRIBUTE_ENCODE_ADDITIONAL_BYTES;

    for (Map.Entry<String, String> attribute : attributes.entrySet()) {
      String key = attribute.getKey();
      int keySize = key.getBytes(StandardCharsets.UTF_8).length;
      if (keySize > PUBSUB_MESSAGE_ATTRIBUTE_MAX_KEY_BYTES) {
        throw new SizeLimitExceededException(
            "Pubsub message attribute key '"
                + key
                + "' exceeds the maximum of "
                + PUBSUB_MESSAGE_ATTRIBUTE_MAX_KEY_BYTES
                + " bytes. See https://cloud.google.com/pubsub/quotas#resource_limits");
      }
      totalSize += keySize;

      String value = attribute.getValue();
      int valueSize = value.getBytes(StandardCharsets.UTF_8).length;
      if (valueSize > PUBSUB_MESSAGE_ATTRIBUTE_MAX_VALUE_BYTES) {
        // Only a 256-byte prefix of the value is echoed in the error message.
        throw new SizeLimitExceededException(
            "Pubsub message attribute value for key '"
                + key
                + "' starting with '"
                + value.substring(0, Math.min(256, value.length()))
                + "' exceeds the maximum of "
                + PUBSUB_MESSAGE_ATTRIBUTE_MAX_VALUE_BYTES
                + " bytes. See https://cloud.google.com/pubsub/quotas#resource_limits");
      }
      totalSize += valueSize;
    }
  }

  if (totalSize > maxPublishBatchSize) {
    throw new SizeLimitExceededException(
        "Pubsub message of length "
            + totalSize
            + " exceeds maximum of "
            + maxPublishBatchSize
            + " bytes, when considering the payload and attributes. "
            + "See https://cloud.google.com/pubsub/quotas#resource_limits");
  }
  return totalSize;
}
@Test
public void testValidatePubsubMessageSizeOnlyPayload() throws SizeLimitExceededException {
    // With no attributes, the reported size is exactly the payload length.
    byte[] payload = new byte[1024];
    int reported = PreparePubsubWriteDoFn.validatePubsubMessageSize(
            new PubsubMessage(payload, null), PUBSUB_MESSAGE_MAX_TOTAL_SIZE);
    assertEquals(payload.length, reported);
}
/**
 * Returns the human-readable representation, delegating to
 * {@code toString(true)}.
 *
 * @return the formatted summary string
 */
@Override
public String toString() {
    return toString(true);
}
@Test
public void testToString() {
    // Fixed values so the formatted columns are predictable.
    ContentSummary summary = new ContentSummary.Builder()
        .length(11111)
        .fileCount(22222)
        .directoryCount(33333)
        .quota(44444)
        .spaceConsumed(55555)
        .spaceQuota(66665)
        .build();
    String expected = " 44444 -11111 66665" +
        " 11110 33333 22222 11111 ";
    assertEquals(expected, summary.toString());
}
/**
 * Returns the map from log directory path to its {@link MetaProperties}.
 *
 * @return the log-dir-to-properties map
 */
public Map<String, MetaProperties> logDirProps() {
    return logDirProps;
}
@Test
public void testCopierWriteLogDirChanges() throws Exception {
    // Load an ensemble with a single metadata log dir.
    MetaPropertiesEnsemble.Loader loader = new MetaPropertiesEnsemble.Loader();
    loader.addMetadataLogDir(createLogDir(SAMPLE_META_PROPS_LIST.get(0)));
    MetaPropertiesEnsemble.Copier copier = new MetaPropertiesEnsemble.Copier(loader.load());

    // Register two brand new empty log dirs on the copier.
    String firstNewDir = createEmptyLogDir();
    copier.logDirProps().put(firstNewDir, SAMPLE_META_PROPS_LIST.get(1));
    String secondNewDir = createEmptyLogDir();
    copier.logDirProps().put(secondNewDir, SAMPLE_META_PROPS_LIST.get(2));
    copier.writeLogDirChanges();

    // Both new dirs must now contain the expected meta.properties files.
    assertEquals(SAMPLE_META_PROPS_LIST.get(1).toProperties(),
        PropertiesUtils.readPropertiesFile(
            new File(firstNewDir, META_PROPERTIES_NAME).getAbsolutePath()));
    assertEquals(SAMPLE_META_PROPS_LIST.get(2).toProperties(),
        PropertiesUtils.readPropertiesFile(
            new File(secondNewDir, META_PROPERTIES_NAME).getAbsolutePath()));
}
/**
 * Computes, for a collection of (possibly overlapping) offset ranges, a sorted
 * map from non-overlapping sub-ranges to the number of input ranges covering
 * each sub-range. Empty input ranges are ignored.
 */
@VisibleForTesting
static SortedMap<OffsetRange, Integer> computeOverlappingRanges(Iterable<OffsetRange> ranges) {
  ImmutableSortedMap.Builder<OffsetRange, Integer> rval =
      ImmutableSortedMap.orderedBy(OffsetRangeComparator.INSTANCE);
  List<OffsetRange> sortedRanges = Lists.newArrayList(ranges);
  if (sortedRanges.isEmpty()) {
    return rval.build();
  }
  Collections.sort(sortedRanges, OffsetRangeComparator.INSTANCE);

  // Stores ranges in smallest 'from' and then smallest 'to' order
  // e.g. [2, 7), [3, 4), [3, 5), [3, 5), [3, 6), [4, 0)
  PriorityQueue<OffsetRange> rangesWithSameFrom =
      new PriorityQueue<>(OffsetRangeComparator.INSTANCE);
  Iterator<OffsetRange> iterator = sortedRanges.iterator();

  // Stored in reverse sorted order so that when we iterate and re-add them back to
  // overlappingRanges they are stored in sorted order from smallest to largest range.to
  List<OffsetRange> rangesToProcess = new ArrayList<>();
  while (iterator.hasNext()) {
    OffsetRange current = iterator.next();
    // Skip empty ranges
    if (current.getFrom() == current.getTo()) {
      continue;
    }

    // If the current range has a different 'from' than a prior range then we must produce
    // ranges in [rangesWithSameFrom.from, current.from)
    while (!rangesWithSameFrom.isEmpty()
        && rangesWithSameFrom.peek().getFrom() != current.getFrom()) {
      rangesToProcess.addAll(rangesWithSameFrom);
      Collections.sort(rangesToProcess, OffsetRangeComparator.INSTANCE);
      rangesWithSameFrom.clear();

      int i = 0;
      long lastTo = rangesToProcess.get(i).getFrom();
      // Output all the ranges that are strictly less than current.from
      // e.g. current.to := 7 for [3, 4), [3, 5), [3, 5), [3, 6) will produce
      //   [3, 4) := 4
      //   [4, 5) := 3
      //   [5, 6) := 1
      for (; i < rangesToProcess.size(); ++i) {
        if (rangesToProcess.get(i).getTo() > current.getFrom()) {
          break;
        }
        // Output only the first of any subsequent duplicate ranges
        if (i == 0 || rangesToProcess.get(i - 1).getTo() != rangesToProcess.get(i).getTo()) {
          rval.put(
              new OffsetRange(lastTo, rangesToProcess.get(i).getTo()),
              rangesToProcess.size() - i);
          lastTo = rangesToProcess.get(i).getTo();
        }
      }

      // We exited the loop with 'to' > current.from, we must add the range
      // [lastTo, current.from) if it is non-empty
      if (lastTo < current.getFrom() && i != rangesToProcess.size()) {
        rval.put(new OffsetRange(lastTo, current.getFrom()), rangesToProcess.size() - i);
      }

      // The remaining ranges have a 'to' that is greater than 'current.from' and will overlap
      // with current so add them back to rangesWithSameFrom with the updated 'from'
      for (; i < rangesToProcess.size(); ++i) {
        rangesWithSameFrom.add(
            new OffsetRange(current.getFrom(), rangesToProcess.get(i).getTo()));
      }
      rangesToProcess.clear();
    }
    rangesWithSameFrom.add(current);
  }

  // Process the last chunk of overlapping ranges
  while (!rangesWithSameFrom.isEmpty()) {
    // This range always represents the range with the smallest 'to'
    OffsetRange current = rangesWithSameFrom.remove();
    rangesToProcess.addAll(rangesWithSameFrom);
    Collections.sort(rangesToProcess, OffsetRangeComparator.INSTANCE);
    rangesWithSameFrom.clear();

    rval.put(current, rangesToProcess.size() + 1 /* include current */);
    // Shorten all the remaining ranges such that they start with current.to
    for (OffsetRange rangeWithDifferentFrom : rangesToProcess) {
      // Skip any duplicates of current
      if (rangeWithDifferentFrom.getTo() > current.getTo()) {
        rangesWithSameFrom.add(new OffsetRange(current.getTo(), rangeWithDifferentFrom.getTo()));
      }
    }
    rangesToProcess.clear();
  }
  return rval.build();
}
@Test
public void testNoOverlapping() {
    // Two disjoint ranges each map to themselves with a count of one.
    Iterable<OffsetRange> input = Arrays.asList(range(0, 2), range(4, 6));
    Map<OffsetRange, Integer> result = computeOverlappingRanges(input);
    assertEquals(ImmutableMap.of(range(0, 2), 1, range(4, 6), 1), result);
    assertNonEmptyRangesAndPositions(input, result);
}
/**
 * Establishes a gRPC channel to the Dubbo Cert Authority server.
 * When a CA certificate path is configured the channel trusts only that CA;
 * otherwise an insecure (trust-all) channel is created with a warning.
 *
 * @param certConfig carries the remote address and the optional CA cert path
 * @throws RuntimeException wrapping any failure to build the SSL context
 */
protected void connect0(CertConfig certConfig) {
    String caCertPath = certConfig.getCaCertPath();
    String remoteAddress = certConfig.getRemoteAddress();
    // Fix: the original logged remoteAddress twice; the caCertPath label
    // never showed the actual certificate path.
    logger.info(
            "Try to connect to Dubbo Cert Authority server: " + remoteAddress + ", caCertPath: " + caCertPath);
    try {
        if (StringUtils.isNotEmpty(caCertPath)) {
            // Trust only the provided CA certificate.
            channel = NettyChannelBuilder.forTarget(remoteAddress)
                    .sslContext(GrpcSslContexts.forClient()
                            .trustManager(new File(caCertPath))
                            .build())
                    .build();
        } else {
            logger.warn(
                    CONFIG_SSL_CONNECT_INSECURE,
                    "",
                    "",
                    "No caCertPath is provided, will use insecure connection.");
            channel = NettyChannelBuilder.forTarget(remoteAddress)
                    .sslContext(GrpcSslContexts.forClient()
                            .trustManager(InsecureTrustManagerFactory.INSTANCE)
                            .build())
                    .build();
        }
    } catch (Exception e) {
        logger.error(LoggerCodeConstants.CONFIG_SSL_PATH_LOAD_FAILED, "", "", "Failed to load SSL cert file.", e);
        throw new RuntimeException(e);
    }
}
@Test
void testConnect1() {
    FrameworkModel model = new FrameworkModel();
    DubboCertManager manager = new DubboCertManager(model);
    // No CA cert path configured: an insecure channel should still be created.
    manager.connect0(new CertConfig("127.0.0.1:30062", null, null, null));
    Assertions.assertNotNull(manager.channel);
    Assertions.assertEquals("127.0.0.1:30062", manager.channel.authority());
    model.destroy();
}
/**
 * Returns the configured security name.
 *
 * @return the security name
 */
@Override
public String getSecurityName() {
    return securityName;
}
@Test
public void testGetSecurityName() {
    // The getter must echo back the configured security name unchanged.
    assertEquals(securityName, defaultSnmpv3Device.getSecurityName());
}
/**
 * Derives the aggregated status for a workflow run.
 * For restart runs (not fresh) that finished SUCCEEDED, the aggregated view is
 * recomputed over the loaded instance with the latest overview applied; if the
 * aggregated status differs from the run status, the raw run status is recorded
 * on the overview and the aggregated status is returned instead.
 *
 * @param instanceDao      DAO used to load the instance run for aggregation
 * @param summary          summary of the current run
 * @param runStatus        the status the current run finished with
 * @param overviewToUpdate runtime overview; mutated (run status set) only when
 *                         the aggregated status overrides the run status
 * @return the aggregated status, or {@code runStatus} unchanged for fresh runs
 *         or non-SUCCEEDED runs
 */
public static WorkflowInstance.Status deriveAggregatedStatus(
    MaestroWorkflowInstanceDao instanceDao,
    WorkflowSummary summary,
    WorkflowInstance.Status runStatus,
    WorkflowRuntimeOverview overviewToUpdate) {
  if (!summary.isFreshRun() && runStatus == WorkflowInstance.Status.SUCCEEDED) {
    WorkflowInstance instance =
        instanceDao.getWorkflowInstanceRun(
            summary.getWorkflowId(), summary.getWorkflowInstanceId(), summary.getWorkflowRunId());
    // Apply the latest overview and run status before computing the view.
    instance.setRuntimeOverview(overviewToUpdate);
    instance.setStatus(runStatus);
    WorkflowInstance.Status aggStatus =
        AggregatedViewHelper.computeAggregatedView(instance, false).getWorkflowInstanceStatus();
    if (aggStatus != runStatus) {
      // Preserve the raw run status on the overview so callers can still
      // distinguish it from the aggregated status being returned.
      overviewToUpdate.setRunStatus(runStatus);
      return aggStatus;
    }
  }
  return runStatus;
}
// Exercises deriveAggregatedStatus for a restart run: a FAILED run status passes
// through unchanged, while a SUCCEEDED run status is overridden by the FAILED
// aggregated view (and the raw run status is recorded on the overview).
@Test
public void testDeriveAggregatedStatus() {
    WorkflowInstance instance = getGenericWorkflowInstance(
        2, WorkflowInstance.Status.SUCCEEDED, RunPolicy.RESTART_FROM_SPECIFIC,
        RestartPolicy.RESTART_FROM_BEGINNING);
    instance.getRuntimeDag().remove("step1");
    MaestroWorkflowInstanceDao instanceDao = mock(MaestroWorkflowInstanceDao.class);
    when(instanceDao.getWorkflowInstanceRun(any(), anyLong(), anyLong())).thenReturn(instance);
    WorkflowSummary summary = mock(WorkflowSummary.class);
    when(summary.isFreshRun()).thenReturn(false);
    // Runtime workflow with three mocked steps.
    Workflow runtimeWorkflow = mock(Workflow.class);
    Step step1 = mock(Step.class);
    when(step1.getId()).thenReturn("step1");
    Step step2 = mock(Step.class);
    when(step2.getId()).thenReturn("step2");
    Step step3 = mock(Step.class);
    when(step3.getId()).thenReturn("step3");
    when(runtimeWorkflow.getSteps()).thenReturn(Arrays.asList(step1, step2, step3));
    instance.setRuntimeWorkflow(runtimeWorkflow);
    // Baseline aggregated info: overall FAILED with mixed step outcomes.
    WorkflowInstanceAggregatedInfo baseline = new WorkflowInstanceAggregatedInfo();
    baseline.setWorkflowInstanceStatus(WorkflowInstance.Status.FAILED);
    baseline
        .getStepAggregatedViews()
        .put("step1", generateStepAggregated(StepInstance.Status.SUCCEEDED, 11L, 12L));
    baseline
        .getStepAggregatedViews()
        .put("step2", generateStepAggregated(StepInstance.Status.FATALLY_FAILED, 11L, 12L));
    baseline
        .getStepAggregatedViews()
        .put("step3", generateStepAggregated(StepInstance.Status.STOPPED, 11L, 12L));
    instance.setAggregatedInfo(baseline);
    Map<String, StepRuntimeState> decodedOverview = new LinkedHashMap<>();
    decodedOverview.put("step2", generateStepState(StepInstance.Status.SUCCEEDED, 11L, 12L));
    WorkflowRuntimeOverview overview = mock(WorkflowRuntimeOverview.class);
    doReturn(decodedOverview).when(overview).decodeStepOverview(instance.getRuntimeDag());
    // FAILED run status: returned as-is, overview untouched.
    WorkflowInstance.Status actual = AggregatedViewHelper.deriveAggregatedStatus(
        instanceDao, summary, WorkflowInstance.Status.FAILED, overview);
    assertEquals(WorkflowInstance.Status.FAILED, actual);
    verify(overview, times(0)).setRunStatus(any());
    // SUCCEEDED run status: aggregated view is still FAILED, so the run status
    // is stored on the overview and the aggregated status is returned.
    actual = AggregatedViewHelper.deriveAggregatedStatus(
        instanceDao, summary, WorkflowInstance.Status.SUCCEEDED, overview);
    assertEquals(WorkflowInstance.Status.FAILED, actual);
    verify(overview, times(1)).setRunStatus(WorkflowInstance.Status.SUCCEEDED);
}
static MapKeyLoader.Role assignRole(boolean isPartitionOwner, boolean isMapNamePartition, boolean isMapNamePartitionFirstReplica) { if (isMapNamePartition) { if (isPartitionOwner) { // map-name partition owner is the SENDER return MapKeyLoader.Role.SENDER; } else { if (isMapNamePartitionFirstReplica) { // first replica of the map-name partition is the SENDER_BACKUP return MapKeyLoader.Role.SENDER_BACKUP; } else { // other replicas of the map-name partition do not have a role return MapKeyLoader.Role.NONE; } } } else { // ordinary partition owners are RECEIVERs, otherwise no role return isPartitionOwner ? MapKeyLoader.Role.RECEIVER : MapKeyLoader.Role.NONE; } }
@Test
public void assignRole_NONE_insignificantFlagFalse() {
    // Neither owner nor map-name partition: the first-replica flag is irrelevant.
    boolean isPartitionOwner = false;
    boolean isMapNamePartition = false;
    boolean insignificant = false;

    assertEquals(NONE,
            MapKeyLoaderUtil.assignRole(isPartitionOwner, isMapNamePartition, insignificant));
}
/**
 * Returns an iterator over this set's elements in the order maintained by the
 * linked structure.
 *
 * @return a new {@code LinkedSetIterator}
 */
@Override
public Iterator<T> iterator() {
    return new LinkedSetIterator();
}
@Test public void testRemoveMulti() { LOG.info("Test remove multi"); for (Integer i : list) { assertTrue(set.add(i)); } for (int i = 0; i < NUM / 2; i++) { assertTrue(set.remove(list.get(i))); } // the deleted elements should not be there for (int i = 0; i < NUM / 2; i++) { assertFalse(set.contains(list.get(i))); } // the rest should be there for (int i = NUM / 2; i < NUM; i++) { assertTrue(set.contains(list.get(i))); } Iterator<Integer> iter = set.iterator(); // the remaining elements should be in order int num = NUM / 2; while (iter.hasNext()) { assertEquals(list.get(num++), iter.next()); } assertEquals(num, NUM); LOG.info("Test remove multi - DONE"); }
/**
 * Checks reflectively whether the given object is an instance of the
 * support-library {@code android.support.v7.view.menu.ListMenuItemView}.
 *
 * @param view the object to test
 * @return {@code true} if it is a support-library ListMenuItemView
 */
public static boolean instanceOfSupportListMenuItemView(Object view) {
    return ReflectUtil.isInstance(view, "android.support.v7.view.menu.ListMenuItemView");
}
@Test
public void instanceOfSupportListMenuItemView() {
    // A plain CheckBox is not a support-library ListMenuItemView.
    CheckBox checkBox = new CheckBox(mApplication);
    checkBox.setText("child1");
    Assert.assertFalse(SAViewUtils.instanceOfSupportListMenuItemView(checkBox));
}
/**
 * Convenience overload that compiles the regex and delegates to
 * {@code findKV(Pattern, int, int)}.
 *
 * @param regex      the pattern text to compile
 * @param keyGroup   index of the capturing group used as the key
 * @param valueGroup index of the capturing group used as the value
 * @return the configured {@code FindKV} transform
 */
public static FindKV findKV(String regex, int keyGroup, int valueGroup) {
    return findKV(Pattern.compile(regex), keyGroup, valueGroup);
}
@Test
@Category(NeedsRunner.class)
public void testKVMatchesNameNone() {
    // No input matches the pattern, so the output collection must be empty.
    PCollection<KV<String, String>> result =
            p.apply(Create.of("x y z"))
             .apply(Regex.findKV("a (?<keyname>b) (?<valuename>c)", "keyname", "valuename"));
    PAssert.that(result).empty();
    p.run();
}
/**
 * Transfers up to {@code remaining} bytes of this slice into the destination
 * channel, resuming after {@code previouslyWritten} bytes.
 *
 * @param destChannel       the channel to transfer bytes into
 * @param previouslyWritten bytes of this slice already written by earlier calls
 * @param remaining         upper bound on bytes to write in this call
 * @return the number of bytes actually transferred
 * @throws IOException if the underlying channel transfer fails
 */
@Override
public int writeTo(TransferableChannel destChannel, int previouslyWritten, int remaining) throws IOException {
    // Absolute file position to resume from.
    long position = this.position + previouslyWritten;
    // Never write past the end of the slice.
    int count = Math.min(remaining, sizeInBytes() - previouslyWritten);
    // safe to cast to int since `count` is an int
    return (int) destChannel.transferFrom(channel, position, count);
}
@Test
public void testWriteTo() throws IOException {
    int totalSize = fileRecords.sizeInBytes();
    org.apache.kafka.common.requests.ByteBufferChannel channel =
            new org.apache.kafka.common.requests.ByteBufferChannel(totalSize);

    // Split the file into two unaligned halves and write both to the channel.
    UnalignedFileRecords firstHalf = fileRecords.sliceUnaligned(0, totalSize / 2);
    UnalignedFileRecords secondHalf = fileRecords.sliceUnaligned(totalSize / 2, totalSize - totalSize / 2);
    firstHalf.writeTo(channel, 0, firstHalf.sizeInBytes());
    secondHalf.writeTo(channel, 0, secondHalf.sizeInBytes());
    channel.close();

    // The concatenation must decode back into the original values, in order.
    Iterator<Record> decoded = MemoryRecords.readableRecords(channel.buffer()).records().iterator();
    for (byte[] expectedValue : values) {
        assertTrue(decoded.hasNext());
        assertEquals(decoded.next().value(), ByteBuffer.wrap(expectedValue));
    }
}
@Override @Transactional public boolean updateAfterApproval(Long userId, Integer userType, String clientId, Map<String, Boolean> requestedScopes) { // 如果 requestedScopes 为空,说明没有要求,则返回 true 通过 if (CollUtil.isEmpty(requestedScopes)) { return true; } // 更新批准的信息 boolean success = false; // 需要至少有一个同意 LocalDateTime expireTime = LocalDateTime.now().plusSeconds(TIMEOUT); for (Map.Entry<String, Boolean> entry : requestedScopes.entrySet()) { if (entry.getValue()) { success = true; } saveApprove(userId, userType, clientId, entry.getKey(), entry.getValue(), expireTime); } return success; }
@Test public void testUpdateAfterApproval_reject() { // 准备参数 Long userId = randomLongId(); Integer userType = randomEle(UserTypeEnum.values()).getValue(); String clientId = randomString(); Map<String, Boolean> requestedScopes = new LinkedHashMap<>(); requestedScopes.put("write", false); // mock 方法 // 调用 boolean success = oauth2ApproveService.updateAfterApproval(userId, userType, clientId, requestedScopes); // 断言 assertFalse(success); List<OAuth2ApproveDO> result = oauth2ApproveMapper.selectList(); assertEquals(1, result.size()); // write assertEquals(userId, result.get(0).getUserId()); assertEquals(userType, result.get(0).getUserType()); assertEquals(clientId, result.get(0).getClientId()); assertEquals("write", result.get(0).getScope()); assertFalse(result.get(0).getApproved()); assertFalse(DateUtils.isExpired(result.get(0).getExpiresTime())); }
public static InetSocketAddress getInetSocketAddressFromRpcURL(String rpcURL) throws Exception { // Pekko URLs have the form schema://systemName@host:port/.... if it's a remote Pekko URL try { final Address address = getAddressFromRpcURL(rpcURL); if (address.host().isDefined() && address.port().isDefined()) { return new InetSocketAddress(address.host().get(), (int) address.port().get()); } else { throw new MalformedURLException(); } } catch (MalformedURLException e) { throw new Exception("Could not retrieve InetSocketAddress from Pekko URL " + rpcURL); } }
@Test
void getHostFromRpcURLReturnsHostAfterAtSign() throws Exception {
    // A remote Pekko URL carries host and port after the '@' sign.
    final InetSocketAddress actual = PekkoUtils.getInetSocketAddressFromRpcURL(
            "pekko.tcp://flink@localhost:1234/user/jobmanager");

    assertThat(actual).isEqualTo(new InetSocketAddress("localhost", 1234));
}
/**
 * Expands multi-line string values of the given map in place, replacing each
 * with the structured object produced by {@code processNestedString}.
 *
 * @param map the map to rewrite; values without an embedded newline are untouched
 */
private void processNestedMap(Map<String, Object> map) {
    // Fix: iterate over entries instead of keySet()+get() (avoids a second
    // lookup per key) and guard the cast — the unconditional (String) cast
    // previously threw ClassCastException on non-String values and an NPE on
    // null values, despite the Map<String, Object> signature.
    for (Map.Entry<String, Object> entry : map.entrySet()) {
        Object rawValue = entry.getValue();
        if (rawValue instanceof String && ((String) rawValue).contains("\n")) {
            // Replacing a value through the entry is always safe during iteration.
            entry.setValue(processNestedString((String) rawValue));
        }
    }
}
// Invokes the private processNestedMap via reflection and checks that
// multi-line string values are expanded into nested YAML-like structures.
@Test
public void testProcessNestedMap() throws Exception {
    Map<String, Object> map = new HashMap<>();
    // Value containing an embedded mapping with a list and a nested map.
    map.put("test1", "\n" + " languages:\n" + " - Ruby\n" + " - Perl\n" + " - Python \n" + " websites:\n" + " YAML: yaml.org\n" + " Ruby: ruby-lang.org\n" + " Python: python.org\n" + " Perl: use.perl.org");
    // Value containing an embedded sequence of scalars and maps.
    map.put("test2", "\n" + " - aaaa\n" + " - bbbb\n" + " -\n" + " id: 1\n" + " name: company1\n" + " price: 200W\n" + " -\n" + " id: 2\n" + " name: company2\n" + " price: 500W\n" + " -\n" + " -aaaa\n" + " -bbbb\n" + " - abc: abc\n" + " ccc: ccc\n" + " ddd: ddd");
    // The method under test is private, so reach it via reflection.
    Method processNestedMapMethod = DefaultConfigLoader.class.getDeclaredMethod("processNestedMap", Map.class);
    processNestedMapMethod.setAccessible(true);
    processNestedMapMethod.invoke(configLoader, map);
    // test1 should now be a map with a list under "languages"...
    Map<String, Object> result = (Map)map.get("test1");
    List<String> result1 = (List)result.get("languages");
    Assert.assertEquals("Ruby", result1.get(0));
    // ...and a nested map under "websites".
    Map<String, String> result2 = (Map)result.get("websites");
    Assert.assertEquals( "yaml.org", result2.get("YAML"));
    System.out.println(map);
}
/**
 * Writes every recorded value to the stream, one per line, UTF-8 encoded.
 * The writer is closed (and flushed) by try-with-resources.
 */
@Override
public void dump(OutputStream output) {
    try (PrintWriter writer = new PrintWriter(new OutputStreamWriter(output, UTF_8))) {
        for (long value : values) {
            writer.println(value);
        }
    }
}
@Test
public void dumpsToAStream() throws Exception {
    final ByteArrayOutputStream sink = new ByteArrayOutputStream();

    snapshot.dump(sink);

    // One value per platform-dependent line separator.
    assertThat(sink.toString()).isEqualTo(String.format("1%n2%n3%n4%n5%n"));
}
/**
 * Creates or retrieves a reducing state for the given descriptor, making sure
 * the descriptor's serializer is initialized before the keyed state store is asked.
 */
@Override
public <T> ReducingState<T> getReducingState(ReducingStateDescriptor<T> stateProperties) {
    final KeyedStateStore stateStore = checkPreconditionsAndGetKeyedStateStore(stateProperties);
    stateProperties.initializeSerializerUnlessSet(this::createSerializer);
    return stateStore.getReducingState(stateProperties);
}
// Verifies that getReducingState applies the execution config's Kryo
// registrations when initializing the v2 descriptor's serializer.
@Test
void testV2ReducingStateInstantiation() throws Exception {
    final ExecutionConfig config = new ExecutionConfig();
    SerializerConfig serializerConfig = config.getSerializerConfig();
    serializerConfig.registerKryoType(Path.class);
    // Captures the descriptor the runtime context hands to the state backend.
    final AtomicReference<Object> descriptorCapture = new AtomicReference<>();
    StreamingRuntimeContext context = createRuntimeContext(descriptorCapture, config);
    @SuppressWarnings("unchecked")
    ReduceFunction<TaskInfo> reducer = (ReduceFunction<TaskInfo>) mock(ReduceFunction.class);
    org.apache.flink.runtime.state.v2.ReducingStateDescriptor<TaskInfo> descr =
        new org.apache.flink.runtime.state.v2.ReducingStateDescriptor<>(
            "name", reducer, TypeInformation.of(TaskInfo.class), serializerConfig);
    context.getReducingState(descr);
    org.apache.flink.runtime.state.v2.ReducingStateDescriptor<?> descrIntercepted =
        (org.apache.flink.runtime.state.v2.ReducingStateDescriptor<?>) descriptorCapture.get();
    TypeSerializer<?> serializer = descrIntercepted.getSerializer();
    // check that the Path class is really registered, i.e., the execution config was applied
    assertThat(serializer).isInstanceOf(KryoSerializer.class);
    assertThat(((KryoSerializer<?>) serializer).getKryo().getRegistration(Path.class).getId())
        .isPositive();
}
// Converts a loosely-typed CanaryDistributionStrategy (property maps) into the
// strongly-typed D2CanaryDistributionStrategy config record. Any unknown strategy
// name, missing property, or invalid value falls back to DISABLED rather than
// failing, so a bad canary config can never break clients.
@SuppressWarnings("unchecked")
public static D2CanaryDistributionStrategy toConfig(CanaryDistributionStrategy properties) {
    D2CanaryDistributionStrategy config = new D2CanaryDistributionStrategy();
    StrategyType type = strategyTypes.get(properties.getStrategy());
    if (type == null) {
        LOG.warn("Unknown strategy type from CanaryDistributionStrategy: " + properties.getStrategy() + ". Fall back to DISABLED.");
        type = StrategyType.DISABLED;
    }
    config.setStrategy(type);
    try {
        switch (type) {
            case PERCENTAGE:
                // Validate the ramp percentage before copying it over.
                Double scope = getValidScope(PropertyUtil.checkAndGetValue(properties.getPercentageStrategyProperties(), PropertyKeys.PERCENTAGE_SCOPE, Number.class, "PercentageStrategyProperties").doubleValue());
                PercentageStrategyProperties toPercentageProperties = new PercentageStrategyProperties();
                toPercentageProperties.setScope(scope);
                config.setPercentageStrategyProperties(toPercentageProperties);
                break;
            case TARGET_HOSTS:
                List<String> hosts = PropertyUtil.checkAndGetValue(properties.getTargetHostsStrategyProperties(), PropertyKeys.TARGET_HOSTS, List.class, "TargetHostsStrategyProperties");
                TargetHostsStrategyProperties toTargetHostsProperties = new TargetHostsStrategyProperties();
                toTargetHostsProperties.setTargetHosts(new StringArray(hosts));
                config.setTargetHostsStrategyProperties(toTargetHostsProperties);
                break;
            case TARGET_APPLICATIONS:
                // Target-applications strategy carries both an app list and a scope.
                Map<String, Object> fromTargetAppsProperties = properties.getTargetApplicationsStrategyProperties();
                List<String> apps = PropertyUtil.checkAndGetValue(fromTargetAppsProperties, PropertyKeys.TARGET_APPLICATIONS, List.class, "TargetApplicationsStrategyProperties");
                Double appScope = getValidScope(PropertyUtil.checkAndGetValue(fromTargetAppsProperties, PropertyKeys.PERCENTAGE_SCOPE, Number.class, "TargetApplicationsStrategyProperties").doubleValue());
                TargetApplicationsStrategyProperties toTargetAppsProperties = new TargetApplicationsStrategyProperties();
                toTargetAppsProperties.setTargetApplications(new StringArray(apps));
                toTargetAppsProperties.setScope(appScope);
                config.setTargetApplicationsStrategyProperties(toTargetAppsProperties);
                break;
            case DISABLED:
                break;
            default:
                throw new IllegalStateException("Unexpected strategy type: " + type);
        }
    } catch (Exception e) {
        // Conversion problems must not propagate; degrade to DISABLED instead.
        LOG.warn("Error in converting distribution strategy. Fall back to DISABLED.", e);
        config.setStrategy(StrategyType.DISABLED);
    }
    return config;
}
// Data-driven check that toConfig() handles edge-case property maps
// (unknown strategies, missing/invalid values) by producing the expected config.
@Test(dataProvider = "getEdgeCasesDistributionPropertiesAndConfigs")
public void testToConfigEdgeCases(String strategyType, Map<String, Object> percentageProperties, Map<String, Object> targetHostsProperties,
    Map<String, Object> targetAppsProperties, D2CanaryDistributionStrategy expected) {
    CanaryDistributionStrategy input = new CanaryDistributionStrategy(strategyType, percentageProperties, targetHostsProperties, targetAppsProperties);
    Assert.assertEquals(CanaryDistributionStrategyConverter.toConfig(input), expected);
}
// Convenience overload: parses a step parameter with a fresh (empty) set for
// cycle/visited tracking; delegates to the full overload.
public void parseStepParameter(
    Map<String, Map<String, Object>> allStepOutputData,
    Map<String, Parameter> workflowParams,
    Map<String, Parameter> stepParams,
    Parameter param,
    String stepId) {
    parseStepParameter(
        allStepOutputData, workflowParams, stepParams, param, stepId, new HashSet<>());
}
// Verifies that an expression reference containing consecutive underscores
// ("_step1____foo") resolves to step "_step1_" and param "_foo".
@Test
public void testParseStepParameterWith4Underscore() {
    StringParameter bar =
        StringParameter.builder().name("bar").expression("_step1____foo + '-1';").build();
    paramEvaluator.parseStepParameter(
        Collections.singletonMap("_step1_", Collections.emptyMap()),
        Collections.emptyMap(),
        Collections.singletonMap("_foo", StringParameter.builder().value("123").build()),
        bar,
        "_step1_");
    assertEquals("123-1", bar.getEvaluatedResult());
}
// Supplies the FilterBindings instance assembled when this provider was constructed.
@Override
public FilterBindings get() {
    return filterBindings;
}
// Verifies that the provider includes exactly those filters that are both
// registered AND configured with a binding: requestFilter3 and responseFilter2
// are registered but not configured, so they must be excluded.
@Test
void requireThatCorrectlyConfiguredFiltersAreIncluded() {
    final String requestFilter1Id = "requestFilter1";
    final String requestFilter2Id = "requestFilter2";
    final String requestFilter3Id = "requestFilter3";
    final String responseFilter1Id = "responseFilter1";
    final String responseFilter2Id = "responseFilter2";
    final String responseFilter3Id = "responseFilter3";
    // Set up config.
    configBuilder.filter(new ServerConfig.Filter.Builder().id(requestFilter1Id).binding("http://*/a"));
    configBuilder.filter(new ServerConfig.Filter.Builder().id(requestFilter2Id).binding("http://*/b"));
    configBuilder.filter(new ServerConfig.Filter.Builder().id(responseFilter1Id).binding("http://*/c"));
    configBuilder.filter(new ServerConfig.Filter.Builder().id(responseFilter3Id).binding("http://*/d"));
    // Set up registry.
    final ComponentRegistry<RequestFilter> availableRequestFilters = new ComponentRegistry<>();
    final RequestFilter requestFilter1Instance = mock(RequestFilter.class);
    final RequestFilter requestFilter2Instance = mock(RequestFilter.class);
    final RequestFilter requestFilter3Instance = mock(RequestFilter.class);
    availableRequestFilters.register(ComponentId.fromString(requestFilter1Id), requestFilter1Instance);
    availableRequestFilters.register(ComponentId.fromString(requestFilter2Id), requestFilter2Instance);
    availableRequestFilters.register(ComponentId.fromString(requestFilter3Id), requestFilter3Instance);
    final ComponentRegistry<ResponseFilter> availableResponseFilters = new ComponentRegistry<>();
    final ResponseFilter responseFilter1Instance = mock(ResponseFilter.class);
    final ResponseFilter responseFilter2Instance = mock(ResponseFilter.class);
    final ResponseFilter responseFilter3Instance = mock(ResponseFilter.class);
    availableResponseFilters.register(ComponentId.fromString(responseFilter1Id), responseFilter1Instance);
    availableResponseFilters.register(ComponentId.fromString(responseFilter2Id), responseFilter3Instance);
    final FilterChainRepository filterChainRepository = new FilterChainRepository(
        new ChainsConfig(new ChainsConfig.Builder()),
        availableRequestFilters,
        availableResponseFilters,
        new ComponentRegistry<>(),
        new ComponentRegistry<>());
    // Set up the provider that we aim to test.
    final FilterBindingsProvider provider = new FilterBindingsProvider(
        new ComponentId("foo"), new ServerConfig(configBuilder), filterChainRepository, new ComponentRegistry<>());
    // Execute.
    final FilterBindings filterBindings = provider.get();
    // Verify.
    assertNotNull(filterBindings);
    assertEquals(filterBindings.requestFilters().stream().collect(Collectors.toSet()),
        Set.of(requestFilter1Instance, requestFilter2Instance));
    assertEquals(filterBindings.responseFilters().stream().collect(Collectors.toSet()),
        Set.of(responseFilter1Instance, responseFilter3Instance));
}
/**
 * Builds a ProxyProvider from standard Java proxy system properties.
 * HTTP/HTTPS proxy settings take precedence over SOCKS; returns null when
 * no proxy host property is present at all.
 */
@Nullable
static ProxyProvider createFrom(Properties properties) {
    Objects.requireNonNull(properties, "properties");
    final boolean hasHttpProxy =
            properties.containsKey(HTTP_PROXY_HOST) || properties.containsKey(HTTPS_PROXY_HOST);
    if (hasHttpProxy) {
        return createHttpProxyFrom(properties);
    }
    return properties.containsKey(SOCKS_PROXY_HOST) ? createSocksProxyFrom(properties) : null;
}
// Verifies that a non-numeric socksProxyPort system property is rejected
// with an IllegalArgumentException carrying a descriptive message.
@Test
void proxyFromSystemProperties_errorWhenSocksPortIsNotANumber() {
    Properties properties = new Properties();
    properties.setProperty(ProxyProvider.SOCKS_PROXY_HOST, "host");
    properties.setProperty(ProxyProvider.SOCKS_PROXY_PORT, "8080Hello");
    assertThatIllegalArgumentException()
        .isThrownBy(() -> ProxyProvider.createFrom(properties))
        .withMessage("expected system property socksProxyPort to be a number but got 8080Hello");
}
/**
 * Liveness probe: the node is live when DB, web server, and CE checks are all
 * GREEN. Elasticsearch is optional; when a check is present it only fails
 * liveness if RED (YELLOW is tolerated).
 */
public boolean liveness() {
    // && preserves the original check order and short-circuiting.
    final boolean coreGreen =
            Health.Status.GREEN.equals(dbConnectionNodeCheck.check().getStatus())
            && Health.Status.GREEN.equals(webServerStatusNodeCheck.check().getStatus())
            && Health.Status.GREEN.equals(ceStatusNodeCheck.check().getStatus());
    if (!coreGreen) {
        return false;
    }
    if (esStatusNodeCheck == null) {
        return true;
    }
    return !Health.Status.RED.equals(esStatusNodeCheck.check().getStatus());
}
// Verifies that a RED Elasticsearch check fails liveness even when all
// other node checks are GREEN.
@Test
public void fail_when_es_check_fail() {
    when(dbConnectionNodeCheck.check()).thenReturn(Health.GREEN);
    when(webServerStatusNodeCheck.check()).thenReturn(Health.GREEN);
    when(ceStatusNodeCheck.check()).thenReturn(Health.GREEN);
    when(esStatusNodeCheck.check()).thenReturn(RED);
    Assertions.assertThat(underTest.liveness()).isFalse();
}
// Renders the snapshot counters (length, file count, directory count, space
// consumed) through SNAPSHOT_FORMAT; hOption selects human-readable sizes
// (e.g. "8.0 E") instead of raw numbers.
public String toSnapshot(boolean hOption) {
    return String.format(SNAPSHOT_FORMAT,
        formatSize(snapshotLength, hOption),
        formatSize(snapshotFileCount, hOption),
        formatSize(snapshotDirectoryCount, hOption),
        formatSize(snapshotSpaceConsumed, hOption));
}
// Verifies the exact column layout and human-readable unit formatting
// of toSnapshot(true), including the Long.MAX_VALUE ("8.0 E") case.
@Test
public void testToSnapshotHumanReadable() {
    long snapshotLength = Long.MAX_VALUE;
    long snapshotFileCount = 222222222;
    long snapshotDirectoryCount = 33333;
    long snapshotSpaceConsumed = 222256578;
    ContentSummary contentSummary = new ContentSummary.Builder()
        .snapshotLength(snapshotLength).snapshotFileCount(snapshotFileCount)
        .snapshotDirectoryCount(snapshotDirectoryCount)
        .snapshotSpaceConsumed(snapshotSpaceConsumed).build();
    String expected = " 8.0 E 211.9 M 32.6 K " + " 212.0 M ";
    assertEquals(expected, contentSummary.toSnapshot(true));
}
/**
 * Casts an Integer to a BigDecimal with the requested precision and scale.
 * Returns null for a null input; otherwise delegates to the long overload.
 */
public static BigDecimal cast(final Integer value, final int precision, final int scale) {
    return (value == null) ? null : cast(value.longValue(), precision, scale);
}
// Verifies that casting the string "1.1" to DECIMAL(3,2) pads the
// fraction to the target scale ("1.10").
@Test
public void shouldCastString() {
    // When:
    final BigDecimal decimal = DecimalUtil.cast("1.1", 3, 2);
    // Then:
    assertThat(decimal, is(new BigDecimal("1.10")));
}
public static byte[] scrambleCachingSha2(byte[] password, byte[] seed) throws DigestException { MessageDigest md; try { md = MessageDigest.getInstance("SHA-256"); } catch (NoSuchAlgorithmException ex) { throw new DigestException(ex); } byte[] dig1 = new byte[CACHING_SHA2_DIGEST_LENGTH]; byte[] dig2 = new byte[CACHING_SHA2_DIGEST_LENGTH]; byte[] scramble1 = new byte[CACHING_SHA2_DIGEST_LENGTH]; // SHA2(src) => digest_stage1 md.update(password, 0, password.length); md.digest(dig1, 0, CACHING_SHA2_DIGEST_LENGTH); md.reset(); // SHA2(digest_stage1) => digest_stage2 md.update(dig1, 0, dig1.length); md.digest(dig2, 0, CACHING_SHA2_DIGEST_LENGTH); md.reset(); // SHA2(digest_stage2, m_rnd) => scramble_stage1 md.update(dig2, 0, dig1.length); md.update(seed, 0, seed.length); md.digest(scramble1, 0, CACHING_SHA2_DIGEST_LENGTH); // XOR(digest_stage1, scramble_stage1) => scramble byte[] mysqlScrambleBuff = new byte[CACHING_SHA2_DIGEST_LENGTH]; xorString(dig1, mysqlScrambleBuff, scramble1, CACHING_SHA2_DIGEST_LENGTH); return mysqlScrambleBuff; }
// Pins the scramble output for two fixed (password, seed) pairs, including
// the empty-input case, against precomputed byte vectors.
@Test
public void testScrambleCachingSha2() throws DigestException {
    byte[] bytes1 = new byte[]{73, -38, 6, -106, 14, -28, -98, -32, -80, -49, -88, -66, -116, -101, -86, 25, -7, 32, 44, -118, 24, -128, -8, 12, 10, -38, 111, -11, 42, -111, 43, -123};
    byte[] bytes2 = new byte[]{-86, 63, -63, 80, 93, 3, 105, -59, 71, -41, 81, 112, 35, -29, 28, -115, -68, 16, -119, -60, -53, -80, -4, -19, 60, -37, 27, -22, -23, -23, 49, -36};
    Assert.assertArrayEquals(bytes1, MySQLPasswordEncrypter
        .scrambleCachingSha2(new byte[0], new byte[0]));
    Assert.assertArrayEquals(bytes2, MySQLPasswordEncrypter
        .scrambleCachingSha2( new byte[]{1, 2, 3, 4, 5, 6, 7, 8}, new byte[]{1, 1}));
}
// Returns the underlying raw value bytes, or null when no value has been set.
// Note: this exposes the internal array directly (no defensive copy).
@Nullable
public byte[] getValue() {
    return mValue;
}
// Verifies big-endian UINT16 encoding: 26576 (0x67D0) must be stored
// most-significant byte first.
@Test
public void setValue_UINT16_BE() {
    final MutableData data = new MutableData(new byte[2]);
    data.setValue(26576, Data.FORMAT_UINT16_BE, 0);
    assertArrayEquals(new byte[] { 0x67, (byte) 0xD0 } , data.getValue());
}
// Opens a download stream for the given Manta object, honoring a byte range
// when the transfer is an append/resume. Manta cannot stream a zero-length
// object, so that case is detected via HEAD and served as an empty stream.
@Override
public InputStream read(final Path file, final TransferStatus status, final ConnectionCallback connectionCallback) throws BackgroundException {
    final MantaHttpHeaders headers = new MantaHttpHeaders();
    try {
        try {
            if(status.isAppend()) {
                // A negative end means "to end of object"; pass null so no upper bound is set.
                final HttpRange range = HttpRange.withStatus(status);
                headers.setByteRange(range.getStart(), range.getEnd() < 0 ? null : range.getEnd());
            }
            // Requesting an empty file as an InputStream doesn't work, but we also don't want to
            // perform a HEAD request for every read so we'll opt to handle the exception instead
            // see https://github.com/joyent/java-manta/issues/248
            return session.getClient().getAsInputStream(file.getAbsolute(), headers);
        }
        catch(UnsupportedOperationException e) {
            // Distinguish "empty object" (serve an empty stream) from a genuine failure.
            final MantaObject probablyEmptyFile = session.getClient().head(file.getAbsolute());
            if(probablyEmptyFile.getContentLength() != 0) {
                throw new AccessDeniedException();
            }
            return new NullInputStream(0L);
        }
    }
    catch(MantaException e) {
        throw new MantaExceptionMappingService().map("Download {0} failed", e, file);
    }
    catch(MantaClientHttpResponseException e) {
        throw new MantaHttpExceptionMappingService().map("Download {0} failed", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Download {0} failed", e, file);
    }
}
// Integration test: uploads 1000 random bytes, then reads with an open-ended
// range (offset 100, length -1) and verifies the tail matches the source.
@Test
public void testReadRangeUnknownLength() throws Exception {
    final Path drive = new MantaDirectoryFeature(session).mkdir(randomDirectory(), new TransferStatus());
    final Path test = new Path(drive, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new MantaTouchFeature(session).touch(test, new TransferStatus());
    final Local local = new Local(PROPERTIES.get("java.io.tmpdir"), new AlphanumericRandomStringService().random());
    final byte[] content = RandomUtils.nextBytes(1000);
    final OutputStream out = local.getOutputStream(false);
    assertNotNull(out);
    IOUtils.write(content, out);
    out.close();
    new DefaultUploadFeature<>(new MantaWriteFeature(session)).upload(
        test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED), new DisabledStreamListener(),
        new TransferStatus().withLength(content.length),
        new DisabledConnectionCallback());
    // Length -1 simulates an unknown object size; offset 100 exercises the range path.
    final TransferStatus status = new TransferStatus();
    status.setLength(-1L);
    status.setAppend(true);
    status.setOffset(100L);
    final InputStream in = new MantaReadFeature(session).read(test, status, new DisabledConnectionCallback());
    assertNotNull(in);
    final ByteArrayOutputStream buffer = new ByteArrayOutputStream(content.length - 100);
    new StreamCopier(status, status).transfer(in, buffer);
    final byte[] reference = new byte[content.length - 100];
    System.arraycopy(content, 100, reference, 0, content.length - 100);
    assertArrayEquals(reference, buffer.toByteArray());
    in.close();
    final MantaDeleteFeature delete = new MantaDeleteFeature(session);
    delete.delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
// Loads a Config from the stream, resolving ${...} placeholders from the JVM
// system properties; delegates to the two-argument overload.
public static Config loadFromStream(InputStream source) {
    return loadFromStream(source, System.getProperties());
}
// Verifies loadFromStream() for both XML and YAML input, with property
// placeholder resolution, and for non-resettable streams larger than 4KB.
@Test
public void testLoadFromStream() {
    InputStream xmlStream = new ByteArrayInputStream(
        getSimpleXmlConfigStr(
            "instance-name", "hz-instance-name",
            "cluster-name", "${cluster.name}"
        ).getBytes()
    );
    InputStream yamlStream = new ByteArrayInputStream(
        getSimpleYamlConfigStr(
            "instance-name", "hz-instance-name",
            "cluster-name", "${cluster.name}"
        ).getBytes()
    );
    String clusterName = randomName();
    Properties properties = new Properties();
    properties.setProperty("cluster.name", clusterName);
    Config cfg = Config.loadFromStream(xmlStream, properties);
    assertEquals(clusterName, cfg.getClusterName());
    assertEquals("hz-instance-name", cfg.getInstanceName());
    clusterName = randomName();
    properties.setProperty("cluster.name", clusterName);
    cfg = Config.loadFromStream(yamlStream, properties);
    assertEquals(clusterName, cfg.getClusterName());
    assertEquals("hz-instance-name", cfg.getInstanceName());
    // test for stream with > 4KB content
    final int instanceNameLen = 1 << 14;
    String instanceName = String.join("", Collections.nCopies(instanceNameLen, "x"));
    // wrap with BufferedInputStream (which is not resettable), so that ConfigStream
    // behaviour kicks in.
    yamlStream = new BufferedInputStream(new ByteArrayInputStream(
        getSimpleYamlConfigStr("instance-name", instanceName).getBytes())
    );
    cfg = Config.loadFromStream(yamlStream);
    assertEquals(instanceName, cfg.getInstanceName());
}
/**
 * Builds a CSV-escaped, comma-separated list of the confidence levels of the
 * given identifiers. Returns the literal empty CSV cell ({@code ""}) when the
 * set is null, empty, or yields no text.
 */
public String csvCpeConfidence(Set<Identifier> ids) {
    if (ids == null || ids.isEmpty()) {
        return "\"\"";
    }
    final StringBuilder joined = new StringBuilder();
    boolean first = true;
    for (Identifier id : ids) {
        if (first) {
            first = false;
        } else {
            joined.append(", ");
        }
        joined.append(id.getConfidence());
    }
    if (joined.length() == 0) {
        return "\"\"";
    }
    return StringEscapeUtils.escapeCsv(joined.toString());
}
// Verifies csvCpeConfidence for null, empty, single-element, and two-element
// sets; set iteration order is unspecified, so both orderings are accepted.
@Test
public void testCsvCpeConfidence() {
    EscapeTool instance = new EscapeTool();
    Set<Identifier> ids = null;
    String expResult = "\"\"";
    String result = instance.csvCpeConfidence(ids);
    assertEquals(expResult, result);
    ids = new HashSet<>();
    expResult = "\"\"";
    result = instance.csvCpeConfidence(ids);
    assertEquals(expResult, result);
    ids = new HashSet<>();
    GenericIdentifier i1 = new GenericIdentifier("cpe:/a:somegroup:something:1.0", Confidence.HIGH);
    ids.add(i1);
    expResult = "HIGH";
    result = instance.csvCpeConfidence(ids);
    assertEquals(expResult, result);
    ids = new HashSet<>();
    i1 = new GenericIdentifier("cpe:/a:somegroup:something:1.0", Confidence.HIGH);
    ids.add(i1);
    GenericIdentifier i2 = new GenericIdentifier("cpe:/a:somegroup:something2:1.0", Confidence.MEDIUM);
    ids.add(i2);
    expResult = "\"HIGH, MEDIUM\"";
    String expResult2 = "\"MEDIUM, HIGH\"";
    result = instance.csvCpeConfidence(ids);
    assertTrue(expResult.equals(result) || expResult2.equals(result));
}
// Resolves the YARN staging directory: the configured STAGING_DIRECTORY,
// qualified against its own file system, or — when unset — the default file
// system's home directory.
@VisibleForTesting
Path getStagingDir(FileSystem defaultFileSystem) throws IOException {
    final String configuredStagingDir = flinkConfiguration.get(YarnConfigOptions.STAGING_DIRECTORY);
    if (configuredStagingDir == null) {
        return defaultFileSystem.getHomeDirectory();
    }
    // The staging dir may live on a different file system than the default one.
    FileSystem stagingDirFs = new Path(configuredStagingDir).getFileSystem(defaultFileSystem.getConf());
    return stagingDirFs.makeQualified(new Path(configuredStagingDir));
}
// Verifies that with no staging directory configured, the default file
// system's home directory (scheme "file") is used.
@Test
void testGetStagingDirWithoutSpecifyingStagingDir() throws IOException {
    try (final YarnClusterDescriptor yarnClusterDescriptor = createYarnClusterDescriptor()) {
        YarnConfiguration yarnConfig = new YarnConfiguration();
        yarnConfig.set("fs.defaultFS", "file://tmp");
        FileSystem defaultFileSystem = FileSystem.get(yarnConfig);
        Path stagingDir = yarnClusterDescriptor.getStagingDir(defaultFileSystem);
        assertThat(defaultFileSystem.getScheme()).isEqualTo("file");
        assertThat(stagingDir.getFileSystem(yarnConfig).getScheme()).isEqualTo("file");
    }
}
/**
 * Returns true when the call data begins with the EIP-3668 (CCIP read)
 * interface selector. Inputs that are null or shorter than the 10-character
 * selector ("0x" plus 8 hex digits) never match.
 */
public static boolean isEIP3668(String data) {
    return data != null
            && data.length() >= 10
            && EnsUtils.EIP_3668_CCIP_INTERFACE_ID.equals(data.substring(0, 10));
}
// A 12-char string whose first 10 chars are not the CCIP selector must not match.
@Test
void isEIP3668WhenNotRightPrefix() {
    assertFalse(EnsUtils.isEIP3668("123456789012"));
}
/**
 * Integer overload of atan2: widens both coordinates to Double (preserving
 * nulls) and delegates to the Double overload.
 */
@Udf(description = "Returns the inverse (arc) tangent of y / x")
public Double atan2(
    @UdfParameter(
        value = "y",
        description = "The ordinate (y) coordinate."
    ) final Integer y,
    @UdfParameter(
        value = "x",
        description = "The abscissa (x) coordinate."
    ) final Integer x
) {
    final Double ordinate = (y == null) ? null : y.doubleValue();
    final Double abscissa = (x == null) ? null : x.doubleValue();
    return atan2(ordinate, abscissa);
}
// Verifies atan2 in the second quadrant (positive y, negative x) across the
// double, int, and long overloads; results lie in (pi/2, pi].
@Test
public void shouldHandlePositiveYNegativeX() {
    assertThat(udf.atan2(1.1, -0.24), closeTo(1.7856117271965553, 0.000000000000001));
    assertThat(udf.atan2(6.0, -7.1), closeTo(2.4399674339361113, 0.000000000000001));
    assertThat(udf.atan2(2, -3), closeTo(2.5535900500422257, 0.000000000000001));
    assertThat(udf.atan2(2L, -2L), closeTo(2.356194490192345, 0.000000000000001));
}
// Formats a logging event as:
//   "<timestamp> [<thread>] <LEVEL> <logger> -<kvp>- <message>\n[stack trace]"
// Returns the empty string while the layout is not started.
@Override
public String doLayout(ILoggingEvent event) {
    if (!isStarted()) {
        return CoreConstants.EMPTY_STRING;
    }
    StringBuilder sb = new StringBuilder();
    long timestamp = event.getTimeStamp();
    sb.append(cachingDateFormatter.format(timestamp));
    sb.append(" [");
    sb.append(event.getThreadName());
    sb.append("] ");
    sb.append(event.getLevel().toString());
    sb.append(" ");
    sb.append(event.getLoggerName());
    sb.append(" -");
    // Key/value pairs (if any) are rendered between the two dashes.
    kvp(event, sb);
    sb.append("- ");
    sb.append(event.getFormattedMessage());
    sb.append(CoreConstants.LINE_SEPARATOR);
    // Append the converted stack trace only when the event carries a throwable.
    IThrowableProxy tp = event.getThrowableProxy();
    if (tp != null) {
        String stackTrace = tpc.convert(event);
        sb.append(stackTrace);
    }
    return sb.toString();
}
// Verifies that a null message is rendered as the literal "null" in the
// layout output (timestamp prefix is stripped before matching).
@Test
public void nullMessage() {
    LoggingEvent event = new LoggingEvent("", logger, Level.INFO, null, null, null);
    event.setTimeStamp(0);
    String result = layout.doLayout(event);
    String resultSuffix = result.substring(13).trim();
    assertTrue(resultSuffix.matches("\\[.*\\] INFO ch.qos.logback.classic.layout.TTLLLayoutTest -- null"),
        "[" + resultSuffix + "] did not match regex");
}
// Exchanges a refresh token for a new access token. Validates the refresh
// token (existence, expiry, owning client, client refresh permission),
// enforces that requested scopes are a subset of the originally granted ones
// (no up-scoping), and — depending on client settings — clears old access
// tokens and/or rotates the refresh token. Runs in a transaction.
@Override
@Transactional(value="defaultTransactionManager")
public OAuth2AccessTokenEntity refreshAccessToken(String refreshTokenValue, TokenRequest authRequest) throws AuthenticationException {
    if (Strings.isNullOrEmpty(refreshTokenValue)) {
        // throw an invalid token exception if there's no refresh token value at all
        throw new InvalidTokenException("Invalid refresh token: " + refreshTokenValue);
    }
    OAuth2RefreshTokenEntity refreshToken = clearExpiredRefreshToken(tokenRepository.getRefreshTokenByValue(refreshTokenValue));
    if (refreshToken == null) {
        // throw an invalid token exception if we couldn't find the token
        throw new InvalidTokenException("Invalid refresh token: " + refreshTokenValue);
    }
    ClientDetailsEntity client = refreshToken.getClient();
    AuthenticationHolderEntity authHolder = refreshToken.getAuthenticationHolder();
    // make sure that the client requesting the token is the one who owns the refresh token
    ClientDetailsEntity requestingClient = clientDetailsService.loadClientByClientId(authRequest.getClientId());
    if (!client.getClientId().equals(requestingClient.getClientId())) {
        // Possible token theft: revoke the refresh token before rejecting.
        tokenRepository.removeRefreshToken(refreshToken);
        throw new InvalidClientException("Client does not own the presented refresh token");
    }
    //Make sure this client allows access token refreshing
    if (!client.isAllowRefresh()) {
        throw new InvalidClientException("Client does not allow refreshing access token!");
    }
    // clear out any access tokens
    if (client.isClearAccessTokensOnRefresh()) {
        tokenRepository.clearAccessTokensForRefreshToken(refreshToken);
    }
    if (refreshToken.isExpired()) {
        tokenRepository.removeRefreshToken(refreshToken);
        throw new InvalidTokenException("Expired refresh token: " + refreshTokenValue);
    }
    OAuth2AccessTokenEntity token = new OAuth2AccessTokenEntity();
    // get the stored scopes from the authentication holder's authorization request; these are the scopes associated with the refresh token
    Set<String> refreshScopesRequested = new HashSet<>(refreshToken.getAuthenticationHolder().getAuthentication().getOAuth2Request().getScope());
    Set<SystemScope> refreshScopes = scopeService.fromStrings(refreshScopesRequested);
    // remove any of the special system scopes
    refreshScopes = scopeService.removeReservedScopes(refreshScopes);
    Set<String> scopeRequested = authRequest.getScope() == null ? new HashSet<String>() : new HashSet<>(authRequest.getScope());
    Set<SystemScope> scope = scopeService.fromStrings(scopeRequested);
    // remove any of the special system scopes
    scope = scopeService.removeReservedScopes(scope);
    if (scope != null && !scope.isEmpty()) {
        // ensure a proper subset of scopes
        if (refreshScopes != null && refreshScopes.containsAll(scope)) {
            // set the scope of the new access token if requested
            token.setScope(scopeService.toStrings(scope));
        } else {
            String errorMsg = "Up-scoping is not allowed.";
            logger.error(errorMsg);
            throw new InvalidScopeException(errorMsg);
        }
    } else {
        // otherwise inherit the scope of the refresh token (if it's there -- this can return a null scope set)
        token.setScope(scopeService.toStrings(refreshScopes));
    }
    token.setClient(client);
    if (client.getAccessTokenValiditySeconds() != null) {
        Date expiration = new Date(System.currentTimeMillis() + (client.getAccessTokenValiditySeconds() * 1000L));
        token.setExpiration(expiration);
    }
    if (client.isReuseRefreshToken()) {
        // if the client re-uses refresh tokens, do that
        token.setRefreshToken(refreshToken);
    } else {
        // otherwise, make a new refresh token
        OAuth2RefreshTokenEntity newRefresh = createRefreshToken(client, authHolder);
        token.setRefreshToken(newRefresh);
        // clean up the old refresh token
        tokenRepository.removeRefreshToken(refreshToken);
    }
    token.setAuthenticationHolder(authHolder);
    // Allow registered enhancers (e.g. JWT signing) to decorate the token before persisting.
    tokenEnhancer.enhance(token, authHolder.getAuthentication());
    tokenRepository.saveAccessToken(token);
    return token;
}
// Verifies that the refreshed access token's expiration honors the client's
// accessTokenValiditySeconds, bounding it between timestamps taken before and
// after the call (with a small DELTA for clock skew).
@Test
public void refreshAccessToken_expiration() {
    Integer accessTokenValiditySeconds = 3600;
    when(client.getAccessTokenValiditySeconds()).thenReturn(accessTokenValiditySeconds);
    long start = System.currentTimeMillis();
    OAuth2AccessTokenEntity token = service.refreshAccessToken(refreshTokenValue, tokenRequest);
    long end = System.currentTimeMillis();
    // Accounting for some delta for time skew on either side.
    Date lowerBoundAccessTokens = new Date(start + (accessTokenValiditySeconds * 1000L) - DELTA);
    Date upperBoundAccessTokens = new Date(end + (accessTokenValiditySeconds * 1000L) + DELTA);
    verify(scopeService, atLeastOnce()).removeReservedScopes(anySet());
    assertTrue(token.getExpiration().after(lowerBoundAccessTokens) && token.getExpiration().before(upperBoundAccessTokens));
}
/**
 * Uploads each file under the unique name of the Image at the same index.
 * The two lists are expected to be index-aligned.
 */
@Override
public void upload(final List<Image> images, final List<MultipartFile> fileImages) {
    for (int i = 0; i < images.size(); i++) {
        saveFile(fileImages.get(i), images.get(i).getUniqueName());
    }
}
// Verifies that uploading a single image/file pair completes without throwing.
// (Method name is Korean for "uploads an image", per this project's convention.)
@Test
void 이미지를_업로드한다() {
    // given
    List<Image> images = List.of(이미지를_생성한다());
    List<MultipartFile> fileImages = List.of(file);
    // when & then
    assertDoesNotThrow(() -> imageUploader.upload(images, fileImages));
}
// Returns an immutable snapshot of the AFI addresses held by this LCAF address.
public List<LispAfiAddress> getAddresses() {
    return ImmutableList.copyOf(addresses);
}
// Verifies that a list LCAF address returns its IPv4 + IPv6 members in order.
@Test
public void testConstruction() {
    LispListLcafAddress listLcafAddress = address1;
    LispAfiAddress ipv4Address1 = new LispIpv4Address(IpAddress.valueOf("192.168.1.1"));
    LispAfiAddress ipv6Address1 = new LispIpv6Address(IpAddress.valueOf("1111:2222:3333:4444:5555:6666:7777:8885"));
    List<LispAfiAddress> afiAddresses1 = Lists.newArrayList();
    afiAddresses1.add(ipv4Address1);
    afiAddresses1.add(ipv6Address1);
    assertThat(listLcafAddress.getAddresses(), is(afiAddresses1));
}
/**
 * Handles an autofill inline-suggestions response: when suggestions are
 * present, forwards them to the inline-suggestion strip action and shows the
 * actions strip. Returns whether the response carried any suggestions.
 */
@RequiresApi(Build.VERSION_CODES.R)
@Override
public boolean onInlineSuggestionsResponse(@NonNull InlineSuggestionsResponse response) {
    final List<InlineSuggestion> suggestions = response.getInlineSuggestions();
    if (suggestions.isEmpty()) {
        return false;
    }
    mInlineSuggestionAction.onNewSuggestions(suggestions);
    getInputViewContainer().addStripAction(mInlineSuggestionAction, true);
    getInputViewContainer().setActionsStripVisibility(true);
    return true;
}
// Verifies that receiving one inline suggestion adds the suggestions strip
// to the input view container and shows a count of "1".
@Test
public void testActionStripAdded() {
    simulateOnStartInputFlow();
    mAnySoftKeyboardUnderTest.onInlineSuggestionsResponse(
        mockResponse(Mockito.mock(InlineContentView.class)));
    Assert.assertNotNull(
        mAnySoftKeyboardUnderTest
            .getInputViewContainer()
            .findViewById(R.id.inline_suggestions_strip_root));
    TextView countText =
        mAnySoftKeyboardUnderTest
            .getInputViewContainer()
            .findViewById(R.id.inline_suggestions_strip_text);
    Assert.assertNotNull(countText);
    Assert.assertEquals("1", countText.getText().toString());
}
/**
 * Serializes an int to a 4-byte array in big-endian order
 * (most significant byte first).
 */
public static byte[] toBytes(int val) {
    return new byte[] {
        (byte) (val >>> 24),
        (byte) (val >>> 16),
        (byte) (val >>> 8),
        (byte) val
    };
}
// Verifies big-endian serialization for both the int and long overloads of
// toBytes, including boundary values (Integer/Long MAX_VALUE).
@Test
public void testToBytes() {
    assertArrayEquals(new byte[] {0, 0, 0, 20}, IOUtils.toBytes(20));
    assertArrayEquals(new byte[] {0x02, (byte) 0x93, (byte) 0xed, (byte) 0x88}, IOUtils.toBytes(43249032));
    assertArrayEquals(new byte[] {0x19, (byte) 0x99, (byte) 0x9a, 0x61}, IOUtils.toBytes(Integer.MAX_VALUE / 5 + 200));
    assertArrayEquals(new byte[] {(byte) 0x7f, (byte) 0xff, (byte) 0xff, (byte) 0xff}, IOUtils.toBytes(Integer.MAX_VALUE));
    assertArrayEquals(new byte[] {0, 0, 0, 0, 0, 0, 0, 20}, IOUtils.toBytes(20L));
    assertArrayEquals(new byte[] {0, 0, 0, 0, 0x49, 0x52, 0x45, 0x32}, IOUtils.toBytes(1230128434L));
    assertArrayEquals(
        new byte[] {0x19, (byte) 0x99, (byte) 0x99, (byte) 0x99, (byte) 0x99, (byte) 0x99, (byte) 0x9a, 0x61},
        IOUtils.toBytes(Long.MAX_VALUE / 5 + 200));
    assertArrayEquals(
        new byte[] {(byte) 0x7f, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff},
        IOUtils.toBytes(Long.MAX_VALUE));
}
// Convenience overload: generates the rule source for a function step at
// nesting/indent level 1.
static final String generateForFunction(RuleBuilderStep step, FunctionDescriptor<?> function) {
    return generateForFunction(step, function, 1);
}
// Verifies that only the supplied (required) parameter is rendered; the
// unset optional parameter must be omitted from the generated source.
@Test
public void generateFunctionWithSingleParamGeneration() {
    RuleBuilderStep step = RuleBuilderStep.builder().function("function1")
        .parameters(Map.of("required", "val1")).build();
    final FunctionDescriptor<Boolean> descriptor = FunctionUtil.testFunction(
        "function1", ImmutableList.of(
            string("required").build(), integer("optional").optional().build()
        ), Boolean.class
    ).descriptor();
    assertThat(ParserUtil.generateForFunction(step, descriptor)).isEqualTo(
        "function1(" + NL + " required : \"val1\"" + NL + " )"
    );
}
// Parses the link destination of a reference definition. Returns false when no
// valid destination is found (or the required whitespace before a title is
// missing); on success advances state to START_TITLE. The reference only
// becomes valid immediately if the destination ends the line.
private boolean destination(Scanner scanner) {
    scanner.whitespace();
    Position start = scanner.position();
    if (!LinkScanner.scanLinkDestination(scanner)) {
        return false;
    }
    // Strip the surrounding angle brackets of a <...> destination.
    String rawDestination = scanner.getSource(start, scanner.position()).getContent();
    destination = rawDestination.startsWith("<") ?
        rawDestination.substring(1, rawDestination.length() - 1) :
        rawDestination;
    int whitespace = scanner.whitespace();
    if (!scanner.hasNext()) {
        // Destination was at end of line, so this is a valid reference for sure (and maybe a title).
        // If not at end of line, wait for title to be valid first.
        referenceValid = true;
        paragraphLines.clear();
    } else if (whitespace == 0) {
        // spec: The title must be separated from the link destination by whitespace
        return false;
    }
    state = State.START_TITLE;
    return true;
}
// Verifies destination parsing for a bare URL and an angle-bracketed URL,
// and that the parser advances to the START_TITLE state.
@Test
public void testDestination() {
    parse("[foo]: /url");
    assertEquals(State.START_TITLE, parser.getState());
    assertParagraphLines("", parser);
    assertEquals(1, parser.getDefinitions().size());
    assertDef(parser.getDefinitions().get(0), "foo", "/url", null);
    parse("[bar]: </url2>");
    assertDef(parser.getDefinitions().get(1), "bar", "/url2", null);
}
// Queues removal of the named branch on the pending snapshot-references
// operation; returns this for chaining (applied on commit()).
@Override
public ManageSnapshots removeBranch(String name) {
    updateSnapshotReferencesOperation().removeBranch(name);
    return this;
}
// Verifies branch removal both as separate commits and as a chained
// create-then-remove within a single commit.
@TestTemplate
public void testRemoveBranch() {
    table.newAppend().appendFile(FILE_A).commit();
    long snapshotId = table.currentSnapshot().snapshotId();
    // Test a basic case of creating and then removing a branch and tag
    table.manageSnapshots().createBranch("branch1", snapshotId).commit();
    table.manageSnapshots().removeBranch("branch1").commit();
    TableMetadata updated = table.ops().refresh();
    SnapshotRef expectedBranch = updated.ref("branch1");
    assertThat(expectedBranch).isNull();
    // Test chained creating and removal of branch and tag
    table.manageSnapshots().createBranch("branch2", snapshotId).removeBranch("branch2").commit();
    updated = table.ops().refresh();
    assertThat(updated.ref("branch2")).isNull();
}
// Removes the custom icon from the file by updating it with a null image.
@Override
public boolean remove(final Local file) {
    return this.update(file, null);
}
// Verifies remove(): fails for a missing file and for a file without a custom
// icon, succeeds after an icon has been set.
@Test
public void testRemove() throws Exception {
    final WorkspaceIconService s = new WorkspaceIconService();
    final Local file = new Local(PreferencesFactory.get().getProperty("tmp.dir"), UUID.randomUUID().toString());
    assertFalse(s.remove(file));
    LocalTouchFactory.get().touch(file);
    assertFalse(s.remove(file));
    assertTrue(s.update(file, NSImage.imageWithContentsOfFile("../../img/download0.icns")));
    assertTrue(s.remove(file));
    file.delete();
}
// Evaluates the expression in the JS context and returns the raw GraalVM Value.
public Value evalForValue(String exp) {
    return context.eval(JS, exp);
}
// Verifies that a Java static method reference evaluated via Java.type()
// surfaces as an executable (non-meta, non-host-object) GraalVM Value.
@Test
void testJavaFunction() {
    Value v = je.evalForValue("Java.type('com.intuit.karate.graal.StaticPojo').sayHello");
    assertFalse(v.isMetaObject());
    assertFalse(v.isHostObject());
    assertTrue(v.canExecute());
}
// Returns true iff every byte of buf is zero (an empty array counts as all
// zeros). Delegates to Bouncy Castle's Arrays.areAllZeroes.
// NOTE(review): given the class name, this is presumably meant to be
// constant-time w.r.t. buffer contents — confirm against the BC implementation.
public static boolean allZeros(byte[] buf) {
    return Arrays.areAllZeroes(buf, 0, buf.length);
}
// Verifies allZeros() over empty arrays, single elements, and mixed content,
// including a negative byte value.
@Test
void all_zeros_checks_length_and_array_contents() {
    assertTrue(SideChannelSafe.allZeros(new byte[0]));
    assertFalse(SideChannelSafe.allZeros(new byte[]{ 1 }));
    assertTrue(SideChannelSafe.allZeros(new byte[]{ 0 }));
    assertFalse(SideChannelSafe.allZeros(new byte[]{ 0, 0, 127, 0 }));
    assertFalse(SideChannelSafe.allZeros(new byte[]{ 0, 0, -1, 0 }));
    assertTrue(SideChannelSafe.allZeros(new byte[]{ 0, 0, 0 }));
}
// Evaluates whether the file might contain rows matching the expression,
// based on its column metrics (counts and bounds).
public boolean eval(ContentFile<?> file) {
    // TODO: detect the case where a column is missing from the file using file's max field id.
    return new MetricsEvalVisitor().eval(file);
}
// Integer-equality pruning against file metrics: candidates strictly below the
// lower bound or strictly above the upper bound must be skipped; values at or
// within the bounds must be read.
@Test
public void testIntegerEq() {
    boolean shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", INT_MIN_VALUE - 25)).eval(FILE);
    assertThat(shouldRead).as("Should not read: id below lower bound").isFalse();
    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", INT_MIN_VALUE - 1)).eval(FILE);
    assertThat(shouldRead).as("Should not read: id below lower bound").isFalse();
    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", INT_MIN_VALUE)).eval(FILE);
    assertThat(shouldRead).as("Should read: id equal to lower bound").isTrue();
    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", INT_MAX_VALUE - 4)).eval(FILE);
    assertThat(shouldRead).as("Should read: id between lower and upper bounds").isTrue();
    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", INT_MAX_VALUE)).eval(FILE);
    assertThat(shouldRead).as("Should read: id equal to upper bound").isTrue();
    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", INT_MAX_VALUE + 1)).eval(FILE);
    assertThat(shouldRead).as("Should not read: id above upper bound").isFalse();
    shouldRead = new InclusiveMetricsEvaluator(SCHEMA, equal("id", INT_MAX_VALUE + 6)).eval(FILE);
    assertThat(shouldRead).as("Should not read: id above upper bound").isFalse();
}
// Loads the sanitizer configuration from the default configuration resource
// identified by CONFIG_NAME.
public static SanitizerConfig load() {
    return new SanitizerConfig(CONFIG_NAME);
}
// Loading the default sanitizer configuration must yield a non-null config.
@Test
public void testLoad() {
    SanitizerConfig config = SanitizerConfig.load();
    // Use a JUnit assertion rather than the Java `assert` statement: the latter
    // is silently skipped unless the JVM runs with -ea, so the original check
    // could never fail in a standard test run. Fully qualified to avoid relying
    // on a static import being present in this test class.
    org.junit.Assert.assertNotNull(config);
}
// Reads the next buffer of the given segment for a subpartition from its remote
// segment file. Per-(partition, subpartition) channels are cached across calls
// and reopened whenever the requested segmentId differs from the cached one.
// Returns null when the segment file does not exist, an END_OF_SEGMENT marker
// buffer at end of file, and otherwise the parsed data buffer.
// NOTE(review): a partial header read (0 < n < header size) is not handled —
// presumably header reads are assumed complete; verify.
@Override
public ReadBufferResult readBuffer(
        TieredStoragePartitionId partitionId,
        TieredStorageSubpartitionId subpartitionId,
        int segmentId,
        int bufferIndex,
        MemorySegment memorySegment,
        BufferRecycler recycler,
        @Nullable ReadProgress readProgress,
        @Nullable CompositeBuffer partialBuffer)
        throws IOException {
    // Get the channel of the segment file for a subpartition.
    Map<TieredStorageSubpartitionId, Tuple2<ReadableByteChannel, Integer>> subpartitionInfo =
            openedChannelAndSegmentIds.computeIfAbsent(partitionId, ignore -> new HashMap<>());
    Tuple2<ReadableByteChannel, Integer> fileChannelAndSegmentId =
            subpartitionInfo.getOrDefault(subpartitionId, Tuple2.of(null, -1));
    ReadableByteChannel channel = fileChannelAndSegmentId.f0;
    // (Re)open the channel when none is cached or the cached one belongs to a
    // different segment file.
    if (channel == null || fileChannelAndSegmentId.f1 != segmentId) {
        if (channel != null) {
            channel.close();
        }
        channel = openNewChannel(partitionId, subpartitionId, segmentId);
        if (channel == null) {
            // return null if the segment file doesn't exist.
            return null;
        }
        subpartitionInfo.put(subpartitionId, Tuple2.of(channel, segmentId));
    }
    // Try to read a buffer from the channel.
    reusedHeaderBuffer.clear();
    int bufferHeaderResult = channel.read(reusedHeaderBuffer);
    if (bufferHeaderResult == -1) {
        // EOF: segment exhausted — close/untrack the channel and signal END_OF_SEGMENT.
        channel.close();
        openedChannelAndSegmentIds.get(partitionId).remove(subpartitionId);
        return getSingletonReadResult(
                new NetworkBuffer(memorySegment, recycler, Buffer.DataType.END_OF_SEGMENT));
    }
    reusedHeaderBuffer.flip();
    BufferHeader header = parseBufferHeader(reusedHeaderBuffer);
    int dataBufferResult = channel.read(memorySegment.wrap(0, header.getLength()));
    if (dataBufferResult != header.getLength()) {
        // Short read of the payload means the file is truncated/corrupt; fail loudly.
        channel.close();
        throw new IOException("The length of data buffer is illegal.");
    }
    Buffer.DataType dataType = header.getDataType();
    return getSingletonReadResult(
            new NetworkBuffer(
                    memorySegment, recycler, dataType, header.isCompressed(), header.getLength()));
}
// Every (subpartition, segment, buffer-index) combination written by the test
// fixture must be readable as a non-null buffer; buffers are recycled after the
// check to avoid leaking memory segments.
@Test
void testReadBuffer() throws IOException {
    for (int subpartitionId = 0; subpartitionId < DEFAULT_NUM_SUBPARTITION; ++subpartitionId) {
        for (int segmentId = 0; segmentId < DEFAULT_SEGMENT_NUM; ++segmentId) {
            for (int bufferIndex = 0; bufferIndex < DEFAULT_BUFFER_PER_SEGMENT; ++bufferIndex) {
                Buffer buffer = readBuffer(bufferIndex, new TieredStorageSubpartitionId(subpartitionId), segmentId);
                assertThat(buffer).isNotNull();
                buffer.recycleBuffer();
            }
        }
    }
}
// Provisions preprovisioned capacity and marks excess hosts for removal.
// Any allocation failure aborts the cycle (returning 0) so a transient error
// never causes excess hosts to be removed.
@Override
protected double maintain() {
    List<Node> provisionedSnapshot;
    try {
        NodeList nodes;
        // Host and child nodes are written in separate transactions, but both are written while holding the
        // unallocated lock. Hold the unallocated lock while reading nodes to ensure we get all the children
        // of newly provisioned hosts.
        try (Mutex ignored = nodeRepository().nodes().lockUnallocated()) {
            nodes = nodeRepository().nodes().list();
        }
        provisionedSnapshot = provision(nodes);
    } catch (NodeAllocationException | IllegalStateException e) {
        // Expected, transient failures: log the message only and skip removal this cycle.
        log.log(Level.WARNING, "Failed to allocate preprovisioned capacity and/or find excess hosts: " + e.getMessage());
        return 0; // avoid removing excess hosts
    } catch (RuntimeException e) {
        // Unexpected failures: log with stack trace, and likewise skip removal.
        log.log(Level.WARNING, "Failed to allocate preprovisioned capacity and/or find excess hosts", e);
        return 0; // avoid removing excess hosts
    }
    return markForRemoval(provisionedSnapshot);
}
// End-to-end scenario for TTL-driven host deprovisioning:
//  - a host is only marked empty (hostEmptyAt set) once it has no active children;
//  - the empty-clock restarts whenever a child reappears on the host;
//  - only after the host TTL elapses while empty is the host retired and
//    deprovisioned, and finally removed via the tenant-host redeploy/ready flow.
@Test
public void deprovision_node_when_no_allocation_and_past_ttl() {
    tester = new DynamicProvisioningTester();
    ManualClock clock = (ManualClock) tester.nodeRepository.clock();
    tester.hostProvisioner.with(Behaviour.failProvisioning);
    tester.provisioningTester.makeReadyHosts(2, new NodeResources(1, 1, 1, 1)).activateTenantHosts();
    List<Node> hosts = tester.nodeRepository.nodes().list(Node.State.active).asList();
    Node host1 = hosts.get(0);
    Node host2 = hosts.get(1);
    // host1 gets a 1-day TTL, host2 a 1-hour TTL; only host1 gets a child.
    tester.nodeRepository.nodes().write(host1.withHostTTL(Duration.ofDays(1)), () -> { });
    tester.nodeRepository.nodes().write(host2.withHostTTL(Duration.ofHours(1)), () -> { });
    Node host11 = tester.addNode("host1-1", Optional.of(host1.hostname()), NodeType.tenant, State.active, DynamicProvisioningTester.tenantApp);
    // Host is not marked for deprovisioning by maintainer, because child is present
    tester.maintain();
    assertFalse(node(host1.hostname()).get().status().wantToDeprovision());
    assertEquals(Optional.empty(), node(host1.hostname()).get().hostEmptyAt());
    // Child is set to deprovision, but turns active
    tester.nodeRepository.nodes().park(host11.hostname(), true, Agent.system, "not good");
    tester.nodeRepository.nodes().reactivate(host11.hostname(), Agent.operator, "all good");
    assertTrue(node(host11.hostname()).get().status().wantToDeprovision());
    assertEquals(State.active, node(host11.hostname()).get().state());
    tester.maintain();
    assertFalse(node(host1.hostname()).get().status().wantToDeprovision());
    assertEquals(Optional.empty(), node(host1.hostname()).get().hostEmptyAt());
    // Child is parked, to make the host effectively empty
    tester.nodeRepository.nodes().park(host11.hostname(), true, Agent.system, "not good");
    tester.maintain();
    assertFalse(node(host1.hostname()).get().status().wantToDeprovision());
    assertEquals(Optional.of(clock.instant().truncatedTo(ChronoUnit.MILLIS)),
                 node(host1.hostname()).get().hostEmptyAt());
    // Some time passes, but not enough for host1 to be deprovisioned
    clock.advance(Duration.ofDays(1).minusSeconds(1));
    tester.maintain();
    assertFalse(node(host1.hostname()).get().status().wantToDeprovision());
    assertEquals(Optional.of(clock.instant().minus(Duration.ofDays(1).minusSeconds(1)).truncatedTo(ChronoUnit.MILLIS)),
                 node(host1.hostname()).get().hostEmptyAt());
    // host2's 1-hour TTL has long expired, so it is retired and deprovisioned.
    assertTrue(node(host2.hostname()).get().status().wantToDeprovision());
    assertTrue(node(host2.hostname()).get().status().wantToRetire());
    assertEquals(State.active, node(host2.hostname()).get().state());
    assertEquals(Optional.of(clock.instant().minus(Duration.ofDays(1).minusSeconds(1)).truncatedTo(ChronoUnit.MILLIS)),
                 node(host2.hostname()).get().hostEmptyAt());
    // Some more time passes, but child is reactivated on host1, rendering the host non-empty again
    clock.advance(Duration.ofDays(1));
    tester.nodeRepository.nodes().reactivate(host11.hostname(), Agent.operator, "all good");
    tester.maintain();
    assertFalse(node(host1.hostname()).get().status().wantToDeprovision());
    assertEquals(Optional.empty(), node(host1.hostname()).get().hostEmptyAt());
    // Child is removed, and host is marked as empty
    tester.nodeRepository.database().writeTo(State.deprovisioned, host11, Agent.operator, Optional.empty());
    tester.nodeRepository.nodes().forget(node(host11.hostname()).get());
    assertEquals(Optional.empty(), node(host11.hostname()));
    tester.maintain();
    assertFalse(node(host1.hostname()).get().status().wantToDeprovision());
    assertEquals(Optional.of(clock.instant().truncatedTo(ChronoUnit.MILLIS)),
                 node(host1.hostname()).get().hostEmptyAt());
    // Enough time passes for the host to be deprovisioned
    clock.advance(Duration.ofDays(1));
    tester.maintain();
    assertTrue(node(host1.hostname()).get().status().wantToDeprovision());
    assertTrue(node(host1.hostname()).get().status().wantToRetire());
    assertEquals(State.active, node(host1.hostname()).get().state());
    assertEquals(Optional.of(clock.instant().minus(Duration.ofDays(1)).truncatedTo(ChronoUnit.MILLIS)),
                 node(host1.hostname()).get().hostEmptyAt());
    // Let tenant host app redeploy, retiring the obsolete host.
    tester.provisioningTester.activateTenantHosts();
    clock.advance(Duration.ofHours(1));
    new RetiredExpirer(tester.nodeRepository, new MockDeployer(tester.nodeRepository), new NullMetric(),
                       Duration.ofHours(1), Duration.ofHours(1)).maintain();
    tester.provisioningTester.activateTenantHosts();
    // Hosts move themselves to parked (via ready) once they've synced up their logs to archive and are then deprovisioned
    tester.nodeRepository.nodes().list(State.dirty).forEach(node ->
        tester.nodeRepository.nodes().markNodeAvailableForNewAllocation(node.hostname(), Agent.nodeAdmin, "Readied by host-admin"));
    tester.deprovisioner.maintain();
    assertEquals(List.of(), tester.nodeRepository.nodes().list().not().state(State.deprovisioned).asList());
}
// Applies every enabled registrar to the support first, then attempts to
// register each enabled plugin directly.
@Override
public void doRegister(@NonNull ThreadPoolPluginSupport support) {
    enableThreadPoolPluginRegistrars.values()
        .forEach(enabledRegistrar -> enabledRegistrar.doRegister(support));
    enableThreadPoolPlugins.values()
        .forEach(enabledPlugin -> support.tryRegister(enabledPlugin));
}
// Two plugins enabled directly plus one enabled registrar: after doRegister the
// support must hold 3 plugins in total (so TestRegistrar evidently contributes
// one — see its implementation to confirm).
@Test
public void testDoRegister() {
    GlobalThreadPoolPluginManager manager = new DefaultGlobalThreadPoolPluginManager();
    manager.enableThreadPoolPlugin(new TestPlugin("1"));
    manager.enableThreadPoolPlugin(new TestPlugin("2"));
    manager.enableThreadPoolPluginRegistrar(new TestRegistrar("1"));
    TestSupport support = new TestSupport("1");
    manager.doRegister(support);
    Assert.assertEquals(3, support.getAllPlugins().size());
}
// Serializes message processing per originator: each originator lazily gets its
// own semaphore-backed queue, and the message is enqueued for asynchronous
// processing via processMsgAsync.
@Override
public void onMsg(TbContext ctx, TbMsg msg) {
    locks.computeIfAbsent(msg.getOriginator(), SemaphoreWithTbMsgQueue::new)
        .addToQueueAndTryProcess(msg, ctx, this::processMsgAsync);
}
// sqrt(5) routed to both a SERVER_SCOPE attribute ("result", 3 decimals) and
// message metadata: with telemetry saving stubbed, the node must tell success
// and the outgoing metadata must carry the rounded value "2.236".
@Test
public void test_sqrt_5_to_attribute_and_metadata() {
    var node = initNode(TbRuleNodeMathFunctionType.SQRT,
            new TbMathResult(TbMathArgumentType.ATTRIBUTE, "result", 3, false, true, DataConstants.SERVER_SCOPE),
            new TbMathArgument(TbMathArgumentType.MESSAGE_BODY, "a")
    );
    TbMsg msg = TbMsg.newMsg(TbMsgType.POST_TELEMETRY_REQUEST, originator, TbMsgMetaData.EMPTY,
            JacksonUtil.newObjectNode().put("a", 5).toString());
    when(telemetryService.saveAttrAndNotify(any(), any(), any(AttributeScope.class), anyString(), anyDouble()))
            .thenReturn(Futures.immediateFuture(null));
    node.onMsg(ctx, msg);
    ArgumentCaptor<TbMsg> msgCaptor = ArgumentCaptor.forClass(TbMsg.class);
    verify(ctx, timeout(TIMEOUT)).tellSuccess(msgCaptor.capture());
    verify(telemetryService, times(1)).saveAttrAndNotify(any(), any(), any(AttributeScope.class), anyString(), anyDouble());
    TbMsg resultMsg = msgCaptor.getValue();
    assertNotNull(resultMsg);
    assertNotNull(resultMsg.getData());
    var result = resultMsg.getMetaData().getValue("result");
    assertNotNull(result);
    assertEquals("2.236", result);
}
// Acquires a permit from the namespace's dedicated pool when one is configured,
// falling back to the shared default-namespace pool otherwise.
@Override
public boolean acquirePermit(String nsId) {
    final String pool = contains(nsId) ? nsId : DEFAULT_NS;
    return super.acquirePermit(pool);
}
// With 40 handlers total and ns1 preconfigured to a 0.5 proportion, ns1 gets 20
// dedicated permits while ns2 and the concurrent namespace get 4 each; once a
// pool is drained, further acquisitions on it must fail.
@Test
public void testHandlerAllocationPreconfigured() {
    Configuration conf = createConf(40);
    conf.setDouble(DFS_ROUTER_FAIR_HANDLER_PROPORTION_KEY_PREFIX + "ns1", 0.5);
    RouterRpcFairnessPolicyController routerRpcFairnessPolicyController =
        FederationUtil.newFairnessPolicyController(conf);
    // ns1 should have 20 permits allocated
    for (int i = 0; i < 20; i++) {
        assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns1"));
    }
    // ns2 should have 4 permits.
    // concurrent should have 4 permits.
    for (int i = 0; i < 4; i++) {
        assertTrue(routerRpcFairnessPolicyController.acquirePermit("ns2"));
        assertTrue(
            routerRpcFairnessPolicyController.acquirePermit(CONCURRENT_NS));
    }
    assertFalse(routerRpcFairnessPolicyController.acquirePermit("ns1"));
    assertFalse(routerRpcFairnessPolicyController.acquirePermit("ns2"));
    assertFalse(routerRpcFairnessPolicyController.acquirePermit(CONCURRENT_NS));
}
// Replies 400 Bad Request and places the optional detail message in the page
// title; a null detail leaves just the bare prefix.
void badRequest(String s) {
    setStatus(HttpServletResponse.SC_BAD_REQUEST);
    String title = "Bad request: ";
    if (s != null) {
        title = join(title, s);
    }
    setTitle(title);
}
// badRequest(null) must not throw NullPointerException; the expectations are
// verified against an empty detail message.
@Test
public void testBadRequestWithNullMessage() {
    // It should not throw NullPointerException
    appController.badRequest(null);
    verifyExpectations(StringUtils.EMPTY);
}
// Looks up a group by name via the group DAO; empty when no such group exists.
public Optional<GroupDto> findGroup(DbSession dbSession, String groupName) {
    return dbClient.groupDao().selectByName(dbSession, groupName);
}
// When the group does not exist, the service must propagate the DAO's empty
// Optional unchanged. (Renamed: the method name previously misspelled
// "Empty" as "Emtpy".)
@Test
public void findGroup_whenGroupDoesntExist_returnsEmptyOptional() {
    when(dbClient.groupDao().selectByName(dbSession, GROUP_NAME))
        .thenReturn(Optional.empty());
    assertThat(groupService.findGroup(dbSession, GROUP_NAME)).isEmpty();
}
// Creates a FallbackMethod for the given fallback name, matched against the
// original method's parameter types, return type and target class. Candidate
// methods are resolved once per MethodMeta and memoized in
// FALLBACK_METHODS_CACHE. When no candidate exists, the thrown
// NoSuchMethodException spells out the expected signature: the original
// parameters plus a trailing Throwable.
public static FallbackMethod create(String fallbackMethodName, Method originalMethod, Object[] args,
        Object original, Object proxy) throws NoSuchMethodException {
    MethodMeta methodMeta = new MethodMeta(
        fallbackMethodName,
        originalMethod.getParameterTypes(),
        originalMethod.getReturnType(),
        original.getClass());
    Map<Class<?>, Method> methods = FALLBACK_METHODS_CACHE
        .computeIfAbsent(methodMeta, FallbackMethod::extractMethods);
    if (!methods.isEmpty()) {
        return new FallbackMethod(methods, originalMethod.getReturnType(), args, original, proxy);
    } else {
        throw new NoSuchMethodException(String.format("%s %s.%s(%s,%s)",
            methodMeta.returnType, methodMeta.targetClass, methodMeta.fallbackMethodName,
            StringUtils.arrayToDelimitedString(methodMeta.params, ","),
            Throwable.class));
    }
}
// When no fallback method named "noMethod" exists, create() must throw a
// NoSuchMethodException whose message spells out the expected signature
// (return type, target class, name, original params plus Throwable).
@Test
public void notFoundFallbackMethod_shouldThrowsNoSuchMethodException() throws Throwable {
    FallbackMethodTest target = new FallbackMethodTest();
    Method testMethod = target.getClass().getMethod("testMethod", String.class);
    assertThatThrownBy(
        () -> FallbackMethod.create("noMethod", testMethod, new Object[]{"test"}, target, target))
        .isInstanceOf(NoSuchMethodException.class)
        .hasMessage(
            "class java.lang.String class io.github.resilience4j.spring6.fallback.FallbackMethodTest.noMethod(class java.lang.String,class java.lang.Throwable)");
}
// Builds a SecretsPluginInfo for the descriptor; the settings view and the
// plugin image are both looked up by the descriptor's plugin id.
@Override
public SecretsPluginInfo pluginInfoFor(GoPluginDescriptor descriptor) {
    String pluginId = descriptor.id();
    return new SecretsPluginInfo(descriptor, securityConfigSettings(pluginId), image(pluginId));
}
// The built plugin info must carry the same descriptor it was created from.
@Test
public void shouldBuildPluginInfoWithPluginDescriptor() {
    GoPluginDescriptor descriptor = GoPluginDescriptor.builder().id("plugin1").build();
    SecretsPluginInfo pluginInfo = new SecretsPluginInfoBuilder(extension).pluginInfoFor(descriptor);
    assertThat(pluginInfo.getDescriptor(), is(descriptor));
}
// Exposes the popup mini-keyboard view backing field (may be null before the
// popup has ever been shown — TODO confirm).
protected final AnyKeyboardViewBase getMiniKeyboard() {
    return mMiniKeyboard;
}
// A key with no primary code of its own but a label and a popup layout:
// long-pressing it must open the 16-key mini keyboard (6 keys in the wxyz popup
// here) without delivering any key event to the listener.
@Test
public void testLongPressWhenNoPrimaryKeyButTextShouldOpenMiniKeyboard() throws Exception {
    ExternalAnyKeyboard anyKeyboard = new ExternalAnyKeyboard(
        new DefaultAddOn(getApplicationContext(), getApplicationContext()),
        getApplicationContext(),
        keyboard_with_keys_with_no_codes,
        keyboard_with_keys_with_no_codes,
        "test", 0, 0, "en", "", "", KEYBOARD_ROW_MODE_NORMAL);
    anyKeyboard.loadKeyboard(mViewUnderTest.mKeyboardDimens);
    mViewUnderTest.setKeyboard(anyKeyboard, 0);
    // The key under test carries no code, only a label and a popup resource.
    final AnyKeyboard.AnyKey key = (AnyKeyboard.AnyKey) anyKeyboard.getKeys().get(6);
    Assert.assertEquals(0, key.getPrimaryCode());
    Assert.assertEquals(0, key.getCodesCount());
    Assert.assertEquals(R.xml.popup_16keys_wxyz, key.popupResId);
    Assert.assertEquals("popup", key.label);
    Assert.assertNull(key.popupCharacters);
    // Simulate a 1000 ms long-press gesture on the key.
    ViewTestUtils.navigateFromTo(mViewUnderTest, key, key, 1000, true, false);
    TestRxSchedulers.foregroundAdvanceBy(1);
    Assert.assertTrue(mViewUnderTest.mMiniKeyboardPopup.isShowing());
    AnyKeyboardViewBase miniKeyboard = mViewUnderTest.getMiniKeyboard();
    Assert.assertNotNull(miniKeyboard);
    Assert.assertNotNull(miniKeyboard.getKeyboard());
    Assert.assertEquals(6, miniKeyboard.getKeyboard().getKeys().size());
    // No key event must have been delivered to the listener by the long-press.
    Mockito.verify(mMockKeyboardListener, Mockito.never())
        .onKey(anyInt(), any(), anyInt(), any(), anyBoolean());
}
// Registers this collector with the default Prometheus registry and returns it
// for chaining.
public JmxCollector register() {
    return register(PrometheusRegistry.defaultRegistry);
}
// A rule whose `value:` is empty must produce no sample for the named metric.
// (Backticks in the inline YAML are swapped for quotes to avoid escaping.)
@Test
public void testValueEmpty() throws Exception {
    JmxCollector jc = new JmxCollector(
        "\n---\nrules:\n- pattern: `.*`\n name: foo\n value:"
            .replace('`', '"'))
        .register(prometheusRegistry);
    assertNull(getSampleValue("foo", new String[] {}, new String[] {}));
}
// Resolves the (possibly SpEL) fallback method name and, when a matching
// fallback method exists on the join point's target, decorates the primary call
// with it; otherwise the primary function runs undecorated. A name that
// resolves but matches no method is logged and ignored rather than failing the
// call.
public Object execute(ProceedingJoinPoint proceedingJoinPoint, Method method, String fallbackMethodValue,
        CheckedSupplier<Object> primaryFunction) throws Throwable {
    String fallbackMethodName = spelResolver.resolve(method, proceedingJoinPoint.getArgs(), fallbackMethodValue);
    FallbackMethod fallbackMethod = null;
    if (StringUtils.hasLength(fallbackMethodName)) {
        try {
            fallbackMethod = FallbackMethod
                .create(fallbackMethodName, method, proceedingJoinPoint.getArgs(),
                    proceedingJoinPoint.getTarget(), proceedingJoinPoint.getThis());
        } catch (NoSuchMethodException ex) {
            // Deliberately non-fatal: fall through to the undecorated primary call.
            logger.warn("No fallback method match found", ex);
        }
    }
    if (fallbackMethod == null) {
        return primaryFunction.get();
    } else {
        return fallbackDecorators.decorate(fallbackMethod, primaryFunction).get();
    }
}
// When a valid fallback name resolves, fallbackDecorators must wrap the primary
// call; decoration is stubbed to return the primary unchanged, so the primary
// result is observed while both the resolver and the decorator are verified once.
@Test
public void testPrimaryMethodExecutionWithFallback() throws Throwable {
    Method method = this.getClass().getMethod("getName", String.class);
    final CheckedSupplier<Object> primaryFunction = () -> getName("Name");
    final String fallbackMethodValue = "getNameValidFallback";
    when(proceedingJoinPoint.getArgs()).thenReturn(new Object[]{});
    when(proceedingJoinPoint.getTarget()).thenReturn(this);
    when(spelResolver.resolve(method, proceedingJoinPoint.getArgs(), fallbackMethodValue)).thenReturn(fallbackMethodValue);
    when(fallbackDecorators.decorate(any(), eq(primaryFunction))).thenReturn(primaryFunction);
    final Object result = fallbackExecutor.execute(proceedingJoinPoint, method, fallbackMethodValue, primaryFunction);
    assertThat(result).isEqualTo("Name");
    verify(spelResolver, times(1)).resolve(method, proceedingJoinPoint.getArgs(), fallbackMethodValue);
    verify(fallbackDecorators, times(1)).decorate(any(), eq(primaryFunction));
}
/**
 * Reads a required base-10 integer property.
 *
 * @param props   the properties to read from
 * @param keyName the key that must be present
 * @return the parsed integer value
 * @throws RuntimeException if the key is absent or its value is not a base-10 integer
 */
static int loadRequiredIntProp(Properties props, String keyName) {
    final String raw = props.getProperty(keyName);
    if (raw == null) {
        throw new RuntimeException("Failed to find " + keyName);
    }
    try {
        return Integer.parseInt(raw);
    } catch (NumberFormatException e) {
        // Preserve the parse failure as the cause for diagnostics.
        throw new RuntimeException("Unable to read " + keyName + " as a base-10 number.", e);
    }
}
// A present but non-numeric value must surface as a RuntimeException naming the
// key and the base-10 expectation.
@Test
public void loadNonIntegerRequiredIntProp() {
    Properties props = new Properties();
    props.setProperty("foo.bar", "b");
    assertEquals("Unable to read foo.bar as a base-10 number.",
        assertThrows(RuntimeException.class,
            () -> PropertiesUtils.loadRequiredIntProp(props, "foo.bar")).getMessage());
}
// Wraps the target in a MyBatis plugin proxy so this interceptor sees the
// target's intercepted invocations.
@Override
public Object plugin(final Object target) {
    return Plugin.wrap(target, this);
}
// plugin() must wrap an arbitrary target object without throwing.
@Test
public void pluginTest() {
    final PostgreSqlUpdateInterceptor postgreSqlUpdateInterceptor = new PostgreSqlUpdateInterceptor();
    Assertions.assertDoesNotThrow(() -> postgreSqlUpdateInterceptor.plugin(new Object()));
}
/**
 * Builds a left-deep join tree from the ordered list of join infos. The first
 * join's left source seeds the tree; each subsequent join must reference a
 * source already in the tree (on either side), otherwise building fails.
 *
 * @param joins the joins in query order
 * @return the root of the join tree, or null when {@code joins} is empty
 * @throws KsqlException on a circular join (both sides already present) or a
 *         join disconnected from the tree built so far
 */
public static Node build(final List<JoinInfo> joins) {
    Node root = null;
    for (final JoinInfo join : joins) {
        if (root == null) {
            root = new Leaf(join.getLeftSource());
        }
        if (root.containsSource(join.getRightSource()) && root.containsSource(join.getLeftSource())) {
            // Bug fix: the message previously printed the left join *expression*
            // instead of the left *source*, which is what is actually checked above
            // (and what the right-hand side of this same message reports).
            throw new KsqlException("Cannot perform circular join - both " + join.getRightSource()
                + " and " + join.getLeftSource()
                + " are already included in the current join tree: " + root.debugString(0));
        } else if (root.containsSource(join.getLeftSource())) {
            root = new Join(root, new Leaf(join.getRightSource()), join);
        } else if (root.containsSource(join.getRightSource())) {
            // The known source is on the right: flip the join so the tree stays left-deep.
            root = new Join(root, new Leaf(join.getLeftSource()), join.flip());
        } else {
            throw new KsqlException(
                "Cannot build JOIN tree; neither source in the join is the FROM source or included "
                    + "in a previous JOIN: " + join + ". The current join tree is " + root.debugString(0)
            );
        }
    }
    return root;
}
// Two joins that share only source `a`: after the second join, the root's join
// equivalence set must contain exactly the second join's expressions (e3, e4)
// with no carry-over from the first join's (e1, e2).
@Test
public void shouldComputeEquivalenceSetWithoutOverlap() {
    // Given:
    when(j1.getLeftSource()).thenReturn(a);
    when(j1.getRightSource()).thenReturn(b);
    when(j2.getLeftSource()).thenReturn(a);
    when(j2.getRightSource()).thenReturn(c);
    when(j1.getLeftJoinExpression()).thenReturn(e1);
    when(j1.getRightJoinExpression()).thenReturn(e2);
    when(j2.getLeftJoinExpression()).thenReturn(e3);
    when(j2.getRightJoinExpression()).thenReturn(e4);
    final List<JoinInfo> joins = ImmutableList.of(j1, j2);
    // When:
    final Node root = JoinTree.build(joins);
    // Then:
    assertThat(root.joinEquivalenceSet(), containsInAnyOrder(e3, e4));
}
// REST endpoint: GET /{workflowInstanceId}. Queries one workflow instance on
// behalf of the session user and wraps the service result map into a Result.
@Operation(summary = "queryWorkflowInstanceById", description = "QUERY_WORKFLOW_INSTANCE_BY_ID")
@Parameters({
        @Parameter(name = "workflowInstanceId", description = "WORKFLOW_INSTANCE_ID", schema = @Schema(implementation = Integer.class, example = "123456", required = true))
})
@GetMapping(value = "/{workflowInstanceId}")
@ResponseStatus(HttpStatus.OK)
@ApiException(Status.QUERY_PROCESS_INSTANCE_BY_ID_ERROR)
public Result queryWorkflowInstanceById(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                        @PathVariable("workflowInstanceId") Integer workflowInstanceId) {
    Map<String, Object> result = processInstanceService.queryProcessInstanceById(loginUser, workflowInstanceId);
    return returnDataList(result);
}
// Happy path: the service returns SUCCESS with a ProcessInstance payload, and
// the controller must wrap it into a successful Result.
@Test
public void testQueryWorkflowInstanceById() {
    User loginUser = getLoginUser();
    Map<String, Object> result = new HashMap<>();
    result.put(DATA_LIST, new ProcessInstance());
    putMsg(result, Status.SUCCESS);
    Mockito.when(processInstanceService.queryProcessInstanceById(any(), eq(1))).thenReturn(result);
    Result result1 = workflowInstanceV2Controller.queryWorkflowInstanceById(loginUser, 1);
    Assertions.assertTrue(result1.isSuccess());
}
// Renders the address as uppercase hexadecimal with no separators, two digits
// per byte (equivalent to formatting each byte with "%02X").
public String toStringNoColon() {
    final String digits = "0123456789ABCDEF";
    final StringBuilder hex = new StringBuilder(this.address.length * 2);
    for (final byte octet : this.address) {
        final int unsigned = octet & 0xFF;
        hex.append(digits.charAt(unsigned >>> 4));
        hex.append(digits.charAt(unsigned & 0x0F));
    }
    return hex.toString();
}
// The ONOS MAC constant must render without colon separators, matching the
// precomputed expected string.
@Test
public void testToStringNoColon() throws Exception {
    assertEquals(MAC_ONOS_STR_NO_COLON, MAC_ONOS.toStringNoColon());
}
// Builds the combined partition type of the table by projecting the union of
// all of its partition specs (current and historical) over all field ids.
public static StructType partitionType(Table table) {
    final Collection<PartitionSpec> allSpecs = table.specs().values();
    return buildPartitionProjectionType("table partition", allSpecs, allFieldIds(allSpecs));
}
// Removing a partition field and then adding the same field back in a v2 table
// must reuse the original field id, so the partition type is the single
// optional field (id 1000) rather than a duplicated one.
@Test
public void testPartitionTypeWithAddingBackSamePartitionFieldInV2Table() {
    TestTables.TestTable table =
        TestTables.create(tableDir, "test", SCHEMA, BY_DATA_SPEC, V2_FORMAT_VERSION);
    table.updateSpec().removeField("data").commit();
    table.updateSpec().addField("data").commit();
    // in v2, we should be able to reuse the original partition spec
    StructType expectedType = StructType.of(NestedField.optional(1000, "data", Types.StringType.get()));
    StructType actualType = Partitioning.partitionType(table);
    assertThat(actualType).isEqualTo(expectedType);
}
// Truncation of a whole-number Long is the identity, so the input is returned
// unchanged (null-safe: a null input yields null).
@Udf
public Long trunc(@UdfParameter final Long val) {
    return val;
}
// Truncation of negative decimals must round toward zero (e.g. -1.5 -> -1),
// never toward negative infinity; whole numbers pass through unchanged.
@Test
public void shouldTruncateSimpleBigDecimalNegative() {
    assertThat(udf.trunc(new BigDecimal("-1.23")), is(new BigDecimal("-1")));
    assertThat(udf.trunc(new BigDecimal("-1.0")), is(new BigDecimal("-1")));
    assertThat(udf.trunc(new BigDecimal("-1.5")), is(new BigDecimal("-1")));
    assertThat(udf.trunc(new BigDecimal("-1530000")), is(new BigDecimal("-1530000")));
    assertThat(udf.trunc(new BigDecimal("-10.1")), is(new BigDecimal("-10")));
    assertThat(udf.trunc(new BigDecimal("-12345.5")), is(new BigDecimal("-12345")));
    assertThat(udf.trunc(new BigDecimal("-9.99")), is(new BigDecimal("-9")));
    assertThat(udf.trunc(new BigDecimal("-110.1")), is(new BigDecimal("-110")));
    assertThat(udf.trunc(new BigDecimal("-1530000.01")), is(new BigDecimal("-1530000")));
    assertThat(udf.trunc(new BigDecimal("-9999999.99")), is(new BigDecimal("-9999999")));
}
// Central dispatcher for container lifecycle events:
//  - LAUNCH/RELAUNCH/RECOVER create the matching ContainerLaunch variant,
//    submit it to the launcher pool and track it in `running`;
//  - RECOVER_PAUSED_CONTAINER is submitted but NOT tracked in `running` —
//    NOTE(review): presumably intentional since the container is paused; verify;
//  - CLEANUP events delegate to cleanup();
//  - SIGNAL/PAUSE/RESUME act on the tracked launch and are quiet no-ops when
//    the container is not currently running.
@Override
public void handle(ContainersLauncherEvent event) {
    // TODO: ContainersLauncher launches containers one by one!!
    Container container = event.getContainer();
    ContainerId containerId = container.getContainerId();
    switch (event.getType()) {
    case LAUNCH_CONTAINER:
        Application app = context.getApplications().get(
            containerId.getApplicationAttemptId().getApplicationId());
        ContainerLaunch launch = new ContainerLaunch(context, getConfig(), dispatcher, exec, app,
            event.getContainer(), dirsHandler, containerManager);
        containerLauncher.submit(launch);
        running.put(containerId, launch);
        break;
    case RELAUNCH_CONTAINER:
        app = context.getApplications().get(
            containerId.getApplicationAttemptId().getApplicationId());
        ContainerRelaunch relaunch = new ContainerRelaunch(context, getConfig(), dispatcher, exec,
            app, event.getContainer(), dirsHandler, containerManager);
        containerLauncher.submit(relaunch);
        running.put(containerId, relaunch);
        break;
    case RECOVER_CONTAINER:
        app = context.getApplications().get(
            containerId.getApplicationAttemptId().getApplicationId());
        launch = new RecoveredContainerLaunch(context, getConfig(), dispatcher, exec, app,
            event.getContainer(), dirsHandler, containerManager);
        containerLauncher.submit(launch);
        running.put(containerId, launch);
        break;
    case RECOVER_PAUSED_CONTAINER:
        app = context.getApplications().get(
            containerId.getApplicationAttemptId().getApplicationId());
        launch = new RecoverPausedContainerLaunch(context, getConfig(), dispatcher, exec, app,
            event.getContainer(), dirsHandler, containerManager);
        containerLauncher.submit(launch);
        break;
    case CLEANUP_CONTAINER:
        cleanup(event, containerId, true);
        break;
    case CLEANUP_CONTAINER_FOR_REINIT:
        cleanup(event, containerId, false);
        break;
    case SIGNAL_CONTAINER:
        SignalContainersLauncherEvent signalEvent =
            (SignalContainersLauncherEvent) event;
        ContainerLaunch runningContainer = running.get(containerId);
        if (runningContainer == null) {
            // Container not launched. So nothing needs to be done.
            LOG.info("Container " + containerId + " not running, nothing to signal.");
            return;
        }
        try {
            runningContainer.signalContainer(signalEvent.getCommand());
        } catch (IOException e) {
            LOG.warn("Got exception while signaling container " + containerId
                + " with command " + signalEvent.getCommand());
        }
        break;
    case PAUSE_CONTAINER:
        ContainerLaunch launchedContainer = running.get(containerId);
        if (launchedContainer == null) {
            // Container not launched. So nothing needs to be done.
            return;
        }
        // Pause the container
        try {
            launchedContainer.pauseContainer();
        } catch (Exception e) {
            LOG.info("Got exception while pausing container: "
                + StringUtils.stringifyException(e));
        }
        break;
    case RESUME_CONTAINER:
        ContainerLaunch launchCont = running.get(containerId);
        if (launchCont == null) {
            // Container not launched. So nothing needs to be done.
            return;
        }
        // Resume the container.
        try {
            launchCont.resumeContainer();
        } catch (Exception e) {
            LOG.info("Got exception while resuming container: "
                + StringUtils.stringifyException(e));
        }
        break;
    }
}
// A RELAUNCH_CONTAINER event must submit a ContainerRelaunch to the launcher
// pool and track it in the running map under its container id.
@SuppressWarnings("unchecked")
@Test
public void testRelaunchContainerEvent() throws IllegalArgumentException {
    Map<ContainerId, ContainerLaunch> dummyMap = spy.running;
    when(event.getType())
        .thenReturn(ContainersLauncherEventType.RELAUNCH_CONTAINER);
    assertEquals(0, dummyMap.size());
    spy.handle(event);
    assertEquals(1, dummyMap.size());
    Mockito.verify(containerLauncher, Mockito.times(1))
        .submit(Mockito.any(ContainerRelaunch.class));
    // Every tracked launch for this event must be the relaunch variant.
    for (ContainerId cid : dummyMap.keySet()) {
        Object o = dummyMap.get(cid);
        assertEquals(true, (o instanceof ContainerRelaunch));
    }
}
// Convenience overload for a direct invitation with no reason, no password,
// no continue-as-one-to-one flag, and no thread.
public void inviteDirectly(EntityBareJid address) throws NotConnectedException, InterruptedException {
    inviteDirectly(address, null, null, false, null);
}
// Direct MUC invitation: the outgoing message must be addressed to the invitee
// and carry a GroupChatInvitation extension referencing the room, with all
// optional fields (reason, password, continue flag, thread) unset.
@Test
public void testInviteDirectly() throws Throwable {
    EntityBareJid roomJid = JidCreate.entityBareFrom("room@example.com");
    EntityBareJid userJid = JidCreate.entityBareFrom("user@example.com");
    AtomicBoolean updateRequestSent = new AtomicBoolean();
    // Simulated server that inspects the invitation stanza as it is "sent".
    InvokeDirectlyResponder serverSimulator = new InvokeDirectlyResponder() {
        @Override
        void verifyRequest(Message updateRequest) {
            assertEquals(userJid, updateRequest.getTo(), "The provided JID doesn't match the request!");
            GroupChatInvitation groupChatInvitation =
                (GroupChatInvitation) updateRequest.getExtension(GroupChatInvitation.NAMESPACE);
            assertNotNull(groupChatInvitation, "Missing GroupChatInvitation extension");
            assertEquals(roomJid, groupChatInvitation.getRoomAddress());
            assertNull(groupChatInvitation.getReason());
            assertNull(groupChatInvitation.getPassword());
            assertFalse(groupChatInvitation.continueAsOneToOneChat());
            assertNull(groupChatInvitation.getThread());
            updateRequestSent.set(true);
        }
    };
    serverSimulator.start();
    // Create multi user chat
    MultiUserChat multiUserChat = multiUserChatManager.getMultiUserChat(roomJid);
    // Call tested method
    multiUserChat.inviteDirectly(userJid);
    // Wait for processing requests
    serverSimulator.join(RESPONSE_TIMEOUT_IN_MILLIS);
    // Check if an error occurred within the simulator
    final Throwable exception = serverSimulator.getException();
    if (exception != null) {
        throw exception;
    }
    assertTrue(updateRequestSent.get(), "Invite directly request not sent");
}
// Builds the Drools AST for a PMML tree model: resolves the target field type
// and declared output fields, then walks the tree from its root node to emit
// one DRL rule per path; the caller-provided types are carried through as-is.
public static KiePMMLDroolsAST getKiePMMLDroolsAST(final List<Field<?>> fields,
                                                   final TreeModel model,
                                                   final Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap,
                                                   final List<KiePMMLDroolsType> types) {
    logger.trace("getKiePMMLDroolsAST {} {}", fields, model);
    final DATA_TYPE targetType = getTargetFieldType(fields, model);
    final List<OutputField> outputFields;
    if (model.getOutput() != null) {
        outputFields = model.getOutput().getOutputFields();
    } else {
        outputFields = Collections.emptyList();
    }
    final List<KiePMMLDroolsRule> rules = KiePMMLTreeModelNodeASTFactory
        .factory(fieldTypeMap, outputFields, model.getNoTrueChildStrategy(), targetType)
        .declareRulesFromRootNode(model.getNode(), "");
    return new KiePMMLDroolsAST(types, rules);
}
// Building the AST for the iris tree model must echo back the provided types
// list unchanged and produce a non-empty set of generated rules.
@Test
void getKiePMMLDroolsIrisAST() {
    final Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap =
        getFieldTypeMap(irisPmml.getDataDictionary(), irisPmml.getTransformationDictionary(), irisModel.getLocalTransformations());
    List<KiePMMLDroolsType> types = Collections.emptyList();
    KiePMMLDroolsAST retrieved =
        KiePMMLTreeModelASTFactory.getKiePMMLDroolsAST(getFieldsFromDataDictionary(irisPmml.getDataDictionary()), irisModel, fieldTypeMap, types);
    assertThat(retrieved).isNotNull();
    assertThat(retrieved.getTypes()).isEqualTo(types);
    assertThat(retrieved.getRules()).isNotEmpty();
}
// Returns a new SubIterator over the values carried under the given tag.
@Override
public Reiterator<Object> get(int tag) {
    return new SubIterator(tag);
}
// Iterators for different tags advance independently and may be interleaved;
// requesting a tag again yields a fresh iterator starting from the beginning,
// while previously obtained iterators keep their positions.
@Test
public void testPartialIteration() {
    TaggedReiteratorList iter = create(6, new String[] {"a", "b", "c"});
    Iterator<?> get0 = iter.get(0);
    Iterator<?> get1 = iter.get(1);
    Iterator<?> get3 = iter.get(3);
    assertEquals(asList(get0, 1), "a0");
    assertEquals(asList(get1, 2), "a1", "b1");
    assertEquals(asList(get3, 3), "a3", "b3", "c3");
    Iterator<?> get2 = iter.get(2);
    Iterator<?> get0Again = iter.get(0);
    // get0 resumes where it stopped; get0Again restarts from the first element.
    assertEquals(asList(get0, 1), "b0");
    assertEquals(get2, "a2", "b2", "c2");
    assertEquals(get0Again, "a0", "b0", "c0");
    assertEquals(asList(get0), "c0");
    Iterator<?> get4 = iter.get(4);
    assertEquals(get4, "a4", "b4", "c4");
    assertEquals(get4 /*empty*/);
    assertEquals(iter.get(4), "a4", "b4", "c4");
}
// Dispatches between three representations of Google Sheets data:
//  - a body already holding a ValueRange is converted to the model form;
//  - otherwise, if split results were requested, the split-values model is used;
//  - otherwise a ValueRange is built from the body for writing to the sheet.
// Range, major dimension, spreadsheet id, column names and flags are read from
// message headers (with defaults) under the GoogleSheets property prefix.
@Override
public void transform(Message message, DataType fromType, DataType toType) {
    final Optional<ValueRange> valueRange = getValueRangeBody(message);
    String range = message.getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "range", "A:A").toString();
    String majorDimension = message
        .getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "majorDimension", RangeCoordinate.DIMENSION_ROWS).toString();
    String spreadsheetId = message.getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "spreadsheetId", "").toString();
    String[] columnNames = message.getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "columnNames", "A").toString().split(",");
    boolean splitResults = Boolean
        .parseBoolean(message.getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "splitResults", "false").toString());
    if (valueRange.isPresent()) {
        message.setBody(
            transformFromValueRangeModel(message, valueRange.get(), spreadsheetId, range, majorDimension, columnNames));
    } else if (splitResults) {
        message.setBody(transformFromSplitValuesModel(message, spreadsheetId, range, majorDimension, columnNames));
    } else {
        // Writing back to the sheet: the value input option only matters here.
        String valueInputOption = message.getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "valueInputOption", "USER_ENTERED").toString();
        message.setBody(
            transformToValueRangeModel(message, spreadsheetId, range, majorDimension, valueInputOption, columnNames));
    }
}
@Test
public void testTransformToValueRangeMultipleColumns() throws Exception {
    Exchange inbound = new DefaultExchange(camelContext);
    // Column-oriented target range A1:B2.
    inbound.getMessage().setHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "range", "A1:B2");
    inbound.getMessage().setHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "majorDimension", RangeCoordinate.DIMENSION_COLUMNS);
    // Two JSON objects, each carrying two positional values (#1, #2).
    List<String> model = Arrays.asList("{" + "\"spreadsheetId\": \"" + spreadsheetId + "\"," + "\"#1\": \"a1\"," + "\"#2\": \"a2\"" + "}",
            "{" + "\"spreadsheetId\": \"" + spreadsheetId + "\"," + "\"#1\": \"b1\"," + "\"#2\": \"b2\"" + "}");
    inbound.getMessage().setBody(model);
    transformer.transform(inbound.getMessage(), DataType.ANY, DataType.ANY);
    // Coordinates are propagated to stream headers; default input option is applied.
    Assertions.assertEquals("A1:B2", inbound.getMessage().getHeader(GoogleSheetsStreamConstants.RANGE));
    Assertions.assertEquals(RangeCoordinate.DIMENSION_COLUMNS, inbound.getMessage().getHeader(GoogleSheetsStreamConstants.MAJOR_DIMENSION));
    Assertions.assertEquals("USER_ENTERED", inbound.getMessage().getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "valueInputOption"));
    // The generated ValueRange must contain a 2x2 grid matching the input values.
    ValueRange valueRange = (ValueRange) inbound.getMessage().getHeader(GoogleSheetsConstants.PROPERTY_PREFIX + "values");
    Assertions.assertEquals(2L, valueRange.getValues().size());
    Assertions.assertEquals(2L, valueRange.getValues().get(0).size());
    Assertions.assertEquals("a1", valueRange.getValues().get(0).get(0));
    Assertions.assertEquals("a2", valueRange.getValues().get(0).get(1));
    Assertions.assertEquals(2L, valueRange.getValues().get(1).size());
    Assertions.assertEquals("b1", valueRange.getValues().get(1).get(0));
    Assertions.assertEquals("b2", valueRange.getValues().get(1).get(1));
}
/**
 * Returns the externally shared event loop group when one is provided; otherwise
 * lazily creates a private {@code NioEventLoopGroup} for this client and caches it.
 *
 * Fix: the original unconditionally allocated a new group on every null call and
 * overwrote {@code this.eventLoopGroup}, leaking any previously created group
 * (its threads are never shut down once the field reference is lost).
 */
EventLoopGroup getSharedOrCreateEventLoopGroup(EventLoopGroup eventLoopGroupShared) {
    if (eventLoopGroupShared != null) {
        return eventLoopGroupShared;
    }
    // Reuse the previously created private group instead of leaking it.
    if (this.eventLoopGroup == null) {
        this.eventLoopGroup = new NioEventLoopGroup();
    }
    return this.eventLoopGroup;
}
@Test
public void givenNull_whenGetEventLoop_ThenReturnShared() {
    // Passing null (no shared group) makes the client fall back to its own NIO group.
    eventLoop = client.getSharedOrCreateEventLoopGroup(null);
    assertThat(eventLoop, instanceOf(NioEventLoopGroup.class));
}
/**
 * Creates a multiplexer that exclusively owns the given network
 * (constructed with the shared flag off).
 */
public static NetworkMultiplexer dedicated(Network net) {
    boolean shared = false;
    return new NetworkMultiplexer(net, shared);
}
@Test
void testDedicated() {
    MockNetwork net = new MockNetwork();
    MockOwner owner = new MockOwner();
    NetworkMultiplexer dedicated = NetworkMultiplexer.dedicated(net);
    // A dedicated multiplexer attaches itself to the network immediately,
    // without registering any sessions, and the network stays up.
    assertEquals(Set.of(dedicated), net.attached);
    assertEquals(Set.of(), net.registered);
    assertFalse(net.shutDown.get());
    // Detaching the only owner shuts the underlying network down.
    dedicated.attach(owner);
    dedicated.detach(owner);
    assertTrue(net.shutDown.get());
}
/**
 * Convenience varargs overload; delegates to the {@code List}-based parse.
 */
public RuntimeOptionsBuilder parse(String... args) {
    return parse(Arrays.asList(args));
}
@Test
void ensure_order_type_reverse_is_used() {
    RuntimeOptions options = parser
            .parse("--order", "reverse")
            .build();
    Pickle a = createPickle("file:path/file1.feature", "a");
    Pickle b = createPickle("file:path/file2.feature", "b");
    // With reverse ordering, pickles come back opposite to insertion order.
    assertThat(options.getPickleOrder()
            .orderPickles(Arrays.asList(a, b)), contains(b, a));
}
/**
 * Equality is based on exact runtime class, field id, and match value.
 */
@Override
public boolean equals(Object o) {
    if (o == this) {
        return true;
    }
    // Strict class check (not instanceof): subclasses never compare equal.
    if (o == null || o.getClass() != getClass()) {
        return false;
    }
    PiOptionalFieldMatch other = (PiOptionalFieldMatch) o;
    return Objects.equal(fieldId(), other.fieldId())
            && Objects.equal(value, other.value);
}
@Test
public void testEquals() {
    // Matches with equal field id and value share a group; a differing match
    // forms its own group. EqualsTester also checks hashCode consistency.
    new EqualsTester()
            .addEqualityGroup(piOptionalFieldMatch1, sameAsPiOptionalFieldMatch1)
            .addEqualityGroup(piOptionalFieldMatch2)
            .testEquals();
}
/** Package-private accessor for the Netty client bootstrap (used by tests). */
Bootstrap getBootstrap() {
    return bootstrap;
}
@Test
void testSetKeepaliveOptionWithNioNotConfigurable() throws Exception {
    // Only meaningful on runtimes where extended NIO keepalive options are NOT configurable.
    assumeThat(keepaliveForNioConfigurable()).isFalse();
    final Configuration config = new Configuration();
    config.set(NettyShuffleEnvironmentOptions.TRANSPORT_TYPE, "nio");
    config.set(NettyShuffleEnvironmentOptions.CLIENT_TCP_KEEP_IDLE_SECONDS, 300);
    config.set(NettyShuffleEnvironmentOptions.CLIENT_TCP_KEEP_INTERVAL_SECONDS, 10);
    config.set(NettyShuffleEnvironmentOptions.CLIENT_TCP_KEEP_COUNT, 8);
    try (NetUtils.Port clientPort = NetUtils.getAvailablePort()) {
        final NettyClient client = createNettyClient(config, clientPort);
        Map<String, Object> options = client.getBootstrap().config().options().entrySet().stream()
                .collect(Collectors.toMap(e -> e.getKey().name(), Map.Entry::getValue));
        // The keepalive tuning keys must be silently dropped from the bootstrap options.
        assertThat(options)
                .doesNotContainKeys(
                        NettyClient.NIO_TCP_KEEPIDLE_KEY,
                        NettyClient.NIO_TCP_KEEPINTERVAL_KEY,
                        NettyClient.NIO_TCP_KEEPCOUNT_KEY);
    }
}
/** Returns the private scalar {@code d} of this EC private key. */
public BigInteger getD() {
    return d;
}
@Test
public void shouldConvertPrivateFactor() {
    // The scalar D supplied via ECPrivateKeyParameters must round-trip unchanged.
    assertEquals(D, new EcPrivateKey(new ECPrivateKeyParameters(D, PARAMS)).getD());
}
protected static DataSource getDataSourceFromJndi( String dsName, Context ctx ) throws NamingException { if ( Utils.isEmpty( dsName ) ) { throw new NamingException( BaseMessages.getString( PKG, "DatabaseUtil.DSNotFound", String.valueOf( dsName ) ) ); } Object foundDs = FoundDS.get( dsName ); if ( foundDs != null ) { return (DataSource) foundDs; } Object lkup = null; DataSource rtn = null; NamingException firstNe = null; // First, try what they ask for... try { lkup = ctx.lookup( dsName ); if ( lkup instanceof DataSource ) { rtn = (DataSource) lkup; FoundDS.put( dsName, rtn ); return rtn; } } catch ( NamingException ignored ) { firstNe = ignored; } try { // Needed this for Jboss lkup = ctx.lookup( "java:" + dsName ); if ( lkup instanceof DataSource ) { rtn = (DataSource) lkup; FoundDS.put( dsName, rtn ); return rtn; } } catch ( NamingException ignored ) { // ignore } try { // Tomcat lkup = ctx.lookup( "java:comp/env/jdbc/" + dsName ); if ( lkup instanceof DataSource ) { rtn = (DataSource) lkup; FoundDS.put( dsName, rtn ); return rtn; } } catch ( NamingException ignored ) { // ignore } try { // Others? lkup = ctx.lookup( "jdbc/" + dsName ); if ( lkup instanceof DataSource ) { rtn = (DataSource) lkup; FoundDS.put( dsName, rtn ); return rtn; } } catch ( NamingException ignored ) { // ignore } if ( firstNe != null ) { throw firstNe; } throw new NamingException( BaseMessages.getString( PKG, "DatabaseUtil.DSNotFound", dsName ) ); }
@Test( expected = NamingException.class )
public void testNullName() throws NamingException {
    // A null data-source name must be rejected with a NamingException.
    DatabaseUtil.getDataSourceFromJndi( null, context );
}
/**
 * Expands a value into its hashed edge feature plus one hashed range feature per
 * level, where each level covers an interval {@code arity} times wider than the
 * previous. Out-of-bounds values expand to nothing.
 */
public void expand(String key, long value, RangeHandler rangeHandler, EdgeHandler edgeHandler) {
    if (value < lowerBound || value > upperBound) {
        // Value outside bounds -> expand to nothing.
        return;
    }
    // Negative values use their own level count and grow ranges in the negative direction.
    int maxLevels = value > 0 ? maxPositiveLevels : maxNegativeLevels;
    int sign = value > 0 ? 1 : -1;
    // Append key to feature string builder
    StringBuilder builder = new StringBuilder(128);
    builder.append(key).append('=');
    long levelSize = arity;
    // Edge feature: the arity-aligned interval containing the value,
    // plus the value's offset within that interval.
    long edgeInterval = (value / arity) * arity;
    edgeHandler.handleEdge(createEdgeFeatureHash(builder, edgeInterval), (int) Math.abs(value - edgeInterval));
    for (int i = 0; i < maxLevels; ++i) {
        // Align the range start to the current level's interval size.
        long start = (value / levelSize) * levelSize;
        if (Math.abs(start) + levelSize - 1 < 0) {
            // overflow
            break;
        }
        rangeHandler.handleRange(createRangeFeatureHash(builder, start, start + sign * (levelSize - 1)));
        levelSize *= arity;
        if (levelSize <= 0 && levelSize != Long.MIN_VALUE) {
            //overflow
            break;
        }
    }
}
@Test
void requireThatLargeRangeIsExpanded() {
    // Arity 10: each successive range label covers one more decimal digit of the value.
    PredicateRangeTermExpander expander = new PredicateRangeTermExpander(10);
    Iterator<String> expectedLabels = List.of(
            "key=123456789012345670-123456789012345679",
            "key=123456789012345600-123456789012345699",
            "key=123456789012345000-123456789012345999",
            "key=123456789012340000-123456789012349999",
            "key=123456789012300000-123456789012399999",
            "key=123456789012000000-123456789012999999",
            "key=123456789010000000-123456789019999999",
            "key=123456789000000000-123456789099999999",
            "key=123456789000000000-123456789999999999",
            "key=123456780000000000-123456789999999999",
            "key=123456700000000000-123456799999999999",
            "key=123456000000000000-123456999999999999",
            "key=123450000000000000-123459999999999999",
            "key=123400000000000000-123499999999999999",
            "key=123000000000000000-123999999999999999",
            "key=120000000000000000-129999999999999999",
            "key=100000000000000000-199999999999999999",
            "key=0-999999999999999999").iterator();
    // Range hashes must arrive in level order; the edge term carries the
    // offset (8) of the value within its 10-wide interval.
    expander.expand("key", 123456789012345678L,
                    range -> assertEquals(PredicateHash.hash64(expectedLabels.next()), range),
                    (edge, value) -> {
                        assertEquals(PredicateHash.hash64("key=123456789012345670"), edge);
                        assertEquals(8, value);
                    });
    // All expected levels must have been consumed.
    assertFalse(expectedLabels.hasNext());
}
/**
 * Mines the given FP-tree and returns the discovered item sets
 * as a sequential stream.
 */
public static Stream<ItemSet> apply(FPTree tree) {
    return StreamSupport.stream(new FPGrowth(tree).spliterator(), false);
}
@Test
public void testSinglePath() {
    System.out.println("single path");
    // FP-tree with min support 1 over the second itemset fixture;
    // mining it is expected to yield 15 frequent item sets.
    FPTree tree = FPTree.of(1, itemsets2);
    assertEquals(15, FPGrowth.apply(tree).count());
}
/**
 * Returns the number of bars the position spans (entry and exit inclusive),
 * or zero for a position that is still open.
 */
@Override
public Num calculate(BarSeries series, Position position) {
    // Open positions contribute no bars.
    if (!position.isClosed()) {
        return series.zero();
    }
    int entryIndex = position.getEntry().getIndex();
    int exitIndex = position.getExit().getIndex();
    // +1 because both the entry and the exit bar are counted.
    int barCount = exitIndex - entryIndex + 1;
    return series.numOf(barCount);
}
@Test
public void calculateWithTwoPositions() {
    MockBarSeries series = new MockBarSeries(numFunction, 100, 105, 110, 100, 95, 105);
    // Two closed positions: bars 0-2 (3 bars) and bars 3-5 (3 bars).
    TradingRecord tradingRecord = new BaseTradingRecord(Trade.buyAt(0, series), Trade.sellAt(2, series),
            Trade.buyAt(3, series), Trade.sellAt(5, series));
    AnalysisCriterion numberOfBars = getCriterion();
    // The criterion sums bar counts across positions: 3 + 3 = 6.
    assertNumEquals(6, numberOfBars.calculate(series, tradingRecord));
}
/**
 * Builds a {@link DateTimeFormatter} from a Hive-style date format string.
 * Supported tokens: literal text, DD, HH24, HH (12-hour), MI, MM, SS,
 * YY (reduced, base 2000) and YYYY. Time fields absent from the format are
 * defaulted to zero so a bare date still parses as a date-time.
 *
 * @throws PrestoException if the format contains an unrecognized token or the
 *         formatter cannot be built
 */
public static DateTimeFormatter createDateTimeFormatter(String format, Mode mode)
{
    DateTimeFormatterBuilder builder = new DateTimeFormatterBuilder();
    boolean formatContainsHourOfAMPM = false;
    for (Token token : tokenize(format)) {
        switch (token.getType()) {
            case DateFormat.TEXT:
                builder.appendLiteral(token.getText());
                break;
            case DateFormat.DD:
                builder.appendValue(DAY_OF_MONTH, mode.getMinTwoPositionFieldWidth(), 2, NOT_NEGATIVE);
                break;
            case DateFormat.HH24:
                builder.appendValue(HOUR_OF_DAY, mode.getMinTwoPositionFieldWidth(), 2, NOT_NEGATIVE);
                break;
            case DateFormat.HH:
                // 'HH' is a 12-hour field; remember it so AM/PM can be defaulted below.
                builder.appendValue(HOUR_OF_AMPM, mode.getMinTwoPositionFieldWidth(), 2, NOT_NEGATIVE);
                formatContainsHourOfAMPM = true;
                break;
            case DateFormat.MI:
                builder.appendValue(MINUTE_OF_HOUR, mode.getMinTwoPositionFieldWidth(), 2, NOT_NEGATIVE);
                break;
            case DateFormat.MM:
                builder.appendValue(MONTH_OF_YEAR, mode.getMinTwoPositionFieldWidth(), 2, NOT_NEGATIVE);
                break;
            case DateFormat.SS:
                builder.appendValue(SECOND_OF_MINUTE, mode.getMinTwoPositionFieldWidth(), 2, NOT_NEGATIVE);
                break;
            case DateFormat.YY:
                builder.appendValueReduced(YEAR, 2, 2, 2000);
                break;
            case DateFormat.YYYY:
                builder.appendValue(YEAR, 4);
                break;
            case DateFormat.UNRECOGNIZED:
            default:
                throw new PrestoException(
                        StandardErrorCode.INVALID_FUNCTION_ARGUMENT,
                        String.format("Failed to tokenize string [%s] at offset [%d]", token.getText(), token.getCharPositionInLine()));
        }
    }
    try {
        // Append default values(0) for time fields(HH24, HH, MI, SS) because JSR-310 does not accept bare Date value as DateTime
        if (formatContainsHourOfAMPM) {
            // At the moment format does not allow to include AM/PM token, thus it was never possible to specify PM hours using 'HH' token in format
            // Keep existing behaviour by defaulting to 0(AM) for AMPM_OF_DAY if format string contains 'HH'
            builder.parseDefaulting(HOUR_OF_AMPM, 0)
                    .parseDefaulting(AMPM_OF_DAY, 0);
        }
        else {
            builder.parseDefaulting(HOUR_OF_DAY, 0);
        }
        return builder.parseDefaulting(MINUTE_OF_HOUR, 0)
                .parseDefaulting(SECOND_OF_MINUTE, 0)
                .toFormatter();
    }
    catch (UnsupportedOperationException e) {
        throw new PrestoException(INVALID_FUNCTION_ARGUMENT, e);
    }
}
@Test(expectedExceptions = PrestoException.class)
public void testInvalidTokenCreate1() {
    // "ala" contains no recognized format tokens -> formatter creation must fail.
    DateFormatParser.createDateTimeFormatter("ala", FORMATTER);
}
/**
 * Netty entry point: delegates to the channel-based overload.
 * The trailing flag is {@code true} here — presumably "send response back" /
 * broker-side behavior; confirm against the overload's signature.
 */
@Override
public RemotingCommand processRequest(final ChannelHandlerContext ctx, RemotingCommand request) throws RemotingCommandException {
    return this.processRequest(ctx.channel(), request, true);
}
@Test
public void testProcessRequest() throws RemotingCommandException {
    RemotingCommand request = createPeekMessageRequest("group","topic",0);
    // Store stub: FOUND result carrying 10 copies of one 64-byte mapped buffer.
    GetMessageResult getMessageResult = new GetMessageResult();
    getMessageResult.setStatus(GetMessageStatus.FOUND);
    ByteBuffer bb = ByteBuffer.allocate(64);
    bb.putLong(MessageDecoder.MESSAGE_STORE_TIMESTAMP_POSITION, System.currentTimeMillis());
    SelectMappedBufferResult mappedBufferResult1 = new SelectMappedBufferResult(0, bb, 64, null);
    for (int i = 0; i < 10;i++) {
        getMessageResult.addMessage(mappedBufferResult1);
    }
    when(messageStore.getMessage(anyString(),anyString(),anyInt(),anyLong(),anyInt(),any())).thenReturn(getMessageResult);
    // Peeking must succeed when the store reports messages as found.
    RemotingCommand response = peekMessageProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
}
/**
 * Launches the OS process described by the command and wraps it in a
 * {@link ManagedProcess}. Elasticsearch gets its data cleaned and config files
 * written before start, and is monitored over HTTP; all other processes are
 * monitored via shared process commands.
 *
 * @throws IllegalStateException if the command type is unexpected or monitor
 *         setup fails (the just-started process is then killed)
 */
public ManagedProcess launch(AbstractCommand command) {
    EsInstallation esInstallation = command.getEsInstallation();
    if (esInstallation != null) {
        // ES needs outdated data removed and config files in place before starting.
        cleanupOutdatedEsData(esInstallation);
        writeConfFiles(esInstallation);
    }
    Process process;
    if (command instanceof JavaCommand<?> javaCommand) {
        process = launchJava(javaCommand);
    } else {
        throw new IllegalStateException("Unexpected type of command: " + command.getClass());
    }
    ProcessId processId = command.getProcessId();
    try {
        if (processId == ProcessId.ELASTICSEARCH) {
            checkArgument(esInstallation != null, "Incorrect configuration EsInstallation is null");
            // ES readiness is checked through its HTTP endpoint rather than IPC commands.
            EsConnectorImpl esConnector = new EsConnectorImpl(singleton(HostAndPort.fromParts(esInstallation.getHost(), esInstallation.getHttpPort())),
                esInstallation.getBootstrapPassword(), esInstallation.getHttpKeyStoreLocation(),
                esInstallation.getHttpKeyStorePassword().orElse(null));
            return new EsManagedProcess(process, processId, esConnector);
        } else {
            // Other processes communicate through per-process IPC command slots.
            ProcessCommands commands = allProcessesCommands.createAfterClean(processId.getIpcIndex());
            return new ProcessCommandsManagedProcess(process, processId, commands);
        }
    } catch (Exception e) {
        // just in case: don't leave an orphaned OS process behind if monitor setup fails
        if (process != null) {
            process.destroyForcibly();
        }
        throw new IllegalStateException(format("Fail to launch monitor of process [%s]", processId.getHumanReadableName()), e);
    }
}
@Test
public void temporary_properties_file_can_be_avoided() throws Exception {
    File tempDir = temp.newFolder();
    TestProcessBuilder processBuilder = new TestProcessBuilder();
    ProcessLauncher underTest = new ProcessLauncherImpl(tempDir, commands, () -> processBuilder);
    JavaCommand<JvmOptions<?>> command = new JavaCommand<>(ProcessId.WEB_SERVER, temp.newFolder());
    // Opting out of the properties file: arguments go on the command line instead.
    command.setReadsArgumentsFromFile(false);
    command.setArgument("foo", "bar");
    command.setArgument("baz", "woo");
    command.setJvmOptions(new JvmOptions<>());
    underTest.launch(command);
    // The last command-line entry is where the properties file path would be;
    // no such file must have been written to disk.
    String propsFilePath = processBuilder.commands.get(processBuilder.commands.size() - 1);
    File file = new File(propsFilePath);
    assertThat(file).doesNotExist();
}
protected Object[] copyOrCloneArrayFromLoadFile( Object[] outputRowData, Object[] readrow ) { // if readrow array is shorter than outputRowData reserved space, then we can not clone it because we have to // preserve the outputRowData reserved space. Clone, creates a new array with a new length, equals to the // readRow length and with that set we lost our outputRowData reserved space - needed for future additions. // The equals case works in both clauses, but arraycopy is up to 5 times faster for smaller arrays. if ( readrow.length <= outputRowData.length ) { System.arraycopy( readrow, 0, outputRowData, 0, readrow.length ); } else { // if readrow array is longer than outputRowData reserved space, then we can only clone it. // Copy does not work here and will return an error since we are trying to copy a bigger array into a shorter one. outputRowData = readrow.clone(); } return outputRowData; }
@Test
public void testCopyOrCloneArrayFromLoadFileWithBiggerSizedReadRowArray() {
    // When the read row (6) is longer than the reserved output array (5),
    // the method must clone it, so the result takes the read row's length.
    int size = 5;
    Object[] rowData = new Object[ size ];
    Object[] readrow = new Object[ size + 1 ];
    LoadFileInput loadFileInput = mock( LoadFileInput.class );
    Mockito.when( loadFileInput.copyOrCloneArrayFromLoadFile( rowData, readrow ) ).thenCallRealMethod();
    assertEquals( 6, loadFileInput.copyOrCloneArrayFromLoadFile( rowData, readrow ).length );
}
/**
 * Folds the given value into the running minimum.
 */
@Override
public void add(Integer value) {
    // Only update when the new value is strictly smaller; ties keep the current minimum.
    int candidate = value;
    if (candidate < this.min) {
        this.min = candidate;
    }
}
@Test
void testAdd() {
    IntMinimum min = new IntMinimum();
    // The accumulator must track the smallest value seen, regardless of insertion order.
    min.add(1234);
    min.add(9876);
    min.add(-987);
    min.add(-123);
    assertThat(min.getLocalValue().intValue()).isEqualTo(-987);
}
/**
 * Serves an incoming HTTP request by matching it against the mock feature
 * scenarios, executing the first matching scenario and shaping the response
 * from the engine variables it leaves behind. Synchronized: mock state
 * (globals, engine thread-locals) is shared across requests.
 */
@Override
public synchronized Response handle(Request req) { // note the [synchronized]
    // Answer CORS pre-flight requests directly, without consulting any scenario.
    if (corsEnabled && "OPTIONS".equals(req.getMethod())) {
        Response response = new Response(200);
        response.setHeader("Allow", ALLOWED_METHODS);
        response.setHeader("Access-Control-Allow-Origin", "*");
        response.setHeader("Access-Control-Allow-Methods", ALLOWED_METHODS);
        List<String> requestHeaders = req.getHeaderValues("Access-Control-Request-Headers");
        if (requestHeaders != null) {
            response.setHeader("Access-Control-Allow-Headers", requestHeaders);
        }
        return response;
    }
    // Strip the configured path prefix before scenario matching.
    if (prefix != null && req.getPath().startsWith(prefix)) {
        req.setPath(req.getPath().substring(prefix.length()));
    }
    // rare case when http-client is active within same jvm
    // snapshot existing thread-local to restore
    ScenarioEngine prevEngine = ScenarioEngine.get();
    for (Map.Entry<Feature, ScenarioRuntime> entry : scenarioRuntimes.entrySet()) {
        Feature feature = entry.getKey();
        ScenarioRuntime runtime = entry.getValue();
        // important for graal to work properly
        Thread.currentThread().setContextClassLoader(runtime.featureRuntime.suite.classLoader);
        LOCAL_REQUEST.set(req);
        req.processBody();
        ScenarioEngine engine = initEngine(runtime, globals, req);
        for (FeatureSection fs : feature.getSections()) {
            if (fs.isOutline()) {
                // scenario outlines are not supported for mocks; stop scanning this feature
                runtime.logger.warn("skipping scenario outline - {}:{}", feature, fs.getScenarioOutline().getLine());
                break;
            }
            Scenario scenario = fs.getScenario();
            if (isMatchingScenario(scenario, engine)) {
                Map<String, Object> configureHeaders;
                Variable response, responseStatus, responseHeaders, responseDelay;
                ScenarioActions actions = new ScenarioActions(engine);
                Result result = executeScenarioSteps(feature, runtime, scenario, actions);
                engine.mockAfterScenario();
                configureHeaders = engine.mockConfigureHeaders();
                // pull the response-shaping variables out of the engine
                // before persisting the remaining variables as globals
                response = engine.vars.remove(ScenarioEngine.RESPONSE);
                responseStatus = engine.vars.remove(ScenarioEngine.RESPONSE_STATUS);
                responseHeaders = engine.vars.remove(ScenarioEngine.RESPONSE_HEADERS);
                responseDelay = engine.vars.remove(RESPONSE_DELAY);
                globals.putAll(engine.shallowCloneVariables());
                Response res = new Response(200);
                if (result.isFailed()) {
                    // a failed scenario step surfaces as a 500 whose body is the error message
                    response = new Variable(result.getError().getMessage());
                    responseStatus = new Variable(500);
                } else {
                    if (corsEnabled) {
                        res.setHeader("Access-Control-Allow-Origin", "*");
                    }
                    res.setHeaders(configureHeaders);
                    if (responseHeaders != null && responseHeaders.isMap()) {
                        res.setHeaders(responseHeaders.getValue());
                    }
                    if (responseDelay != null) {
                        res.setDelay(responseDelay.getAsInt());
                    }
                }
                if (response != null && !response.isNull()) {
                    res.setBody(response.getAsByteArray());
                    if (res.getContentType() == null) {
                        // infer a content type from the response value when none was set explicitly
                        ResourceType rt = ResourceType.fromObject(response.getValue());
                        if (rt != null) {
                            res.setContentType(rt.contentType);
                        }
                    }
                }
                if (responseStatus != null) {
                    res.setStatus(responseStatus.getAsInt());
                }
                // restore the engine thread-local snapshotted on entry
                if (prevEngine != null) {
                    ScenarioEngine.set(prevEngine);
                }
                if (mockInterceptor != null) {
                    mockInterceptor.intercept(req, res, scenario);
                }
                return res;
            }
        }
    }
    logger.warn("no scenarios matched, returning 404: {}", req); // NOTE: not logging with engine.logger
    if (prevEngine != null) {
        ScenarioEngine.set(prevEngine);
    }
    return new Response(404);
}
@Test
void testGraalJavaClassLoading() {
    // The mock scenario loads a Java class through Graal's Java.type
    // and serves its static byte payload as the response.
    background().scenario(
            "pathMatches('/hello')",
            "def Utils = Java.type('com.intuit.karate.core.MockUtils')",
            "def response = Utils.testBytes"
    );
    request.path("/hello");
    handle();
    match(response.getBody(), MockUtils.testBytes);
}
/** Exposes the wrapped message's id in the JSON representation. */
@JsonProperty
public String getId() {
    return message.getId();
}
@Test
public void testGetId() throws Exception {
    // The summary must delegate id lookup to the wrapped message.
    assertEquals(message.getId(), messageSummary.getId());
}