focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override public ImmutableList<PluginDefinition> getAllPlugins() { try { if (checkHealthWithBackoffs()) { logger.atInfo().log("Getting language server plugins..."); var listPluginsResponse = service .listPluginsWithDeadline(ListPluginsRequest.getDefaultInstance(), DEFAULT_DEADLINE) .get(); // Note: each plugin service client has a dedicated RemoteVulnDetectorImpl instance, // so we can safely set this flag here. this.wantCompactRunRequest = listPluginsResponse.getWantCompactRunRequest(); return ImmutableList.copyOf(listPluginsResponse.getPluginsList()); } else { return ImmutableList.of(); } } catch (InterruptedException | ExecutionException e) { throw new LanguageServerException("Failed to get response from language server.", e); } }
@Test
public void getAllPlugins_withServingServer_returnsSuccessfulList() throws Exception {
  // A serving health check plus a stub plugin service should yield exactly the stubbed plugin.
  registerHealthCheckWithStatus(ServingStatus.SERVING);
  var expectedPlugin = createSinglePluginDefinitionWithName("test");
  RemoteVulnDetector detector = getNewRemoteVulnDetectorInstance();
  serviceRegistry.addService(
      new PluginServiceImplBase() {
        @Override
        public void listPlugins(
            ListPluginsRequest request, StreamObserver<ListPluginsResponse> responseObserver) {
          responseObserver.onNext(
              ListPluginsResponse.newBuilder().addPlugins(expectedPlugin).build());
          responseObserver.onCompleted();
        }
      });
  assertThat(detector.getAllPlugins()).containsExactly(expectedPlugin);
}
/**
 * Returns a {@code RedistributeByKey} transform that redistributes a keyed
 * {@code PCollection} without otherwise changing its contents.
 */
public static <K, V> RedistributeByKey<K, V> byKey() {
  // NOTE(review): the boolean constructor argument is a behavior flag whose meaning is not
  // visible from this method alone — confirm against RedistributeByKey's constructor.
  return new RedistributeByKey<>(false);
}
@Test
@Category(ValidatesRunner.class)
public void testJustRedistribute() {
  // Redistribution must preserve both the elements and the windowing strategy.
  PCollection<KV<String, Integer>> source =
      pipeline.apply(
          Create.of(ARBITRARY_KVS)
              .withCoder(KvCoder.of(StringUtf8Coder.of(), VarIntCoder.of())));
  PCollection<KV<String, Integer>> redistributed = source.apply(Redistribute.byKey());
  PAssert.that(redistributed).containsInAnyOrder(ARBITRARY_KVS);
  assertEquals(source.getWindowingStrategy(), redistributed.getWindowingStrategy());
  pipeline.run();
}
/**
 * Orders evidence case-insensitively by source, then name, then value, and finally by
 * confidence and the from-hint flag.
 *
 * <p>Lower-casing uses an explicit locale so the ordering is stable regardless of the JVM's
 * default locale (the default-locale {@code toLowerCase()} changes results under e.g. the
 * Turkish locale, which would make {@code compareTo} inconsistent across machines).
 *
 * @param o the evidence to compare against; must not be null
 * @return a negative, zero, or positive integer per the {@link Comparable} contract
 */
@Override
public int compareTo(@NotNull Evidence o) {
    return new CompareToBuilder()
            .append(lowerCaseOrNull(this.source), lowerCaseOrNull(o.source))
            .append(lowerCaseOrNull(this.name), lowerCaseOrNull(o.name))
            .append(lowerCaseOrNull(this.value), lowerCaseOrNull(o.value))
            .append(this.confidence, o.getConfidence())
            .append(this.fromHint, o.isFromHint())
            .toComparison();
}

/** Lower-cases {@code s} with a fixed locale, passing null through unchanged. */
private static String lowerCaseOrNull(String s) {
    return s == null ? null : s.toLowerCase(java.util.Locale.ENGLISH);
}
@Test
public void testCompareTo() {
    // Fixtures ordered by (source, name, value) case-insensitively; instance sits between
    // that7 and that9, and equals that8 (differing only in case).
    Evidence that0 = new Evidence("file", "name", "guice-3.0", Confidence.HIGHEST);
    Evidence that1 = new Evidence("jar", "package name", "dependency", Confidence.HIGHEST);
    Evidence that2 = new Evidence("jar", "package name", "google", Confidence.HIGHEST);
    Evidence that3 = new Evidence("jar", "package name", "guice", Confidence.HIGHEST);
    Evidence that4 = new Evidence("jar", "package name", "inject", Confidence.HIGHEST);
    Evidence that5 = new Evidence("jar", "package name", "inject", Confidence.LOW);
    Evidence that6 = new Evidence("jar", "package name", "internal", Confidence.LOW);
    Evidence that7 = new Evidence("manifest", "Bundle-Description",
            "Guice is a lightweight dependency injection framework for Java 5 and above",
            Confidence.MEDIUM);
    Evidence that8 = new Evidence("Manifest", "Implementation-Title", "Spring Framework", Confidence.HIGH);
    Evidence that9 = new Evidence("manifest", "implementation-title", "zippy", Confidence.HIGH);
    Evidence instance = new Evidence("Manifest", "Implementation-Title", "Spring Framework", Confidence.HIGH);
    int result = instance.compareTo(that0);
    assertTrue(result > 0);
    result = instance.compareTo(that1);
    assertTrue(result > 0);
    result = instance.compareTo(that2);
    assertTrue(result > 0);
    result = instance.compareTo(that3);
    assertTrue(result > 0);
    result = instance.compareTo(that4);
    assertTrue(result > 0);
    result = instance.compareTo(that5);
    assertTrue(result > 0);
    result = instance.compareTo(that6);
    assertTrue(result > 0);
    result = instance.compareTo(that7);
    assertTrue(result > 0);
    // Identical apart from case -> compares equal.
    result = instance.compareTo(that8);
    assertTrue(result == 0);
    result = instance.compareTo(that9);
    assertTrue(result < 0);
}
/**
 * Injects the active audit-span context (trace/span/operator/parent/follows-from ids) into
 * the request's headers so the trace propagates across the HTTP call.
 *
 * <p>If there is no active span the request is returned untouched; the header map is only
 * allocated after that guard (the original built it unconditionally).
 *
 * @param request the outgoing request to decorate; returned for chaining
 * @return the same request instance, possibly with audit headers added
 */
public HttpRequest inject(HttpRequest request) {
  if (manager.activeSpan() == null) {
    // Nothing to propagate without an active span.
    return request;
  }
  Map<String, List<String>> map = new HashMap<>();
  map.put(ApolloAuditConstants.TRACE_ID,
      Collections.singletonList(manager.activeSpan().traceId()));
  map.put(ApolloAuditConstants.SPAN_ID,
      Collections.singletonList(manager.activeSpan().spanId()));
  map.put(ApolloAuditConstants.OPERATOR,
      Collections.singletonList(manager.activeSpan().operator()));
  map.put(ApolloAuditConstants.PARENT_ID,
      Collections.singletonList(manager.activeSpan().parentId()));
  map.put(ApolloAuditConstants.FOLLOWS_FROM_ID,
      Collections.singletonList(manager.activeSpan().followsFromId()));
  HttpHeaders headers = request.getHeaders();
  headers.putAll(map);
  return request;
}
@Test
public void testInjectCaseActiveSpanIsNull() {
  // With no active span, inject() must leave every audit header unset.
  HttpRequest request = Mockito.mock(HttpRequest.class);
  Mockito.when(manager.activeSpan()).thenReturn(null);
  Mockito.when(request.getHeaders()).thenReturn(new HttpHeaders());
  HttpRequest injected = tracer.inject(request);
  HttpHeaders headers = injected.getHeaders();
  assertNull(headers.get(ApolloAuditConstants.TRACE_ID));
  assertNull(headers.get(ApolloAuditConstants.SPAN_ID));
  assertNull(headers.get(ApolloAuditConstants.OPERATOR));
  assertNull(headers.get(ApolloAuditConstants.PARENT_ID));
  assertNull(headers.get(ApolloAuditConstants.FOLLOWS_FROM_ID));
}
/**
 * Returns a description of this topology's sub-topologies, sources, processors, and sinks.
 *
 * <p>Synchronized so the description reflects a consistent snapshot of the underlying
 * builder's state.
 */
public synchronized TopologyDescription describe() {
    return internalTopologyBuilder.describe();
}
@Test
public void sessionWindowedCogroupedZeroArgCountWithTopologyConfigShouldPreserveTopologyStructure() {
    // override the default store into in-memory
    final StreamsBuilder builder = new StreamsBuilder(overrideDefaultStore(StreamsConfig.IN_MEMORY));
    builder.stream("input-topic")
        .groupByKey()
        .cogroup((key, value, aggregate) -> value)
        .windowedBy(SessionWindows.ofInactivityGapWithNoGrace(ofMillis(1)))
        .aggregate(() -> "", (aggKey, aggOne, aggTwo) -> "");
    final Topology topology = builder.build();
    final TopologyDescription describe = topology.describe();
    // Expect the exact textual structure: source -> aggregate -> merge.
    assertEquals(
        "Topology: my-topology:\n" +
            "   Sub-topology: 0\n" +
            "    Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" +
            "      --> COGROUPKSTREAM-AGGREGATE-0000000002\n" +
            "    Processor: COGROUPKSTREAM-AGGREGATE-0000000002 (stores: [COGROUPKSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" +
            "      --> COGROUPKSTREAM-MERGE-0000000003\n" +
            "      <-- KSTREAM-SOURCE-0000000000\n" +
            "    Processor: COGROUPKSTREAM-MERGE-0000000003 (stores: [])\n" +
            "      --> none\n" +
            "      <-- COGROUPKSTREAM-AGGREGATE-0000000002\n\n",
        describe.toString()
    );
    topology.internalTopologyBuilder.setStreamsConfig(streamsConfig);
    // The in-memory override means the built topology must have no persistent local store.
    assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(),
        is(false));
}
/**
 * Lexicographically compares two lists, padding the shorter one with {@code fillValue}
 * until both have the length of the longer list.
 *
 * @param left the first list
 * @param right the second list
 * @param fillValue the element substituted for missing positions
 * @return the first non-zero element comparison, or 0 if the padded lists are equal
 */
static <T extends Comparable<? super T>> int compareListWithFillValue(
        List<T> left, List<T> right, T fillValue) {
    final int limit = Math.max(left.size(), right.size());
    for (int index = 0; index < limit; index++) {
        final T a = index < left.size() ? left.get(index) : fillValue;
        final T b = index < right.size() ? right.get(index) : fillValue;
        final int comparison = a.compareTo(b);
        if (comparison != 0) {
            return comparison;
        }
    }
    return 0;
}
@Test
public void compareWithFillValue_nonEmptyListVariedSizeWithPositiveFillValue_returnsPositive() {
    // [1, 2] padded with 100 becomes [1, 2, 100], which sorts after [1, 2, 3].
    assertThat(
            ComparisonUtility.compareListWithFillValue(
                    Lists.newArrayList(1, 2), Lists.newArrayList(1, 2, 3), 100))
        .isGreaterThan(0);
}
/**
 * De-multiplexes an egress fragment by SBE template id and dispatches it to the listener,
 * delivering session messages, session events, new-leader events, and admin responses only
 * when the decoded cluster session id matches this adapter's {@code clusterSessionId}.
 *
 * <p>Fragments from a foreign schema are forwarded to {@code listenerExtension} when one is
 * configured; otherwise they are rejected with a {@code ClusterException}.
 */
@SuppressWarnings("MethodLength")
public void onFragment(final DirectBuffer buffer, final int offset, final int length, final Header header) {
    messageHeaderDecoder.wrap(buffer, offset);
    final int templateId = messageHeaderDecoder.templateId();
    final int schemaId = messageHeaderDecoder.schemaId();
    if (schemaId != MessageHeaderDecoder.SCHEMA_ID) {
        // Unknown schema: hand the raw payload (past the message header) to the extension.
        if (listenerExtension != null) {
            listenerExtension.onExtensionMessage(
                messageHeaderDecoder.blockLength(),
                templateId,
                schemaId,
                messageHeaderDecoder.version(),
                buffer,
                offset + MessageHeaderDecoder.ENCODED_LENGTH,
                length - MessageHeaderDecoder.ENCODED_LENGTH);
            return;
        }
        throw new ClusterException("expected schemaId=" + MessageHeaderDecoder.SCHEMA_ID + ", actual=" + schemaId);
    }
    switch (templateId) {
        case SessionMessageHeaderDecoder.TEMPLATE_ID: {
            sessionMessageHeaderDecoder.wrap(
                buffer,
                offset + MessageHeaderDecoder.ENCODED_LENGTH,
                messageHeaderDecoder.blockLength(),
                messageHeaderDecoder.version());
            final long sessionId = sessionMessageHeaderDecoder.clusterSessionId();
            if (sessionId == clusterSessionId) {
                // Payload starts after the full session header, not just the message header.
                listener.onMessage(
                    sessionId,
                    sessionMessageHeaderDecoder.timestamp(),
                    buffer,
                    offset + SESSION_HEADER_LENGTH,
                    length - SESSION_HEADER_LENGTH,
                    header);
            }
            break;
        }
        case SessionEventDecoder.TEMPLATE_ID: {
            sessionEventDecoder.wrap(
                buffer,
                offset + MessageHeaderDecoder.ENCODED_LENGTH,
                messageHeaderDecoder.blockLength(),
                messageHeaderDecoder.version());
            final long sessionId = sessionEventDecoder.clusterSessionId();
            if (sessionId == clusterSessionId) {
                listener.onSessionEvent(
                    sessionEventDecoder.correlationId(),
                    sessionId,
                    sessionEventDecoder.leadershipTermId(),
                    sessionEventDecoder.leaderMemberId(),
                    sessionEventDecoder.code(),
                    sessionEventDecoder.detail());
            }
            break;
        }
        case NewLeaderEventDecoder.TEMPLATE_ID: {
            newLeaderEventDecoder.wrap(
                buffer,
                offset + MessageHeaderDecoder.ENCODED_LENGTH,
                messageHeaderDecoder.blockLength(),
                messageHeaderDecoder.version());
            final long sessionId = newLeaderEventDecoder.clusterSessionId();
            if (sessionId == clusterSessionId) {
                listener.onNewLeader(
                    sessionId,
                    newLeaderEventDecoder.leadershipTermId(),
                    newLeaderEventDecoder.leaderMemberId(),
                    newLeaderEventDecoder.ingressEndpoints());
            }
            break;
        }
        case AdminResponseDecoder.TEMPLATE_ID: {
            adminResponseDecoder.wrap(
                buffer,
                offset + MessageHeaderDecoder.ENCODED_LENGTH,
                messageHeaderDecoder.blockLength(),
                messageHeaderDecoder.version());
            final long sessionId = adminResponseDecoder.clusterSessionId();
            if (sessionId == clusterSessionId) {
                final long correlationId = adminResponseDecoder.correlationId();
                final AdminRequestType requestType = adminResponseDecoder.requestType();
                final AdminResponseCode responseCode = adminResponseDecoder.responseCode();
                final String message = adminResponseDecoder.message();
                // The payload follows the fixed block, the variable-length message (header +
                // bytes), and the payload's own length header.
                final int payloadOffset = adminResponseDecoder.offset() +
                    AdminResponseDecoder.BLOCK_LENGTH +
                    AdminResponseDecoder.messageHeaderLength() +
                    message.length() +
                    AdminResponseDecoder.payloadHeaderLength();
                final int payloadLength = adminResponseDecoder.payloadLength();
                listener.onAdminResponse(
                    sessionId,
                    correlationId,
                    requestType,
                    responseCode,
                    message,
                    buffer,
                    payloadOffset,
                    payloadLength);
            }
            break;
        }
        default:
            // Other templates in this schema are not relevant to egress; ignore them.
            break;
    }
}
@Test
void onFragmentShouldInvokeOnAdminResponseCallbackIfSessionIdMatches() {
    // Encode an AdminResponse at an arbitrary offset and verify the adapter decodes it and
    // forwards all fields (including the trailing payload position) to the listener.
    final int offset = 24;
    final long clusterSessionId = 18;
    final long correlationId = 3274239749237498239L;
    final AdminRequestType type = AdminRequestType.SNAPSHOT;
    final AdminResponseCode responseCode = AdminResponseCode.UNAUTHORISED_ACCESS;
    final String message = "Unauthorised access detected!";
    final byte[] payload = new byte[]{ 0x1, 0x2, 0x3 };
    adminResponseEncoder
        .wrapAndApplyHeader(buffer, offset, messageHeaderEncoder)
        .clusterSessionId(clusterSessionId)
        .correlationId(correlationId)
        .requestType(type)
        .responseCode(responseCode)
        .message(message);
    adminResponseEncoder.putPayload(payload, 0, payload.length);
    final EgressListener egressListener = mock(EgressListener.class);
    final Header header = new Header(1, 3);
    final EgressAdapter adapter = new EgressAdapter(egressListener, clusterSessionId, mock(Subscription.class), 10);
    adapter.onFragment(buffer, offset, adminResponseEncoder.encodedLength(), header);
    // Payload sits at the very end of the encoded message.
    verify(egressListener).onAdminResponse(
        clusterSessionId,
        correlationId,
        type,
        responseCode,
        message,
        buffer,
        offset + MessageHeaderEncoder.ENCODED_LENGTH + adminResponseEncoder.encodedLength() - payload.length,
        payload.length);
    verifyNoMoreInteractions(egressListener);
}
/**
 * Looks up the HPACK static-table index for a header name/value pair, matching the name
 * case-insensitively.
 *
 * @param name the header name to look up
 * @param value the header value; an empty value matches name-only entries
 * @return the static-table index, or {@code NOT_FOUND} when no entry matches
 */
static int getIndexInsensitive(CharSequence name, CharSequence value) {
    if (value.length() == 0) {
        // Empty value: only name-only entries qualify.
        final HeaderNameIndex entry = getEntry(name);
        if (entry == null || !entry.emptyValue) {
            return NOT_FOUND;
        }
        return entry.index;
    }
    final HeaderIndex candidate = HEADERS_WITH_NON_EMPTY_VALUES[headerBucket(value)];
    if (candidate == null
            || !equalsVariableTime(candidate.name, name)
            || !equalsVariableTime(candidate.value, value)) {
        return NOT_FOUND;
    }
    return candidate.index;
}
@Test
public void testExistingHeaderNameAndValueSecondMatch() {
    // Index 7 is the ":scheme: https" entry of the HPACK static table (RFC 7541, Appendix A);
    // it is the second ":scheme" entry after ":scheme: http" at index 6.
    assertEquals(7, HpackStaticTable.getIndexInsensitive(
        AsciiString.cached(":scheme"), AsciiString.cached("https")));
}
/**
 * Returns a new {@link Builder} for assembling a custom configuration.
 */
public static Builder custom() {
    return new Builder();
}
// A negative core thread pool size must be rejected at build time.
@Test(expected = IllegalArgumentException.class)
public void testBuildWithIllegalCoreThreadPoolSize() {
    ThreadPoolBulkheadConfig.custom()
        .coreThreadPoolSize(-1)
        .build();
}
/**
 * Unsupported: this set is read-only.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public boolean addAll(Collection<? extends R> c) {
    throw new UnsupportedOperationException("LazySet is not modifiable");
}
// Mutating the lazy set, even with an empty collection, must be rejected.
@Test(expected = UnsupportedOperationException.class)
public void testAddAll_throwsException() {
    set.addAll(Collections.emptyList());
}
/**
 * Rolls back the in-flight cluster state transaction identified by {@code txnId}.
 *
 * @param txnId the transaction attempting the rollback; NOTE(review): a null id appears to
 *              surface as a NullPointerException from {@code allowsUnlock} — confirm
 * @return {@code true} if the lock was released; {@code false} if {@code txnId} does not own
 *         the current state lock
 */
public boolean rollbackClusterState(UUID txnId) {
    clusterServiceLock.lock();
    try {
        final LockGuard currentLock = getStateLock();
        if (!currentLock.allowsUnlock(txnId)) {
            // Another transaction holds the lock (or none matches this id) — refuse.
            return false;
        }
        logger.fine("Rolling back cluster state transaction: " + txnId);
        stateLockRef.set(LockGuard.NOT_LOCKED);
        // if state allows join after rollback, then remove all members which left during transaction.
        if (state.isJoinAllowed()) {
            node.getClusterService().getMembershipManager().removeAllMissingMembers();
        }
        return true;
    } finally {
        clusterServiceLock.unlock();
    }
}
// A null transaction id is a programming error and must fail fast with NPE.
@Test(expected = NullPointerException.class)
public void test_unlockClusterState_nullTransactionId() {
    clusterStateManager.rollbackClusterState(null);
}
/**
 * Translates a {@code DeleteRecordsResponse} into per-partition results, classifying each
 * partition as completed, failed, unmapped (leader moved), or retriable.
 *
 * <p>Also sanity-checks that the broker answered for every requested partition; silent
 * omissions are converted into failures so callers do not hang on missing results.
 */
@Override
public ApiResult<TopicPartition, DeletedRecords> handleResponse(
    Node broker,
    Set<TopicPartition> keys,
    AbstractResponse abstractResponse
) {
    DeleteRecordsResponse response = (DeleteRecordsResponse) abstractResponse;
    Map<TopicPartition, DeletedRecords> completed = new HashMap<>();
    Map<TopicPartition, Throwable> failed = new HashMap<>();
    List<TopicPartition> unmapped = new ArrayList<>();
    Set<TopicPartition> retriable = new HashSet<>();
    for (DeleteRecordsResponseData.DeleteRecordsTopicResult topicResult: response.data().topics()) {
        for (DeleteRecordsResponseData.DeleteRecordsPartitionResult partitionResult : topicResult.partitions()) {
            Errors error = Errors.forCode(partitionResult.errorCode());
            TopicPartition topicPartition = new TopicPartition(topicResult.name(), partitionResult.partitionIndex());
            if (error == Errors.NONE) {
                completed.put(topicPartition, new DeletedRecords(partitionResult.lowWatermark()));
            } else {
                // Delegate classification into failed / unmapped / retriable.
                handlePartitionError(topicPartition, error, failed, unmapped, retriable);
            }
        }
    }
    // Sanity-check if the current leader for these partitions returned results for all of them
    for (TopicPartition topicPartition : keys) {
        if (unmapped.isEmpty()
            && !completed.containsKey(topicPartition)
            && !failed.containsKey(topicPartition)
            && !retriable.contains(topicPartition)
        ) {
            ApiException sanityCheckException = new ApiException(
                "The response from broker " + broker.id() +
                    " did not contain a result for topic partition " + topicPartition);
            log.error(
                "DeleteRecords request for topic partition {} failed sanity check",
                topicPartition,
                sanityCheckException);
            failed.put(topicPartition, sanityCheckException);
        }
    }
    return new ApiResult<>(completed, failed, unmapped);
}
@Test
public void testHandleSuccessfulResponse() {
    // A response with no per-partition errors should mark every requested partition completed.
    AdminApiHandler.ApiResult<TopicPartition, DeletedRecords> result =
        handleResponse(createResponse(emptyMap(), recordsToDelete.keySet()));
    assertResult(result, recordsToDelete.keySet(), emptyMap(), emptyList(), emptySet());
}
/**
 * Reports overall server health as a plain string: "UP", "WARN:..." when only a slave DB is
 * down, or "DOWN:..." listing each failing component (DB, address server, IP whitelist).
 */
@GetMapping
public String getHealth() {
    // TODO UP DOWN WARN
    StringBuilder sb = new StringBuilder();
    String dbStatus = dataSourceService.getHealth();
    boolean addressServerHealthy = isAddressServerHealthy();
    if (dbStatus.contains(HEALTH_UP) && addressServerHealthy && ServerMemberManager.isInIpList()) {
        sb.append(HEALTH_UP);
    } else if (dbStatus.contains(HEALTH_WARN) && addressServerHealthy && ServerMemberManager.isInIpList()) {
        sb.append("WARN:");
        // NOTE(review): split(":")[1] assumes dbStatus is of the form "WARN:<detail>";
        // a status without a colon would throw ArrayIndexOutOfBoundsException — confirm.
        sb.append("slave db (").append(dbStatus.split(":")[1]).append(") down. ");
    } else {
        sb.append("DOWN:");
        if (dbStatus.contains(HEALTH_DOWN)) {
            sb.append("master db (").append(dbStatus.split(":")[1]).append(") down. ");
        }
        if (!addressServerHealthy) {
            sb.append("address server down. ");
        }
        if (!ServerMemberManager.isInIpList()) {
            sb.append("server ip ").append(InetUtils.getSelfIP())
                .append(" is not in the serverList of address server. ");
        }
    }
    return sb.toString();
}
@Test
void testGetHealthWhenTheLoopUpInfoParseError() throws Exception {
    // DB is UP, but the address-server health flag is an unparseable value, which the
    // controller must treat as "address server down".
    when(dataSourceService.getHealth()).thenReturn("UP");
    when(memberManager.getLookup()).thenReturn(memberLookup);
    when(memberLookup.useAddressServer()).thenReturn(true);
    final HashMap<String, Object> info = new HashMap<>();
    info.put("addressServerHealth", "not boolean value");
    when(memberLookup.info()).thenReturn(info);
    MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.get(Constants.HEALTH_CONTROLLER_PATH);
    String actualValue = mockmvc.perform(builder).andReturn().getResponse().getContentAsString();
    assertEquals("DOWN:address server down. ", actualValue);
}
/**
 * Fails if the multimap under test contains {@code key}; the check is delegated to the
 * key-set view so failure messages reference {@code keySet()}.
 */
public final void doesNotContainKey(@Nullable Object key) {
    check("keySet()").that(checkNotNull(actual).keySet()).doesNotContain(key);
}
@Test
public void doesNotContainKeyFailure() {
    // Asserting absence of a present key should fail and report the keySet() view.
    ImmutableMultimap<String, String> multimap = ImmutableMultimap.of("kurt", "kluever");
    expectFailureWhenTestingThat(multimap).doesNotContainKey("kurt");
    assertFailureKeys("value of", "expected not to contain", "but was", "multimap was");
    assertFailureValue("value of", "multimap.keySet()");
    assertFailureValue("expected not to contain", "kurt");
    assertFailureValue("but was", "[kurt]");
}
/**
 * Marks a previously lost node at {@code address} as found again and notifies all
 * registered change listeners.
 *
 * @param address the node's address; must not be null
 */
public synchronized void lostNodeFound(Address address) {
    Preconditions.checkNotNull(address, "address should not be null");
    mLostNodes.remove(address);
    mChangeListeners.forEach(Runnable::run);
}
@Test
public void lostNodeFound() {
    // Losing both nodes removes them from the config map; finding one back restores only it.
    ConfigurationStore configStore = createConfigStore();
    configStore.handleNodeLost(mAddressOne);
    configStore.handleNodeLost(mAddressTwo);
    Map<Address, List<ConfigRecord>> confMap = configStore.getConfMap();
    assertFalse(confMap.containsKey(mAddressOne));
    assertFalse(confMap.containsKey(mAddressTwo));
    configStore.lostNodeFound(mAddressTwo);
    confMap = configStore.getConfMap();
    assertFalse(confMap.containsKey(mAddressOne));
    assertTrue(confMap.containsKey(mAddressTwo));
}
/**
 * Appends a record to the accumulator for the given topic, choosing a partition via the
 * built-in sticky partitioner when {@code partition} is {@code UNKNOWN_PARTITION}.
 *
 * <p>Retries in a loop because the sticky partition is peeked before the per-partition deque
 * lock is taken; if the partition changed in between, the iteration restarts. A buffer is
 * allocated lazily only when no in-progress batch can absorb the record, and ownership of
 * that buffer transfers to the new batch (so it must not be deallocated in that case).
 *
 * @throws InterruptedException if blocked waiting for buffer space and interrupted
 */
public RecordAppendResult append(String topic,
                                 int partition,
                                 long timestamp,
                                 byte[] key,
                                 byte[] value,
                                 Header[] headers,
                                 AppendCallbacks callbacks,
                                 long maxTimeToBlock,
                                 boolean abortOnNewBatch,
                                 long nowMs,
                                 Cluster cluster) throws InterruptedException {
    TopicInfo topicInfo = topicInfoMap.computeIfAbsent(topic, k -> new TopicInfo(createBuiltInPartitioner(logContext, k, batchSize)));
    // We keep track of the number of appending thread to make sure we do not miss batches in
    // abortIncompleteBatches().
    appendsInProgress.incrementAndGet();
    ByteBuffer buffer = null;
    if (headers == null) headers = Record.EMPTY_HEADERS;
    try {
        // Loop to retry in case we encounter partitioner's race conditions.
        while (true) {
            // If the message doesn't have any partition affinity, so we pick a partition based on the broker
            // availability and performance.  Note, that here we peek current partition before we hold the
            // deque lock, so we'll need to make sure that it's not changed while we were waiting for the
            // deque lock.
            final BuiltInPartitioner.StickyPartitionInfo partitionInfo;
            final int effectivePartition;
            if (partition == RecordMetadata.UNKNOWN_PARTITION) {
                partitionInfo = topicInfo.builtInPartitioner.peekCurrentPartitionInfo(cluster);
                effectivePartition = partitionInfo.partition();
            } else {
                partitionInfo = null;
                effectivePartition = partition;
            }

            // Now that we know the effective partition, let the caller know.
            setPartition(callbacks, effectivePartition);

            // check if we have an in-progress batch
            Deque<ProducerBatch> dq = topicInfo.batches.computeIfAbsent(effectivePartition, k -> new ArrayDeque<>());
            synchronized (dq) {
                // After taking the lock, validate that the partition hasn't changed and retry.
                if (partitionChanged(topic, topicInfo, partitionInfo, dq, nowMs, cluster))
                    continue;

                RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callbacks, dq, nowMs);
                if (appendResult != null) {
                    // If queue has incomplete batches we disable switch (see comments in updatePartitionInfo).
                    boolean enableSwitch = allBatchesFull(dq);
                    topicInfo.builtInPartitioner.updatePartitionInfo(partitionInfo, appendResult.appendedBytes, cluster, enableSwitch);
                    return appendResult;
                }
            }

            // we don't have an in-progress record batch try to allocate a new batch
            if (abortOnNewBatch) {
                // Return a result that will cause another call to append.
                return new RecordAppendResult(null, false, false, true, 0);
            }

            if (buffer == null) {
                byte maxUsableMagic = apiVersions.maxUsableProduceMagic();
                int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression.type(), key, value, headers));
                log.trace("Allocating a new {} byte message buffer for topic {} partition {} with remaining timeout {}ms", size, topic, effectivePartition, maxTimeToBlock);
                // This call may block if we exhausted buffer space.
                buffer = free.allocate(size, maxTimeToBlock);
                // Update the current time in case the buffer allocation blocked above.
                // NOTE: getting time may be expensive, so calling it under a lock
                // should be avoided.
                nowMs = time.milliseconds();
            }

            synchronized (dq) {
                // After taking the lock, validate that the partition hasn't changed and retry.
                if (partitionChanged(topic, topicInfo, partitionInfo, dq, nowMs, cluster))
                    continue;

                RecordAppendResult appendResult = appendNewBatch(topic, effectivePartition, dq, timestamp, key, value, headers, callbacks, buffer, nowMs);
                // Set buffer to null, so that deallocate doesn't return it back to free pool, since it's used in the batch.
                if (appendResult.newBatchCreated)
                    buffer = null;
                // If queue has incomplete batches we disable switch (see comments in updatePartitionInfo).
                boolean enableSwitch = allBatchesFull(dq);
                topicInfo.builtInPartitioner.updatePartitionInfo(partitionInfo, appendResult.appendedBytes, cluster, enableSwitch);
                return appendResult;
            }
        }
    } finally {
        // Safe with a null buffer (when it was handed to a new batch or never allocated).
        free.deallocate(buffer);
        appendsInProgress.decrementAndGet();
    }
}
@Test
public void testSplitFrequency() throws InterruptedException {
    // Seed is logged on failure so the random mix of well/poorly compressing messages
    // can be reproduced.
    long seed = System.currentTimeMillis();
    Random random = new Random();
    random.setSeed(seed);
    final int batchSize = 1024;
    final int numMessages = 1000;
    RecordAccumulator accum = createTestRecordAccumulator(batchSize, 3 * 1024, Compression.gzip().build(), 10);
    // Adjust the high and low compression ratio message percentage
    for (int goodCompRatioPercentage = 1; goodCompRatioPercentage < 100; goodCompRatioPercentage++) {
        int numSplit = 0;
        int numBatches = 0;
        CompressionRatioEstimator.resetEstimation(topic);
        for (int i = 0; i < numMessages; i++) {
            int dice = random.nextInt(100);
            byte[] value = (dice < goodCompRatioPercentage) ?
                bytesWithGoodCompression(random) : bytesWithPoorCompression(random, 100);
            accum.append(topic, partition1, 0L, null, value, Record.EMPTY_HEADERS, null, 0, false, time.milliseconds(), cluster);
            BatchDrainedResult result = completeOrSplitBatches(accum, batchSize);
            numSplit += result.numSplit;
            numBatches += result.numBatches;
        }
        time.sleep(10);
        BatchDrainedResult result = completeOrSplitBatches(accum, batchSize);
        numSplit += result.numSplit;
        numBatches += result.numBatches;
        // With an adaptive compression estimate, fewer than 10% of batches should need a split.
        assertTrue((double) numSplit / numBatches < 0.1f,
            String.format("Total num batches = %d, split batches = %d, more than 10%% of the batch splits. "
                + "Random seed is " + seed, numBatches, numSplit));
    }
}
/**
 * Parses a compact time specification such as {@code 3m} or {@code 4d} into a
 * {@link Duration}. Accepted units are d (days), h (hours), m (minutes), s (seconds);
 * whitespace between the number and the unit is tolerated.
 *
 * @param timeStr the specification to parse
 * @return the equivalent duration
 * @throws IllegalArgumentException if the string does not match {@code <number>[dhms]}
 */
@VisibleForTesting
static Duration parseToDuration(String timeStr) {
    final Matcher matcher = Pattern.compile("(?<value>\\d+)\\s*(?<time>[dhms])").matcher(timeStr);
    if (!matcher.matches()) {
        throw new IllegalArgumentException("Expected a time specification in the form <number>[d,h,m,s], e.g. 3m, but found [" + timeStr + "]");
    }
    final int value = Integer.parseInt(matcher.group("value"));
    final String timeSpecifier = matcher.group("time");
    final TemporalUnit unit;
    if ("d".equals(timeSpecifier)) {
        unit = ChronoUnit.DAYS;
    } else if ("h".equals(timeSpecifier)) {
        unit = ChronoUnit.HOURS;
    } else if ("m".equals(timeSpecifier)) {
        unit = ChronoUnit.MINUTES;
    } else if ("s".equals(timeSpecifier)) {
        unit = ChronoUnit.SECONDS;
    } else {
        // Unreachable while the regex restricts the unit to [dhms]; kept as a guard.
        throw new IllegalStateException("Expected a time unit specification from d,h,m,s but found: [" + timeSpecifier + "]");
    }
    return Duration.of(value, unit);
}
@Test
public void testParseToDurationSuccessfullyParseExpectedFormats() {
    // One case per supported unit: days, hours, minutes, seconds.
    assertEquals(Duration.of(4, ChronoUnit.DAYS), AbstractPipelineExt.parseToDuration("4d"));
    assertEquals(Duration.of(3, ChronoUnit.HOURS), AbstractPipelineExt.parseToDuration("3h"));
    assertEquals(Duration.of(2, ChronoUnit.MINUTES), AbstractPipelineExt.parseToDuration("2m"));
    assertEquals(Duration.of(1, ChronoUnit.SECONDS), AbstractPipelineExt.parseToDuration("1s"));
}
/**
 * Builds a {@code UserOperatorConfig} from the given key/value map, considering only the
 * keys this operator declares and parsing them against {@code CONFIG_VALUES}.
 *
 * @param map raw configuration entries (e.g. environment variables)
 * @return the parsed operator configuration
 */
public static UserOperatorConfig buildFromMap(Map<String, String> map) {
    // Work on a copy restricted to recognized keys; the caller's map is left untouched.
    Map<String, String> relevantEntries = new HashMap<>(map);
    relevantEntries.keySet().retainAll(UserOperatorConfig.keyNames());
    return new UserOperatorConfig(ConfigParameter.define(relevantEntries, CONFIG_VALUES));
}
@Test
public void testFromMapInvalidLabelsStringThrows() {
    // A malformed label selector (",label1=") must be rejected during parsing.
    Map<String, String> envVars = new HashMap<>(UserOperatorConfigTest.ENV_VARS);
    envVars.put(UserOperatorConfig.LABELS.key(), ",label1=");
    assertThrows(InvalidConfigurationException.class, () -> UserOperatorConfig.buildFromMap(envVars));
}
/**
 * Creates a new {@code RuleDto} from the given rule definition, attaches its generated
 * description sections, and records the creation on the registration context.
 *
 * @param context the registration context that tracks created rules
 * @param ruleDef the rule definition to materialize
 * @return the freshly created rule DTO
 */
RuleDto createNewRule(RulesRegistrationContext context, RulesDefinition.Rule ruleDef) {
    final RuleDto rule = createRuleWithSimpleFields(ruleDef, uuidFactory.create(), system2.now());
    ruleDescriptionSectionsGeneratorResolver
            .generateFor(ruleDef)
            .forEach(rule::addRuleDescriptionSectionDto);
    context.created(rule);
    return rule;
}
// NOTE(review): despite the "shouldReturnNull" suffix, this verifies the created rule's
// clean-code attribute and impacts are empty for security hotspots — consider renaming.
@Test
public void createNewRule_whenRuleDefinitionDoesHaveCleanCodeAttributeAndIsSecurityHotspot_shouldReturnNull() {
    RulesDefinition.Rule ruleDef = getDefaultRule(CleanCodeAttribute.TESTED, RuleType.SECURITY_HOTSPOT);
    RuleDto newRuleDto = underTest.createNewRule(context, ruleDef);
    assertThat(newRuleDto.getCleanCodeAttribute()).isNull();
    assertThat(newRuleDto.getDefaultImpacts()).isEmpty();
}
/**
 * Rewrites an OR predicate by collapsing groups of same-attribute inner predicates into a
 * more index-friendly form, when at least {@code MINIMUM_NUMBER_OF_OR_TO_REPLACE} predicates
 * target the same attribute. Returns the original predicate unchanged when no group is
 * large enough.
 */
@Override
public Predicate visit(OrPredicate orPredicate, IndexRegistry indexes) {
    Predicate[] originalInnerPredicates = orPredicate.predicates;
    if (originalInnerPredicates == null || originalInnerPredicates.length < MINIMUM_NUMBER_OF_OR_TO_REPLACE) {
        return orPredicate;
    }
    // Group replaceable inner predicates by the attribute they reference.
    InternalListMultiMap<String, Integer> candidates = findAndGroupCandidates(originalInnerPredicates);
    if (candidates == null) {
        return orPredicate;
    }
    int toBeRemoved = 0;
    boolean modified = false;
    Predicate[] target = originalInnerPredicates;
    for (Map.Entry<String, List<Integer>> candidate : candidates.entrySet()) {
        String attribute = candidate.getKey();
        List<Integer> positions = candidate.getValue();
        if (positions.size() < MINIMUM_NUMBER_OF_OR_TO_REPLACE) {
            // Too few predicates on this attribute to be worth replacing.
            continue;
        }
        if (!modified) {
            // Copy-on-write: the original array is only cloned once a replacement happens.
            modified = true;
            target = createCopy(target);
        }
        toBeRemoved = replaceForAttribute(attribute, target, positions, toBeRemoved);
    }
    // Compact the array (dropping the slots freed by replacements) and rebuild if changed.
    Predicate[] newInnerPredicates = replaceInnerPredicates(target, toBeRemoved);
    return getOrCreateFinalPredicate(orPredicate, originalInnerPredicates, newInnerPredicates);
}
@Test
public void whenThresholdExceeded_noEnoughCandidatesFound_thenReturnItself() {
    // (age = 1 or age = 2 or age = 3 or age = 4 or age != 5)
    // Only four equal() predicates share the attribute; the fifth is notEqual, so no group
    // reaches the replacement threshold and the visitor must return the predicate unchanged.
    Predicate p1 = equal("age", 1);
    Predicate p2 = equal("age", 2);
    Predicate p3 = equal("age", 3);
    Predicate p4 = equal("age", 4);
    Predicate p5 = notEqual("age", 5);
    OrPredicate or = (OrPredicate) or(p1, p2, p3, p4, p5);
    OrPredicate result = (OrPredicate) visitor.visit(or, indexes);
    assertThat(or).isEqualTo(result);
}
/**
 * Populates filesystem-based lock options for {@code basePath} unless a lock provider is
 * already configured in {@code props}.
 *
 * @param basePath the table base path used to derive the lock configuration
 * @param props the properties to augment in place
 */
public static void addLockOptions(String basePath, TypedProperties props) {
    final boolean providerConfigured = props.containsKey(HoodieLockConfig.LOCK_PROVIDER_CLASS_NAME.key());
    if (providerConfigured) {
        // Respect an explicitly configured provider; do not override it.
        return;
    }
    props.putAll(FileSystemBasedLockProvider.getLockConfig(basePath));
}
@Test
void testAddLockOptions() {
    // Empty properties: the filesystem-based provider must be injected.
    TypedProperties props1 = new TypedProperties();
    UtilHelpers.addLockOptions("path1", props1);
    assertEquals(FileSystemBasedLockProvider.class.getName(),
        props1.getString(HoodieLockConfig.LOCK_PROVIDER_CLASS_NAME.key()));
    // Pre-configured provider: properties must be left untouched.
    TypedProperties props2 = new TypedProperties();
    props2.put(HoodieLockConfig.LOCK_PROVIDER_CLASS_NAME.key(), "Dummy");
    UtilHelpers.addLockOptions("path2", props2);
    assertEquals(1, props2.size(), "Should not add lock options if the lock provider is already there.");
}
/**
 * Handles an incoming push notification: foreground apps only get a JS event, while
 * background apps additionally get a system notification posted.
 *
 * @throws InvalidNotificationException if the notification payload is invalid
 */
@Override
public void onReceived() throws InvalidNotificationException {
    if (mAppLifecycleFacade.isAppVisible()) {
        notifyReceivedToJS();
        return;
    }
    // App is in the background: surface a notification and fire the background JS event.
    postNotification(null);
    notifyReceivedBackgroundToJS();
}
@Test
public void onReceived_validDataForBackgroundApp_postNotificationAndNotifyJs() throws Exception {
    // Arrange: the app is in the background.
    setUpBackgroundApp();
    // Act
    final PushNotification uut = createUUT();
    uut.onReceived();
    // Assert: a system notification is posted AND the background JS event fires.
    ArgumentCaptor<Notification> notificationCaptor = ArgumentCaptor.forClass(Notification.class);
    verify(mNotificationManager).notify(anyInt(), notificationCaptor.capture());
    verify(mJsIOHelper).sendEventToJS(eq(NOTIFICATION_RECEIVED_BACKGROUND_EVENT_NAME),
        argThat(new isValidNotification(mNotificationBundle)), eq(mReactContext));
}
/**
 * Resolves the schema node's declared type to a {@code JType}, in priority order:
 * object (explicit or implied by "properties"), an explicit "existingJavaType", then the
 * JSON primitive types (string/number/integer/boolean/array), falling back to Object.
 * Afterwards, "format" or "media" refinements are applied unless a Java type was forced.
 */
@Override
public JType apply(String nodeName, JsonNode node, JsonNode parent, JClassContainer jClassContainer, Schema schema) {
    String propertyTypeName = getTypeName(node);
    JType type;
    // A node with a non-empty "properties" object is treated as an object even without type=object.
    if (propertyTypeName.equals("object") || node.has("properties") && node.path("properties").size() > 0) {
        type = ruleFactory.getObjectRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema);
    } else if (node.has("existingJavaType")) {
        String typeName = node.path("existingJavaType").asText();
        if (isPrimitive(typeName, jClassContainer.owner())) {
            type = primitiveType(typeName, jClassContainer.owner());
        } else {
            type = resolveType(jClassContainer, typeName);
        }
    } else if (propertyTypeName.equals("string")) {
        type = jClassContainer.owner().ref(String.class);
    } else if (propertyTypeName.equals("number")) {
        type = getNumberType(jClassContainer.owner(), ruleFactory.getGenerationConfig());
    } else if (propertyTypeName.equals("integer")) {
        type = getIntegerType(jClassContainer.owner(), node, ruleFactory.getGenerationConfig());
    } else if (propertyTypeName.equals("boolean")) {
        type = unboxIfNecessary(jClassContainer.owner().ref(Boolean.class), ruleFactory.getGenerationConfig());
    } else if (propertyTypeName.equals("array")) {
        type = ruleFactory.getArrayRule().apply(nodeName, node, parent, jClassContainer.getPackage(), schema);
    } else {
        type = jClassContainer.owner().ref(Object.class);
    }
    // "format" and "media" refinements only apply when no explicit Java type was requested;
    // "media" additionally only applies to string-typed nodes.
    if (!node.has("javaType") && !node.has("existingJavaType") && node.has("format")) {
        type = ruleFactory.getFormatRule().apply(nodeName, node.get("format"), node, type, schema);
    } else if (!node.has("javaType") && !node.has("existingJavaType") && propertyTypeName.equals("string") && node.has("media")) {
        type = ruleFactory.getMediaRule().apply(nodeName, node.get("media"), node, type, schema);
    }
    return type;
}
@Test
public void applyGeneratesIntegerUsingJavaTypeLongPrimitiveWhenMaximumGreaterThanIntegerMax() {
    // An integer whose maximum exceeds Integer.MAX_VALUE must widen to (primitive) long.
    JPackage jpackage = new JCodeModel()._package(getClass().getPackage().getName());
    ObjectNode objectNode = new ObjectMapper().createObjectNode();
    objectNode.put("type", "integer");
    objectNode.put("maximum", Integer.MAX_VALUE + 1L);
    when(config.isUsePrimitives()).thenReturn(true);
    JType result = rule.apply("fooBar", objectNode, null, jpackage, null);
    assertThat(result.fullName(), is("long"));
}
/**
 * Adds an exchange under {@code key} using optimistic locking semantics.
 * <p>
 * When {@code oldExchange} is null the caller expects no prior entry: a non-null value returned
 * by {@code Cache#getAndPut} means another party won the race and an
 * {@link OptimisticLockingException} is thrown. Otherwise a compare-and-swap via
 * {@code Cache#replace} must succeed, or the same exception is thrown.
 * Only supported when this repository is configured as optimistic.
 *
 * @return the previous exchange supplied by the caller (possibly null)
 * @throws OptimisticLockingException when a concurrent modification is detected
 * @throws UnsupportedOperationException when the repository is not optimistic
 */
@Override
public Exchange add(CamelContext camelContext, String key, Exchange oldExchange, Exchange newExchange) throws OptimisticLockingException {
    if (!optimistic) {
        throw new UnsupportedOperationException();
    }
    LOG.trace("Adding an Exchange with ID {} for key {} in an optimistic manner.", newExchange.getExchangeId(), key);
    if (oldExchange == null) {
        DefaultExchangeHolder newHolder = DefaultExchangeHolder.marshal(newExchange, true, allowSerializedHeaders);
        DefaultExchangeHolder oldHolder = cache.getAndPut(key, newHolder);
        if (oldHolder != null) {
            Exchange exchange = unmarshallExchange(camelContext, oldHolder);
            // Fixed message: the code calls Cache#getAndPut, not IMap#putIfAbsend (typo in the old text).
            LOG.error(
                    "Optimistic locking failed for exchange with key {}: Cache#getAndPut returned Exchange with ID {}, while it's expected no exchanges to be returned",
                    key, exchange != null ? exchange.getExchangeId() : "<null>");
            throw new OptimisticLockingException();
        }
    } else {
        DefaultExchangeHolder oldHolder = DefaultExchangeHolder.marshal(oldExchange, true, allowSerializedHeaders);
        DefaultExchangeHolder newHolder = DefaultExchangeHolder.marshal(newExchange, true, allowSerializedHeaders);
        if (!cache.replace(key, oldHolder, newHolder)) {
            // Fixed message: the code calls Cache#replace (returns false on mismatch), not IMap#replace.
            LOG.error(
                    "Optimistic locking failed for exchange with key {}: Cache#replace returned false, while it's expected to replace the old Exchange",
                    key);
            throw new OptimisticLockingException();
        }
    }
    LOG.trace("Added an Exchange with ID {} for key {} in optimistic manner.", newExchange.getExchangeId(), key);
    return oldExchange;
}
// Verifies that two repository instances backed by the same cache cooperate under optimistic
// locking: the first add (oldExchange == null) returns null, and a subsequent replace through a
// second repository returns the previously stored exchange body.
@Test public void checkOptimisticAddOfNewExchange() throws Exception { JCacheAggregationRepository repoOne = createRepository(true); JCacheAggregationRepository repoTwo = createRepository(true); repoOne.start(); repoTwo.start(); try { final String testBody = "This is an optimistic test body. Sincerely yours, Captain Obvious."; final String key = "optimisticKey"; Exchange newEx = createExchangeWithBody(testBody); Exchange oldEx = repoOne.add(context(), key, null, newEx); assertNull(oldEx, "Old exchange should be null."); final String theNewestBody = "This is the newest test body."; Exchange theNewestEx = createExchangeWithBody(theNewestBody); oldEx = repoTwo.add(context(), key, newEx, theNewestEx); assertEquals(newEx.getIn().getBody(), oldEx.getIn().getBody()); } finally { repoOne.stop(); repoTwo.stop(); } }
T getFunction(final List<SqlArgument> arguments) { // first try to get the candidates without any implicit casting Optional<T> candidate = findMatchingCandidate(arguments, false); if (candidate.isPresent()) { return candidate.get(); } else if (!supportsImplicitCasts) { throw createNoMatchingFunctionException(arguments); } // if none were found (candidate isn't present) try again with implicit casting candidate = findMatchingCandidate(arguments, true); if (candidate.isPresent()) { return candidate.get(); } throw createNoMatchingFunctionException(arguments); }
// Verifies that a generic parameter already bound to one type (A -> INTEGER) cannot be re-bound
// to a different type (STRING) in the same call, so resolution fails with a KsqlException.
@Test public void shouldNotMatchGenericMethodWithAlreadyReservedTypes() { // Given: final GenericType generic = GenericType.of("A"); givenFunctions( function(EXPECTED, -1, generic, generic) ); // When: final Exception e = assertThrows( KsqlException.class, () -> udfIndex.getFunction(ImmutableList.of(SqlArgument.of(INTEGER), SqlArgument.of(SqlTypes.STRING))) ); // Then: assertThat(e.getMessage(), containsString("Function 'name' does not accept parameters " + "(INTEGER, STRING)")); }
// Convenience overload: unpacks the statement's db name, table name and partition names and
// delegates to the (dbName, tblName, partitionNames) variant.
public static List<List<String>> getTabletDistribution(AdminShowReplicaDistributionStmt stmt) throws DdlException { return getTabletDistribution(stmt.getDbName(), stmt.getTblName(), stmt.getPartitionNames()); }
// Invokes the (reflectively obtained) getTabletDistribution method against the mocked catalog
// and checks that three distribution rows are returned for the test table.
@Test public void testGetTabletDistribution() throws IllegalAccessException, IllegalArgumentException, InvocationTargetException { Object[] args = new Object[] {CatalogMocker.TEST_DB_NAME, CatalogMocker.TEST_TBL_NAME, null}; List<List<String>> result = (List<List<String>>) getTabletDistributionMethod.invoke(null, args); Assert.assertEquals(3, result.size()); System.out.println(result); }
// Walks the FST from 'node' along sequence[start .. start+length), classifying the outcome:
// EXACT_MATCH (sequence ends on a final arc), AUTOMATON_HAS_PREFIX (a terminal arc or a missing
// arc after at least one transition means the automaton only covers a prefix of the input),
// NO_MATCH (dead start node or no arc on the first symbol), or SEQUENCE_IS_A_PREFIX (the whole
// input was consumed without reaching a final arc). The MatchResult is recycled via 'reuse'.
// NOTE(review): the EXACT_MATCH reset passes (i, node) — i.e. the index of the last consumed
// byte and the node *before* the final transition; presumably intentional, but worth confirming
// against MatchResult's documented contract, since other branches pass the next index.
public MatchResult match(MatchResult reuse, byte[] sequence, int start, int length, int node) { if (node == 0) { reuse.reset(MatchResult.NO_MATCH, start, node); return reuse; } final FST fst = _fst; final int end = start + length; for (int i = start; i < end; i++) { final int arc = fst.getArc(node, sequence[i]); if (arc != 0) { if (i + 1 == end && fst.isArcFinal(arc)) { /* The automaton has an exact match of the input sequence. */ reuse.reset(MatchResult.EXACT_MATCH, i, node); return reuse; } if (fst.isArcTerminal(arc)) { /* The automaton contains a prefix of the input sequence. */ reuse.reset(MatchResult.AUTOMATON_HAS_PREFIX, i + 1, node); return reuse; } // Make a transition along the arc. node = fst.getEndNode(arc); } else { if (i > start) { reuse.reset(MatchResult.AUTOMATON_HAS_PREFIX, i, node); } else { reuse.reset(MatchResult.NO_MATCH, i, node); } return reuse; } } /* The sequence is a prefix of at least one sequence in the automaton. */ reuse.reset(MatchResult.SEQUENCE_IS_A_PREFIX, 0, node); return reuse; }
// Exercises all four match outcomes against a small FST built from {aba, ac(?)} fixtures:
// prefix-of-automaton ("ax", "abalonger"), exact match ("aba"), and input-is-a-prefix ("ab"),
// also checking the reachable suffixes from the reported node.
@Test public void testMatch() throws IOException { File file = new File("./src/test/resources/data/abc.native.fst"); FST fst = FST.read(new FileInputStream(file), false, new DirectMemoryManager(FSTTraversalTest.class.getName())); FSTTraversal traversalHelper = new FSTTraversal(fst); MatchResult m = traversalHelper.match("ax".getBytes()); assertEquals(m._kind, AUTOMATON_HAS_PREFIX); assertEquals(m._index, 1); assertEquals(suffixes(fst, m._node), Sets.newHashSet("ba", "c")); assertEquals(traversalHelper.match("aba".getBytes())._kind, EXACT_MATCH); m = traversalHelper.match("abalonger".getBytes()); assertEquals(m._kind, AUTOMATON_HAS_PREFIX); assertEquals("abalonger".substring(m._index), "longer"); m = traversalHelper.match("ab".getBytes()); assertEquals(m._kind, SEQUENCE_IS_A_PREFIX); assertEquals(suffixes(fst, m._node), Sets.newHashSet("a")); }
@VisibleForTesting void validateExperienceOutRange(List<MemberLevelDO> list, Long id, Integer level, Integer experience) { for (MemberLevelDO levelDO : list) { if (levelDO.getId().equals(id)) { continue; } if (levelDO.getLevel() < level) { // 经验大于前一个等级 if (experience <= levelDO.getExperience()) { throw exception(LEVEL_EXPERIENCE_MIN, levelDO.getName(), levelDO.getExperience()); } } else if (levelDO.getLevel() > level) { //小于下一个级别 if (experience >= levelDO.getExperience()) { throw exception(LEVEL_EXPERIENCE_MAX, levelDO.getName(), levelDO.getExperience()); } } } }
// Seeds one level record and checks both out-of-range directions: a higher level with lower
// experience triggers LEVEL_EXPERIENCE_MIN, a lower level with higher experience triggers
// LEVEL_EXPERIENCE_MAX.
@Test public void testCreateLevel_experienceOutRange() { // 准备参数 int level = 10; int experience = 10; String name = randomString(); // mock 数据 memberlevelMapper.insert(randomLevelDO(o -> { o.setLevel(level); o.setExperience(experience); o.setName(name); })); List<MemberLevelDO> list = memberlevelMapper.selectList(); // 调用,校验异常 assertServiceException(() -> levelService.validateExperienceOutRange(list, null, level + 1, experience - 1), LEVEL_EXPERIENCE_MIN, name, level); // 调用,校验异常 assertServiceException(() -> levelService.validateExperienceOutRange(list, null, level - 1, experience + 1), LEVEL_EXPERIENCE_MAX, name, level); }
/**
 * Checks whether the given class name passes this checker's allow/deny rules,
 * guarded by the read lock so checks can run concurrently with rule updates.
 *
 * @return true when the class is permitted
 */
@Override
public boolean checkClass(ClassResolver classResolver, String className) {
    // Acquire the lock BEFORE the try block: if lock() were inside try and failed,
    // the finally clause would attempt to unlock a lock this thread never acquired
    // (throwing IllegalMonitorStateException and masking the original failure).
    lock.readLock().lock();
    try {
        return check(className);
    } finally {
        lock.readLock().unlock();
    }
}
// Covers both checker levels: STRICT rejects unlisted classes on serialize and deserialize
// (including after a class is explicitly disallowed), while WARN permits the first serialize
// but still rejects once the class is disallowed.
@Test public void testCheckClass() { { Fury fury = Fury.builder().requireClassRegistration(false).build(); AllowListChecker checker = new AllowListChecker(AllowListChecker.CheckLevel.STRICT); fury.getClassResolver().setClassChecker(checker); assertThrows(InsecureException.class, () -> fury.serialize(new AllowListCheckerTest())); checker.allowClass(AllowListCheckerTest.class.getName()); byte[] bytes = fury.serialize(new AllowListCheckerTest()); checker.addListener(fury.getClassResolver()); checker.disallowClass(AllowListCheckerTest.class.getName()); assertThrows(InsecureException.class, () -> fury.serialize(new AllowListCheckerTest())); assertThrows(InsecureException.class, () -> fury.deserialize(bytes)); } { Fury fury = Fury.builder().requireClassRegistration(false).build(); AllowListChecker checker = new AllowListChecker(AllowListChecker.CheckLevel.WARN); fury.getClassResolver().setClassChecker(checker); checker.addListener(fury.getClassResolver()); byte[] bytes = fury.serialize(new AllowListCheckerTest()); checker.disallowClass(AllowListCheckerTest.class.getName()); assertThrows(InsecureException.class, () -> fury.serialize(new AllowListCheckerTest())); assertThrows(InsecureException.class, () -> fury.deserialize(bytes)); } }
// Thin delegate: queries the TSDB server version using this connection's address and credentials.
@Override public String version() { return TSDBUtils.version(address, username, password); }
// Smoke test: a connection to the test TSDB address must report a non-null version string.
@Test public void testVersion() { String version = new TSDBConnection(TSDB_ADDRESS,null,null,null).version(); Assert.assertNotNull(version); }
// Reads a PostgreSQL boolean column by name.
// NOTE(review): ResultSet.getBoolean returns false for SQL NULL; a nullable handler would
// normally follow up with resultSet.wasNull() and return null — confirm whether callers rely
// on the current false-for-NULL behavior before changing it.
@Override public Boolean getNullableResult(final ResultSet resultSet, final String columnName) throws SQLException { return resultSet.getBoolean(columnName); }
// Checks that all three getNullableResult overloads (by index, by name, from a
// CallableStatement) complete without throwing against mocked JDBC objects.
@Test public void getNullableResultTest() { final PostgreSQLBooleanHandler postgreSQLBooleanHandler = new PostgreSQLBooleanHandler(); final ResultSet resultSet = mock(ResultSet.class); Assertions.assertDoesNotThrow(() -> postgreSQLBooleanHandler.getNullableResult(resultSet, 1)); Assertions.assertDoesNotThrow(() -> postgreSQLBooleanHandler.getNullableResult(resultSet, "column")); final CallableStatement callableStatement = mock(CallableStatement.class); Assertions.assertDoesNotThrow(() -> postgreSQLBooleanHandler.getNullableResult(callableStatement, 1)); }
// Creates a new Hive metastore client via reflection (GET_CLIENT handle). Reflection wraps any
// MetaException in a RuntimeException, so the cause is unwrapped and rethrown as MetaException
// to be converted to RuntimeMetaException. A special-cased Derby error gets a more actionable
// message (embedded Derby allows only one client at a time).
@Override protected IMetaStoreClient newClient() { try { try { return GET_CLIENT.invoke(hiveConf, (HiveMetaHookLoader) tbl -> null, HiveMetaStoreClient.class.getName()); } catch (RuntimeException e) { // any MetaException would be wrapped into RuntimeException during reflection, so let's double-check type here if (e.getCause() instanceof MetaException) { throw (MetaException) e.getCause(); } throw e; } } catch (MetaException e) { throw new RuntimeMetaException(e, "Failed to connect to Hive Metastore"); } catch (Throwable t) { if (t.getMessage() != null && t.getMessage().contains("Another instance of Derby may have already booted")) { throw new RuntimeMetaException(t, "Failed to start an embedded metastore because embedded " + "Derby supports only one client at a time. To fix this, use a metastore that supports " + "multiple clients."); } throw new RuntimeMetaException(t, "Failed to connect to Hive Metastore"); } }
// Verifies that a MetaException which is not reconnectable propagates unchanged from
// clients.run(...) instead of triggering a client reconnect/retry.
@Test public void testGetTablesFailsForNonReconnectableException() throws Exception { HiveMetaStoreClient hmsClient = Mockito.mock(HiveMetaStoreClient.class); Mockito.doReturn(hmsClient).when(clients).newClient(); Mockito.doThrow(new MetaException("Another meta exception")) .when(hmsClient) .getTables(Mockito.anyString(), Mockito.anyString()); assertThatThrownBy(() -> clients.run(client -> client.getTables("default", "t"))) .isInstanceOf(MetaException.class) .hasMessage("Another meta exception"); }
public FEELFnResult<BigDecimal> invoke(@ParameterName("from") String from, @ParameterName("grouping separator") String group, @ParameterName("decimal separator") String decimal) { if ( from == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null")); } if ( group != null && !group.equals( " " ) && !group.equals( "." ) && !group.equals( "," ) ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "group", "not a valid one, can only be one of: dot ('.'), comma (','), space (' ') ")); } if ( decimal != null ) { if (!decimal.equals( "." ) && !decimal.equals( "," )) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "decimal", "not a valid one, can only be one of: dot ('.'), comma (',') ")); } else if (group != null && decimal.equals( group )) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "decimal", "cannot be the same as parameter 'group' ")); } } if ( group != null ) { from = from.replaceAll( "\\" + group, "" ); } if ( decimal != null ) { from = from.replaceAll( "\\" + decimal, "." ); } BigDecimal result = NumberEvalHelper.getBigDecimalOrNull(from ); if( from != null && result == null ) { // conversion failed return FEELFnResult.ofError( new InvalidParametersEvent(Severity.ERROR, "unable to calculate final number result" ) ); } else { return FEELFnResult.ofResult( result ); } }
// A plain integer string with no separators parses to the equivalent BigDecimal.
@Test void invokeNumberWithoutDecimalPart() { FunctionTestUtil.assertResult(numberFunction.invoke("9876", null, null), BigDecimal.valueOf(9876)); }
// Delegates to the wrapped store and records the call latency in the
// getLargestSequence probe (timed from before the delegate call to after it returns).
@Override public long getLargestSequence() { long startNanos = Timer.nanos(); try { return delegate.getLargestSequence(); } finally { getLargestSequenceProbe.recordValue(Timer.nanosElapsed(startNanos)); } }
// Verifies both the pass-through value from the delegate and that the latency probe
// was recorded exactly once.
@Test public void getLargestSequence() { long largestSequence = 100L; when(delegate.getLargestSequence()).thenReturn(largestSequence); long result = ringbufferStore.getLargestSequence(); assertEquals(largestSequence, result); assertProbeCalledOnce("getLargestSequence"); }
// Returns true when the URL matches URL_PATTERN and its host component (capture group 2)
// is either a literal IP address or a syntactically valid internet domain name.
@VisibleForTesting static boolean isValidUrlFormat(String url) { Matcher matcher = URL_PATTERN.matcher(url); if (matcher.find()) { String host = matcher.group(2); return InetAddresses.isInetAddress(host) || InternetDomainName.isValid(host); } return false; }
// A bare hostname without a protocol prefix must be rejected.
@Test public void testMissingURLProtocol() { assertFalse(SplunkEventWriter.isValidUrlFormat("test-url")); }
/**
 * Groups a batch of change records by primary key, folding INSERT/UPDATE/DELETE
 * operations for the same key into a single effective record, and returns the
 * surviving records (order unspecified, as with the backing HashMap).
 */
public List<DataRecord> merge(final List<DataRecord> dataRecords) {
    final Map<DataRecord.Key, DataRecord> mergedByKey = new HashMap<>();
    for (final DataRecord record : dataRecords) {
        switch (record.getType()) {
            case INSERT:
                mergeInsert(record, mergedByKey);
                break;
            case UPDATE:
                mergeUpdate(record, mergedByKey);
                break;
            case DELETE:
                mergeDelete(record, mergedByKey);
                break;
            default:
                // Other operation types are ignored, matching the original if-chain.
                break;
        }
    }
    return new ArrayList<>(mergedByKey.values());
}
// An UPDATE followed by a DELETE for the same key collapses to just the DELETE record.
@Test void assertUpdateBeforeDelete() { DataRecord beforeDataRecord = mockUpdateDataRecord(1, 10, 50); DataRecord afterDataRecord = mockDeleteDataRecord(1, 10, 50); Collection<DataRecord> actual = groupEngine.merge(Arrays.asList(beforeDataRecord, afterDataRecord)); assertThat(actual.size(), is(1)); assertThat(actual.iterator().next(), sameInstance(afterDataRecord)); }
// Cached sharding route lookup. Bails out (empty) when the SQL is too long, when the cacheable
// checker says the statement is not cacheable, or when a recorded parameter-marker index is out
// of range for the supplied parameters. Otherwise it builds the cache key from the sharding
// condition parameters, returns a cached RouteContext when present, and falls back to the
// origin router — caching the fresh result only when it hit exactly one shard.
public Optional<RouteContext> loadRouteContext(final OriginSQLRouter originSQLRouter, final QueryContext queryContext, final RuleMetaData globalRuleMetaData, final ShardingSphereDatabase database, final ShardingCache shardingCache, final ConfigurationProperties props, final ConnectionContext connectionContext) { if (queryContext.getSql().length() > shardingCache.getConfiguration().getAllowedMaxSqlLength()) { return Optional.empty(); } ShardingRouteCacheableCheckResult cacheableCheckResult = shardingCache.getRouteCacheableChecker().check(database, queryContext); if (!cacheableCheckResult.isProbablyCacheable()) { return Optional.empty(); } List<Object> shardingConditionParams = new ArrayList<>(cacheableCheckResult.getShardingConditionParameterMarkerIndexes().size()); for (int each : cacheableCheckResult.getShardingConditionParameterMarkerIndexes()) { if (each >= queryContext.getParameters().size()) { return Optional.empty(); } shardingConditionParams.add(queryContext.getParameters().get(each)); } Optional<RouteContext> cachedResult = shardingCache.getRouteCache().get(new ShardingRouteCacheKey(queryContext.getSql(), shardingConditionParams)) .flatMap(ShardingRouteCacheValue::getCachedRouteContext); RouteContext result = cachedResult.orElseGet( () -> originSQLRouter.createRouteContext(queryContext, globalRuleMetaData, database, shardingCache.getShardingRule(), props, connectionContext)); if (!cachedResult.isPresent() && hitOneShardOnly(result)) { shardingCache.getRouteCache().put(new ShardingRouteCacheKey(queryContext.getSql(), shardingConditionParams), new ShardingRouteCacheValue(result)); } return Optional.of(result); }
// With the route cache primed, loadRouteContext must return a RouteContext equivalent to the
// cached one (same route units and data nodes) without consulting the origin router — note the
// origin router argument is null, so any fallback would NPE.
@Test void assertCreateRouteContextWithCacheHit() { QueryContext queryContext = new QueryContext(sqlStatementContext, "insert into t values (?, ?)", Arrays.asList(0, 1), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class)); when(shardingCache.getConfiguration()).thenReturn(new ShardingCacheConfiguration(100, null)); when(shardingCache.getRouteCacheableChecker()).thenReturn(mock(ShardingRouteCacheableChecker.class)); when(shardingCache.getRouteCacheableChecker().check(null, queryContext)).thenReturn(new ShardingRouteCacheableCheckResult(true, Collections.singletonList(1))); when(shardingCache.getRouteCache()).thenReturn(mock(ShardingRouteCache.class)); RouteContext expected = new RouteContext(); expected.getRouteUnits().add(new RouteUnit(new RouteMapper("ds_0", "ds_0"), Collections.singletonList(new RouteMapper("t", "t")))); expected.getOriginalDataNodes().add(Collections.singletonList(new DataNode("ds_0", "t"))); when(shardingCache.getRouteCache().get(any(ShardingRouteCacheKey.class))).thenReturn(Optional.of(new ShardingRouteCacheValue(expected))); Optional<RouteContext> actual = new CachedShardingSQLRouter().loadRouteContext(null, queryContext, mock(RuleMetaData.class), null, shardingCache, null, null); assertTrue(actual.isPresent()); RouteContext actualRouteContext = actual.get(); assertThat(actualRouteContext, not(expected)); assertThat(actualRouteContext.getOriginalDataNodes(), is(expected.getOriginalDataNodes())); assertThat(actualRouteContext.getRouteUnits(), is(expected.getRouteUnits())); }
/**
 * Creates a native Collector from a content-pack entity.
 * Only V1 entities are supported; anything else is rejected up front.
 */
@Override
public NativeEntity<Collector> createNativeEntity(Entity entity, Map<String, ValueReference> parameters, Map<EntityDescriptor, Object> nativeEntities, String username) {
    // Guard clause instead of if/else: fail fast on unsupported entity versions.
    if (!(entity instanceof EntityV1)) {
        throw new IllegalArgumentException("Unsupported entity version: " + entity.getClass());
    }
    return decode((EntityV1) entity, parameters);
}
// Round-trips a sidecar collector entity through the facade: the collector is persisted once,
// retrievable by name, and the returned native entity carries the matching descriptor.
@Test public void createNativeEntity() { final Entity entity = EntityV1.builder() .id(ModelId.of("0")) .type(ModelTypes.SIDECAR_COLLECTOR_V1) .data(objectMapper.convertValue(SidecarCollectorEntity.create( ValueReference.of("filebeat"), ValueReference.of("exec"), ValueReference.of("linux"), ValueReference.of("/usr/lib/graylog-sidecar/filebeat"), ValueReference.of("-c %s"), ValueReference.of("test config -c %s"), ValueReference.of("")), JsonNode.class)) .build(); assertThat(collectorService.count()).isEqualTo(0L); final NativeEntity<Collector> nativeEntity = facade.createNativeEntity(entity, Collections.emptyMap(), Collections.emptyMap(), "username"); assertThat(collectorService.count()).isEqualTo(1L); final Collector collector = collectorService.findByName("filebeat"); assertThat(collector).isNotNull(); final NativeEntityDescriptor expectedDescriptor = NativeEntityDescriptor.create(entity.id(), collector.id(), ModelTypes.SIDECAR_COLLECTOR_V1, collector.name(), false); assertThat(nativeEntity.descriptor()).isEqualTo(expectedDescriptor); assertThat(nativeEntity.entity()).isEqualTo(collector); }
// Redo task tick: skips entirely when the gRPC connection is down, otherwise replays pending
// instance registrations and subscriptions; any exception is logged but never propagated, so a
// failed tick cannot kill the scheduling thread.
@Override public void run() { if (!redoService.isConnected()) { LogUtils.NAMING_LOGGER.warn("Grpc Connection is disconnect, skip current redo task"); return; } try { redoForInstances(); redoForSubscribes(); } catch (Exception e) { LogUtils.NAMING_LOGGER.warn("Redo task run with unexpected exception: ", e); } }
// When the client proxy is disabled, pending instance redo data must NOT trigger a
// re-registration call.
@Test void testRunRedoRegisterInstanceWithClientDisabled() throws NacosException { when(clientProxy.isEnable()).thenReturn(false); Set<InstanceRedoData> mockData = generateMockInstanceData(false, false, true); when(redoService.findInstanceRedoData()).thenReturn(mockData); redoTask.run(); verify(clientProxy, never()).doRegisterService(SERVICE, GROUP, INSTANCE); }
// Wraps the delegate's overlay data and repairs low-contrast text: when the luminance gap
// between background and primary text falls below mRequiredTextColorDiff, the text colors are
// forced to black/dark-gray on light backgrounds or white/light-gray on dark ones. Invalid
// overlays are only touched when mFixInvalid is set. Mutates and returns the delegate's object.
@Override public OverlayData createOverlayData(ComponentName remoteApp) { final OverlayData original = mOriginal.createOverlayData(remoteApp); if (original.isValid() || mFixInvalid) { final int backgroundLuminance = luminance(original.getPrimaryColor()); final int diff = backgroundLuminance - luminance(original.getPrimaryTextColor()); if (mRequiredTextColorDiff > Math.abs(diff)) { if (backgroundLuminance > GRAY_LUM) { // closer to white, text will be black original.setPrimaryTextColor(Color.BLACK); original.setSecondaryTextColor(Color.DKGRAY); } else { original.setPrimaryTextColor(Color.WHITE); original.setSecondaryTextColor(Color.LTGRAY); } } } return original; }
// Gray background with dark-gray text is too low-contrast: the same (mutated) instance is
// returned with primary text forced to white while background colors are untouched.
@Test public void testReturnsFixedIfTextIsTooClose() { OverlayData original = setupOriginal(Color.GRAY, Color.DKGRAY, Color.LTGRAY); final OverlayData fixed = mUnderTest.createOverlayData(mTestComponent); Assert.assertSame(original, fixed); Assert.assertTrue(fixed.isValid()); Assert.assertEquals(Color.GRAY, fixed.getPrimaryColor()); Assert.assertEquals(Color.DKGRAY, fixed.getPrimaryDarkColor()); Assert.assertEquals(Color.WHITE, fixed.getPrimaryTextColor()); Assert.assertEquals(Color.LTGRAY, fixed.getSecondaryTextColor()); }
// Instantiates the TopicFilter implementation configured under TOPIC_FILTER_CLASS.
TopicFilter topicFilter() { return getConfiguredInstance(TOPIC_FILTER_CLASS, TopicFilter.class); }
// The ".*" topics wildcard must make the configured filter replicate any topic name.
@Test public void testAllTopics() { MirrorSourceConfig config = new MirrorSourceConfig(makeProps("topics", ".*")); assertTrue(config.topicFilter().shouldReplicateTopic("topic1"), "topic1 created from wildcard should exist"); assertTrue(config.topicFilter().shouldReplicateTopic("topic2"), "topic2 created from wildcard should exist"); }
// Synchronously sends any Telegram Bot API request through the underlying API client and
// returns its typed response.
public <T extends BaseRequest<T, R>, R extends BaseResponse> R execute(BaseRequest<T, R> request) { return api.send(request); }
// Sends a two-document media group (one from a File, one from bytes with explicit filename and
// content type) and checks both messages share a media-group id.
@Test public void sendMediaGroupDocuments() { MessagesResponse response = bot.execute(new SendMediaGroup(chatId, new InputMediaDocument(docFile), new InputMediaDocument(docBytes).fileName("test.pdf").contentType("application/pdf") )); assertTrue(response.isOk()); assertEquals(2, response.messages().length); assertNotNull(response.messages()[0].mediaGroupId()); System.out.println(response.messages()[0].document()); System.out.println(response.messages()[1].document()); }
// Returns (creating on first use) the proxy implementing the requested PipelineOptions
// sub-interface, backed by this invocation handler. Uses double-checked locking on the
// per-handler proxy cache so each interface is validated and proxied at most once; the
// computedProperties snapshot is replaced atomically under the lock.
<T extends PipelineOptions> T as(Class<T> iface) { checkNotNull(iface); checkArgument(iface.isInterface(), "Not an interface: %s", iface); T existingOption = computedProperties.interfaceToProxyCache.getInstance(iface); if (existingOption == null) { synchronized (this) { // double check existingOption = computedProperties.interfaceToProxyCache.getInstance(iface); if (existingOption == null) { Registration<T> registration = PipelineOptionsFactory.CACHE .get() .validateWellFormed(iface, computedProperties.knownInterfaces); List<PropertyDescriptor> propertyDescriptors = registration.getPropertyDescriptors(); Class<T> proxyClass = registration.getProxyClass(); existingOption = InstanceBuilder.ofType(proxyClass) .fromClass(proxyClass) .withArg(InvocationHandler.class, this) .build(); computedProperties = computedProperties.updated(iface, existingOption, propertyDescriptors); } } } return existingOption; }
// After serialize/deserialize of options holding a Class value whose interface is not
// registered, the display data must still surface the option as its JSON string form.
@Test public void testDisplayDataMissingPipelineOptionsRegistration() throws Exception { HasClassOptions options = PipelineOptionsFactory.as(HasClassOptions.class); options.setClassOption(ProxyInvocationHandlerTest.class); PipelineOptions deserializedOptions = serializeDeserialize(PipelineOptions.class, options); DisplayData displayData = DisplayData.from(deserializedOptions); String expectedJsonValue = MAPPER.writeValueAsString(ProxyInvocationHandlerTest.class); assertThat(displayData, hasDisplayItem("classOption", expectedJsonValue)); }
/**
 * Returns the current local date-time formatted as {@code yyyy-MM-dd HH:mm:ss}.
 * <p>
 * Uses java.time instead of the legacy SimpleDateFormat/Date pair:
 * DateTimeFormatter is immutable and thread-safe, whereas SimpleDateFormat is not.
 * Fully-qualified names are used so no new imports are required.
 */
public static String getFormattedDate() {
    return java.time.LocalDateTime.now()
            .format(java.time.format.DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss"));
}
// The formatted date string must match the expected "yyyy-MM-dd HH:mm:ss" shape
// (regex also tolerates a leap-second-style ":60" in the seconds field).
@Test public void testFormatter() { String dateRegex1 = "^((19|20)\\d\\d)-(0?[1-9]|1[012])-(0?[1-9]|[12][0-9]|3[01]) ([2][0-3]|[0-1][0-9]|[1-9]):[0-5][0-9]:([0-5][0-9]|[6][0])$"; String dateString = Formatter.getFormattedDate(); assertTrue(Pattern .matches(dateRegex1, dateString)); }
// Parses the agent-argument string into name -> value pairs. For each VALUE_SEPARATOR found, it
// scans forward through OPTION_SEPARATORs to locate the option-name boundary immediately before
// the '=', then takes the value up to the next OPTION_SEPARATOR (or end of string). Leading
// junk before the first option (e.g. "||1600|abc|") is skipped by the nameIndex bookkeeping.
// Rejects an empty/blank input outright.
static Map<String, String> parseAgentArgs(final String agentArgs) { if (Strings.isEmpty(agentArgs)) { throw new IllegalArgumentException("cannot parse empty value"); } final Map<String, String> values = new HashMap<>(); int optionIndex = -1; do { final int valueIndex = agentArgs.indexOf(VALUE_SEPARATOR, optionIndex); if (valueIndex <= 0) { break; } int nameIndex = -1; while (optionIndex < valueIndex) { nameIndex = optionIndex; optionIndex = agentArgs.indexOf(OPTION_SEPARATOR, optionIndex + 1); if (optionIndex < 0) { break; } } final String optionName = agentArgs.substring(nameIndex + 1, valueIndex); final String value = agentArgs.substring( valueIndex + 1, optionIndex > 0 ? optionIndex : agentArgs.length()); values.put(optionName, value); } while (optionIndex > 0); return values; }
// Parses a realistic agent-args string (with leading non-option tokens) and checks every
// recognised option lands in the map with its exact value.
@Test void shouldParseAgentArgsAsAConfigOptionsMap() { final Map<String, String> expectedOptions = new HashMap<>(); expectedOptions.put(LOG_FILENAME, "log.out"); expectedOptions.put(READER_CLASSNAME, "my reader"); expectedOptions.put(ENABLED_DRIVER_EVENT_CODES, "all"); expectedOptions.put(DISABLED_DRIVER_EVENT_CODES, "FRAME_IN,FRAME_OUT"); expectedOptions.put(ENABLED_CLUSTER_EVENT_CODES, "all"); expectedOptions.put(DISABLED_CLUSTER_EVENT_CODES, "CANVASS_POSITION,STATE_CHANGE"); final Map<String, String> configOptions = parseAgentArgs( "||1600|abc|aeron.event.log.filename=log.out|" + "aeron.event.cluster.log=all|" + "aeron.event.log.reader.classname=my reader|" + "aeron.event.log.disable=FRAME_IN,FRAME_OUT|" + "aeron.event.log=all|" + "aeron.event.cluster.log.disable=CANVASS_POSITION,STATE_CHANGE"); assertEquals(expectedOptions, configOptions); }
/** Reports whether the server's configured base URL uses HTTPS. */
public boolean isSecured() {
    final String baseUrl = getBaseUrl();
    return baseUrl.startsWith("https://");
}
// An https:// base URL makes the server report itself as secured.
@Test public void is_secured_on_https_server() { settings.setProperty("sonar.core.serverBaseURL", "https://mydomain.com"); assertThat(underTest().isSecured()).isTrue(); }
// Translates a Hive SearchArgument tree into an Iceberg filter Expression by walking its root
// expression together with the predicate leaves.
public static Expression generateFilterExpression(SearchArgument sarg) { return translate(sarg.getExpression(), sarg.getLeaves()); }
// A Hive lessThanEquals leaf must translate to Iceberg's lessThanOrEqual predicate with the
// same column and literal.
@Test public void testLessThanEqualsOperand() { SearchArgument.Builder builder = SearchArgumentFactory.newBuilder(); SearchArgument arg = builder.startAnd().lessThanEquals("salary", PredicateLeaf.Type.LONG, 3000L).end().build(); UnboundPredicate expected = Expressions.lessThanOrEqual("salary", 3000L); UnboundPredicate actual = (UnboundPredicate) HiveIcebergFilterFactory.generateFilterExpression(arg); assertPredicatesMatch(expected, actual); }
// Picks an upstream for the service. Returns null when the registry has no instances. For
// non-gray selectors it load-balances over registry instances directly. For gray selectors it
// intersects the registry instances with the selector's healthy configured upstreams (matched
// by raw authority) and balances over that subset, falling back to the registry whenever the
// configured list or the intersection is empty.
public Upstream choose(final String serviceId, final String selectorId, final String ip, final String loadbalancer) { // load service instance by serviceId List<ServiceInstance> available = this.getServiceInstance(serviceId); if (CollectionUtils.isEmpty(available)) { LOG.info("choose return 1"); return null; } final SpringCloudSelectorHandle springCloudSelectorHandle = SpringCloudPluginDataHandler.SELECTOR_CACHED.get().obtainHandle(selectorId); // not gray flow if (!springCloudSelectorHandle.getGray()) { // load service from register center return this.doSelect(serviceId, ip, loadbalancer); } List<Upstream> divideUpstreams = UpstreamCacheManager.getInstance().findUpstreamListBySelectorId(selectorId); // gray flow,but upstream is null if (CollectionUtils.isEmpty(divideUpstreams)) { return this.doSelect(serviceId, ip, loadbalancer); } // select server from available to choose final List<Upstream> choose = new ArrayList<>(available.size()); for (ServiceInstance serviceInstance : available) { divideUpstreams.stream() .filter(Upstream::isStatus) .filter(upstream -> Objects.equals(upstream.getUrl(), serviceInstance.getUri().getRawAuthority())) .findFirst().ifPresent(choose::add); } if (CollectionUtils.isEmpty(choose)) { return this.doSelect(serviceId, ip, loadbalancer); } // select by divideUpstreams return this.doSelect(choose, loadbalancer, ip); }
// Covers three paths of choose(): null when no instances exist for the service, the non-gray
// registry path, and the gray path where the configured upstream matches the instance.
@Test public void testChoose() { final String ip = "0.0.0.0"; final String selectorId = "1"; final String loadbalancer = "roundRobin"; // serviceInstance is null Upstream upstreamIsNull = serviceChooser.choose("test", selectorId, ip, loadbalancer); Assertions.assertNull(upstreamIsNull); // not gray flow List<DivideUpstream> divideUpstreams = new ArrayList<>(); DivideUpstream divideUpstream = DivideUpstream.builder() .upstreamUrl("localhost:8080") .build(); divideUpstreams.add(divideUpstream); final SpringCloudSelectorHandle springCloudSelectorHandle = SpringCloudSelectorHandle.builder() .serviceId("serviceId") .divideUpstreams(divideUpstreams) .gray(false) .build(); final SelectorData selectorData = SelectorData.builder() .handle(GsonUtils.getInstance().toJson(springCloudSelectorHandle)) .id("1") .build(); springCloudPluginDataHandler.handlerSelector(selectorData); Upstream upstream = serviceChooser.choose("serviceId", selectorId, ip, loadbalancer); Assertions.assertNotNull(upstream); Assertions.assertEquals(upstream.getUrl(), "localhost:8080"); // gray flow springCloudSelectorHandle.setGray(true); final SelectorData selectorDataGray = SelectorData.builder() .handle(GsonUtils.getInstance().toJson(springCloudSelectorHandle)) .id("1") .build(); springCloudPluginDataHandler.handlerSelector(selectorDataGray); Upstream upstreamGray = serviceChooser.choose("serviceId", selectorId, ip, loadbalancer); Assertions.assertNotNull(upstreamGray); Assertions.assertEquals(upstreamGray.getUrl(), "localhost:8080"); }
// Converts a nullable persistence-layer MeasureDto into a domain Measure, dispatching on the
// metric's value type (INT/LONG/DOUBLE/BOOLEAN/STRING/LEVEL/NO_VALUE). Returns empty for a
// null dto; the metric itself must be non-null. Unknown value types are a programming error.
public Optional<Measure> toMeasure(@Nullable MeasureDto measureDto, Metric metric) { requireNonNull(metric); if (measureDto == null) { return Optional.empty(); } Double value = measureDto.getValue(); String data = measureDto.getData(); switch (metric.getType().getValueType()) { case INT: return toIntegerMeasure(measureDto, value, data); case LONG: return toLongMeasure(measureDto, value, data); case DOUBLE: return toDoubleMeasure(measureDto, value, data); case BOOLEAN: return toBooleanMeasure(measureDto, value, data); case STRING: return toStringMeasure(measureDto, data); case LEVEL: return toLevelMeasure(measureDto, data); case NO_VALUE: return toNoValueMeasure(measureDto); default: throw new IllegalArgumentException("Unsupported Measure.ValueType " + metric.getType().getValueType()); } }
// A null dto maps to an absent Optional regardless of metric type.
@Test public void toMeasure_returns_absent_for_null_argument() { assertThat(underTest.toMeasure(null, SOME_INT_METRIC)).isNotPresent(); }
// Prints the registered PipelineOptions interfaces to the given stream, sorted by class name,
// followed by a hint on how to request detailed per-interface help.
public static void printHelp(PrintStream out) { checkNotNull(out); out.println("The set of registered options are:"); Set<Class<? extends PipelineOptions>> sortedOptions = new TreeSet<>(ClassNameComparator.INSTANCE); sortedOptions.addAll(CACHE.get().registeredOptions); for (Class<? extends PipelineOptions> kls : sortedOptions) { out.format(" %s%n", kls.getName()); } out.format( "%nUse --help=<OptionsName> for detailed help. For example:%n" + " --help=DataflowPipelineOptions <short names valid for registered options>%n" + " --help=org.apache.beam.runners.dataflow.options.DataflowPipelineOptions%n"); }
// Captures printHelp output and checks it lists the header plus the base PipelineOptions class.
@Test public void testProgrammaticPrintHelp() { ByteArrayOutputStream baos = new ByteArrayOutputStream(); PipelineOptionsFactory.printHelp(new PrintStream(baos)); String output = new String(baos.toByteArray(), StandardCharsets.UTF_8); assertThat(output, containsString("The set of registered options are:")); assertThat(output, containsString("org.apache.beam.sdk.options.PipelineOptions")); }
// Factory for a member-addressed handler: partition id is fixed to -1 (not partition-bound).
public static ScheduledTaskHandler of(UUID uuid, String schedulerName, String taskName) { return new ScheduledTaskHandlerImpl(uuid, -1, schedulerName, taskName); }
// Handlers parsed from URNs that differ only in member UUID must not be equal.
@Test public void of_equalityDifferentAddress() { String urnA = "urn:hzScheduledTaskHandler:39ffc539-a356-444c-bec7-6f644462c208-1SchedulerTask"; String urnB = "urn:hzScheduledTaskHandler:20e4f0f8-52bf-47e5-b541-e1924b83cc9b-1SchedulerTask"; assertNotEquals(ScheduledTaskHandler.of(urnA), ScheduledTaskHandler.of(urnB)); }
/**
 * Returns a reader iterator scoped to the given key components.
 * NOTE(review): from this declaration alone the exact matching semantics (exact key vs.
 * key-component prefix) are not visible — see the implementing class; the tests below use it
 * with partial key lists and call start() to probe for presence.
 *
 * @throws IOException on underlying read failure
 */
public abstract IsmPrefixReaderIterator overKeyComponents(List<?> keyComponents) throws IOException;
// Forces the Bloom filter to always report "might contain" so lookups fall through to the
// index, then checks start() returns false for keys before, between, and after the two keys
// actually written to the Ism file.
@Test public void testReadMissingKeysBypassingBloomFilter() throws Exception { File tmpFile = tmpFolder.newFile(); List<IsmRecord<byte[]>> data = new ArrayList<>(); data.add(IsmRecord.<byte[]>of(ImmutableList.of(EMPTY, new byte[] {0x04}), EMPTY)); data.add(IsmRecord.<byte[]>of(ImmutableList.of(EMPTY, new byte[] {0x08}), EMPTY)); writeElementsToFile(data, tmpFile); IsmReader<byte[]> reader = new IsmReaderImpl<byte[]>( FileSystems.matchSingleFileSpec(tmpFile.getAbsolutePath()).resourceId(), CODER, cache) { // We use this override to get around the Bloom filter saying that the key doesn't exist. @Override boolean bloomFilterMightContain(RandomAccessData keyBytes) { return true; } }; // Check that we got false with a key before all keys contained in the file. assertFalse(reader.overKeyComponents(ImmutableList.of(EMPTY, new byte[] {0x02})).start()); // Check that we got false with a key between two other keys contained in the file. assertFalse(reader.overKeyComponents(ImmutableList.of(EMPTY, new byte[] {0x06})).start()); // Check that we got false with a key that is after all keys contained in the file. assertFalse(reader.overKeyComponents(ImmutableList.of(EMPTY, new byte[] {0x10})).start()); }
/**
 * Drains buffered completed fetches into a {@link ShareFetch}, collecting at most
 * {@code fetchConfig.maxPollRecords} records. Alternates between initializing the next completed
 * fetch from the buffer and pulling record batches from the fetch currently "in line".
 * A KafkaException is swallowed (and the partial fetch returned) when some records were already
 * collected, so data gathered before the failure is not lost; it is rethrown only if empty.
 */
public ShareFetch<K, V> collect(final ShareFetchBuffer fetchBuffer) {
    ShareFetch<K, V> fetch = ShareFetch.empty();
    int recordsRemaining = fetchConfig.maxPollRecords;
    try {
        while (recordsRemaining > 0) {
            final ShareCompletedFetch nextInLineFetch = fetchBuffer.nextInLineFetch();
            if (nextInLineFetch == null || nextInLineFetch.isConsumed()) {
                // No usable fetch in line: promote the next completed fetch from the buffer.
                final ShareCompletedFetch completedFetch = fetchBuffer.peek();
                if (completedFetch == null) {
                    break;
                }
                if (!completedFetch.isInitialized()) {
                    try {
                        fetchBuffer.setNextInLineFetch(initialize(completedFetch));
                    } catch (Exception e) {
                        // Only discard the failed fetch when nothing was collected yet; otherwise
                        // leave it buffered so the error resurfaces on the next poll.
                        if (fetch.isEmpty()) {
                            fetchBuffer.poll();
                        }
                        throw e;
                    }
                } else {
                    fetchBuffer.setNextInLineFetch(completedFetch);
                }
                fetchBuffer.poll();
            } else {
                // Pull up to recordsRemaining records from the in-line fetch.
                final TopicIdPartition tp = nextInLineFetch.partition;
                ShareInFlightBatch<K, V> batch = nextInLineFetch.fetchRecords(
                        deserializers,
                        recordsRemaining,
                        fetchConfig.checkCrcs);
                if (batch.isEmpty()) {
                    nextInLineFetch.drain();
                }
                recordsRemaining -= batch.numRecords();
                fetch.add(tp, batch);
                if (batch.getException() != null) {
                    throw batch.getException();
                } else if (batch.hasCachedException()) {
                    // Stop collecting; the cached exception will be surfaced later.
                    break;
                }
            }
        }
    } catch (KafkaException e) {
        if (fetch.isEmpty()) {
            throw e;
        }
    }
    return fetch;
}
// A completed fetch carrying CORRUPT_MESSAGE must surface as a KafkaException from collect().
@Test public void testFetchWithCorruptMessage() { buildDependencies(); subscribeAndAssign(topicAPartition0); ShareCompletedFetch completedFetch = completedFetchBuilder .error(Errors.CORRUPT_MESSAGE) .build(); fetchBuffer.add(completedFetch); assertThrows(KafkaException.class, () -> fetchCollector.collect(fetchBuffer)); }
/**
 * OAuth2 authorize endpoint: handles the approval (or denial) step of the authorization-code
 * ("code") and implicit ("token") grants, returning the redirect URL the frontend should follow.
 */
@PostMapping("/authorize")
@Operation(summary = "申请授权", description = "适合 code 授权码模式,或者 implicit 简化模式;在 sso.vue 单点登录界面被【提交】调用")
@Parameters({
        @Parameter(name = "response_type", required = true, description = "响应类型", example = "code"),
        @Parameter(name = "client_id", required = true, description = "客户端编号", example = "tudou"),
        @Parameter(name = "scope", description = "授权范围", example = "userinfo.read"), // Sent as a JSON Map<String, Boolean>; Spring MVC cannot bind a map parameter like this directly
        @Parameter(name = "redirect_uri", required = true, description = "重定向 URI", example = "https://www.iocoder.cn"),
        @Parameter(name = "auto_approve", required = true, description = "用户是否接受", example = "true"),
        @Parameter(name = "state", example = "1")
})
public CommonResult<String> approveOrDeny(@RequestParam("response_type") String responseType,
                                          @RequestParam("client_id") String clientId,
                                          @RequestParam(value = "scope", required = false) String scope,
                                          @RequestParam("redirect_uri") String redirectUri,
                                          @RequestParam(value = "auto_approve") Boolean autoApprove,
                                          @RequestParam(value = "state", required = false) String state) {
    @SuppressWarnings("unchecked")
    Map<String, Boolean> scopes = JsonUtils.parseObject(scope, Map.class);
    scopes = ObjectUtil.defaultIfNull(scopes, Collections.emptyMap());
    // 0. The user is already authenticated at this point; enforced by Spring Security.
    // 1.1 Validate that responseType is one of the supported values (code / token).
    OAuth2GrantTypeEnum grantTypeEnum = getGrantTypeEnum(responseType);
    // 1.2 Validate that redirectUri is legal for this client and the scopes are within the client's authorized range.
    OAuth2ClientDO client = oauth2ClientService.validOAuthClientFromCache(clientId, null, grantTypeEnum.getGrantType(), scopes.keySet(), redirectUri);
    // 2.1 autoApprove == true: scenario one — rely on a previously granted approval.
    if (Boolean.TRUE.equals(autoApprove)) {
        // If auto-approval is not possible, return an empty url so the frontend does not redirect.
        if (!oauth2ApproveService.checkForPreApproval(getLoginUserId(), getUserType(), clientId, scopes.keySet())) {
            return success(null);
        }
    } else {
        // 2.2 Otherwise: scenario two — record the user's explicit approval decision.
        // If the resulting approval is denied, redirect to an error link.
        if (!oauth2ApproveService.updateAfterApproval(getLoginUserId(), getUserType(), clientId, scopes)) {
            return success(OAuth2Utils.buildUnsuccessfulRedirect(redirectUri, responseType, state, "access_denied", "User denied access"));
        }
    }
    // 3.1 Authorization-code grant: issue a code and build the redirect URL.
    List<String> approveScopes = convertList(scopes.entrySet(), Map.Entry::getKey, Map.Entry::getValue);
    if (grantTypeEnum == OAuth2GrantTypeEnum.AUTHORIZATION_CODE) {
        return success(getAuthorizationCodeRedirect(getLoginUserId(), client, approveScopes, redirectUri, state));
    }
    // 3.2 "token" response type (implicit grant): issue an access token and build the redirect URL.
    return success(getImplicitGrantRedirect(getLoginUserId(), client, approveScopes, redirectUri, state));
}
// autoApprove = true with response_type "token": expects an implicit-grant fragment redirect;
// expires_in may round to 29 or 30 depending on elapsed time between token creation and assertion.
@Test // autoApprove = true,通过 + token public void testApproveOrDeny_autoApproveWithToken() { // 准备参数 String responseType = "token"; String clientId = randomString(); String scope = "{\"read\": true, \"write\": false}"; String redirectUri = "https://www.iocoder.cn"; String state = "test"; // mock 方法(client) OAuth2ClientDO client = randomPojo(OAuth2ClientDO.class).setClientId(clientId).setAdditionalInformation(null); when(oauth2ClientService.validOAuthClientFromCache(eq(clientId), isNull(), eq("implicit"), eq(asSet("read", "write")), eq(redirectUri))).thenReturn(client); // mock 方法(场景一) when(oauth2ApproveService.checkForPreApproval(isNull(), eq(UserTypeEnum.ADMIN.getValue()), eq(clientId), eq(SetUtils.asSet("read", "write")))).thenReturn(true); // mock 方法(访问令牌) OAuth2AccessTokenDO accessTokenDO = randomPojo(OAuth2AccessTokenDO.class) .setAccessToken("test_access_token").setExpiresTime(LocalDateTimeUtil.offset(LocalDateTime.now(), 30010L, ChronoUnit.MILLIS)); when(oauth2GrantService.grantImplicit(isNull(), eq(UserTypeEnum.ADMIN.getValue()), eq(clientId), eq(ListUtil.toList("read")))).thenReturn(accessTokenDO); // 调用 CommonResult<String> result = oauth2OpenController.approveOrDeny(responseType, clientId, scope, redirectUri, true, state); // 断言 assertEquals(0, result.getCode()); assertThat(result.getData(), anyOf( // 29 和 30 都有一定概率,主要是时间计算 is("https://www.iocoder.cn#access_token=test_access_token&token_type=bearer&state=test&expires_in=29&scope=read"), is("https://www.iocoder.cn#access_token=test_access_token&token_type=bearer&state=test&expires_in=30&scope=read") )); }
// Deletes an attached live object; the "deleted" set is threaded through the recursive overload,
// presumably to avoid revisiting already-deleted referenced objects — confirm against that overload.
@Override public <T> void delete(T attachedObject) { addExpireListener(commandExecutor); Set<String> deleted = new HashSet<String>(); delete(attachedObject, deleted); }
// Deleting the order then the customer should reduce the Redis key count stepwise (3 -> 2 -> 1).
@Test public void testDelete() { Customer customer = new Customer("12"); Order order = new Order(customer); order = redisson.getLiveObjectService().persist(order); assertThat(redisson.getKeys().count()).isEqualTo(3); Customer persistedCustomer = order.getCustomer(); redisson.getLiveObjectService().delete(order); assertThat(redisson.getKeys().count()).isEqualTo(2); redisson.getLiveObjectService().delete(persistedCustomer); assertThat(redisson.getKeys().count()).isEqualTo(1); }
// Materializes this payload into a typed ConfigInstance for the given config id via ConfigInstanceUtil.
public <ConfigType extends ConfigInstance> ConfigType toInstance(Class<ConfigType> clazz, String configId) { return ConfigInstanceUtil.getNewInstance(clazz, configId, this); }
// A payload field that does not exist in the config definition must be ignored, not fail conversion.
@Test public void non_existent_array_of_struct_in_payload_is_ignored() { Slime slime = new Slime(); Cursor array = slime.setObject().setArray("non_existent_arr"); array.addObject().setString("name", "val"); StructtypesConfig config = new ConfigPayload(slime).toInstance(StructtypesConfig.class, ""); assertNotNull(config); }
// Static factory returning a fresh, unconfigured Inner transform instance.
public static <T> Inner<T> create() { return new Inner<>(); }
// RenameFields with "map.field" selectors must rename fields inside the row type of map values,
// updating both the output schema and the row data.
@Test @Category(NeedsRunner.class) public void renameNestedInMapFields() { Schema nestedSchema = Schema.builder().addStringField("field1").addInt32Field("field2").build(); Schema schema = Schema.builder() .addMapField("map", Schema.FieldType.STRING, Schema.FieldType.row(nestedSchema)) .build(); PCollection<Row> renamed = pipeline .apply( Create.of( Row.withSchema(schema) .addValue( ImmutableMap.of( "k1", Row.withSchema(nestedSchema).addValues("one", 1).build())) .build(), Row.withSchema(schema) .addValue( ImmutableMap.of( "k2", Row.withSchema(nestedSchema).addValues("two", 1).build())) .build()) .withRowSchema(schema)) .apply( RenameFields.<Row>create() .rename("map.field1", "new1") .rename("map.field2", "new2")); Schema expectedNestedSchema = Schema.builder().addStringField("new1").addInt32Field("new2").build(); Schema expectedSchema = Schema.builder() .addMapField("map", Schema.FieldType.STRING, Schema.FieldType.row(expectedNestedSchema)) .build(); assertEquals(expectedSchema, renamed.getSchema()); List<Row> expectedRows = ImmutableList.of( Row.withSchema(expectedSchema) .addValue( ImmutableMap.of( "k1", Row.withSchema(expectedNestedSchema).addValues("one", 1).build())) .build(), Row.withSchema(expectedSchema) .addValue( ImmutableMap.of( "k2", Row.withSchema(expectedNestedSchema).addValues("two", 1).build())) .build()); PAssert.that(renamed).containsInAnyOrder(expectedRows); pipeline.run(); }
// Exposes the shared, statically-initialized dispatcher metadata singleton.
public static NotificationDispatcherMetadata newMetadata() { return METADATA; }
// The FP/Accepted notification metadata must declare itself configurable per project.
@Test public void fpOrWontFixIssues_notification_is_enable_at_project_level() { NotificationDispatcherMetadata metadata = FPOrAcceptedNotificationHandler.newMetadata(); assertThat(metadata.getProperty(PER_PROJECT_NOTIFICATION)).isEqualTo("true"); }
/**
 * Returns a serializable function that converts a Beam {@link Row} into a Spanner
 * {@link Mutation} of the requested operation type against {@code table}.
 * DELETE builds a key-based delete; all other supported ops copy the row's fields into the
 * corresponding mutation builder. Unknown ops fail fast with IllegalArgumentException.
 */
public static SerializableFunction<Row, Mutation> beamRowToMutationFn(
    Mutation.Op operation, String table) {
  return (row -> {
    switch (operation) {
      case INSERT:
        return MutationUtils.createMutationFromBeamRows(Mutation.newInsertBuilder(table), row);
      case DELETE:
        return Mutation.delete(table, MutationUtils.createKeyFromBeamRow(row));
      case UPDATE:
        return MutationUtils.createMutationFromBeamRows(Mutation.newUpdateBuilder(table), row);
      case REPLACE:
        return MutationUtils.createMutationFromBeamRows(Mutation.newReplaceBuilder(table), row);
      case INSERT_OR_UPDATE:
        return MutationUtils.createMutationFromBeamRows(
            Mutation.newInsertOrUpdateBuilder(table), row);
      default:
        throw new IllegalArgumentException(
            String.format("Unknown mutation operation type: %s", operation));
    }
  });
}
// Null-valued row fields must round-trip into an INSERT mutation identical to the expected fixture.
@Test public void testCreateInsertMutationFromRowWithNulls() { Mutation expectedMutation = createMutationNulls(Mutation.Op.INSERT); Mutation mutation = beamRowToMutationFn(Mutation.Op.INSERT, TABLE).apply(WRITE_ROW_NULLS); assertEquals(expectedMutation, mutation); }
// Matches when the message field's numeric value is strictly smaller than the rule value;
// XOR with getInverted() flips the result for inverted rules. A missing or non-numeric value on
// either side yields false unconditionally — inversion is NOT applied in that case.
@Override public boolean match(Message msg, StreamRule rule) { Double msgVal = getDouble(msg.getField(rule.getField())); if (msgVal == null) { return false; } Double ruleVal = getDouble(rule.getValue()); if (ruleVal == null) { return false; } return rule.getInverted() ^ (msgVal < ruleVal); }
// An inverted rule still must NOT match when the target field is absent (only "someother" is set).
@Test public void testMissedInvertedMatchWithMissingField() { StreamRule rule = getSampleRule(); rule.setValue("23"); rule.setInverted(true); Message msg = getSampleMessage(); msg.addField("someother", "42"); StreamRuleMatcher matcher = getMatcher(rule); assertFalse(matcher.match(msg, rule)); }
// Delegates the ACL permission check to the configured checker; expected to throw on denial
// (the test below asserts AclException for an unmatched resource).
void checkPerm(PlainAccessResource needCheckedAccess, PlainAccessResource ownedAccess) { permissionChecker.check(needCheckedAccess, ownedAccess); }
// PUB permission on a topic absent from the owned ACL must be rejected with AclException.
@Test(expected = AclException.class) public void checkErrorPermDefaultValueNotMatch() { plainAccessResource = new PlainAccessResource(); plainAccessResource.addResourceAndPerm("topicF", Permission.PUB); plainPermissionManager.checkPerm(plainAccessResource, subPlainAccessResource); }
// Builds a ULID from the message's timestamp and sequence number. Any failure in the
// sequence-embedding path is logged and falls back to a plain timestamp-based ULID, so ID
// generation never propagates an exception to the caller.
public String createULID(Message message) { checkTimestamp(message.getTimestamp().getMillis()); try { return createULID(message.getTimestamp().getMillis(), message.getSequenceNr()); } catch (Exception e) { LOG.error("Exception while creating ULID.", e); return ulid.nextULID(message.getTimestamp().getMillis()); } }
// The maximum unsigned-int sequence number (~0) must survive a ULID encode/parse round trip.
@Test public void uintMaxGenerate() { final MessageULIDGenerator generator = new MessageULIDGenerator(new ULID()); final long ts = Tools.nowUTC().getMillis(); final int uIntMaxValue = ~0; ULID.Value parsedULID = ULID.parseULID(generator.createULID(ts, uIntMaxValue)); assertThat(extractSequenceNr(parsedULID)).isEqualTo(uIntMaxValue); }
// Formats the given instant as human-readable relative time ("3 hours ago"); a null argument
// defaults to the current reference time, producing the "moments ago"-style output of format(Duration).
public String format(Date then) { if (then == null) then = now(); Duration d = approximateDuration(then); return format(d); }
// With the default (implicit "now") reference, three hours in the past formats as "3 hours ago".
@Test public void testHoursAgoDefaultReference() throws Exception { PrettyTime t = new PrettyTime(); Assert.assertEquals("3 hours ago", t.format(now.minusHours(3))); }
// Dispatches to the ResultSet accessor matching the requested target type (primitive class
// literals map to the primitive getters, reference types to their dedicated getters), falling
// back to getObject for any type not explicitly listed. Identity comparison (==) on Class
// objects is intentional: class literals are canonical.
@Override public Object getValue(final int columnIndex, final Class<?> type) throws SQLException { if (boolean.class == type) { return resultSet.getBoolean(columnIndex); } if (byte.class == type) { return resultSet.getByte(columnIndex); } if (short.class == type) { return resultSet.getShort(columnIndex); } if (int.class == type) { return resultSet.getInt(columnIndex); } if (long.class == type) { return resultSet.getLong(columnIndex); } if (float.class == type) { return resultSet.getFloat(columnIndex); } if (double.class == type) { return resultSet.getDouble(columnIndex); } if (String.class == type) { return resultSet.getString(columnIndex); } if (BigDecimal.class == type) { return resultSet.getBigDecimal(columnIndex); } if (byte[].class == type) { return resultSet.getBytes(columnIndex); } if (Date.class == type) { return resultSet.getDate(columnIndex); } if (Time.class == type) { return resultSet.getTime(columnIndex); } if (Timestamp.class == type) { return resultSet.getTimestamp(columnIndex); } if (Blob.class == type) { return resultSet.getBlob(columnIndex); } if (Clob.class == type) { return resultSet.getClob(columnIndex); } if (Array.class == type) { return resultSet.getArray(columnIndex); } return resultSet.getObject(columnIndex); }
// Requesting Timestamp.class must route through ResultSet.getTimestamp.
@Test void assertGetValueByTimestamp() throws SQLException { ResultSet resultSet = mock(ResultSet.class); when(resultSet.getTimestamp(1)).thenReturn(new Timestamp(0L)); assertThat(new JDBCStreamQueryResult(resultSet).getValue(1, Timestamp.class), is(new Timestamp(0L))); }
/**
 * Forwards the outcome of {@code sourceFuture} into {@code destinationFuture}: a normal result
 * completes the destination with the same value, a failure completes it exceptionally with the
 * same throwable. The destination is only touched once the source settles.
 *
 * @param sourceFuture      the future whose eventual outcome is propagated
 * @param destinationFuture the future to complete with the source's outcome
 */
public static <T> void chainFuture(
    CompletableFuture<? extends T> sourceFuture,
    CompletableFuture<T> destinationFuture
) {
    sourceFuture.whenComplete((result, error) -> {
        if (error == null) {
            destinationFuture.complete(result);
        } else {
            destinationFuture.completeExceptionally(error);
        }
    });
}
// An exceptional completion of the source must propagate the same cause to the destination.
@Test public void testChainFutureExceptionally() { CompletableFuture<Integer> sourceFuture = new CompletableFuture<>(); CompletableFuture<Number> destinationFuture = new CompletableFuture<>(); FutureUtils.chainFuture(sourceFuture, destinationFuture); sourceFuture.completeExceptionally(new RuntimeException("source failed")); Throwable cause = assertThrows(ExecutionException.class, () -> destinationFuture.get()).getCause(); assertEquals(RuntimeException.class, cause.getClass()); assertEquals("source failed", cause.getMessage()); }
// Partitions the input expressions into groups keyed by shared common sub-expressions (CSE).
// Expressions containing no CSE become singleton groups mapped to false. Expressions that do
// contain CSEs are greedily merged: starting from each unmerged expression, any later expression
// whose CSE set overlaps the group's accumulated dependency set is pulled in (the inner scan
// restarts at i+1 after each merge so transitive overlaps are caught), capped at
// expressionGroupSize members per group; such groups map to true.
public static Map<List<RowExpression>, Boolean> getExpressionsPartitionedByCSE(Collection<? extends RowExpression> expressions, int expressionGroupSize) { if (expressions.isEmpty()) { return ImmutableMap.of(); } CommonSubExpressionCollector expressionCollector = new CommonSubExpressionCollector(); expressions.forEach(expression -> expression.accept(expressionCollector, null)); Set<RowExpression> cse = expressionCollector.cseByLevel.values().stream().flatMap(Set::stream).collect(toImmutableSet()); if (cse.isEmpty()) { return expressions.stream().collect(toImmutableMap(ImmutableList::of, m -> false)); } ImmutableMap.Builder<List<RowExpression>, Boolean> expressionsPartitionedByCse = ImmutableMap.builder(); SubExpressionChecker subExpressionChecker = new SubExpressionChecker(cse); Map<Boolean, List<RowExpression>> expressionsWithCseFlag = expressions.stream().collect(Collectors.partitioningBy(expression -> expression.accept(subExpressionChecker, null))); expressionsWithCseFlag.get(false).forEach(expression -> expressionsPartitionedByCse.put(ImmutableList.of(expression), false)); List<RowExpression> expressionsWithCse = expressionsWithCseFlag.get(true); if (expressionsWithCse.size() == 1) { RowExpression expression = expressionsWithCse.get(0); expressionsPartitionedByCse.put(ImmutableList.of(expression), true); return expressionsPartitionedByCse.build(); } List<Set<RowExpression>> cseDependency = expressionsWithCse.stream() .map(expression -> subExpressions(expression).stream() .filter(cse::contains) .collect(toImmutableSet())) .collect(toImmutableList()); boolean[] merged = new boolean[expressionsWithCse.size()]; int i = 0; while (i < merged.length) { while (i < merged.length && merged[i]) { i++; } if (i >= merged.length) { break; } merged[i] = true; List<RowExpression> newList = new ArrayList<>(); newList.add(expressionsWithCse.get(i)); Set<RowExpression> dependencies = new HashSet<>(); Set<RowExpression> first = cseDependency.get(i); dependencies.addAll(first); int
j = i + 1; while (j < merged.length && newList.size() < expressionGroupSize) { while (j < merged.length && merged[j]) { j++; } if (j >= merged.length) { break; } Set<RowExpression> second = cseDependency.get(j); if (!Sets.intersection(dependencies, second).isEmpty()) { RowExpression expression = expressionsWithCse.get(j); newList.add(expression); dependencies.addAll(second); merged[j] = true; j = i + 1; } else { j++; } } expressionsPartitionedByCse.put(ImmutableList.copyOf(newList), true); } return expressionsPartitionedByCse.build(); }
// Exercises CSE partitioning: overlapping sub-expressions cluster together (flag true), isolated
// expressions stay singletons (flag false), and a smaller group-size cap splits oversized clusters.
@Test void testGetExpressionsWithCSE() { List<RowExpression> expressions = ImmutableList.of(rowExpression("x + y"), rowExpression("(x + y) * 2"), rowExpression("x + 2"), rowExpression("y * (x + 2)"), rowExpression("x * y")); Map<List<RowExpression>, Boolean> expressionsWithCSE = getExpressionsPartitionedByCSE(expressions, 3); assertEquals( expressionsWithCSE, ImmutableMap.of( ImmutableList.of(rowExpression("x + y"), rowExpression("(x + y) * 2")), true, ImmutableList.of(rowExpression("x + 2"), rowExpression("y * (x + 2)")), true, ImmutableList.of(rowExpression("x * y")), false)); expressions = ImmutableList.of(rowExpression("x + y"), rowExpression("x * 2"), rowExpression("x + y + x * 2"), rowExpression("y * 2"), rowExpression("x + y * 2")); expressionsWithCSE = getExpressionsPartitionedByCSE(expressions, 3); assertEquals( expressionsWithCSE, ImmutableMap.of( ImmutableList.of(rowExpression("x + y"), rowExpression("x + y + x * 2"), rowExpression("x * 2")), true, ImmutableList.of(rowExpression("y * 2"), rowExpression("x + y * 2")), true)); expressionsWithCSE = getExpressionsPartitionedByCSE(expressions, 2); assertEquals( expressionsWithCSE, ImmutableMap.of( ImmutableList.of(rowExpression("x + y"), rowExpression("x + y + x * 2")), true, ImmutableList.of(rowExpression("y * 2"), rowExpression("x + y * 2")), true, ImmutableList.of(rowExpression("x * 2")), true)); }
// Validates a client certificate's ASN.1 structure. A certificate with no extensions block is
// accepted silently; one with a present-but-empty extension sequence (an ASN.1 spec violation
// that older BouncyCastle rejected) is accepted but logged as a deployment INFO advising the user
// to regenerate the certificate. Encoding failures are forwarded to the error reporter.
static void validateCertificate(String clusterName, String clientId, X509Certificate cert, BiConsumer<String, Throwable> reporter, DeployState state) { try { var extensions = TBSCertificate.getInstance(cert.getTBSCertificate()).getExtensions(); if (extensions == null) return; // Certificate without any extensions is okay if (extensions.getExtensionOIDs().length == 0) { /* BouncyCastle 1.77 and 1.78 did not accept certificates having an empty sequence of extensions. Earlier releases violated the ASN.1 specification as the specification forbids empty extension sequence. See https://github.com/bcgit/bc-java/issues/1479. The restriction was lifted on 1.78.1 although it's a reasonble to warn users still. */ var message = "The certificate's ASN.1 structure contains an empty sequence of extensions, " + "which is a violation of the ASN.1 specification. " + "Please update the application package with a new certificate, " + "e.g by generating a new one using the Vespa CLI `$ vespa auth cert`. "; state.getDeployLogger() .log(Level.INFO, errorMessage(clusterName, clientId, message)); } } catch (CertificateEncodingException e) { reporter.accept(errorMessage(clusterName, clientId, e.getMessage()), e); } }
// A certificate with an empty extension sequence must log the advisory message (not invoke the reporter).
@Test void logs_deployment_warning_on_certificate_with_empty_sequence_of_extensions() { var logger = new DeployLoggerStub(); var state = new DeployState.Builder().deployLogger(logger).build(); var cert = readTestCertificate("cert-with-empty-sequence-of-extensions.pem"); CloudClientsValidator.validateCertificate("default", "my-feed-client", cert, (msg, cause) -> { throw new IllegalArgumentException(msg, cause); }, state); var expected = "Client **my-feed-client** defined for cluster **default** contains an invalid certificate: " + "The certificate's ASN.1 structure contains an empty sequence of extensions, " + "which is a violation of the ASN.1 specification. " + "Please update the application package with a new certificate, " + "e.g by generating a new one using the Vespa CLI `$ vespa auth cert`. "; assertEquals(expected, logger.getLast().message); }
// Runs every registered filter over every message, timing each filter invocation per-filter.
// A filter returning true marks the message filtered-out and acknowledges it immediately.
// Filter exceptions are logged (full stack only at DEBUG) and recorded as a processing error on
// the message rather than aborting the chain; per-message timing is always recorded via finally.
@Override public Messages process(Messages messages) { for (final MessageFilter filter : filterRegistry) { for (Message msg : messages) { final String timerName = name(filter.getClass(), "executionTime"); final Timer timer = metricRegistry.timer(timerName); final Timer.Context timerContext = timer.time(); try { LOG.trace("Applying filter [{}] on message <{}>.", filter.getName(), msg.getId()); if (filter.filter(msg)) { LOG.debug("Filter [{}] marked message <{}> to be discarded. Dropping message.", filter.getName(), msg.getId()); msg.setFilterOut(true); filteredOutMessages.mark(); messageQueueAcknowledger.acknowledge(msg); } } catch (Exception e) { final String shortError = String.format(Locale.US, "Could not apply filter [%s] on message <%s>", filter.getName(), msg.getId()); if (LOG.isDebugEnabled()) { LOG.error("{}:", shortError, e); } else { LOG.error("{}:\n{}", shortError, ExceptionUtils.getShortenedStackTrace(e)); } msg.addProcessingError(new Message.ProcessingError(ProcessingFailureCause.MessageFilterException, shortError, ExceptionUtils.getRootCauseMessage(e))); } finally { final long elapsedNanos = timerContext.stop(); msg.recordTiming(serverStatus, timerName, elapsedNanos); } } } return messages; }
// A stateful filter drops only the first message: the first process() call yields 0 messages
// (filtered out), the second yields 1, and the filterOut flags reflect each outcome.
@Test public void testHandleMessage() { MessageFilter filterOnlyFirst = new MessageFilter() { private boolean filterOut = true; @Override public boolean filter(Message msg) { if (filterOut) { msg.setFilterOut(true); filterOut = false; return true; } return false; } @Override public String getName() { return "first filtered out, subsequent pass"; } @Override public int getPriority() { return 0; } }; final MessageFilterChainProcessor filterTest = new MessageFilterChainProcessor(new MetricRegistry(), Collections.singleton(filterOnlyFirst), acknowledger, serverStatus); Message filteredoutMessage = messageFactory.createMessage("filtered out", "source", Tools.nowUTC()); filteredoutMessage.setJournalOffset(1); Message unfilteredMessage = messageFactory.createMessage("filtered out", "source", Tools.nowUTC()); final Messages messages1 = filterTest.process(filteredoutMessage); final Messages messages2 = filterTest.process(unfilteredMessage); Assert.assertTrue(filteredoutMessage.getFilterOut()); Assert.assertFalse(unfilteredMessage.getFilterOut()); Assert.assertEquals(0, Iterables.size(messages1)); Assert.assertEquals(1, Iterables.size(messages2)); }
// Produces a new QueryConfiguration with overrides applied. Session properties follow the
// override strategy: OVERRIDE replaces them wholesale, SUBSTITUTE merges overrides on top of the
// existing map, any other strategy keeps the existing map; explicit removals are applied last.
// Scalar fields (catalog/schema/username/password) fall back to this configuration's values when
// no override is present.
public QueryConfiguration applyOverrides(QueryConfigurationOverrides overrides) { Map<String, String> sessionProperties; if (overrides.getSessionPropertiesOverrideStrategy() == OVERRIDE) { sessionProperties = new HashMap<>(overrides.getSessionPropertiesOverride()); } else { sessionProperties = new HashMap<>(this.sessionProperties); if (overrides.getSessionPropertiesOverrideStrategy() == SUBSTITUTE) { sessionProperties.putAll(overrides.getSessionPropertiesOverride()); } } overrides.getSessionPropertiesToRemove().forEach(sessionProperties::remove); return new QueryConfiguration( overrides.getCatalogOverride().orElse(catalog), overrides.getSchemaOverride().orElse(schema), Optional.ofNullable(overrides.getUsernameOverride().orElse(username.orElse(null))), Optional.ofNullable(overrides.getPasswordOverride().orElse(password.orElse(null))), Optional.of(sessionProperties), isReusableTable, Optional.of(partitions)); }
// With NO_ACTION strategy, only the explicit removal of "property_2" should change the properties.
@Test public void testSessionPropertyRemoval() { overrides.setSessionPropertiesToRemove("property_2"); overrides.setSessionPropertiesOverrideStrategy(NO_ACTION); QueryConfiguration removed = new QueryConfiguration( CATALOG_OVERRIDE, SCHEMA_OVERRIDE, Optional.of(USERNAME_OVERRIDE), Optional.of(PASSWORD_OVERRIDE), Optional.of(ImmutableMap.of("property_1", "value_1")), Optional.of(CLIENT_TAGS), Optional.empty()); assertEquals(CONFIGURATION_1.applyOverrides(overrides), removed); }
// Accessor for the cached active-state flag.
@Override public boolean isActive() { return isActive; }
// Simulates a client-side subscribe timeout: four subscribe commands arrive while topic creation
// is deliberately deferred; after the topic opens, requests 3-5 must fail with CommandError and
// the original request (id 1) must eventually succeed, leaving the channel active.
@Test(timeOut = 30000) public void testSubscribeTimeout() throws Exception { resetChannel(); setChannelConnected(); // Delay the topic creation in a deterministic way CompletableFuture<Runnable> openTopicTask = new CompletableFuture<>(); doAnswer(invocationOnMock -> { openTopicTask.complete( () -> ((OpenLedgerCallback) invocationOnMock.getArguments()[2]).openLedgerComplete(ledgerMock, null)); return null; }).when(pulsarTestContext.getManagedLedgerFactory()) .asyncOpen(matches(".*success.*"), any(ManagedLedgerConfig.class), any(OpenLedgerCallback.class), any(Supplier.class), any()); // In a subscribe timeout from client side we expect to see this sequence of commands : // 1. Subscribe // 2. close consumer (when the timeout is triggered, which may be before the consumer was created on the broker) // 3. Subscribe (triggered by reconnection logic) // These operations need to be serialized, to allow the last subscribe operation to finally succeed // (There can be more subscribe/close pairs in the sequence, depending on the client timeout ByteBuf subscribe1 = Commands.newSubscribe(successTopicName, // successSubName, 1 /* consumer id */, 1 /* request id */, SubType.Exclusive, 0, "test" /* consumer name */, 0 /* avoid reseting cursor */); channel.writeInbound(subscribe1); ByteBuf subscribe2 = Commands.newSubscribe(successTopicName, // successSubName, 1 /* consumer id */, 3 /* request id */, SubType.Exclusive, 0, "test" /* consumer name */, 0 /* avoid reseting cursor */); channel.writeInbound(subscribe2); ByteBuf subscribe3 = Commands.newSubscribe(successTopicName, // successSubName, 1 /* consumer id */, 4 /* request id */, SubType.Exclusive, 0, "test" /* consumer name */, 0 /* avoid reseting cursor */); channel.writeInbound(subscribe3); ByteBuf subscribe4 = Commands.newSubscribe(successTopicName, // successSubName, 1 /* consumer id */, 5 /* request id */, SubType.Exclusive, 0, "test" /* consumer name */, 0 /* avoid reseting cursor */); channel.writeInbound(subscribe4);
openTopicTask.get().run(); Object response; synchronized (this) { // All other subscribe should fail response = getResponse(); assertEquals(response.getClass(), CommandError.class); assertEquals(((CommandError) response).getRequestId(), 3); response = getResponse(); assertEquals(response.getClass(), CommandError.class); assertEquals(((CommandError) response).getRequestId(), 4); response = getResponse(); assertEquals(response.getClass(), CommandError.class); assertEquals(((CommandError) response).getRequestId(), 5); // We should receive response for 1st producer, since it was not cancelled by the close Awaitility.await().untilAsserted(() -> assertFalse(channel.outboundMessages().isEmpty())); assertTrue(channel.isActive()); response = getResponse(); assertEquals(response.getClass(), CommandSuccess.class); assertEquals(((CommandSuccess) response).getRequestId(), 1); } channel.finish(); }
// Deprecated HMS-table overload: resolves the Iceberg table from the Hive table descriptor and
// delegates to the snapshot-list overload to decide whether all changes since the given snapshot
// are append-only.
@Deprecated @Override public Boolean hasAppendsOnly(org.apache.hadoop.hive.ql.metadata.Table hmsTable, SnapshotContext since) { TableDesc tableDesc = Utilities.getTableDesc(hmsTable); Table table = IcebergTableUtil.getTable(conf, tableDesc.getProperties()); return hasAppendsOnly(table.snapshots(), since); }
// With a null "since" snapshot and a delete snapshot present, hasAppendsOnly must be false.
@Test public void testHasAppendsOnlyFalseWhenGivenSnapShotIsNullButHasNonAppend() { HiveIcebergStorageHandler storageHandler = new HiveIcebergStorageHandler(); Boolean result = storageHandler.hasAppendsOnly(asList(appendSnapshot, deleteSnapshot), null); assertThat(result, is(false)); }
// Records an event-queue processing duration (milliseconds) into the configured metric consumer.
public void updateEventQueueProcessingTime(long durationMs) { eventQueueProcessingTimeUpdater.accept(durationMs); }
// Recording 1000 ms must add one sample of value 1000 to the EventQueueProcessingTimeMs histogram.
@Test public void testUpdateEventQueueProcessingTime() { MetricsRegistry registry = new MetricsRegistry(); MockTime time = new MockTime(); try (QuorumControllerMetrics metrics = new QuorumControllerMetrics(Optional.of(registry), time, false)) { metrics.updateEventQueueProcessingTime(1000); assertMetricHistogram(registry, metricName("ControllerEventManager", "EventQueueProcessingTimeMs"), 1, 1000); } finally { registry.shutdown(); } }
// Two-phase shutdown: first request shutdown on every executor and wake any blocked ones, then
// wait for each to terminate, each wait bounded by the given duration.
public void shutdown(final Duration duration) { for (final TaskExecutor t: taskExecutors) { t.requestShutdown(); } signalTaskExecutors(); for (final TaskExecutor t: taskExecutors) { t.awaitShutdown(duration); } }
// shutdown() must both request and await shutdown on each executor with the given duration.
@Test public void shouldShutdownTaskExecutors() { final Duration duration = mock(Duration.class); taskManager.shutdown(duration); verify(taskExecutor).requestShutdown(); verify(taskExecutor).awaitShutdown(duration); }
// Merges a CREATE TABLE LIKE source schema with derived columns, watermark specs, and primary
// key according to the per-feature merging strategies, appending in that fixed order.
public Schema mergeTables( Map<FeatureOption, MergingStrategy> mergingStrategies, Schema sourceSchema, List<SqlNode> derivedColumns, List<SqlWatermark> derivedWatermarkSpecs, SqlTableConstraint derivedPrimaryKey) { SchemaBuilder schemaBuilder = new SchemaBuilder( mergingStrategies, sourceSchema, (FlinkTypeFactory) validator.getTypeFactory(), dataTypeFactory, validator, escapeExpression); schemaBuilder.appendDerivedColumns(mergingStrategies, derivedColumns); schemaBuilder.appendDerivedWatermarks(mergingStrategies, derivedWatermarkSpecs); schemaBuilder.appendDerivedPrimaryKey(derivedPrimaryKey); return schemaBuilder.build(); }
// Derived regular and computed columns must be appended after the source's generated column,
// with the computed expression serialized in escaped form ("`one` + 3").
@Test void mergeGeneratedColumns() { Schema sourceSchema = Schema.newBuilder() .column("one", DataTypes.INT()) .columnByExpression("two", "one + 1") .build(); List<SqlNode> derivedColumns = Arrays.asList( regularColumn("three", DataTypes.INT()), computedColumn("four", plus("one", "3"))); Schema mergedSchema = util.mergeTables( getDefaultMergingStrategies(), sourceSchema, derivedColumns, Collections.emptyList(), null); Schema expectedSchema = Schema.newBuilder() .column("one", DataTypes.INT()) .columnByExpression("two", "one + 1") .column("three", DataTypes.INT()) .columnByExpression("four", "`one` + 3") .build(); assertThat(mergedSchema).isEqualTo(expectedSchema); }
// Returns a future that completes when at least one coordinator sidecar is active. Completes
// immediately when one is already present or the feature is disabled. Otherwise registers a
// pending future that a scheduled task fails with NO_CPP_SIDECARS after coordinatorSidecarMaxWait;
// the completion listener cancels the timeout task and deregisters the future either way.
public synchronized ListenableFuture<?> waitForMinimumCoordinatorSidecars() { if (currentCoordinatorSidecarCount == 1 || !isCoordinatorSidecarEnabled) { return immediateFuture(null); } SettableFuture<?> future = SettableFuture.create(); coordinatorSidecarSizeFutures.add(future); // if future does not finish in wait period, complete with an exception ScheduledFuture<?> timeoutTask = executor.schedule( () -> { synchronized (this) { future.setException(new PrestoException( NO_CPP_SIDECARS, format("Insufficient active coordinator sidecar nodes. Waited %s for at least 1 coordinator sidecars, but only 0 coordinator sidecars are active", coordinatorSidecarMaxWait))); } }, coordinatorSidecarMaxWait.toMillis(), MILLISECONDS); // remove future if finished (e.g., canceled, timed out) future.addListener(() -> { timeoutTask.cancel(true); removeCoordinatorSidecarFuture(future); }, executor); return future; }
// The wait future must stay pending until a sidecar registers, then complete without timing out.
@Test(timeOut = 60_000) public void testWaitForMinimumCoordinatorSidecars() throws InterruptedException { ListenableFuture<?> coordinatorSidecarsFuture = waitForMinimumCoordinatorSidecars(); assertFalse(monitor.hasRequiredCoordinatorSidecars()); assertFalse(coordinatorSidecarsTimeout.get()); assertEquals(minCoordinatorSidecarsLatch.getCount(), 1); addCoordinatorSidecar(nodeManager); minCoordinatorSidecarsLatch.await(1, SECONDS); assertTrue(coordinatorSidecarsFuture.isDone()); assertFalse(coordinatorSidecarsTimeout.get()); assertTrue(monitor.hasRequiredCoordinatorSidecars()); }
// Separates the star-tree identified by the given builder config into the output directory and
// returns its document count; returns -1 (with an INFO log) when no tree matches the config.
public int separate(File starTreeOutputDir, StarTreeV2BuilderConfig builderConfig) throws IOException { int treeIndex = _builderConfigList.indexOf(builderConfig); if (treeIndex == -1) { LOGGER.info("No existing star-tree found for config: {}", builderConfig); return -1; } LOGGER.info("Separating star-tree for config: {}", builderConfig); separate(starTreeOutputDir, treeIndex); return _numDocsList.get(treeIndex); }
// Separating a packaged segment must emit the star-tree index file plus one forward-index file
// per dimension in the split order and per function-column pair.
@Test public void testSeparate() throws Exception { URL segmentUrl = getClass().getClassLoader().getResource(SEGMENT_PATH); assertNotNull(segmentUrl); File segmentDir = new File(segmentUrl.getFile()); try (StarTreeIndexSeparator separator = new StarTreeIndexSeparator( new File(segmentDir, StarTreeV2Constants.INDEX_MAP_FILE_NAME), new File(segmentDir, StarTreeV2Constants.INDEX_FILE_NAME), new SegmentMetadataImpl(segmentDir).getStarTreeV2MetadataList())) { separator.separate(TEMP_DIR, BUILDER_CONFIG); } String[] fileNames = TEMP_DIR.list(); assertNotNull(fileNames); Set<String> fileNameSet = new HashSet<>(Arrays.asList(fileNames)); assertTrue(fileNameSet.contains(STAR_TREE_INDEX_FILE_NAME)); BUILDER_CONFIG.getDimensionsSplitOrder() .forEach(dimension -> assertTrue(fileNameSet.contains(dimension + UNSORTED_SV_FORWARD_INDEX_FILE_EXTENSION))); BUILDER_CONFIG.getFunctionColumnPairs() .forEach(dimension -> assertTrue(fileNameSet.contains(dimension + RAW_SV_FORWARD_INDEX_FILE_EXTENSION))); }
/**
 * Strips a trailing ".properties" extension (matched case-insensitively) from a
 * namespace name; any other name is returned unchanged.
 *
 * @param namespaceName the raw namespace name, must not be null
 * @return the namespace name without its ".properties" suffix, or the input as-is
 */
public String filterNamespaceName(String namespaceName) {
  // Use Locale.ROOT so matching is locale-independent: with the default locale
  // (e.g. Turkish, where 'I' lowercases to dotless 'ı'), "A.PROPERTIES" would
  // fail the endsWith check.
  if (namespaceName.toLowerCase(java.util.Locale.ROOT).endsWith(".properties")) {
    int dotIndex = namespaceName.lastIndexOf(".");
    return namespaceName.substring(0, dotIndex);
  }
  return namespaceName;
}
/** filterNamespaceName must strip a trailing ".properties" suffix. */
@Test
public void testFilterNamespaceName() throws Exception {
  String nameWithSuffix = "a.properties";
  assertEquals("a", namespaceUtil.filterNamespaceName(nameWithSuffix));
}
/**
 * Decides whether the described post job should run: it is skipped (with a
 * debug log) when its required settings are not all present.
 *
 * @param descriptor the post job's declared requirements
 * @return true when all required properties are configured
 */
public boolean shouldExecute(DefaultPostJobDescriptor descriptor) {
  boolean requiredPropertiesPresent = settingsCondition(descriptor);
  if (!requiredPropertiesPresent) {
    LOG.debug("'{}' skipped because one of the required properties is missing", descriptor.name());
  }
  return requiredPropertiesPresent;
}
/** A post job that declares no requirements must always be executed. */
@Test
public void should_run_analyzer_with_no_metadata() {
  DefaultPostJobDescriptor emptyDescriptor = new DefaultPostJobDescriptor();
  optimizer = new PostJobOptimizer(settings.asConfig());
  assertThat(optimizer.shouldExecute(emptyDescriptor)).isTrue();
}
// CLI entry point: parses options, validates the three positional arguments
// (resolver path, source root, destination path), then runs the schema format
// translation. Exits non-zero on any argument or I/O precondition failure.
public static void main(String[] args) throws Exception {
  try {
    final CommandLineParser parser = new GnuParser();
    CommandLine cl = parser.parse(OPTIONS, args);
    if (cl.hasOption('h')) {
      help();
      System.exit(0);
    }
    // Source/destination formats default to pdsc -> pdl translation.
    String sourceFormat = cl.getOptionValue('s', SchemaParser.FILETYPE).trim();
    String destFormat = cl.getOptionValue('d', PdlSchemaParser.FILETYPE).trim();
    boolean keepOriginal = cl.hasOption('o');
    String preserveSourceCmd = cl.getOptionValue('p');
    boolean skipVerification = cl.hasOption('k');
    boolean forcePdscFullyQualifiedNames = cl.hasOption('q');
    String[] cliArgs = cl.getArgs();
    if (cliArgs.length != 3) {
      LOGGER.error("Missing arguments, expected 3 ([resolverPath] [sourceRoot] [destinationPath]), got " + cliArgs.length);
      help();
      System.exit(1);
    }
    int i = 0;
    // The resolver-path argument may be a file reference containing the actual paths.
    String resolverPaths = RestLiToolsUtils.readArgFromFileIfNeeded(cliArgs[i++]);
    String sourcePath = cliArgs[i++];
    String destPath = cliArgs[i++];
    File sourceDir = new File(sourcePath);
    File destDir = new File(destPath);
    if (!sourceDir.exists() || !sourceDir.canRead()) {
      LOGGER.error("Source directory does not exist or cannot be read: " + sourceDir.getAbsolutePath());
      System.exit(1);
    }
    // Create the destination tree up front so writability can be checked.
    destDir.mkdirs();
    if (!destDir.exists() || !destDir.canWrite()) {
      LOGGER.error("Destination directory does not exist or cannot be written to: " + destDir.getAbsolutePath());
      System.exit(1);
    }
    SchemaFormatTranslator translator =
        new SchemaFormatTranslator(resolverPaths, sourceDir, destDir, sourceFormat, destFormat,
            keepOriginal, preserveSourceCmd, skipVerification, forcePdscFullyQualifiedNames);
    translator.translateFiles();
  } catch (ParseException e) {
    LOGGER.error("Invalid arguments: " + e.getMessage());
    help();
    System.exit(1);
  }
}
// Runs the translator CLI over SOURCE_ROOT into a temp dir (keeping originals
// via -o) and verifies each translated schema resolves to the same thing as
// its source.
@Test(dataProvider = "fullClassName")
public void testTranslatePdscToPdl(String packageName, String className) throws Exception {
  String temp = Files.createTempDirectory("restli").toFile().getAbsolutePath();
  SchemaFormatTranslator.main(new String[]{"-o", RESOLVER_DIR, SOURCE_ROOT, temp});
  MultiFormatDataSchemaResolver sourceResolver = MultiFormatDataSchemaResolver.withBuiltinFormats(RESOLVER_DIR);
  // Resolver path lists are separated with the platform path separator.
  MultiFormatDataSchemaResolver translatedResolver =
      MultiFormatDataSchemaResolver.withBuiltinFormats(temp + File.pathSeparator + EXTERNAL_RESOURCES);
  assertSameSchemas(packageName + "." + className, sourceResolver, translatedResolver);
}
/**
 * Returns whether the given metadata string denotes a LMQ (light message
 * queue), i.e. it is non-null and starts with the LMQ prefix.
 */
public static boolean isLmq(String lmqMetaData) {
  if (lmqMetaData == null) {
    return false;
  }
  return lmqMetaData.startsWith(LMQ_PREFIX);
}
// Null and un-prefixed names are not LMQ; names starting with %LMQ% are.
@Test
public void testIsLmq() {
  String testLmq = null;
  assertThat(MixAll.isLmq(testLmq)).isFalse();
  testLmq = "lmq";
  assertThat(MixAll.isLmq(testLmq)).isFalse();
  testLmq = "%LMQ%queue123";
  assertThat(MixAll.isLmq(testLmq)).isTrue();
  testLmq = "%LMQ%GID_TEST";
  assertThat(MixAll.isLmq(testLmq)).isTrue();
}
/**
 * Builds the share-fetch context for a request. Full requests (INITIAL_EPOCH or
 * FINAL_EPOCH) tear down any existing session — FINAL closes it, INITIAL starts
 * a fresh one; any other epoch updates the matching cached session, whose epoch
 * must equal the request's epoch.
 *
 * @throws org.apache.kafka.common.errors.InvalidRequestException on fetch data in a final
 *         request or acknowledge data in an initial request
 * @throws org.apache.kafka.common.errors.ShareSessionNotFoundException when the referenced
 *         session does not exist or a new one cannot be created
 * @throws org.apache.kafka.common.errors.InvalidShareSessionEpochException on an epoch mismatch
 */
public ShareFetchContext newContext(String groupId, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData,
                                    List<TopicIdPartition> toForget, ShareRequestMetadata reqMetadata, Boolean isAcknowledgeDataPresent) {
    ShareFetchContext context;
    // TopicPartition with maxBytes as 0 should not be added in the cachedPartitions
    Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchDataWithMaxBytes = new HashMap<>();
    shareFetchData.forEach((tp, sharePartitionData) -> {
        if (sharePartitionData.maxBytes > 0) shareFetchDataWithMaxBytes.put(tp, sharePartitionData);
    });
    // If the request's epoch is FINAL_EPOCH or INITIAL_EPOCH, we should remove the existing sessions. Also, start a
    // new session in case it is INITIAL_EPOCH. Hence, we need to treat them as special cases.
    if (reqMetadata.isFull()) {
        ShareSessionKey key = shareSessionKey(groupId, reqMetadata.memberId());
        if (reqMetadata.epoch() == ShareRequestMetadata.FINAL_EPOCH) {
            // If the epoch is FINAL_EPOCH, don't try to create a new session.
            // A final request carrying fetch data is malformed.
            if (!shareFetchDataWithMaxBytes.isEmpty()) {
                throw Errors.INVALID_REQUEST.exception();
            }
            if (cache.remove(key) == null) {
                log.error("Share session error for {}: no such share session found", key);
                throw Errors.SHARE_SESSION_NOT_FOUND.exception();
            } else {
                log.debug("Removed share session with key " + key);
            }
            context = new FinalContext();
        } else {
            // INITIAL_EPOCH: acknowledgements are only valid on subsequent requests.
            if (isAcknowledgeDataPresent) {
                log.error("Acknowledge data present in Initial Fetch Request for group {} member {}", groupId, reqMetadata.memberId());
                throw Errors.INVALID_REQUEST.exception();
            }
            // Drop any stale session for this member before creating a new one.
            if (cache.remove(key) != null) {
                log.debug("Removed share session with key {}", key);
            }
            ImplicitLinkedHashCollection<CachedSharePartition> cachedSharePartitions =
                new ImplicitLinkedHashCollection<>(shareFetchDataWithMaxBytes.size());
            shareFetchDataWithMaxBytes.forEach((topicIdPartition, reqData) ->
                cachedSharePartitions.mustAdd(new CachedSharePartition(topicIdPartition, reqData, false)));
            ShareSessionKey responseShareSessionKey = cache.maybeCreateSession(groupId, reqMetadata.memberId(),
                time.milliseconds(), cachedSharePartitions);
            if (responseShareSessionKey == null) {
                log.error("Could not create a share session for group {} member {}", groupId, reqMetadata.memberId());
                throw Errors.SHARE_SESSION_NOT_FOUND.exception();
            }
            context = new ShareSessionContext(reqMetadata, shareFetchDataWithMaxBytes);
            log.debug("Created a new ShareSessionContext with key {} isSubsequent {} returning {}. A new share " +
                "session will be started.", responseShareSessionKey, false,
                partitionsToLogString(shareFetchDataWithMaxBytes.keySet()));
        }
    } else {
        // We update the already existing share session.
        // Session mutation happens under the cache lock to keep epoch bump,
        // touch and partition updates atomic with respect to other requests.
        synchronized (cache) {
            ShareSessionKey key = shareSessionKey(groupId, reqMetadata.memberId());
            ShareSession shareSession = cache.get(key);
            if (shareSession == null) {
                log.error("Share session error for {}: no such share session found", key);
                throw Errors.SHARE_SESSION_NOT_FOUND.exception();
            }
            if (shareSession.epoch != reqMetadata.epoch()) {
                log.debug("Share session error for {}: expected epoch {}, but got {} instead", key,
                    shareSession.epoch, reqMetadata.epoch());
                throw Errors.INVALID_SHARE_SESSION_EPOCH.exception();
            }
            Map<ShareSession.ModifiedTopicIdPartitionType, List<TopicIdPartition>> modifiedTopicIdPartitions =
                shareSession.update(shareFetchDataWithMaxBytes, toForget);
            cache.touch(shareSession, time.milliseconds());
            shareSession.epoch = ShareRequestMetadata.nextEpoch(shareSession.epoch);
            log.debug("Created a new ShareSessionContext for session key {}, epoch {}: " +
                "added {}, updated {}, removed {}", shareSession.key(), shareSession.epoch,
                partitionsToLogString(modifiedTopicIdPartitions.get(ShareSession.ModifiedTopicIdPartitionType.ADDED)),
                partitionsToLogString(modifiedTopicIdPartitions.get(ShareSession.ModifiedTopicIdPartitionType.UPDATED)),
                partitionsToLogString(modifiedTopicIdPartitions.get(ShareSession.ModifiedTopicIdPartitionType.REMOVED))
            );
            context = new ShareSessionContext(reqMetadata, shareSession);
        }
    }
    return context;
}
// End-to-end exercise of SharePartitionManager.newContext(): create an initial
// session, reject bad epochs and unknown member ids, continue the session,
// generate throttled responses, and close it with FINAL_EPOCH.
@Test
public void testNewContext() {
    Time time = new MockTime();
    ShareSessionCache cache = new ShareSessionCache(10, 1000);
    SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder()
        .withCache(cache).withTime(time).build();
    Map<Uuid, String> topicNames = new HashMap<>();
    Uuid tpId0 = Uuid.randomUuid();
    Uuid tpId1 = Uuid.randomUuid();
    topicNames.put(tpId0, "foo");
    topicNames.put(tpId1, "bar");
    TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0));
    TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1));
    TopicIdPartition tp2 = new TopicIdPartition(tpId1, new TopicPartition("bar", 0));
    TopicIdPartition tp3 = new TopicIdPartition(tpId1, new TopicPartition("bar", 1));
    String groupId = "grp";
    // Create a new share session with an initial share fetch request
    Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> reqData2 = new LinkedHashMap<>();
    reqData2.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), 100));
    reqData2.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), 100));
    ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH);
    ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false);
    assertEquals(ShareSessionContext.class, context2.getClass());
    assertFalse(((ShareSessionContext) context2).isSubsequent());
    // The new context must carry exactly the requested partitions.
    ((ShareSessionContext) context2).shareFetchData().forEach((topicIdPartition, sharePartitionData) -> {
        assertTrue(reqData2.containsKey(topicIdPartition));
        assertEquals(reqData2.get(topicIdPartition), sharePartitionData);
    });
    LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData2 = new LinkedHashMap<>();
    respData2.put(tp0, new ShareFetchResponseData.PartitionData().setPartitionIndex(0));
    respData2.put(tp1, new ShareFetchResponseData.PartitionData().setPartitionIndex(1));
    ShareFetchResponse resp2 =
        context2.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData2);
    assertEquals(Errors.NONE, resp2.error());
    assertEquals(respData2, resp2.responseData(topicNames));
    ShareSessionKey shareSessionKey2 = new ShareSessionKey(groupId, reqMetadata2.memberId());
    // Test trying to create a new session with an invalid epoch
    assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2,
        EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true));
    // Test trying to create a new session with a non-existent session key
    Uuid memberId4 = Uuid.randomUuid();
    assertThrows(ShareSessionNotFoundException.class, () -> sharePartitionManager.newContext(groupId, reqData2,
        EMPTY_PART_LIST, new ShareRequestMetadata(memberId4, 1), true));
    // Continue the first share session we created.
    ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptyMap(),
        EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true);
    assertEquals(ShareSessionContext.class, context5.getClass());
    assertTrue(((ShareSessionContext) context5).isSubsequent());
    ShareSessionContext shareSessionContext5 = (ShareSessionContext) context5;
    // Cached partitions must still match the original request data.
    synchronized (shareSessionContext5.session()) {
        shareSessionContext5.session().partitionMap().forEach(cachedSharePartition -> {
            TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(),
                new TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition()));
            ShareFetchRequest.SharePartitionData data = cachedSharePartition.reqData();
            assertTrue(reqData2.containsKey(topicIdPartition));
            assertEquals(reqData2.get(topicIdPartition), data);
        });
    }
    // A subsequent fetch with unchanged data produces an empty incremental response.
    ShareFetchResponse resp5 = context5.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData2);
    assertEquals(Errors.NONE, resp5.error());
    assertEquals(0, resp5.responseData(topicNames).size());
    // Test setting an invalid share session epoch.
    assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2,
        EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true));
    // Test generating a throttled response for a subsequent share session
    ShareFetchContext context7 = sharePartitionManager.newContext(groupId, Collections.emptyMap(),
        EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true);
    ShareFetchResponse resp7 = context7.throttleResponse(100);
    assertEquals(Errors.NONE, resp7.error());
    assertEquals(100, resp7.throttleTimeMs());
    // Close the subsequent share session.
    ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptyMap(),
        EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true);
    assertEquals(FinalContext.class, context8.getClass());
    // FINAL_EPOCH removed the session from the cache.
    assertEquals(0, cache.size());
    LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData8 = new LinkedHashMap<>();
    respData8.put(tp2, new ShareFetchResponseData.PartitionData().setPartitionIndex(0));
    respData8.put(tp3, new ShareFetchResponseData.PartitionData().setPartitionIndex(1));
    ShareFetchResponse resp8 = context8.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData8);
    assertEquals(Errors.NONE, resp8.error());
}
@Override
public InterpreterResult interpret(String st, InterpreterContext context) {
  // Delegates statement execution to the shared helper, bound to this
  // interpreter's session.
  return helper.interpret(session, st, context);
}
@Test void should_error_and_display_stack_trace() { // Given String query = "@consistency=THREE\n" + "SELECT * FROM zeppelin.users LIMIT 3;"; // When final InterpreterResult actual = interpreter.interpret(query, intrContext); // Then assertEquals(Code.ERROR, actual.code()); assertTrue( actual.message().get(0).getData().contains("All 1 node(s) tried for the query failed"), actual.message().get(0).getData()); }
/**
 * Builds a NettySourceConfig from a generic key/value map by round-tripping
 * through JSON, so standard Jackson binding rules apply to every field.
 *
 * @param map raw configuration key/value pairs
 * @return the bound configuration object
 * @throws IOException when the map cannot be bound to NettySourceConfig
 */
public static NettySourceConfig load(Map<String, Object> map) throws IOException {
  ObjectMapper mapper = new ObjectMapper();
  String json = mapper.writeValueAsString(map);
  return mapper.readValue(json, NettySourceConfig.class);
}
// Loads a TCP netty source config from a plain map and checks all four fields bind.
@Test
public void testNettyTcpConfigLoadWithMap() throws IOException {
  Map<String, Object> map = new HashMap<>();
  map.put("type", TCP);
  map.put("host", LOCALHOST);
  map.put("port", 10999);
  map.put("numberOfThreads", 1);
  NettySourceConfig nettySourceConfig = NettySourceConfig.load(map);
  assertNotNull(nettySourceConfig);
  assertEquals(TCP, nettySourceConfig.getType());
  assertEquals(LOCALHOST, nettySourceConfig.getHost());
  assertEquals(10999, nettySourceConfig.getPort());
  assertEquals(1, nettySourceConfig.getNumberOfThreads());
}
/**
 * Reads a 32-bit unsigned big-endian value from the buffer's current position,
 * advancing the position by 4 bytes and forcing big-endian order on the buffer.
 *
 * @throws BufferUnderflowException when fewer than 4 bytes remain
 */
public static long readUint32BE(ByteBuffer buf) throws BufferUnderflowException {
    // Mask the signed int up to its unsigned long value.
    return buf.order(ByteOrder.BIG_ENDIAN).getInt() & 0xFFFFFFFFL;
}
// Exercises the byte-array overload ByteUtils.readUint32BE(byte[], int): a
// negative offset must raise ArrayIndexOutOfBoundsException.
@Test(expected = ArrayIndexOutOfBoundsException.class)
public void testReadUint32BEThrowsException3() {
  ByteUtils.readUint32BE(new byte[]{1, 2, 3, 4, 5}, -1);
}
@Override // add synchronized to avoid process 2 or more stmts at same time public synchronized ShowResultSet process(List<AlterClause> alterClauses, Database dummyDb, OlapTable dummyTbl) throws UserException { Preconditions.checkArgument(alterClauses.size() == 1); AlterClause alterClause = alterClauses.get(0); alterClause.accept(SystemHandler.Visitor.getInstance(), null); return null; }
// Decommissioning the only backend must be rejected: the remaining BEs could
// not satisfy the replica requirement.
@Test
public void testDecommissionBackendsReplicasRequirement() throws UserException {
  List<String> hostAndPorts = Lists.newArrayList("host1:123");
  DecommissionBackendClause decommissionBackendClause = new DecommissionBackendClause(hostAndPorts);
  Analyzer.analyze(new AlterSystemStmt(decommissionBackendClause), new ConnectContext());
  expectedException.expect(RuntimeException.class);
  expectedException.expectMessage("It will cause insufficient BE number");
  systemHandler.process(Lists.newArrayList(decommissionBackendClause), null, null);
}
// Reads an int[] field from data written by an incompatible class version,
// delegating to the default reader when the stored field type matches.
@Override
@Nullable
public int[] readIntArray(@Nonnull String fieldName) throws IOException {
  return readIncompatibleField(fieldName, INT_ARRAY, super::readIntArray);
}
// Reading a field stored as "byte" through readIntArray must fail with
// IncompatibleClassChangeError rather than silently coercing the type.
@Test(expected = IncompatibleClassChangeError.class)
public void testReadIntArray_IncompatibleClass() throws Exception {
  reader.readIntArray("byte");
}
// Federation router entry point: fans refreshSuperUserGroupsConfiguration out
// to the active sub-clusters (or the one named in the request), recording
// success/failure metrics and latency.
@Override
public RefreshSuperUserGroupsConfigurationResponse refreshSuperUserGroupsConfiguration(
    RefreshSuperUserGroupsConfigurationRequest request)
    throws StandbyException, YarnException, IOException {
  // parameter verification.
  if (request == null) {
    routerMetrics.incrRefreshSuperUserGroupsConfigurationFailedRetrieved();
    RouterServerUtil.logAndThrowException("Missing RefreshSuperUserGroupsConfiguration request.", null);
  }
  // call refreshSuperUserGroupsConfiguration of activeSubClusters.
  try {
    long startTime = clock.getTime();
    RMAdminProtocolMethod remoteMethod = new RMAdminProtocolMethod(
        new Class[] {RefreshSuperUserGroupsConfigurationRequest.class}, new Object[] {request});
    String subClusterId = request.getSubClusterId();
    Collection<RefreshSuperUserGroupsConfigurationResponse> refreshSuperUserGroupsConfResps =
        remoteMethod.invokeConcurrent(this, RefreshSuperUserGroupsConfigurationResponse.class, subClusterId);
    // A non-empty collection means at least one sub-cluster refreshed successfully.
    if (CollectionUtils.isNotEmpty(refreshSuperUserGroupsConfResps)) {
      long stopTime = clock.getTime();
      routerMetrics.succeededRefreshSuperUserGroupsConfRetrieved(stopTime - startTime);
      return RefreshSuperUserGroupsConfigurationResponse.newInstance();
    }
  } catch (YarnException e) {
    routerMetrics.incrRefreshSuperUserGroupsConfigurationFailedRetrieved();
    RouterServerUtil.logAndThrowException(e,
        "Unable to refreshSuperUserGroupsConfiguration due to exception. " + e.getMessage());
  }
  // Fall through: no sub-cluster responded.
  routerMetrics.incrRefreshSuperUserGroupsConfigurationFailedRetrieved();
  throw new YarnException("Unable to refreshSuperUserGroupsConfiguration.");
}
// Refresh against an existing sub-cluster succeeds; a non-existent sub-cluster
// id must be rejected with a descriptive error.
@Test
public void testSC1RefreshSuperUserGroupsConfiguration() throws Exception {
  // case 1, test the existing subCluster (SC-1).
  String existSubCluster = "SC-1";
  RefreshSuperUserGroupsConfigurationRequest request =
      RefreshSuperUserGroupsConfigurationRequest.newInstance(existSubCluster);
  RefreshSuperUserGroupsConfigurationResponse response =
      interceptor.refreshSuperUserGroupsConfiguration(request);
  assertNotNull(response);
  // case 2, test the non-exist subCluster.
  String notExistsSubCluster = "SC-NON";
  RefreshSuperUserGroupsConfigurationRequest request1 =
      RefreshSuperUserGroupsConfigurationRequest.newInstance(notExistsSubCluster);
  LambdaTestUtils.intercept(Exception.class,
      "subClusterId = SC-NON is not an active subCluster.",
      () -> interceptor.refreshSuperUserGroupsConfiguration(request1));
}
/**
 * Evaluates staged predictions of the boosted ensemble on a data frame:
 * entry [i][j] is the prediction for row j using trees 0..i (intercept plus
 * the shrunken outputs of the first i+1 trees).
 *
 * @param data the data frame to score
 * @return one prediction row per tree stage
 */
public double[][] test(DataFrame data) {
    DataFrame design = formula.x(data);
    int rows = design.nrow();
    int numTrees = trees.length;
    double[][] staged = new double[numTrees][rows];
    for (int j = 0; j < rows; j++) {
        Tuple row = design.get(j);
        // Accumulate shrunken tree outputs on top of the intercept b.
        double acc = b;
        for (int i = 0; i < numTrees; i++) {
            acc += shrinkage * trees[i].predict(row);
            staged[i][j] = acc;
        }
    }
    return staged;
}
// Regression check: LAD-loss gradient boosting on the puma8nh dataset must
// reproduce the expected error of 3.2486.
@Test
public void testPuma8nhLAD() {
  test(Loss.lad(), "puma8nh", Puma8NH.formula, Puma8NH.data, 3.2486);
}
// Releases GPU resources when a container completes: returns its GPUs to the
// allocator and removes the container's devices cgroup. Returns null because
// no follow-up privileged operations are needed.
@Override
public synchronized List<PrivilegedOperation> postComplete(
    ContainerId containerId) throws ResourceHandlerException {
  gpuAllocator.unassignGpus(containerId);
  cGroupsHandler.deleteCGroup(CGroupsHandler.CGroupController.DEVICES,
      containerId.toString());
  return null;
}
// Walks a full GPU allocation lifecycle on a 4-GPU node: partial allocation,
// over-allocation failure, deny-list verification per container, and resource
// release (with cgroup cleanup) on completion.
@Test
public void testAllocation() throws Exception {
  initializeGpus();
  //Start container 1, asks 3 containers --> Only device=4 will be blocked.
  startContainerWithGpuRequests(1, 3);
  verifyDeniedDevices(getContainerId(1), Collections.singletonList(new GpuDevice(3, 4)));
  /* Start container 2, asks 2 containers. Excepted to fail */
  boolean failedToAllocate = false;
  try {
    startContainerWithGpuRequests(2, 2);
  } catch (ResourceHandlerException e) {
    failedToAllocate = true;
  }
  assertTrue("Container allocation is expected to fail!", failedToAllocate);
  // Start container 3, ask 1 container, succeeded
  // devices = 0/1/3 will be blocked
  startContainerWithGpuRequests(3, 1);
  verifyDeniedDevices(getContainerId(3),
      Arrays.asList(new GpuDevice(0, 0), new GpuDevice(1, 1), new GpuDevice(2, 3)));
  // Start container 4, ask 0 container, succeeded
  // --> All devices will be blocked
  startContainerWithGpuRequests(4, 0);
  verifyDeniedDevices(getContainerId(4),
      Arrays.asList(new GpuDevice(0, 0), new GpuDevice(1, 1), new GpuDevice(2, 3), new GpuDevice(3, 4)));
  // Completing a container must free its GPUs and delete its cgroup.
  gpuResourceHandler.postComplete(getContainerId(1));
  verifyCgroupsDeletedForContainer(1);
  verifyNumberOfAvailableGpus(3, gpuResourceHandler);
  gpuResourceHandler.postComplete(getContainerId(3));
  verifyCgroupsDeletedForContainer(3);
  verifyNumberOfAvailableGpus(4, gpuResourceHandler);
}
/**
 * Builds a square sparse matrix whose diagonal equals the given vector and
 * whose off-diagonal entries are all zero.
 *
 * @param diagonal the values to place on the diagonal
 * @return a diagonal DenseSparseMatrix of size diagonal.size() x diagonal.size()
 */
public static DenseSparseMatrix createDiagonal(SGDVector diagonal) {
    int dim = diagonal.size();
    SparseVector[] rows = new SparseVector[dim];
    for (int i = 0; i < dim; i++) {
        // Row i carries a single non-zero at column i.
        rows[i] = new SparseVector(dim, new int[]{i}, new double[]{diagonal.get(i)});
    }
    return new DenseSparseMatrix(rows);
}
// createDiagonal must place the vector on the diagonal with zeros elsewhere,
// for both integral and irrational values.
@Test
public void testCreateDiagonal() {
  DenseSparseMatrix diagonal = DenseSparseMatrix.createDiagonal(new DenseVector(new double[] {1.0, 2.0}));
  assertMatrixEquals(new DenseMatrix(new double[][]{new double[]{1.0, 0.0}, new double[]{0.0, 2.0}}),diagonal);
  diagonal = DenseSparseMatrix.createDiagonal(new DenseVector(new double[] {1.618033988749894, Math.E, Math.PI}));
  assertMatrixEquals(new DenseMatrix(new double[][]{new double[]{1.618033988749894, 0.0, 0.0}, new double[]{0.0, 2.718281828459045, 0.0}, new double[]{0.0, 0.0, 3.141592653589793}}),diagonal);
}
/**
 * Disables instance creation through JDK Unsafe; afterwards, classes without a
 * usable constructor need an InstanceCreator or TypeAdapter to be deserialized.
 *
 * @return this builder, for chaining
 */
@CanIgnoreReturnValue
public GsonBuilder disableJdkUnsafe() {
  this.useJdkUnsafe = false;
  return this;
}
// With JDK Unsafe disabled, deserializing a class lacking a no-args
// constructor must fail with a descriptive JsonIOException.
@Test
public void testDisableJdkUnsafe() {
  Gson gson = new GsonBuilder().disableJdkUnsafe().create();
  var e =
      assertThrows(
          JsonIOException.class, () -> gson.fromJson("{}", ClassWithoutNoArgsConstructor.class));
  assertThat(e)
      .hasMessageThat()
      .isEqualTo(
          "Unable to create instance of class"
              + " com.google.gson.GsonBuilderTest$ClassWithoutNoArgsConstructor; usage of JDK"
              + " Unsafe is disabled. Registering an InstanceCreator or a TypeAdapter for this"
              + " type, adding a no-args constructor, or enabling usage of JDK Unsafe may fix"
              + " this problem.");
}
/**
 * Uses column statistics to decide whether a row group can be skipped: returns
 * true only when the statistics prove no row can match the predicate.
 *
 * @param pred the filter predicate, must not be null
 * @param columns column chunk metadata for the row group, must not be null
 * @return true when the row group can be dropped entirely
 */
public static boolean canDrop(FilterPredicate pred, List<ColumnChunkMetaData> columns) {
  Objects.requireNonNull(pred, "pred cannot be null");
  Objects.requireNonNull(columns, "columns cannot be null");
  StatisticsFilter statisticsFilter = new StatisticsFilter(columns);
  return pred.accept(statisticsFilter);
}
// or(...) can be dropped only when BOTH branches are droppable.
@Test
public void testOr() {
  FilterPredicate yes = eq(intColumn, 9);
  FilterPredicate no = eq(doubleColumn, 50D);
  assertTrue(canDrop(or(yes, yes), columnMetas));
  assertFalse(canDrop(or(yes, no), columnMetas));
  assertFalse(canDrop(or(no, yes), columnMetas));
  assertFalse(canDrop(or(no, no), columnMetas));
}
// Creates an InternalLogger backed by a Log4j2 logger of the given name.
@Override
public InternalLogger newInstance(String name) {
  return new Log4J2Logger(LogManager.getLogger(name));
}
/** The factory must produce Log4J2Logger instances carrying the requested name. */
@Test
public void testCreation() {
  InternalLogger created = Log4J2LoggerFactory.INSTANCE.newInstance("foo");
  assertTrue(created instanceof Log4J2Logger);
  assertEquals("foo", created.name());
}
/**
 * Runs a docker client command as a privileged operation and returns its
 * trimmed output (possibly null or empty).
 *
 * @param disableFailureLogging suppresses failure logging on the operation when true
 * @throws ContainerExecutionException wrapping any privileged-operation failure
 *         with its exit code and captured output
 */
public static String executeDockerCommand(DockerCommand dockerCommand,
    String containerId, Map<String, String> env,
    PrivilegedOperationExecutor privilegedOperationExecutor,
    boolean disableFailureLogging, Context nmContext)
    throws ContainerExecutionException {
  PrivilegedOperation dockerOp = dockerCommand.preparePrivilegedOperation(
      dockerCommand, containerId, env, nmContext);
  if (disableFailureLogging) {
    dockerOp.disableFailureLogging();
  }
  LOG.debug("Running docker command: {}", dockerCommand);
  try {
    String output = privilegedOperationExecutor
        .executePrivilegedOperation(null, dockerOp, null, env, true, false);
    // Strip surrounding whitespace from non-empty output only.
    if (output == null || output.isEmpty()) {
      return output;
    }
    return output.trim();
  } catch (PrivilegedOperationException e) {
    throw new ContainerExecutionException("Docker operation failed",
        e.getExitCode(), e.getOutput(), e.getErrorOutput());
  }
}
// A docker kill with SIGQUIT must produce exactly one RUN_DOCKER_CMD privileged
// operation whose command file names the container and the signal.
@Test
public void testExecuteDockerKillSIGQUIT() throws Exception {
  DockerKillCommand dockerKillCommand =
      new DockerKillCommand(MOCK_CONTAINER_ID)
          .setSignal(ContainerExecutor.Signal.QUIT.name());
  DockerCommandExecutor.executeDockerCommand(dockerKillCommand, MOCK_CONTAINER_ID, env,
      mockExecutor, false, nmContext);
  List<PrivilegedOperation> ops = MockPrivilegedOperationCaptor
      .capturePrivilegedOperations(mockExecutor, 1, true);
  List<String> dockerCommands = getValidatedDockerCommands(ops);
  assertEquals(1, ops.size());
  assertEquals(PrivilegedOperation.OperationType.RUN_DOCKER_CMD.name(),
      ops.get(0).getOperationType().name());
  assertEquals(4, dockerCommands.size());
  assertEquals("[docker-command-execution]", dockerCommands.get(0));
  assertEquals("  docker-command=kill", dockerCommands.get(1));
  assertEquals("  name=" + MOCK_CONTAINER_ID, dockerCommands.get(2));
  assertEquals("  signal=" + ContainerExecutor.Signal.QUIT.name(), dockerCommands.get(3));
}
/**
 * Closes the given resource without propagating failures: a null argument is
 * tolerated and any IOException is logged at FINE level and swallowed.
 */
public static void closeQuietly(Closeable closeable) {
    if (closeable == null) {
        return;
    }
    try {
        closeable.close();
    } catch (IOException e) {
        // Best-effort close: record the failure but never rethrow.
        LOGGER.log(Level.FINE, e.toString(), e);
    }
}
// closeQuietly must tolerate null, actually close the resource, and be safe to
// call again on an already-closed resource.
@Test
public void closeQuietlyTest() {
  IOUtils.closeQuietly(null);
  DummyCloseable dummyCloseable = new DummyCloseable();
  IOUtils.closeQuietly(dummyCloseable);
  Assert.assertTrue(dummyCloseable.closed);
  IOUtils.closeQuietly(dummyCloseable);
}
// Convenience overload: validates the table config against the schema with no
// extra validation-type filter (delegates to the three-argument variant).
public static void validate(TableConfig tableConfig, @Nullable Schema schema) {
  validate(tableConfig, schema, null);
}
// Dimension table validation: must reject REALTIME type, a missing schema, and
// a schema without a primary key; must accept an OFFLINE table with a primary key.
@Test
public void validateDimensionTableConfig() {
  // dimension table with REALTIME type (should be OFFLINE)
  Schema schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME)
      .addSingleValueDimension(TIME_COLUMN, FieldSpec.DataType.STRING).build();
  TableConfig tableConfig = new TableConfigBuilder(TableType.REALTIME).setTableName(TABLE_NAME).setIsDimTable(true)
      .setTimeColumnName(TIME_COLUMN).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail with a Dimension table of type REALTIME");
  } catch (IllegalStateException e) {
    // expected
  }
  // dimension table without a schema
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setIsDimTable(true)
      .setTimeColumnName(TIME_COLUMN).build();
  try {
    TableConfigUtils.validate(tableConfig, null);
    Assert.fail("Should fail with a Dimension table without a schema");
  } catch (IllegalStateException e) {
    // expected
  }
  // dimension table without a Primary Key
  schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME)
      .addSingleValueDimension(TIME_COLUMN, FieldSpec.DataType.STRING).build();
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setIsDimTable(true)
      .setTimeColumnName(TIME_COLUMN).build();
  try {
    TableConfigUtils.validate(tableConfig, schema);
    Assert.fail("Should fail with a Dimension without a primary key");
  } catch (IllegalStateException e) {
    // expected
  }
  // valid dimension table
  schema = new Schema.SchemaBuilder().setSchemaName(TABLE_NAME).addSingleValueDimension("myCol", FieldSpec.DataType.STRING)
      .setPrimaryKeyColumns(Lists.newArrayList("myCol")).build();
  tableConfig = new TableConfigBuilder(TableType.OFFLINE).setTableName(TABLE_NAME).setIsDimTable(true).build();
  TableConfigUtils.validate(tableConfig, schema);
}
/**
 * Renders this plot on a fresh canvas sized to the plot's bounds; when the
 * plot has a name it becomes the canvas title.
 *
 * @return a new canvas containing this plot
 */
public Canvas canvas() {
    Canvas c = new Canvas(getLowerBound(), getUpperBound());
    c.add(this);
    if (name != null) {
        c.setTitle(name);
    }
    return c;
}
// Visual smoke test: plots a parametric heart curve and opens a window
// (requires a display; primarily a manual rendering check).
@Test
public void testHeart() throws Exception {
  System.out.println("Heart");
  double[][] heart = new double[200][2];
  for (int i = 0; i < 200; i++) {
    // Parametric heart curve over t in [-pi, pi).
    double t = PI * (i - 100) / 100;
    heart[i][0] = 16 * pow(sin(t), 3);
    heart[i][1] = 13 * cos(t) - 5 * cos(2*t) - 2 * cos(3*t) - cos(4*t);
  }
  var canvas = LinePlot.of(heart, RED).canvas();
  canvas.window();
}
// Convenience overload: propagates the given connection's settings to all
// shared connections with the same name, keyed by the connection's own name.
public synchronized void synchronizeConnections( DatabaseMeta database ) {
  synchronizeConnections( database, database.getName() );
}
// Two jobs loading the same shared connection get distinct DatabaseMeta
// instances; synchronizing one must propagate its changes to the other.
@Test
public void synchronizeConnections() throws Exception {
  final String databaseName = "SharedDB";
  DatabaseMeta sharedDB0 = createDatabaseMeta( databaseName, true );
  saveSharedObjects( SHARED_OBJECTS_FILE, sharedDB0 );
  JobMeta job1 = createJobMeta();
  spoonDelegates.jobs.addJob( job1 );
  JobMeta job2 = createJobMeta();
  spoonDelegates.jobs.addJob( job2 );
  DatabaseMeta sharedDB2 = job2.getDatabase( 0 );
  assertEquals( databaseName, sharedDB2.getName() );
  DatabaseMeta sharedDB1 = job1.getDatabase( 0 );
  assertEquals( databaseName, sharedDB1.getName() );
  // Each job holds its own copy of the shared connection.
  assertTrue( sharedDB1 != sharedDB2 );
  assertThat( sharedDB1.getHostname(), equalTo( BEFORE_SYNC_VALUE ) );
  sharedDB2.setHostname( AFTER_SYNC_VALUE );
  sharedUtil.synchronizeConnections( sharedDB2, sharedDB2.getName() );
  // The change made on job2's copy is now visible on job1's copy.
  assertThat( sharedDB1.getHostname(), equalTo( AFTER_SYNC_VALUE ) );
}
/**
 * PMML surrogate semantics: the primary predicate's value wins unless it is
 * unknown (null), in which case the surrogate's value is used.
 */
static Boolean surrogateOperator(Boolean aBoolean, Boolean aBoolean2) {
    logger.trace("surrogateOperator {} {}", aBoolean, aBoolean2);
    if (aBoolean == null) {
        return aBoolean2;
    }
    return aBoolean;
}
// Exhaustive truth table: when the first operand is null the second wins;
// otherwise the first operand is returned regardless of the second.
@Test
void surrogateOperator() {
  Boolean aBoolean = null;
  boolean aBoolean2 = true;
  assertThat(KiePMMLCompoundPredicate.surrogateOperator(aBoolean, aBoolean2)).isTrue();
  aBoolean2 = false;
  assertThat(KiePMMLCompoundPredicate.surrogateOperator(aBoolean, aBoolean2)).isFalse();
  aBoolean = false;
  aBoolean2 = false;
  assertThat(KiePMMLCompoundPredicate.surrogateOperator(aBoolean, aBoolean2)).isFalse();
  aBoolean = true;
  aBoolean2 = false;
  assertThat(KiePMMLCompoundPredicate.surrogateOperator(aBoolean, aBoolean2)).isTrue();
  aBoolean = false;
  aBoolean2 = true;
  assertThat(KiePMMLCompoundPredicate.surrogateOperator(aBoolean, aBoolean2)).isFalse();
  aBoolean = true;
  aBoolean2 = true;
  assertThat(KiePMMLCompoundPredicate.surrogateOperator(aBoolean, aBoolean2)).isTrue();
}
/**
 * Declares the api/plugins web service controller and registers every plugin
 * action on it before finalizing the controller.
 */
@Override
public void define(Context context) {
  NewController controller = context.createController("api/plugins");
  controller
      .setDescription("Manage the plugins on the server, including installing, uninstalling, and upgrading.")
      .setSince("5.2");
  // Each action contributes its own endpoint under the controller.
  for (PluginsWsAction action : actions) {
    action.define(controller);
  }
  controller.done();
}
// define() must register the api/plugins controller with its since/description
// metadata and expose exactly the injected actions.
@Test
public void defines_controller_and_binds_PluginsWsActions() {
  WebService.Context context = new WebService.Context();
  underTest.define(context);
  WebService.Controller controller = context.controller("api/plugins");
  assertThat(controller).isNotNull();
  assertThat(controller.since()).isEqualTo("5.2");
  assertThat(controller.description()).isNotEmpty();
  assertThat(controller.actions()).extracting("key").containsOnly("dummy");
}