focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Renders this configuration for logging/debugging. Only the rejection
 * handler's simple class name is included to keep the output compact.
 */
@Override
public String toString() {
    return "ThreadPoolBulkheadConfig{"
        + "maxThreadPoolSize=" + maxThreadPoolSize
        + ", coreThreadPoolSize=" + coreThreadPoolSize
        + ", queueCapacity=" + queueCapacity
        + ", keepAliveDuration=" + keepAliveDuration
        + ", writableStackTraceEnabled=" + writableStackTraceEnabled
        + ", contextPropagators=" + contextPropagators
        + ", rejectExecutionHandle=" + rejectedExecutionHandler.getClass().getSimpleName()
        + '}';
}
@Test
public void testToString() {
    // Build a config with every customisable option set to a non-default value.
    ThreadPoolBulkheadConfig config = ThreadPoolBulkheadConfig.custom()
        .maxThreadPoolSize(20)
        .coreThreadPoolSize(2)
        .queueCapacity(50)
        .keepAliveDuration(Duration.ofMillis(555))
        .writableStackTraceEnabled(false)
        .contextPropagator(TestCtxPropagator2.class)
        .build();

    // Every configured value must show up in the rendered string.
    assertThat(config.toString())
        .startsWith("ThreadPoolBulkheadConfig{")
        .contains("maxThreadPoolSize=20")
        .contains("coreThreadPoolSize=2")
        .contains("queueCapacity=50")
        .contains("keepAliveDuration=PT0.555S")
        .contains("writableStackTraceEnabled=false")
        .contains("contextPropagators=[io.github.resilience4j.bulkhead.ThreadPoolBulkheadConfigTest$TestCtxPropagator2")
        .endsWith("}");
}
/**
 * Generates one key/value row from the Avro random generator.
 *
 * <p>Besides plain random fields, three schema-level props steer generation:
 * "session" fields are sessionised, "session-sibling-int-hash" fields are tied
 * to the most recent session value, and "format_as_time" fields are filled
 * with the current wall-clock time in the requested format.
 *
 * @return the key (copied from the value at {@code keyFieldIndex}) paired with the row
 * @throws RuntimeException if the generator yields anything other than a GenericRecord
 */
public Pair<GenericKey, GenericRow> generateRow() {
    final Object generatedObject = generator.generate();
    if (!(generatedObject instanceof GenericRecord)) {
        throw new RuntimeException(String.format(
            "Expected Avro Random Generator to return instance of GenericRecord, found %s instead",
            generatedObject.getClass().getName()
        ));
    }
    final GenericRecord randomAvroMessage = (GenericRecord) generatedObject;
    final GenericRow row = new GenericRow(generator.schema().getFields().size());

    // Lazily created: only needed if a field requests a non-"unix_long" format.
    SimpleDateFormat timeformatter = null;

    /*
     * Populate the record entries
     */
    String sessionisationValue = null;
    for (final Schema.Field field : generator.schema().getFields()) {
        final boolean isSession = field.schema().getProp("session") != null;
        final boolean isSessionSiblingIntHash = field.schema().getProp("session-sibling-int-hash") != null;
        final String timeFormatFromLong = field.schema().getProp("format_as_time");

        if (isSession) {
            final String currentValue = (String) randomAvroMessage.get(field.name());
            final String newCurrentValue = handleSessionisationOfValue(sessionManager, currentValue);
            // Remember the session value so later sibling fields can be linked to it.
            sessionisationValue = newCurrentValue;
            row.append(newCurrentValue);
        } else if (isSessionSiblingIntHash && sessionisationValue != null) {
            // super cheeky hack to link int-ids to session-values - if anything fails then we use
            // the 'avro-gen' randomised version
            handleSessionSiblingField(
                randomAvroMessage,
                row,
                sessionisationValue,
                field
            );
        } else if (timeFormatFromLong != null) {
            final Date date = new Date(System.currentTimeMillis());
            if (timeFormatFromLong.equals("unix_long")) {
                // Raw epoch millis requested.
                row.append(date.getTime());
            } else {
                if (timeformatter == null) {
                    timeformatter = new SimpleDateFormat(timeFormatFromLong);
                }
                row.append(timeformatter.format(date));
            }
        } else {
            final Object value = randomAvroMessage.get(field.name());
            if (value instanceof Record) {
                // Nested records are converted via Connect so schema handling matches KSQL.
                final Field ksqlField = valueSchema.field(field.name());
                final Record record = (Record) value;
                final Object ksqlValue = avroData.toConnectData(record.getSchema(), record).value();
                row.append(DataGenSchemaUtil.getOptionalValue(ksqlField.schema(), ksqlValue));
            } else {
                row.append(value);
            }
        }
    }
    final GenericKey key = GenericKey.genericKey(row.get(keyFieldIndex));
    return Pair.of(key, row);
}
@Test
public void shouldGenerateCorrectKey() throws IOException {
    // Given: a generator over the pageviews schema, keyed on "viewtime".
    final Generator generator = new Generator(new File("./src/main/resources/pageviews_schema.avro"), new Random());
    final RowGenerator rowGenerator = new RowGenerator(generator, "viewtime", Optional.empty());

    // When:
    final Pair<GenericKey, GenericRow> rowPair = rowGenerator.generateRow();

    // Then: the key is populated and mirrors the corresponding value column.
    final GenericKey key = rowPair.getLeft();
    final GenericRow value = rowPair.getRight();
    assertThat(key, is(notNullValue()));
    assertThat(key.get(0), is(instanceOf(Long.class)));
    assertThat("must match copy of key in value", key.get(0), is(value.get(0)));
}
/**
 * Reports whether ANSI SQL-92 Full level grammar is supported.
 * This implementation does not support it, so the answer is always {@code false}.
 */
@Override
public boolean supportsANSI92FullSQL() {
    return false;
}
@Test
void assertSupportsANSI92FullSQL() {
    // The metadata adapter reports no support for ANSI SQL-92 Full level.
    assertFalse(metaData.supportsANSI92FullSQL());
}
/**
 * Delegates to the underlying persistent (immutable) collection, which rejects
 * the mutation.
 *
 * @throws UnsupportedOperationException always, raised by the delegate
 */
@Override
public boolean removeIf(Predicate<? super E> filter) {
    // will throw UnsupportedOperationException
    return underlying().removeIf(filter);
}
@Test
public void testDelegationOfUnsupportedFunctionRemoveIf() {
    // Verifies that removeIf(...) is forwarded to the wrapped delegate and that
    // the delegate's UnsupportedOperationException propagates unchanged.
    final Predicate<Object> mockPredicate = mock(Predicate.class);
    new PCollectionsTreeSetWrapperDelegationChecker<>()
        .defineMockConfigurationForUnsupportedFunction(mock -> mock.removeIf(eq(mockPredicate)))
        .defineWrapperUnsupportedFunctionInvocation(wrapper -> wrapper.removeIf(mockPredicate))
        .doUnsupportedFunctionDelegationCheck();
}
/**
 * Scans the pattern string into a list of tokens, driving a small state
 * machine one character at a time.
 *
 * @return the tokens recognised in the pattern
 * @throws ScanException if the pattern ends while a format modifier or an
 *                       option is still being parsed
 */
List<Token> tokenize() throws ScanException {
    List<Token> tokenList = new ArrayList<Token>();
    StringBuffer buf = new StringBuffer();

    // Dispatch each character to the handler for the current scanner state.
    while (pointer < patternLength) {
        char c = pattern.charAt(pointer);
        pointer++;
        switch (state) {
        case LITERAL_STATE:
            handleLiteralState(c, tokenList, buf);
            break;
        case FORMAT_MODIFIER_STATE:
            handleFormatModifierState(c, tokenList, buf);
            break;
        case OPTION_STATE:
            processOption(c, tokenList, buf);
            break;
        case KEYWORD_STATE:
            handleKeywordState(c, tokenList, buf);
            break;
        case RIGHT_PARENTHESIS_STATE:
            handleRightParenthesisState(c, tokenList, buf);
            break;
        default:
        }
    }

    // EOS: flush whatever the final state has accumulated.
    switch (state) {
    case LITERAL_STATE:
        addValuedToken(Token.LITERAL, buf, tokenList);
        break;
    case KEYWORD_STATE:
        tokenList.add(new Token(Token.SIMPLE_KEYWORD, buf.toString()));
        break;
    case RIGHT_PARENTHESIS_STATE:
        tokenList.add(Token.RIGHT_PARENTHESIS_TOKEN);
        break;
    case FORMAT_MODIFIER_STATE:
    case OPTION_STATE:
        // A modifier or option must not be cut short by end-of-pattern.
        throw new ScanException("Unexpected end of pattern string");
    }
    return tokenList;
}
@Test
public void testNested() throws ScanException {
    // %(...) groups may nest: "%(%a%(%b))" holds a composite inside a composite.
    List<Token> actual = new TokenStream("%(%a%(%b))").tokenize();

    List<Token> expected = new ArrayList<Token>();
    expected.add(Token.PERCENT_TOKEN);
    expected.add(Token.BARE_COMPOSITE_KEYWORD_TOKEN);
    expected.add(Token.PERCENT_TOKEN);
    expected.add(new Token(Token.SIMPLE_KEYWORD, "a"));
    expected.add(Token.PERCENT_TOKEN);
    expected.add(Token.BARE_COMPOSITE_KEYWORD_TOKEN);
    expected.add(Token.PERCENT_TOKEN);
    expected.add(new Token(Token.SIMPLE_KEYWORD, "b"));
    expected.add(Token.RIGHT_PARENTHESIS_TOKEN);
    expected.add(Token.RIGHT_PARENTHESIS_TOKEN);

    assertEquals(expected, actual);
}
/**
 * Returns the HttpAsyncContext bound to the current thread, lazily creating
 * and registering one on first access.
 */
public static HttpAsyncContext getOrCreateContext() {
    HttpAsyncContext context = LOCAL.get();
    if (context == null) {
        context = new HttpAsyncContext();
        LOCAL.set(context);
    }
    return context;
}
@Test
public void getOrCreateContext() {
    // First access on this thread must lazily create a context — never null.
    final HttpAsyncContext orCreateContext = HttpAsyncUtils.getOrCreateContext();
    Assert.assertNotNull(orCreateContext);
}
/**
 * Issues an asynchronous fetch of all entries of a multimap tag, optionally
 * omitting the values (keys only). The result pages lazily through any
 * continuation positions.
 */
public <T> Future<Iterable<Map.Entry<ByteString, Iterable<T>>>> multimapFetchAllFuture(
    boolean omitValues, ByteString encodedTag, String stateFamily, Coder<T> elemCoder) {
  // Build the MULTIMAP_ALL tag, recording whether values should be omitted.
  StateTag<ByteString> baseTag = StateTag.<ByteString>of(Kind.MULTIMAP_ALL, encodedTag, stateFamily);
  StateTag<ByteString> stateTag = baseTag.toBuilder().setOmitValues(omitValues).build();
  // Wrap the raw fetch future so continuation pages are fetched on demand.
  Future<?> rawFuture = this.stateFuture(stateTag, elemCoder);
  return valuesToPagingIterableFuture(stateTag, elemCoder, this.stateFuture(stateTag, elemCoder));
}
@Test
public void testReadMultimapKeysPaginated() throws Exception {
    // Keys only (omitValues = true); no RPC may happen before the future is consumed.
    Future<Iterable<Map.Entry<ByteString, Iterable<Integer>>>> future =
        underTest.multimapFetchAllFuture(true, STATE_KEY_1, STATE_FAMILY, INT_CODER);
    Mockito.verifyNoMoreInteractions(mockWindmill);

    // Page 1: initial fetch; the response carries key 1 plus continuation position 1.
    Windmill.KeyedGetDataRequest.Builder expectedRequest1 =
        Windmill.KeyedGetDataRequest.newBuilder()
            .setKey(DATA_KEY)
            .setShardingKey(SHARDING_KEY)
            .setWorkToken(WORK_TOKEN)
            .setMaxBytes(WindmillStateReader.MAX_KEY_BYTES)
            .addMultimapsToFetch(
                Windmill.TagMultimapFetchRequest.newBuilder()
                    .setTag(STATE_KEY_1)
                    .setStateFamily(STATE_FAMILY)
                    .setFetchEntryNamesOnly(true)
                    .setFetchMaxBytes(WindmillStateReader.INITIAL_MAX_MULTIMAP_BYTES));
    Windmill.KeyedGetDataResponse.Builder response1 =
        Windmill.KeyedGetDataResponse.newBuilder()
            .setKey(DATA_KEY)
            .addTagMultimaps(
                Windmill.TagMultimapFetchResponse.newBuilder()
                    .setTag(STATE_KEY_1)
                    .setStateFamily(STATE_FAMILY)
                    .addEntries(
                        Windmill.TagMultimapEntry.newBuilder().setEntryName(STATE_MULTIMAP_KEY_1))
                    .setContinuationPosition(STATE_MULTIMAP_CONT_1));

    // Page 2: continuation at position 1; returns key 2 and continuation position 2.
    Windmill.KeyedGetDataRequest.Builder expectedRequest2 =
        Windmill.KeyedGetDataRequest.newBuilder()
            .setKey(DATA_KEY)
            .setShardingKey(SHARDING_KEY)
            .setWorkToken(WORK_TOKEN)
            .setMaxBytes(WindmillStateReader.MAX_CONTINUATION_KEY_BYTES)
            .addMultimapsToFetch(
                Windmill.TagMultimapFetchRequest.newBuilder()
                    .setTag(STATE_KEY_1)
                    .setStateFamily(STATE_FAMILY)
                    .setFetchEntryNamesOnly(true)
                    .setFetchMaxBytes(WindmillStateReader.CONTINUATION_MAX_MULTIMAP_BYTES)
                    .setRequestPosition(STATE_MULTIMAP_CONT_1));
    Windmill.KeyedGetDataResponse.Builder response2 =
        Windmill.KeyedGetDataResponse.newBuilder()
            .setKey(DATA_KEY)
            .addTagMultimaps(
                Windmill.TagMultimapFetchResponse.newBuilder()
                    .setTag(STATE_KEY_1)
                    .setStateFamily(STATE_FAMILY)
                    .addEntries(
                        Windmill.TagMultimapEntry.newBuilder().setEntryName(STATE_MULTIMAP_KEY_2))
                    .setRequestPosition(STATE_MULTIMAP_CONT_1)
                    .setContinuationPosition(STATE_MULTIMAP_CONT_2));

    // Page 3: continuation at position 2; returns key 3 with no further continuation.
    Windmill.KeyedGetDataRequest.Builder expectedRequest3 =
        Windmill.KeyedGetDataRequest.newBuilder()
            .setKey(DATA_KEY)
            .setShardingKey(SHARDING_KEY)
            .setWorkToken(WORK_TOKEN)
            .setMaxBytes(WindmillStateReader.MAX_CONTINUATION_KEY_BYTES)
            .addMultimapsToFetch(
                Windmill.TagMultimapFetchRequest.newBuilder()
                    .setTag(STATE_KEY_1)
                    .setStateFamily(STATE_FAMILY)
                    .setFetchEntryNamesOnly(true)
                    .setFetchMaxBytes(WindmillStateReader.CONTINUATION_MAX_MULTIMAP_BYTES)
                    .setRequestPosition(STATE_MULTIMAP_CONT_2));
    Windmill.KeyedGetDataResponse.Builder response3 =
        Windmill.KeyedGetDataResponse.newBuilder()
            .setKey(DATA_KEY)
            .addTagMultimaps(
                Windmill.TagMultimapFetchResponse.newBuilder()
                    .setTag(STATE_KEY_1)
                    .setStateFamily(STATE_FAMILY)
                    .addEntries(
                        Windmill.TagMultimapEntry.newBuilder().setEntryName(STATE_MULTIMAP_KEY_3))
                    .setRequestPosition(STATE_MULTIMAP_CONT_2));

    Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest1.build()))
        .thenReturn(response1.build());
    Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest2.build()))
        .thenReturn(response2.build());
    Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest3.build()))
        .thenReturn(response3.build());

    Iterable<Map.Entry<ByteString, Iterable<Integer>>> results = future.get();
    // Only the first page is fetched eagerly by get().
    Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest1.build());

    // Iterating triggers the continuation fetches lazily; values are empty
    // because only entry names were requested.
    List<ByteString> keys = Lists.newArrayList();
    for (Map.Entry<ByteString, Iterable<Integer>> entry : results) {
        keys.add(entry.getKey());
        assertEquals(0, Iterables.size(entry.getValue()));
    }
    Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest2.build());
    Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest3.build());
    Mockito.verifyNoMoreInteractions(mockWindmill);
    assertThat(
        keys,
        Matchers.containsInAnyOrder(
            STATE_MULTIMAP_KEY_1, STATE_MULTIMAP_KEY_2, STATE_MULTIMAP_KEY_3));
    // NOTE: The future will still contain a reference to the underlying reader, thus not calling
    // assertNoReader(future).
}
/**
 * A pickle passes when its file has no line filter, or when any filtered line
 * matches one of the pickle's locations.
 */
@Override
public boolean test(Pickle pickle) {
    URI picklePath = pickle.getUri();
    // Files without a filter entry are unconstrained.
    if (!lineFilters.containsKey(picklePath)) {
        return true;
    }
    for (Integer line : lineFilters.get(picklePath)) {
        if (matchesAnyLocation(line, pickle)) {
            return true;
        }
    }
    return false;
}

// A filter line matches if it points at the pickle itself, its scenario, or
// any enclosing examples/rule/feature location (when present).
private boolean matchesAnyLocation(Integer line, Pickle pickle) {
    return Objects.equals(line, pickle.getLocation().getLine())
        || Objects.equals(line, pickle.getScenarioLocation().getLine())
        || pickle.getExamplesLocation().map(Location::getLine).map(line::equals).orElse(false)
        || pickle.getRuleLocation().map(Location::getLine).map(line::equals).orElse(false)
        || pickle.getFeatureLocation().map(Location::getLine).map(line::equals).orElse(false);
}
@Test
void Matches_second_example() {
    // A filter on line 8 selects only the pickle generated from the second
    // example row of the feature.
    LinePredicate predicate = new LinePredicate(singletonMap(
        featurePath,
        singletonList(8)));

    assertFalse(predicate.test(firstPickle));
    assertTrue(predicate.test(secondPickle));
    assertFalse(predicate.test(thirdPickle));
    assertFalse(predicate.test(fourthPickle));
}
/**
 * Whether scenarios should be executed in parallel.
 * Defaults to {@code false} when the property is not set.
 */
boolean isParallelExecutionEnabled() {
    return configurationParameters
        .getBoolean(PARALLEL_EXECUTION_ENABLED_PROPERTY_NAME)
        .orElse(false);
}
@Test
void isParallelExecutionEnabled() {
    // Explicitly enabled.
    ConfigurationParameters enabled = new MapConfigurationParameters(
        Constants.PARALLEL_EXECUTION_ENABLED_PROPERTY_NAME, "true");
    assertTrue(new CucumberEngineOptions(enabled).isParallelExecutionEnabled());

    // Explicitly disabled.
    ConfigurationParameters disabled = new MapConfigurationParameters(
        Constants.PARALLEL_EXECUTION_ENABLED_PROPERTY_NAME, "false");
    assertFalse(new CucumberEngineOptions(disabled).isParallelExecutionEnabled());

    // Property absent: must default to false.
    ConfigurationParameters absent = new MapConfigurationParameters(
        "some key", "some value");
    assertFalse(new CucumberEngineOptions(absent).isParallelExecutionEnabled());
}
/**
 * Finalises the index definition.
 *
 * @throws IllegalStateException if no main type mapping was defined, or if
 *         the index accepts relations but none were declared
 */
@Override
public BuiltIndex<NewRegularIndex> build() {
    checkState(mainType != null, "Mapping for main type must be defined");
    // An index declared to accept relations is unusable without at least one.
    checkState(!mainType.getIndex().acceptsRelations() || !getRelations().isEmpty(),
        "At least one relation must be defined when index accepts relations");
    return new BuiltIndex<>(this);
}
@Test
@UseDataProvider("indexWithAndWithoutRelations")
public void build_fails_with_ISE_if_no_mainType_is_defined(Index index) {
    // Regardless of whether the index accepts relations, building without a
    // main type mapping must fail fast with a clear message.
    NewRegularIndex underTest = new NewRegularIndex(index, defaultSettingsConfiguration);

    assertThatThrownBy(() -> underTest.build())
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("Mapping for main type must be defined");
}
/**
 * Encodes a mapping action to JSON by delegating to the helper that knows
 * each concrete action subtype.
 */
@Override
public ObjectNode encode(MappingAction action, CodecContext context) {
    return new EncodeMappingActionCodecHelper(action, context).encode();
}
@Test
public void nativeForwardActionTest() {
    // Encode a native-forward action and check the JSON matches the action.
    final NativeForwardMappingAction action = MappingActions.nativeForward();
    final ObjectNode actionJson = actionCodec.encode(action, context);
    assertThat(actionJson, matchesAction(action));
}
/**
 * Handles a ShareGroupHeartbeat request: creates/updates the member, refreshes
 * the subscription metadata when needed, recomputes the target assignment when
 * the group epoch advanced, reconciles the member and builds the response.
 *
 * @return the records to persist together with the heartbeat response
 * @throws ApiException e.g. when the group is full (see throwIfShareGroupIsFull)
 */
private CoordinatorResult<ShareGroupHeartbeatResponseData, CoordinatorRecord> shareGroupHeartbeat(
    String groupId,
    String memberId,
    int memberEpoch,
    String rackId,
    String clientId,
    String clientHost,
    List<String> subscribedTopicNames
) throws ApiException {
    final long currentTimeMs = time.milliseconds();
    final List<CoordinatorRecord> records = new ArrayList<>();

    // Get or create the share group. Creation is only allowed for a joining
    // member (epoch == 0).
    boolean createIfNotExists = memberEpoch == 0;
    final ShareGroup group = getOrMaybeCreatePersistedShareGroup(groupId, createIfNotExists);
    throwIfShareGroupIsFull(group, memberId);

    // Get or create the member. An empty member id means a brand-new member.
    if (memberId.isEmpty()) memberId = Uuid.randomUuid().toString();
    ShareGroupMember member = getOrMaybeSubscribeShareGroupMember(
        group,
        memberId,
        memberEpoch,
        createIfNotExists
    );

    // 1. Create or update the member. If the member is new or has changed, a ShareGroupMemberMetadataValue
    // record is written to the __consumer_offsets partition to persist the change. If the subscriptions have
    // changed, the subscription metadata is updated and persisted by writing a ShareGroupPartitionMetadataValue
    // record to the __consumer_offsets partition. Finally, the group epoch is bumped if the subscriptions have
    // changed, and persisted by writing a ShareGroupMetadataValue record to the partition.
    ShareGroupMember updatedMember = new ShareGroupMember.Builder(member)
        .maybeUpdateRackId(Optional.ofNullable(rackId))
        .maybeUpdateSubscribedTopicNames(Optional.ofNullable(subscribedTopicNames))
        .setClientId(clientId)
        .setClientHost(clientHost)
        .build();

    boolean bumpGroupEpoch = hasMemberSubscriptionChanged(
        groupId,
        member,
        updatedMember,
        records
    );

    int groupEpoch = group.groupEpoch();
    Map<String, TopicMetadata> subscriptionMetadata = group.subscriptionMetadata();
    SubscriptionType subscriptionType = group.subscriptionType();

    if (bumpGroupEpoch || group.hasMetadataExpired(currentTimeMs)) {
        // The subscription metadata is updated in two cases:
        // 1) The member has updated its subscriptions;
        // 2) The refresh deadline has been reached.
        Map<String, Integer> subscribedTopicNamesMap = group.computeSubscribedTopicNames(member, updatedMember);
        subscriptionMetadata = group.computeSubscriptionMetadata(
            subscribedTopicNamesMap,
            metadataImage.topics(),
            metadataImage.cluster()
        );

        // Count this member if it is not yet part of the group.
        int numMembers = group.numMembers();
        if (!group.hasMember(updatedMember.memberId())) {
            numMembers++;
        }

        subscriptionType = ModernGroup.subscriptionType(
            subscribedTopicNamesMap,
            numMembers
        );

        if (!subscriptionMetadata.equals(group.subscriptionMetadata())) {
            log.info("[GroupId {}] Computed new subscription metadata: {}.",
                groupId, subscriptionMetadata);
            bumpGroupEpoch = true;
            records.add(newShareGroupSubscriptionMetadataRecord(groupId, subscriptionMetadata));
        }

        if (bumpGroupEpoch) {
            groupEpoch += 1;
            records.add(newShareGroupEpochRecord(groupId, groupEpoch));
            log.info("[GroupId {}] Bumped group epoch to {}.", groupId, groupEpoch);
        }

        group.setMetadataRefreshDeadline(currentTimeMs + shareGroupMetadataRefreshIntervalMs, groupEpoch);
    }

    // 2. Update the target assignment if the group epoch is larger than the target assignment epoch. The delta between
    // the existing and the new target assignment is persisted to the partition.
    final int targetAssignmentEpoch;
    final Assignment targetAssignment;
    if (groupEpoch > group.assignmentEpoch()) {
        targetAssignment = updateTargetAssignment(
            group,
            groupEpoch,
            updatedMember,
            subscriptionMetadata,
            subscriptionType,
            records
        );
        targetAssignmentEpoch = groupEpoch;
    } else {
        targetAssignmentEpoch = group.assignmentEpoch();
        targetAssignment = group.targetAssignment(updatedMember.memberId());
    }

    // 3. Reconcile the member's assignment with the target assignment if the member is not
    // fully reconciled yet.
    updatedMember = maybeReconcile(
        groupId,
        updatedMember,
        targetAssignmentEpoch,
        targetAssignment,
        records
    );

    scheduleShareGroupSessionTimeout(groupId, memberId);

    // Prepare the response.
    ShareGroupHeartbeatResponseData response = new ShareGroupHeartbeatResponseData()
        .setMemberId(updatedMember.memberId())
        .setMemberEpoch(updatedMember.memberEpoch())
        .setHeartbeatIntervalMs(shareGroupHeartbeatIntervalMs);

    // The assignment is only provided in the following cases:
    // 1. The member just joined or rejoined to group (epoch equals to zero);
    // 2. The member's assignment has been updated.
    if (memberEpoch == 0 || hasAssignedPartitionsChanged(member, updatedMember)) {
        response.setAssignment(createShareGroupResponseAssignment(updatedMember));
    }

    return new CoordinatorResult<>(records, response);
}
@Test
public void testShareGroupUnknownMemberIdJoins() {
    String groupId = "fooup";
    String memberId = Uuid.randomUuid().toString();
    GroupMetadataManagerTestContext context = new GroupMetadataManagerTestContext.Builder()
        .withShareGroupAssignor(new NoOpPartitionAssignor())
        .build();

    // A first member joins to create the group.
    context.shareGroupHeartbeat(
        new ShareGroupHeartbeatRequestData()
            .setGroupId(groupId)
            .setMemberId(memberId)
            .setMemberEpoch(0)
            .setSubscribedTopicNames(Arrays.asList("foo", "bar")));

    // The second member is rejected because the member id is unknown and
    // the member epoch is not zero.
    assertThrows(UnknownMemberIdException.class, () ->
        context.shareGroupHeartbeat(
            new ShareGroupHeartbeatRequestData()
                .setGroupId(groupId)
                .setMemberId(Uuid.randomUuid().toString())
                .setMemberEpoch(1)
                .setSubscribedTopicNames(Arrays.asList("foo", "bar"))));
}
/**
 * Determines vehicle access for a way and writes the forward/backward access
 * bits into {@code accessEnc}. Covers restriction tags, barrier nodes on
 * artificial barrier edges, ferries, and oneway/roundabout handling.
 */
@Override
public void handleWayTags(int edgeId, EdgeIntAccess edgeIntAccess, ReaderWay way, IntsRef relationFlags) {
    String highwayValue = way.getTag("highway");
    // Optionally skip service roads that exist only for emergency access.
    if (skipEmergency && "service".equals(highwayValue) && "emergency_access".equals(way.getTag("service")))
        return;

    int firstIndex = way.getFirstIndex(restrictionKeys);
    String firstValue = firstIndex < 0 ? "" : way.getTag(restrictionKeys.get(firstIndex), "");
    // A plain (non-temporal) restriction blocks the way entirely.
    if (restrictedValues.contains(firstValue) && !hasTemporalRestriction(way, firstIndex, restrictionKeys))
        return;

    if (way.hasTag("gh:barrier_edge") && way.hasTag("node_tags")) {
        List<Map<String, Object>> nodeTags = way.getTag("node_tags", null);
        // a barrier edge has the restriction in both nodes and the tags are the same -> get(0)
        Map<String, Object> firstNodeTags = nodeTags.get(0);
        firstValue = getFirstPriorityNodeTag(firstNodeTags, restrictionKeys);
        String barrierValue = firstNodeTags.containsKey("barrier") ? (String) firstNodeTags.get("barrier") : "";
        // Block when the node restricts this vehicle, is a listed barrier type,
        // or is locked — unless the node tag explicitly permits access.
        if (restrictedValues.contains(firstValue) || barriers.contains(barrierValue)
                || "yes".equals(firstNodeTags.get("locked")) && !INTENDED.contains(firstValue))
            return;
    }

    if (FerrySpeedCalculator.isFerry(way)) {
        boolean isCar = restrictionKeys.contains("motorcar");
        if (INTENDED.contains(firstValue)
                // implied default is allowed only if foot and bicycle is not specified:
                || isCar && firstValue.isEmpty() && !way.hasTag("foot") && !way.hasTag("bicycle")
                // if hgv is allowed then smaller trucks and cars are allowed too even if not specified
                || isCar && way.hasTag("hgv", "yes")) {
            accessEnc.setBool(false, edgeId, edgeIntAccess, true);
            accessEnc.setBool(true, edgeId, edgeIntAccess, true);
        }
    } else {
        boolean isRoundabout = roundaboutEnc.getBool(false, edgeId, edgeIntAccess);
        boolean ignoreOneway = "no".equals(way.getFirstValue(ignoreOnewayKeys));
        boolean isBwd = isBackwardOneway(way);
        if (!ignoreOneway && (isBwd || isRoundabout || isForwardOneway(way))) {
            // Oneway (or roundabout): open only the permitted direction.
            accessEnc.setBool(isBwd, edgeId, edgeIntAccess, true);
        } else {
            // Both directions accessible.
            accessEnc.setBool(false, edgeId, edgeIntAccess, true);
            accessEnc.setBool(true, edgeId, edgeIntAccess, true);
        }
    }
}
@Test
public void testBusNodeAccess() {
    ReaderWay way = new ReaderWay(1);
    way.setTag("highway", "secondary");
    way.setTag("gh:barrier_edge", true);
    // Barrier node blocks general access but explicitly allows buses.
    way.setTag("node_tags", List.of(Map.of("access", "no", "bus", "yes"), Map.of()));
    EdgeIntAccess access = new ArrayEdgeIntAccess(1);
    int edgeId = 0;
    parser.handleWayTags(edgeId, access, way, null);
    assertTrue(busAccessEnc.getBool(false, edgeId, access));

    // Barrier node allows general access but forbids buses.
    way.setTag("node_tags", List.of(Map.of("access", "yes", "bus", "no")));
    access = new ArrayEdgeIntAccess(1);
    parser.handleWayTags(edgeId, access, way, null);
    assertFalse(busAccessEnc.getBool(false, edgeId, access));

    // ensure that allowing node tags (bus=yes) do not unblock the inaccessible way
    way.setTag("access", "no");
    way.setTag("node_tags", List.of(Map.of("bus", "yes"), Map.of()));
    access = new ArrayEdgeIntAccess(1);
    parser.handleWayTags(edgeId, access, way, null);
    assertFalse(busAccessEnc.getBool(false, edgeId, access));
}
/**
 * Asserts that {@code str} is neither {@code null} nor empty.
 *
 * @param str the string to validate
 * @param message the message of the exception raised when validation fails
 * @throws IllegalArgumentException if {@code str} is {@code null} or has length zero
 */
public static void notEmptyString(String str, String message) {
    // Inline null/empty check: same semantics as StringUtils.isEmpty but
    // without the utility-class dependency.
    if (str == null || str.isEmpty()) {
        throw new IllegalArgumentException(message);
    }
}
@Test
void testNotNullNotEmptyString() {
    // A non-empty string must pass validation without throwing.
    notEmptyString("abcd", "Message can'be null or empty");
}
/**
 * Removes from this set every element contained in {@code c}, blocking on the
 * asynchronous operation.
 *
 * @return {@code true} if the set changed as a result of the call
 */
@Override
public boolean removeAll(Collection<?> c) {
    return get(removeAllAsync(c));
}
@Test
public void testRemoveAll() {
    Set<Integer> list = redisson.getSet("list");
    list.add(1);
    list.add(2);
    list.add(3);
    list.add(4);
    list.add(5);

    // Removing nothing reports no change.
    Assertions.assertFalse(list.removeAll(Collections.emptyList()));

    // Mixed hits and misses: true because at least one element was removed.
    Assertions.assertTrue(list.removeAll(Arrays.asList(3, 2, 10, 6)));
    assertThat(list).containsExactlyInAnyOrder(1, 4, 5);

    Assertions.assertTrue(list.removeAll(Arrays.asList(4)));
    assertThat(list).containsExactlyInAnyOrder(1, 5);

    // Duplicates in the argument collection are harmless.
    Assertions.assertTrue(list.removeAll(Arrays.asList(1, 5, 1, 5)));
    Assertions.assertTrue(list.isEmpty());
}
/**
 * Delegates the rotation of {@code indexSet} to the shared rotator, supplying
 * this strategy's {@code shouldRotate} as the decision callback.
 */
@Override
public void rotate(IndexSet indexSet) {
    indexRotator.rotate(indexSet, this::shouldRotate);
}
@Test
public void testRotateFailed() {
    // Simulate a missing store size for the newest index: the strategy must
    // not cycle (rotate) the index set in that case.
    when(indices.getStoreSizeInBytes("name")).thenReturn(Optional.empty());
    when(indexSet.getNewestIndex()).thenReturn("name");
    when(indexSet.getConfig()).thenReturn(indexSetConfig);
    when(indexSetConfig.rotationStrategyConfig()).thenReturn(SizeBasedRotationStrategyConfig.create(100L));

    final SizeBasedRotationStrategy strategy = createStrategy();
    strategy.rotate(indexSet);
    verify(indexSet, never()).cycle();
    reset(indexSet);
}
/**
 * Builds the complete JobManager Kubernetes specification by threading the
 * pod template through an ordered chain of decorators and collecting any
 * accompanying resources (services, config maps, secrets) they produce.
 *
 * @param podTemplate base pod definition; copied so the caller's instance stays untouched
 * @param kubernetesJobManagerParameters parameters driving the decorators
 * @throws IOException if a decorator fails to build its resources
 */
public static KubernetesJobManagerSpecification buildKubernetesJobManagerSpecification(
        FlinkPod podTemplate, KubernetesJobManagerParameters kubernetesJobManagerParameters)
        throws IOException {
    FlinkPod flinkPod = Preconditions.checkNotNull(podTemplate).copy();
    List<HasMetadata> accompanyingResources = new ArrayList<>();

    // Mandatory decorators, applied in this order.
    final List<KubernetesStepDecorator> stepDecorators =
            new ArrayList<>(
                    Arrays.asList(
                            new InitJobManagerDecorator(kubernetesJobManagerParameters),
                            new EnvSecretsDecorator(kubernetesJobManagerParameters),
                            new MountSecretsDecorator(kubernetesJobManagerParameters),
                            new CmdJobManagerDecorator(kubernetesJobManagerParameters),
                            new InternalServiceDecorator(kubernetesJobManagerParameters),
                            new ExternalServiceDecorator(kubernetesJobManagerParameters)));

    // Hadoop and Kerberos mounts are optional and gated by configuration flags.
    Configuration configuration = kubernetesJobManagerParameters.getFlinkConfiguration();
    if (configuration.get(KUBERNETES_HADOOP_CONF_MOUNT_DECORATOR_ENABLED)) {
        stepDecorators.add(new HadoopConfMountDecorator(kubernetesJobManagerParameters));
    }
    if (configuration.get(KUBERNETES_KERBEROS_MOUNT_DECORATOR_ENABLED)) {
        stepDecorators.add(new KerberosMountDecorator(kubernetesJobManagerParameters));
    }

    // Flink conf and pod template mounts always come last.
    stepDecorators.addAll(
            Arrays.asList(
                    new FlinkConfMountDecorator(kubernetesJobManagerParameters),
                    new PodTemplateMountDecorator(kubernetesJobManagerParameters)));

    for (KubernetesStepDecorator stepDecorator : stepDecorators) {
        flinkPod = stepDecorator.decorateFlinkPod(flinkPod);
        accompanyingResources.addAll(stepDecorator.buildAccompanyingKubernetesResources());
    }

    final Deployment deployment =
            createJobManagerDeployment(flinkPod, kubernetesJobManagerParameters);

    return new KubernetesJobManagerSpecification(deployment, accompanyingResources);
}
@Test
void testHadoopDecoratorsCanBeTurnedOff() throws Exception {
    // Even with a Hadoop conf dir present, disabling both decorator flags must
    // suppress all Hadoop/Kerberos config maps and secrets.
    setHadoopConfDirEnv();
    generateHadoopConfFileItems();
    flinkConfig.set(
            KubernetesConfigOptions.KUBERNETES_HADOOP_CONF_MOUNT_DECORATOR_ENABLED, false);
    flinkConfig.set(KubernetesConfigOptions.KUBERNETES_KERBEROS_MOUNT_DECORATOR_ENABLED, false);

    kubernetesJobManagerSpecification =
            KubernetesJobManagerFactory.buildKubernetesJobManagerSpecification(
                    flinkPod, kubernetesJobManagerParameters);

    assertThat(
                    getConfigMapList(
                            HadoopConfMountDecorator.getHadoopConfConfigMapName(CLUSTER_ID)))
            .isEmpty();
    assertThat(
                    getConfigMapList(
                            KerberosMountDecorator.getKerberosKrb5confConfigMapName(
                                    CLUSTER_ID)))
            .isEmpty();
    assertThat(getConfigMapList(KerberosMountDecorator.getKerberosKeytabSecretName(CLUSTER_ID)))
            .isEmpty();
}
/**
 * Registers the RocksDB value providers for a segment store so that its
 * metrics can be recorded.
 *
 * @param segmentName name of the segment store
 * @param db the RocksDB instance backing the store
 * @param cache the block cache used by the store
 * @param statistics the statistics handle (may be null when statistics are disabled)
 * @throws IllegalStateException if providers for {@code segmentName} were already added
 */
public void addValueProviders(final String segmentName,
                              final RocksDB db,
                              final Cache cache,
                              final Statistics statistics) {
    if (storeToValueProviders.isEmpty()) {
        // First store for this task: start participating in metrics recording.
        logger.debug("Adding metrics recorder of task {} to metrics recording trigger", taskId);
        streamsMetrics.rocksDBMetricsRecordingTrigger().addMetricsRecorder(this);
    } else if (storeToValueProviders.containsKey(segmentName)) {
        throw new IllegalStateException("Value providers for store " + segmentName + " of task " + taskId +
            " has been already added. This is a bug in Kafka Streams. " +
            "Please open a bug report under https://issues.apache.org/jira/projects/KAFKA/issues");
    }
    verifyDbAndCacheAndStatistics(segmentName, db, cache, statistics);
    logger.debug("Adding value providers for store {} of task {}", segmentName, taskId);
    storeToValueProviders.put(segmentName, new DbAndCacheAndStatistics(db, cache, statistics));
}
@Test
public void shouldNotSetStatsLevelToExceptDetailedTimersWhenValueProvidersWithoutStatisticsAreAdded() {
    // A null Statistics handle means statistics are disabled for this store;
    // the recorder must not touch the (unused) statistics object.
    recorder.addValueProviders(SEGMENT_STORE_NAME_1, dbToAdd1, cacheToAdd1, null);

    verifyNoInteractions(statisticsToAdd1);
}
/**
 * Issues an asynchronous fetch of the watermark hold for the given tag.
 * No element coder is needed for watermark reads, hence the {@code null}.
 */
public Future<Instant> watermarkFuture(ByteString encodedTag, String stateFamily) {
    return stateFuture(StateTag.of(StateTag.Kind.WATERMARK, encodedTag, stateFamily), null);
}
@Test
public void testReadWatermark() throws Exception {
    Future<Instant> future = underTest.watermarkFuture(STATE_KEY_1, STATE_FAMILY);
    // The request must be deferred until the future is consumed.
    Mockito.verifyNoMoreInteractions(mockWindmill);

    Windmill.KeyedGetDataRequest.Builder expectedRequest =
        Windmill.KeyedGetDataRequest.newBuilder()
            .setKey(DATA_KEY)
            .setShardingKey(SHARDING_KEY)
            .setWorkToken(WORK_TOKEN)
            .setMaxBytes(WindmillStateReader.MAX_KEY_BYTES)
            .addWatermarkHoldsToFetch(
                Windmill.WatermarkHold.newBuilder()
                    .setTag(STATE_KEY_1)
                    .setStateFamily(STATE_FAMILY));

    // Two hold timestamps; the asserted result (5000ms) shows the reader
    // surfaces the earliest one.
    Windmill.KeyedGetDataResponse.Builder response =
        Windmill.KeyedGetDataResponse.newBuilder()
            .setKey(DATA_KEY)
            .addWatermarkHolds(
                Windmill.WatermarkHold.newBuilder()
                    .setTag(STATE_KEY_1)
                    .setStateFamily(STATE_FAMILY)
                    .addTimestamps(5000000)
                    .addTimestamps(6000000));

    Mockito.when(mockWindmill.getStateData(COMPUTATION, expectedRequest.build()))
        .thenReturn(response.build());

    Instant result = future.get();
    Mockito.verify(mockWindmill).getStateData(COMPUTATION, expectedRequest.build());

    assertThat(result, Matchers.equalTo(new Instant(5000)));
    assertNoReader(future);
}
/**
 * Whether this correspondence represents exact equality.
 * This implementation always answers {@code false}.
 */
boolean isEquality() {
    return false;
}
@Test
public void testTransforming_both_isEquality() {
    // A transforming correspondence is not an equality relation.
    assertThat(HYPHENS_MATCH_COLONS.isEquality()).isFalse();
}
/**
 * Callback invoked when the process becomes available. Triggers the
 * allocation-enabled check unless it has already been performed
 * (guarded by {@code allocationExcludeChecked}).
 */
@Override
public void available() {
    if (!allocationExcludeChecked) {
        this.checkAllocationEnabledStatus();
    }
}
@Test
public void testResetAllocationUnneccessary() throws IOException {
    // The allocation-exclude setting names a different node, so nothing beyond
    // the initial settings read should reach the cluster client.
    Settings settings = Settings.builder()
        .put(OpensearchProcessImpl.CLUSTER_ROUTING_ALLOCATION_EXCLUDE_SETTING, "notmynodename")
        .build();
    when(clusterClient.getSettings(any(), any())).thenReturn(new ClusterGetSettingsResponse(null, settings, null));

    opensearchProcess.available();

    verify(clusterClient).getSettings(any(), any());
    verifyNoMoreInteractions(clusterClient);
    // The check must only run once.
    assertTrue(opensearchProcess.allocationExcludeChecked);
}
/**
 * Installs the tracing advice into the container's advice chain.
 *
 * <p>A {@code null} result from the helper means no update is required
 * (presumably the tracing advice is already present — see
 * prependTracingMessageContainerAdvice); the chain is then left untouched.
 *
 * @return the same container, for call chaining
 */
public <T extends AbstractMessageListenerContainer> T decorateMessageListenerContainer(T container) {
    Advice[] advice = prependTracingMessageContainerAdvice(container);
    if (advice != null) {
        container.setAdviceChain(advice);
    }
    return container;
}
@Test
void decorateSimpleMessageListenerContainer_prepends_as_first_when_absent() {
    // An interceptor is already registered; decoration must prepend the
    // tracing advice rather than replace the existing chain.
    SimpleMessageListenerContainer listenerContainer = new SimpleMessageListenerContainer();
    listenerContainer.setAdviceChain(new CacheInterceptor());

    assertThat(rabbitTracing.decorateMessageListenerContainer(listenerContainer))
        .extracting("adviceChain")
        .asInstanceOf(array(Advice[].class))
        .hasSize(2)
        .matches(adviceArray -> adviceArray[0] instanceof TracingRabbitListenerAdvice);
}
/**
 * Parses one delimited message from a query-stream response. The payload is
 * not self-describing, so each known shape is tried in turn; a
 * deserialization failure means "try the next shape".
 *
 * @throws IllegalStateException if the buffer matches none of the known shapes
 */
public static StreamedRow toRowFromDelimited(final Buffer buff) {
    try {
        final QueryResponseMetadata metadata = deserialize(buff, QueryResponseMetadata.class);
        return StreamedRow.header(new QueryId(Strings.nullToEmpty(metadata.queryId)), createSchema(metadata));
    } catch (KsqlRestClientException e) {
        // Not a {@link QueryResponseMetadata}
    }
    try {
        final KsqlErrorMessage error = deserialize(buff, KsqlErrorMessage.class);
        return StreamedRow.error(new RuntimeException(error.getMessage()), error.getErrorCode());
    } catch (KsqlRestClientException e) {
        // Not a {@link KsqlErrorMessage}
    }
    try {
        final PushContinuationToken continuationToken = deserialize(buff, PushContinuationToken.class);
        return StreamedRow.continuationToken(continuationToken);
    } catch (KsqlRestClientException e) {
        // Not a {@link PushContinuationToken}
    }
    try {
        final List<?> row = deserialize(buff, List.class);
        return StreamedRow.pushRow(GenericRow.fromList(row));
    } catch (KsqlRestClientException e) {
        // Not a {@link List}
    }
    throw new IllegalStateException("Couldn't parse message: " + buff.toString());
}
@Test
public void shouldParseHeader() {
    // When: a QueryResponseMetadata JSON payload is parsed.
    final StreamedRow row = KsqlTargetUtil.toRowFromDelimited(Buffer.buffer(
        "{\"queryId\": \"query_id_10\", "
            + "\"columnNames\":[\"col1\",\"col2\"], "
            + "\"columnTypes\":[\"BIGINT\",\"DOUBLE\"]}"));

    // Then: the header row carries the query id and a key-less, two-column value schema.
    assertThat(row.getHeader().isPresent(), is(true));
    assertThat(row.getHeader().get().getQueryId().toString(), is("query_id_10"));
    assertThat(row.getHeader().get().getSchema().key(), is(Collections.emptyList()));
    assertThat(row.getHeader().get().getSchema().value().size(), is(2));
    assertThat(row.getHeader().get().getSchema().value().get(0), is
        (Column.of(ColumnName.of("col1"), SqlTypes.BIGINT, Namespace.VALUE, 0)));
    assertThat(row.getHeader().get().getSchema().value().get(1), is
        (Column.of(ColumnName.of("col2"), SqlTypes.DOUBLE, Namespace.VALUE, 1)));
}
/**
 * Builds a TimeBoundaryManager for an OFFLINE table.
 *
 * Validates that the table is OFFLINE, that a schema exists, and that a time
 * column with a time unit is configured, then derives the time-boundary offset:
 * HOURLY-push tables (with sub-DAY time unit) use maxEndTime - 1 HOUR, all
 * others use maxEndTime - 1 DAY.
 *
 * @param tableConfig   OFFLINE table config (fails fast for real-time tables)
 * @param propertyStore ZK property store used to read schema and segment metadata
 * @param brokerMetrics metrics sink
 */
public TimeBoundaryManager(TableConfig tableConfig, ZkHelixPropertyStore<ZNRecord> propertyStore,
    BrokerMetrics brokerMetrics) {
  Preconditions.checkState(tableConfig.getTableType() == TableType.OFFLINE,
      "Cannot construct TimeBoundaryManager for real-time table: %s", tableConfig.getTableName());
  _offlineTableName = tableConfig.getTableName();
  _propertyStore = propertyStore;
  _brokerMetrics = brokerMetrics;
  // Prefix used to resolve per-segment ZK metadata paths later.
  _segmentZKMetadataPathPrefix = ZKMetadataProvider.constructPropertyStorePathForResource(_offlineTableName) + "/";
  Schema schema = ZKMetadataProvider.getTableSchema(_propertyStore, _offlineTableName);
  Preconditions.checkState(schema != null, "Failed to find schema for table: %s", _offlineTableName);
  _timeColumn = tableConfig.getValidationConfig().getTimeColumnName();
  Preconditions.checkNotNull(_timeColumn, "Time column must be configured in table config for table: %s",
      _offlineTableName);
  DateTimeFieldSpec dateTimeSpec = schema.getSpecForTimeColumn(_timeColumn);
  Preconditions.checkNotNull(dateTimeSpec, "Field spec must be specified in schema for time column: %s of table: %s",
      _timeColumn, _offlineTableName);
  _timeFormatSpec = dateTimeSpec.getFormatSpec();
  Preconditions.checkNotNull(_timeFormatSpec.getColumnUnit(),
      "Time unit must be configured in the field spec for time column: %s of table: %s", _timeColumn,
      _offlineTableName);
  // For HOURLY table with time unit other than DAYS, use (maxEndTime - 1 HOUR) as the time boundary; otherwise, use
  // (maxEndTime - 1 DAY)
  boolean isHourlyTable = CommonConstants.Table.PUSH_FREQUENCY_HOURLY.equalsIgnoreCase(
      IngestionConfigUtils.getBatchSegmentIngestionFrequency(tableConfig))
      && _timeFormatSpec.getColumnUnit() != TimeUnit.DAYS;
  _timeOffsetMs = isHourlyTable ? TimeUnit.HOURS.toMillis(1) : TimeUnit.DAYS.toMillis(1);
  LOGGER.info("Constructed TimeBoundaryManager with timeColumn: {}, timeFormat: {}, isHourlyTable: {} for table: {}",
      _timeColumn, dateTimeSpec.getFormat(), isHourlyTable, _offlineTableName);
}
// Exercises TimeBoundaryManager across every TimeUnit, for both DAILY and HOURLY
// push frequencies, using both the legacy time-field spec and the dateTime spec.
@Test
public void testTimeBoundaryManager() {
  for (TimeUnit timeUnit : TimeUnit.values()) {
    // Test DAILY push table, with timeFieldSpec
    String rawTableName = "testTable_" + timeUnit + "_DAILY";
    TableConfig tableConfig = getTableConfig(rawTableName, "DAILY");
    setSchemaTimeFieldSpec(rawTableName, timeUnit);
    testDailyPushTable(rawTableName, tableConfig, timeUnit);
    // Test HOURLY push table, with timeFieldSpec
    rawTableName = "testTable_" + timeUnit + "_HOURLY";
    tableConfig = getTableConfig(rawTableName, "HOURLY");
    setSchemaTimeFieldSpec(rawTableName, timeUnit);
    testHourlyPushTable(rawTableName, tableConfig, timeUnit);
    // Test DAILY push table with dateTimeFieldSpec
    rawTableName = "testTableDateTime_" + timeUnit + "_DAILY";
    tableConfig = getTableConfig(rawTableName, "DAILY");
    setSchemaDateTimeFieldSpec(rawTableName, timeUnit);
    testDailyPushTable(rawTableName, tableConfig, timeUnit);
    // Test HOURLY push table
    rawTableName = "testTableDateTime_" + timeUnit + "_HOURLY";
    tableConfig = getTableConfig(rawTableName, "HOURLY");
    setSchemaDateTimeFieldSpec(rawTableName, timeUnit);
    testHourlyPushTable(rawTableName, tableConfig, timeUnit);
  }
}
/**
 * Returns the single-character persistence encoding for the given lock type,
 * or the unknown-type marker when no mapping is registered.
 */
public static char getEncoding(LockType lockType) {
    Character encoding = PERSISTENCE_ENCODINGS.get(lockType);
    return (encoding != null) ? encoding : UNKNOWN_LOCK_TYPE_ENCODING;
}
// Pins the persistence character for each lock type: shared read 'r',
// shared write 'w', exclusive write 'x', exclusive 'e'.
@Test
public void testGetEncoding() {
  assertEquals('r', LockTypeUtil.getEncoding(LockType.SHARED_READ));
  assertEquals('w', LockTypeUtil.getEncoding(LockType.SHARED_WRITE));
  assertEquals('x', LockTypeUtil.getEncoding(LockType.EXCL_WRITE));
  assertEquals('e', LockTypeUtil.getEncoding(LockType.EXCLUSIVE));
}
/**
 * Stops the step, cancelling any in-flight database query exactly once.
 *
 * No-ops when the step is already stopped or its data is disposed. The dbLock
 * serialises cancellation against concurrent query execution; isCanceled
 * guards against cancelling the same query twice.
 *
 * @throws KettleException if query cancellation fails
 */
public void stopRunning( StepMetaInterface smi, StepDataInterface sdi ) throws KettleException {
  if ( this.isStopped() || sdi.isDisposed() ) {
    return;
  }
  dbLock.lock();
  try {
    meta = (TableInputMeta) smi;
    data = (TableInputData) sdi;
    setStopped( true );
    // Only cancel when there is a live connection and it hasn't been cancelled yet.
    if ( data.db != null && data.db.getConnection() != null && !data.isCanceled ) {
      data.db.cancelQuery();
      data.isCanceled = true;
    }
  } finally {
    dbLock.unlock();
  }
}
// Verifies that stopRunning cancels the in-flight query and flags isCanceled
// when the step is running (not stopped/disposed) and the DB connection is live.
@Test
public void testStopRunningWhenStepIsNotStoppedNorStepDataInterfaceIsDisposedAndDatabaseConnectionIsValid()
    throws KettleException {
  doReturn( false ).when( mockTableInput ).isStopped();
  doReturn( false ).when( mockStepDataInterface ).isDisposed();
  when( mockStepDataInterface.db.getConnection() ).thenReturn( mock( Connection.class ) );
  mockTableInput.stopRunning( mockStepMetaInterface, mockStepDataInterface );
  verify( mockTableInput, times( 1 ) ).isStopped();
  verify( mockStepDataInterface, times( 1 ) ).isDisposed();
  verify( mockStepDataInterface.db, times( 1 ) ).getConnection();
  verify( mockStepDataInterface.db, times( 1 ) ).cancelQuery();
  assertTrue( mockStepDataInterface.isCanceled );
}
public static List<String> listMatchedFilesWithRecursiveOption(PinotFS pinotFs, URI fileUri, @Nullable String includePattern, @Nullable String excludePattern, boolean searchRecursively) throws Exception { String[] files; // listFiles throws IOException files = pinotFs.listFiles(fileUri, searchRecursively); //TODO: sort input files based on creation time PathMatcher includeFilePathMatcher = null; if (includePattern != null) { includeFilePathMatcher = FileSystems.getDefault().getPathMatcher(includePattern); } PathMatcher excludeFilePathMatcher = null; if (excludePattern != null) { excludeFilePathMatcher = FileSystems.getDefault().getPathMatcher(excludePattern); } List<String> filteredFiles = new ArrayList<>(); for (String file : files) { if (includeFilePathMatcher != null) { if (!includeFilePathMatcher.matches(Paths.get(file))) { continue; } } if (excludeFilePathMatcher != null) { if (excludeFilePathMatcher.matches(Paths.get(file))) { continue; } } if (!pinotFs.isDirectory(new URI(sanitizeURIString(file)))) { // In case PinotFS implementations list files without a scheme (e.g. hdfs://), then we may lose it in the // input file path. Call SegmentGenerationUtils.getFileURI() to fix this up. // getFileURI throws URISyntaxException filteredFiles.add(SegmentGenerationUtils.getFileURI(file, fileUri).toString()); } } if (filteredFiles.isEmpty()) { throw new RuntimeException(String.format( "No file found in the input directory: %s matching includeFileNamePattern: %s," + " excludeFileNamePattern: %s", fileUri, includePattern, excludePattern)); } return filteredFiles; }
// Creates dir/input1.csv and dir/2009/input2.csv, then verifies that a recursive
// search with an include glob for all CSVs and an exclude glob for the nested
// file returns exactly one match (input1.csv).
@Test
public void testMatchFilesRecursiveSearchExcludeFilePattern()
    throws Exception {
  File testDir = makeTestDir();
  File inputDir = new File(testDir, "dir");
  File inputSubDir1 = new File(inputDir, "2009");
  inputSubDir1.mkdirs();
  File inputFile1 = new File(inputDir, "input1.csv");
  FileUtils.writeLines(inputFile1, Lists.newArrayList("col1,col2", "value1,1", "value2,2"));
  File inputFile2 = new File(inputSubDir1, "input2.csv");
  FileUtils.writeLines(inputFile2, Lists.newArrayList("col1,col2", "value3,3", "value4,4"));
  URI inputDirURI = new URI(inputDir.getAbsolutePath());
  if (inputDirURI.getScheme() == null) {
    // Absolute paths have no scheme; fall back to a file:// URI.
    inputDirURI = new File(inputDir.getAbsolutePath()).toURI();
  }
  PinotFS inputDirFS = PinotFSFactory.create(inputDirURI.getScheme());
  String includePattern = "glob:" + inputDir.getAbsolutePath() + "/**.csv";
  String excludePattern = "glob:" + inputDir.getAbsolutePath() + "/2009/input2.csv";
  List<String> files = SegmentGenerationUtils.listMatchedFilesWithRecursiveOption(inputDirFS, inputDirURI,
      includePattern, excludePattern, true);
  Assert.assertEquals(files.size(), 1);
}
/**
 * Runs a COMPLETE-mode validation of a GitLab ALM setting: checks both the
 * configured URL and the (decrypted) personal access token.
 *
 * @param almSettingDto the stored GitLab configuration to validate
 */
public void validate(AlmSettingDto almSettingDto) {
  String gitlabUrl = almSettingDto.getUrl();
  String accessToken = almSettingDto.getDecryptedPersonalAccessToken(encryption);
  validate(ValidationMode.COMPLETE, gitlabUrl, accessToken);
}
// COMPLETE-mode validation must reject a null access token with a clear
// "configuration incomplete" message.
@Test
public void validate_whenCompleteMode_validatesTokenNotNull() {
  assertThatIllegalArgumentException()
      .isThrownBy(() -> underTest.validate(COMPLETE, GITLAB_API_URL, null))
      .withMessage("Your Gitlab global configuration is incomplete. The GitLab access token must be set.");
}
/**
 * Starts the socket appender: validates configuration (port, host, queue size),
 * resolves the remote address, creates the event deque and connector, and
 * schedules the background connect/dispatch task. Any validation error aborts
 * startup without marking the appender started.
 */
public void start() {
  if (isStarted())
    return;
  int errorCount = 0;
  if (port <= 0) {
    errorCount++;
    // NOTE(review): message concatenates the appender name with no separating
    // space ("...appender<name> For more...") — confirm intended before changing.
    addError("No port was configured for appender" + name
        + " For more information, please visit http://logback.qos.ch/codes.html#socket_no_port");
  }
  if (remoteHost == null) {
    errorCount++;
    addError("No remote host was configured for appender" + name
        + " For more information, please visit http://logback.qos.ch/codes.html#socket_no_host");
  }
  if (queueSize == 0) {
    // Zero used to mean synchronous delivery; a size of one is the supported way.
    addWarn("Queue size of zero is deprecated, use a size of one to indicate synchronous processing");
  }
  if (queueSize < 0) {
    errorCount++;
    addError("Queue size must be greater than zero");
  }
  if (errorCount == 0) {
    try {
      address = InetAddress.getByName(remoteHost);
    } catch (UnknownHostException ex) {
      addError("unknown host: " + remoteHost);
      errorCount++;
    }
  }
  if (errorCount == 0) {
    deque = queueFactory.newLinkedBlockingDeque(queueSize);
    peerId = "remote peer " + remoteHost + ":" + port + ": ";
    connector = createConnector(address, port, 0, reconnectionDelay.getMilliseconds());
    // Connection and event dispatch run on the context's scheduler, off the caller thread.
    task = getContext().getScheduledExecutorService().submit(new Runnable() {
      public void run() {
        connectSocketAndDispatchEvents();
      }
    });
    super.start();
  }
}
// Verifies that the dispatch loop exits cleanly when interrupted while blocked
// on deque.takeFirst() waiting for the next event.
@Test
public void shutsDownOnInterruptWhileWaitingForEvent() throws Exception {
  // given
  mockOneSuccessfulSocketConnection();
  doThrow(new InterruptedException()).when(deque).takeFirst();
  // when
  appender.start();
  // then
  verify(deque, timeout(TIMEOUT)).takeFirst();
}
/**
 * Computes the per-line deltas between the two tables and materialises them
 * into a {@link DataTableDiff}.
 */
public DataTableDiff calculateDiffs() {
  return createTableDiff(createDeltasByLine());
}
// Diffing a table against itself must yield an empty diff.
@Test
void considers_same_table_as_equal() {
  assertTrue(new TableDiffer(table(), table()).calculateDiffs().isEmpty());
}
@Override @SuppressWarnings("rawtypes") public void report(SortedMap<String, Gauge> gauges, SortedMap<String, Counter> counters, SortedMap<String, Histogram> histograms, SortedMap<String, Meter> meters, SortedMap<String, Timer> timers) { final long timestamp = clock.getTime() / 1000; // oh it'd be lovely to use Java 7 here try { graphite.connect(); for (Map.Entry<String, Gauge> entry : gauges.entrySet()) { reportGauge(entry.getKey(), entry.getValue(), timestamp); } for (Map.Entry<String, Counter> entry : counters.entrySet()) { reportCounter(entry.getKey(), entry.getValue(), timestamp); } for (Map.Entry<String, Histogram> entry : histograms.entrySet()) { reportHistogram(entry.getKey(), entry.getValue(), timestamp); } for (Map.Entry<String, Meter> entry : meters.entrySet()) { reportMetered(entry.getKey(), entry.getValue(), timestamp); } for (Map.Entry<String, Timer> entry : timers.entrySet()) { reportTimer(entry.getKey(), entry.getValue(), timestamp); } graphite.flush(); } catch (IOException e) { LOGGER.warn("Unable to report to Graphite", graphite, e); } finally { try { graphite.close(); } catch (IOException e1) { LOGGER.warn("Error closing Graphite", graphite, e1); } } }
// Boolean gauge values must be sent to Graphite as "1" (true) and "0" (false),
// each report wrapped in its own connect/flush/close cycle.
@Test
public void reportsBooleanGaugeValues() throws Exception {
  reporter.report(map("gauge", gauge(true)), map(), map(), map(), map());
  reporter.report(map("gauge", gauge(false)), map(), map(), map(), map());
  final InOrder inOrder = inOrder(graphite);
  inOrder.verify(graphite).connect();
  inOrder.verify(graphite).send("prefix.gauge", "1", timestamp);
  inOrder.verify(graphite).flush();
  inOrder.verify(graphite).close();
  inOrder.verify(graphite).connect();
  inOrder.verify(graphite).send("prefix.gauge", "0", timestamp);
  inOrder.verify(graphite).flush();
  inOrder.verify(graphite).close();
  verifyNoMoreInteractions(graphite);
}
@Udf(description = "Converts a number of milliseconds since 1970-01-01 00:00:00 UTC/GMT into the"
    + " string representation of the timestamp in the given format. Single quotes in the"
    + " timestamp format can be escaped with '', for example: 'yyyy-MM-dd''T''HH:mm:ssX'."
    + " The system default time zone is used when no time zone is explicitly provided."
    + " The format pattern should be in the format expected"
    + " by java.time.format.DateTimeFormatter")
public String timestampToString(
    @UdfParameter(
        description = "Milliseconds since"
            + " January 1, 1970, 00:00:00 UTC/GMT.") final long epochMilli,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.time.format.DateTimeFormatter.") final String formatPattern) {
  // Null pattern -> null result (SQL-style null propagation).
  if (formatPattern == null) {
    return null;
  }
  try {
    final Timestamp timestamp = new Timestamp(epochMilli);
    // 'formatters' caches compiled DateTimeFormatters per pattern; its get()
    // may throw ExecutionException for an invalid pattern.
    final DateTimeFormatter formatter = formatters.get(formatPattern);
    // Render in the system default zone (no explicit zone overload used here).
    return timestamp.toInstant()
        .atZone(ZoneId.systemDefault())
        .format(formatter);
  } catch (final ExecutionException | RuntimeException e) {
    throw new KsqlFunctionException("Failed to format timestamp " + epochMilli
        + " with formatter '" + formatPattern
        + "': " + e.getMessage(), e);
  }
}
// Exercises the three-argument overload with an explicit time zone: the epoch
// millis must render in America/Los_Angeles local time.
@Test
public void testPSTTimeZone() {
  // When:
  final String result = udf.timestampToString(1534353043000L, "yyyy-MM-dd HH:mm:ss", "America/Los_Angeles");
  // Then:
  assertThat(result, is("2018-08-15 10:10:43"));
}
/**
 * Loose, type-coercing equality between two arbitrary values.
 *
 * Fast paths: reference equality, either side null (false), and equals().
 * Otherwise dispatches to a type-specific overload, trying the source side
 * first and then the target side symmetrically, for: Boolean, Number, Date,
 * String, Collection, Map, Enum and arrays. As a last resort both sides are
 * copied to maps via FastBeanCopier and compared structurally.
 *
 * @return true when the two values are considered equivalent
 */
public static boolean compare(Object source, Object target) {
    if (source == target) {
        return true;
    }
    if (source == null || target == null) {
        return false;
    }
    if (source.equals(target)) {
        return true;
    }
    if (source instanceof Boolean) {
        return compare(((Boolean) source), target);
    }
    if (source instanceof Number) {
        return compare(((Number) source), target);
    }
    if (target instanceof Number) {
        return compare(((Number) target), source);
    }
    if (source instanceof Date) {
        return compare(((Date) source), target);
    }
    if (target instanceof Date) {
        return compare(((Date) target), source);
    }
    if (source instanceof String) {
        return compare(((String) source), target);
    }
    if (target instanceof String) {
        return compare(((String) target), source);
    }
    if (source instanceof Collection) {
        return compare(((Collection) source), target);
    }
    if (target instanceof Collection) {
        return compare(((Collection) target), source);
    }
    if (source instanceof Map) {
        return compare(((Map) source), target);
    }
    if (target instanceof Map) {
        return compare(((Map) target), source);
    }
    if (source.getClass().isEnum() || source instanceof Enum) {
        return compare(((Enum) source), target);
    }
    // Fix: was 'source instanceof Enum' (copy-paste from the branch above),
    // which missed enum constants with class bodies (their getClass().isEnum()
    // is false while 'instanceof Enum' is true) on the target side.
    if (target.getClass().isEnum() || target instanceof Enum) {
        return compare(((Enum) target), source);
    }
    if (source.getClass().isArray()) {
        return compare(((Object[]) source), target);
    }
    if (target.getClass().isArray()) {
        return compare(((Object[]) target), source);
    }
    // Fallback: structural comparison of bean properties.
    return compare(FastBeanCopier.copy(source, HashMap.class), FastBeanCopier.copy(target, HashMap.class));
}
// Bean-to-bean comparison must coerce dates to their formatted strings and
// numbers to numeric strings ("1" and "1.0" equal 1; "1.0000000001" does not).
@Test
public void beanTest() {
  Date date = new Date();
  Assert.assertTrue(CompareUtils.compare(new TestBean(date),
      new TestBean(DateFormatter.toString(date, "yyyy-MM-dd"))));
  Assert.assertTrue(CompareUtils.compare(new TestBean(1), new TestBean("1")));
  Assert.assertTrue(CompareUtils.compare(new TestBean(1), new TestBean("1.0")));
  Assert.assertFalse(CompareUtils.compare(new TestBean(1), new TestBean("1.0000000001")));
}
/**
 * Lazily builds and returns the task report. Synchronized so concurrent
 * callers construct the report at most once.
 */
@Override
public synchronized TaskReport getReport() {
  if (report == null) {
    constructTaskReport();
  }
  return report;
}
// With two attempts starting at 10 and 20, the task report's start time must be
// the earlier of the two attempt launch times (10).
@Test (timeout=5000)
public void testTaskStartTimes() {
  TaskId taskId = mock(TaskId.class);
  TaskInfo taskInfo = mock(TaskInfo.class);
  Map<TaskAttemptID, TaskAttemptInfo> taskAttempts
    = new TreeMap<TaskAttemptID, TaskAttemptInfo>();
  TaskAttemptID id = new TaskAttemptID("0", 0, TaskType.MAP, 0, 0);
  TaskAttemptInfo info = mock(TaskAttemptInfo.class);
  when(info.getAttemptId()).thenReturn(id);
  when(info.getStartTime()).thenReturn(10l);
  taskAttempts.put(id, info);
  id = new TaskAttemptID("1", 0, TaskType.MAP, 1, 1);
  info = mock(TaskAttemptInfo.class);
  when(info.getAttemptId()).thenReturn(id);
  when(info.getStartTime()).thenReturn(20l);
  taskAttempts.put(id, info);
  when(taskInfo.getAllTaskAttempts()).thenReturn(taskAttempts);
  CompletedTask task = new CompletedTask(taskId, taskInfo);
  TaskReport report = task.getReport();
  // Make sure the startTime returned by report is the lesser of the
  // attempy launch times
  assertTrue(report.getStartTime() == 10);
}
/**
 * Loads the "named-kiesession" KieModule from the file system, opens the
 * "ksession1" session, binds the output stream as the "out" global, inserts a
 * sample message fact, and fires all rules (rule output goes to {@code out}).
 *
 * @param out destination for rule-produced output
 */
public void go(PrintStream out) {
  KieServices ks = KieServices.Factory.get();
  KieRepository kr = ks.getRepository();
  KieModule kModule = kr.addKieModule(ks.getResources().newFileSystemResource(getFile("named-kiesession")));
  KieContainer kContainer = ks.newKieContainer(kModule.getReleaseId());
  KieSession kSession = kContainer.newKieSession("ksession1");
  kSession.setGlobal("out", out);
  Object msg1 = createMessage(kContainer, "Dave", "Hello, HAL. Do you read me, HAL?");
  kSession.insert(msg1);
  kSession.fireAllRules();
}
// Captures the example's PrintStream output and pins the exact two-line
// conversation produced by the fired rules.
@Test
public void testGo() {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  PrintStream ps = new PrintStream(baos);
  new NamedKieSessionFromFileExample().go(ps);
  ps.close();
  String actual = baos.toString();
  String expected = "" +
      "Dave: Hello, HAL. Do you read me, HAL?" + NL +
      "HAL: Dave. I read you." + NL;
  assertEquals(expected, actual);
}
/**
 * Enables or disables this module. The field is only written when the
 * requested state differs from the current one.
 */
@Override
public void setModuleState(boolean enable) {
    boolean changed = (enable != mEnable);
    if (changed) {
        mEnable = enable;
    }
}
// After install the encrypt module is enabled by default; setModuleState(false)
// must flip isEnable() to false.
@Test
public void setModuleState() {
  SAHelper.initSensors(mApplication);
  SAEncryptProtocolImpl encryptProtocol = new SAEncryptProtocolImpl();
  encryptProtocol.install(SensorsDataAPI.sharedInstance(mApplication).getSAContextManager());
  Assert.assertTrue(encryptProtocol.isEnable());
  encryptProtocol.setModuleState(false);
  Assert.assertFalse(encryptProtocol.isEnable());
}
/**
 * Returns the shared no-op cache instance (stores nothing, always empty).
 */
public static PathCache empty() {
  return EMPTY;
}
// The empty() cache must silently drop puts: nothing is retained and size stays 0.
@Test
public void testDisabledCache() {
  PathCache cache = PathCache.empty();
  final Path file = new Path("name", EnumSet.of(Path.Type.file));
  cache.put(file, AttributedList.emptyList());
  assertFalse(cache.containsKey(file));
  assertEquals(0, cache.size());
}
/**
 * Windowed left join using a key-unaware {@link ValueJoiner}; adapts it to a
 * ValueJoinerWithKey and delegates to the primary overload.
 */
@Override
public <VO, VR> KStream<K, VR> leftJoin(final KStream<K, VO> otherStream,
    final ValueJoiner<? super V, ? super VO, ? extends VR> joiner,
    final JoinWindows windows) {
  return leftJoin(otherStream, toValueJoinerWithKey(joiner), windows);
}
// leftJoin with a GlobalKTable must reject a null ValueJoiner with an NPE
// naming the offending parameter.
@Test
public void shouldNotAllowNullValueJoinerOnLeftJoinWithGlobalTable() {
  final NullPointerException exception = assertThrows(
      NullPointerException.class,
      () -> testStream.leftJoin(testGlobalTable, MockMapper.selectValueMapper(),
          (ValueJoiner<? super String, ? super String, ?>) null));
  assertThat(exception.getMessage(), equalTo("joiner can't be null"));
}
/**
 * Forwards {@code value} to the outbound observer, applying flow control.
 *
 * Every {@code maxMessagesBeforeCheck} messages it blocks until the outbound
 * channel reports ready, waiting on the phaser with exponential backoff
 * (1s doubling up to 60s) so a missing onReady callback cannot hang forever.
 * All of this runs under {@code lock}, serialising concurrent senders.
 */
@Override
public void onNext(T value) {
  synchronized (lock) {
    if (++numMessages >= maxMessagesBeforeCheck) {
      numMessages = 0;
      int waitSeconds = 1;
      int totalSecondsWaited = 0;
      int phase = phaser.getPhase();
      // Record the initial phase in case we are in the inbound gRPC thread where the phase won't
      // advance.
      int initialPhase = phase;
      // A negative phase indicates that the phaser is terminated.
      while (phase >= 0 && !outboundObserver.isReady()) {
        try {
          phase = phaser.awaitAdvanceInterruptibly(phase, waitSeconds, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
          totalSecondsWaited += waitSeconds;
          // Double the backoff for re-evaluating the isReady bit up to a maximum of once per
          // minute. This bounds the waiting if the onReady callback is not called as expected.
          waitSeconds = Math.min(waitSeconds * 2, 60);
        } catch (InterruptedException e) {
          // Restore the interrupt flag before surfacing the failure.
          Thread.currentThread().interrupt();
          throw new RuntimeException(e);
        }
      }
      if (totalSecondsWaited > 0) {
        // If the phase didn't change, this means that the installed onReady callback had not
        // been invoked.
        if (initialPhase == phase) {
          LOG.info(
              "Output channel stalled for {}s, outbound thread {}. OnReady notification was "
                  + "not invoked, ensure the inbound gRPC thread is not used for output.",
              totalSecondsWaited,
              Thread.currentThread().getName());
        } else if (totalSecondsWaited > 60) {
          LOG.warn(
              "Output channel stalled for {}s, outbound thread {}.",
              totalSecondsWaited,
              Thread.currentThread().getName());
        } else {
          LOG.debug(
              "Output channel stalled for {}s, outbound thread {}.",
              totalSecondsWaited,
              Thread.currentThread().getName());
        }
      }
    }
    outboundObserver.onNext(value);
  }
}
// With maxMessagesBeforeCheck=10, the isReady probe must fire once per 10
// messages (5 writers x 10 msgs => 5 probe windows of 10 values each), and
// per-writer message order must be preserved.
@Test
public void testMessageCheckInterval() throws Exception {
  final AtomicInteger index = new AtomicInteger();
  ArrayListMultimap<Integer, String> values = ArrayListMultimap.create();
  final DirectStreamObserver<String> streamObserver =
      new DirectStreamObserver<>(
          new AdvancingPhaser(1),
          TestStreams.withOnNext((String t) -> assertTrue(values.put(index.get(), t)))
              .withIsReady(
                  () -> {
                    // Each isReady check opens a new bucket for subsequent values.
                    index.incrementAndGet();
                    return true;
                  })
              .build(),
          10);
  List<String> prefixes = ImmutableList.of("0", "1", "2", "3", "4");
  List<Future<String>> results = new ArrayList<>();
  for (final String prefix : prefixes) {
    results.add(
        executor.submit(
            () -> {
              for (int i = 0; i < 10; i++) {
                streamObserver.onNext(prefix + i);
              }
              return prefix;
            }));
  }
  for (Future<?> result : results) {
    result.get();
  }
  assertEquals(50, values.size());
  for (Collection<String> valuesPerMessageCheck : values.asMap().values()) {
    assertThat(valuesPerMessageCheck, hasSize(10));
  }
  // Check that order was maintained per writer.
  int[] prefixesIndex = new int[prefixes.size()];
  for (String onNextValue : values.values()) {
    int prefix = Integer.parseInt(onNextValue.substring(0, 1));
    int suffix = Integer.parseInt(onNextValue.substring(1, 2));
    assertEquals(prefixesIndex[prefix], suffix);
    prefixesIndex[prefix] += 1;
  }
}
/**
 * Returns the UUIDs previously loaded into this repository.
 *
 * @throws IllegalStateException if the UUIDs have not been set yet
 */
@Override
public Set<String> getUuids() {
  checkState(uuids != null, "UUIDs have not been set in repository");
  return uuids;
}
// Reading UUIDs before they have been set must fail fast with an
// IllegalStateException carrying the documented message.
@Test
public void getUuids_fails_if_not_initialized() {
  assertThatThrownBy(() -> underTest.getUuids())
    .isInstanceOf(IllegalStateException.class)
    .hasMessage("UUIDs have not been set in repository");
}
/**
 * Renders the version as its parts joined with '.' (e.g. "1.2.3.r1").
 */
@Override
public String toString() {
  return StringUtils.join(versionParts, '.');
}
// "1.2.3r1" is parsed into parts [1, 2, 3, r1], so toString yields "1.2.3.r1".
@Test
public void testToString() {
  DependencyVersion instance = new DependencyVersion("1.2.3r1");
  String expResult = "1.2.3.r1";
  String result = instance.toString();
  assertEquals(expResult, result);
}
/**
 * Fails if the subject matches the given regex. A null subject also fails
 * (it cannot be shown not to match).
 *
 * @param regex the pattern the subject must NOT match; must itself be non-null
 */
public void doesNotMatch(@Nullable String regex) {
  checkNotNull(regex);
  if (actual == null) {
    failWithActual("expected a string that does not match", regex);
  } else if (actual.matches(regex)) {
    failWithActual("expected not to match", regex);
  }
}
// doesNotMatch on a null subject must fail with the null-specific message,
// not by evaluating the regex.
@Test
public void stringDoesNotMatchStringFailNull() {
  expectFailureWhenTestingThat(null).doesNotMatch(".*aaa.*");
  assertFailureValue("expected a string that does not match", ".*aaa.*");
}
/**
 * Creates a proxy implementing the given {@link PipelineOptions} sub-interface
 * with all properties at their defaults.
 */
public static <T extends PipelineOptions> T as(Class<T> klass) {
  return new Builder().as(klass);
}
// A property whose getter type (boolean) disagrees with its setter type (int)
// must be rejected at proxy-creation time with a descriptive message.
@Test
public void testGetterSetterTypeMismatchThrows() throws Exception {
  expectedException.expect(IllegalArgumentException.class);
  expectedException.expectMessage(
      "Type mismatch between getter and setter methods for property [value]. Getter is of type "
          + "[boolean] whereas setter is of type [int].");
  PipelineOptionsFactory.as(GetterSetterTypeMismatch.class);
}
/**
 * The configured metric reporter factories (JSON/YAML-bound via Jackson).
 */
@JsonProperty
public List<ReporterFactory> getReporters() {
  return reporters;
}
// The first of the three configured reporters must be a ConsoleReporterFactory
// whose includes are {P50, P95, P98, P99} and whose excludes are {P98}.
@Test
void canReadExcludedAndIncludedAttributes() {
  assertThat(config.getReporters())
      .hasSize(3)
      .element(0)
      .isInstanceOfSatisfying(ConsoleReporterFactory.class, consoleReporterFactory ->
          assertThat(consoleReporterFactory)
              .satisfies(factory -> assertThat(factory.getIncludesAttributes())
                  .isEqualTo(EnumSet.of(MetricAttribute.P50, MetricAttribute.P95,
                      MetricAttribute.P98, MetricAttribute.P99)))
              .satisfies(factory -> assertThat(factory.getExcludesAttributes()).isEqualTo(EnumSet.of(MetricAttribute.P98))));
}
/**
 * Fails unless the subject is exactly {@code false}; a null subject fails via
 * the isEqualTo(false) path so the failure message shows the expected value.
 */
public void isFalse() {
  if (actual == null) {
    isEqualTo(false); // fails
  } else if (actual) {
    failWithoutActual(simpleFact("expected to be false"));
  }
}
// Sanity check: asserting false isFalse() passes.
@Test
public void isFalse() {
  assertThat(false).isFalse();
}
/**
 * Returns all loaded service instances for the given SPI interface.
 * The interface must have been registered beforehand (getRegisteredSPI
 * enforces this).
 *
 * @param serviceInterface the SPI interface to look up
 * @return the registered service instances
 */
@SuppressWarnings("unchecked")
public static <T> Collection<T> getServiceInstances(final Class<T> serviceInterface) {
  return (Collection<T>) getRegisteredSPI(serviceInterface).getServiceInstances();
}
// Looking up an unregistered SPI type (Object) must fail with
// IllegalArgumentException rather than return an empty collection.
@Test
void assertGetServiceInstancesWithNoInterface() {
  assertThrows(IllegalArgumentException.class, () -> ShardingSphereServiceLoader.getServiceInstances(Object.class));
}
/**
 * Cancels a pending state-snapshot future and discards any state it produced,
 * returning (stateSize, checkpointedSize) of whatever state was observed.
 *
 * If cancellation fails (the future already completed), the result is fetched,
 * measured and discarded; exceptions from that path are logged and swallowed
 * as expected fallout of cancellation. If cancellation succeeds but the future
 * had already completed, only the sizes are read (no discard possible here).
 * A null future yields (0, 0).
 */
public static Tuple2<Long, Long> discardStateFuture(Future<? extends StateObject> stateFuture)
        throws Exception {
  long stateSize = 0, checkpointedSize = 0;
  if (null != stateFuture) {
    if (!stateFuture.cancel(true)) {
      try {
        // We attempt to get a result, in case the future completed before cancellation.
        if (stateFuture instanceof RunnableFuture<?> && !stateFuture.isDone()) {
          ((RunnableFuture<?>) stateFuture).run();
        }
        StateObject stateObject = stateFuture.get();
        if (stateObject != null) {
          stateSize = stateObject.getStateSize();
          checkpointedSize = getCheckpointedSize(stateObject, stateSize);
          stateObject.discardState();
        }
      } catch (Exception ex) {
        LOG.debug(
            "Cancelled execution of snapshot future runnable. Cancellation produced the following "
                + "exception, which is expected an can be ignored.",
            ex);
      }
    } else if (stateFuture.isDone()) {
      try {
        StateObject stateObject = stateFuture.get();
        stateSize = stateObject.getStateSize();
        checkpointedSize = getCheckpointedSize(stateObject, stateSize);
      } catch (Exception e) {
        // ignored
      }
    }
  }
  return Tuple2.of(stateSize, checkpointedSize);
}
// A completed future reports its (stateSize, checkpointedSize); null, pending,
// failed and every empty-future permutation all report (0, 0).
@Test
void testDiscardStateSize() throws Exception {
  assertThat(discardStateFuture(completedFuture(new TestStateObject(1234, 123))))
      .isEqualTo(Tuple2.of(1234L, 123L));
  Tuple2<Long, Long> zeroSize = Tuple2.of(0L, 0L);
  assertThat(discardStateFuture(null)).isEqualTo(zeroSize);
  assertThat(discardStateFuture(new CompletableFuture<>())).isEqualTo(zeroSize);
  assertThat(discardStateFuture(completedExceptionally(new RuntimeException())))
      .isEqualTo(zeroSize);
  assertThat(discardStateFuture(emptyFuture(false, true))).isEqualTo(zeroSize);
  assertThat(discardStateFuture(emptyFuture(false, false))).isEqualTo(zeroSize);
  assertThat(discardStateFuture(emptyFuture(true, true))).isEqualTo(zeroSize);
  assertThat(discardStateFuture(emptyFuture(true, false))).isEqualTo(zeroSize);
}
/**
 * Convenience overload: coerces the expressions with no lambda type mappings.
 */
static Result coerceUserList(
    final Collection<Expression> expressions,
    final ExpressionTypeManager typeManager
) {
  return coerceUserList(expressions, typeManager, Collections.emptyMap());
}
// Two map literals whose KEY types differ (INT vs STRING) cannot be coerced to
// a common type; the error names the mismatched map types and the offending
// expression.
@Test
public void shouldNotCoerceMapWithDifferentKeyExpression() {
  // Given:
  final ImmutableList<Expression> expressions = ImmutableList.of(
      new CreateMapExpression(
          ImmutableMap.of(
              new IntegerLiteral(10),
              new IntegerLiteral(10)
          )
      ),
      new CreateMapExpression(
          ImmutableMap.of(
              STRING_EXPRESSION,
              new IntegerLiteral(10)
          )
      )
  );
  // When:
  final Exception e = assertThrows(
      KsqlException.class,
      () -> CoercionUtil.coerceUserList(expressions, typeManager)
  );
  // Then:
  assertThat(e.getMessage(),
      startsWith("operator does not exist: MAP<INTEGER, INTEGER> = MAP<STRING, INTEGER> (MAP(STR:=10))"));
}
/**
 * Returns the charset handler matching the database dialect, or null when no
 * charset check applies (H2).
 *
 * @throws IllegalArgumentException for dialects without a handler
 */
@VisibleForTesting
@CheckForNull
CharsetHandler getHandler(Dialect dialect) {
  switch (dialect.getId()) {
    case H2.ID:
      // nothing to check
      return null;
    case Oracle.ID:
      return new OracleCharsetHandler(sqlExecutor);
    case PostgreSql.ID:
      return new PostgresCharsetHandler(sqlExecutor, new PostgresMetadataReader(sqlExecutor));
    case MsSql.ID:
      return new MssqlCharsetHandler(sqlExecutor, new MssqlMetadataReader(sqlExecutor));
    default:
      throw new IllegalArgumentException("Database not supported: " + dialect.getId());
  }
}
// The MsSql dialect must map to MssqlCharsetHandler.
@Test
public void getHandler_returns_MssqlCharsetHandler_if_mssql() {
  assertThat(underTest.getHandler(new MsSql())).isInstanceOf(MssqlCharsetHandler.class);
}
/**
 * Merges new timestamp column statistics into the running aggregate in place:
 * low/high bounds, null counts, the NDV estimate (via combined estimators),
 * and the KLL histogram estimator. The merged data is written back into
 * {@code aggregateColStats}.
 */
@Override
public void merge(ColumnStatisticsObj aggregateColStats, ColumnStatisticsObj newColStats) {
  LOG.debug("Merging statistics: [aggregateColStats:{}, newColStats: {}]", aggregateColStats, newColStats);
  TimestampColumnStatsDataInspector aggregateData = timestampInspectorFromStats(aggregateColStats);
  TimestampColumnStatsDataInspector newData = timestampInspectorFromStats(newColStats);
  // Null-safe min of the two low bounds; leave the aggregate untouched when both are absent.
  Timestamp lowValue = mergeLowValue(getLowValue(aggregateData), getLowValue(newData));
  if (lowValue != null) {
    aggregateData.setLowValue(lowValue);
  }
  // Null-safe max of the two high bounds.
  Timestamp highValue = mergeHighValue(getHighValue(aggregateData), getHighValue(newData));
  if (highValue != null) {
    aggregateData.setHighValue(highValue);
  }
  aggregateData.setNumNulls(mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
  // Combine NDV estimators (e.g. HLL sketches); the merged estimator is kept on the aggregate.
  NumDistinctValueEstimator oldNDVEst = aggregateData.getNdvEstimator();
  NumDistinctValueEstimator newNDVEst = newData.getNdvEstimator();
  List<NumDistinctValueEstimator> ndvEstimatorsList = Arrays.asList(oldNDVEst, newNDVEst);
  aggregateData.setNumDVs(mergeNumDistinctValueEstimator(aggregateColStats.getColName(),
      ndvEstimatorsList, aggregateData.getNumDVs(), newData.getNumDVs()));
  aggregateData.setNdvEstimator(ndvEstimatorsList.get(0));
  // Merge KLL histogram sketches for quantile estimates.
  KllHistogramEstimator oldKllEst = aggregateData.getHistogramEstimator();
  KllHistogramEstimator newKllEst = newData.getHistogramEstimator();
  aggregateData.setHistogramEstimator(mergeHistogramEstimator(aggregateColStats.getColName(), oldKllEst, newKllEst));
  aggregateColStats.getStatsData().setTimestampStats(aggregateData);
}
// Merging stats that carry no low/high/NDV data into a populated aggregate must
// keep the aggregate's bounds, sketches and NDV while summing the null counts.
@Test
public void testMergeNonNullWithNullValues() {
  ColumnStatisticsObj aggrObj = createColumnStatisticsObj(new ColStatsBuilder<>(Timestamp.class)
      .low(TS_1)
      .high(TS_3)
      .numNulls(4)
      .numDVs(2)
      .hll(TS_1.getSecondsSinceEpoch(), TS_3.getSecondsSinceEpoch(), TS_3.getSecondsSinceEpoch())
      .kll(TS_1.getSecondsSinceEpoch(), TS_3.getSecondsSinceEpoch(), TS_3.getSecondsSinceEpoch())
      .build());
  ColumnStatisticsObj newObj = createColumnStatisticsObj(new ColStatsBuilder<>(Timestamp.class)
      .low(null)
      .high(null)
      .numNulls(2)
      .numDVs(0)
      .build());
  merger.merge(aggrObj, newObj);
  ColumnStatisticsData expectedColumnStatisticsData = new ColStatsBuilder<>(Timestamp.class)
      .low(TS_1)
      .high(TS_3)
      .numNulls(6)
      .numDVs(2)
      .hll(TS_1.getSecondsSinceEpoch(), TS_3.getSecondsSinceEpoch(), TS_3.getSecondsSinceEpoch())
      .kll(TS_1.getSecondsSinceEpoch(), TS_3.getSecondsSinceEpoch(), TS_3.getSecondsSinceEpoch())
      .build();
  assertEquals(expectedColumnStatisticsData, aggrObj.getStatsData());
}
/**
 * Creates the consumer for this JMX endpoint.
 *
 * When a monitorType is configured, validates the monitor-specific settings
 * first (platform server required; observedAttribute required; "string"
 * monitors need stringToCompare plus at least one of notifyDiffer/notifyMatch;
 * "gauge" monitors need both thresholds plus at least one of
 * notifyHigh/notifyLow) and returns a JMXMonitorConsumer. Otherwise a plain
 * JMXConsumer is returned with no extra validation.
 *
 * @throws IllegalArgumentException when a required monitor option is missing
 */
@Override
public Consumer createConsumer(Processor aProcessor) throws Exception {
  // validate that all of the endpoint is configured properly
  if (getMonitorType() != null) {
    if (!isPlatformServer()) {
      throw new IllegalArgumentException(ERR_PLATFORM_SERVER);
    }
    if (ObjectHelper.isEmpty(getObservedAttribute())) {
      throw new IllegalArgumentException(ERR_OBSERVED_ATTRIBUTE);
    }
    if (getMonitorType().equals("string")) {
      if (ObjectHelper.isEmpty(getStringToCompare())) {
        throw new IllegalArgumentException(ERR_STRING_TO_COMPARE);
      }
      if (!isNotifyDiffer() && !isNotifyMatch()) {
        throw new IllegalArgumentException(ERR_STRING_NOTIFY);
      }
    } else if (getMonitorType().equals("gauge")) {
      if (!isNotifyHigh() && !isNotifyLow()) {
        throw new IllegalArgumentException(ERR_GAUGE_NOTIFY);
      }
      if (getThresholdHigh() == null) {
        throw new IllegalArgumentException(ERR_THRESHOLD_HIGH);
      }
      if (getThresholdLow() == null) {
        throw new IllegalArgumentException(ERR_THRESHOLD_LOW);
      }
    }
    JMXMonitorConsumer answer = new JMXMonitorConsumer(this, aProcessor);
    configureConsumer(answer);
    return answer;
  } else {
    // shouldn't need any other validation.
    JMXConsumer answer = new JMXConsumer(this, aProcessor);
    configureConsumer(answer);
    return answer;
  }
}
// A gauge monitor endpoint missing thresholdLow must be rejected at
// consumer-creation time with the dedicated error message.
@Test
public void noThresholdLow() throws Exception {
  JMXEndpoint ep = context.getEndpoint(
      "jmx:platform?objectDomain=FooDomain&objectName=theObjectName&monitorType=gauge&observedAttribute=foo&thresholdHigh=100&notifyHigh=true",
      JMXEndpoint.class);
  try {
    ep.createConsumer(null);
    fail("expected exception");
  } catch (IllegalArgumentException e) {
    assertEquals(JMXEndpoint.ERR_THRESHOLD_LOW, e.getMessage());
  }
}
/**
 * Computes the RSV bits for an outgoing frame: data frames (text/binary) get
 * RSV1 set to advertise per-message deflate; all other frame types keep their
 * RSV bits unchanged.
 */
@Override
protected int rsv(WebSocketFrame msg) {
    if (msg instanceof TextWebSocketFrame || msg instanceof BinaryWebSocketFrame) {
        return msg.rsv() | WebSocketExtension.RSV1;
    }
    return msg.rsv();
}
// When the skip-callback says to skip compression, a binary frame must pass
// through unchanged: RSV stays 0 and the payload is byte-identical.
@Test
public void testCompressionSkipForBinaryFrame() {
  EmbeddedChannel encoderChannel = new EmbeddedChannel(
      new PerMessageDeflateEncoder(9, 15, false, ALWAYS_SKIP));
  byte[] payload = new byte[300];
  random.nextBytes(payload);
  WebSocketFrame binaryFrame = new BinaryWebSocketFrame(Unpooled.wrappedBuffer(payload));
  assertTrue(encoderChannel.writeOutbound(binaryFrame.copy()));
  WebSocketFrame outboundFrame = encoderChannel.readOutbound();
  assertEquals(0, outboundFrame.rsv());
  assertArrayEquals(payload, ByteBufUtil.getBytes(outboundFrame.content()));
  assertTrue(outboundFrame.release());
  assertFalse(encoderChannel.finish());
}
/**
 * Looks up column metadata by 1-based column index (JDBC convention) by
 * translating the index to the column name.
 */
public PipelineColumnMetaData getColumnMetaData(final int columnIndex) {
  return getColumnMetaData(columnNames.get(columnIndex - 1));
}
@Test void assertGetColumnMetaDataGivenColumnIndex() { PipelineColumnMetaData actual = pipelineTableMetaData.getColumnMetaData(1); assertThat(actual.getOrdinalPosition(), is(1)); assertThat(actual.getName(), is("test")); assertThat(actual.getDataType(), is(Types.INTEGER)); assertTrue(actual.isPrimaryKey()); }
@Override public String[] split(String text) { for (Pattern regexp : CONTRACTIONS2) text = regexp.matcher(text).replaceAll("$1 $2"); for (Pattern regexp : CONTRACTIONS3) text = regexp.matcher(text).replaceAll("$1 $2 $3"); text = DELIMITERS[0].matcher(text).replaceAll(" $1 "); text = DELIMITERS[1].matcher(text).replaceAll(" $1"); text = DELIMITERS[2].matcher(text).replaceAll(" $1"); text = DELIMITERS[3].matcher(text).replaceAll(" . "); String[] words = WHITESPACE.split(text); if (words.length > 1 && words[words.length-1].equals(".")) { if (EnglishAbbreviations.contains(words[words.length-2])) { words[words.length-2] = words[words.length-2] + "."; } } return words; }
@Test public void testSplit() { System.out.println("tokenize"); String text = "Good muffins cost $3.88\nin New York. Please buy " + "me\ntwo of them.\n\nYou cannot eat them. I gonna eat them. " + "Thanks."; String[] expResult = {"Good", "muffins", "cost", "$", "3.88", "in", "New", "York.", "Please", "buy", "me", "two", "of", "them", ".", "You", "can", "not", "eat", "them.", "I", "gon", "na", "eat", "them.", "Thanks", "."}; PennTreebankTokenizer instance = PennTreebankTokenizer.getInstance(); String[] result = instance.split(text); assertEquals(expResult.length, result.length); for (int i = 0; i < result.length; i++) { assertEquals(expResult[i], result[i]); } }
@GET @Path("/apps/{appid}/appattempts/{appattemptid}") @Produces(MediaType.APPLICATION_JSON) public TimelineEntity getAppAttempt(@Context HttpServletRequest req, @Context HttpServletResponse res, @PathParam("appid") String appId, @PathParam("appattemptid") String appAttemptId, @QueryParam("userid") String userId, @QueryParam("flowname") String flowName, @QueryParam("flowrunid") String flowRunId, @QueryParam("confstoretrieve") String confsToRetrieve, @QueryParam("metricstoretrieve") String metricsToRetrieve, @QueryParam("fields") String fields, @QueryParam("metricslimit") String metricsLimit, @QueryParam("metricstimestart") String metricsTimeStart, @QueryParam("metricstimeend") String metricsTimeEnd, @QueryParam("entityidprefix") String entityIdPrefix) { return getAppAttempt(req, res, null, appId, appAttemptId, userId, flowName, flowRunId, confsToRetrieve, metricsToRetrieve, fields, metricsLimit, metricsTimeStart, metricsTimeEnd, entityIdPrefix); }
@Test void testGetAppAttempt() throws Exception { Client client = createClient(); try { URI uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" + "timeline/clusters/cluster1/apps/app1/entities/" + "YARN_APPLICATION_ATTEMPT/app-attempt-1"); ClientResponse resp = getResponse(client, uri); TimelineEntity entities1 = resp.getEntity(new GenericType<TimelineEntity>() { }); assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, resp.getType().toString()); assertNotNull(entities1); uri = URI.create("http://localhost:" + serverPort + "/ws/v2/" + "timeline/clusters/cluster1/apps/app1/appattempts/app-attempt-1"); resp = getResponse(client, uri); TimelineEntity entities2 = resp.getEntity(new GenericType<TimelineEntity>() { }); assertEquals(MediaType.APPLICATION_JSON_TYPE, resp.getType()); assertNotNull(entities2); assertEquals(entities1, entities2); } finally { client.destroy(); } }
@ShellMethod(key = "desc", value = "Describe Hoodie Table properties") public String descTable() { HoodieTableMetaClient client = HoodieCLI.getTableMetaClient(); TableHeader header = new TableHeader().addTableHeaderField("Property").addTableHeaderField("Value"); List<Comparable[]> rows = new ArrayList<>(); rows.add(new Comparable[] {"basePath", client.getBasePath()}); rows.add(new Comparable[] {"metaPath", client.getMetaPath()}); rows.add(new Comparable[] {"fileSystem", client.getStorage().getScheme()}); client.getTableConfig().propsMap().entrySet().forEach(e -> { rows.add(new Comparable[] {e.getKey(), e.getValue()}); }); return HoodiePrintHelper.print(header, new HashMap<>(), "", false, -1, false, rows); }
@Test public void testDescTable() { // Prepare table assertTrue(prepareTable()); // Test desc table Object result = shell.evaluate(() -> "desc"); assertTrue(ShellEvaluationResultUtil.isSuccess(result)); // check table's basePath metaPath and type assertTrue(result.toString().contains(tablePath)); assertTrue(result.toString().contains(metaPath)); assertTrue(result.toString().contains("COPY_ON_WRITE")); }
public static Map<String, String> getProperties(ByteBuffer message) { return MessageDecoder.decodeProperties(message.slice()); }
@Test public void verifyMockedMessageBuffer() { ByteBuffer buffer = buildMockedMessageBuffer(); Assert.assertEquals(MSG_LEN, buffer.remaining()); Assert.assertEquals(MSG_LEN, buffer.getInt()); Assert.assertEquals(MessageDecoder.MESSAGE_MAGIC_CODE_V2, buffer.getInt()); Assert.assertEquals(3, buffer.getInt()); Assert.assertEquals(4, buffer.getInt()); Assert.assertEquals(5, buffer.getInt()); Assert.assertEquals(6, buffer.getLong()); Assert.assertEquals(7, buffer.getLong()); Assert.assertEquals(8, buffer.getInt()); Assert.assertEquals(9, buffer.getLong()); Assert.assertEquals(10, buffer.getLong()); Assert.assertEquals(11, buffer.getLong()); Assert.assertEquals(10, buffer.getLong()); Assert.assertEquals(13, buffer.getInt()); Assert.assertEquals(14, buffer.getLong()); Assert.assertEquals(0, buffer.getInt()); Assert.assertEquals(0, buffer.getShort()); buffer.rewind(); Map<String, String> properties = MessageFormatUtil.getProperties(buffer); Assert.assertEquals("uk", properties.get(MessageConst.PROPERTY_UNIQ_CLIENT_MESSAGE_ID_KEYIDX)); Assert.assertEquals("UserValue0", properties.get("UserKey")); }
public ShenyuResponse(final int status, final String reason, final Map<String, Collection<String>> headers, final String body, final ShenyuRequest request) { this.status = status; this.reason = reason; this.headers = headers; this.body = body; this.request = request; }
@Test public void testShenyuResponse() { Map<String, Collection<String>> headerMap = new HashMap<>(); headerMap.put("header", Arrays.asList("header1", "header2")); String body = "{key1:\"value1\"}"; ShenyuRequest request = ShenyuRequest.create(ShenyuRequest.HttpMethod.GET, "https://shenyu.apache.org", headerMap, null, null, null); ShenyuResponse response = new ShenyuResponse( HttpStatus.SC_OK, "success", headerMap, body, request ); Assert.assertNotNull(response); }
public static String getComputeNodeStateNodePath(final String instanceId) { return String.join("/", "", ROOT_NODE, COMPUTE_NODE, STATUS_NODE, instanceId); }
@Test void assertGetComputeNodeStateNodePath() { assertThat(ComputeNode.getComputeNodeStateNodePath("foo_instance"), is("/nodes/compute_nodes/status/foo_instance")); }
@ExecuteOn(TaskExecutors.IO) @Post(uri = "executions/daily") @Operation(tags = {"Stats"}, summary = "Get daily statistics for executions") public List<DailyExecutionStatistics> dailyStatistics(@Body @Valid StatisticRequest statisticRequest) { // @TODO: seems to be converted back to utc by micronaut return executionRepository.dailyStatistics( statisticRequest.q(), tenantService.resolveTenant(), statisticRequest.namespace(), statisticRequest.flowId(), statisticRequest.startDate() != null ? statisticRequest.startDate().withZoneSameInstant(ZoneId.systemDefault()) : null, statisticRequest.endDate() != null ? statisticRequest.endDate().withZoneSameInstant(ZoneId.systemDefault()) : null, null, statisticRequest.state(), false); }
@Test void dailyStatistics() { var dailyStatistics = client.toBlocking().retrieve( HttpRequest .POST("/api/v1/stats/executions/daily", new StatsController.StatisticRequest(null, null, null, ZonedDateTime.now().minusDays(1), ZonedDateTime.now(), null)) .contentType(MediaType.APPLICATION_JSON), Argument.listOf(DailyExecutionStatistics.class) ); assertThat(dailyStatistics, notNullValue()); }
public static Node build(final List<JoinInfo> joins) { Node root = null; for (final JoinInfo join : joins) { if (root == null) { root = new Leaf(join.getLeftSource()); } if (root.containsSource(join.getRightSource()) && root.containsSource(join.getLeftSource())) { throw new KsqlException("Cannot perform circular join - both " + join.getRightSource() + " and " + join.getLeftJoinExpression() + " are already included in the current join tree: " + root.debugString(0)); } else if (root.containsSource(join.getLeftSource())) { root = new Join(root, new Leaf(join.getRightSource()), join); } else if (root.containsSource(join.getRightSource())) { root = new Join(root, new Leaf(join.getLeftSource()), join.flip()); } else { throw new KsqlException( "Cannot build JOIN tree; neither source in the join is the FROM source or included " + "in a previous JOIN: " + join + ". The current join tree is " + root.debugString(0) ); } } return root; }
@Test public void shouldIgnoreNonQualifiedColumnReferencesWhenComputingViableKeys() { // Given: when(j1.getLeftSource()).thenReturn(a); when(j1.getRightSource()).thenReturn(b); when(j2.getLeftSource()).thenReturn(a); when(j2.getRightSource()).thenReturn(c); when(j1.getLeftJoinExpression()).thenReturn(e1); when(j1.getRightJoinExpression()).thenReturn(e2); when(j2.getLeftJoinExpression()).thenReturn(e1); when(j2.getRightJoinExpression()).thenReturn(e3); final List<JoinInfo> joins = ImmutableList.of(j1, j2); final Node root = JoinTree.build(joins); // When: final List<?> keys = root.viableKeyColumns(); // Then: assertThat(keys, is(empty())); }
private void getMode(Request req) { dispatchRpcRequest(req, () -> { req.returnValues().add(new StringValue(proxyServer.getMode().name())); req.returnRequest(); }); }
@Test void testRpcMethodGetModeAndSetMode() { Request req = new Request("getMode"); client.invoke(req); assertFalse(req.isError(), req.errorMessage()); assertEquals(1, req.returnValues().size()); assertEquals("default", req.returnValues().get(0).asString()); req = new Request("setMode"); String mode = "memorycache"; req.parameters().add(new StringValue(mode)); client.invoke(req); assertFalse(req.isError(), req.errorMessage()); assertEquals(1, req.returnValues().size()); String[] ret = req.returnValues().get(0).asStringArray(); assertEquals(2, ret.length); assertEquals("0", ret[0]); assertEquals("success", ret[1]); assertEquals(mode, server.proxyServer().getMode().name()); req = new Request("getMode"); client.invoke(req); assertFalse(req.isError(), req.errorMessage()); assertEquals(1, req.returnValues().size()); assertEquals(mode, req.returnValues().get(0).asString()); req = new Request("setMode"); String oldMode = mode; mode = "invalid"; req.parameters().add(new StringValue(mode)); client.invoke(req); assertFalse(req.isError(), req.errorMessage()); ret = req.returnValues().get(0).asStringArray(); assertEquals(2, ret.length); assertEquals("1", ret[0]); assertEquals("Unrecognized mode '" + mode + "' supplied. Legal modes are '" + Mode.modes() + "'", ret[1]); assertEquals(oldMode, server.proxyServer().getMode().name()); }
public B check(Boolean check) { this.check = check; return getThis(); }
@Test void check() { ReferenceBuilder builder = new ReferenceBuilder(); builder.check(true); Assertions.assertTrue(builder.build().isCheck()); builder.check(false); Assertions.assertFalse(builder.build().isCheck()); }
List<MetricsReporter> metricsReporters() { List<MetricsReporter> reporters = CommonClientConfigs.metricsReporters(this); MetricsContext metricsContext = new KafkaMetricsContext("kafka.connect.mirror"); for (MetricsReporter reporter : reporters) { reporter.contextChange(metricsContext); } return reporters; }
@Test @SuppressWarnings("deprecation") public void testMetricsReporters() { Map<String, String> connectorProps = makeProps("metric.reporters", MockMetricsReporter.class.getName()); MirrorConnectorConfig config = new TestMirrorConnectorConfig(connectorProps); assertEquals(2, config.metricsReporters().size()); connectorProps.put(CommonClientConfigs.AUTO_INCLUDE_JMX_REPORTER_CONFIG, "false"); config = new TestMirrorConnectorConfig(connectorProps); assertEquals(1, config.metricsReporters().size()); }
void fetchRepositoryAndPackageMetaData(GoPluginDescriptor pluginDescriptor) { try { RepositoryConfiguration repositoryConfiguration = packageRepositoryExtension.getRepositoryConfiguration(pluginDescriptor.id()); com.thoughtworks.go.plugin.api.material.packagerepository.PackageConfiguration packageConfiguration = packageRepositoryExtension.getPackageConfiguration(pluginDescriptor.id()); if (repositoryConfiguration == null) { throw new RuntimeException(format("Plugin[%s] returned null repository configuration", pluginDescriptor.id())); } if (packageConfiguration == null) { throw new RuntimeException(format("Plugin[%s] returned null package configuration", pluginDescriptor.id())); } repositoryMetadataStore.addMetadataFor(pluginDescriptor.id(), new PackageConfigurations(repositoryConfiguration)); packageMetadataStore.addMetadataFor(pluginDescriptor.id(), new PackageConfigurations(packageConfiguration)); } catch (GoPluginFrameworkException e) { LOGGER.error("Failed to fetch package metadata for plugin : {}", pluginDescriptor.id(), e); } }
@Test public void shouldThrowExceptionWhenNullPackageConfigurationReturned() { when(packageRepositoryExtension.getPackageConfiguration(pluginDescriptor.id())).thenReturn(null); try { metadataLoader.fetchRepositoryAndPackageMetaData(pluginDescriptor); } catch (Exception e) { assertThat(e.getMessage(), is("Plugin[plugin-id] returned null repository configuration")); } assertThat(RepositoryMetadataStore.getInstance().getMetadata(pluginDescriptor.id()), nullValue()); assertThat(PackageMetadataStore.getInstance().getMetadata(pluginDescriptor.id()), nullValue()); }
public static long ordinalOf(double value) { if (value == Double.POSITIVE_INFINITY) { return 0xFFFFFFFFFFFFFFFFL; } if (value == Double.NEGATIVE_INFINITY || Double.isNaN(value)) { return 0; } long bits = Double.doubleToLongBits(value); // need negatives to come before positives if ((bits & Long.MIN_VALUE) == Long.MIN_VALUE) { // conflate 0/-0, or reverse order of negatives bits = bits == Long.MIN_VALUE ? Long.MIN_VALUE : ~bits; } else { // positives after negatives bits ^= Long.MIN_VALUE; } return bits; }
@Test public void testInfinities() { assertEquals(FPOrdering.ordinalOf(Double.NEGATIVE_INFINITY), 0); assertEquals(FPOrdering.ordinalOf(Float.NEGATIVE_INFINITY), 0); assertEquals(FPOrdering.ordinalOf(Double.POSITIVE_INFINITY), 0xFFFFFFFFFFFFFFFFL); assertEquals(FPOrdering.ordinalOf(Float.POSITIVE_INFINITY), 0xFFFFFFFFL); }
Cycles<EDGE> findCycles() { JohnsonCycleFinder johnsonCycleFinder = new JohnsonCycleFinder(createPrimitiveGraph()); JohnsonCycleFinder.Result rawCycles = johnsonCycleFinder.findCycles(); return new CyclesInternal<>(mapToCycles(rawCycles), rawCycles.maxNumberOfCyclesReached()); }
@Test public void multiple_cycles_are_detected() { Graph<String, Edge<String>> graph = new Graph<>(); Cycle<Edge<String>> threeElements = randomCycle(3); Cycle<Edge<String>> fourElements = randomCycle(4); Cycle<Edge<String>> fiveElements = randomCycle(5); addCycles(graph, threeElements, fourElements, fiveElements); addCrossLink(graph, threeElements, fourElements); addCrossLink(graph, fourElements, fiveElements); Collection<Cycle<Edge<String>>> cycles = graph.findCycles(); assertThatCycles(cycles).containsOnly(threeElements, fourElements, fiveElements); }
public static boolean hasPublicNullaryConstructor(Class<?> clazz) { return Arrays.stream(clazz.getConstructors()) .anyMatch(constructor -> constructor.getParameterCount() == 0); }
@Test void testHasNullaryConstructor() { assertThat(InstantiationUtil.hasPublicNullaryConstructor(StringValue.class)).isTrue(); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; // This handles a tombstone message if (value == null) { return SchemaAndValue.NULL; } try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (config.schemasEnabled() && (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); // The deserialized data should either be an envelope object containing the schema and the payload or the schema // was stripped during serialization and we need to fill in an all-encompassing schema. if (!config.schemasEnabled()) { ObjectNode envelope = JSON_NODE_FACTORY.objectNode(); envelope.set(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null); envelope.set(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue); jsonValue = envelope; } Schema schema = asConnectSchema(jsonValue.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); return new SchemaAndValue( schema, convertToConnect(schema, jsonValue.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME), config) ); }
@Test public void dateToConnect() { Schema schema = Date.SCHEMA; GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0); calendar.setTimeZone(TimeZone.getTimeZone("UTC")); calendar.add(Calendar.DATE, 10000); java.util.Date reference = calendar.getTime(); String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Date\", \"version\": 1 }, \"payload\": 10000 }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); java.util.Date converted = (java.util.Date) schemaAndValue.value(); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, converted); }
@Override public List<BlockWorkerInfo> getPreferredWorkers(WorkerClusterView workerClusterView, String fileId, int count) throws ResourceExhaustedException { if (workerClusterView.size() < count) { throw new ResourceExhaustedException(String.format( "Not enough workers in the cluster %d workers in the cluster but %d required", workerClusterView.size(), count)); } Set<WorkerIdentity> workerIdentities = workerClusterView.workerIds(); mHashProvider.refresh(workerIdentities); List<WorkerIdentity> workers = mHashProvider.getMultiple(fileId, count); if (workers.size() != count) { throw new ResourceExhaustedException(String.format( "Found %d workers from the hash ring but %d required", workers.size(), count)); } ImmutableList.Builder<BlockWorkerInfo> builder = ImmutableList.builder(); for (WorkerIdentity worker : workers) { Optional<WorkerInfo> optionalWorkerInfo = workerClusterView.getWorkerById(worker); final WorkerInfo workerInfo; if (optionalWorkerInfo.isPresent()) { workerInfo = optionalWorkerInfo.get(); } else { // the worker returned by the policy does not exist in the cluster view // supplied by the client. // this can happen when the membership changes and some callers fail to update // to the latest worker cluster view. // in this case, just skip this worker LOG.debug("Inconsistency between caller's view of cluster and that of " + "the consistent hash policy's: worker {} selected by policy does not exist in " + "caller's view {}. Skipping this worker.", worker, workerClusterView); continue; } BlockWorkerInfo blockWorkerInfo = new BlockWorkerInfo( worker, workerInfo.getAddress(), workerInfo.getCapacityBytes(), workerInfo.getUsedBytes(), workerInfo.getState() == WorkerState.LIVE ); builder.add(blockWorkerInfo); } List<BlockWorkerInfo> infos = builder.build(); return infos; }
@Test public void getOneWorker() throws Exception { WorkerLocationPolicy policy = WorkerLocationPolicy.Factory.create(mConf); assertTrue(policy instanceof MultiProbeHashPolicy); // Prepare a worker list WorkerClusterView workers = new WorkerClusterView(Arrays.asList( new WorkerInfo() .setIdentity(WorkerIdentityTestUtils.ofLegacyId(1)) .setAddress(new WorkerNetAddress() .setHost("master1").setRpcPort(29998).setDataPort(29999).setWebPort(30000)) .setCapacityBytes(1024) .setUsedBytes(0), new WorkerInfo() .setIdentity(WorkerIdentityTestUtils.ofLegacyId(2)) .setAddress(new WorkerNetAddress() .setHost("master2").setRpcPort(29998).setDataPort(29999).setWebPort(30000)) .setCapacityBytes(1024) .setUsedBytes(0))); List<BlockWorkerInfo> assignedWorkers = policy.getPreferredWorkers(workers, "hdfs://a/b/c", 1); assertEquals(1, assignedWorkers.size()); assertTrue(contains(workers, assignedWorkers.get(0))); assertThrows(ResourceExhaustedException.class, () -> { // Getting 1 out of no workers will result in an error policy.getPreferredWorkers(new WorkerClusterView(ImmutableList.of()), "hdfs://a/b/c", 1); }); }
@Override public FlinkPod decorateFlinkPod(FlinkPod flinkPod) { final Container mainContainerWithStartCmd = new ContainerBuilder(flinkPod.getMainContainer()) .withCommand(kubernetesJobManagerParameters.getContainerEntrypoint()) .withArgs(getJobManagerStartCommand()) .build(); return new FlinkPod.Builder(flinkPod).withMainContainer(mainContainerWithStartCmd).build(); }
@Test void testContainerIsDecorated() { flinkConfig.set(DeploymentOptions.TARGET, KubernetesDeploymentTarget.SESSION.getName()); final FlinkPod resultFlinkPod = cmdJobManagerDecorator.decorateFlinkPod(baseFlinkPod); assertThat(resultFlinkPod.getPodWithoutMainContainer()) .isEqualTo(baseFlinkPod.getPodWithoutMainContainer()); assertThat(resultFlinkPod.getMainContainer()).isNotEqualTo(baseFlinkPod.getMainContainer()); }
@Override // Exposes internal mutable reference by design - Spotbugs is right to warn that this is dangerous public synchronized byte[] toByteArray() { // Note: count == buf.length is not a correct criteria to "return buf;", because the internal // buf may be reused after reset(). if (!isFallback && count > 0) { return buf; } else { return super.toByteArray(); } }
@Test public void testWriteArrayFastAfterReset2() throws IOException { resetBoth(); writeToBothFast(TEST_DATA); assertStreamContentsEquals(stream, exposedStream); assertSame(TEST_DATA, exposedStream.toByteArray()); }
public static JibContainerBuilder toJibContainerBuilder( Path projectRoot, Path buildFilePath, Build buildCommandOptions, CommonCliOptions commonCliOptions, ConsoleLogger logger) throws InvalidImageReferenceException, IOException { BuildFileSpec buildFile = toBuildFileSpec(buildFilePath, buildCommandOptions.getTemplateParameters()); Optional<BaseImageSpec> baseImageSpec = buildFile.getFrom(); JibContainerBuilder containerBuilder = baseImageSpec.isPresent() ? createJibContainerBuilder(baseImageSpec.get(), commonCliOptions, logger) : Jib.fromScratch(); buildFile.getCreationTime().ifPresent(containerBuilder::setCreationTime); buildFile.getFormat().ifPresent(containerBuilder::setFormat); containerBuilder.setEnvironment(buildFile.getEnvironment()); containerBuilder.setLabels(buildFile.getLabels()); containerBuilder.setVolumes(buildFile.getVolumes()); containerBuilder.setExposedPorts(buildFile.getExposedPorts()); buildFile.getUser().ifPresent(containerBuilder::setUser); buildFile.getWorkingDirectory().ifPresent(containerBuilder::setWorkingDirectory); buildFile.getEntrypoint().ifPresent(containerBuilder::setEntrypoint); buildFile.getCmd().ifPresent(containerBuilder::setProgramArguments); Optional<LayersSpec> layersSpec = buildFile.getLayers(); if (layersSpec.isPresent()) { containerBuilder.setFileEntriesLayers(Layers.toLayers(projectRoot, layersSpec.get())); } return containerBuilder; }
@Test public void testToBuildFileSpec_alternativeRootContext() throws URISyntaxException, InvalidImageReferenceException, IOException { Path buildfile = Paths.get( Resources.getResource("buildfiles/projects/allProperties/altYamls/alt-jib.yaml") .toURI()); Path projectRoot = buildfile.getParent().getParent(); JibContainerBuilder jibContainerBuilder = BuildFiles.toJibContainerBuilder( projectRoot, buildfile, buildCli, commonCliOptions, consoleLogger); ContainerBuildPlan resolved = jibContainerBuilder.toContainerBuildPlan(); Assert.assertEquals( FileEntriesLayer.builder() .addEntry( projectRoot.resolve("project/script.sh"), AbsoluteUnixPath.get("/home/script.sh")) .build() .getEntries(), ((FileEntriesLayer) resolved.getLayers().get(0)).getEntries()); }
public static Identifier parse(String stringValue) { return parse(stringValue, -1); }
@Test(expected = IllegalArgumentException.class) public void testParseIntegerAboveMax() { Identifier.parse("65536"); }
public boolean match(String left, String right) { if (left != null && left.startsWith("\"") && left.endsWith("\"")) { left = left.substring(1, left.length() - 1); } if (right != null && right.startsWith("\"") && right.endsWith("\"")) { right = right.substring(1, right.length() - 1); } return Objects.equals(left, right); }
@Test public void doubleShouldEqualWhenLargerThan128() { Double a = 334.0; Double b = 334.0; boolean match = new StringMatch().match(a, b); assertTrue(match); }
public static Map<String, PartitionColumnFilter> convertColumnFilter(List<ScalarOperator> predicates) { return convertColumnFilter(predicates, null); }
@Test public void convertColumnFilterExprBinaryType() { List<ScalarOperator> listEq = buildOperator("day", BinaryType.EQ); OlapTable olapTable = buildOlapTable("day"); Map<String, PartitionColumnFilter> resultEq = ColumnFilterConverter.convertColumnFilter(listEq, olapTable); assertEquals(1, resultEq.size()); List<ScalarOperator> listGe = buildOperator("day", BinaryType.GE); Map<String, PartitionColumnFilter> resultGe = ColumnFilterConverter.convertColumnFilter(listGe, olapTable); assertEquals(1, resultGe.size()); List<ScalarOperator> listGt = buildOperator("day", BinaryType.GT); Map<String, PartitionColumnFilter> resultGt = ColumnFilterConverter.convertColumnFilter(listGt, olapTable); assertEquals(1, resultGt.size()); List<ScalarOperator> listLe = buildOperator("day", BinaryType.LE); Map<String, PartitionColumnFilter> resultLe = ColumnFilterConverter.convertColumnFilter(listLe, olapTable); assertEquals(1, resultLe.size()); List<ScalarOperator> listLt = buildOperator("day", BinaryType.LT); Map<String, PartitionColumnFilter> resultLt = ColumnFilterConverter.convertColumnFilter(listLt, olapTable); assertEquals(1, resultLt.size()); }
@Override public void validate(String value, @Nullable List<String> options) { try { Integer.parseInt(value); } catch (NumberFormatException e) { throw BadRequestException.create(format("Value '%s' must be an integer.", value)); } }
@Test public void fail_on_string() { assertThatThrownBy(() -> validation.validate("abc", null)) .isInstanceOf(BadRequestException.class) .hasMessage("Value 'abc' must be an integer."); }
@DELETE @Path("{id}/{name}") @Produces(MediaType.APPLICATION_JSON) public Response delete(@PathParam("id") String id, @PathParam("name") String name) { AppCatalogSolrClient sc = new AppCatalogSolrClient(); sc.deleteApp(id); YarnServiceClient yc = new YarnServiceClient(); yc.deleteApp(name); return Response.status(Status.ACCEPTED).build(); }
@Test void testDelete() throws Exception { String id = "application 1"; AppListController ac = Mockito.mock(AppListController.class); Response expected = Response.ok().build(); when(ac.delete(id, id)).thenReturn(Response.ok().build()); final Response actual = ac.delete(id, id); assertEquals(expected.getStatus(), actual.getStatus()); }
public static String lookupEnvironmentVariable(String key) { // lookup OS env with upper case key String upperKey = key.toUpperCase(); String value = System.getenv(upperKey); if (value == null) { // some OS do not support dashes in keys, so replace with underscore String normalizedKey = upperKey.replace('-', '_'); // and replace dots with underscores so keys like my.key are // translated to MY_KEY normalizedKey = normalizedKey.replace('.', '_'); value = System.getenv(normalizedKey); } return value; }
@Test public void testLookupEnvironmentVariable() { assertEquals("8081", IOHelper.lookupEnvironmentVariable("FOO_SERVICE_PORT")); assertEquals("8081", IOHelper.lookupEnvironmentVariable("foo-service.port")); assertEquals("8081", IOHelper.lookupEnvironmentVariable("foo-service-port")); assertEquals("8081", IOHelper.lookupEnvironmentVariable("foo.service.port")); }
@Override public ExecuteContext before(ExecuteContext context) throws Exception { final FlowControlConfig pluginConfig = PluginConfigManager.getPluginConfig(FlowControlConfig.class); FlowControlServiceMeta.getInstance().setDubboService(true); if (!pluginConfig.isUseCseRule() || !pluginConfig.isBaseSdk()) { return context; } FlowControlServiceMeta.getInstance().setVersion(com.alibaba.dubbo.common.utils.ConfigUtils.getProperty( CseConstants.KEY_DUBBO_VERSION, CseConstants.DEFAULT_DUBBO_VERSION)); FlowControlServiceMeta.getInstance().setProject(com.alibaba.dubbo.common.utils.ConfigUtils.getProperty( CseConstants.KEY_DUBBO_KIE_PROJECT, CseConstants.DEFAULT_PROJECT)); FlowControlServiceMeta.getInstance().setServiceName(com.alibaba.dubbo.common.utils.ConfigUtils.getProperty( CseConstants.KEY_DUBBO_SERVICE_NAME, CseConstants.DEFAULT_DUBBO_SERVICE_NAME)); FlowControlServiceMeta.getInstance().setEnvironment(com.alibaba.dubbo.common.utils.ConfigUtils.getProperty( CseConstants.KEY_DUBBO_ENVIRONMENT, CseConstants.DEFAULT_DUBBO_ENVIRONMENT)); FlowControlServiceMeta.getInstance().setApp(com.alibaba.dubbo.common.utils.ConfigUtils.getProperty( CseConstants.KEY_DUBBO_APP_NAME, CseConstants.DEFAULT_DUBBO_APP_NAME)); FlowControlServiceMeta.getInstance().setCustomLabel(com.alibaba.dubbo.common.utils.ConfigUtils.getProperty( CseConstants.KEY_DUBBO_CUSTOM_LABEL, CseConstants.DEFAULT_CUSTOM_LABEL)); FlowControlServiceMeta.getInstance().setCustomLabelValue(com.alibaba.dubbo.common.utils.ConfigUtils.getProperty( CseConstants.KEY_DUBBO_CUSTOM_LABEL_VALUE, CseConstants.DEFAULT_CUSTOM_LABEL_VALUE)); return context; }
@Test public void testOpen() throws Exception { String version = "1.0.0"; final AbstractInterceptor interceptor = getInterceptor(); flowControlConfig.setBaseSdk(true); flowControlConfig.setUseCseRule(true); final HashMap<String, String> env = new HashMap<>(); env.put(CseConstants.KEY_DUBBO_VERSION, version); EnvUtils.addEnv(env); interceptor.before(buildContext()); assertEquals(version, FlowControlServiceMeta.getInstance().getVersion()); }
@PublicAPI(usage = ACCESS) public Set<Dependency> getDependenciesFromSelf() { ImmutableSet.Builder<Dependency> result = ImmutableSet.builder(); for (JavaClass javaClass : this) { for (Dependency dependency : javaClass.getDirectDependenciesFromSelf()) { if (isNotAssignedToOwnSlice(dependency.getTargetClass())) { result.add(dependency); } } } return result.build(); }
@Test public void dependencies_from_self() { Slice slice = getSlice(slicesOfTestClasses(), "first"); assertThatDependencies(slice.getDependenciesFromSelf()).containOnly( from(FirstAnyPkgClass.class).to(Object.class) .from(FirstAnyPkgClass.class).to(SomePkgSubclass.class) .from(FirstAnyPkgClass.class).to(SecondThreeAnyClass.class) .from(ClassOnlyDependentOnOwnPackageAndObject.class).to(Object.class) .from(FirstThreeAnyClass.class).to(Object.class) .from(FirstThreeAnyClass.class).to(SecondThreeAnyClass.class) ); }
/**
 * Starts a worker process for a PROCESS-type pipeline environment and waits until
 * the worker connects back to the control service.
 *
 * <p>The method blocks, polling {@code clientSource} in 5-second intervals, until a
 * client for {@code workerId} appears. Each timeout re-checks that the spawned
 * process is still alive ({@code isAliveOrThrow}) so a crashed worker aborts the
 * wait instead of hanging forever. On any failure the process is stopped and the
 * original exception is rethrown (with the kill failure, if any, suppressed).
 *
 * @param environment the environment proto; must carry a ProcessPayload
 * @param workerId unique id passed to the worker via {@code --id}
 * @return a handle owning the process and its instruction channel
 * @throws Exception if the payload is invalid, the process dies, or startup fails
 */
@Override
public RemoteEnvironment createEnvironment(Environment environment, String workerId) throws Exception {
    Preconditions.checkState(
        environment
            .getUrn()
            .equals(BeamUrns.getUrn(RunnerApi.StandardEnvironments.Environments.PROCESS)),
        "The passed environment does not contain a ProcessPayload.");
    final RunnerApi.ProcessPayload processPayload =
        RunnerApi.ProcessPayload.parseFrom(environment.getPayload());
    String executable = processPayload.getCommand();
    String provisionEndpoint = provisioningServiceServer.getApiServiceDescriptor().getUrl();
    String semiPersistDir = pipelineOptions.as(RemoteEnvironmentOptions.class).getSemiPersistDir();
    ImmutableList.Builder<String> argsBuilder =
        ImmutableList.<String>builder()
            .add(String.format("--id=%s", workerId))
            .add(String.format("--provision_endpoint=%s", provisionEndpoint));
    // --semi_persist_dir is optional; only pass it when configured.
    if (semiPersistDir != null) {
        argsBuilder.add(String.format("--semi_persist_dir=%s", semiPersistDir));
    }
    LOG.debug("Creating Process for worker ID {}", workerId);
    // Wrap the blocking call to clientSource.get in case an exception is thrown.
    InstructionRequestHandler instructionHandler = null;
    try {
        ProcessManager.RunningProcess process =
            processManager.startProcess(
                workerId, executable, argsBuilder.build(), processPayload.getEnvMap());
        // Wait on a client from the gRPC server.
        while (instructionHandler == null) {
            try {
                // If the process is not alive anymore, we abort.
                process.isAliveOrThrow();
                instructionHandler = clientSource.take(workerId, Duration.ofSeconds(5));
            } catch (TimeoutException timeoutEx) {
                // Startup can legitimately take a while; keep polling as long as
                // the process is alive.
                LOG.info(
                    "Still waiting for startup of environment '{}' for worker id {}",
                    processPayload.getCommand(),
                    workerId);
            } catch (InterruptedException interruptEx) {
                // Re-assert the interrupt flag before bailing out.
                Thread.currentThread().interrupt();
                throw new RuntimeException(interruptEx);
            }
        }
    } catch (Exception e) {
        // Clean up the half-started process; keep the original failure primary.
        try {
            processManager.stopProcess(workerId);
        } catch (Exception processKillException) {
            e.addSuppressed(processKillException);
        }
        throw e;
    }
    return ProcessEnvironment.create(processManager, environment, workerId, instructionHandler);
}
/**
 * Closing a process environment handle must stop exactly the process that was
 * started for its worker id.
 */
@Test
public void destroysCorrectContainer() throws Exception {
    RemoteEnvironment handle = factory.createEnvironment(ENVIRONMENT, "workerId");
    handle.close();
    verify(processManager).stopProcess("workerId");
}
/**
 * Deletes a reservation by routing the request to the home sub-cluster of the
 * reservation and, on success, removing the reservation-to-sub-cluster mapping
 * from the federation state store.
 *
 * @param request delete request carrying a non-null {@link ReservationId}
 * @return the sub-cluster's delete response
 * @throws YarnException if the request is invalid or the delete fails
 * @throws IOException on communication errors with the sub-cluster
 */
@Override
public ReservationDeleteResponse deleteReservation(ReservationDeleteRequest request)
    throws YarnException, IOException {
  if (request == null || request.getReservationId() == null) {
    routerMetrics.incrDeleteReservationFailedRetrieved();
    String msg = "Missing deleteReservation request or reservationId.";
    // Audit before throwing: logAndThrowException never returns, so logging
    // after it would be unreachable (this matches the other failure branches).
    RouterAuditLogger.logFailure(user.getShortUserName(), DELETE_RESERVATION, UNKNOWN,
        TARGET_CLIENT_RM_SERVICE, msg);
    RouterServerUtil.logAndThrowException(msg, null);
  }
  long startTime = clock.getTime();
  ReservationId reservationId = request.getReservationId();
  SubClusterId subClusterId = getReservationHomeSubCluster(reservationId);
  try {
    ApplicationClientProtocol client = getClientRMProxyForSubCluster(subClusterId);
    ReservationDeleteResponse response = client.deleteReservation(request);
    if (response != null) {
      // The sub-cluster accepted the delete; drop the home-sub-cluster mapping.
      federationFacade.deleteReservationHomeSubCluster(reservationId);
      long stopTime = clock.getTime();
      routerMetrics.succeededDeleteReservationRetrieved(stopTime - startTime);
      RouterAuditLogger.logSuccess(user.getShortUserName(), DELETE_RESERVATION,
          TARGET_CLIENT_RM_SERVICE);
      return response;
    }
  } catch (Exception ex) {
    // Fixed copy-paste bug: this is the delete path, so bump the delete-failure
    // metric (previously incremented incrUpdateReservationFailedRetrieved).
    routerMetrics.incrDeleteReservationFailedRetrieved();
    String msg = "Unable to reservation delete due to exception.";
    RouterAuditLogger.logFailure(user.getShortUserName(), DELETE_RESERVATION, UNKNOWN,
        TARGET_CLIENT_RM_SERVICE, msg);
    RouterServerUtil.logAndThrowException(msg, ex);
  }
  // Reached only when the sub-cluster returned a null response.
  routerMetrics.incrDeleteReservationFailedRetrieved();
  String msg = String.format("Reservation %s failed to be delete.", reservationId);
  RouterAuditLogger.logFailure(user.getShortUserName(), DELETE_RESERVATION, UNKNOWN,
      TARGET_CLIENT_RM_SERVICE, msg);
  throw new YarnException(msg);
}
/**
 * End-to-end delete flow through the federation interceptor: obtain a reservation id,
 * submit a reservation, delete it, and verify the home-sub-cluster mapping is gone.
 */
@Test
public void testDeleteReservation() throws Exception {
    LOG.info("Test FederationClientInterceptor : DeleteReservation request.");
    // get new reservationId
    GetNewReservationRequest request = GetNewReservationRequest.newInstance();
    GetNewReservationResponse response = interceptor.getNewReservation(request);
    Assert.assertNotNull(response);
    // allow plan follower to synchronize, manually trigger an assignment
    Map<SubClusterId, MockRM> mockRMs = interceptor.getMockRMs();
    for (MockRM mockRM : mockRMs.values()) {
        ReservationSystem reservationSystem = mockRM.getReservationSystem();
        reservationSystem.synchronizePlan("root.decided", true);
    }
    // Submit Reservation
    ReservationId reservationId = response.getReservationId();
    ReservationDefinition rDefinition = createReservationDefinition(1024, 1);
    ReservationSubmissionRequest rSubmissionRequest = ReservationSubmissionRequest.newInstance(
        rDefinition, "decided", reservationId);
    ReservationSubmissionResponse submissionResponse =
        interceptor.submitReservation(rSubmissionRequest);
    Assert.assertNotNull(submissionResponse);
    // Delete Reservation
    ReservationDeleteRequest deleteRequest = ReservationDeleteRequest.newInstance(reservationId);
    ReservationDeleteResponse deleteResponse = interceptor.deleteReservation(deleteRequest);
    Assert.assertNotNull(deleteResponse);
    // After a successful delete, looking up the home sub-cluster must fail.
    LambdaTestUtils.intercept(YarnException.class,
        "Reservation " + reservationId + " does not exist",
        () -> stateStoreUtil.queryReservationHomeSC(reservationId));
}
/**
 * Submits an operator chain to the executor and tracks its future so it can be
 * cancelled later. The chain is driven to completion by repeatedly pulling blocks
 * from its root operator until an end-of-stream block arrives.
 *
 * <p>Completion handling (in {@code finally}): the chain is removed from the
 * tracking map first; then it is cancelled if it ended with an error block or an
 * exception, or closed if it finished normally.
 *
 * @param operatorChain the chain to execute; its id must be unique among running chains
 */
public void register(OpChain operatorChain) {
    Future<?> scheduledFuture = _executorService.submit(new TraceRunnable() {
        @Override
        public void runJob() {
            boolean isFinished = false;
            TransferableBlock returnedErrorBlock = null;
            Throwable thrown = null;
            try {
                LOGGER.trace("({}): Executing", operatorChain);
                // Drain the chain: keep pulling until EOS (which includes error blocks).
                TransferableBlock result = operatorChain.getRoot().nextBlock();
                while (!result.isEndOfStreamBlock()) {
                    result = operatorChain.getRoot().nextBlock();
                }
                isFinished = true;
                if (result.isErrorBlock()) {
                    returnedErrorBlock = result;
                    LOGGER.error("({}): Completed erroneously {} {}", operatorChain,
                        result.getQueryStats(), result.getExceptions());
                } else {
                    LOGGER.debug("({}): Completed {}", operatorChain, result.getQueryStats());
                }
            } catch (Exception e) {
                LOGGER.error("({}): Failed to execute operator chain!", operatorChain, e);
                thrown = e;
            } finally {
                // Untrack before cancel/close so the map never holds a dead chain.
                _submittedOpChainMap.remove(operatorChain.getId());
                if (returnedErrorBlock != null || thrown != null) {
                    if (thrown == null) {
                        thrown = new RuntimeException("Error block " + returnedErrorBlock.getExceptions());
                    }
                    operatorChain.cancel(thrown);
                } else if (isFinished) {
                    operatorChain.close();
                }
            }
        }
    });
    _submittedOpChainMap.put(operatorChain.getId(), scheduledFuture);
}
/**
 * A registered op-chain must be executed by the scheduler: the root operator's
 * nextBlock() should be invoked shortly after register().
 */
@Test
public void shouldScheduleSingleOpChainRegisteredBeforeStart() throws InterruptedException {
    OpChain opChain = getChain(_operatorA);
    OpChainSchedulerService schedulerService = new OpChainSchedulerService(_executor);
    CountDownLatch latch = new CountDownLatch(1);
    // Count down on first pull and immediately end the stream.
    Mockito.when(_operatorA.nextBlock()).thenAnswer(inv -> {
        latch.countDown();
        return TransferableBlockTestUtils.getEndOfStreamTransferableBlock(0);
    });
    schedulerService.register(opChain);
    Assert.assertTrue(latch.await(10, TimeUnit.SECONDS),
        "expected await to be called in less than 10 seconds");
}
/**
 * Resolves the Java type for a JSON schema node. A {@code $ref} is followed (possibly
 * recursively) before type generation; already-generated schemas short-circuit to their
 * existing Java type. Non-ref nodes are dispatched to the enum rule when an {@code enum}
 * keyword is present, otherwise to the type rule.
 *
 * @param nodeName name for the node, used for generated type naming
 * @param schemaNode the schema content to apply this rule to
 * @param parent the parent node of {@code schemaNode}
 * @param generatableType the container into which types may be generated
 * @param schema the schema wrapper owning {@code schemaNode}
 * @return the resolved or newly generated Java type
 */
@Override
public JType apply(String nodeName, JsonNode schemaNode, JsonNode parent, JClassContainer generatableType, Schema schema) {
    if (schemaNode.has("$ref")) {
        final String refValue = schemaNode.get("$ref").asText();
        final String refName = nameFromRef(refValue);
        schema = ruleFactory.getSchemaStore().create(schema, refValue,
                ruleFactory.getGenerationConfig().getRefFragmentPathDelimiters());
        // A schema that was already generated maps straight to its Java type.
        if (schema.isGenerated()) {
            return schema.getJavaType();
        }
        final String childNodeName = (refName == null) ? nodeName : refName;
        return apply(childNodeName, schema.getContent(), parent, generatableType, schema);
    }
    final JType javaType = schemaNode.has("enum")
            ? ruleFactory.getEnumRule().apply(nodeName, schemaNode, parent, generatableType, schema)
            : ruleFactory.getTypeRule().apply(nodeName, schemaNode, parent, generatableType.getPackage(), schema);
    schema.setJavaTypeIfEmpty(javaType);
    return javaType;
}
/**
 * A node containing only a {@code $ref} to an external schema file must cause that
 * schema to be loaded and passed to the type rule, named after the referenced file.
 */
@Test
public void refsToOtherSchemasAreLoaded() throws URISyntaxException, JClassAlreadyExistsException {
    URI schemaUri = getClass().getResource("/schema/address.json").toURI();
    ObjectNode schemaWithRef = new ObjectMapper().createObjectNode();
    schemaWithRef.put("$ref", schemaUri.toString());
    JDefinedClass jclass = new JCodeModel()._class(TARGET_CLASS_NAME);
    final GenerationConfig mockGenerationConfig = mock(GenerationConfig.class);
    when(mockGenerationConfig.getRefFragmentPathDelimiters()).thenReturn("#/.");
    TypeRule mockTypeRule = mock(TypeRule.class);
    when(mockRuleFactory.getTypeRule()).thenReturn(mockTypeRule);
    when(mockRuleFactory.getSchemaStore()).thenReturn(new SchemaStore());
    when(mockRuleFactory.getGenerationConfig()).thenReturn(mockGenerationConfig);
    ArgumentCaptor<JsonNode> captureJsonNode = ArgumentCaptor.forClass(JsonNode.class);
    ArgumentCaptor<Schema> captureSchema = ArgumentCaptor.forClass(Schema.class);
    rule.apply(NODE_NAME, schemaWithRef, null, jclass, null);
    // The recursive call should use the ref-derived name "address", not NODE_NAME.
    verify(mockTypeRule).apply(eq("address"), captureJsonNode.capture(), any(), eq(jclass.getPackage()), captureSchema.capture());
    assertThat(captureSchema.getValue().getId(), is(equalTo(schemaUri)));
    assertThat(captureSchema.getValue().getContent(), is(equalTo(captureJsonNode.getValue())));
    assertThat(captureJsonNode.getValue().get("description").asText(),
        is(equalTo("An Address following the convention of http://microformats.org/wiki/hcard")));
}
/**
 * Rewrites the given AST node by delegating to the configured rewriter visitor.
 *
 * @param node the AST node to rewrite
 * @param context caller-supplied context threaded through the rewrite
 * @return the rewritten node produced by the rewriter
 */
public AstNode rewrite(final AstNode node, final C context) {
    return rewriter.process(node, context);
}
/**
 * Rewriting a CREATE TABLE statement must rewrite each table element via the plugin
 * rewriter while preserving every other property of the statement.
 */
@Test
public void shouldRewriteCreateTable() {
    // Given:
    final TableElement tableElement1 = givenTableElement("foo");
    final TableElement tableElement2 = givenTableElement("bar");
    final TableElement rewrittenTableElement1 = givenTableElement("baz");
    final TableElement rewrittenTableElement2 = givenTableElement("boz");
    final CreateTable ct = new CreateTable(
        location,
        sourceName,
        TableElements.of(tableElement1, tableElement2),
        false,
        false,
        sourceProperties,
        false
    );
    when(mockRewriter.apply(tableElement1, context)).thenReturn(rewrittenTableElement1);
    when(mockRewriter.apply(tableElement2, context)).thenReturn(rewrittenTableElement2);
    // When:
    final AstNode rewritten = rewriter.rewrite(ct, context);
    // Then:
    assertThat(
        rewritten,
        equalTo(
            new CreateTable(
                location,
                sourceName,
                TableElements.of(rewrittenTableElement1, rewrittenTableElement2),
                false,
                false,
                sourceProperties,
                false
            )
        )
    );
}
/**
 * Returns the schema for the given id, reading and caching it on first use.
 *
 * <p>The base document (id without fragment) is resolved and cached once; when the
 * id carries a fragment, the fragment is resolved against the cached base content
 * and cached separately under the normalized full id. Subsequent calls with an
 * equivalent id return the same {@link Schema} instance.
 *
 * @param id the schema URI, possibly including a {@code #fragment}
 * @param refFragmentPathDelimiters delimiters used to split fragment paths
 * @return the cached or newly created schema for {@code id}
 */
public synchronized Schema create(URI id, String refFragmentPathDelimiters) {
    URI normalizedId = id.normalize();
    if (!schemas.containsKey(normalizedId)) {
        URI baseId = removeFragment(id).normalize();
        if (!schemas.containsKey(baseId)) {
            logger.debug("Reading schema: " + baseId);
            final JsonNode baseContent = contentResolver.resolve(baseId);
            schemas.put(baseId, new Schema(baseId, baseContent, null));
        }
        final Schema baseSchema = schemas.get(baseId);
        // Only ids with a fragment need a child schema; otherwise the base entry
        // already satisfies the final lookup below.
        if (normalizedId.toString().contains("#")) {
            JsonNode childContent = fragmentResolver.resolve(baseSchema.getContent(),
                '#' + id.getFragment(), refFragmentPathDelimiters);
            schemas.put(normalizedId, new Schema(normalizedId, childContent, baseSchema));
        }
    }
    return schemas.get(normalizedId);
}
/**
 * Creating the same schema URI twice must return the identical cached instance,
 * not a re-read copy.
 */
@Test
public void schemaAlreadyReadIsReused() throws URISyntaxException {
    URI schemaUri = getClass().getResource("/schema/address.json").toURI();
    SchemaStore schemaStore = new SchemaStore();
    Schema schema1 = schemaStore.create(schemaUri, "#/.");
    Schema schema2 = schemaStore.create(schemaUri, "#/.");
    assertThat(schema1, is(sameInstance(schema2)));
}
/**
 * Queries the sentinel for the slaves of the given master and converts the raw
 * reply maps into {@link RedisServer} instances.
 *
 * @param master the master whose replicas to list
 * @return the replicas currently known to the sentinel
 */
@Override
public Collection<RedisServer> slaves(NamedNode master) {
    return toRedisServersList(
            connection.sync(StringCodec.INSTANCE, RedisCommands.SENTINEL_SLAVES, master.getName()));
}
/**
 * The sentinel connection must report both replicas of the first known master.
 */
@Test
public void testSlaves() {
    Collection<RedisServer> masters = connection.masters();
    Collection<RedisServer> slaves = connection.slaves(masters.iterator().next());
    assertThat(slaves).hasSize(2);
}
@Restricted(NoExternalUse.class) public static Icon tryGetIcon(String iconGuess) { // Jenkins Symbols don't have metadata so return null if (iconGuess == null || iconGuess.startsWith("symbol-")) { return null; } Icon iconMetadata = IconSet.icons.getIconByClassSpec(iconGuess); // `iconGuess` must be class names if it contains a whitespace. // It may contains extra css classes unrelated to icons. // Filter classes with `icon-` prefix. if (iconMetadata == null && iconGuess.contains(" ")) { iconMetadata = IconSet.icons.getIconByClassSpec(filterIconNameClasses(iconGuess)); } if (iconMetadata == null) { // Icon could be provided as a simple iconFileName e.g. "help.svg" iconMetadata = IconSet.icons.getIconByClassSpec(IconSet.toNormalizedIconNameClass(iconGuess) + " icon-md"); } if (iconMetadata == null) { // Icon could be provided as an absolute iconFileName e.g. "/plugin/foo/abc.png" iconMetadata = IconSet.icons.getIconByUrl(iconGuess); } return iconMetadata; }
/** A null icon guess must resolve to null rather than throwing. */
@Test
public void tryGetIcon_shouldReturnNullForNull() throws Exception {
    assertThat(Functions.tryGetIcon(null), is(nullValue()));
}
/**
 * Convenience overload that generates the entity-caps verification string using
 * the default hash algorithm (delegates with a {@code null} hash).
 *
 * @param discoverInfo the disco#info to compute the verification string from
 * @return the computed caps version and hash
 */
static CapsVersionAndHash generateVerificationString(DiscoverInfoView discoverInfo) {
    return generateVerificationString(discoverInfo, null);
}
/**
 * XEP-0115 requires the verification string to be independent of the order in which
 * data-form extensions were added: two disco#info payloads differing only in
 * extension insertion order must hash to the same caps version.
 */
@Test
public void testReversedDataFormOrder() throws XmppStringprepException {
    final DiscoverInfoBuilder builderA = createSimpleSampleBuilder();
    builderA.addExtension(createSampleServerInfoDataForm());
    // This works, as the underlying MultiMap maintains insertion-order.
    builderA.addExtension(createSampleSoftwareInfoDataForm());
    final DiscoverInfoBuilder builderB = createSimpleSampleBuilder();
    builderB.addExtension(createSampleSoftwareInfoDataForm());
    builderB.addExtension(createSampleServerInfoDataForm());
    CapsVersionAndHash versionAndHashA = EntityCapsManager.generateVerificationString(builderA.build(), StringUtils.SHA1);
    CapsVersionAndHash versionAndHashB = EntityCapsManager.generateVerificationString(builderB.build(), StringUtils.SHA1);
    assertEquals(versionAndHashA.version, versionAndHashB.version);
}
/**
 * Decorates each message in the search response by looking up the value of the
 * configured source field and, when a non-empty result is found, writing the
 * single lookup value into the target field of a copied message.
 *
 * <p>Messages without the source field, or whose lookup yields no result, are
 * passed through untouched. Lookup-table metadata is attached to the current
 * tracing span when the table is available.
 *
 * @param searchResponse the response whose messages are decorated
 * @return a copy of the response with decorated message summaries
 */
@WithSpan
@Override
public SearchResponse apply(SearchResponse searchResponse) {
    final LookupTable table = lookupTable.getTable();
    if (table != null) {
        Span.current().setAttribute(LOOKUP_TABLE_NAME, table.name())
            .setAttribute(LOOKUP_CACHE_NAME, table.cache().name())
            .setAttribute(LOOKUP_CACHE_TYPE, table.cache().getConfig().type())
            .setAttribute(LOOKUP_DATA_ADAPTER_NAME, table.dataAdapter().name())
            .setAttribute(LOOKUP_DATA_ADAPTER_TYPE, table.dataAdapter().getConfig().type());
    }
    final List<ResultMessageSummary> summaries = searchResponse.messages().stream()
        .map(summary -> {
            // Do not touch the message if the field does not exist.
            if (!summary.message().containsKey(sourceField)) {
                return summary;
            }
            final LookupResult result = lookupTable.lookup(summary.message().get(sourceField));
            // Do not touch the message if there is no result
            if (result == null || result.isEmpty()) {
                return summary;
            }
            // Copy the message before mutating it so the original summary stays intact.
            final Message message = messageFactory.createMessage(ImmutableMap.copyOf(summary.message()));
            message.addField(targetField, result.singleValue());
            return summary.toBuilder().message(message.getFields()).build();
        })
        .collect(Collectors.toList());
    return searchResponse.toBuilder().messages(summaries).build();
}
/**
 * Exercises all decorator outcomes in one response: a successful lookup writes the
 * target field; an empty or null lookup result leaves the message untouched; a
 * message without the source field is skipped entirely.
 */
@Test
public void decorate() throws Exception {
    final String sourceField = "source";
    final String targetField = "source_decorated";
    final String lookupTableName = "test";
    final Decorator decorator = createDecorator(sourceField, targetField, lookupTableName);
    final Pair<LookupTableDecorator, LookupTableService.Function> lookupTableDecoratorPair = createLookupTableDecorator(decorator);
    final LookupTableDecorator lookupTableDecorator = lookupTableDecoratorPair.getLeft();
    final LookupTableService.Function function = lookupTableDecoratorPair.getRight();
    final List<ResultMessageSummary> messages = ImmutableList.of(
        ResultMessageSummary.create(ImmutableMultimap.of(), ImmutableMap.of("_id", "a", sourceField, "0"), "graylog_0"),
        ResultMessageSummary.create(ImmutableMultimap.of(), ImmutableMap.of("_id", "b", sourceField, "1"), "graylog_0"),
        ResultMessageSummary.create(ImmutableMultimap.of(), ImmutableMap.of("_id", "c", sourceField, "2"), "graylog_0"),
        ResultMessageSummary.create(ImmutableMultimap.of(), ImmutableMap.of("_id", "d", sourceField, "3"), "graylog_0"),
        ResultMessageSummary.create(ImmutableMultimap.of(), ImmutableMap.of("_id", "e", "invalid", "4"), "graylog_0")
    );
    final SearchResponse searchResponse = createSearchResponse(messages);
    // "0" and "1" resolve; "2" is an empty result; "3" yields null.
    when(function.lookup("0")).thenReturn(LookupResult.single("zero"));
    when(function.lookup("1")).thenReturn(LookupResult.single("one"));
    when(function.lookup("2")).thenReturn(LookupResult.empty());
    when(function.lookup("3")).thenReturn(null);
    final SearchResponse response = lookupTableDecorator.apply(searchResponse);
    assertThat(response.messages().get(0).message().get(sourceField)).isEqualTo("0");
    assertThat(response.messages().get(0).message().get(targetField)).isEqualTo("zero");
    assertThat(response.messages().get(1).message().get(sourceField)).isEqualTo("1");
    assertThat(response.messages().get(1).message().get(targetField)).isEqualTo("one");
    assertThat(response.messages().get(2).message().get(sourceField)).isEqualTo("2");
    assertThat(response.messages().get(2).message()).doesNotContainKey(targetField);
    assertThat(response.messages().get(3).message().get(sourceField)).isEqualTo("3");
    assertThat(response.messages().get(3).message()).doesNotContainKey(targetField);
    assertThat(response.messages().get(4).message().get("invalid")).isEqualTo("4");
    assertThat(response.messages().get(4).message()).doesNotContainKey(targetField);
}
/**
 * Validates a proposed capacity-scheduler configuration by standing up a throwaway
 * scheduler initialized with the old config, mirroring the live scheduler's nodes,
 * and reinitializing it with the new config. Any validation failure surfaces as an
 * exception from {@code reinitialize}; the temporary scheduler is always stopped.
 *
 * @param oldConfParam the currently active configuration (deep-copied, not mutated)
 * @param newConf the candidate configuration to validate
 * @param rmContext the RM context providing the live scheduler and its nodes
 * @return {@code true} when the new configuration passes validation
 * @throws IOException if initialization or reinitialization fails validation
 */
public static boolean validateCSConfiguration(
    final Configuration oldConfParam, final Configuration newConf, final RMContext rmContext)
    throws IOException {
  // ensure that the oldConf is deep copied
  Configuration oldConf = new Configuration(oldConfParam);
  // Mark both configs so metrics created during validation don't pollute live metrics.
  QueueMetrics.setConfigurationValidation(oldConf, true);
  QueueMetrics.setConfigurationValidation(newConf, true);
  CapacityScheduler liveScheduler = (CapacityScheduler) rmContext.getScheduler();
  CapacityScheduler newCs = new CapacityScheduler();
  try {
    //TODO: extract all the validation steps and replace reinitialize with
    //the specific validation steps
    newCs.setConf(oldConf);
    newCs.setRMContext(rmContext);
    newCs.init(oldConf);
    newCs.addNodes(liveScheduler.getAllNodes());
    newCs.reinitialize(newConf, rmContext, true);
    return true;
  } finally {
    // Always tear down the temporary scheduler, even when validation throws.
    newCs.stop();
  }
}
/**
 * Stopping a leaf queue via configuration must be accepted as a valid
 * capacity-scheduler configuration change.
 */
@Test
public void testValidateCSConfigStopALeafQueue() throws IOException {
    Configuration oldConfig = CapacitySchedulerConfigGeneratorForTest
        .createBasicCSConfiguration();
    Configuration newConfig = new Configuration(oldConfig);
    newConfig
        .set("yarn.scheduler.capacity.root.test1.state", "STOPPED");
    RMContext rmContext = prepareRMContext();
    boolean isValidConfig = CapacitySchedulerConfigValidator
        .validateCSConfiguration(oldConfig, newConfig, rmContext);
    Assert.assertTrue(isValidConfig);
}
/**
 * Creates a coder for Avro {@code GenericRecord}s of the given schema by
 * delegating to {@link AvroGenericCoder#of}.
 *
 * @param schema the Avro schema the coder encodes/decodes against
 * @return a generic-record coder for {@code schema}
 */
public static AvroGenericCoder of(Schema schema) {
    return AvroGenericCoder.of(schema);
}
/**
 * An AvroCoder must survive Kryo round-trips both before and after its thread-local
 * memoized fields have been populated, and the deserialized copies must still
 * encode/decode correctly.
 */
@Test
public void testKryoSerialization() throws Exception {
    Pojo value = new Pojo("Hello", 42, DATETIME_A);
    AvroCoder<Pojo> coder = AvroCoder.of(Pojo.class);
    // Kryo instantiation
    Kryo kryo = new Kryo();
    kryo.setInstantiatorStrategy(new StdInstantiatorStrategy());
    kryo.addDefaultSerializer(AvroCoder.SerializableSchemaSupplier.class, JavaSerializer.class);
    // Serialization of object without any memoization
    ByteArrayOutputStream coderWithoutMemoizationBos = new ByteArrayOutputStream();
    try (Output output = new Output(coderWithoutMemoizationBos)) {
        kryo.writeClassAndObject(output, coder);
    }
    // Force thread local memoization to store values.
    CoderProperties.coderDecodeEncodeEqual(coder, value);
    // Serialization of object with memoized fields
    ByteArrayOutputStream coderWithMemoizationBos = new ByteArrayOutputStream();
    try (Output output = new Output(coderWithMemoizationBos)) {
        kryo.writeClassAndObject(output, coder);
    }
    // Copy empty and memoized variants of the Coder
    ByteArrayInputStream bisWithoutMemoization =
        new ByteArrayInputStream(coderWithoutMemoizationBos.toByteArray());
    AvroCoder<Pojo> copiedWithoutMemoization =
        (AvroCoder<Pojo>) kryo.readClassAndObject(new Input(bisWithoutMemoization));
    ByteArrayInputStream bisWithMemoization =
        new ByteArrayInputStream(coderWithMemoizationBos.toByteArray());
    AvroCoder<Pojo> copiedWithMemoization =
        (AvroCoder<Pojo>) kryo.readClassAndObject(new Input(bisWithMemoization));
    CoderProperties.coderDecodeEncodeEqual(copiedWithoutMemoization, value);
    CoderProperties.coderDecodeEncodeEqual(copiedWithMemoization, value);
}
/**
 * Formats an epoch-day count as a date string using the given
 * {@link java.time.format.DateTimeFormatter} pattern. Formatters are cached per
 * pattern in {@code formatters}, which makes repeated calls cheap and thread-safe
 * (DateTimeFormatter instances are immutable).
 */
@Udf(description = "Converts the number of days since 1970-01-01 00:00:00 UTC/GMT to a date "
    + "string using the given format pattern. The format pattern should be in the format"
    + " expected by java.time.format.DateTimeFormatter")
public String dateToString(
    @UdfParameter(
        description = "The Epoch Day to convert,"
            + " based on the epoch 1970-01-01") final int epochDays,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.time.format.DateTimeFormatter.") final String formatPattern) {
  // A null pattern cannot be compiled; treat as "no result".
  if (formatPattern == null) {
    return null;
  }
  try {
    final DateTimeFormatter formatter = formatters.get(formatPattern);
    return LocalDate.ofEpochDay(epochDays).format(formatter);
  } catch (final ExecutionException | RuntimeException e) {
    // Invalid patterns surface as a KSQL function error with full context.
    throw new KsqlFunctionException("Failed to format date " + epochDays
        + " with formatter '" + formatPattern
        + "': " + e.getMessage(), e);
  }
}
/**
 * Hammers the UDF from parallel threads with 10,000 distinct patterns to verify
 * that the per-pattern formatter cache is thread-safe and pattern-specific.
 */
@Test
public void shouldByThreadSafeAndWorkWithManyDifferentFormatters() {
    IntStream.range(0, 10_000)
        .parallel()
        .forEach(idx -> {
            try {
                // Each index gets a unique literal suffix, forcing a distinct cache entry.
                final String pattern = "yyyy-MM-dd'X" + idx + "'";
                final String result = udf.dateToString(18765, pattern);
                assertThat(result, is("2021-05-18X" + idx));
            } catch (final Exception e) {
                fail(e.getMessage());
            }
        });
}
/**
 * Aggregates per-partition double-column statistics into a single column statistic.
 *
 * <p>Two regimes:
 * <ul>
 *   <li><b>All partitions have stats</b> (or fewer than 2 entries): merge low/high/
 *       nulls/NDV directly. NDV comes from merged bit vectors when every partition
 *       carries a mergeable estimator; otherwise it is estimated either from the
 *       value-density function or from a tuner-weighted point between the lower
 *       bound (max per-partition NDV) and upper bound (sum of NDVs).</li>
 *   <li><b>Some partitions are missing stats</b>: build pseudo-partitions (runs of
 *       adjacent partitions whose estimators merge), then extrapolate the overall
 *       statistic from the adjusted per-(pseudo-)partition map.</li>
 * </ul>
 *
 * <p>A merged KLL histogram is attached when available.
 *
 * @param colStatsWithSourceInfo per-partition stats with their source partition names
 * @param partNames all partition names that were requested
 * @param areAllPartsFound whether stats for every requested partition were found
 * @return the aggregated column statistics object
 * @throws MetaException if the input statistics list is invalid
 */
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
    List<String> partNames, boolean areAllPartsFound) throws MetaException {
  checkStatisticsList(colStatsWithSourceInfo);
  ColumnStatisticsObj statsObj = null;
  String colType;
  String colName = null;
  // check if all the ColumnStatisticsObjs contain stats and all the ndv are
  // bitvectors
  boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
  NumDistinctValueEstimator ndvEstimator = null;
  boolean areAllNDVEstimatorsMergeable = true;
  for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
    ColumnStatisticsObj cso = csp.getColStatsObj();
    if (statsObj == null) {
      colName = cso.getColName();
      colType = cso.getColType();
      statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
          cso.getStatsData().getSetField());
      LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
          doAllPartitionContainStats);
    }
    DoubleColumnStatsDataInspector columnStatsData = doubleInspectorFromStats(cso);
    // check if we can merge NDV estimators
    if (columnStatsData.getNdvEstimator() == null) {
      areAllNDVEstimatorsMergeable = false;
      break;
    } else {
      NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
      if (ndvEstimator == null) {
        ndvEstimator = estimator;
      } else {
        if (!ndvEstimator.canMerge(estimator)) {
          areAllNDVEstimatorsMergeable = false;
          break;
        }
      }
    }
  }
  if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
    // Start from an empty estimator of the same kind so the first merge below
    // doesn't double-count the partition used as the template.
    ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
  }
  LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable);
  ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
  if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
    DoubleColumnStatsDataInspector aggregateData = null;
    // lowerBound = max per-partition NDV; higherBound = sum of NDVs.
    long lowerBound = 0;
    long higherBound = 0;
    double densityAvgSum = 0.0;
    DoubleColumnStatsMerger merger = new DoubleColumnStatsMerger();
    for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
      ColumnStatisticsObj cso = csp.getColStatsObj();
      DoubleColumnStatsDataInspector newData = doubleInspectorFromStats(cso);
      lowerBound = Math.max(lowerBound, newData.getNumDVs());
      higherBound += newData.getNumDVs();
      densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs();
      if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
        ndvEstimator.mergeEstimators(newData.getNdvEstimator());
      }
      if (aggregateData == null) {
        aggregateData = newData.deepCopy();
      } else {
        aggregateData.setLowValue(merger.mergeLowValue(
            merger.getLowValue(aggregateData), merger.getLowValue(newData)));
        aggregateData.setHighValue(merger.mergeHighValue(
            merger.getHighValue(aggregateData), merger.getHighValue(newData)));
        aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(), newData.getNumNulls()));
        aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(), newData.getNumDVs()));
      }
    }
    if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
      // if all the ColumnStatisticsObjs contain bitvectors, we do not need to
      // use uniform distribution assumption because we can merge bitvectors
      // to get a good estimation.
      aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
    } else {
      long estimation;
      if (useDensityFunctionForNDVEstimation) {
        // We have estimation, lowerbound and higherbound. We use estimation
        // if it is between lowerbound and higherbound.
        double densityAvg = densityAvgSum / partNames.size();
        estimation = (long) ((aggregateData.getHighValue() - aggregateData.getLowValue()) / densityAvg);
        if (estimation < lowerBound) {
          estimation = lowerBound;
        } else if (estimation > higherBound) {
          estimation = higherBound;
        }
      } else {
        // Linear interpolation between the bounds, controlled by ndvTuner.
        estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
      }
      aggregateData.setNumDVs(estimation);
    }
    columnStatisticsData.setDoubleStats(aggregateData);
  } else {
    // TODO: bail out if missing stats are over a certain threshold
    // we need extrapolation
    LOG.debug("start extrapolation for {}", colName);
    Map<String, Integer> indexMap = new HashMap<>();
    for (int index = 0; index < partNames.size(); index++) {
      indexMap.put(partNames.get(index), index);
    }
    Map<String, Double> adjustedIndexMap = new HashMap<>();
    Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
    // while we scan the css, we also get the densityAvg, lowerbound and
    // higherbound when useDensityFunctionForNDVEstimation is true.
    double densityAvgSum = 0.0;
    if (!areAllNDVEstimatorsMergeable) {
      // if not every partition uses bitvector for ndv, we just fall back to
      // the traditional extrapolation methods.
      for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
        ColumnStatisticsObj cso = csp.getColStatsObj();
        String partName = csp.getPartName();
        DoubleColumnStatsData newData = cso.getStatsData().getDoubleStats();
        if (useDensityFunctionForNDVEstimation && newData.isSetLowValue() && newData.isSetHighValue()) {
          densityAvgSum += (newData.getHighValue() - newData.getLowValue()) / newData.getNumDVs();
        }
        adjustedIndexMap.put(partName, (double) indexMap.get(partName));
        adjustedStatsMap.put(partName, cso.getStatsData());
      }
    } else {
      // we first merge all the adjacent bitvectors that we could merge and
      // derive new partition names and index.
      StringBuilder pseudoPartName = new StringBuilder();
      double pseudoIndexSum = 0;
      int length = 0;
      int curIndex = -1;
      DoubleColumnStatsData aggregateData = null;
      for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
        ColumnStatisticsObj cso = csp.getColStatsObj();
        String partName = csp.getPartName();
        DoubleColumnStatsDataInspector newData = doubleInspectorFromStats(cso);
        // newData.isSetBitVectors() should be true for sure because we
        // already checked it before.
        if (indexMap.get(partName) != curIndex) {
          // There is bitvector, but it is not adjacent to the previous ones.
          if (length > 0) {
            // Flush the accumulated pseudo-partition before starting a new run.
            // we have to set ndv
            adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
            aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
            ColumnStatisticsData csd = new ColumnStatisticsData();
            csd.setDoubleStats(aggregateData);
            adjustedStatsMap.put(pseudoPartName.toString(), csd);
            if (useDensityFunctionForNDVEstimation) {
              densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs();
            }
            // reset everything
            pseudoPartName = new StringBuilder();
            pseudoIndexSum = 0;
            length = 0;
            ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
          }
          aggregateData = null;
        }
        curIndex = indexMap.get(partName);
        pseudoPartName.append(partName);
        pseudoIndexSum += curIndex;
        length++;
        curIndex++;
        if (aggregateData == null) {
          aggregateData = newData.deepCopy();
        } else {
          aggregateData.setLowValue(Math.min(aggregateData.getLowValue(), newData.getLowValue()));
          aggregateData.setHighValue(Math.max(aggregateData.getHighValue(), newData.getHighValue()));
          aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
        }
        ndvEstimator.mergeEstimators(newData.getNdvEstimator());
      }
      if (length > 0) {
        // Flush the final pseudo-partition.
        // we have to set ndv
        adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
        aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
        ColumnStatisticsData csd = new ColumnStatisticsData();
        csd.setDoubleStats(aggregateData);
        adjustedStatsMap.put(pseudoPartName.toString(), csd);
        if (useDensityFunctionForNDVEstimation) {
          densityAvgSum += (aggregateData.getHighValue() - aggregateData.getLowValue()) / aggregateData.getNumDVs();
        }
      }
    }
    extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
        adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
  }
  LOG.debug(
      "Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
      colName, columnStatisticsData.getDoubleStats().getNumDVs(), partNames.size(),
      colStatsWithSourceInfo.size());
  KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
  if (mergedKllHistogramEstimator != null) {
    columnStatisticsData.getDoubleStats().setHistogram(mergedKllHistogramEstimator.serialize());
  }
  statsObj.setStatsData(columnStatisticsData);
  return statsObj;
}
@Test public void testAggregateMultiStatsWhenUnmergeableBitVectors() throws MetaException { List<String> partitions = Arrays.asList("part1", "part2", "part3"); ColumnStatisticsData data1 = new ColStatsBuilder<>(double.class).numNulls(1).numDVs(3) .low(1d).high(3d).fmSketch(1, 2, 3).build(); ColumnStatisticsData data2 = new ColStatsBuilder<>(double.class).numNulls(2).numDVs(3) .low(3d).high(5d).hll(3, 4, 5).build(); ColumnStatisticsData data3 = new ColStatsBuilder<>(double.class).numNulls(3).numDVs(4) .low(1d).high(8d).hll(1, 2, 6, 8).build(); List<ColStatsObjWithSourceInfo> statsList = Arrays.asList( createStatsWithInfo(data1, TABLE, COL, partitions.get(0)), createStatsWithInfo(data2, TABLE, COL, partitions.get(1)), createStatsWithInfo(data3, TABLE, COL, partitions.get(2))); DoubleColumnStatsAggregator aggregator = new DoubleColumnStatsAggregator(); ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true); // the aggregation does not update the bitvector, only numDVs is, it keeps the first bitvector; // numDVs is set to the maximum among all stats when non-mergeable bitvectors are detected ColumnStatisticsData expectedStats = new ColStatsBuilder<>(double.class).numNulls(6).numDVs(4) .low(1d).high(8d).fmSketch(1, 2, 3).build(); assertEqualStatistics(expectedStats, computedStatsObj.getStatsData()); aggregator.useDensityFunctionForNDVEstimation = true; computedStatsObj = aggregator.aggregate(statsList, partitions, true); // the use of the density function leads to a different estimation for numNDV expectedStats = new ColStatsBuilder<>(double.class).numNulls(6).numDVs(6) .low(1d).high(8d).fmSketch(1, 2, 3).build(); assertEqualStatistics(expectedStats, computedStatsObj.getStatsData()); aggregator.useDensityFunctionForNDVEstimation = false; double[] tunerValues = new double[] { 0, 0.5, 0.75, 1 }; long[] expectedDVs = new long[] { 4, 7, 8, 10 }; for (int i = 0; i < tunerValues.length; i++) { aggregator.ndvTuner = tunerValues[i]; 
computedStatsObj = aggregator.aggregate(statsList, partitions, true); expectedStats = new ColStatsBuilder<>(double.class).numNulls(6).numDVs(expectedDVs[i]) .low(1d).high(8d).fmSketch(1, 2, 3).build(); assertEqualStatistics(expectedStats, computedStatsObj.getStatsData()); } }
@Override public void validate(String methodName, Class<?>[] parameterTypes, Object[] arguments) throws Exception { List<Class<?>> groups = new ArrayList<>(); Class<?> methodClass = methodClass(methodName); if (methodClass != null) { groups.add(methodClass); } Method method = clazz.getMethod(methodName, parameterTypes); Class<?>[] methodClasses; if (method.isAnnotationPresent(MethodValidated.class)) { methodClasses = method.getAnnotation(MethodValidated.class).value(); groups.addAll(Arrays.asList(methodClasses)); } // add into default group groups.add(0, Default.class); groups.add(1, clazz); // convert list to array Class<?>[] classGroups = groups.toArray(new Class[0]); Set<ConstraintViolation<?>> violations = new HashSet<>(); Object parameterBean = getMethodParameterBean(clazz, method, arguments); if (parameterBean != null) { violations.addAll(validator.validate(parameterBean, classGroups)); } for (Object arg : arguments) { validate(violations, arg, classGroups); } if (!violations.isEmpty()) { logger.info("Failed to validate service: " + clazz.getName() + ", method: " + methodName + ", cause: " + violations); throw new ConstraintViolationException( "Failed to validate service: " + clazz.getName() + ", method: " + methodName + ", cause: " + violations, violations); } }
@Test void testItWhenItMeetsConstraint() throws Exception { URL url = URL.valueOf("test://test:11/org.apache.dubbo.validation.support.jvalidation.mock.JValidatorTestTarget"); JValidator jValidator = new JValidator(url); jValidator.validate("someMethod2", new Class<?>[] {ValidationParameter.class}, new Object[] { new ValidationParameter("NotBeNull") }); }
/**
 * Converts a SeaTunnel {@link Column} definition back into the equivalent
 * DM (Dameng) column definition.
 *
 * <p>Decimal precision/scale and time/timestamp scales that fall outside DM's
 * supported ranges are clamped to the nearest legal value, and each adjustment
 * is logged as a warning. Variable-length string/binary columns that exceed the
 * page-size limits are widened to the corresponding TEXT/LONGVARBINARY types.
 *
 * @param column the SeaTunnel column to convert
 * @return the DM-specific column definition
 */
@Override
public BasicTypeDefine reconvert(Column column) {
    BasicTypeDefine.BasicTypeDefineBuilder builder =
            BasicTypeDefine.builder()
                    .name(column.getName())
                    .nullable(column.isNullable())
                    .comment(column.getComment())
                    .defaultValue(column.getDefaultValue());
    switch (column.getDataType().getSqlType()) {
        case BOOLEAN:
            builder.columnType(DM_BIT);
            builder.dataType(DM_BIT);
            break;
        case TINYINT:
            builder.columnType(DM_TINYINT);
            builder.dataType(DM_TINYINT);
            break;
        case SMALLINT:
            builder.columnType(DM_SMALLINT);
            builder.dataType(DM_SMALLINT);
            break;
        case INT:
            builder.columnType(DM_INT);
            builder.dataType(DM_INT);
            break;
        case BIGINT:
            builder.columnType(DM_BIGINT);
            builder.dataType(DM_BIGINT);
            break;
        case FLOAT:
            builder.columnType(DM_REAL);
            builder.dataType(DM_REAL);
            break;
        case DOUBLE:
            builder.columnType(DM_DOUBLE);
            builder.dataType(DM_DOUBLE);
            break;
        case DECIMAL:
            DecimalType decimalType = (DecimalType) column.getDataType();
            long precision = decimalType.getPrecision();
            int scale = decimalType.getScale();
            if (precision <= 0) {
                // Non-positive precision is invalid; fall back to the defaults.
                precision = DEFAULT_PRECISION;
                scale = DEFAULT_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is precision less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (precision > MAX_PRECISION) {
                // Clamp precision and shrink scale proportionally so the
                // integral part keeps as many digits as possible.
                scale = (int) Math.max(0, scale - (precision - MAX_PRECISION));
                precision = MAX_PRECISION;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum precision of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_PRECISION,
                        precision,
                        scale);
            }
            if (scale < 0) {
                scale = 0;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is scale less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (scale > MAX_SCALE) {
                scale = MAX_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum scale of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_SCALE,
                        precision,
                        scale);
            }
            builder.columnType(String.format("%s(%s,%s)", DM_DECIMAL, precision, scale));
            builder.dataType(DM_DECIMAL);
            builder.precision(precision);
            builder.scale(scale);
            break;
        case STRING:
            builder.length(column.getColumnLength());
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(DM_TEXT);
                builder.dataType(DM_TEXT);
            } else if (column.getColumnLength() <= MAX_CHAR_LENGTH_FOR_PAGE_4K) {
                builder.columnType(
                        String.format("%s(%s)", DM_VARCHAR2, column.getColumnLength()));
                builder.dataType(DM_VARCHAR2);
            } else {
                // Too long for VARCHAR2 on a 4K page; promote to TEXT.
                builder.columnType(DM_TEXT);
                builder.dataType(DM_TEXT);
            }
            break;
        case BYTES:
            builder.length(column.getColumnLength());
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(DM_LONGVARBINARY);
                builder.dataType(DM_LONGVARBINARY);
            } else if (column.getColumnLength() <= MAX_BINARY_LENGTH_FOR_PAGE_4K) {
                builder.columnType(
                        String.format("%s(%s)", DM_VARBINARY, column.getColumnLength()));
                builder.dataType(DM_VARBINARY);
            } else {
                // Too long for VARBINARY on a 4K page; promote to LONGVARBINARY.
                builder.columnType(DM_LONGVARBINARY);
                builder.dataType(DM_LONGVARBINARY);
            }
            break;
        case DATE:
            builder.columnType(DM_DATE);
            builder.dataType(DM_DATE);
            break;
        case TIME:
            builder.dataType(DM_TIME);
            if (column.getScale() != null && column.getScale() > 0) {
                Integer timeScale = column.getScale();
                if (timeScale > MAX_TIME_SCALE) {
                    timeScale = MAX_TIME_SCALE;
                    log.warn(
                            "The time column {} type time({}) is out of range, "
                                    + "which exceeds the maximum scale of {}, "
                                    + "it will be converted to time({})",
                            column.getName(),
                            column.getScale(),
                            // Fixed: previously logged MAX_SCALE (the decimal limit)
                            // instead of the time limit actually used for clamping.
                            MAX_TIME_SCALE,
                            timeScale);
                }
                builder.columnType(String.format("%s(%s)", DM_TIME, timeScale));
                builder.scale(timeScale);
            } else {
                builder.columnType(DM_TIME);
            }
            break;
        case TIMESTAMP:
            builder.dataType(DM_TIMESTAMP);
            if (column.getScale() != null && column.getScale() > 0) {
                Integer timestampScale = column.getScale();
                if (timestampScale > MAX_TIMESTAMP_SCALE) {
                    timestampScale = MAX_TIMESTAMP_SCALE;
                    log.warn(
                            "The timestamp column {} type timestamp({}) is out of range, "
                                    + "which exceeds the maximum scale of {}, "
                                    + "it will be converted to timestamp({})",
                            column.getName(),
                            column.getScale(),
                            MAX_TIMESTAMP_SCALE,
                            timestampScale);
                }
                builder.columnType(String.format("%s(%s)", DM_TIMESTAMP, timestampScale));
                builder.scale(timestampScale);
            } else {
                builder.columnType(DM_TIMESTAMP);
            }
            break;
        default:
            throw CommonError.convertToConnectorTypeError(
                    DatabaseIdentifier.DAMENG,
                    column.getDataType().toString(),
                    column.getName());
    }
    return builder.build();
}
@Test public void testReconvertDouble() { Column column = PhysicalColumn.builder().name("test").dataType(BasicType.DOUBLE_TYPE).build(); BasicTypeDefine typeDefine = DmdbTypeConverter.INSTANCE.reconvert(column); Assertions.assertEquals(column.getName(), typeDefine.getName()); Assertions.assertEquals(DmdbTypeConverter.DM_DOUBLE, typeDefine.getColumnType()); Assertions.assertEquals(DmdbTypeConverter.DM_DOUBLE, typeDefine.getDataType()); }
@Override public Double getLocalValue() { return this.min; }
@Test void testGet() { DoubleMinimum min = new DoubleMinimum(); assertThat(min.getLocalValue()).isCloseTo(Double.POSITIVE_INFINITY, within(0.0)); }
static Builder newBuilder() { return new AutoValue_SplunkEventWriter.Builder(); }
@Test public void eventWriterFullEndpoint() { Exception thrown = assertThrows( IllegalArgumentException.class, () -> SplunkEventWriter.newBuilder() .withUrl("http://test-url:8088/services/collector/event") .build()); assertTrue(thrown.getMessage().contains(SplunkEventWriter.INVALID_URL_FORMAT_MESSAGE)); }
public Customer getCustomer(String id) { return customerMap.get(id); }
@Test void shouldReturnNullWhenQueriedCustomerIsNotInRegistry() { Customer customerWithId5 = customerRegistry.getCustomer("5"); assertNull(customerWithId5); }