focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@SuppressWarnings("MethodLength") static void dissectControlRequest( final ArchiveEventCode eventCode, final MutableDirectBuffer buffer, final int offset, final StringBuilder builder) { int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder); HEADER_DECODER.wrap(buffer, offset + encodedLength); encodedLength += MessageHeaderDecoder.ENCODED_LENGTH; switch (eventCode) { case CMD_IN_CONNECT: CONNECT_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendConnect(builder); break; case CMD_IN_CLOSE_SESSION: CLOSE_SESSION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendCloseSession(builder); break; case CMD_IN_START_RECORDING: START_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartRecording(builder); break; case CMD_IN_STOP_RECORDING: STOP_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecording(builder); break; case CMD_IN_REPLAY: REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplay(builder); break; case CMD_IN_STOP_REPLAY: STOP_REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopReplay(builder); break; case CMD_IN_LIST_RECORDINGS: LIST_RECORDINGS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordings(builder); break; case CMD_IN_LIST_RECORDINGS_FOR_URI: LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordingsForUri(builder); break; case CMD_IN_LIST_RECORDING: LIST_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, 
HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecording(builder); break; case CMD_IN_EXTEND_RECORDING: EXTEND_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendExtendRecording(builder); break; case CMD_IN_RECORDING_POSITION: RECORDING_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendRecordingPosition(builder); break; case CMD_IN_TRUNCATE_RECORDING: TRUNCATE_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendTruncateRecording(builder); break; case CMD_IN_STOP_RECORDING_SUBSCRIPTION: STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecordingSubscription(builder); break; case CMD_IN_STOP_POSITION: STOP_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopPosition(builder); break; case CMD_IN_FIND_LAST_MATCHING_RECORD: FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendFindLastMatchingRecord(builder); break; case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS: LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordingSubscriptions(builder); break; case CMD_IN_START_BOUNDED_REPLAY: BOUNDED_REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartBoundedReplay(builder); break; case CMD_IN_STOP_ALL_REPLAYS: STOP_ALL_REPLAYS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopAllReplays(builder); break; case CMD_IN_REPLICATE: 
REPLICATE_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplicate(builder); break; case CMD_IN_STOP_REPLICATION: STOP_REPLICATION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopReplication(builder); break; case CMD_IN_START_POSITION: START_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartPosition(builder); break; case CMD_IN_DETACH_SEGMENTS: DETACH_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendDetachSegments(builder); break; case CMD_IN_DELETE_DETACHED_SEGMENTS: DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendDeleteDetachedSegments(builder); break; case CMD_IN_PURGE_SEGMENTS: PURGE_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendPurgeSegments(builder); break; case CMD_IN_ATTACH_SEGMENTS: ATTACH_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendAttachSegments(builder); break; case CMD_IN_MIGRATE_SEGMENTS: MIGRATE_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendMigrateSegments(builder); break; case CMD_IN_AUTH_CONNECT: AUTH_CONNECT_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendAuthConnect(builder); break; case CMD_IN_KEEP_ALIVE: KEEP_ALIVE_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendKeepAlive(builder); break; case CMD_IN_TAGGED_REPLICATE: TAGGED_REPLICATE_REQUEST_DECODER.wrap( buffer, offset + 
encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendTaggedReplicate(builder); break; case CMD_IN_START_RECORDING2: START_RECORDING_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartRecording2(builder); break; case CMD_IN_EXTEND_RECORDING2: EXTEND_RECORDING_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendExtendRecording2(builder); break; case CMD_IN_STOP_RECORDING_BY_IDENTITY: STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecordingByIdentity(builder); break; case CMD_IN_PURGE_RECORDING: PURGE_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendPurgeRecording(builder); break; case CMD_IN_REPLICATE2: REPLICATE_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplicate2(builder); break; case CMD_IN_REQUEST_REPLAY_TOKEN: REPLAY_TOKEN_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplayToken(builder); break; default: builder.append(": unknown command"); } }
@Test void controlRequestReplicate() { internalEncodeLogHeader(buffer, 0, 1000, 1000, () -> 500_000_000L); final ReplicateRequestEncoder requestEncoder = new ReplicateRequestEncoder(); requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder) .controlSessionId(2) .correlationId(5) .srcRecordingId(17) .dstRecordingId(2048) .srcControlStreamId(10) .srcControlChannel("CTRL ch") .liveDestination("live destination"); dissectControlRequest(CMD_IN_REPLICATE, buffer, 0, builder); assertEquals("[0.500000000] " + CONTEXT + ": " + CMD_IN_REPLICATE.name() + " [1000/1000]:" + " controlSessionId=2" + " correlationId=5" + " srcRecordingId=17" + " dstRecordingId=2048" + " srcControlStreamId=10" + " srcControlChannel=CTRL ch" + " liveDestination=live destination", builder.toString()); }
Mono<ServerResponse> listComments(ServerRequest request) { CommentQuery commentQuery = new CommentQuery(request); return commentPublicQueryService.list(commentQuery.toRef(), commentQuery.toPageRequest()) .flatMap(result -> { if (commentQuery.getWithReplies()) { return commentPublicQueryService.convertToWithReplyVo(result, commentQuery.getReplySize()); } return Mono.just(result); }) .flatMap(list -> ServerResponse.ok().bodyValue(list)); }
@Test void listComments() { when(commentPublicQueryService.list(any(), any(PageRequest.class))) .thenReturn(Mono.just(new ListResult<>(1, 10, 0, List.of()))); Ref ref = new Ref(); ref.setGroup("content.halo.run"); ref.setVersion("v1alpha1"); ref.setKind("Post"); ref.setName("test"); webTestClient.get() .uri(uriBuilder -> uriBuilder.path("/comments") .queryParam("group", ref.getGroup()) .queryParam("version", ref.getVersion()) .queryParam("kind", ref.getKind()) .queryParam("name", ref.getName()) .queryParam("page", 1) .queryParam("size", 10) .build()) .exchange() .expectStatus() .isOk(); ArgumentCaptor<Ref> refCaptor = ArgumentCaptor.forClass(Ref.class); verify(commentPublicQueryService, times(1)) .list(refCaptor.capture(), any(PageRequest.class)); Ref value = refCaptor.getValue(); assertThat(value).isEqualTo(ref); }
public void checkForUpgradeAndExtraProperties() throws IOException { if (upgradesEnabled()) { checkForUpgradeAndExtraProperties(systemEnvironment.getAgentMd5(), systemEnvironment.getGivenAgentLauncherMd5(), systemEnvironment.getAgentPluginsMd5(), systemEnvironment.getTfsImplMd5()); } else { LOGGER.debug("[Agent Upgrade] Skipping check as there is no wrapping launcher to relaunch the agent JVM..."); } }
@Test void checkForUpgradeShouldKillAgentIfAgentMD5doesNotMatch() { when(systemEnvironment.getAgentMd5()).thenReturn("old-agent-md5"); expectHeaderValue(SystemEnvironment.AGENT_CONTENT_MD5_HEADER, "new-agent-md5"); RuntimeException toBeThrown = new RuntimeException("Boo!"); doThrow(toBeThrown).when(jvmExitter).jvmExit(anyString(), anyString(), anyString()); try { agentUpgradeService.checkForUpgradeAndExtraProperties(); fail("should have done jvm exit"); } catch (Exception e) { assertThat(toBeThrown).isSameAs(e); } verify(jvmExitter).jvmExit("itself", "old-agent-md5", "new-agent-md5"); }
public PropertyPanel removeAllProps() { properties.clear(); return this; }
@Test public void removeAllProps() { props(); assertEquals("wrong props", 3, pp.properties().size()); pp.removeAllProps(); assertEquals("unexpected props", 0, pp.properties().size()); }
public String getProperty(String key) { return EnvUtil.getProperty(key); }
@Test void testGetPropertyV2() { envUtilMockedStatic.when(() -> EnvUtil.getProperty(eq("test"), eq("default"))).thenReturn("default"); assertEquals("default", new PropertyUtil().getProperty("test", "default")); }
public void deregisterListener(String groupName, String serviceName, NamingSelectorWrapper wrapper) { if (wrapper == null) { return; } String subId = NamingUtils.getGroupedName(serviceName, groupName); selectorManager.removeSelectorWrapper(subId, wrapper); }
@Test void testDeregisterListener() { List<String> clusters = Collections.singletonList(CLUSTER_STR_CASE); EventListener listener = Mockito.mock(EventListener.class); NamingSelector selector = NamingSelectorFactory.newClusterSelector(clusters); NamingSelectorWrapper wrapper = new NamingSelectorWrapper(selector, listener); instancesChangeNotifier.registerListener(GROUP_CASE, SERVICE_NAME_CASE, wrapper); List<ServiceInfo> subscribeServices = instancesChangeNotifier.getSubscribeServices(); assertEquals(1, subscribeServices.size()); instancesChangeNotifier.deregisterListener(GROUP_CASE, SERVICE_NAME_CASE, wrapper); List<ServiceInfo> subscribeServices2 = instancesChangeNotifier.getSubscribeServices(); assertEquals(0, subscribeServices2.size()); }
@Override public void streamRequest(StreamRequest request, Callback<StreamResponse> callback) { streamRequest(request, new RequestContext(), callback); }
@Test public void testStreamRetry() throws Exception { SimpleLoadBalancer balancer = prepareLoadBalancer(Arrays.asList("http://test.linkedin.com/retry1", "http://test.linkedin.com/good"), HttpClientFactory.UNLIMITED_CLIENT_REQUEST_RETRY_RATIO); DynamicClient dynamicClient = new DynamicClient(balancer, null); RetryClient client = new RetryClient( dynamicClient, balancer, D2ClientConfig.DEFAULT_RETRY_LIMIT, RetryClient.DEFAULT_UPDATE_INTERVAL_MS, RetryClient.DEFAULT_AGGREGATED_INTERVAL_NUM, SystemClock.instance(), true, true); URI uri = URI.create("d2://retryService?arg1arg2"); StreamRequest streamRequest = new StreamRequestBuilder(uri).build(EntityStreams.newEntityStream(new ByteStringWriter(CONTENT))); DegraderTrackerClientTest.TestCallback<StreamResponse> restCallback = new DegraderTrackerClientTest.TestCallback<>(); client.streamRequest(streamRequest, restCallback); assertNull(restCallback.e); assertNotNull(restCallback.t); }
public static long currentTimeMillis() { return CLOCK.currentTimeMillis(); }
@Test public void testCurrentTimeMillis() { assertTrue(Clock.currentTimeMillis() > 0); }
@SuppressWarnings("unchecked") @Override public <S extends StateStore> S getStateStore(final String name) { final StateStore store = stateManager.getGlobalStore(name); return (S) getReadWriteStore(store); }
@Test public void shouldNotAllowCloseForTimestampedWindowStore() { when(stateManager.getGlobalStore(GLOBAL_TIMESTAMPED_WINDOW_STORE_NAME)).thenReturn(mock(TimestampedWindowStore.class)); final StateStore store = globalContext.getStateStore(GLOBAL_TIMESTAMPED_WINDOW_STORE_NAME); try { store.close(); fail("Should have thrown UnsupportedOperationException."); } catch (final UnsupportedOperationException expected) { } }
public static Optional<ApiVersion> intersect(ApiVersion thisVersion, ApiVersion other) { if (thisVersion == null || other == null) return Optional.empty(); if (thisVersion.apiKey() != other.apiKey()) throw new IllegalArgumentException("thisVersion.apiKey: " + thisVersion.apiKey() + " must be equal to other.apiKey: " + other.apiKey()); short minVersion = (short) Math.max(thisVersion.minVersion(), other.minVersion()); short maxVersion = (short) Math.min(thisVersion.maxVersion(), other.maxVersion()); return minVersion > maxVersion ? Optional.empty() : Optional.of(new ApiVersion() .setApiKey(thisVersion.apiKey()) .setMinVersion(minVersion) .setMaxVersion(maxVersion)); }
@Test public void testIntersect() { assertFalse(ApiVersionsResponse.intersect(null, null).isPresent()); assertThrows(IllegalArgumentException.class, () -> ApiVersionsResponse.intersect(new ApiVersion().setApiKey((short) 10), new ApiVersion().setApiKey((short) 3))); short min = 0; short max = 10; ApiVersion thisVersion = new ApiVersion() .setApiKey(ApiKeys.FETCH.id) .setMinVersion(min) .setMaxVersion(Short.MAX_VALUE); ApiVersion other = new ApiVersion() .setApiKey(ApiKeys.FETCH.id) .setMinVersion(Short.MIN_VALUE) .setMaxVersion(max); ApiVersion expected = new ApiVersion() .setApiKey(ApiKeys.FETCH.id) .setMinVersion(min) .setMaxVersion(max); assertFalse(ApiVersionsResponse.intersect(thisVersion, null).isPresent()); assertFalse(ApiVersionsResponse.intersect(null, other).isPresent()); assertEquals(expected, ApiVersionsResponse.intersect(thisVersion, other).get()); // test for symmetric assertEquals(expected, ApiVersionsResponse.intersect(other, thisVersion).get()); }
public static ScanReport fromJson(String json) { return JsonUtil.parse(json, ScanReportParser::fromJson); }
@Test public void invalidSchema() { assertThatThrownBy( () -> ScanReportParser.fromJson( "{\"table-name\":\"roundTripTableName\",\"snapshot-id\":23,\"filter\":true,\"schema-id\":\"23\"}")) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot parse to an integer value: schema-id: \"23\""); assertThatThrownBy( () -> ScanReportParser.fromJson( "{\"table-name\":\"roundTripTableName\",\"snapshot-id\":23,\"filter\":true,\"schema-id\":23,\"projected-field-ids\": [\"1\"],\"metrics\":{}}")) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot parse integer from non-int value in projected-field-ids: \"1\""); assertThatThrownBy( () -> ScanReportParser.fromJson( "{\"table-name\":\"roundTripTableName\",\"snapshot-id\":23,\"filter\":true,\"schema-id\":23,\"projected-field-ids\": [1],\"projected-field-names\": [1],\"metrics\":{}}")) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot parse string from non-text value in projected-field-names: 1"); }
static FEELFnResult<Boolean> matchFunctionWithFlags(String input, String pattern, String flags) { log.debug("Input: {} , Pattern: {}, Flags: {}", input, pattern, flags); if ( input == null ) { throw new InvalidParameterException("input"); } if ( pattern == null ) { throw new InvalidParameterException("pattern"); } final String flagsString; if (flags != null && !flags.isEmpty()) { checkFlags(flags); if(!flags.contains("U")){ flags += "U"; } flagsString = String.format("(?%s)", flags); } else { flagsString = ""; } log.debug("flagsString: {}", flagsString); String stringToBeMatched = flagsString + pattern; log.debug("stringToBeMatched: {}", stringToBeMatched); Pattern p=Pattern.compile(stringToBeMatched); Matcher m = p.matcher( input ); boolean matchFound=m.find(); log.debug("matchFound: {}", matchFound); return FEELFnResult.ofResult(matchFound); }
@Test void invokeWithoutFlagsMatch() { FunctionTestUtil.assertResult(MatchesFunction.matchFunctionWithFlags("test", "test",null), true); FunctionTestUtil.assertResult(MatchesFunction.matchFunctionWithFlags("foobar", "^fo*b",null), true); FunctionTestUtil.assertResult(MatchesFunction.matchFunctionWithFlags("abracadabra", "bra", ""), true); FunctionTestUtil.assertResult(MatchesFunction.matchFunctionWithFlags("abracadabra", "bra",null), true); FunctionTestUtil.assertResult(MatchesFunction.matchFunctionWithFlags("(?xi)[hello world()]", "hello",null), true); }
public byte[] oui() { return Arrays.copyOfRange(this.address, 0, 3); }
@Test public void testOui() throws Exception { assertArrayEquals(MAC_ONOS.oui(), OUI_ONOS); }
static @Nullable Value lookupDocumentValue(Document document, String fieldPath) { OrderByFieldPath resolvedPath = OrderByFieldPath.fromString(fieldPath); // __name__ is a special field and doesn't exist in (top-level) valueMap (see // https://firebase.google.com/docs/firestore/reference/rest/v1/projects.databases.documents#Document). if (resolvedPath.isDocumentName()) { return Value.newBuilder().setReferenceValue(document.getName()).build(); } return findMapValue(new ArrayList<>(resolvedPath.getSegments()), document.getFieldsMap()); }
@Test public void lookupDocumentValue_returnsNullIfNotFound() { assertNull(QueryUtils.lookupDocumentValue(testDocument, "foobar")); }
public Analysis analyze( final Query query, final Optional<Sink> sink ) { final Analysis analysis = analyzer.analyze(query, sink); if (query.isPullQuery()) { pullQueryValidator.validate(analysis); } else { pushQueryValidator.validate(analysis); } if (!analysis.getTableFunctions().isEmpty()) { final AliasedDataSource ds = analysis.getFrom(); if (ds.getDataSource().getDataSourceType() == DataSourceType.KTABLE) { throw new KsqlException("Table source is not supported with table functions"); } } return analysis; }
@Test public void shouldPreValidateStaticQueries() { // Given: when(query.isPullQuery()).thenReturn(true); // When: queryAnalyzer.analyze(query, Optional.of(sink)); // Then: verify(staticValidator).validate(analysis); verifyNoMoreInteractions(continuousValidator); }
private static FeedbackDelayGenerator resolveDelayGenerator( final Context ctx, final UdpChannel channel, final boolean isMulticastSemantics) { if (isMulticastSemantics) { return ctx.multicastFeedbackDelayGenerator(); } final Long nakDelayNs = channel.nakDelayNs(); if (null != nakDelayNs) { final long retryDelayNs = nakDelayNs * ctx.nakUnicastRetryDelayRatio(); return new StaticDelayGenerator(nakDelayNs, retryDelayNs); } else { return ctx.unicastFeedbackDelayGenerator(); } }
@Test void shouldFixMulticastFeedbackGeneratorBasedOnReceiverGroupConsideration() { final MediaDriver.Context context = new MediaDriver.Context() .multicastFeedbackDelayGenerator(new OptimalMulticastDelayGenerator(10, 10)) .unicastFeedbackDelayGenerator(new StaticDelayGenerator(10)); final UdpChannel udpChannel = UdpChannel.parse("aeron:udp?endpoint=192.168.0.1:24326"); final FeedbackDelayGenerator feedbackDelayGenerator = DriverConductor.resolveDelayGenerator( context, udpChannel, InferableBoolean.FORCE_TRUE, (short)0); assertSame(context.multicastFeedbackDelayGenerator(), feedbackDelayGenerator); }
public synchronized GpuDeviceInformation parseXml(String xmlContent) throws YarnException { InputSource inputSource = new InputSource(new StringReader(xmlContent)); SAXSource source = new SAXSource(xmlReader, inputSource); try { return (GpuDeviceInformation) unmarshaller.unmarshal(source); } catch (JAXBException e) { String msg = "Failed to parse XML output of " + GPU_SCRIPT_REFERENCE + "!"; LOG.error(msg, e); throw new YarnException(msg, e); } }
@Test public void testParseEmptyString() throws YarnException { expected.expect(YarnException.class); GpuDeviceInformationParser parser = new GpuDeviceInformationParser(); parser.parseXml(""); }
@Override public double quantile(double p) { if (p < 0.0 || p > 1.0) { throw new IllegalArgumentException(); } return mu + scale * Math.log(p / (1.0 - p)); }
@Test public void testQuantile() { System.out.println("quantile"); LogisticDistribution instance = new LogisticDistribution(2.0, 1.0); instance.rand(); assertEquals(-4.906755, instance.quantile(0.001), 1E-6); assertEquals(-2.59512, instance.quantile(0.01), 1E-5); assertEquals(-0.1972246, instance.quantile(0.1), 1E-7); assertEquals(0.6137056, instance.quantile(0.2), 1E-6); assertEquals(2.0, instance.quantile(0.5), 1E-7); assertEquals(4.197225, instance.quantile(0.9), 1E-6); assertEquals(6.59512, instance.quantile(0.99), 1E-5); assertEquals(8.906755, instance.quantile(0.999), 1E-6); }
public T getResult() { return result; }
@Test public void testGetResult() { // Create a Result with a result value Result<String> result = new Result<>("Success", null, null); // Test the getResult method assertEquals("Success", result.getResult()); }
int getDurationDays() { if (period == null) { // attention endDate contient le dernier jour inclus jusqu'à 23h59m59s (cf parse), // donc on ajoute 1s pour compter le dernier jour return (int) ((endDate.getTime() + 1000 - startDate.getTime()) / MILLISECONDS_PER_DAY); } return period.getDurationDays(); }
@Test public void testGetDurationDays() { assertEquals("getDurationDays", 1L, periodRange.getDurationDays()); assertEquals("getDurationDays", 1L, customRange.getDurationDays()); }
static JavaType constructType(Type type) { try { return constructTypeInner(type); } catch (Exception e) { throw new InvalidDataTableTypeException(type, e); } }
@Test void wild_card_parameterized_types_use_upper_bound_in_equality() { JavaType javaType = TypeFactory.constructType(SUPPLIER_WILD_CARD_NUMBER); JavaType other = TypeFactory.constructType(SUPPLIER_NUMBER); assertThat(javaType, equalTo(other)); TypeFactory.Parameterized parameterized = (TypeFactory.Parameterized) javaType; JavaType elementType = parameterized.getElementTypes()[0]; assertThat(elementType.getOriginal(), equalTo(Number.class)); }
Collection<AuxiliaryService> getServices() { return Collections.unmodifiableCollection(serviceMap.values()); }
@Test public void testAuxServiceRecoverySetup() throws IOException { Configuration conf = getABConf("Asrv", "Bsrv", RecoverableServiceA.class, RecoverableServiceB.class); conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true); conf.set(YarnConfiguration.NM_RECOVERY_DIR, TEST_DIR.toString()); try { final AuxServices aux = new AuxServices(MOCK_AUX_PATH_HANDLER, MOCK_CONTEXT, MOCK_DEL_SERVICE); aux.init(conf); Assert.assertEquals(2, aux.getServices().size()); File auxStorageDir = new File(TEST_DIR, AuxServices.STATE_STORE_ROOT_NAME); Assert.assertEquals(2, auxStorageDir.listFiles().length); aux.close(); } finally { FileUtil.fullyDelete(TEST_DIR); } }
@VisibleForTesting static long roundTo(long x, int multiple) { return ((x + multiple - 1) / multiple) * multiple; }
@Test public void testSimpleSubclassSize() { assertSizeIs(mObjectHeaderSize + roundTo(1, mSuperClassPaddingSize) + 4, new Class2()); }
List<Token> tokenize() throws ScanException { List<Token> tokenList = new ArrayList<Token>(); StringBuilder buf = new StringBuilder(); while (pointer < patternLength) { char c = pattern.charAt(pointer); pointer++; switch (state) { case LITERAL_STATE: handleLiteralState(c, tokenList, buf); break; case START_STATE: handleStartState(c, tokenList, buf); break; case DEFAULT_VAL_STATE: handleDefaultValueState(c, tokenList, buf); default: } } // EOS switch (state) { case LITERAL_STATE: addLiteralToken(tokenList, buf); break; case DEFAULT_VAL_STATE: // trailing colon. see also LOGBACK-1140 buf.append(CoreConstants.COLON_CHAR); addLiteralToken(tokenList, buf); break; case START_STATE: // trailing $. see also LOGBACK-1149 buf.append(CoreConstants.DOLLAR); addLiteralToken(tokenList, buf); break; } return tokenList; }
@Test public void defaultSeparatorOutsideVariable() throws ScanException { String input = "{a:-b}"; Tokenizer tokenizer = new Tokenizer(input); List<Token> tokenList = tokenizer.tokenize(); witnessList.add(Token.CURLY_LEFT_TOKEN); witnessList.add(new Token(Token.Type.LITERAL, "a")); witnessList.add(Token.DEFAULT_SEP_TOKEN); witnessList.add(new Token(Token.Type.LITERAL, "b")); witnessList.add(Token.CURLY_RIGHT_TOKEN); assertEquals(witnessList, tokenList); }
@Override public OUT nextRecord(OUT record) throws IOException { OUT returnRecord = null; do { returnRecord = super.nextRecord(record); } while (returnRecord == null && !reachedEnd()); return returnRecord; }
@Test void readStringFields() { try { final String fileContent = "abc|def|ghijk\nabc||hhg\n|||"; final FileInputSplit split = createTempFile(fileContent); final TupleTypeInfo<Tuple3<String, String, String>> typeInfo = TupleTypeInfo.getBasicTupleTypeInfo(String.class, String.class, String.class); final CsvInputFormat<Tuple3<String, String, String>> format = new TupleCsvInputFormat<>(PATH, "\n", "|", typeInfo); final Configuration parameters = new Configuration(); format.configure(parameters); format.open(split); Tuple3<String, String, String> result = new Tuple3<>(); result = format.nextRecord(result); assertThat(result.f0).isEqualTo("abc"); assertThat(result.f1).isEqualTo("def"); assertThat(result.f2).isEqualTo("ghijk"); result = format.nextRecord(result); assertThat(result.f0).isEqualTo("abc"); assertThat(result.f1).isEmpty(); assertThat(result.f2).isEqualTo("hhg"); result = format.nextRecord(result); assertThat(result.f0).isEmpty(); assertThat(result.f1).isEmpty(); assertThat(result.f2).isEmpty(); result = format.nextRecord(result); assertThat(result).isNull(); assertThat(format.reachedEnd()).isTrue(); } catch (Exception ex) { ex.printStackTrace(); fail("Test failed due to a " + ex.getClass().getName() + ": " + ex.getMessage()); } }
public static String simpleTypeDescription(Type input) { StringBuilder builder = new StringBuilder(); format(builder, input); return builder.toString(); }
@Test public <T> void testTypeFormatterWithWildcards() throws Exception { assertEquals( "Map<T, T>", ReflectHelpers.simpleTypeDescription(new TypeDescriptor<Map<T, T>>() {}.getType())); }
@Override public BaseMetabolicRate calculate(BMRAttributes bmrAttributes) { if(bmrAttributes == null) return new BaseMetabolicRate(BigDecimal.valueOf(0)); if (bmrAttributes.getGender() == Gender.MALE) return calculateUsingMaleEquation(bmrAttributes); else return calculateUsingFemaleEquation(bmrAttributes); }
@Test void calculate_male() { BMRAttributes attributes = BMRAttributes.builder() .bodyWeightInKg(BigDecimal.valueOf(110)) .heightInCm(BigDecimal.valueOf(174)) .age(BigDecimal.valueOf(30)) .gender(Gender.MALE) .build(); BaseMetabolicRate baseMetabolicRate = bmrCalculator.calculate(attributes); assertEquals(new BigDecimal("2042.50"), baseMetabolicRate.getBMR()); }
public static EvictionConfig newEvictionConfig(Integer maxSize, MaxSizePolicy maxSizePolicy, EvictionPolicy evictionPolicy, boolean isNearCache, boolean isIMap, String comparatorClassName, EvictionPolicyComparator<?, ?, ?> comparator) { int finalSize = maxSize(maxSize, isIMap); MaxSizePolicy finalMaxSizePolicy = maxSizePolicy(maxSizePolicy, isIMap); EvictionPolicy finalEvictionPolicy = evictionPolicy(evictionPolicy, isIMap); try { doEvictionConfigChecks(finalMaxSizePolicy, finalEvictionPolicy, comparatorClassName, comparator, isIMap, isNearCache); } catch (IllegalArgumentException e) { throw new InvalidConfigurationException(e.getMessage()); } EvictionConfig evictionConfig = new EvictionConfig() .setSize(finalSize) .setMaxSizePolicy(finalMaxSizePolicy) .setEvictionPolicy(finalEvictionPolicy); if (comparatorClassName != null) { evictionConfig.setComparatorClassName(comparatorClassName); } if (comparator != null) { evictionConfig.setComparator(comparator); } return evictionConfig; }
@Test public void should_use_default_map_max_size_for_0_size() { EvictionConfig evictionConfig = newEvictionConfig(0, true); assertThat(evictionConfig.getSize()).isEqualTo(MapConfig.DEFAULT_MAX_SIZE); }
@Override public void close() { if (snapshotUtility != null) { snapshotUtility.close(); } }
@Test public void test_resumeTransaction() throws Exception { properties.setProperty("transactional.id", "txn.resumeTransactionTest"); // produce items KafkaProducer<String, String> producer = new KafkaProducer<>(properties); producer.initTransactions(); producer.beginTransaction(); producer.send(new ProducerRecord<>(topic, 0, null, "0")).get(); producer.send(new ProducerRecord<>(topic, 0, null, "1")).get(); long producerId = ResumeTransactionUtil.getProducerId(producer); short epoch = ResumeTransactionUtil.getEpoch(producer); // close the producer immediately to avoid aborting transaction producer.close(Duration.ZERO); // verify items are not visible KafkaConsumer<Integer, String> consumer = kafkaTestSupport.createConsumer(topic); ConsumerRecords<Integer, String> polledRecords = consumer.poll(Duration.ofSeconds(2)); assertEquals(0, polledRecords.count()); // recover and commit producer = new KafkaProducer<>(properties); ResumeTransactionUtil.resumeTransaction(producer, producerId, epoch); producer.commitTransaction(); // verify items are visible StringBuilder actualContents = new StringBuilder(); for (int receivedCount = 0; receivedCount < 2; ) { polledRecords = consumer.poll(Duration.ofSeconds(2)); for (ConsumerRecord<Integer, String> record : polledRecords) { actualContents.append(record.value()).append('\n'); receivedCount++; } logger.info("Received " + receivedCount + " records so far"); } assertEquals("0\n1\n", actualContents.toString()); producer.close(); consumer.close(); }
public StatsItem getStatsItem(final String statsName, final String statsKey) { try { return this.statsTable.get(statsName).getStatsItem(statsKey); } catch (Exception e) { } return null; }
@Test public void testGetStatsItem() { assertThat(brokerStatsManager.getStatsItem("TEST", "TEST")).isNull(); }
public static Properties loadProperties(Set<ClassLoader> classLoaders, String fileName) { return loadProperties(classLoaders, fileName, false, false); }
@Test void testPropertiesWithStructedValue() throws Exception { Properties p = ConfigUtils.loadProperties(Collections.emptySet(), "parameters.properties", false); Properties expected = new Properties(); expected.put("dubbo.parameters", "[{a:b},{c_.d: r*}]"); assertEquals(expected, p); }
@Override public List<ValidationMessage> validate(ValidationContext context) { return context.query().tokens().stream() .filter(this::isInvalidOperator) .map(token -> { final String errorMessage = String.format(Locale.ROOT, "Query contains invalid operator \"%s\". All AND / OR / NOT operators have to be written uppercase", token.image()); return ValidationMessage.builder(ValidationStatus.WARNING, ValidationType.INVALID_OPERATOR) .errorMessage(errorMessage) .relatedProperty(token.image()) .position(QueryPosition.from(token)) .build(); }).collect(Collectors.toList()); }
@Test
void testInvalidOperatorLowercaseAnd() {
    // A lowercase "and" must produce exactly one INVALID_OPERATOR warning.
    final ValidationContext context = TestValidationContext.create("foo:bar and").build();

    final List<ValidationMessage> messages = sut.validate(context);
    assertThat(messages).hasSize(1);

    final ValidationMessage warning = messages.get(0);
    assertThat(warning.validationType()).isEqualTo(ValidationType.INVALID_OPERATOR);
    assertThat(warning.relatedProperty()).hasValue("and");
}
/**
 * Registers a prefix watch on the given key so child updates and deletions
 * are routed to the supplied handlers. At most one watcher is kept per key.
 *
 * @param key           key prefix to watch
 * @param updateHandler invoked with (key, value) on updates
 * @param deleteHandler invoked with the key on deletions
 */
public void watchChildChange(final String key, final BiConsumer<String, String> updateHandler,
                             final Consumer<String> deleteHandler) {
    // Bail out early when already watching: the original built the listener
    // and watch option unconditionally and then discarded them for cached keys.
    // NOTE(review): check-then-act is not atomic — confirm registration is
    // single-threaded or watchChildCache is safe for concurrent callers.
    if (watchChildCache.containsKey(key)) {
        return;
    }
    Watch.Listener listener = watch(updateHandler, deleteHandler);
    WatchOption option = WatchOption.newBuilder()
            .withPrefix(ByteSequence.from(key, UTF_8))
            .build();
    Watch.Watcher watch = client.getWatchClient().watch(ByteSequence.from(key, UTF_8), option, listener);
    watchChildCache.put(key, watch);
}
@Test
public void testWatchChildChange() {
    BiConsumer<String, String> childUpdateHandler = mock(BiConsumer.class);
    Consumer<String> childDeleteHandler = mock(Consumer.class);

    // Registering and then closing the child watch must close the underlying watcher.
    etcdClient.watchChildChange(WATCH_CHILD_CHANGE_KEY, childUpdateHandler, childDeleteHandler);
    etcdClient.watchClose(WATCH_CHILD_CHANGE_KEY);

    verify(watcher).close();
}
/**
 * Assigns a complex (non-scalar) property on the target object by locating
 * and invoking the matching setter via reflection.
 * <p>
 * Fixes two message defects: the warning said "Not setter method" instead of
 * "No setter method", and the error message printed the parent object twice
 * instead of naming the component being set.
 *
 * @param name            property name whose setter is looked up
 * @param complexProperty value to pass to the setter
 */
public void setComplexProperty(String name, Object complexProperty) {
    Method setter = aggregationAssessor.findSetterMethod(name);
    if (setter == null) {
        addWarn("No setter method for property [" + name + "] in " + obj.getClass().getName());
        return;
    }
    Class<?>[] paramTypes = setter.getParameterTypes();
    if (!isSanityCheckSuccessful(name, setter, paramTypes, complexProperty)) {
        return;
    }
    try {
        invokeMethodWithSingleParameterOnThisObject(setter, complexProperty);
    } catch (Exception e) {
        // Report the component we failed to set and the parent it was meant for.
        addError("Could not set component " + complexProperty + " for parent component " + obj, e);
    }
}
@Test
public void testSetComplexProperty() {
    // Setting a complex property via reflection must reach the target setter.
    Door expectedDoor = new Door();
    setter.setComplexProperty("door", expectedDoor);
    assertEquals(expectedDoor, house.getDoor());
}
/**
 * Exposes this stage as a {@link CompletableFuture} by delegating to the
 * underlying ParSeq task's completion stage.
 *
 * @return a {@code CompletableFuture} reflecting the task's outcome
 */
@Override
public CompletableFuture<T> toCompletableFuture() {
    return _task.toCompletionStage().toCompletableFuture();
}
@Test
public void testCreateStageFromFuture_CompletableFuture() throws Exception {
    // A stage built from an already-completed future must expose its value.
    String expected = "testCreateStageFromCompletableFuture";
    CompletableFuture<String> source = new CompletableFuture<>();
    source.complete(expected);

    ParSeqBasedCompletionStage<String> stage =
        _parSeqBasedCompletionStageFactory.buildStageFromFuture(source, _executor);

    Assert.assertEquals(expected, stage.toCompletableFuture().get());
}
/**
 * Returns the computed logging-configuration differences as
 * {@link AlterConfigOp} entries.
 * <p>
 * NOTE(review): exposes the internal collection directly (no defensive copy)
 * — confirm callers do not mutate it.
 */
protected Collection<AlterConfigOp> getLoggingDiff() {
    return diff;
}
@Test public void testDiffUsingLoggerInheritance() { // Prepare desiredConfig String desiredConfig = getRealisticDesiredConfig(); // Prepare currentConfig Config currentConfig = getRealisticConfig(); KafkaBrokerLoggingConfigurationDiff diff = new KafkaBrokerLoggingConfigurationDiff(Reconciliation.DUMMY_RECONCILIATION, currentConfig, desiredConfig); assertThat(diff.getLoggingDiff(), is(getRealisticConfigDiff())); }
/**
 * Translates Fair Scheduler site-level settings from {@code conf} into their
 * Capacity Scheduler equivalents on {@code yarnSiteConfig}: scheduler class,
 * async/continuous scheduling, preemption, auto queue deletion,
 * assign-multiple, max-assign, locality thresholds, size-based weight and the
 * resource calculator.
 *
 * @param conf                 source Fair Scheduler configuration
 * @param yarnSiteConfig       target configuration receiving CS settings
 * @param drfUsed              whether Dominant Resource Fairness was in use
 * @param enableAsyncScheduler whether CS async scheduling should be forced on
 * @param userPercentage       when true, skips the auto-created-queue
 *                             deletion wiring
 * @param preemptionMode       requested preemption conversion mode
 */
@SuppressWarnings({"deprecation", "checkstyle:linelength"})
public void convertSiteProperties(Configuration conf,
    Configuration yarnSiteConfig, boolean drfUsed,
    boolean enableAsyncScheduler, boolean userPercentage,
    FSConfigToCSConfigConverterParams.PreemptionMode preemptionMode) {
  yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER,
      CapacityScheduler.class.getCanonicalName());

  // FS continuous scheduling maps onto CS asynchronous scheduling,
  // preserving the configured sleep interval.
  if (conf.getBoolean(
      FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_ENABLED,
      FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_ENABLED)) {
    yarnSiteConfig.setBoolean(
        CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
    int interval = conf.getInt(
        FairSchedulerConfiguration.CONTINUOUS_SCHEDULING_SLEEP_MS,
        FairSchedulerConfiguration.DEFAULT_CONTINUOUS_SCHEDULING_SLEEP_MS);
    yarnSiteConfig.setInt(PREFIX +
        "schedule-asynchronously.scheduling-interval-ms", interval);
  }

  // This should be always true to trigger cs auto refresh queue.
  yarnSiteConfig.setBoolean(
      YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);

  if (conf.getBoolean(FairSchedulerConfiguration.PREEMPTION,
      FairSchedulerConfiguration.DEFAULT_PREEMPTION)) {
    preemptionEnabled = true;
    // Register the proportional preemption policy and carry over the FS
    // kill-wait and starvation-check intervals.
    String policies = addMonitorPolicy(ProportionalCapacityPreemptionPolicy.
        class.getCanonicalName(), yarnSiteConfig);
    yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
        policies);
    int waitTimeBeforeKill = conf.getInt(
        FairSchedulerConfiguration.WAIT_TIME_BEFORE_KILL,
        FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_KILL);
    yarnSiteConfig.setInt(
        CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL,
        waitTimeBeforeKill);
    long waitBeforeNextStarvationCheck = conf.getLong(
        FairSchedulerConfiguration.WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS,
        FairSchedulerConfiguration.DEFAULT_WAIT_TIME_BEFORE_NEXT_STARVATION_CHECK_MS);
    yarnSiteConfig.setLong(
        CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL,
        waitBeforeNextStarvationCheck);
  } else {
    // Preemption disabled in FS: only clear the monitor policies when the
    // caller explicitly requested no policy at all.
    if (preemptionMode ==
        FSConfigToCSConfigConverterParams.PreemptionMode.NO_POLICY) {
      yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, "");
    }
  }

  // For auto created queue's auto deletion.
  if (!userPercentage) {
    String policies = addMonitorPolicy(AutoCreatedQueueDeletionPolicy.
        class.getCanonicalName(), yarnSiteConfig);
    yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
        policies);
    // Set the expired for deletion interval to 10s, consistent with fs.
    yarnSiteConfig.setInt(CapacitySchedulerConfiguration.
        AUTO_CREATE_CHILD_QUEUE_EXPIRED_TIME, 10);
  }

  if (conf.getBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE,
      FairSchedulerConfiguration.DEFAULT_ASSIGN_MULTIPLE)) {
    yarnSiteConfig.setBoolean(
        CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, true);
  } else {
    yarnSiteConfig.setBoolean(
        CapacitySchedulerConfiguration.ASSIGN_MULTIPLE_ENABLED, false);
  }

  // Make auto cs conf refresh enabled.
  yarnSiteConfig.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES,
      addMonitorPolicy(QueueConfigurationAutoRefreshPolicy
          .class.getCanonicalName(), yarnSiteConfig));

  // Carry max-assign over only when it differs from the FS default.
  int maxAssign = conf.getInt(FairSchedulerConfiguration.MAX_ASSIGN,
      FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN);
  if (maxAssign != FairSchedulerConfiguration.DEFAULT_MAX_ASSIGN) {
    yarnSiteConfig.setInt(
        CapacitySchedulerConfiguration.MAX_ASSIGN_PER_HEARTBEAT,
        maxAssign);
  }

  // Locality thresholds are likewise copied only when non-default.
  float localityThresholdNode = conf.getFloat(
      FairSchedulerConfiguration.LOCALITY_THRESHOLD_NODE,
      FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE);
  if (localityThresholdNode !=
      FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_NODE) {
    yarnSiteConfig.setFloat(CapacitySchedulerConfiguration.NODE_LOCALITY_DELAY,
        localityThresholdNode);
  }

  float localityThresholdRack = conf.getFloat(
      FairSchedulerConfiguration.LOCALITY_THRESHOLD_RACK,
      FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK);
  if (localityThresholdRack !=
      FairSchedulerConfiguration.DEFAULT_LOCALITY_THRESHOLD_RACK) {
    yarnSiteConfig.setFloat(
        CapacitySchedulerConfiguration.RACK_LOCALITY_ADDITIONAL_DELAY,
        localityThresholdRack);
  }

  if (conf.getBoolean(FairSchedulerConfiguration.SIZE_BASED_WEIGHT,
      FairSchedulerConfiguration.DEFAULT_SIZE_BASED_WEIGHT)) {
    sizeBasedWeight = true;
  }

  if (drfUsed) {
    yarnSiteConfig.set(
        CapacitySchedulerConfiguration.RESOURCE_CALCULATOR_CLASS,
        DominantResourceCalculator.class.getCanonicalName());
  }

  if (enableAsyncScheduler) {
    yarnSiteConfig.setBoolean(
        CapacitySchedulerConfiguration.SCHEDULE_ASYNCHRONOUSLY_ENABLE, true);
  }
}
@Test
public void testSiteMaxAssignConversion() {
    // A non-default FS max-assign must be copied to the CS per-heartbeat limit.
    final int configuredMaxAssign = 111;
    yarnConfig.setInt(FairSchedulerConfiguration.MAX_ASSIGN, configuredMaxAssign);

    converter.convertSiteProperties(yarnConfig, yarnConvertedConfig,
        false, false, false, null);

    assertEquals("Max assign", configuredMaxAssign,
        yarnConvertedConfig.getInt(
            CapacitySchedulerConfiguration.MAX_ASSIGN_PER_HEARTBEAT, -1));
}
/**
 * Aggregates timestamp column statistics from multiple partitions into a
 * single {@link ColumnStatisticsObj}. When every partition supplies stats (or
 * only one is present), values are merged directly; otherwise the missing
 * partitions are extrapolated. The NDV is taken from the merged estimators
 * when all partitions carry mergeable bitvectors, and from a density- or
 * tuner-based estimate otherwise.
 *
 * @param colStatsWithSourceInfo per-partition stats with source info
 * @param partNames              all requested partition names
 * @param areAllPartsFound       whether stats were found for all partitions
 * @return the aggregated statistics object
 * @throws MetaException on invalid input statistics
 */
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
    List<String> partNames, boolean areAllPartsFound) throws MetaException {
  checkStatisticsList(colStatsWithSourceInfo);

  ColumnStatisticsObj statsObj = null;
  String colType;
  String colName = null;
  // check if all the ColumnStatisticsObjs contain stats and all the ndv are
  // bitvectors
  boolean doAllPartitionContainStats = partNames.size() == colStatsWithSourceInfo.size();
  NumDistinctValueEstimator ndvEstimator = null;
  boolean areAllNDVEstimatorsMergeable = true;
  // First pass: capture column name/type from the first entry and decide
  // whether every partition's NDV estimator can be merged with the others.
  for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
    ColumnStatisticsObj cso = csp.getColStatsObj();
    if (statsObj == null) {
      colName = cso.getColName();
      colType = cso.getColType();
      statsObj = ColumnStatsAggregatorFactory.newColumnStaticsObj(colName, colType,
          cso.getStatsData().getSetField());
      LOG.trace("doAllPartitionContainStats for column: {} is: {}", colName,
          doAllPartitionContainStats);
    }
    TimestampColumnStatsDataInspector columnStatsData = timestampInspectorFromStats(cso);

    // check if we can merge NDV estimators
    if (columnStatsData.getNdvEstimator() == null) {
      areAllNDVEstimatorsMergeable = false;
      break;
    } else {
      NumDistinctValueEstimator estimator = columnStatsData.getNdvEstimator();
      if (ndvEstimator == null) {
        ndvEstimator = estimator;
      } else {
        if (!ndvEstimator.canMerge(estimator)) {
          areAllNDVEstimatorsMergeable = false;
          break;
        }
      }
    }
  }
  if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
    // Start from an empty estimator of the same type so each partition's
    // bitvector is merged exactly once below.
    ndvEstimator = NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
  }
  LOG.debug("all of the bit vectors can merge for {} is {}", colName, areAllNDVEstimatorsMergeable);
  ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
  if (doAllPartitionContainStats || colStatsWithSourceInfo.size() < 2) {
    // All partitions have stats: merge them directly.
    TimestampColumnStatsDataInspector aggregateData = null;
    // lowerBound/higherBound bracket the true NDV: at least the max partition
    // NDV, at most the sum over all partitions.
    long lowerBound = 0;
    long higherBound = 0;
    double densityAvgSum = 0.0;
    TimestampColumnStatsMerger merger = new TimestampColumnStatsMerger();
    for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
      ColumnStatisticsObj cso = csp.getColStatsObj();
      TimestampColumnStatsDataInspector newData = timestampInspectorFromStats(cso);
      lowerBound = Math.max(lowerBound, newData.getNumDVs());
      higherBound += newData.getNumDVs();
      if (newData.isSetLowValue() && newData.isSetHighValue()) {
        densityAvgSum += ((double) diff(newData.getHighValue(), newData.getLowValue()))
            / newData.getNumDVs();
      }
      if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
        ndvEstimator.mergeEstimators(newData.getNdvEstimator());
      }
      if (aggregateData == null) {
        aggregateData = newData.deepCopy();
      } else {
        aggregateData.setLowValue(merger.mergeLowValue(
            merger.getLowValue(aggregateData), merger.getLowValue(newData)));
        aggregateData.setHighValue(merger.mergeHighValue(
            merger.getHighValue(aggregateData), merger.getHighValue(newData)));
        aggregateData.setNumNulls(merger.mergeNumNulls(aggregateData.getNumNulls(),
            newData.getNumNulls()));
        aggregateData.setNumDVs(merger.mergeNumDVs(aggregateData.getNumDVs(),
            newData.getNumDVs()));
      }
    }
    if (areAllNDVEstimatorsMergeable && ndvEstimator != null) {
      // if all the ColumnStatisticsObjs contain bitvectors, we do not need to
      // use uniform distribution assumption because we can merge bitvectors
      // to get a good estimation.
      aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
    } else {
      long estimation;
      if (useDensityFunctionForNDVEstimation && aggregateData != null
          && aggregateData.isSetLowValue() && aggregateData.isSetHighValue()) {
        // We have estimation, lowerbound and higherbound. We use estimation
        // if it is between lowerbound and higherbound.
        double densityAvg = densityAvgSum / partNames.size();
        estimation = (long) (diff(aggregateData.getHighValue(), aggregateData.getLowValue())
            / densityAvg);
        if (estimation < lowerBound) {
          estimation = lowerBound;
        } else if (estimation > higherBound) {
          estimation = higherBound;
        }
      } else {
        // Interpolate between the bounds using the configured ndv tuner.
        estimation = (long) (lowerBound + (higherBound - lowerBound) * ndvTuner);
      }
      aggregateData.setNumDVs(estimation);
    }
    columnStatisticsData.setTimestampStats(aggregateData);
  } else {
    // TODO: bail out if missing stats are over a certain threshold
    // we need extrapolation
    LOG.debug("start extrapolation for {}", colName);
    Map<String, Integer> indexMap = new HashMap<>();
    for (int index = 0; index < partNames.size(); index++) {
      indexMap.put(partNames.get(index), index);
    }
    Map<String, Double> adjustedIndexMap = new HashMap<>();
    Map<String, ColumnStatisticsData> adjustedStatsMap = new HashMap<>();
    // while we scan the css, we also get the densityAvg, lowerbound and
    // higherbound when useDensityFunctionForNDVEstimation is true.
    double densityAvgSum = 0.0;
    if (!areAllNDVEstimatorsMergeable) {
      // if not every partition uses bitvector for ndv, we just fall back to
      // the traditional extrapolation methods.
      for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
        ColumnStatisticsObj cso = csp.getColStatsObj();
        String partName = csp.getPartName();
        TimestampColumnStatsData newData = cso.getStatsData().getTimestampStats();
        if (useDensityFunctionForNDVEstimation && newData.isSetLowValue()
            && newData.isSetHighValue()) {
          densityAvgSum += ((double) diff(newData.getHighValue(), newData.getLowValue()))
              / newData.getNumDVs();
        }
        adjustedIndexMap.put(partName, (double) indexMap.get(partName));
        adjustedStatsMap.put(partName, cso.getStatsData());
      }
    } else {
      // we first merge all the adjacent bitvectors that we could merge and
      // derive new partition names and index.
      StringBuilder pseudoPartName = new StringBuilder();
      double pseudoIndexSum = 0;
      int length = 0;
      int curIndex = -1;
      TimestampColumnStatsDataInspector aggregateData = null;
      for (ColStatsObjWithSourceInfo csp : colStatsWithSourceInfo) {
        ColumnStatisticsObj cso = csp.getColStatsObj();
        String partName = csp.getPartName();
        TimestampColumnStatsDataInspector newData = timestampInspectorFromStats(cso);
        // newData.isSetBitVectors() should be true for sure because we
        // already checked it before.
        if (indexMap.get(partName) != curIndex) {
          // There is bitvector, but it is not adjacent to the previous ones.
          if (length > 0) {
            // Flush the accumulated pseudo-partition before starting a new run.
            // we have to set ndv
            adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
            aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
            ColumnStatisticsData csd = new ColumnStatisticsData();
            csd.setTimestampStats(aggregateData);
            adjustedStatsMap.put(pseudoPartName.toString(), csd);
            if (useDensityFunctionForNDVEstimation) {
              densityAvgSum += ((double) diff(aggregateData.getHighValue(),
                  aggregateData.getLowValue())) / aggregateData.getNumDVs();
            }
            // reset everything
            pseudoPartName = new StringBuilder();
            pseudoIndexSum = 0;
            length = 0;
            ndvEstimator =
                NumDistinctValueEstimatorFactory.getEmptyNumDistinctValueEstimator(ndvEstimator);
          }
          aggregateData = null;
        }
        curIndex = indexMap.get(partName);
        pseudoPartName.append(partName);
        pseudoIndexSum += curIndex;
        length++;
        curIndex++;
        if (aggregateData == null) {
          aggregateData = newData.deepCopy();
        } else {
          aggregateData.setLowValue(min(aggregateData.getLowValue(), newData.getLowValue()));
          aggregateData.setHighValue(max(aggregateData.getHighValue(), newData.getHighValue()));
          aggregateData.setNumNulls(aggregateData.getNumNulls() + newData.getNumNulls());
        }
        ndvEstimator.mergeEstimators(newData.getNdvEstimator());
      }
      // Flush the trailing pseudo-partition, if any.
      if (length > 0) {
        // we have to set ndv
        adjustedIndexMap.put(pseudoPartName.toString(), pseudoIndexSum / length);
        aggregateData.setNumDVs(ndvEstimator.estimateNumDistinctValues());
        ColumnStatisticsData csd = new ColumnStatisticsData();
        csd.setTimestampStats(aggregateData);
        adjustedStatsMap.put(pseudoPartName.toString(), csd);
        if (useDensityFunctionForNDVEstimation) {
          densityAvgSum += ((double) diff(aggregateData.getHighValue(),
              aggregateData.getLowValue())) / aggregateData.getNumDVs();
        }
      }
    }
    extrapolate(columnStatisticsData, partNames.size(), colStatsWithSourceInfo.size(),
        adjustedIndexMap, adjustedStatsMap, densityAvgSum / adjustedStatsMap.size());
  }
  LOG.debug(
      "Ndv estimation for {} is {}. # of partitions requested: {}. # of partitions found: {}",
      colName, columnStatisticsData.getTimestampStats().getNumDVs(), partNames.size(),
      colStatsWithSourceInfo.size());
  // Merge KLL histograms (if present) independently of the NDV path.
  KllHistogramEstimator mergedKllHistogramEstimator = mergeHistograms(colStatsWithSourceInfo);
  if (mergedKllHistogramEstimator != null) {
    columnStatisticsData.getTimestampStats().setHistogram(mergedKllHistogramEstimator.serialize());
  }
  statsObj.setStatsData(columnStatisticsData);
  return statsObj;
}
/**
 * All three partitions provide stats with mergeable HLL bitvectors, so the
 * aggregator merges estimators instead of assuming a uniform distribution:
 * the TS_3 overlap between part1 and part2 is detected and the combined NDV
 * is 7 rather than the naive 2 + 3 + 2.
 */
@Test
public void testAggregateMultiStatsWhenAllAvailable() throws MetaException {
  List<String> partitions = Arrays.asList("part1", "part2", "part3");

  long[] values1 = { TS_1.getSecondsSinceEpoch(), TS_2.getSecondsSinceEpoch(),
      TS_3.getSecondsSinceEpoch() };
  ColumnStatisticsData data1 = new ColStatsBuilder<>(Timestamp.class).numNulls(1).numDVs(2)
      .low(TS_1).high(TS_3).hll(values1).kll(values1).build();

  long[] values2 = { TS_3.getSecondsSinceEpoch(), TS_4.getSecondsSinceEpoch(),
      TS_5.getSecondsSinceEpoch() };
  ColumnStatisticsData data2 = new ColStatsBuilder<>(Timestamp.class).numNulls(2).numDVs(3)
      .low(TS_3).high(TS_5).hll(values2).kll(values1).build();

  long[] values3 = { TS_6.getSecondsSinceEpoch(), TS_7.getSecondsSinceEpoch() };
  ColumnStatisticsData data3 = new ColStatsBuilder<>(Timestamp.class).numNulls(3).numDVs(2)
      .low(TS_6).high(TS_7).hll(values3).kll(values3).build();

  List<ColStatsObjWithSourceInfo> statsList = Arrays.asList(
      createStatsWithInfo(data1, TABLE, COL, partitions.get(0)),
      createStatsWithInfo(data2, TABLE, COL, partitions.get(1)),
      createStatsWithInfo(data3, TABLE, COL, partitions.get(2)));

  TimestampColumnStatsAggregator aggregator = new TimestampColumnStatsAggregator();

  ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true);

  // the aggregation does not update hll, only numDVs is, it keeps the first hll
  // notice that numDVs is computed by using HLL, it can detect that 'TS_3' appears twice
  ColumnStatisticsData expectedStats = new ColStatsBuilder<>(Timestamp.class).numNulls(6).numDVs(7)
      .low(TS_1).high(TS_7).hll(values1).kll(Longs.concat(values1, values2, values3)).build();

  assertEqualStatistics(expectedStats, computedStatsObj.getStatsData());
}
/**
 * Returns the progress bar stored in the job's metadata under the largest
 * (lexicographically) progress-bar key, or null when no such key exists.
 */
public static JobDashboardProgressBar get(Job job) {
    Map<String, Object> jobMetadata = job.getMetadata();
    String largestKey = null;
    for (String key : jobMetadata.keySet()) {
        if (!key.startsWith(JOBRUNR_PROGRESSBAR_KEY)) {
            continue;
        }
        if (largestKey == null || key.compareTo(largestKey) > 0) {
            largestKey = key;
        }
    }
    return largestKey == null
            ? null
            : new JobDashboardProgressBar(cast(jobMetadata.get(largestKey)));
}
@Test
void doesNotThrowExceptionIfNoJobProgressBarIsPresent() {
    // A job without progress-bar metadata yields null rather than an exception.
    final Job jobWithoutProgressBar = aJobInProgress().build();

    assertThatCode(() -> JobDashboardProgressBar.get(jobWithoutProgressBar)).doesNotThrowAnyException();
    assertThat(JobDashboardProgressBar.get(jobWithoutProgressBar)).isNull();
}
/**
 * Validates the given string as a URL: it must parse as a {@link URI} and
 * pass the scheme, authority, path, query and fragment checks. The
 * {@code file} scheme gets special handling: an empty authority means a
 * local file and is accepted immediately, while an authority containing a
 * colon (e.g. a drive letter as in {@code file://C:/...}) is rejected.
 *
 * @param value the candidate URL; {@code null} is invalid
 * @return {@code true} when the value is a valid URL
 */
public boolean isValid(String value) {
    if (value == null) {
        return false;
    }

    URI uri; // ensure value is a valid URI
    try {
        uri = new URI(value);
    } catch (URISyntaxException e) {
        return false;
    }
    // OK, perform additional validation
    String scheme = uri.getScheme();
    if (!isValidScheme(scheme)) {
        return false;
    }
    String authority = uri.getRawAuthority();
    if ("file".equals(scheme) && (authority == null || "".equals(authority))) {
        // Special case - file: allows an empty authority
        return true; // this is a local file - nothing more to do here
    } else if ("file".equals(scheme) && authority != null && authority.contains(":")) {
        // file://C:/... style URLs are always rejected (drive letter in authority).
        return false;
    } else {
        // Validate the authority
        if (!isValidAuthority(authority)) {
            return false;
        }
    }
    if (!isValidPath(uri.getRawPath())) {
        return false;
    }
    if (!isValidQuery(uri.getRawQuery())) {
        return false;
    }
    if (!isValidFragment(uri.getRawFragment())) {
        return false;
    }
    return true;
}
/**
 * VALIDATOR-276: file: URLs are rejected by the default validator and only
 * accepted once the "file" scheme is registered together with
 * {@code ALLOW_LOCAL_URLS}; drive-letter authorities remain invalid always.
 */
@Test
public void testValidator276() {
    // file:// isn't allowed by default
    UrlValidator validator = new UrlValidator();

    assertTrue("http://apache.org/ should be allowed by default",
            validator.isValid("http://www.apache.org/test/index.html"));

    assertFalse("file:///c:/ shouldn't be allowed by default",
            validator.isValid("file:///C:/some.file"));

    assertFalse("file:///c:\\ shouldn't be allowed by default",
            validator.isValid("file:///C:\\some.file"));

    assertFalse("file:///etc/ shouldn't be allowed by default",
            validator.isValid("file:///etc/hosts"));

    assertFalse("file://localhost/etc/ shouldn't be allowed by default",
            validator.isValid("file://localhost/etc/hosts"));

    assertFalse("file://localhost/c:/ shouldn't be allowed by default",
            validator.isValid("file://localhost/c:/some.file"));

    // Turn it on, and check
    // Note - we need to enable local urls when working with file:
    validator = new UrlValidator(new String[] {"http", "file"}, UrlValidator.ALLOW_LOCAL_URLS);

    assertTrue("http://apache.org/ should be allowed by default",
            validator.isValid("http://www.apache.org/test/index.html"));

    assertTrue("file:///c:/ should now be allowed",
            validator.isValid("file:///C:/some.file"));

    assertFalse("file:///c:\\ should not be allowed", // Only allow forward slashes
            validator.isValid("file:///C:\\some.file"));

    assertTrue("file:///etc/ should now be allowed",
            validator.isValid("file:///etc/hosts"));

    assertTrue("file://localhost/etc/ should now be allowed",
            validator.isValid("file://localhost/etc/hosts"));

    assertTrue("file://localhost/c:/ should now be allowed",
            validator.isValid("file://localhost/c:/some.file"));

    // These are never valid
    assertFalse("file://c:/ shouldn't ever be allowed, needs file:///c:/",
            validator.isValid("file://C:/some.file"));

    assertFalse("file://c:\\ shouldn't ever be allowed, needs file:///c:/",
            validator.isValid("file://C:\\some.file"));
}
/**
 * Renders the given headers as {@code "name:firstValue"} strings, taking only
 * the first value of each multi-valued header.
 * <p>
 * Iterates the map's entries directly instead of the original
 * keySet + {@code getFirst(key)} pattern, which looked each key up a second
 * time.
 *
 * @param headerMap headers to render; may be {@code null}
 * @return one string per header name; empty when {@code headerMap} is
 *         {@code null} or empty
 */
@VisibleForTesting
static List<String> getHeaderList(final MultivaluedMap<String, String> headerMap) {
    final List<String> headers = new LinkedList<>();
    if (headerMap != null) {
        // getFirst() semantics preserved: a missing/empty value list renders as "null".
        headerMap.forEach((key, values) ->
            headers.add(key + ":" + (values == null || values.isEmpty() ? null : values.get(0))));
    }
    return headers;
}
@Test
void testGetHeaderList() {
    // An empty map produces an empty header list.
    assertThat(WebSocketResourceProvider.getHeaderList(new MultivaluedHashMap<>())).isEmpty();

    // Only the first value of a multi-valued header is rendered.
    final MultivaluedMap<String, String> multiValued = new MultivaluedHashMap<>();
    multiValued.put("test", Arrays.asList("a", "b", "c"));

    final List<String> rendered = WebSocketResourceProvider.getHeaderList(multiValued);
    assertThat(rendered).hasSize(1);
    assertThat(rendered).contains("test:a");
}
/**
 * Builds a sampler from its textual probability. Blank and "1"/"1.0"/"1.0.0"
 * select the always-sample singleton, "0" selects never-sample, and any other
 * value in [0.01, 1] yields a counting sampler with that probability.
 */
public static Sampler create(final String probability) {
    // Blank configuration falls back to sampling everything.
    if (StringUtils.isBlank(probability)) {
        return ALWAYS_SAMPLE;
    }
    // Fast paths for the exact "never" and "always" spellings.
    if ("0".equals(probability)) {
        return NEVER_SAMPLE;
    }
    final boolean alwaysSpelling = "1".equals(probability)
            || "1.0".equals(probability)
            || "1.0.0".equals(probability);
    if (alwaysSpelling) {
        return ALWAYS_SAMPLE;
    }
    final float parsed = NumberUtils.toFloat(probability, 1);
    if (parsed < 0.01f || parsed > 1) {
        throw new IllegalArgumentException(
                "probability should be between 0.01 and 1: was " + probability);
    }
    return new CountSampler(parsed);
}
/**
 * Verifies the probability-string to Sampler mapping.
 * <p>
 * Fix: the original passed arguments as (actual, expected); JUnit's
 * {@code assertEquals(expected, actual)} expects the expected value first,
 * so failure messages were inverted.
 */
@Test
public void testCreate() {
    Assertions.assertEquals(Sampler.ALWAYS_SAMPLE, CountSampler.create(""));
    Assertions.assertEquals(Sampler.NEVER_SAMPLE, CountSampler.create("0"));
    Assertions.assertEquals(Sampler.ALWAYS_SAMPLE, CountSampler.create("1"));
    // A fractional probability yields a counting sampler instance.
    Assertions.assertEquals(CountSampler.class, CountSampler.create("0.5").getClass());
}
/**
 * Resolves candidate user ids from a comma-separated list of role ids: every
 * user holding any of the given roles is returned.
 *
 * @param execution current delegate execution (not used by this strategy)
 * @param param     comma-separated role ids, e.g. {@code "1,2"}
 * @return ids of users belonging to any of the roles
 */
@Override
public Set<Long> calculateUsers(DelegateExecution execution, String param) {
    Set<Long> roleIds = StrUtils.splitToLongSet(param);
    return permissionApi.getUserRoleIdListByRoleIds(roleIds);
}
@Test
public void testCalculateUsers() {
    // Prepare parameters: a comma-separated list of role ids.
    String param = "1,2";
    // Mock the permission API to map roles {1, 2} to users {11, 22}.
    when(permissionApi.getUserRoleIdListByRoleIds(eq(asSet(1L, 2L))))
            .thenReturn(asSet(11L, 22L));
    // Invoke the strategy under test.
    Set<Long> results = strategy.calculateUsers(null, param);
    // Assert the resolved user ids.
    assertEquals(asSet(11L, 22L), results);
}
@VisibleForTesting static boolean hasEnoughCurvature(final int[] xs, final int[] ys, final int middlePointIndex) { // Calculate the radianValue formed between middlePointIndex, and one point in either // direction final int startPointIndex = middlePointIndex - CURVATURE_NEIGHBORHOOD; final int startX = xs[startPointIndex]; final int startY = ys[startPointIndex]; final int endPointIndex = middlePointIndex + CURVATURE_NEIGHBORHOOD; final int endX = xs[endPointIndex]; final int endY = ys[endPointIndex]; final int middleX = xs[middlePointIndex]; final int middleY = ys[middlePointIndex]; final int firstSectionXDiff = startX - middleX; final int firstSectionYDiff = startY - middleY; final double firstSectionLength = Math.sqrt(firstSectionXDiff * firstSectionXDiff + firstSectionYDiff * firstSectionYDiff); final int secondSectionXDiff = endX - middleX; final int secondSectionYDiff = endY - middleY; final double secondSectionLength = Math.sqrt( secondSectionXDiff * secondSectionXDiff + secondSectionYDiff * secondSectionYDiff); final double dotProduct = firstSectionXDiff * secondSectionXDiff + firstSectionYDiff * secondSectionYDiff; final double radianValue = Math.acos(dotProduct / firstSectionLength / secondSectionLength); return radianValue <= CURVATURE_THRESHOLD; }
@Test
public void testHasEnoughCurvatureStraight() {
    // Each row is {startX, startY, middleX, middleY, endX, endY} along a
    // straight line, so no curvature may be detected at the middle point.
    final int[][] straightLines = {
        {-100, 0, 0, 0, 100, 0},
        {0, -100, 0, 0, 0, 100},
        {50, -50, 0, 0, -50, 50},
        {-50, 50, 0, 0, 50, -50},
        {-41, 50, 9, 0, 59, -50},
    };
    for (final int[] line : straightLines) {
        final int[] xs = {line[0], line[2], line[4]};
        final int[] ys = {line[1], line[3], line[5]};
        Assert.assertFalse(GestureTypingDetector.hasEnoughCurvature(xs, ys, 1));
    }
}
/**
 * Tallies errors across the response. A non-NONE top-level error applies to
 * every partition, so the count is partitions + 1 (the extra one being the
 * top-level error itself); otherwise the per-partition codes are counted and
 * the NONE top-level error is added in. Before version 5 partition errors
 * are a flat list; from version 5 on they are grouped under topics.
 */
@Override
public Map<Errors, Integer> errorCounts() {
    Errors error = error();
    if (error != Errors.NONE) {
        // Minor optimization since the top-level error applies to all partitions
        if (version < 5)
            return Collections.singletonMap(error, data.partitionErrors().size() + 1);
        return Collections.singletonMap(error,
            data.topics().stream().mapToInt(t -> t.partitionErrors().size()).sum() + 1);
    }
    Map<Errors, Integer> errors;
    if (version < 5)
        errors = errorCounts(data.partitionErrors().stream().map(l -> Errors.forCode(l.errorCode())));
    else
        errors = errorCounts(data.topics().stream().flatMap(t -> t.partitionErrors().stream())
            .map(l -> Errors.forCode(l.errorCode())));
    // Count the NONE top-level error itself in the tally.
    updateErrorCounts(errors, Errors.NONE);
    return errors;
}
/**
 * When a top-level error is set it overrides the per-partition codes: the
 * count equals partitions + 1 (the +1 being the top-level error itself),
 * for every protocol version.
 */
@Test
public void testErrorCountsWithTopLevelError() {
    for (short version : LEADER_AND_ISR.allVersions()) {
        LeaderAndIsrResponse response;
        if (version < 5) {
            // Pre-v5: partition errors live in a flat list.
            List<LeaderAndIsrPartitionError> partitions = createPartitions("foo",
                asList(Errors.NONE, Errors.NOT_LEADER_OR_FOLLOWER));
            response = new LeaderAndIsrResponse(new LeaderAndIsrResponseData()
                .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code())
                .setPartitionErrors(partitions), version);
        } else {
            // v5+: partition errors are grouped under topics keyed by topic id.
            Uuid id = Uuid.randomUuid();
            LeaderAndIsrTopicErrorCollection topics = createTopic(id,
                asList(Errors.NONE, Errors.NOT_LEADER_OR_FOLLOWER));
            response = new LeaderAndIsrResponse(new LeaderAndIsrResponseData()
                .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code())
                .setTopics(topics), version);
        }
        // 2 partitions + the top-level error = 3.
        assertEquals(Collections.singletonMap(Errors.UNKNOWN_SERVER_ERROR, 3),
            response.errorCounts());
    }
}
/**
 * Looks up a single key in the materialized table's state store for the
 * given partition. A missing key yields an empty row iterator; any store
 * failure is wrapped in a MaterializationException.
 */
@Override
public KsMaterializedQueryResult<Row> get(
    final GenericKey key,
    final int partition,
    final Optional<Position> position
) {
    try {
        final ReadOnlyKeyValueStore<GenericKey, ValueAndTimestamp<GenericRow>> store =
            stateStore.store(QueryableStoreTypes.timestampedKeyValueStore(), partition);

        final ValueAndTimestamp<GenericRow> valueAndTimestamp = store.get(key);
        if (valueAndTimestamp == null) {
            // Missing key: an empty iterator, never null.
            return KsMaterializedQueryResult.rowIterator(Collections.emptyIterator());
        }

        final Row row = Row.of(
            stateStore.schema(), key, valueAndTimestamp.value(), valueAndTimestamp.timestamp());
        return KsMaterializedQueryResult.rowIterator(ImmutableList.of(row).iterator());
    } catch (final Exception e) {
        throw new MaterializationException("Failed to get value from materialized table", e);
    }
}
/**
 * A lookup must fetch the state store as a timestamped key-value store for
 * the requested partition.
 */
@Test
public void shouldGetStoreWithCorrectParams() {
    // When:
    table.get(A_KEY, PARTITION);

    // Then: the captured store type must be the timestamped key-value variant.
    verify(stateStore).store(storeTypeCaptor.capture(), anyInt());
    assertThat(storeTypeCaptor.getValue().getClass().getSimpleName(), is("TimestampedKeyValueStoreType"));
}
/**
 * Looks up the processor registered for the task's type.
 *
 * @param ceTask task whose type selects the processor
 * @return the matching {@link CeTaskProcessor}, or empty when no processor
 *         handles this task type
 */
@Override
public Optional<CeTaskProcessor> getForCeTask(CeTask ceTask) {
    return Optional.ofNullable(taskProcessorByCeTaskType.get(ceTask.getType()));
}
@Test
public void getForTask_returns_TaskProcessor_even_if_it_is_not_specific() {
    // A processor registered for several task types must still resolve for each of them.
    CeTaskProcessor multiTypeProcessor =
        createCeTaskProcessor(SOME_CE_TASK_TYPE + "_1", SOME_CE_TASK_TYPE, SOME_CE_TASK_TYPE + "_3");
    CeTaskProcessorRepositoryImpl underTest =
        new CeTaskProcessorRepositoryImpl(new CeTaskProcessor[] {multiTypeProcessor});

    assertThat(underTest.getForCeTask(createCeTask(SOME_CE_TASK_TYPE, SOME_COMPONENT_KEY)))
        .containsSame(multiTypeProcessor);
}
/**
 * Maps a point to its position along the space-filling curve covering the
 * rectangle. Points outside the rectangle — including NaNs and infinities —
 * are pushed to the very end of the ordering via {@link Long#MAX_VALUE}.
 *
 * @param x point x-coordinate
 * @param y point y-coordinate
 * @return the curve index, or {@code Long.MAX_VALUE} for out-of-bounds points
 */
public long indexOf(double x, double y) {
    if (!rectangle.contains(x, y)) {
        // Put things outside the box at the end
        // This will also handle infinities and NaNs
        return Long.MAX_VALUE;
    }
    // Quantize onto the discrete grid before computing the index.
    int xInt = (int) (xScale * (x - rectangle.getXMin()));
    int yInt = (int) (yScale * (y - rectangle.getYMin()));
    return discreteIndexOf(xInt, yInt);
}
/**
 * A point outside the unit rectangle must map to Long.MAX_VALUE.
 * <p>
 * Fix: the original passed arguments as (actual, expected); JUnit's
 * {@code assertEquals(expected, actual)} wants the expected value first, so
 * the failure message would have been inverted.
 */
@Test
public void testOutOfBounds() {
    HilbertIndex hilbert = new HilbertIndex(new Rectangle(0, 0, 1, 1));
    assertEquals(Long.MAX_VALUE, hilbert.indexOf(2., 2.));
}
/**
 * Writes the modification timestamp from {@code status} to the remote file
 * via the Files API and records the returned attributes on the status.
 * A status without a modified time is a no-op.
 * <p>
 * Fix: the inner {@code status.getModified() != null ? ... : null} ternary
 * was dead code — the surrounding null check already guarantees a non-null
 * modified time — so the mtime is now passed unconditionally.
 *
 * @param file   remote file whose timestamp is updated
 * @param status transfer status carrying the desired modified time
 * @throws BackgroundException when the API call fails
 */
@Override
public void setTimestamp(final Path file, final TransferStatus status) throws BackgroundException {
    try {
        if (null != status.getModified()) {
            final FileEntity response = new FilesApi(new BrickApiClient(session))
                .patchFilesPath(StringUtils.removeStart(file.getAbsolute(), String.valueOf(Path.DELIMITER)),
                    new FilesPathBody().providedMtime(new DateTime(status.getModified())));
            status.setResponse(new BrickAttributesFinderFeature(session).toAttributes(response));
        }
    }
    catch (ApiException e) {
        throw new BrickExceptionMappingService().map("Failure to write attributes of {0}", e, file);
    }
}
@Test
public void testSetTimestampRoot() throws Exception {
    // Setting a timestamp on the volume root must round-trip (second precision).
    final Path root = new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume));
    final long modified = System.currentTimeMillis();

    new BrickTimestampFeature(session).setTimestamp(root, modified);

    assertEquals(Timestamp.toSeconds(modified),
        new BrickAttributesFinderFeature(session).find(root).getModificationDate());
}
/**
 * Declares the given resource requirements to the resource manager. When no
 * connection is established the call is silently ignored.
 *
 * @param resourceRequirements requirements to submit
 */
@Override
public void declareResourceRequirements(ResourceRequirements resourceRequirements) {
    synchronized (lock) {
        checkNotClosed();
        if (isConnected()) {
            currentResourceRequirements = resourceRequirements;
            // Submission with 1 ms / 10 s duration parameters — the exact
            // retry/backoff semantics are defined by
            // triggerResourceRequirementsSubmission (not visible here).
            triggerResourceRequirementsSubmission(
                    Duration.ofMillis(1L),
                    Duration.ofMillis(10000L),
                    currentResourceRequirements);
        }
    }
}
@Test
void testIgnoreDeclareResourceRequirementsIfNotConnected() {
    // Declaring requirements before any connection exists must be a silent no-op.
    final DeclareResourceRequirementServiceConnectionManager connectionManager =
        createResourceManagerConnectionManager();

    connectionManager.declareResourceRequirements(createResourceRequirements());
}
public static String sanitizeAddress(InetSocketAddress addr) { Preconditions.checkArgument( !addr.isUnresolved(), "Unresolved address" ); String string = addr.getAddress().getHostAddress(); // Remove IPv6 scope if present if ( addr.getAddress() instanceof Inet6Address ) { int strip = string.indexOf( '%' ); return ( strip == -1 ) ? string : string.substring( 0, strip ); } else { return string; } }
@Test
public void testScope() {
    // A scoped IPv6 literal has its "%scope" suffix stripped ...
    assertEquals("0:0:0:0:0:0:0:1",
        AddressUtil.sanitizeAddress(new InetSocketAddress("0:0:0:0:0:0:0:1%0", 25577)));
    // ... while an unscoped literal is returned unchanged.
    assertEquals("0:0:0:0:0:0:0:1",
        AddressUtil.sanitizeAddress(new InetSocketAddress("0:0:0:0:0:0:0:1", 25577)));
}
/**
 * Returns the human-readable display name of this analyzer.
 */
@Override
public String getName() {
    return "OpenSSL Source Analyzer";
}
@Test
public void testGetName() {
    // The analyzer must report its canonical display name.
    final String reportedName = analyzer.getName();
    assertEquals("Analyzer name wrong.", "OpenSSL Source Analyzer", reportedName);
}
/**
 * Serializes this transaction output into the buffer: the value (written via
 * {@code Coin.write}) followed by the length-prefixed script bytes.
 *
 * @param buf buffer positioned at the write offset
 * @return the same buffer, for call chaining
 * @throws BufferOverflowException if the buffer has insufficient remaining space
 */
public ByteBuffer write(ByteBuffer buf) throws BufferOverflowException {
    Coin.valueOf(value).write(buf);
    Buffers.writeLengthPrefixedBytes(buf, scriptBytes);
    return buf;
}
@Test
@Parameters(method = "randomOutputs")
public void write(TransactionOutput output) {
    // The serialized form must fill the declared message size exactly.
    ByteBuffer target = ByteBuffer.allocate(output.messageSize());
    output.write(target);
    assertFalse(target.hasRemaining());
}
public static String normalizeHostName(String name) {
    // Resolve the name to its IP address; fall back to the original input
    // when resolution fails.
    try {
        InetAddress resolved = InetAddress.getByName(name);
        return resolved.getHostAddress();
    } catch (UnknownHostException unresolved) {
        return name;
    }
}
/**
 * Verifies NetUtils.normalizeHostNames for an IP literal, a resolvable
 * hostname, a numeric-prefixed hostname (HADOOP-8372) and an irresolvable
 * hostname. The test is skipped when DNS cannot resolve the probe host.
 */
@Test public void testNormalizeHostName() {
    String oneHost = "1.kanyezone.appspot.com";
    try {
        InetAddress.getByName(oneHost);
    } catch (UnknownHostException e) {
        // No usable DNS in this environment; skip rather than fail.
        Assume.assumeTrue("Network not resolving "+ oneHost, false);
    }
    List<String> hosts = Arrays.asList("127.0.0.1", "localhost", oneHost,
        "UnknownHost123");
    List<String> normalizedHosts = NetUtils.normalizeHostNames(hosts);
    // Summary included in every assertion message for easier diagnosis.
    String summary = "original [" + StringUtils.join(hosts, ", ") + "]"
        + " normalized [" + StringUtils.join(normalizedHosts, ", ") + "]";
    // when ipaddress is normalized, same address is expected in return
    assertEquals(summary, hosts.get(0), normalizedHosts.get(0));
    // for normalizing a resolvable hostname, resolved ipaddress is expected in return
    assertFalse("Element 1 equal "+ summary,
        normalizedHosts.get(1).equals(hosts.get(1)));
    assertEquals(summary, hosts.get(0), normalizedHosts.get(1));
    // this address HADOOP-8372: when normalizing a valid resolvable hostname start with numeric,
    // its ipaddress is expected to return
    assertFalse("Element 2 equal " + summary,
        normalizedHosts.get(2).equals(hosts.get(2)));
    // return the same hostname after normalizing a irresolvable hostname.
    assertEquals(summary, hosts.get(3), normalizedHosts.get(3));
}
public RMContext getRMContext() {
    // Plain accessor for the resource manager context.
    return rmContext;
}
@Test
public void testNodeHealthReportIsNotNull() throws Exception {
    final String hostName = "host1";
    final int memoryMb = 4 * 1024;
    NodeStatus mockNodeStatus = createMockNodeStatus();
    org.apache.hadoop.yarn.server.resourcemanager.NodeManager node =
        registerNode(hostName, 1234, 2345, NetworkTopology.DEFAULT_RACK,
            Resources.createResource(memoryMb, 1), mockNodeStatus);
    // Heartbeat twice so the RM has processed at least one status update.
    node.heartbeat();
    node.heartbeat();
    // Every registered node must expose a non-null health report.
    for (RMNode rmNode : resourceManager.getRMContext().getRMNodes().values()) {
        assertNotNull(rmNode.getHealthReport());
    }
}
/**
 * Writes an 8-byte integer in little-endian byte order, as required by the
 * MySQL wire protocol.
 *
 * @param value value to write
 */
public void writeInt8(final long value) { byteBuf.writeLongLE(value); }
@Test
void assertWriteInt8() {
    MySQLPacketPayload payload = new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8);
    payload.writeInt8(1L);
    // writeInt8 must delegate to the buffer's little-endian long writer.
    verify(byteBuf).writeLongLE(1L);
}
/**
 * Binding for HTTP GET requests. The requested operation is taken from the
 * {@code op} query parameter and dispatched below; in write-only access mode
 * only GETFILESTATUS and LISTSTATUS are permitted and every other operation
 * is answered with 403 FORBIDDEN.
 *
 * @param path the remainder of the request path (made absolute below)
 * @param uriInfo request URI, used to build redirect locations
 * @param op the requested filesystem operation
 * @param params operation parameters parsed from the request
 * @param request the raw servlet request, used for audit logging
 * @return the HTTP response for the operation
 * @throws IOException on filesystem errors or an invalid GET operation
 * @throws FileSystemAccessException when the filesystem cannot be accessed
 */
@GET
@Path("{path:.*}")
@Produces({MediaType.APPLICATION_OCTET_STREAM + "; " + JettyUtils.UTF_8,
    MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8})
public Response get(@PathParam("path") String path, @Context UriInfo uriInfo,
    @QueryParam(OperationParam.NAME) OperationParam op,
    @Context Parameters params, @Context HttpServletRequest request)
    throws IOException, FileSystemAccessException {
  // Restrict access to only GETFILESTATUS and LISTSTATUS in write-only mode
  if ((op.value() != HttpFSFileSystem.Operation.GETFILESTATUS) &&
      (op.value() != HttpFSFileSystem.Operation.LISTSTATUS) &&
      accessMode == AccessMode.WRITEONLY) {
    return Response.status(Response.Status.FORBIDDEN).build();
  }
  UserGroupInformation user = HttpUserGroupInformation.get();
  Response response;
  path = makeAbsolute(path);
  // Record operation and caller for audit logging.
  MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
  MDC.put("hostname", request.getRemoteAddr());
  switch (op.value()) {
  case OPEN: {
    Boolean noRedirect = params.get(
        NoRedirectParam.NAME, NoRedirectParam.class);
    if (noRedirect) {
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      //Invoking the command directly using an unmanaged FileSystem that is
      // released by the FileSystemReleaseFilter
      final FSOperations.FSOpen command = new FSOperations.FSOpen(path);
      final FileSystem fs = createFileSystem(user);
      InputStream is = null;
      UserGroupInformation ugi = UserGroupInformation
          .createProxyUser(user.getShortUserName(),
              UserGroupInformation.getLoginUser());
      try {
        is = ugi.doAs(new PrivilegedExceptionAction<InputStream>() {
          @Override
          public InputStream run() throws Exception {
            return command.execute(fs);
          }
        });
      } catch (InterruptedException ie) {
        // Re-interrupt so callers can observe the interruption.
        LOG.warn("Open interrupted.", ie);
        Thread.currentThread().interrupt();
      }
      Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
      Long len = params.get(LenParam.NAME, LenParam.class);
      AUDIT_LOG.info("[{}] offset [{}] len [{}]",
          new Object[] { path, offset, len });
      InputStreamEntity entity = new InputStreamEntity(is, offset, len);
      response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM)
          .build();
    }
    break;
  }
  case GETFILESTATUS: {
    FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS: {
    String filter = params.get(FilterParam.NAME, FilterParam.class);
    FSOperations.FSListStatus command =
        new FSOperations.FSListStatus(path, filter);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] filter [{}]", path, (filter != null) ? filter : "-");
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETHOMEDIRECTORY: {
    // Only valid against the root path.
    enforceRootPath(op.value(), path);
    FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("Home Directory for [{}]", user);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case INSTRUMENTATION: {
    // Admin-only: requires membership in the configured admin group.
    enforceRootPath(op.value(), path);
    Groups groups = HttpFSServerWebApp.get().get(Groups.class);
    Set<String> userGroups = groups.getGroupsSet(user.getShortUserName());
    if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
      throw new AccessControlException(
          "User not in HttpFSServer admin group");
    }
    Instrumentation instrumentation =
        HttpFSServerWebApp.get().get(Instrumentation.class);
    Map snapshot = instrumentation.getSnapshot();
    response = Response.ok(snapshot).build();
    break;
  }
  case GETCONTENTSUMMARY: {
    FSOperations.FSContentSummary command =
        new FSOperations.FSContentSummary(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Content summary for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETQUOTAUSAGE: {
    FSOperations.FSQuotaUsage command = new
        FSOperations.FSQuotaUsage(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("Quota Usage for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILECHECKSUM: {
    FSOperations.FSFileChecksum command =
        new FSOperations.FSFileChecksum(path);
    Boolean noRedirect = params.get(
        NoRedirectParam.NAME, NoRedirectParam.class);
    AUDIT_LOG.info("[{}]", path);
    if (noRedirect) {
      URI redirectURL = createOpenRedirectionURL(uriInfo);
      final String js = JsonUtil.toJsonString("Location", redirectURL);
      response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    } else {
      Map json = fsExecute(user, command);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    }
    break;
  }
  case GETFILEBLOCKLOCATIONS: {
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    // Negative or missing parameters fall back to the defaults above.
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocations command =
        new FSOperations.FSFileBlockLocations(path, offset, len);
    @SuppressWarnings("rawtypes") Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("BlockLocations", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETACLSTATUS: {
    FSOperations.FSAclStatus command = new FSOperations.FSAclStatus(path);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("ACL status for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETXATTRS: {
    List<String> xattrNames =
        params.getValues(XAttrNameParam.NAME, XAttrNameParam.class);
    XAttrCodec encoding =
        params.get(XAttrEncodingParam.NAME, XAttrEncodingParam.class);
    FSOperations.FSGetXAttrs command =
        new FSOperations.FSGetXAttrs(path, xattrNames,
            encoding);
    @SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttrs for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTXATTRS: {
    FSOperations.FSListXAttrs command = new FSOperations.FSListXAttrs(path);
    @SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
    AUDIT_LOG.info("XAttr names for [{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case LISTSTATUS_BATCH: {
    String startAfter = params.get(
        HttpFSParametersProvider.StartAfterParam.NAME,
        HttpFSParametersProvider.StartAfterParam.class);
    byte[] token = HttpFSUtils.EMPTY_BYTES;
    if (startAfter != null) {
      token = startAfter.getBytes(StandardCharsets.UTF_8);
    }
    FSOperations.FSListStatusBatch command = new FSOperations
        .FSListStatusBatch(path, token);
    @SuppressWarnings("rawtypes") Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}] token [{}]", path, token);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOT: {
    FSOperations.FSTrashRoot command = new FSOperations.FSTrashRoot(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETALLSTORAGEPOLICY: {
    FSOperations.FSGetAllStoragePolicies command =
        new FSOperations.FSGetAllStoragePolicies();
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTORAGEPOLICY: {
    FSOperations.FSGetStoragePolicy command =
        new FSOperations.FSGetStoragePolicy(path);
    JSONObject json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFF: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    FSOperations.FSGetSnapshotDiff command =
        new FSOperations.FSGetSnapshotDiff(path, oldSnapshotName,
            snapshotName);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTDIFFLISTING: {
    String oldSnapshotName = params.get(OldSnapshotNameParam.NAME,
        OldSnapshotNameParam.class);
    String snapshotName = params.get(SnapshotNameParam.NAME,
        SnapshotNameParam.class);
    String snapshotDiffStartPath = params
        .get(HttpFSParametersProvider.SnapshotDiffStartPathParam.NAME,
            HttpFSParametersProvider.SnapshotDiffStartPathParam.class);
    Integer snapshotDiffIndex =
        params.get(HttpFSParametersProvider.SnapshotDiffIndexParam.NAME,
            HttpFSParametersProvider.SnapshotDiffIndexParam.class);
    FSOperations.FSGetSnapshotDiffListing command =
        new FSOperations.FSGetSnapshotDiffListing(path, oldSnapshotName,
            snapshotName, snapshotDiffStartPath, snapshotDiffIndex);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTTABLEDIRECTORYLIST: {
    FSOperations.FSGetSnapshottableDirListing command =
        new FSOperations.FSGetSnapshottableDirListing();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSNAPSHOTLIST: {
    FSOperations.FSGetSnapshotListing command =
        new FSOperations.FSGetSnapshotListing(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSERVERDEFAULTS: {
    FSOperations.FSGetServerDefaults command =
        new FSOperations.FSGetServerDefaults();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case CHECKACCESS: {
    String mode = params.get(FsActionParam.NAME,
        FsActionParam.class);
    FsActionParam fsparam = new FsActionParam(mode);
    FSOperations.FSAccess command = new FSOperations.FSAccess(path,
        FsAction.getFsAction(fsparam.value()));
    fsExecute(user, command);
    AUDIT_LOG.info("[{}]", "/");
    response = Response.ok().build();
    break;
  }
  case GETECPOLICY: {
    FSOperations.FSGetErasureCodingPolicy command =
        new FSOperations.FSGetErasureCodingPolicy(path);
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECPOLICIES: {
    FSOperations.FSGetErasureCodingPolicies command =
        new FSOperations.FSGetErasureCodingPolicies();
    String js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETECCODECS: {
    FSOperations.FSGetErasureCodingCodecs command =
        new FSOperations.FSGetErasureCodingCodecs();
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("[{}]", path);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GET_BLOCK_LOCATIONS: {
    long offset = 0;
    long len = Long.MAX_VALUE;
    Long offsetParam = params.get(OffsetParam.NAME, OffsetParam.class);
    Long lenParam = params.get(LenParam.NAME, LenParam.class);
    AUDIT_LOG.info("[{}] offset [{}] len [{}]", path, offsetParam, lenParam);
    if (offsetParam != null && offsetParam > 0) {
      offset = offsetParam;
    }
    if (lenParam != null && lenParam > 0) {
      len = lenParam;
    }
    FSOperations.FSFileBlockLocationsLegacy command =
        new FSOperations.FSFileBlockLocationsLegacy(path, offset, len);
    @SuppressWarnings("rawtypes") Map locations = fsExecute(user, command);
    final String json = JsonUtil.toJsonString("LocatedBlocks", locations);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETFILELINKSTATUS: {
    FSOperations.FSFileLinkStatus command =
        new FSOperations.FSFileLinkStatus(path);
    @SuppressWarnings("rawtypes") Map js = fsExecute(user, command);
    AUDIT_LOG.info("[{}]",
        path);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETSTATUS: {
    FSOperations.FSStatus command = new FSOperations.FSStatus(path);
    @SuppressWarnings("rawtypes") Map js = fsExecute(user, command);
    response = Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  case GETTRASHROOTS: {
    Boolean allUsers = params.get(AllUsersParam.NAME, AllUsersParam.class);
    FSOperations.FSGetTrashRoots command =
        new FSOperations.FSGetTrashRoots(allUsers);
    Map json = fsExecute(user, command);
    AUDIT_LOG.info("allUsers [{}]", allUsers);
    response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
    break;
  }
  default: {
    throw new IOException(
        MessageFormat.format("Invalid HTTP GET operation [{0}]",
            op.value()));
  }
  }
  return response;
}
@Test @TestDir @TestJetty @TestHdfs public void testContentType() throws Exception { createHttpFSServer(false, false); FileSystem fs = FileSystem.get(TestHdfsHelper.getHdfsConf()); Path dir = new Path("/tmp"); Path file = new Path(dir, "foo"); fs.mkdirs(dir); fs.create(file); String user = HadoopUsersConfTestHelper.getHadoopUsers()[0]; URL url = new URL(TestJettyHelper.getJettyURL(), MessageFormat.format( "/webhdfs/v1/tmp/foo?user.name={0}&op=open&offset=1&length=2", user)); // test jsonParse with non-json type. final HttpURLConnection conn = (HttpURLConnection) url.openConnection(); conn.setRequestMethod(Operation.OPEN.getMethod()); conn.connect(); LambdaTestUtils.intercept(IOException.class, "Content-Type \"text/html;charset=iso-8859-1\" " + "is incompatible with \"application/json\"", () -> HttpFSUtils.jsonParse(conn)); conn.disconnect(); }
@Override public void serviceInit(Configuration conf) throws Exception { migration = conf.getBoolean(FairSchedulerConfiguration.MIGRATION_MODE, false); noTerminalRuleCheck = migration && conf.getBoolean(FairSchedulerConfiguration.NO_TERMINAL_RULE_CHECK, false); initScheduler(conf); super.serviceInit(conf); if (!migration) { // Initialize SchedulingMonitorManager schedulingMonitorManager.initialize(rmContext, conf); } }
/**
 * Verifies that serviceInit rejects configurations where the minimum memory
 * or vcore allocation exceeds the corresponding maximum, and that the error
 * message identifies the offending resource.
 */
@Test (timeout = 30000)
public void testConfValidation() throws Exception {
    // Invalid memory range: min (2048) > max (1024).
    Configuration conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 2048);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 1024);
    try {
        scheduler.serviceInit(conf);
        fail("Exception is expected because the min memory allocation is" +
            " larger than the max memory allocation.");
    } catch (YarnRuntimeException e) {
        // Exception is expected.
        assertTrue("The thrown exception is not the expected one.",
            e.getMessage().startsWith(
                "Invalid resource scheduler memory"));
    }
    // Invalid vcore range: min (2) > max (1).
    conf = new YarnConfiguration();
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, 2);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, 1);
    try {
        scheduler.serviceInit(conf);
        fail("Exception is expected because the min vcores allocation is" +
            " larger than the max vcores allocation.");
    } catch (YarnRuntimeException e) {
        // Exception is expected.
        assertTrue("The thrown exception is not the expected one.",
            e.getMessage().startsWith(
                "Invalid resource scheduler vcores"));
    }
}
Map<String, String> taskConfigForTopicPartitions(List<TopicPartition> topicPartitions, int taskIndex) {
    // Start from the connector's own configuration and overlay the
    // task-specific keys.
    Map<String, String> taskProps = originalsStrings();
    StringBuilder encoded = new StringBuilder();
    for (TopicPartition topicPartition : topicPartitions) {
        if (encoded.length() > 0) {
            encoded.append(',');
        }
        encoded.append(MirrorUtils.encodeTopicPartition(topicPartition));
    }
    taskProps.put(TASK_TOPIC_PARTITIONS, encoded.toString());
    taskProps.put(TASK_INDEX, Integer.toString(taskIndex));
    return taskProps;
}
@Test
public void testTaskConfigTopicPartitions() {
    List<TopicPartition> assigned = Arrays.asList(
        new TopicPartition("topic-1", 2),
        new TopicPartition("topic-3", 4),
        new TopicPartition("topic-5", 6));
    MirrorSourceConfig config = new MirrorSourceConfig(makeProps());
    // Round-trip the assignment through the generated task properties.
    MirrorSourceTaskConfig taskConfig =
        new MirrorSourceTaskConfig(config.taskConfigForTopicPartitions(assigned, 1));
    assertEquals(taskConfig.taskTopicPartitions(), new HashSet<>(assigned),
        "Setting topic property configuration failed");
}
/**
 * Returns a channel to {@code serverAddress}, reusing a cached channel when
 * it is still alive and otherwise establishing a new connection under a
 * per-address lock so that concurrent callers for the same server do not
 * open duplicate connections.
 *
 * @param serverAddress target server address
 * @return a usable channel to the server
 */
Channel acquireChannel(String serverAddress) {
    Channel channelToServer = channels.get(serverAddress);
    if (channelToServer != null) {
        // Re-validate the cached channel; it may have gone stale since caching.
        channelToServer = getExistAliveChannel(channelToServer, serverAddress);
        if (channelToServer != null) {
            return channelToServer;
        }
    }
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info("will connect to {}", serverAddress);
    }
    // One lock object per address so connects to different servers do not
    // serialize on each other.
    Object lockObj = CollectionUtils.computeIfAbsent(channelLocks, serverAddress, key -> new Object());
    synchronized (lockObj) {
        return doConnect(serverAddress);
    }
}
@Test
void assertAcquireChannelFromPool() {
    setupPoolFactory(nettyPoolKey, channel);
    final Channel acquired = channelManager.acquireChannel("localhost");
    // A fresh acquisition must go through the pool factory.
    verify(poolableFactory).makeObject(nettyPoolKey);
    Assertions.assertEquals(acquired, channel);
}
@VisibleForTesting
public void validateDictTypeExists(String type) {
    // Look up the dictionary type and reject unknown keys outright.
    DictTypeDO found = dictTypeService.getDictType(type);
    if (found == null) {
        throw exception(DICT_TYPE_NOT_EXISTS);
    }
    // An existing but disabled type is equally unusable.
    if (!CommonStatusEnum.ENABLE.getStatus().equals(found.getStatus())) {
        throw exception(DICT_TYPE_NOT_ENABLE);
    }
}
@Test public void testValidateDictTypeExists_success() { // mock 方法,数据类型被禁用 String type = randomString(); when(dictTypeService.getDictType(eq(type))).thenReturn(randomDictTypeDO(type)); // 调用, 成功 dictDataService.validateDictTypeExists(type); }
@Override
public ListOffsetsResult listOffsets(Map<TopicPartition, OffsetSpec> topicPartitionOffsets,
                                     ListOffsetsOptions options) {
    AdminApiFuture.SimpleAdminApiFuture<TopicPartition, ListOffsetsResultInfo> future =
        ListOffsetsHandler.newFuture(topicPartitionOffsets.keySet());
    // Translate each OffsetSpec into the raw offset query the handler expects.
    Map<TopicPartition, Long> offsetQueriesByPartition = new HashMap<>();
    for (Map.Entry<TopicPartition, OffsetSpec> entry : topicPartitionOffsets.entrySet()) {
        offsetQueriesByPartition.put(entry.getKey(), getOffsetFromSpec(entry.getValue()));
    }
    ListOffsetsHandler handler = new ListOffsetsHandler(offsetQueriesByPartition, options, logContext);
    invokeDriver(handler, future, options.timeoutMs);
    return new ListOffsetsResult(future.all());
}
/**
 * Happy-path coverage for {@code Admin.listOffsets}: four single-partition
 * topics queried with latest/earliest/forTimestamp/maxTimestamp specs
 * against a mocked single-node cluster. Asserts that offsets, leader epochs
 * and timestamps round-trip, that per-partition results match the aggregate,
 * and that querying an unknown partition fails.
 */
@Test
public void testListOffsets() throws Exception {
    // Happy path
    Node node0 = new Node(0, "localhost", 8120);
    List<PartitionInfo> pInfos = new ArrayList<>();
    pInfos.add(new PartitionInfo("foo", 0, node0, new Node[]{node0}, new Node[]{node0}));
    pInfos.add(new PartitionInfo("bar", 0, node0, new Node[]{node0}, new Node[]{node0}));
    pInfos.add(new PartitionInfo("baz", 0, node0, new Node[]{node0}, new Node[]{node0}));
    pInfos.add(new PartitionInfo("qux", 0, node0, new Node[]{node0}, new Node[]{node0}));
    final Cluster cluster = new Cluster(
        "mockClusterId",
        singletonList(node0),
        pInfos,
        Collections.emptySet(),
        Collections.emptySet(),
        node0);
    final TopicPartition tp0 = new TopicPartition("foo", 0);
    final TopicPartition tp1 = new TopicPartition("bar", 0);
    final TopicPartition tp2 = new TopicPartition("baz", 0);
    final TopicPartition tp3 = new TopicPartition("qux", 0);
    try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster)) {
        env.kafkaClient().setNodeApiVersions(NodeApiVersions.create());
        env.kafkaClient().prepareResponse(prepareMetadataResponse(cluster, Errors.NONE));
        // Canned broker responses: (timestamp, offset, leaderEpoch) per topic.
        ListOffsetsTopicResponse t0 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp0, Errors.NONE, -1L, 123L, 321);
        ListOffsetsTopicResponse t1 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp1, Errors.NONE, -1L, 234L, 432);
        ListOffsetsTopicResponse t2 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp2, Errors.NONE, 123456789L, 345L, 543);
        ListOffsetsTopicResponse t3 = ListOffsetsResponse.singletonListOffsetsTopicResponse(tp3, Errors.NONE, 234567890L, 456L, 654);
        ListOffsetsResponseData responseData = new ListOffsetsResponseData()
            .setThrottleTimeMs(0)
            .setTopics(asList(t0, t1, t2, t3));
        env.kafkaClient().prepareResponse(new ListOffsetsResponse(responseData));
        // One spec of each kind.
        Map<TopicPartition, OffsetSpec> partitions = new HashMap<>();
        partitions.put(tp0, OffsetSpec.latest());
        partitions.put(tp1, OffsetSpec.earliest());
        partitions.put(tp2,
            OffsetSpec.forTimestamp(System.currentTimeMillis()));
        partitions.put(tp3, OffsetSpec.maxTimestamp());
        ListOffsetsResult result = env.adminClient().listOffsets(partitions);
        Map<TopicPartition, ListOffsetsResultInfo> offsets = result.all().get();
        assertFalse(offsets.isEmpty());
        assertEquals(123L, offsets.get(tp0).offset());
        assertEquals(321, offsets.get(tp0).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp0).timestamp());
        assertEquals(234L, offsets.get(tp1).offset());
        assertEquals(432, offsets.get(tp1).leaderEpoch().get().intValue());
        assertEquals(-1L, offsets.get(tp1).timestamp());
        assertEquals(345L, offsets.get(tp2).offset());
        assertEquals(543, offsets.get(tp2).leaderEpoch().get().intValue());
        assertEquals(123456789L, offsets.get(tp2).timestamp());
        assertEquals(456L, offsets.get(tp3).offset());
        assertEquals(654, offsets.get(tp3).leaderEpoch().get().intValue());
        assertEquals(234567890L, offsets.get(tp3).timestamp());
        // Per-partition futures must agree with the aggregated map.
        assertEquals(offsets.get(tp0), result.partitionResult(tp0).get());
        assertEquals(offsets.get(tp1), result.partitionResult(tp1).get());
        assertEquals(offsets.get(tp2), result.partitionResult(tp2).get());
        assertEquals(offsets.get(tp3), result.partitionResult(tp3).get());
        // Unknown partitions are rejected.
        assertThrows(IllegalArgumentException.class,
            () -> result.partitionResult(new TopicPartition("unknown", 0)).get());
    }
}
/**
 * Asynchronously fetches the number of unread notifications for the given
 * account by delegating to the notification-service client.
 *
 * @param accountId account to query
 * @return future resolving to the unread-notification count
 */
@Async
public Future<Integer> asyncUnreadNotificationCount(long accountId){ return nsClient.AsyncGetUnreadNotifications(accountId); }
/**
 * When the client reports zero unread notifications, the service's async
 * count resolves to zero.
 */
@Test
public void testGetNotificationStatusHasNoNotifications()
        throws InterruptedException, ExecutionException {
    // Stub the client with a completed future. Use the typed
    // AsyncResult<>(…) instead of the raw AsyncResult to avoid an
    // unchecked-assignment warning.
    when(nsClient.AsyncGetUnreadNotifications(eq(1L))).thenReturn(new AsyncResult<>(0));
    int unreadNotifications = notificationService.asyncUnreadNotificationCount(1L).get();
    assertEquals(0, unreadNotifications);
}
/**
 * Configures a multicast socket for cluster discovery: binds it to the
 * configured multicast port, applies the TTL, optionally sets loopback mode
 * and the outgoing interface, sizes the send/receive buffers and joins the
 * multicast group. The group may be overridden by the
 * {@code ClusterProperty.MULTICAST_GROUP} property; the effective value is
 * written back into the supplied {@code multicastConfig}.
 *
 * @param multicastSocket socket to configure
 * @param bindAddress local bind address; used for loopback detection and as
 *     the outgoing interface
 * @param hzProperties Hazelcast properties (group and set-interface overrides)
 * @param multicastConfig multicast configuration, updated with the effective group
 * @param logger logger for warnings; interface/loopback failures are logged,
 *     not rethrown
 */
protected static void configureMulticastSocket(MulticastSocket multicastSocket, Address bindAddress,
        HazelcastProperties hzProperties,
        MulticastConfig multicastConfig, ILogger logger)
        throws SocketException, IOException, UnknownHostException {
    multicastSocket.setReuseAddress(true);
    // bind to receive interface
    multicastSocket.bind(new InetSocketAddress(multicastConfig.getMulticastPort()));
    multicastSocket.setTimeToLive(multicastConfig.getMulticastTimeToLive());
    try {
        boolean loopbackBind = bindAddress.getInetAddress().isLoopbackAddress();
        Boolean loopbackModeEnabled = multicastConfig.getLoopbackModeEnabled();
        if (loopbackModeEnabled != null) {
            // setting loopbackmode is just a hint - and the argument means "disable"!
            // to check the real value we call getLoopbackMode() (and again - return value means "disabled")
            multicastSocket.setLoopbackMode(!loopbackModeEnabled);
        }
        // If LoopBack mode is not enabled (i.e. getLoopbackMode return true) and bind address is a loopback one,
        // then print a warning
        if (loopbackBind && multicastSocket.getLoopbackMode()) {
            logger.warning("Hazelcast is bound to " + bindAddress.getHost() + " and loop-back mode is "
                    + "disabled. This could cause multicast auto-discovery issues "
                    + "and render it unable to work. Check your network connectivity, try to enable the "
                    + "loopback mode and/or force -Djava.net.preferIPv4Stack=true on your JVM.");
        }
        // warning: before modifying lines below, take a look at these links:
        // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4417033
        // http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=6402758
        // https://github.com/hazelcast/hazelcast/pull/19251#issuecomment-891375270
        boolean callSetInterface = OS.isMac() || !loopbackBind;
        String propSetInterface = hzProperties.getString(ClusterProperty.MULTICAST_SOCKET_SET_INTERFACE);
        if (propSetInterface != null) {
            callSetInterface = Boolean.parseBoolean(propSetInterface);
        }
        if (callSetInterface) {
            multicastSocket.setInterface(bindAddress.getInetAddress());
        }
    } catch (Exception e) {
        // Best effort: interface/loopback tuning failures must not prevent startup.
        logger.warning(e);
    }
    multicastSocket.setReceiveBufferSize(SOCKET_BUFFER_SIZE);
    multicastSocket.setSendBufferSize(SOCKET_BUFFER_SIZE);
    // The cluster property takes precedence over the configured group.
    String multicastGroup = hzProperties.getString(ClusterProperty.MULTICAST_GROUP);
    if (multicastGroup == null) {
        multicastGroup = multicastConfig.getMulticastGroup();
    }
    multicastConfig.setMulticastGroup(multicastGroup);
    multicastSocket.joinGroup(InetAddress.getByName(multicastGroup));
    multicastSocket.setSoTimeout(SOCKET_TIMEOUT);
}
@Test
public void testMulticastGroupProperty() throws Exception {
    // Override the multicast group via the cluster property.
    Config config = createConfig(null);
    String customMulticastGroup = "225.225.225.225";
    config.setProperty(ClusterProperty.MULTICAST_GROUP.getName(), customMulticastGroup);
    MulticastConfig multicastConfig = config.getNetworkConfig().getJoin().getMulticastConfig();
    MulticastSocket multicastSocket = mock(MulticastSocket.class);
    HazelcastProperties hzProperties = new HazelcastProperties(config);
    MulticastService.configureMulticastSocket(multicastSocket, new Address("10.0.0.2", 5701),
        hzProperties, multicastConfig, mock(ILogger.class));
    verify(multicastSocket).bind(new InetSocketAddress(multicastConfig.getMulticastPort()));
    verify(multicastSocket).setTimeToLive(multicastConfig.getMulticastTimeToLive());
    // Loopback mode must be left untouched when not explicitly configured.
    verify(multicastSocket, never()).setLoopbackMode(anyBoolean());
    // The property value, not the config default, must be joined.
    verify(multicastSocket).joinGroup(InetAddress.getByName(customMulticastGroup));
}
/**
 * Commits the given queue offset for an in-flight ordered-pop batch and
 * returns the next offset the consumer should receive.
 *
 * Return values:
 * {@code queueOffset + 1} when nothing is tracked for the topic/group/queue;
 * {@code -1} when the tracked offset list is empty or the offset is unknown;
 * {@code -2} when {@code popTime} does not match the tracked batch;
 * otherwise the next offset derived from the updated commit bitmap.
 *
 * @param topic topic name
 * @param group consumer group
 * @param queueId queue id within the topic
 * @param queueOffset offset being acknowledged
 * @param popTime pop timestamp identifying the batch being acknowledged
 * @return the next offset, or one of the sentinel values described above
 */
public long commitAndNext(String topic, String group, int queueId, long queueOffset, long popTime) {
    String key = buildKey(topic, group);
    ConcurrentHashMap<Integer/*queueId*/, OrderInfo> qs = table.get(key);
    if (qs == null) {
        // Nothing tracked for this topic/group: simply advance.
        return queueOffset + 1;
    }
    OrderInfo orderInfo = qs.get(queueId);
    if (orderInfo == null) {
        log.warn("OrderInfo is null, {}, {}, {}", key, queueOffset, orderInfo);
        return queueOffset + 1;
    }
    List<Long> o = orderInfo.offsetList;
    if (o == null || o.isEmpty()) {
        log.warn("OrderInfo is empty, {}, {}, {}", key, queueOffset, orderInfo);
        return -1;
    }
    if (popTime != orderInfo.popTime) {
        // The ack belongs to a different (stale) pop batch.
        log.warn("popTime is not equal to orderInfo saved. key: {}, offset: {}, orderInfo: {}, popTime: {}",
            key, queueOffset, orderInfo, popTime);
        return -2;
    }
    // The offset list stores the first absolute offset followed by deltas
    // from it; locate the index whose absolute offset equals queueOffset.
    Long first = o.get(0);
    int i = 0, size = o.size();
    for (; i < size; i++) {
        long temp;
        if (i == 0) {
            temp = first;
        } else {
            temp = first + o.get(i);
        }
        if (queueOffset == temp) {
            break;
        }
    }
    // not found
    if (i >= size) {
        log.warn("OrderInfo not found commit offset, {}, {}, {}", key, queueOffset, orderInfo);
        return -1;
    }
    //set bit
    orderInfo.setCommitOffsetBit(orderInfo.commitOffsetBit | (1L << i));
    long nextOffset = orderInfo.getNextOffset();
    updateLockFreeTimestamp(topic, group, queueId, orderInfo);
    return nextOffset;
}
/**
 * Exercises commitAndNext: a mismatched popTime is rejected with -2 and
 * leaves the queue blocked; the matching popTime commits offset 1, returns
 * 2 as the next offset and unblocks the queue. Encode/decode round-trips
 * are checked after every step.
 */
@Test
public void testCommitAndNext() {
    consumerOrderInfoManager.update(
        null,
        false,
        TOPIC,
        GROUP,
        QUEUE_ID_0,
        popTime,
        3000,
        Lists.newArrayList(1L),
        new StringBuilder()
    );
    assertEncodeAndDecode();
    // Wrong popTime: rejected with the -2 sentinel.
    assertEquals(-2, consumerOrderInfoManager.commitAndNext(
        TOPIC,
        GROUP,
        QUEUE_ID_0,
        1L,
        popTime - 10
    ));
    assertEncodeAndDecode();
    // The batch is still uncommitted, so the queue stays blocked.
    assertTrue(consumerOrderInfoManager.checkBlock(
        null,
        TOPIC,
        GROUP,
        QUEUE_ID_0,
        TimeUnit.SECONDS.toMillis(3)
    ));
    // Correct popTime: offset 1 commits and 2 is the next offset.
    assertEquals(2, consumerOrderInfoManager.commitAndNext(
        TOPIC,
        GROUP,
        QUEUE_ID_0,
        1L,
        popTime
    ));
    assertEncodeAndDecode();
    // The commit unblocks the queue.
    assertFalse(consumerOrderInfoManager.checkBlock(
        null,
        TOPIC,
        GROUP,
        QUEUE_ID_0,
        TimeUnit.SECONDS.toMillis(3)
    ));
}
public boolean eval(ContentFile<?> file) { // TODO: detect the case where a column is missing from the file using file's max field id. return new MetricsEvalVisitor().eval(file); }
@Test
public void testIntegerGtEq() {
    // Bound above every value: nothing can match.
    assertThat(new StrictMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", INT_MAX_VALUE + 1)).eval(FILE))
        .as("Should not match: no values in range").isFalse();
    // Bound equal to the max: values below it fall outside.
    assertThat(new StrictMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", INT_MAX_VALUE)).eval(FILE))
        .as("Should not match: 78 and lower are not in range").isFalse();
    // Bound just above the min: the min itself is excluded.
    assertThat(new StrictMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", INT_MIN_VALUE + 1)).eval(FILE))
        .as("Should not match: 30 not in range").isFalse();
    // Bound equal to the min: every value qualifies.
    assertThat(new StrictMetricsEvaluator(SCHEMA, greaterThanOrEqual("id", INT_MIN_VALUE)).eval(FILE))
        .as("Should match: all values in range").isTrue();
}
/**
 * Builds a sharding rule for the given database from its configuration and
 * physical data sources.
 *
 * @param ruleConfig sharding rule configuration
 * @param databaseName logical database name (used in error reporting)
 * @param protocolType database protocol type (unused here)
 * @param resourceMetaData resource meta data supplying the data source map
 * @param builtRules rules built before this one (unused here)
 * @param computeNodeInstanceContext compute node instance context
 * @return the built sharding rule
 * @throws MissingRequiredShardingConfigurationException when no data source is configured
 */
@Override
public ShardingRule build(final ShardingRuleConfiguration ruleConfig, final String databaseName,
        final DatabaseType protocolType, final ResourceMetaData resourceMetaData,
        final Collection<ShardingSphereRule> builtRules,
        final ComputeNodeInstanceContext computeNodeInstanceContext) {
    // Fail fast: sharding cannot work without at least one physical data source.
    ShardingSpherePreconditions.checkNotEmpty(resourceMetaData.getDataSourceMap(),
        () -> new MissingRequiredShardingConfigurationException("Data source", databaseName));
    return new ShardingRule(ruleConfig, resourceMetaData.getDataSourceMap(), computeNodeInstanceContext);
}
@SuppressWarnings("unchecked")
@Test
void assertBuildWithEmptyDataSourceMap() {
    // Building against resource meta data with no data sources must fail fast.
    ResourceMetaData emptyResource = mock(ResourceMetaData.class);
    assertThrows(MissingRequiredShardingConfigurationException.class,
        () -> builder.build(ruleConfig, "sharding_db", new MySQLDatabaseType(), emptyResource,
            Collections.emptyList(), mock(ComputeNodeInstanceContext.class)));
}
@Override
public Position offsetToPosition(int offset, Bias bias) {
    // Anchor at the document origin and advance by the requested offset.
    Position origin = position(0, 0);
    return origin.offsetBy(offset, bias);
}
@Test
public void testRightBoundary() {
    // An offset past the end lands beyond the last column of the last line...
    Position pos = navigator.offsetToPosition(100, Forward);
    assertEquals(4, pos.getMajor());
    assertEquals(60, pos.getMinor());
    // ...and clamping pulls the minor coordinate back into range.
    Position clamped = pos.clamp();
    assertEquals(4, clamped.getMajor());
    assertEquals(9, clamped.getMinor());
}
/**
 * Starts the cache; may only be called once. Optionally creates the parent
 * container nodes, registers for connection-state events and, when already
 * connected, kicks off the initial population of the tree.
 *
 * @return this instance, for chaining
 * @throws IllegalStateException if the cache was already started
 * @throws Exception on errors creating the parent containers
 */
public TreeCache start() throws Exception {
    Preconditions.checkState(treeState.compareAndSet(TreeState.LATENT, TreeState.STARTED), "already started");
    if (createParentNodes) {
        client.createContainers(root.path);
    }
    // Register for connection events before checking the current state so a
    // transition cannot be missed between the check and the registration.
    client.getConnectionStateListenable().addListener(connectionStateListener);
    if (client.getZookeeperClient().isConnected()) {
        root.wasCreated();
    }
    return this;
}
@Test
public void testStartEmptyDeeper() throws Exception {
    // Cache rooted at a node that doesn't exist yet: it must initialize
    // empty, ignore creation of intermediate parents, and fire NODE_ADDED
    // only when the root path itself is created.
    cache = newTreeCacheWithListeners(client, "/test/foo/bar");
    cache.start();
    assertEvent(TreeCacheEvent.Type.INITIALIZED);
    client.create().creatingParentsIfNeeded().forPath("/test/foo");
    assertNoMoreEvents();
    client.create().forPath("/test/foo/bar");
    assertEvent(TreeCacheEvent.Type.NODE_ADDED, "/test/foo/bar");
    assertNoMoreEvents();
}
/**
 * Deprecated init overload retained for the old ProcessorContext API;
 * delegates to the wrapped store unchanged.
 */
@Deprecated
@Override
public void init(final org.apache.kafka.streams.processor.ProcessorContext context, final StateStore root) {
    store.init(context, root);
}
@Test
public void shouldInitTimestampedStore() {
    // The wrapper must forward init() to the underlying timestamped store,
    // passing itself as the root store.
    givenWrapperWithTimestampedStore();
    final StateStoreContext mockContext = mock(StateStoreContext.class);
    wrapper.init(mockContext, wrapper);
    verify(timestampedStore).init(mockContext, wrapper);
}
/**
 * Logs a warning through the configured logger.
 *
 * @param tag     log tag identifying the caller
 * @param message message, optionally with format placeholders
 * @param args    format arguments for {@code message}
 */
public static void w(String tag, String message, Object... args) {
    sLogger.w(tag, message, args);
}
@Test
public void warning() {
    // LogManager.w must delegate to the underlying logger with the same
    // tag and message (no varargs supplied here).
    String tag = "TestTag";
    String message = "Test message";
    LogManager.w(tag, message);
    verify(logger).w(tag, message);
}
/**
 * Inserts {@code object} at {@code index}, shifting every element at or
 * after that index up by one before the insert.
 */
public void put(final T object, final int index) {
    // Shift first so the slot at `index` is free for the new object.
    doForAll(index, new Callback<T>() {
        @Override
        public void callback(final T t) {
            t.setIndex(t.getIndex() + 1);
        }
    });
    object.setIndex(index);
    super.put(object);
}
@Test
void testAddToMiddle() throws Exception {
    // Insert at index 1: existing entries at 1 and 2 must shift to 2 and 3,
    // and the entry at 0 must stay put.
    final Person smurf = new Person("Smurf", 55);
    map.put(smurf, 1);
    assertThat(map.get(IndexKey.INDEX_ID).size()).isEqualTo(4);
    assertThat(map.get(IndexKey.INDEX_ID).get(new Value(0))).containsExactly(toni);
    assertThat(map.get(IndexKey.INDEX_ID).get(new Value(1))).containsExactly(smurf);
    assertThat(map.get(IndexKey.INDEX_ID).get(new Value(2))).containsExactly(eder);
    assertThat(map.get(IndexKey.INDEX_ID).get(new Value(3))).containsExactly(michael);
}
/**
 * Fetches the publisher agreement the given user signed with the Eclipse
 * Foundation.
 *
 * @return the parsed agreement, or {@code null} when the user has no Eclipse
 *         person id or has not signed an agreement (the endpoint answers 404)
 * @throws ErrorResultException on any other request failure
 */
public PublisherAgreement getPublisherAgreement(UserData user) {
    var eclipseToken = checkEclipseToken(user);
    var personId = user.getEclipsePersonId();
    if (StringUtils.isEmpty(personId)) {
        // Without a person id there is nothing to look up.
        return null;
    }
    checkApiUrl();
    var urlTemplate = eclipseApiUrl + "openvsx/publisher_agreement/{personId}";
    var uriVariables = Map.of("personId", personId);
    var headers = new HttpHeaders();
    headers.setBearerAuth(eclipseToken.accessToken);
    headers.setAccept(Arrays.asList(MediaType.APPLICATION_JSON));
    var request = new HttpEntity<>(headers);
    try {
        var json = restTemplate.exchange(urlTemplate, HttpMethod.GET, request, String.class, uriVariables);
        return parseAgreementResponse(json);
    } catch (RestClientException exc) {
        HttpStatusCode status = HttpStatus.INTERNAL_SERVER_ERROR;
        if (exc instanceof HttpStatusCodeException) {
            status = ((HttpStatusCodeException) exc).getStatusCode();
            // The endpoint yields 404 if the specified user has not signed a publisher agreement
            if (status == HttpStatus.NOT_FOUND)
                return null;
        }
        // Expand the template only for the log message, so the failing URL
        // is readable.
        var url = UriComponentsBuilder.fromUriString(urlTemplate).build(uriVariables);
        logger.error("Get request failed with URL: " + url, exc);
        throw new ErrorResultException("Request for retrieving publisher agreement failed: " + exc.getMessage(), status);
    }
}
@Test
public void testGetPublisherAgreementNotAuthenticated() throws Exception {
    // A user without the required Eclipse identity yields null rather than
    // an exception.
    var user = mockUser();
    var agreement = eclipse.getPublisherAgreement(user);
    assertThat(agreement).isNull();
}
/**
 * Classifies the processing state of all recently-active nodes relative to
 * the given time range: NONE_ACTIVE, SOME_OVERLOADED, ALL_IDLE, or
 * SOME_UP_TO_DATE.
 */
public ProcessingNodesState calculateProcessingState(TimeRange timeRange) {
    // Nodes whose status was updated before this timestamp are considered
    // inactive and are excluded from the query.
    final DateTime updateThresholdTimestamp = clock.nowUTC().minus(updateThreshold.toMilliseconds());
    try (DBCursor<ProcessingStatusDto> statusCursor = db.find(activeNodes(updateThresholdTimestamp))) {
        if (!statusCursor.hasNext()) {
            return ProcessingNodesState.NONE_ACTIVE;
        }
        int activeNodes = 0;
        int idleNodes = 0;
        while (statusCursor.hasNext()) {
            activeNodes++;
            ProcessingStatusDto nodeProcessingStatus = statusCursor.next();
            DateTime lastIndexedMessage = nodeProcessingStatus.receiveTimes().postIndexing();
            // If node is behind and is busy, it is overloaded.
            if (lastIndexedMessage.isBefore(timeRange.getTo()) && isBusy(nodeProcessingStatus)) {
                return ProcessingNodesState.SOME_OVERLOADED;
            }
            // If a node did not index a message that is at least at the start of the time range,
            // we consider it idle.
            if (lastIndexedMessage.isBefore(timeRange.getFrom())) {
                idleNodes++;
            }
        }
        // Only if all nodes are idle, we stop the processing.
        if (activeNodes == idleNodes) {
            return ProcessingNodesState.ALL_IDLE;
        }
    }
    // If none of the above checks return, we can assume that some nodes have already indexed the given timerange.
    return ProcessingNodesState.SOME_UP_TO_DATE;
}
@Test
@MongoDBFixtures("processing-status-idle-and-processing-node.json")
public void processingStateIdleAndProcessingNode() {
    // Fixture contains one idle node and one node that has indexed past the
    // range; a mix of idle and up-to-date nodes must yield SOME_UP_TO_DATE.
    when(clock.nowUTC()).thenReturn(DateTime.parse("2019-01-01T04:00:00.000Z"));
    when(updateThreshold.toMilliseconds()).thenReturn(Duration.hours(1).toMilliseconds());
    TimeRange timeRange = AbsoluteRange.create("2019-01-01T02:00:00.000Z", "2019-01-01T03:00:00.000Z");
    assertThat(dbService.calculateProcessingState(timeRange)).isEqualTo(ProcessingNodesState.SOME_UP_TO_DATE);
}
/**
 * Renders this transaction using the parameterized variant with no chain
 * or network context, wrapped in the standard helper format.
 */
@Override
public String toString() {
    return MoreObjects.toStringHelper(this)
            .addValue(toString(null, null))
            .toString();
}
@Test
public void testToString() {
    // Supplying a network lets toString render addresses, so that variant
    // must produce strictly more output than the network-less one.
    Transaction tx = FakeTxBuilder.createFakeTx(TESTNET.network());
    int lengthWithAddresses = tx.toString(null, BitcoinNetwork.TESTNET).length();
    int lengthWithoutAddresses = tx.toString(null, null).length();
    assertTrue(lengthWithAddresses > lengthWithoutAddresses);
}
/**
 * Splits {@code toSplit} on the given pattern, strips every character that is
 * not a letter, digit, or space from each token, and drops empty tokens.
 *
 * @param toSplit text to tokenize
 * @param pattern delimiter pattern used to split the text
 * @return cleaned, non-empty tokens in their original order
 */
static List<String> splitText(String toSplit, Pattern pattern) {
    // Compile the cleanup regex once per call; the original used
    // String.replaceAll, which recompiles the pattern for every token.
    final Pattern nonAlphanumeric = Pattern.compile("[^a-zA-Z0-9 ]");
    return pattern.splitAsStream(toSplit)
            .map(token -> nonAlphanumeric.matcher(token).replaceAll(""))
            .filter(token -> !token.isEmpty())
            .collect(Collectors.toList());
}
@Test
void splitText() {
    // Every returned token must be stripped of punctuation (no characters
    // matching unwantedPattern) while still containing real content.
    final Pattern unwantedPattern = Pattern.compile("[^a-zA-Z0-9 ]");
    final Pattern wantedPattern = Pattern.compile("[a-zA-Z0-9]");
    // Split on whitespace only.
    Pattern pattern = Pattern.compile("\\s+");
    List<String> retrieved = KiePMMLTextIndex.splitText(TEXT_0, pattern);
    assertThat(retrieved).hasSize(25);
    retrieved.forEach(txt -> {
        assertThat(unwantedPattern.matcher(txt).find()).isFalse();
        assertThat(wantedPattern.matcher(txt).find()).isTrue();
    });
    // Splitting on hyphens too yields one extra token.
    pattern = Pattern.compile("[\\s\\-]");
    retrieved = KiePMMLTextIndex.splitText(TEXT_0, pattern);
    assertThat(retrieved).hasSize(26);
    retrieved.forEach(txt -> {
        assertThat(unwantedPattern.matcher(txt).find()).isFalse();
        assertThat(wantedPattern.matcher(txt).find()).isTrue();
    });
}
/**
 * Creates or updates the node at {@code path} with the given data as a
 * persistent ZooKeeper node.
 */
@Override
public void createOrUpdate(final String path, final Object data) {
    zkClient.createOrUpdate(path, data, CreateMode.PERSISTENT);
}
@Test
public void testOnAppAuthChangedUpdate() {
    // An UPDATE auth event must write the auth data to its well-known
    // ZooKeeper path exactly once, as a persistent node.
    AppAuthData appAuthData = AppAuthData.builder().appKey(MOCK_APP_KEY).appSecret(MOCK_APP_SECRET).build();
    String appAuthPath = DefaultPathConstants.buildAppAuthPath(appAuthData.getAppKey());
    zookeeperDataChangedListener.onAppAuthChanged(ImmutableList.of(appAuthData), DataEventTypeEnum.UPDATE);
    verify(zkClient, times(1)).createOrUpdate(appAuthPath, appAuthData, CreateMode.PERSISTENT);
}
/**
 * Returns the status captured at construction time.
 */
@Override
public Status status() {
    return status;
}
@Test
void status() {
    // The accessor must return the exact instance handed in, not a copy.
    assertThat(response.status()).isSameAs(status);
}
/**
 * Copies the remaining bytes of {@code buf} into a new array without
 * disturbing the buffer's position.
 *
 * @param buf source buffer; its position and limit are left unchanged
 * @return a fresh array holding the bytes between position and limit
 */
public static byte[] byteBufferToArray(ByteBuffer buf) {
    // Reading through a duplicate shares the content but keeps the source
    // buffer's position untouched, so no save/restore is needed.
    final byte[] bytes = new byte[buf.remaining()];
    buf.duplicate().get(bytes);
    return bytes;
}
@Test
public void testByteBufferToArray() {
    // Round-trip a populated buffer and an empty one.
    assertArrayEquals(new byte[]{1, 2, 3}, MessageUtil.byteBufferToArray(ByteBuffer.wrap(new byte[]{1, 2, 3})));
    assertArrayEquals(new byte[]{}, MessageUtil.byteBufferToArray(ByteBuffer.wrap(new byte[]{})));
}
/**
 * Returns the name of the configured external security realm, or
 * {@code null} when no external realm is configured.
 */
@CheckForNull
public String getExternalUserAuthentication() {
    SecurityRealm realm = securityRealmFactory.getRealm();
    if (realm == null) {
        return null;
    }
    return realm.getName();
}
@Test
public void getExternalUserAuthentication_whenDefined_shouldReturnName() {
    // With a realm configured, its name must be surfaced verbatim.
    mockSecurityRealmFactory("Security Realm");
    assertThat(commonSystemInformation.getExternalUserAuthentication())
        .isEqualTo("Security Realm");
}
/**
 * Returns the Maven parent group id, or {@code null} if not set.
 */
public String getParentGroupId() {
    return parentGroupId;
}
@Test
public void testGetParentGroupId() {
    // Setter/getter round trip with an empty string.
    Model instance = new Model();
    instance.setParentGroupId("");
    String expResult = "";
    String result = instance.getParentGroupId();
    assertEquals(expResult, result);
}
/**
 * Copies the parent's target mining fields into a segment model's mining
 * schema when the segment declares no target fields of its own.
 *
 * @param parentMiningSchema mining schema of the enclosing (parent) model
 * @param childrenModel      segment model to patch in place
 */
static void populateMissingTargetFieldInSegment(final MiningSchema parentMiningSchema, final Model childrenModel) {
    List<MiningField> parentTargetFields = getMiningTargetFields(parentMiningSchema.getMiningFields());
    List<MiningField> childrenTargetFields = getMiningTargetFields(childrenModel.getMiningSchema().getMiningFields());
    if (childrenTargetFields.isEmpty()) {
        // toArray(new T[0]) is the preferred idiom over a presized array:
        // simpler and at least as fast on modern JVMs.
        childrenModel.getMiningSchema().addMiningFields(parentTargetFields.toArray(new MiningField[0]));
    }
}
@Test
void populateMissingTargetFieldInSegment() throws Exception {
    // Load a PMML fixture whose first segment has no target field, patch it,
    // then verify every parent target field now appears on the segment.
    final InputStream inputStream = getFileInputStream(NO_MODELNAME_NO_SEGMENT_ID_NOSEGMENT_TARGET_FIELD_SAMPLE);
    final PMML pmml = org.jpmml.model.PMMLUtil.unmarshal(inputStream);
    final Model retrieved = pmml.getModels().get(0);
    assertThat(retrieved).isInstanceOf(MiningModel.class);
    MiningModel miningModel = (MiningModel) retrieved;
    Model toPopulate = miningModel.getSegmentation().getSegments().get(0).getModel();
    assertThat(getMiningTargetFields(toPopulate.getMiningSchema())).isEmpty();
    KiePMMLUtil.populateMissingTargetFieldInSegment(retrieved.getMiningSchema(), toPopulate);
    List<MiningField> childrenTargetFields = getMiningTargetFields(toPopulate.getMiningSchema());
    assertThat(childrenTargetFields).isNotEmpty();
    getMiningTargetFields(miningModel.getMiningSchema()).forEach(parentTargetField ->
        assertThat(childrenTargetFields).contains(parentTargetField));
}
/**
 * Creates an IPv6 address from a 16-byte array.
 *
 * @param value the 16 bytes of the address
 * @return the IPv6 address
 */
public static Ip6Address valueOf(byte[] value) {
    return new Ip6Address(value);
}
@Test
public void testValueOfByteArrayOffsetIPv6() {
    // valueOf(byte[], offset) must read exactly 16 bytes starting at the
    // offset, ignoring preamble and trailing bytes.
    Ip6Address ipAddress;
    byte[] value;
    value = new byte[] {11, 22, 33,                   // Preamble
                        0x11, 0x11, 0x22, 0x22,
                        0x33, 0x33, 0x44, 0x44,
                        0x55, 0x55, 0x66, 0x66,
                        0x77, 0x77, (byte) 0x88, (byte) 0x88,
                        44, 55};                      // Extra bytes
    ipAddress = Ip6Address.valueOf(value, 3);
    assertThat(ipAddress.toString(), is("1111:2222:3333:4444:5555:6666:7777:8888"));
    // All-zero address.
    value = new byte[] {11, 22,                       // Preamble
                        0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0x00, 0x00,
                        33};                          // Extra bytes
    ipAddress = Ip6Address.valueOf(value, 2);
    assertThat(ipAddress.toString(), is("::"));
    // All-ones address.
    value = new byte[] {11, 22,                       // Preamble
                        (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff,
                        (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff,
                        (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff,
                        (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff,
                        33};                          // Extra bytes
    ipAddress = Ip6Address.valueOf(value, 2);
    assertThat(ipAddress.toString(), is("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"));
}
/**
 * Returns this component's fixed identifier.
 */
@Override
@NonNull
public String getId() {
    return ID;
}
@Test
public void shouldBePoliteAboutBadUrl() throws Exception {
    // A half-typed repository URL must not blow up the endpoint: the call
    // still succeeds (200) but no credential is resolved.
    User user = login();
    String scmPath = "/organizations/" + getOrgName() + "/scm/git/";
    // Let's say the user has only started typing a url
    String repoPath = scmPath + "?repositoryUrl=htt";
    Map resp = new RequestBuilder(baseUrl)
        .status(200)
        .jwtToken(getJwtToken(j.jenkins, user.getId(), user.getId()))
        .crumb(crumb)
        .get(repoPath)
        .build(Map.class);
    assertNull(resp.get("credentialId"));
}
/**
 * Returns the rate limiter registered under {@code name}, creating it with
 * the registry's default configuration when absent.
 */
@Override
public RateLimiter rateLimiter(final String name) {
    return rateLimiter(name, getDefaultConfig());
}
@Test
public void rateLimiterNewWithNullNonDefaultConfig() throws Exception {
    // Passing a null config must fail fast with the registry's standard
    // null-check message rather than storing a broken limiter.
    exception.expect(NullPointerException.class);
    exception.expectMessage(CONFIG_MUST_NOT_BE_NULL);
    RateLimiterRegistry registry = new InMemoryRateLimiterRegistry(config);
    RateLimiterConfig rateLimiterConfig = null;
    registry.rateLimiter("name", rateLimiterConfig);
}
/**
 * Runs the lockable-object demo: many creatures ("Feind" tasks — spelling
 * is the project's class name) race to acquire a single sword.
 */
@Override
public void run() {
    // The target object for this example.
    var sword = new SwordOfAragorn();
    // Creation of creatures.
    List<Creature> creatures = new ArrayList<>();
    for (var i = 0; i < WORKERS; i++) {
        creatures.add(new Elf(String.format("Elf %s", i)));
        creatures.add(new Orc(String.format("Orc %s", i)));
        creatures.add(new Human(String.format("Human %s", i)));
    }
    int totalFiends = WORKERS * MULTIPLICATION_FACTOR;
    ExecutorService service = Executors.newFixedThreadPool(totalFiends);
    // Attach every creature and the sword is a Fiend to fight for the sword.
    for (var i = 0; i < totalFiends; i = i + MULTIPLICATION_FACTOR) {
        service.submit(new Feind(creatures.get(i), sword));
        service.submit(new Feind(creatures.get(i + 1), sword));
        service.submit(new Feind(creatures.get(i + 2), sword));
    }
    // Wait for program to terminate.
    try {
        // awaitTermination is used as a bounded wait here; shutdown happens
        // afterwards in the finally block, so the timeout path is expected.
        if (!service.awaitTermination(WAIT_TIME, TimeUnit.SECONDS)) {
            LOGGER.info("The master of the sword is now {}.", sword.getLocker().getName());
        }
    } catch (InterruptedException e) {
        LOGGER.error(e.getMessage());
        // Restore the interrupt flag per standard practice.
        Thread.currentThread().interrupt();
    } finally {
        service.shutdown();
    }
}
@Test
void shouldExecuteApplicationAsRunnableWithoutException() {
    // Smoke test: the demo app must run to completion without throwing.
    assertDoesNotThrow(() -> (new App()).run());
}
@ExecuteOn(TaskExecutors.IO)
@Post(uri = "/resume/by-ids")
@Operation(tags = {"Executions"}, summary = "Resume a list of paused executions")
@ApiResponse(responseCode = "200", description = "On success", content = {@Content(schema = @Schema(implementation = BulkResponse.class))})
@ApiResponse(responseCode = "422", description = "Resumed with errors", content = {@Content(schema = @Schema(implementation = BulkErrorResponse.class))})
public MutableHttpResponse<?> resumeByIds(
    @Parameter(description = "The execution id") @Body List<String> executionsId
) throws Exception {
    // Phase 1: validate every id up front. Only when ALL ids are valid do we
    // resume anything, so the operation is all-or-nothing.
    List<Execution> executions = new ArrayList<>();
    Set<ManualConstraintViolation<String>> invalids = new HashSet<>();
    // Cache of flowId_revision -> Flow to avoid re-fetching the same flow
    // for executions that share it.
    Map<String, Flow> flows = new HashMap<>();

    for (String executionId : executionsId) {
        Optional<Execution> execution = executionRepository.findById(tenantService.resolveTenant(), executionId);

        if (execution.isPresent() && !execution.get().getState().isPaused()) {
            invalids.add(ManualConstraintViolation.of(
                "execution not in state PAUSED",
                executionId,
                String.class,
                "execution",
                executionId
            ));
        } else if (execution.isEmpty()) {
            invalids.add(ManualConstraintViolation.of(
                "execution not found",
                executionId,
                String.class,
                "execution",
                executionId
            ));
        } else {
            executions.add(execution.get());
        }
    }

    if (!invalids.isEmpty()) {
        return HttpResponse.badRequest(BulkErrorResponse
            .builder()
            .message("invalid bulk resume")
            .invalids(invalids)
            .build()
        );
    }

    // Phase 2: resume each validated execution and emit it on the queue.
    for (Execution execution : executions) {
        // NOTE(review): the get-then-put pair below could be
        // Map.computeIfAbsent if findByExecutionWithoutAcl throws no checked
        // exception — verify before simplifying.
        var flow = flows.get(execution.getFlowId() + "_" + execution.getFlowRevision()) != null ?
            flows.get(execution.getFlowId() + "_" + execution.getFlowRevision()) :
            flowRepository.findByExecutionWithoutAcl(execution);
        flows.put(execution.getFlowId() + "_" + execution.getFlowRevision(), flow);
        Execution resumeExecution = this.executionService.resume(execution, flow, State.Type.RUNNING);
        this.executionQueue.emit(resumeExecution);
    }

    return HttpResponse.ok(BulkResponse.builder().count(executions.size()).build());
}
@Test
void resumeByIds() throws TimeoutException, InterruptedException {
    // Start two flows and run them until they pause.
    Execution pausedExecution1 = runnerUtils.runOneUntilPaused(null, TESTS_FLOW_NS, "pause");
    Execution pausedExecution2 = runnerUtils.runOneUntilPaused(null, TESTS_FLOW_NS, "pause");
    assertThat(pausedExecution1.getState().isPaused(), is(true));
    assertThat(pausedExecution2.getState().isPaused(), is(true));

    // resume executions
    BulkResponse resumeResponse = client.toBlocking().retrieve(
        HttpRequest.POST(
            "/api/v1/executions/resume/by-ids",
            List.of(pausedExecution1.getId(), pausedExecution2.getId())
        ),
        BulkResponse.class
    );
    assertThat(resumeResponse.getCount(), is(2));

    // check that the executions are no more paused
    Thread.sleep(100);
    Execution resumedExecution1 = client.toBlocking().retrieve(
        GET("/api/v1/executions/" + pausedExecution1.getId()),
        Execution.class
    );
    Execution resumedExecution2 = client.toBlocking().retrieve(
        GET("/api/v1/executions/" + pausedExecution2.getId()),
        Execution.class
    );
    assertThat(resumedExecution1.getState().isPaused(), is(false));
    assertThat(resumedExecution2.getState().isPaused(), is(false));

    // attempt to resume no more paused executions: bulk validation rejects
    // the whole request with 400.
    HttpClientResponseException e = assertThrows(
        HttpClientResponseException.class,
        () -> client.toBlocking().retrieve(HttpRequest.POST(
            "/api/v1/executions/resume/by-ids",
            List.of(pausedExecution1.getId(), pausedExecution2.getId())
        ))
    );
    assertThat(e.getStatus(), is(HttpStatus.BAD_REQUEST));
}
/**
 * Returns an {@link AvroGenericCoder} for generic records of the given
 * Avro schema.
 */
public static AvroGenericCoder of(Schema schema) {
    return AvroGenericCoder.of(schema);
}
@Test
public void testDeterminismSortedMap() {
    // A SortedMap field has a deterministic iteration order, so the derived
    // Avro coder must be deterministic.
    assertDeterministic(AvroCoder.of(StringSortedMapField.class));
}
/**
 * Parses a JSON object into a {@code Map<String, T>}.
 *
 * @param json         JSON text to parse
 * @param valueTypeRef class of the map's value type
 * @return the parsed map; an empty mutable map on parse failure (best-effort,
 *         never throws)
 */
public static <T> Map<String, T> jsonToMap(final String json, final Class<T> valueTypeRef) {
    try {
        JavaType t = MAPPER.getTypeFactory().constructParametricType(HashMap.class, String.class, valueTypeRef);
        return MAPPER.readValue(json, t);
    } catch (IOException e) {
        // Parameterized logging avoids eager string concatenation; with the
        // throwable as the trailing argument SLF4J still logs the stack trace.
        LOG.warn("write to map error: {}", json, e);
        // NOTE(review): success path yields a HashMap but the fallback is a
        // LinkedHashMap — preserved as-is, but worth unifying.
        return new LinkedHashMap<>();
    }
}
@Test
public void testJsonToMapByValueTypeRef() {
    // Parsing with Object values must preserve string fields verbatim.
    Map<String, Object> stringObjectMap = JsonUtils.jsonToMap(EXPECTED_JSON, Object.class);
    assertEquals(stringObjectMap.get("name"), "test object");
}
/**
 * Creates an executor over the given operations that reports into a shared
 * counter set and execution-state tracker.
 */
public static IntrinsicMapTaskExecutor withSharedCounterSet(
    List<Operation> operations,
    CounterSet counters,
    ExecutionStateTracker executionStateTracker) {
    return new IntrinsicMapTaskExecutor(operations, counters, executionStateTracker);
}
@Test
public void testExecuteMapTaskExecutor() throws Exception {
    // Operations must be started in reverse order (downstream first) and
    // finished in forward order (upstream first).
    Operation o1 = Mockito.mock(Operation.class);
    Operation o2 = Mockito.mock(Operation.class);
    Operation o3 = Mockito.mock(Operation.class);
    List<Operation> operations = Arrays.asList(new Operation[] {o1, o2, o3});
    ExecutionStateTracker stateTracker = Mockito.mock(ExecutionStateTracker.class);
    try (IntrinsicMapTaskExecutor executor =
             IntrinsicMapTaskExecutor.withSharedCounterSet(operations, counterSet, stateTracker)) {
        executor.execute();
    }
    InOrder inOrder = Mockito.inOrder(stateTracker, o1, o2, o3);
    inOrder.verify(o3).start();
    inOrder.verify(o2).start();
    inOrder.verify(o1).start();
    inOrder.verify(o1).finish();
    inOrder.verify(o2).finish();
    inOrder.verify(o3).finish();
}
/**
 * Produces the anonymized textual form of the given parse tree.
 */
public String anonymize(final ParseTree tree) {
    return build(tree);
}
@Test
public void shouldAnonymizeExplainStatementCorrectly() {
    // Identifiers inside EXPLAIN are replaced with generic placeholders
    // while the statement structure is preserved.
    Assert.assertEquals("EXPLAIN query;", anon.anonymize("EXPLAIN my_query;"));
    Assert.assertEquals("EXPLAIN SELECT * FROM source1;", anon.anonymize("EXPLAIN SELECT * from S1;"));
}
/**
 * Extracts candidate mod/package keywords from a Minecraft crash report's
 * stack trace section.
 *
 * @param crashReport full crash-report text
 * @return distinct keywords: package-name segments (minus the final two,
 *         i.e. class and method) and, from Forge-style "xf:" module tokens,
 *         the module names; well-known framework packages are filtered out
 */
public static Set<String> findKeywordsFromCrashReport(String crashReport) {
    Matcher matcher = CRASH_REPORT_STACK_TRACE_PATTERN.matcher(crashReport);
    Set<String> result = new HashSet<>();
    if (matcher.find()) {
        for (String line : matcher.group("stacktrace").split("\\n")) {
            Matcher lineMatcher = STACK_TRACE_LINE_PATTERN.matcher(line);
            if (lineMatcher.find()) {
                String[] method = lineMatcher.group("method").split("\\.");
                // Skip the last two segments (class name and method name);
                // only package components are keyword candidates.
                for (int i = 0; i < method.length - 2; i++) {
                    if (PACKAGE_KEYWORD_BLACK_LIST.contains(method[i])) {
                        continue;
                    }
                    result.add(method[i]);
                }
                Matcher moduleMatcher = STACK_TRACE_LINE_MODULE_PATTERN.matcher(line);
                if (moduleMatcher.find()) {
                    for (String module : moduleMatcher.group("tokens").split(",")) {
                        // Tokens look like "prefix:name[:...]"; only "xf"
                        // (transformer/mod) entries contribute keywords.
                        String[] split = module.split(":");
                        if (split.length >= 2 && "xf".equals(split[0])) {
                            if (PACKAGE_KEYWORD_BLACK_LIST.contains(split[1])) {
                                continue;
                            }
                            result.add(split[1]);
                        }
                    }
                }
            }
        }
    }
    return result;
}
@Test
public void icycream() throws IOException {
    // The icycream mod's crash report must yield exactly that one keyword.
    assertEquals(
        new HashSet<>(Collections.singletonList("icycream")),
        CrashReportAnalyzer.findKeywordsFromCrashReport(loadLog("/crash-report/mod/icycream.txt")));
}
/**
 * Builds the S3 client connection settings from configuration, applying
 * defaults and minimum-duration floors where required.
 */
static ConnectionSettings createConnectionSettings(Configuration conf) {
    int maxConnections = S3AUtils.intOption(conf, MAXIMUM_CONNECTIONS,
        DEFAULT_MAXIMUM_CONNECTIONS, 1);

    final boolean keepAlive = conf.getBoolean(CONNECTION_KEEPALIVE,
        DEFAULT_CONNECTION_KEEPALIVE);

    // time to acquire a connection from the pool
    Duration acquisitionTimeout = getDuration(conf, CONNECTION_ACQUISITION_TIMEOUT,
        DEFAULT_CONNECTION_ACQUISITION_TIMEOUT_DURATION, TimeUnit.MILLISECONDS,
        minimumOperationDuration);

    // set the connection TTL irrespective of whether the connection is in use or not.
    // this can balance requests over different S3 servers, and avoid failed
    // connections. See HADOOP-18845.
    Duration connectionTTL = getDuration(conf, CONNECTION_TTL,
        DEFAULT_CONNECTION_TTL_DURATION, TimeUnit.MILLISECONDS,
        null);

    Duration establishTimeout = getDuration(conf, ESTABLISH_TIMEOUT,
        DEFAULT_ESTABLISH_TIMEOUT_DURATION, TimeUnit.MILLISECONDS,
        minimumOperationDuration);

    // limit on the time a connection can be idle in the pool
    Duration maxIdleTime = getDuration(conf, CONNECTION_IDLE_TIME,
        DEFAULT_CONNECTION_IDLE_TIME_DURATION, TimeUnit.MILLISECONDS,
        Duration.ZERO);

    Duration socketTimeout = getDuration(conf, SOCKET_TIMEOUT,
        DEFAULT_SOCKET_TIMEOUT_DURATION, TimeUnit.MILLISECONDS,
        minimumOperationDuration);

    return new ConnectionSettings(
        maxConnections,
        keepAlive,
        acquisitionTimeout,
        connectionTTL,
        establishTimeout,
        maxIdleTime,
        socketTimeout);
}
@Test
public void testLoadUnsetValues() {
    // With nothing set in the configuration, every connection setting must
    // fall back to its documented default.
    final AWSClientConfig.ConnectionSettings conn = createConnectionSettings(conf());
    assertDuration(CONNECTION_ACQUISITION_TIMEOUT, DEFAULT_CONNECTION_ACQUISITION_TIMEOUT_DURATION,
        conn.getAcquisitionTimeout());
    assertDuration(CONNECTION_TTL, DEFAULT_CONNECTION_TTL_DURATION,
        conn.getConnectionTTL());
    assertDuration(CONNECTION_IDLE_TIME, DEFAULT_CONNECTION_IDLE_TIME_DURATION,
        conn.getMaxIdleTime());
    assertDuration(ESTABLISH_TIMEOUT, DEFAULT_ESTABLISH_TIMEOUT_DURATION,
        conn.getEstablishTimeout());
    assertDuration(SOCKET_TIMEOUT, DEFAULT_SOCKET_TIMEOUT_DURATION,
        conn.getSocketTimeout());
    Assertions.assertThat(conn.getMaxConnections())
        .describedAs(MAXIMUM_CONNECTIONS)
        .isEqualTo(DEFAULT_MAXIMUM_CONNECTIONS);
    Assertions.assertThat(conn.isKeepAlive())
        .describedAs(CONNECTION_KEEPALIVE)
        .isEqualTo(DEFAULT_CONNECTION_KEEPALIVE);
}
/**
 * Resolves the media type for this file from its extension, applying the
 * preferred charset for that type when it differs from the type's default.
 */
public MediaType getContentType() {
    Optional<MediaType> optionalType = toContentType(Files.getFileExtension(filename));
    // Determine the charset we want for this type (may be empty for binary
    // types).
    Optional<Charset> targetCharset = toCharset(optionalType.orElse(null));
    MediaType type = optionalType.orElse(DEFAULT_CONTENT_TYPE_WITH_CHARSET);
    // Only rewrite the charset when a target exists and differs from the
    // type's current charset.
    if (targetCharset.isPresent() && !type.charset().toJavaUtil().equals(targetCharset)) {
        return type.withCharset(targetCharset.get());
    }
    return type;
}
@Test
public void should_get_type_from_filename() {
    // A .png extension resolves to the PNG media type (no charset).
    FileContentType contentType = new FileContentType("logo.png");
    assertThat(contentType.getContentType(), is(MediaType.PNG));
}
/**
 * Removes all transaction hooks registered for the current thread.
 */
public static void clear() {
    LOCAL_HOOKS.remove();
}
@Test
public void testClear() {
    // Register a hook, then verify clear() empties the thread-local list.
    assertThat(TransactionHookManager.getHooks()).isEmpty();
    TransactionHookManager.registerHook(new TransactionHookAdapter());
    assertThat(TransactionHookManager.getHooks()).isNotEmpty();
    TransactionHookManager.clear();
    assertThat(TransactionHookManager.getHooks()).isEmpty();
}
/**
 * Consume-format-acknowledge loop for the console share consumer.
 *
 * @param maxMessages          stop after this many messages; -1 means unbounded
 * @param formatter            renders each record to {@code output}
 * @param consumer             source of records
 * @param output               destination stream
 * @param rejectMessageOnError when true, a formatting failure REJECTs the
 *                             message and continues; when false it terminates
 *                             the consumer
 * @param acknowledgeType      acknowledgement applied on success
 */
static void process(int maxMessages, MessageFormatter formatter, ConsumerWrapper consumer,
                    PrintStream output, boolean rejectMessageOnError, AcknowledgeType acknowledgeType) {
    while (messageCount < maxMessages || maxMessages == -1) {
        ConsumerRecord<byte[], byte[]> msg;
        try {
            msg = consumer.receive();
        } catch (WakeupException we) {
            LOG.trace("Caught WakeupException because consumer is shutdown, ignore and terminate.");
            // Consumer will be closed
            return;
        } catch (Throwable t) {
            LOG.error("Error processing message, terminating consumer process: ", t);
            // Consumer will be closed
            return;
        }
        messageCount += 1;
        try {
            // Re-wrap the record with zeroed serialized sizes before
            // formatting, then acknowledge with the configured type.
            formatter.writeTo(new ConsumerRecord<>(msg.topic(), msg.partition(), msg.offset(),
                msg.timestamp(), msg.timestampType(), 0, 0, msg.key(), msg.value(),
                msg.headers(), Optional.empty()), output);
            consumer.acknowledge(msg, acknowledgeType);
        } catch (Throwable t) {
            if (rejectMessageOnError) {
                LOG.error("Error processing message, rejecting this message: ", t);
                consumer.acknowledge(msg, AcknowledgeType.REJECT);
            } else {
                // Consumer will be closed
                throw t;
            }
        }
        if (checkErr(output)) {
            // Consumer will be closed
            return;
        }
    }
}
@Test
public void testRejectMessageOnError() {
    // When rejectMessageOnError is true, a formatter failure must REJECT the
    // record instead of terminating with an exception.
    ConsoleShareConsumer.ConsumerWrapper consumer = mock(ConsoleShareConsumer.ConsumerWrapper.class);
    MessageFormatter formatter = mock(MessageFormatter.class);
    PrintStream printStream = mock(PrintStream.class);
    ConsumerRecord<byte[], byte[]> record = new ConsumerRecord<>("foo", 1, 1, new byte[0], new byte[0]);
    when(consumer.receive()).thenReturn(record);
    //Simulate an error on formatter.writeTo() call
    doThrow(new RuntimeException())
        .when(formatter)
        .writeTo(any(), any());
    ConsoleShareConsumer.process(1, formatter, consumer, printStream, true, AcknowledgeType.ACCEPT);
    verify(formatter).writeTo(any(), eq(printStream));
    verify(consumer).receive();
    verify(consumer).acknowledge(record, AcknowledgeType.REJECT);
    consumer.cleanup();
}
/**
 * Free-space reporting is not supported by this virtual file system.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public long getFreeSpace() {
    throw new UnsupportedOperationException("Not implemented");
}
@Test(expectedExceptions = UnsupportedOperationException.class)
public void testGetFreeSpace() {
    // The virtual FS does not implement free-space queries.
    fs.getFile("nonsuch.txt").getFreeSpace();
}
/**
 * Returns {@code true} when the instance appears to run in EC2: its data
 * center info is Amazon-specific and its instance id carries the EC2
 * {@code "i-"} prefix.
 */
public static boolean isInEc2(InstanceInfo instanceInfo) {
    DataCenterInfo dataCenterInfo = instanceInfo.getDataCenterInfo();
    if (!(dataCenterInfo instanceof AmazonInfo)) {
        return false;
    }
    String instanceId = ((AmazonInfo) dataCenterInfo).getId();
    return instanceId != null && instanceId.startsWith("i-");
}
@Test
public void testIsInEc2() {
    // A non-Amazon data center must be classified as not-EC2; the default
    // generated instance info (Amazon, "i-" id) must be classified as EC2.
    InstanceInfo instanceInfo1 = new InstanceInfo.Builder(InstanceInfoGenerator.takeOne())
        .setDataCenterInfo(new DataCenterInfo() {
            @Override
            public Name getName() {
                return Name.MyOwn;
            }
        })
        .build();
    Assert.assertFalse(EurekaUtils.isInEc2(instanceInfo1));
    InstanceInfo instanceInfo2 = InstanceInfoGenerator.takeOne();
    Assert.assertTrue(EurekaUtils.isInEc2(instanceInfo2));
}
/**
 * Evaluates set membership for the given raw value.
 *
 * <p>The raw value is first normalized to a {@code String}, then converted
 * to the array's element type before being tested against {@code values}.
 *
 * @param rawValue value to test
 * @return membership for IN, its negation for NOT_IN
 * @throws KiePMMLException when {@code inNotIn} is an unrecognized operator
 */
protected boolean evaluation(Object rawValue) {
    String stringValue = (String) ConverterTypeUtil.convert(String.class, rawValue);
    Object convertedValue = arrayType.getValue(stringValue);
    switch (inNotIn) {
        case IN:
            return values.contains(convertedValue);
        case NOT_IN:
            return !values.contains(convertedValue);
        default:
            // Fixed message: the original concatenated without a separator,
            // producing e.g. "Unknown IN_NOTINFOO".
            throw new KiePMMLException("Unknown IN_NOTIN " + inNotIn);
    }
}
@Test
void evaluationIntNotIn() {
    // NOT_IN: a value present in the set evaluates false, an absent value
    // (here supplied as a string, exercising conversion) evaluates true.
    ARRAY_TYPE arrayType = ARRAY_TYPE.INT;
    List<Object> values = getObjects(arrayType, 1);
    KiePMMLSimpleSetPredicate kiePMMLSimpleSetPredicate = getKiePMMLSimpleSetPredicate(values, arrayType, IN_NOTIN.NOT_IN);
    assertThat(kiePMMLSimpleSetPredicate.evaluation(values.get(0))).isFalse();
    assertThat(kiePMMLSimpleSetPredicate.evaluation("234")).isTrue();
}
/**
 * Removes any existing entry for {@code grn} and keeps at most
 * {@code max - 1} items — the caller prepends the new entry afterwards,
 * bringing the list to at most {@code max}.
 */
protected static List<LastOpenedDTO> filterForExistingIdAndCapAtMaximum(final LastOpenedForUserDTO loi, final GRN grn, final long max) {
    return loi.items().stream().filter(i -> !i.grn().equals(grn)).limit(max - 1).toList();
}
@Test
public void testNotCapAtMaximumMinusOneIfYouAreAtMaxMinusOne() {
    // With exactly MAX-1 existing items and a new (non-colliding) GRN, the
    // filter must keep all MAX-1 items — the cap only trims longer lists.
    var list = List.of(
        new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "1"), DateTime.now(DateTimeZone.UTC)),
        new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "2"), DateTime.now(DateTimeZone.UTC)),
        new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "3"), DateTime.now(DateTimeZone.UTC)),
        new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "4"), DateTime.now(DateTimeZone.UTC)),
        new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "5"), DateTime.now(DateTimeZone.UTC)),
        new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "6"), DateTime.now(DateTimeZone.UTC)),
        new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "7"), DateTime.now(DateTimeZone.UTC)),
        new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "8"), DateTime.now(DateTimeZone.UTC)),
        new LastOpenedDTO(grnRegistry.newGRN(GRNTypes.DASHBOARD, "9"), DateTime.now(DateTimeZone.UTC))
    );
    assertThat(list.size()).isEqualTo(MAX-1);
    LastOpenedForUserDTO dto = new LastOpenedForUserDTO("userId", list);
    var result = StartPageService.filterForExistingIdAndCapAtMaximum(dto, grnRegistry.newGRN(GRNTypes.DASHBOARD, "11"), MAX);
    assertThat(result.size()).isEqualTo(MAX-1);
}