Dataset columns:
- focal_method: string, lengths 13 to 60.9k characters
- test_case: string, lengths 25 to 109k characters
/**
 * Builds the CSV header row by joining the format's header columns with the
 * format's delimiter character.
 */
static String headerLine(CSVFormat csvFormat) {
  final String delimiter = String.valueOf(csvFormat.getDelimiter());
  final String[] columns = csvFormat.getHeader();
  final StringBuilder line = new StringBuilder();
  for (int i = 0; i < columns.length; i++) {
    if (i > 0) {
      line.append(delimiter);
    }
    line.append(columns[i]);
  }
  return line.toString();
}
// Verifies that CsvIOStringToCsvRecord splits each row into cells using a custom ';'
// delimiter and reports no parse errors for well-formed input.
@Test public void givenCustomDelimiter_splitsCells() {
  CSVFormat csvFormat = csvFormat().withDelimiter(';');
  // Input includes the header row plus three data rows, all ';'-delimited.
  PCollection<String> input = pipeline.apply(Create.of(headerLine(csvFormat), "a;1;1.1", "b;2;2.2", "c;3;3.3"));
  CsvIOStringToCsvRecord underTest = new CsvIOStringToCsvRecord(csvFormat);
  CsvIOParseResult<List<String>> result = input.apply(underTest);
  PAssert.that(result.getOutput())
      .containsInAnyOrder(
          Arrays.asList(
              Arrays.asList("a", "1", "1.1"),
              Arrays.asList("b", "2", "2.2"),
              Arrays.asList("c", "3", "3.3")));
  PAssert.that(result.getErrors()).empty();
  pipeline.run();
}
/**
 * Inserts or updates the mapping for {@code key}, returning the previous value or
 * {@code missingValue} if the key was absent.
 *
 * The backing array stores interleaved [key, value] pairs, so a slot's value lives at
 * {@code entries[index + 1]}; a value equal to {@code missingValue} marks an empty slot.
 *
 * @throws IllegalArgumentException if {@code value} equals the sentinel missing value,
 *         which could not be distinguished from an empty slot.
 */
public int put(final int key, final int value) {
  final int missingValue = this.missingValue;
  if (missingValue == value) {
    throw new IllegalArgumentException("cannot accept missingValue");
  }
  final int[] entries = this.entries;
  @DoNotSub final int mask = entries.length - 1;
  // evenHash keeps the index aligned to the start of a [key, value] pair.
  @DoNotSub int index = Hashing.evenHash(key, mask);
  int oldValue;
  // Linear-probe until an empty slot or a slot holding the same key is found.
  while (missingValue != (oldValue = entries[index + 1])) {
    if (key == entries[index]) {
      break;
    }
    index = next(index, mask);
  }
  // New key: record it and bump the element count before writing the value.
  if (missingValue == oldValue) {
    ++size;
    entries[index] = key;
  }
  entries[index + 1] = value;
  // May resize/rehash once load factor is exceeded.
  increaseCapacity();
  return oldValue;
}
// The sentinel missing value is reserved to mark empty slots, so storing it must be rejected.
@Test void shouldNotAllowMissingValueAsValue() {
  assertThrows(IllegalArgumentException.class, () -> map.put(1, MISSING_VALUE));
}
/**
 * Parses a single {@link MetadataUpdate} from its JSON representation,
 * delegating per-node parsing to {@code MetadataUpdateParser::fromJson}.
 */
public static MetadataUpdate fromJson(String json) { return JsonUtil.parse(json, MetadataUpdateParser::fromJson); }
// Round-trip check: a set-default-partition-spec JSON document parses into the
// equivalent MetadataUpdate.SetDefaultPartitionSpec instance.
@Test public void testSetDefaultPartitionSpecFromJson() {
  final String action = MetadataUpdateParser.SET_DEFAULT_PARTITION_SPEC;
  final int specId = 4;
  final String json = "{\"action\":\"" + action + "\",\"spec-id\":" + specId + "}";
  final MetadataUpdate.SetDefaultPartitionSpec expected =
      new MetadataUpdate.SetDefaultPartitionSpec(specId);
  assertEquals(action, expected, MetadataUpdateParser.fromJson(json));
}
/**
 * Deserializes an "old-style" payload in which the single element was serialized
 * directly (not as a list); the element is wrapped in a singleton list.
 * A null payload deserializes to null.
 */
@Override
public List<?> deserialize(final String topic, final byte[] bytes) {
  return bytes == null ? null : Collections.singletonList(inner.deserialize(topic, bytes));
}
// A null payload must deserialize to null rather than a singleton list containing null.
@Test public void shouldDeserializeOldStyleNulls() {
  // When:
  final List<?> result = deserializer.deserialize(TOPIC, null);
  // Then:
  assertThat(result, is(nullValue()));
}
/**
 * Validates a group id: it must be non-blank and match the group-id pattern.
 *
 * @throws IllegalArgumentException if the id is blank or does not match the pattern
 */
public static void verifyGroupId(final String groupId) {
  if (StringUtils.isBlank(groupId)) {
    throw new IllegalArgumentException("Blank groupId");
  }
  // NOTE(review): "GROUP_ID_PATTER" looks like a typo for GROUP_ID_PATTERN, but the
  // constant is declared elsewhere in this class, so the reference is left as-is here.
  if (!GROUP_ID_PATTER.matcher(groupId).matches()) {
    throw new IllegalArgumentException(
        "Invalid group id, it should be started with character 'a'-'z' or 'A'-'Z',"
            + " and followed with numbers, english alphabet, '-' or '_'. ");
  }
}
// An empty group id must be rejected as blank.
// Fix: corrected the typo in the test method name ("tets" -> "test"); JUnit discovers
// tests via the annotation, so the rename is safe for callers.
@Test(expected = IllegalArgumentException.class)
public void testVerifyGroupId1() {
  Utils.verifyGroupId("");
}
/**
 * Factory used by Jackson to create a {@link ModelVersion} from its string form.
 *
 * @throws IllegalArgumentException if the version string is blank
 */
@JsonCreator
public static ModelVersion of(String version) {
  if (StringUtils.isBlank(version)) {
    throw new IllegalArgumentException("Version must not be blank");
  }
  return new AutoValue_ModelVersion(version);
}
// NOTE(review): despite its name, this test exercises DESERIALIZATION
// (objectMapper.readValue) of a JSON string into a ModelVersion — consider renaming.
@Test public void serialize() throws IOException {
  final ModelVersion modelVersion = objectMapper.readValue("\"foobar\"", ModelVersion.class);
  assertThat(modelVersion).isEqualTo(ModelVersion.of("foobar"));
}
/**
 * Batch-deletes rules by id.
 *
 * @param ids non-empty list of non-blank rule ids to delete
 * @return success result carrying the number of deleted rules
 */
@DeleteMapping("/batch")
public ShenyuAdminResult deleteRules(@RequestBody @NotEmpty final List<@NotBlank String> ids) {
  return ShenyuAdminResult.success(ShenyuResultMessage.DELETE_SUCCESS, ruleService.delete(ids));
}
// DELETE /rule/batch with a JSON id array should return OK and the delete-success message.
@Test public void testDeleteRules() throws Exception {
  given(this.ruleService.delete(Collections.singletonList("111"))).willReturn(1);
  this.mockMvc.perform(MockMvcRequestBuilders.delete("/rule/batch")
      .contentType(MediaType.APPLICATION_JSON)
      .content("[\"111\"]")
      )
      .andExpect(status().isOk())
      .andExpect(jsonPath("$.message", is(ShenyuResultMessage.DELETE_SUCCESS)))
      .andReturn();
}
/**
 * Builds the weather API query URL for the configured location by delegating to
 * {@code getQuery(location)}.
 */
public String getQuery() throws Exception { return getQuery(weatherConfiguration.getLocation()); }
// An hourly-forecast query for a single city id should produce the expected forecast URL
// with id, language, period (cnt), mode and APPID parameters.
// NOTE(review): the expected URL embeds the APPID constant verbatim; confirm this is a
// non-secret test key before publishing.
@Test public void testSingleIdHourlyForecastQuery() throws Exception {
  WeatherConfiguration weatherConfiguration = new WeatherConfiguration();
  weatherConfiguration.setIds("524901");
  weatherConfiguration.setMode(WeatherMode.XML);
  weatherConfiguration.setLanguage(WeatherLanguage.nl);
  weatherConfiguration.setAppid(APPID);
  weatherConfiguration.setWeatherApi(WeatherApi.Hourly);
  weatherConfiguration.setPeriod("20");
  WeatherQuery weatherQuery = new WeatherQuery(weatherConfiguration);
  weatherConfiguration.setGeoLocationProvider(geoLocationProvider);
  String query = weatherQuery.getQuery();
  assertThat(query, is(
      "http://api.openweathermap.org/data/2.5/forecast?id=524901&lang=nl&cnt=20&mode=xml&APPID=9162755b2efa555823cfe0451d7fff38"));
}
/**
 * Cancels the job with the given id, whether it is a plan or a workflow.
 *
 * @param jobId id of the job to cancel
 * @throws JobDoesNotExistException if neither a plan coordinator nor a workflow
 *         is tracked for the id
 */
public void cancel(long jobId) throws JobDoesNotExistException {
  try (JobMasterAuditContext auditContext = createAuditContext("cancel")) {
    auditContext.setJobId(jobId);
    PlanCoordinator planCoordinator = mPlanTracker.getCoordinator(jobId);
    if (planCoordinator == null) {
      // Not a plan; fall back to the workflow tracker.
      if (!mWorkflowTracker.cancel(jobId)) {
        throw new JobDoesNotExistException(jobId);
      }
      // Fix: a successful workflow cancellation is audited as a success too;
      // previously only the plan path set the flag.
      auditContext.setSucceeded(true);
      return;
    }
    planCoordinator.cancel();
    auditContext.setSucceeded(true);
  }
}
// Cancelling a running job must delegate to its plan coordinator's cancel().
@Test public void cancel() throws Exception {
  try (MockedStatic<PlanCoordinator> mockStaticPlanCoordinator = mockPlanCoordinator()) {
    SleepJobConfig config = new SleepJobConfig(10000);
    long jobId = mJobMaster.run(config);
    mJobMaster.cancel(jobId);
    verify(mMockPlanCoordinator).cancel();
  }
}
/**
 * Creates an instance of {@code clazz}, covering primitives (default value), wrapper
 * types (cached default), classes with a no-arg constructor, and finally classes whose
 * least-parameter constructor is invoked with default/null arguments.
 *
 * @throws SofaRpcRuntimeException if no constructor exists or instantiation fails
 */
public static <T> T newInstance(Class<T> clazz) throws SofaRpcRuntimeException {
  if (clazz.isPrimitive()) {
    return (T) getDefaultPrimitiveValue(clazz);
  }
  T t = getDefaultWrapperValue(clazz);
  if (t != null) {
    return t;
  }
  try {
    // Plain class; a non-static member class needs its enclosing instance as an extra
    // constructor argument, so skip the no-arg attempt for those.
    if (!(clazz.isMemberClass() && !Modifier.isStatic(clazz.getModifiers()))) {
      try {
        // Prefer an (possibly non-public) no-arg constructor if one exists.
        Constructor<T> constructor = clazz.getDeclaredConstructor();
        constructor.setAccessible(true);
        return constructor.newInstance();
      } catch (Exception ignore) { // NOPMD
      }
    }
    // Otherwise fall back to the constructor with the fewest parameters.
    Constructor<T>[] constructors = (Constructor<T>[]) clazz.getDeclaredConstructors();
    if (constructors == null || constructors.length == 0) {
      throw new SofaRpcRuntimeException("The " + clazz.getCanonicalName() + " has no default constructor!");
    }
    Constructor<T> constructor = constructors[0];
    if (constructor.getParameterTypes().length > 0) {
      for (Constructor<T> c : constructors) {
        if (c.getParameterTypes().length < constructor.getParameterTypes().length) {
          constructor = c;
          if (constructor.getParameterTypes().length == 0) {
            break;
          }
        }
      }
    }
    constructor.setAccessible(true);
    // Synthesize arguments: default values for primitives, null for everything else.
    Class<?>[] argTypes = constructor.getParameterTypes();
    Object[] args = new Object[argTypes.length];
    for (int i = 0; i < args.length; i++) {
      args[i] = getDefaultPrimitiveValue(argTypes[i]);
    }
    return constructor.newInstance(args);
  } catch (SofaRpcRuntimeException e) {
    throw e;
  } catch (Throwable e) {
    throw new SofaRpcRuntimeException(e.getMessage(), e);
  }
}
// Exercises newInstance across primitives, wrappers (all default to zero/false),
// member classes (static and non-static), and plain classes, including one whose
// least-parameter constructor is invoked with default arguments (TestClass3).
@Test public void testNewInstance() throws Exception {
  short s = ClassUtils.newInstance(short.class);
  Assert.assertTrue(s == 0);
  Short s2 = ClassUtils.newInstance(Short.class);
  Assert.assertTrue(s2 == 0);
  int i = ClassUtils.newInstance(int.class);
  Assert.assertTrue(i == 0);
  Integer integer = ClassUtils.newInstance(Integer.class);
  Assert.assertTrue(integer == 0);
  long l = ClassUtils.newInstance(long.class);
  Assert.assertTrue(l == 0);
  Long l2 = ClassUtils.newInstance(Long.class);
  Assert.assertTrue(l2 == 0);
  double d = ClassUtils.newInstance(double.class);
  Assert.assertTrue(d == 0.0d);
  Double d2 = ClassUtils.newInstance(Double.class);
  Assert.assertTrue(d2 == 0.0d);
  float f = ClassUtils.newInstance(float.class);
  Assert.assertTrue(f == 0.0f);
  Float f2 = ClassUtils.newInstance(Float.class);
  Assert.assertTrue(f2 == 0.0f);
  byte b = ClassUtils.newInstance(byte.class);
  Assert.assertTrue(b == 0);
  Byte b2 = ClassUtils.newInstance(Byte.class);
  Assert.assertTrue(b2 == 0);
  char c = ClassUtils.newInstance(char.class);
  Assert.assertTrue(c == 0);
  Character c2 = ClassUtils.newInstance(Character.class);
  Assert.assertTrue(c2 == 0);
  boolean bl = ClassUtils.newInstance(boolean.class);
  Assert.assertFalse(bl);
  Boolean bl2 = ClassUtils.newInstance(Boolean.class);
  Assert.assertFalse(bl2);
  Assert.assertNotNull(ClassUtils.newInstance(TestMemberClass1.class));
  Assert.assertNotNull(ClassUtils.newInstance(TestMemberClass2.class));
  Assert.assertNotNull(ClassUtils.newInstance(TestMemberClass3.class));
  Assert.assertNotNull(ClassUtils.newInstance(TestMemberClass4.class));
  Assert.assertNotNull(ClassUtils.newInstance(TestMemberClass5.class));
  Assert.assertNotNull(ClassUtils.newInstance(TestMemberClass6.class));
  Assert.assertNotNull(ClassUtils.newInstance(TestClass1.class));
  Assert.assertNotNull(ClassUtils.newInstance(TestClass2.class));
  TestClass3 class3 = ClassUtils.newInstance(TestClass3.class);
  Assert.assertNotNull(class3);
  Assert.assertNull(class3.getName());
  Assert.assertEquals(class3.getAge(), 0);
}
/**
 * Builds a Spark {@link Encoder} for a tagged-union value represented as
 * {@code Tuple2<typeIndex, value>}, where {@code typeIndex} selects one of the given
 * encoders. The deserializer expression is derived from the serializer's data type.
 */
public static <T> Encoder<Tuple2<Integer, T>> oneOfEncoder(List<Encoder<T>> encoders) {
  Expression serializer = serializeOneOf(rootRef(TUPLE2_TYPE, true), encoders);
  Expression deserializer = deserializeOneOf(rootCol(serializer.dataType()), encoders);
  return EncoderFactory.create(serializer, deserializer, Tuple2.class);
}
// Round-trips (typeIndex, value) tuples through the one-of encoder; each row sets only
// the column corresponding to its type index, and collecting must restore the input.
@Test public void testOneOffEncoder() {
  List<Coder<?>> coders = ImmutableList.copyOf(BASIC_CASES.keySet());
  List<Encoder<?>> encoders = coders.stream().map(EncoderHelpers::encoderFor).collect(toList());
  // build oneOf tuples of type index and corresponding value
  List<Tuple2<Integer, ?>> data = BASIC_CASES.entrySet().stream()
      .map(e -> tuple(coders.indexOf(e.getKey()), (Object) e.getValue().get(0)))
      .collect(toList());
  // dataset is a sparse dataset with only one column set per row
  Dataset<Tuple2<Integer, ?>> dataset = createDataset(data, oneOfEncoder((List) encoders));
  assertThat(dataset.collectAsList(), equalTo(data));
}
/**
 * Encodes {@code val} using the first configured codec.
 * NOTE(review): the {@code delimiters} parameter is ignored here — confirm whether
 * delimiter-aware encoding is handled by an overriding implementation or this is a
 * latent bug.
 */
public byte[] encode(String val, String delimiters) { return codecs[0].encode(val); }
// Long Chinese text must encode to the expected GB2312 byte sequence.
@Test public void testEncodeChineseLongTextGB2312() {
  assertArrayEquals(CHINESE_LONG_TEXT_GB2312_BYTES, gb2312().encode(CHINESE_LONG_TEXT_GB2312, LT_DELIMS));
}
/**
 * Dispatches one input line to the statement handler; null and whitespace-only
 * lines are ignored.
 */
void handleLine(final String line) {
  final String trimmed = line == null ? "" : line.trim();
  if (!trimmed.isEmpty()) {
    handleStatements(trimmed);
  }
}
// When the server is Confluent Cloud and an API key is configured, connector commands
// must go through the dedicated connector request path of the REST client.
@Test public void shouldIssueCCloudConnectorRequest() throws Exception {
  // Given:
  final KsqlRestClient mockRestClient = givenMockRestClient();
  when(mockRestClient.getIsCCloudServer()).thenReturn(true);
  when(mockRestClient.getHasCCloudApiKey()).thenReturn(true);
  when(mockRestClient.makeConnectorRequest(anyString(), anyLong()))
      .thenReturn(RestResponse.successful(
          OK.code(),
          new KsqlEntityList(Collections.singletonList(
              new ConnectorList("list connectors;", Collections.emptyList(), Collections.emptyList())))
      ));
  // When:
  localCli.handleLine("list connectors;");
  // Then:
  verify(mockRestClient).makeConnectorRequest(anyString(), anyLong());
}
/**
 * Returns the metric tags applied at client level: currently just the client id.
 * The map is mutable and insertion-ordered so callers may append further tags.
 */
public Map<String, String> clientLevelTagMap() {
  final Map<String, String> clientTags = new LinkedHashMap<>();
  clientTags.put(CLIENT_ID_TAG, clientId);
  return clientTags;
}
// The client-level tag map should contain exactly one entry: the client id.
@Test public void shouldGetClientLevelTagMap() {
  final Map<String, String> tagMap = streamsMetrics.clientLevelTagMap();
  assertThat(tagMap.size(), equalTo(1));
  assertThat(tagMap.get(StreamsMetricsImpl.CLIENT_ID_TAG), equalTo(CLIENT_ID));
}
/**
 * Reassigns field ids in {@code schema} to match the ids used by
 * {@code idSourceSchema}. Delegates to the three-argument overload with its third
 * flag set to {@code true} (see that overload for the flag's meaning).
 */
public static Schema reassignIds(Schema schema, Schema idSourceSchema) {
  return reassignIds(schema, idSourceSchema, true);
}
// Reassigning ids must also remap the identifier field ids to the source schema's ids.
@Test public void testReassignIdsWithIdentifier() {
  Schema schema =
      new Schema(
          Lists.newArrayList(
              required(0, "a", Types.IntegerType.get()),
              required(1, "A", Types.IntegerType.get())),
          Sets.newHashSet(0));
  Schema sourceSchema =
      new Schema(
          Lists.newArrayList(
              required(1, "a", Types.IntegerType.get()),
              required(2, "A", Types.IntegerType.get())),
          Sets.newHashSet(1));
  final Schema actualSchema = TypeUtil.reassignIds(schema, sourceSchema);
  assertThat(actualSchema.asStruct()).isEqualTo(sourceSchema.asStruct());
  assertThat(actualSchema.identifierFieldIds())
      .as("identifier field ID should change based on source schema")
      .isEqualTo(sourceSchema.identifierFieldIds());
}
/**
 * Parses a monetary string into a {@link Coin} using the smallest-unit exponent.
 *
 * @throws NumberFormatException if the string is not a valid amount
 */
public Coin parse(String str) throws NumberFormatException {
  return Coin.valueOf(parseValue(str, Coin.SMALLEST_UNIT_EXPONENT));
}
// An empty string is not a valid amount and must raise NumberFormatException.
@Test(expected = NumberFormatException.class)
public void parseInvalidEmpty() {
  NO_CODE.parse("");
}
/**
 * Issues a streaming request with a fresh, empty {@link RequestContext};
 * delegates to the context-taking overload.
 */
@Override
public void streamRequest(StreamRequest request, Callback<StreamResponse> callback) {
  streamRequest(request, new RequestContext(), callback);
}
// With only one retry allowed, exhausting both failing hosts must surface an error
// ("Data not available") to the callback instead of retrying forever.
@Test public void testStreamRetryOverLimit() throws Exception {
  SimpleLoadBalancer balancer = prepareLoadBalancer(Arrays.asList("http://test.linkedin.com/retry1", "http://test.linkedin.com/retry2"),
      HttpClientFactory.UNLIMITED_CLIENT_REQUEST_RETRY_RATIO);
  DynamicClient dynamicClient = new DynamicClient(balancer, null);
  RetryClient client = new RetryClient(
      dynamicClient,
      balancer,
      1,
      RetryClient.DEFAULT_UPDATE_INTERVAL_MS,
      RetryClient.DEFAULT_AGGREGATED_INTERVAL_NUM,
      SystemClock.instance(),
      true,
      true);
  URI uri = URI.create("d2://retryService?arg1=empty&arg2=empty");
  StreamRequest streamRequest = new StreamRequestBuilder(uri).build(EntityStreams.emptyStream());
  DegraderTrackerClientTest.TestCallback<StreamResponse> streamCallback = new DegraderTrackerClientTest.TestCallback<>();
  client.streamRequest(streamRequest, streamCallback);
  assertNull(streamCallback.t);
  assertNotNull(streamCallback.e);
  assertTrue(streamCallback.e.getMessage().contains("Data not available"));
}
/**
 * Writes the short form of a field descriptor: {@code name:Type}.
 *
 * @throws IOException if the underlying writer fails
 */
public void writeShortFieldDescriptor(FieldReference fieldReference) throws IOException {
  writeSimpleName(fieldReference.getName());
  writer.write(':');
  writeType(fieldReference.getType());
}
// The short field descriptor form is "name:Type" with the Dex type descriptor syntax.
@Test public void testWriteShortFieldDescriptor() throws IOException {
  DexFormattedWriter writer = new DexFormattedWriter(output);
  writer.writeShortFieldDescriptor(getFieldReference());
  Assert.assertEquals("fieldName:Lfield/type;", output.toString());
}
/**
 * Resolves a field reference against the map: locates the parent container, then
 * fetches the leaf key from it. Returns null when the parent path does not exist.
 */
public static Object get(final ConvertedMap data, final FieldReference field) {
  final Object parent = findParent(data, field);
  if (parent == null) {
    return null;
  }
  return fetch(parent, field.getKey());
}
// A bare bracketed reference like "[foo]" must resolve the top-level key "foo".
@Test public void testBareBracketsGet() throws Exception {
  Map<Serializable, Object> data = new HashMap<>();
  data.put("foo", "bar");
  String reference = "[foo]";
  assertEquals(
      RubyUtil.RUBY.newString("bar"),
      get(ConvertedMap.newFromMap(data), reference)
  );
}
/**
 * Looks up a global state store by name and wraps it in a read-write facade.
 * The unchecked cast is required because the caller chooses the concrete store type.
 */
@SuppressWarnings("unchecked")
@Override
public <S extends StateStore> S getStateStore(final String name) {
  final StateStore store = stateManager.getGlobalStore(name);
  return (S) getReadWriteStore(store);
}
// Stores obtained from the global context are wrappers that must reject close();
// closing the underlying global store is the library's responsibility, not the user's.
@Test public void shouldNotAllowCloseForWindowStore() {
  when(stateManager.getGlobalStore(GLOBAL_WINDOW_STORE_NAME)).thenReturn(mock(WindowStore.class));
  final StateStore store = globalContext.getStateStore(GLOBAL_WINDOW_STORE_NAME);
  try {
    store.close();
    fail("Should have thrown UnsupportedOperationException.");
  } catch (final UnsupportedOperationException expected) { }
}
/**
 * Collects the application attempts scheduled in the named queue.
 * Returns null (not an empty list) when the queue does not exist.
 */
@Override
public List<ApplicationAttemptId> getAppsInQueue(String queueName) {
  final CSQueue targetQueue = getQueue(queueName);
  if (targetQueue == null) {
    return null;
  }
  final List<ApplicationAttemptId> attempts = new ArrayList<>();
  targetQueue.collectSchedulerApplications(attempts);
  return attempts;
}
// A group-based queue mapping "g:agroup:%user" should route user "a1" (statically mapped
// into "agroup") to the queue named after the user, overriding the requested "default".
@Test public void testQueueMappingWithCurrentUserQueueMappingForaGroup() throws Exception {
  CapacitySchedulerConfiguration config = new CapacitySchedulerConfiguration();
  config.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class, ResourceScheduler.class);
  setupQueueConfiguration(config);
  config.setClass(CommonConfigurationKeys.HADOOP_SECURITY_GROUP_MAPPING,
      TestGroupsCaching.FakeunPrivilegedGroupMapping.class, ShellBasedUnixGroupsMapping.class);
  // Statically place user "a1" in group "agroup" so the g: mapping applies.
  config.set(CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES, "a1" + "=" + "agroup" + "");
  Groups.getUserToGroupsMappingServiceWithLoadedConfiguration(config);
  config.set(CapacitySchedulerConfiguration.QUEUE_MAPPING, "g:agroup:%user");
  MockRM rm = new MockRM(config);
  rm.start();
  CapacityScheduler cs = ((CapacityScheduler) rm.getResourceScheduler());
  cs.start();
  MockRMAppSubmissionData data =
      MockRMAppSubmissionData.Builder.createWithMemory(GB, rm)
          .withAppName("appname")
          .withUser("a1")
          .withAcls(null)
          .withQueue("default")
          .withUnmanagedAM(false)
          .build();
  RMApp app = MockRMAppSubmitter.submit(rm, data);
  List<ApplicationAttemptId> appsInA1 = cs.getAppsInQueue("a1");
  assertEquals(1, appsInA1.size());
  rm.stop();
}
/**
 * Prepares fetch requests for fetchable partitions and returns them as a poll result;
 * success/failure handling is delegated to the instance callbacks.
 */
@Override
public PollResult poll(long currentTimeMs) {
  return pollInternal(
      prepareFetchRequests(),
      this::handleFetchSuccess,
      this::handleFetchFailure
  );
}
// Partitions waiting on the rebalance-assignment callback must be excluded from fetches
// until the callback completes; fetching resumes afterwards.
@Test public void testFetchResultNotProcessedForPartitionsAwaitingCallbackCompletion() {
  buildFetcher();
  assignFromUser(singleton(tp0));
  subscriptions.seek(tp0, 0);
  // Successfully fetch from partition (not marked as involved in the callback yet)
  assertNonEmptyFetch();
  // Mark partition as pendingOnAssignedCallback. No records should be fetched for it
  subscriptions.markPendingOnAssignedCallback(singleton(tp0), true);
  assertEquals(0, sendFetches());
  networkClientDelegate.poll(time.timer(0));
  assertFalse(fetcher.hasCompletedFetches());
  // Successfully start fetching again, once the partition is not waiting for the callback anymore
  subscriptions.enablePartitionsAwaitingCallback(singleton(tp0));
  assertNonEmptyFetch();
}
/**
 * Builds the full command line: the base command (with flags) produced by the
 * parent class, followed by the target container name, separated by a space.
 */
@Override
public String getCommandWithArguments() {
  final List<String> parts = new ArrayList<>();
  parts.add(super.getCommandWithArguments());
  parts.add(containerName);
  // StringUtils.join tolerates null elements (rendered as empty), unlike String.join.
  return StringUtils.join(parts, " ");
}
// "docker stop" with a grace period renders as "stop --time=<n> <container>".
@Test public void getCommandWithArguments() {
  dockerStopCommand.setGracePeriod(3);
  assertEquals("stop --time=3 container_name", dockerStopCommand.getCommandWithArguments());
}
/**
 * Returns the config-subscription status for all client connections from the given IP,
 * merging each connection's listen keys into the sample result.
 * NOTE(review): "Lisenters"/"Subscrible" are typos in external API names
 * (SampleResult/LongPollingService) and cannot be fixed here.
 */
@GetMapping("/watcherConfigs")
public SampleResult getSubClientConfigByIp(HttpServletRequest request, HttpServletResponse response,
    @RequestParam("ip") String ip, ModelMap modelMap) {
  SampleResult result = longPollingService.getCollectSubscribleInfoByIp(ip);
  List<Connection> connectionsByIp = connectionManager.getConnectionByIp(ip);
  for (Connection connectionByIp : connectionsByIp) {
    Map<String, String> listenKeys = configChangeListenContext
        .getListenKeys(connectionByIp.getMetaInfo().getConnectionId());
    if (listenKeys != null) {
      result.getLisentersGroupkeyStatus().putAll(listenKeys);
    }
  }
  return result;
}
// GET /watcherConfigs?ip=... should merge the listen keys of every connection from that
// IP into the returned lisentersGroupkeyStatus map.
@Test void testGetSubClientConfigByIp() throws Exception {
  String ip = "127.0.0.1";
  SampleResult result = new SampleResult();
  result.setLisentersGroupkeyStatus(new HashMap<>());
  when(longPollingService.getCollectSubscribleInfoByIp(ip)).thenReturn(result);
  ConnectionMeta connectionMeta = new ConnectionMeta(ip, ip, ip, 8888, 9848, "GRPC", "", "", new HashMap<>());
  Connection connection = new GrpcConnection(connectionMeta, null, null);
  List<Connection> connectionList = new ArrayList<>();
  connectionList.add(connection);
  when(connectionManager.getConnectionByIp(ip)).thenReturn(connectionList);
  Map<String, String> map = new HashMap<>();
  map.put("test", "test");
  when(configChangeListenContext.getListenKeys(ip)).thenReturn(map);
  MockHttpServletRequestBuilder builder =
      MockMvcRequestBuilders.get(Constants.COMMUNICATION_CONTROLLER_PATH + "/watcherConfigs")
          .param("ip", ip);
  String actualValue = mockMvc.perform(builder).andReturn().getResponse().getContentAsString();
  assertEquals("{\"test\":\"test\"}", JacksonUtils.toObj(actualValue).get("lisentersGroupkeyStatus").toString());
}
/**
 * Returns the cached payload builder for the named struct, lazily creating and
 * caching one on first access. Unknown names are rejected by validateObject
 * (exception propagates, nothing is cached).
 */
public ConfigPayloadBuilder getObject(String name) {
  return objectMap.computeIfAbsent(name, key -> {
    validateObject(key);
    return new ConfigPayloadBuilder(getStructDef(key));
  });
}
// Requesting a struct name that is not in the config definition must fail validation.
@Test(expected=IllegalArgumentException.class)
public void require_that_structs_must_exist() {
  builderWithDef.getObject("structdoesnotexist");
}
/**
 * Parses a SQL boolean string, returning true when it matches one of the accepted
 * truthy spellings (matching is delegated to booleanStringMatches).
 */
public static boolean parseBoolean(final String value) {
  return booleanStringMatches(value, true);
}
// Case-insensitive "yes" and its prefixes ("ye", "y") all parse as true — per these
// assertions the matcher apparently accepts prefixes of "yes", not only the full word.
@Test public void shouldParseYesAsTrue() {
  assertThat(SqlBooleans.parseBoolean("YeS"), is(true));
  assertThat(SqlBooleans.parseBoolean("yE"), is(true));
  assertThat(SqlBooleans.parseBoolean("Y"), is(true));
}
/**
 * Validates that {@code n} is less than or equal to {@code expected} and returns it.
 *
 * @param n the value to check
 * @param expected the inclusive upper bound
 * @param name parameter name used in the error message
 * @return {@code n} unchanged
 * @throws IllegalArgumentException if {@code n > expected}
 */
public static int checkLessThanOrEqual(int n, long expected, String name) {
  if (n <= expected) {
    return n;
  }
  throw new IllegalArgumentException(name + ": " + n + " (expected: <= " + expected + ')');
}
// Values above the bound must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void checkLessThanOrEqualMustFailIfArgumentIsGreaterThanExpected() {
  RangeUtil.checkLessThanOrEqual(1, 0, "var");
}
/**
 * Replaces an existing application registration.
 *
 * Returns NOK when there is no registration in the session, OK when the backend
 * reports status "OK", and NOK otherwise.
 */
@Override
public AppResponse process(Flow flow, ReplaceApplicationRequest request) {
  // Without a registration there is nothing to replace.
  if (appSession.getRegistrationId() == null) {
    return new NokResponse();
  }
  Map<String, String> result =
      digidClient.replaceExistingApplication(appSession.getRegistrationId(), request.isReplaceApplication());
  // Fix: constant-first equals so a missing/null status yields NOK instead of a
  // NullPointerException.
  if ("OK".equals(result.get(lowerUnderscore(STATUS)))) {
    return new OkResponse();
  }
  return new NokResponse();
}
// A session without a registration id must produce a NOK response.
@Test void processNOKMissingRegistrationTest(){
  replaceExistingApplication.getAppSession().setRegistrationId(null);
  AppResponse appResponse = replaceExistingApplication.process(flowMock, null);
  assertTrue(appResponse instanceof NokResponse);
  assertEquals("NOK", ((NokResponse) appResponse).getStatus());
}
/**
 * Asserts the argument is non-null, delegating to the message-taking overload with
 * the default message "object is null".
 */
public static void notNull(Object object) {
  notNull(object, "object is null");
}
// Passing null must raise IllegalArgumentException.
@Test public void testNotNull2() {
  assertThrows(IllegalArgumentException.class, () -> Precondition.notNull(null));
}
/**
 * Streams one change-stream partition: acquires/verifies the partition lock, handles a
 * pending CloseStream (end-of-stream or split/merge), then reads records from the
 * change stream and forwards them to the change-stream action until a checkpoint or
 * stream end.
 *
 * @return stop() on lock failure or CloseStream handling; resume() when the stream
 *         drains normally; otherwise whatever the change-stream action requests
 * @throws IOException propagated from stream processing
 */
public ProcessContinuation run(
    PartitionRecord partitionRecord,
    RestrictionTracker<StreamProgress, StreamProgress> tracker,
    OutputReceiver<KV<ByteString, ChangeStreamRecord>> receiver,
    ManualWatermarkEstimator<Instant> watermarkEstimator)
    throws IOException {
  BytesThroughputEstimator<KV<ByteString, ChangeStreamRecord>> throughputEstimator =
      new BytesThroughputEstimator<>(sizeEstimator, Instant.now());
  // Lock the partition
  if (tracker.currentRestriction().isEmpty()) {
    boolean lockedPartition = metadataTableDao.lockAndRecordPartition(partitionRecord);
    // Clean up NewPartition on the first run regardless of locking result. If locking fails it
    // means this partition is being streamed, then cleaning up NewPartitions avoids lingering
    // NewPartitions.
    for (NewPartition newPartition : partitionRecord.getParentPartitions()) {
      metadataTableDao.deleteNewPartition(newPartition);
    }
    if (!lockedPartition) {
      LOG.info(
          "RCSP {} : Could not acquire lock with uid: {}, because this is a "
              + "duplicate and another worker is working on this partition already.",
          formatByteStringRange(partitionRecord.getPartition()),
          partitionRecord.getUuid());
      StreamProgress streamProgress = new StreamProgress();
      streamProgress.setFailToLock(true);
      metrics.decPartitionStreamCount();
      tracker.tryClaim(streamProgress);
      return ProcessContinuation.stop();
    }
  } else if (tracker.currentRestriction().getCloseStream() == null
      && !metadataTableDao.doHoldLock(
          partitionRecord.getPartition(), partitionRecord.getUuid())) {
    // We only verify the lock if we are not holding CloseStream because if this is a retry of
    // CloseStream we might have already cleaned up the lock in a previous attempt.
    // Failed correctness check on this worker holds the lock on this partition. This shouldn't
    // fail because there's a restriction tracker which means this worker has already acquired the
    // lock and once it has acquired the lock it shouldn't fail the lock check.
    LOG.warn(
        "RCSP {} : Subsequent run that doesn't hold the lock {}. This is not unexpected and "
            + "should probably be reviewed.",
        formatByteStringRange(partitionRecord.getPartition()),
        partitionRecord.getUuid());
    StreamProgress streamProgress = new StreamProgress();
    streamProgress.setFailToLock(true);
    metrics.decPartitionStreamCount();
    tracker.tryClaim(streamProgress);
    return ProcessContinuation.stop();
  }
  // Process CloseStream if it exists
  CloseStream closeStream = tracker.currentRestriction().getCloseStream();
  if (closeStream != null) {
    LOG.debug("RCSP: Processing CloseStream");
    metrics.decPartitionStreamCount();
    if (closeStream.getStatus().getCode() == Status.Code.OK) {
      // We need to update watermark here. We're terminating this stream because we have reached
      // endTime. Instant.now is greater or equal to endTime. The goal here is
      // DNP will need to know this stream has passed the endTime so DNP can eventually terminate.
      Instant terminatingWatermark = Instant.ofEpochMilli(Long.MAX_VALUE);
      Instant endTime = partitionRecord.getEndTime();
      if (endTime != null) {
        terminatingWatermark = endTime;
      }
      watermarkEstimator.setWatermark(terminatingWatermark);
      metadataTableDao.updateWatermark(
          partitionRecord.getPartition(), watermarkEstimator.currentWatermark(), null);
      LOG.info(
          "RCSP {}: Reached end time, terminating...",
          formatByteStringRange(partitionRecord.getPartition()));
      return ProcessContinuation.stop();
    }
    if (closeStream.getStatus().getCode() != Status.Code.OUT_OF_RANGE) {
      LOG.error(
          "RCSP {}: Reached unexpected terminal state: {}",
          formatByteStringRange(partitionRecord.getPartition()),
          closeStream.getStatus());
      return ProcessContinuation.stop();
    }
    // Release the lock only if the uuid matches. In normal operation this doesn't change
    // anything. However, it's possible for this RCSP to crash while processing CloseStream but
    // after the side effects of writing the new partitions to the metadata table. New partitions
    // can be created while this RCSP restarts from the previous checkpoint and processes the
    // CloseStream again. In certain race scenarios the child partitions may merge back to this
    // partition, but as a new RCSP. The new partition (same as this partition) would write the
    // exact same content to the metadata table but with a different uuid. We don't want to
    // accidentally delete the StreamPartition because it now belongs to the new RCSP.
    // If the uuid is the same (meaning this race scenario did not take place) we release the lock
    // and mark the StreamPartition to be deleted, so we can delete it after we have written the
    // NewPartitions.
    metadataTableDao.releaseStreamPartitionLockForDeletion(
        partitionRecord.getPartition(), partitionRecord.getUuid());
    // The partitions in the continuation tokens must cover the same key space as this partition.
    // If there's only 1 token, then the token's partition is equals to this partition.
    // If there are more than 1 tokens, then the tokens form a continuous row range equals to this
    // partition.
    List<ByteStringRange> childPartitions = new ArrayList<>();
    List<ByteStringRange> tokenPartitions = new ArrayList<>();
    // Check if NewPartitions field exists, if not we default to using just the
    // ChangeStreamContinuationTokens.
    boolean useNewPartitionsField =
        closeStream.getNewPartitions().size()
            == closeStream.getChangeStreamContinuationTokens().size();
    for (int i = 0; i < closeStream.getChangeStreamContinuationTokens().size(); i++) {
      ByteStringRange childPartition;
      if (useNewPartitionsField) {
        childPartition = closeStream.getNewPartitions().get(i);
      } else {
        childPartition = closeStream.getChangeStreamContinuationTokens().get(i).getPartition();
      }
      childPartitions.add(childPartition);
      ChangeStreamContinuationToken token =
          getTokenWithCorrectPartition(
              partitionRecord.getPartition(), closeStream.getChangeStreamContinuationTokens().get(i));
      tokenPartitions.add(token.getPartition());
      metadataTableDao.writeNewPartition(
          new NewPartition(
              childPartition, Collections.singletonList(token), watermarkEstimator.getState()));
    }
    LOG.info(
        "RCSP {}: Split/Merge into {}",
        formatByteStringRange(partitionRecord.getPartition()),
        partitionsToString(childPartitions));
    if (!coverSameKeySpace(tokenPartitions, partitionRecord.getPartition())) {
      LOG.warn(
          "RCSP {}: CloseStream has tokens {} that don't cover the entire keyspace",
          formatByteStringRange(partitionRecord.getPartition()),
          partitionsToString(tokenPartitions));
    }
    // Perform the real cleanup. This step is no op if the race mentioned above occurs (splits and
    // merges results back to this partition again) because when we register the "new" partition,
    // we unset the deletion bit.
    metadataTableDao.deleteStreamPartitionRow(partitionRecord.getPartition());
    return ProcessContinuation.stop();
  }
  // Update the metadata table with the watermark
  metadataTableDao.updateWatermark(
      partitionRecord.getPartition(),
      watermarkEstimator.getState(),
      tracker.currentRestriction().getCurrentToken());
  // Start to stream the partition.
  ServerStream<ChangeStreamRecord> stream = null;
  try {
    stream =
        changeStreamDao.readChangeStreamPartition(
            partitionRecord, tracker.currentRestriction(), partitionRecord.getEndTime(), heartbeatDuration);
    for (ChangeStreamRecord record : stream) {
      Optional<ProcessContinuation> result =
          changeStreamAction.run(
              partitionRecord, record, tracker, receiver, watermarkEstimator, throughputEstimator);
      // changeStreamAction will usually return Optional.empty() except for when a checkpoint
      // (either runner or pipeline initiated) is required.
      if (result.isPresent()) {
        return result.get();
      }
    }
  } catch (Exception e) {
    throw e;
  } finally {
    // Always cancel the server stream so the connection is not leaked.
    if (stream != null) {
      stream.cancel();
    }
  }
  return ProcessContinuation.resume();
}
// If a subsequent run no longer holds the partition lock, the SDF must claim a
// fail-to-lock progress and stop gracefully without invoking the change-stream action.
@Test public void testLockingRowNotNeededAfterFirstRunNotSame() throws IOException {
  when(metadataTableDao.lockAndRecordPartition(partitionRecord)).thenReturn(false);
  // After the first run, we check if the lock is the same, but in this case it's not.
  when(restriction.isEmpty()).thenReturn(false);
  when(metadataTableDao.doHoldLock(partition, uuid)).thenReturn(false);
  final DoFn.ProcessContinuation result =
      action.run(partitionRecord, tracker, receiver, watermarkEstimator);
  assertEquals(DoFn.ProcessContinuation.stop(), result);
  // On failure to lock, we try to claim a fail to lock, so it will terminate gracefully.
  StreamProgress streamProgress = new StreamProgress();
  streamProgress.setFailToLock(true);
  verify(tracker).tryClaim(streamProgress);
  verify(changeStreamAction, never()).run(any(), any(), any(), any(), any(), any());
}
/**
 * Key enumeration is not supported by this store.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public Iterable<K> loadAllKeys() {
  throw new UnsupportedOperationException();
}
// loadAllKeys is documented as unsupported and must throw.
@Test(expected = UnsupportedOperationException.class)
public void loadAllKeys() {
  cacheStore.loadAllKeys();
}
/**
 * Starts a fluent builder for describing a new topic with the given name.
 */
public static NewTopicBuilder defineTopic(String topicName) {
  return new NewTopicBuilder(topicName);
}
// Sweeps broker counts, replication factors, and default partition/RF settings, asserting
// topic creation succeeds with the requested RF in each configuration.
// NOTE(review): the loops use strict '<' bounds (rf < maxRf, defaultRF < maxDefaultRf),
// so the maximum values — and the single-broker case entirely — are never exercised;
// confirm whether '<=' was intended.
@Test public void shouldCreateTopicWithReplicationFactorWhenItDoesNotExist() {
  for (int numBrokers = 1; numBrokers < 10; ++numBrokers) {
    int maxRf = Math.min(numBrokers, 5);
    int maxDefaultRf = Math.min(numBrokers, 5);
    for (short rf = 1; rf < maxRf; ++rf) {
      NewTopic newTopic = TopicAdmin.defineTopic("myTopic").replicationFactor(rf).compacted().build();
      // Try clusters with no default replication factor or default partitions
      assertTopicCreation(numBrokers, newTopic, null, null, rf, 1);
      // Try clusters with different default partitions
      for (int numPartitions = 1; numPartitions < 30; ++numPartitions) {
        assertTopicCreation(numBrokers, newTopic, numPartitions, null, rf, numPartitions);
      }
      // Try clusters with different default replication factors
      for (int defaultRF = 1; defaultRF < maxDefaultRf; ++defaultRF) {
        assertTopicCreation(numBrokers, newTopic, null, defaultRF, rf, 1);
      }
    }
  }
}
/**
 * Sanitizes this handle's Resilience4j settings in place: every negative numeric value
 * is replaced with its default from {@link Constants}; circuitEnable is coerced to
 * CIRCUIT_ENABLE or CIRCUIT_DISABLE; failureRateThreshold outside [0, 100] falls back
 * to the default; a fallbackUri equal to "0" is treated as "no fallback" (empty string).
 *
 * @param resilience4JHandle the handle to normalize (mutated in place)
 */
public void checkData(final Resilience4JHandle resilience4JHandle) {
    resilience4JHandle.setTimeoutDurationRate(resilience4JHandle.getTimeoutDurationRate() < 0 ? Constants.TIMEOUT_DURATION_RATE : resilience4JHandle.getTimeoutDurationRate());
    resilience4JHandle.setLimitRefreshPeriod(resilience4JHandle.getLimitRefreshPeriod() < 0 ? Constants.LIMIT_REFRESH_PERIOD : resilience4JHandle.getLimitRefreshPeriod());
    resilience4JHandle.setLimitForPeriod(resilience4JHandle.getLimitForPeriod() < 0 ? Constants.LIMIT_FOR_PERIOD : resilience4JHandle.getLimitForPeriod());
    // Any value other than CIRCUIT_ENABLE is normalized to CIRCUIT_DISABLE.
    resilience4JHandle.setCircuitEnable(resilience4JHandle.getCircuitEnable() != Constants.CIRCUIT_ENABLE ? Constants.CIRCUIT_DISABLE : Constants.CIRCUIT_ENABLE);
    resilience4JHandle.setTimeoutDuration(resilience4JHandle.getTimeoutDuration() < 0 ? Constants.TIMEOUT_DURATION : resilience4JHandle.getTimeoutDuration());
    // "0" is the sentinel meaning "no fallback URI configured".
    resilience4JHandle.setFallbackUri(!"0".equals(resilience4JHandle.getFallbackUri()) ? resilience4JHandle.getFallbackUri() : "");
    resilience4JHandle.setSlidingWindowSize(resilience4JHandle.getSlidingWindowSize() < 0 ? Constants.SLIDING_WINDOW_SIZE : resilience4JHandle.getSlidingWindowSize());
    resilience4JHandle.setSlidingWindowType(resilience4JHandle.getSlidingWindowType() < 0 ? Constants.SLIDING_WINDOW_TYPE : resilience4JHandle.getSlidingWindowType());
    resilience4JHandle.setMinimumNumberOfCalls(resilience4JHandle.getMinimumNumberOfCalls() < 0 ? Constants.MINIMUM_NUMBER_OF_CALLS : resilience4JHandle.getMinimumNumberOfCalls());
    resilience4JHandle.setWaitIntervalFunctionInOpenState(resilience4JHandle.getWaitIntervalFunctionInOpenState() < 0 ? Constants.WAIT_INTERVAL_FUNCTION_IN_OPEN_STATE : resilience4JHandle.getWaitIntervalFunctionInOpenState());
    resilience4JHandle.setPermittedNumberOfCallsInHalfOpenState(resilience4JHandle.getPermittedNumberOfCallsInHalfOpenState() < 0 ?
            Constants.PERMITTED_NUMBER_OF_CALLS_IN_HALF_OPEN_STATE : resilience4JHandle.getPermittedNumberOfCallsInHalfOpenState());
    // Threshold is a percentage, so it must lie within [0, 100].
    resilience4JHandle.setFailureRateThreshold(
            resilience4JHandle.getFailureRateThreshold() < 0 || resilience4JHandle.getFailureRateThreshold() > 100 ? Constants.FAILURE_RATE_THRESHOLD : resilience4JHandle.getFailureRateThreshold());
}
/**
 * Feeds checkData() a handle whose numeric fields are all negative and whose fallbackUri
 * is a real URI, then asserts every field was reset to its Constants default while the
 * non-sentinel fallbackUri was preserved.
 */
@Test
public void testCheckData() {
    Resilience4JHandle handle = new Resilience4JHandle();
    handle.setTimeoutDurationRate(-1);
    handle.setLimitRefreshPeriod(-2);
    handle.setLimitForPeriod(-3);
    handle.setCircuitEnable(-4);
    handle.setTimeoutDuration(-5);
    handle.setFallbackUri("uri");
    handle.setSlidingWindowSize(-6);
    handle.setSlidingWindowType(-7);
    handle.setMinimumNumberOfCalls(-8);
    handle.setWaitIntervalFunctionInOpenState(-9);
    handle.setPermittedNumberOfCallsInHalfOpenState(-10);
    handle.setFailureRateThreshold(-11);
    handle.checkData(handle);
    assertThat(handle.getTimeoutDurationRate(), is(Constants.TIMEOUT_DURATION_RATE));
    assertThat(handle.getLimitRefreshPeriod(), is(Constants.LIMIT_REFRESH_PERIOD));
    assertThat(handle.getLimitForPeriod(), is(Constants.LIMIT_FOR_PERIOD));
    assertThat(handle.getCircuitEnable(), is(Constants.CIRCUIT_DISABLE));
    assertThat(handle.getTimeoutDuration(), is(Constants.TIMEOUT_DURATION));
    // "uri" is not the "0" sentinel, so it must be kept as-is.
    assertThat(handle.getFallbackUri(), is("uri"));
    assertThat(handle.getSlidingWindowSize(), is(Constants.SLIDING_WINDOW_SIZE));
    assertThat(handle.getSlidingWindowType(), is(Constants.SLIDING_WINDOW_TYPE));
    assertThat(handle.getMinimumNumberOfCalls(), is(Constants.MINIMUM_NUMBER_OF_CALLS));
    assertThat(handle.getWaitIntervalFunctionInOpenState(), is(Constants.WAIT_INTERVAL_FUNCTION_IN_OPEN_STATE));
    assertThat(handle.getPermittedNumberOfCallsInHalfOpenState(), is(Constants.PERMITTED_NUMBER_OF_CALLS_IN_HALF_OPEN_STATE));
    assertThat(handle.getFailureRateThreshold(), is(Constants.FAILURE_RATE_THRESHOLD));
}
/**
 * Runs the recursive variant of Tarjan's strongly connected components algorithm on the
 * given graph, considering only edges accepted by the filter.
 *
 * @param excludeSingleNodeComponents if true, single-node components are reported separately
 * @return the discovered connected components
 */
public static ConnectedComponents findComponentsRecursive(Graph graph, EdgeFilter edgeFilter, boolean excludeSingleNodeComponents) {
    TarjanSCC tarjan = new TarjanSCC(graph, edgeFilter, excludeSingleNodeComponents);
    return tarjan.findComponentsRecursive();
}
/**
 * Regression test for issue 761: a graph whose eastern part is reachable only through
 * directed edges must split into exactly two SCCs, with no single-node components.
 */
@Test
public void testTarjan_issue761() {
    // 11-10-9
    // |     |
    // 0-1-2->3->4->5
    //        |     |
    //        6     12
    //        |     |
    //        7     13-14
    //        |      \|
    //        8      15-16
    // oneway main road
    graph.edge(0, 1).setDistance(1).set(speedEnc, 10, 10);
    graph.edge(1, 2).setDistance(1).set(speedEnc, 10, 10);
    graph.edge(2, 3).setDistance(1).set(speedEnc, 10, 0);
    graph.edge(3, 4).setDistance(1).set(speedEnc, 10, 0);
    graph.edge(4, 5).setDistance(1).set(speedEnc, 10, 0);
    // going south from main road
    graph.edge(3, 6).setDistance(1).set(speedEnc, 10, 10);
    graph.edge(6, 7).setDistance(1).set(speedEnc, 10, 10);
    graph.edge(7, 8).setDistance(1).set(speedEnc, 10, 10);
    // connects the two nodes 2 and 4
    graph.edge(4, 9).setDistance(1).set(speedEnc, 10, 10);
    graph.edge(9, 10).setDistance(1).set(speedEnc, 10, 10);
    graph.edge(10, 11).setDistance(1).set(speedEnc, 10, 10);
    graph.edge(11, 2).setDistance(1).set(speedEnc, 10, 10);
    // eastern part (only connected by a single directed edge to the rest of the graph)
    graph.edge(5, 12).setDistance(1).set(speedEnc, 10, 10);
    graph.edge(12, 13).setDistance(1).set(speedEnc, 10, 10);
    graph.edge(13, 14).setDistance(1).set(speedEnc, 10, 10);
    graph.edge(14, 15).setDistance(1).set(speedEnc, 10, 10);
    graph.edge(15, 13).setDistance(1).set(speedEnc, 10, 10);
    graph.edge(15, 16).setDistance(1).set(speedEnc, 10, 10);
    TarjanSCC.ConnectedComponents scc = TarjanSCC.findComponentsRecursive(graph, edgeFilter, false);
    assertEquals(2, scc.getTotalComponents());
    assertTrue(scc.getSingleNodeComponents().isEmpty());
    assertEquals(17, scc.getNodes());
    // The western component (11 nodes) must be reported as the biggest one.
    assertEquals(scc.getComponents().get(1), scc.getBiggestComponent());
    assertEquals(2, scc.getComponents().size());
    assertEquals(IntArrayList.from(14, 16, 15, 13, 12, 5), scc.getComponents().get(0));
    assertEquals(IntArrayList.from(8, 7, 6, 3, 4, 9, 10, 11, 2, 1, 0), scc.getComponents().get(1));
}
/**
 * Executes this scenario: runs before-hooks, steps in order (stepping driven by
 * nextStepIndex() so debug step-back/skip can alter the flow), collects step results,
 * and converts any runner-level crash into a fake failed step. The finally block is
 * the critical part: afterRun() must fire even on crashes so countdown latches used by
 * the parallel runner are released, otherwise the suite can hang.
 */
@Override
public void run() {
    try { // make sure we call afterRun() even on crashes
        // and operate countdown latches, else we may hang the parallel runner
        if (steps == null) {
            // steps == null means beforeRun() has not happened yet for this scenario
            beforeRun();
        }
        if (skipped) {
            return;
        }
        int count = steps.size();
        int index = 0;
        while ((index = nextStepIndex()) < count) {
            currentStep = steps.get(index);
            execute(currentStep);
            if (currentStepResult != null) { // can be null if debug step-back or hook skip
                result.addStepResult(currentStepResult);
            }
        }
    } catch (Exception e) {
        // flush the in-flight step result before recording the crash itself
        if (currentStepResult != null) {
            result.addStepResult(currentStepResult);
        }
        logError("scenario [run] failed\n" + StringUtils.throwableToString(e));
        currentStepResult = result.addFakeStepResult("scenario [run] failed", e);
    } finally {
        if (!skipped) {
            afterRun();
            if (isFailed() && engine.getConfig().isAbortSuiteOnFailure()) {
                featureRuntime.suite.abort();
            }
        }
        if (caller.isNone()) {
            logAppender.close(); // reclaim memory
        }
    }
}
/** karate.filter() with an arrow-function predicate must keep only matching list elements. */
@Test
void testFilter() {
    run(
        "def foo = [{ a: 0 }, { a: 1 }, { a: 2 }]",
        "def res = karate.filter(foo, x => x.a > 0)"
    );
    matchVar("res", "[{ a: 1 }, { a: 2 }]");
}
/**
 * Runs the delete-expired-commitlog admin command. With -b it targets a single broker
 * address; otherwise it targets the whole cluster named by -c. Prints "success" or
 * "false" to stdout and always shuts the admin client down.
 *
 * @throws SubCommandException wrapping any failure from the admin client
 */
@Override
public void execute(CommandLine commandLine, Options options, RPCHook rpcHook) throws SubCommandException {
    DefaultMQAdminExt defaultMQAdminExt = new DefaultMQAdminExt(rpcHook);
    // unique instance name avoids client-id clashes when several tools run concurrently
    defaultMQAdminExt.setInstanceName(Long.toString(System.currentTimeMillis()));
    if (commandLine.hasOption('n')) {
        defaultMQAdminExt.setNamesrvAddr(commandLine.getOptionValue('n').trim());
    }
    try {
        boolean result = false;
        defaultMQAdminExt.start();
        if (commandLine.hasOption('b')) {
            // broker address takes precedence over cluster name
            String addr = commandLine.getOptionValue('b').trim();
            result = defaultMQAdminExt.deleteExpiredCommitLogByAddr(addr);
        } else {
            String cluster = commandLine.getOptionValue('c');
            if (null != cluster)
                cluster = cluster.trim();
            result = defaultMQAdminExt.deleteExpiredCommitLog(cluster);
        }
        System.out.printf(result ? "success" : "false");
    } catch (Exception e) {
        throw new SubCommandException(this.getClass().getSimpleName() + " command execute failed.", e);
    } finally {
        defaultMQAdminExt.shutdown();
    }
}
/**
 * End-to-end check of the delete-expired-commitlog command against a local broker:
 * "success" must be printed to stdout and nothing to stderr.
 */
@Test
public void testExecute() throws SubCommandException {
    DeleteExpiredCommitLogSubCommand cmd = new DeleteExpiredCommitLogSubCommand();
    Options options = ServerUtil.buildCommandlineOptions(new Options());
    String[] subargs = new String[] {"-b 127.0.0.1:" + listenPort(), "-c default-cluster"};
    final CommandLine commandLine = ServerUtil.parseCmdLine("mqadmin " + cmd.commandName(), subargs, cmd.buildCommandlineOptions(options), new DefaultParser());
    cmd.execute(commandLine, options, null);
    Assert.assertTrue(outContent.toString().contains("success"));
    Assert.assertEquals("", errContent.toString());
}
/**
 * Returns the currently installed OpenTelemetry instance. Read through an atomic
 * reference, so the SDK may be swapped while the service is running.
 */
public OpenTelemetry getOpenTelemetry() {
    return openTelemetrySdkReference.get();
}
/**
 * A meter obtained via getOpenTelemetry().getMeter(name) must stamp its metrics with
 * the requested instrumentation scope.
 */
@Test
public void testIsInstrumentationNameSetOnMeter() {
    var meter = openTelemetryService.getOpenTelemetry().getMeter("testInstrumentationScope");
    meter.counterBuilder("dummyCounter").build().add(1);
    assertThat(reader.collectAllMetrics())
        .anySatisfy(metricData -> assertThat(metricData)
            .hasInstrumentationScope(InstrumentationScopeInfo.create("testInstrumentationScope")));
}
/**
 * Converts a raw scenario value into an instance of the named target class.
 * The literal string "null" is mapped to null first; a non-String input is accepted
 * only if it is already an instance of the target class. String inputs are parsed
 * according to the target type (primitives, wrappers, BigDecimal/BigInteger, the
 * java.time local types via ISO formatters, and enums by constant name).
 *
 * @param className   fully qualified name of the desired target type
 * @param cleanValue  raw value; a String is parsed, anything else must already match
 * @param classLoader loader used to resolve {@code className}
 * @return the converted value (null allowed for non-primitive targets)
 * @throws IllegalArgumentException if the value cannot be parsed or the type is unsupported
 */
public static Object convertValue(String className, Object cleanValue, ClassLoader classLoader) {
    // "null" string is converted to null
    cleanValue = "null".equals(cleanValue) ? null : cleanValue;
    if (!isPrimitive(className) && cleanValue == null) {
        return null;
    }
    Class<?> clazz = loadClass(className, classLoader);
    // if it is not a String, it has to be an instance of the desired type
    if (!(cleanValue instanceof String)) {
        if (clazz.isInstance(cleanValue)) {
            return cleanValue;
        }
        throw new IllegalArgumentException(new StringBuilder().append("Object ").append(cleanValue)
                .append(" is not a String or an instance of ").append(className).toString());
    }
    String value = (String) cleanValue;
    try {
        // NOTE: branch order matters — each branch tests clazz.isAssignableFrom(type),
        // i.e. whether the target can hold that type, checking both wrapper and primitive forms.
        if (clazz.isAssignableFrom(String.class)) {
            return value;
        } else if (clazz.isAssignableFrom(BigDecimal.class)) {
            return parseBigDecimal(value);
        } else if (clazz.isAssignableFrom(BigInteger.class)) {
            return parseBigInteger(value);
        } else if (clazz.isAssignableFrom(Boolean.class) || clazz.isAssignableFrom(boolean.class)) {
            return parseBoolean(value);
        } else if (clazz.isAssignableFrom(Byte.class) || clazz.isAssignableFrom(byte.class)) {
            return Byte.parseByte(value);
        } else if (clazz.isAssignableFrom(Character.class) || clazz.isAssignableFrom(char.class)) {
            return parseChar(value);
        } else if (clazz.isAssignableFrom(Double.class) || clazz.isAssignableFrom(double.class)) {
            return Double.parseDouble(cleanStringForNumberParsing(value));
        } else if (clazz.isAssignableFrom(Float.class) || clazz.isAssignableFrom(float.class)) {
            return Float.parseFloat(cleanStringForNumberParsing(value));
        } else if (clazz.isAssignableFrom(Integer.class) || clazz.isAssignableFrom(int.class)) {
            return Integer.parseInt(cleanStringForNumberParsing(value));
        } else if (clazz.isAssignableFrom(LocalDate.class)) {
            return LocalDate.parse(value, DateTimeFormatter.ISO_LOCAL_DATE);
        } else if (clazz.isAssignableFrom(LocalDateTime.class)) {
            return LocalDateTime.parse(value, DateTimeFormatter.ISO_LOCAL_DATE_TIME);
        } else if (clazz.isAssignableFrom(LocalTime.class)) {
            return LocalTime.parse(value, DateTimeFormatter.ISO_LOCAL_TIME);
        } else if (clazz.isAssignableFrom(Long.class) || clazz.isAssignableFrom(long.class)) {
            return Long.parseLong(cleanStringForNumberParsing(value));
        } else if (clazz.isAssignableFrom(Short.class) || clazz.isAssignableFrom(short.class)) {
            return Short.parseShort(cleanStringForNumberParsing(value));
        } else if (Enum.class.isAssignableFrom(clazz)) {
            // enums are matched by exact constant name
            return Enum.valueOf(((Class<? extends Enum>) clazz), value);
        }
    } catch (RuntimeException e) {
        // wrap every parse failure with the offending value and target type for diagnostics
        throw new IllegalArgumentException(new StringBuilder().append("Impossible to parse '")
                .append(value).append("' as ").append(className).append(" [")
                .append(e.getMessage()).append("]").toString());
    }
    throw new IllegalArgumentException(new StringBuilder().append("Class ").append(className)
            .append(" is not natively supported. Please use an MVEL expression" +
                            " to use it.").toString());
}
/** A misspelled enum constant ("FIRS") must fail with the standard parse-error message. */
@Test
public void convertValueEnumWrongValue() {
    String enumTestCanonicalName = EnumTest.class.getCanonicalName();
    assertThatThrownBy(() -> convertValue(EnumTest.class.getCanonicalName(), "FIRS", classLoader))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessageStartingWith("Impossible to parse 'FIRS' as " + enumTestCanonicalName);
}
/**
 * Lists the pods in the configured namespace via the Kubernetes REST API and enriches
 * each with its public (externally reachable) address. Known REST failures are handled
 * by {@code handleKnownException} rather than propagated.
 *
 * @return the discovered endpoints (possibly a fallback result on known REST errors)
 */
List<Endpoint> endpoints() {
    try {
        String urlString = String.format("%s/api/v1/namespaces/%s/pods", kubernetesMaster, namespace);
        return enrichWithPublicAddresses(parsePodsList(callGet(urlString)));
    } catch (RestClientException e) {
        return handleKnownException(e);
    }
}
/**
 * With LoadBalancer services exposing hostnames (not IPs), endpoints() must resolve
 * private pod addresses and pair them with the LB hostname/port as public addresses.
 */
@Test
public void endpointsByNamespaceWithLoadBalancerHostname() throws JsonProcessingException {
    // given
    stub(String.format("/api/v1/namespaces/%s/pods", NAMESPACE), podsListResponse());
    stub(String.format("/api/v1/namespaces/%s/endpoints", NAMESPACE), endpointsListResponse());
    stub(String.format("/api/v1/namespaces/%s/services/hazelcast-0", NAMESPACE),
        serviceLbHost(servicePort(32123, 5701, 31916), "abc.hostname"));
    stub(String.format("/api/v1/namespaces/%s/services/service-1", NAMESPACE),
        serviceLbHost(servicePort(32124, 5701, 31916), "abc2.hostname"));
    stub(String.format("/api/v1/namespaces/%s/pods/hazelcast-0", NAMESPACE), pod("hazelcast-0", NAMESPACE, "node-name-1", 5701));
    stub(String.format("/api/v1/namespaces/%s/pods/hazelcast-1", NAMESPACE), pod("hazelcast-1", NAMESPACE, "node-name-2", 5701));
    // when
    List<Endpoint> result = kubernetesClient.endpoints();
    // then
    assertThat(formatPrivate(result)).containsExactlyInAnyOrder(ready("192.168.0.25", 5701), ready("172.17.0.5", 5702));
    assertThat(formatPublic(result)).containsExactlyInAnyOrder(ready("abc.hostname", 32123), ready("abc2.hostname", 32124));
}
/**
 * Selects the sharding route engine for a query based on its SQL statement category.
 * Dispatch order matters: TCL statements broadcast to all databases; DDL goes through
 * cursor- or DDL-specific routing; DAL/DCL have their own engines; everything else is
 * treated as DQL.
 *
 * @return the route engine appropriate for the statement type
 */
public static ShardingRouteEngine newInstance(final ShardingRule shardingRule, final ShardingSphereDatabase database, final QueryContext queryContext,
                                              final ShardingConditions shardingConditions, final ConfigurationProperties props, final ConnectionContext connectionContext) {
    SQLStatementContext sqlStatementContext = queryContext.getSqlStatementContext();
    SQLStatement sqlStatement = sqlStatementContext.getSqlStatement();
    if (sqlStatement instanceof TCLStatement) {
        // transaction control must reach every shard
        return new ShardingDatabaseBroadcastRoutingEngine();
    }
    if (sqlStatement instanceof DDLStatement) {
        if (sqlStatementContext instanceof CursorAvailable) {
            // cursor-related DDL (e.g. OPEN/CLOSE) is routed like the underlying query
            return getCursorRouteEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props);
        }
        return getDDLRoutingEngine(shardingRule, database, sqlStatementContext);
    }
    if (sqlStatement instanceof DALStatement) {
        return getDALRoutingEngine(shardingRule, database, sqlStatementContext, connectionContext);
    }
    if (sqlStatement instanceof DCLStatement) {
        return getDCLRoutingEngine(shardingRule, database, sqlStatementContext);
    }
    // fall-through: DML/DQL routing driven by sharding conditions and hints
    return getDQLRoutingEngine(shardingRule, database, sqlStatementContext, queryContext.getHintValueContext(), shardingConditions, props, connectionContext);
}
/** A plain SELECT touching no single table must fall through to unicast routing. */
@Test
void assertNewInstanceForSelectWithoutSingleTable() {
    SQLStatement sqlStatement = mock(MySQLSelectStatement.class);
    when(sqlStatementContext.getSqlStatement()).thenReturn(sqlStatement);
    QueryContext queryContext = new QueryContext(sqlStatementContext, "", Collections.emptyList(), new HintValueContext(), mockConnectionContext(), mock(ShardingSphereMetaData.class));
    ShardingRouteEngine actual = ShardingRouteEngineFactory.newInstance(shardingRule, database, queryContext, shardingConditions, props, new ConnectionContext(Collections::emptySet));
    assertThat(actual, instanceOf(ShardingUnicastRoutingEngine.class));
}
@Override public void open() { super.open(); for (String propertyKey : properties.stringPropertyNames()) { LOGGER.debug("propertyKey: {}", propertyKey); String[] keyValue = propertyKey.split("\\.", 2); if (2 == keyValue.length) { LOGGER.debug("key: {}, value: {}", keyValue[0], keyValue[1]); Properties prefixProperties; if (basePropertiesMap.containsKey(keyValue[0])) { prefixProperties = basePropertiesMap.get(keyValue[0]); } else { prefixProperties = new Properties(); basePropertiesMap.put(keyValue[0].trim(), prefixProperties); } prefixProperties.put(keyValue[1].trim(), getProperty(propertyKey)); } } Set<String> removeKeySet = new HashSet<>(); for (String key : basePropertiesMap.keySet()) { if (!COMMON_KEY.equals(key)) { Properties properties = basePropertiesMap.get(key); if (!properties.containsKey(DRIVER_KEY) || !properties.containsKey(URL_KEY)) { LOGGER.error("{} will be ignored. {}.{} and {}.{} is mandatory.", key, DRIVER_KEY, key, key, URL_KEY); removeKeySet.add(key); } } } for (String key : removeKeySet) { basePropertiesMap.remove(key); } LOGGER.debug("JDBC PropertiesMap: {}", basePropertiesMap); setMaxLineResults(); setMaxRows(); //TODO(zjffdu) Set different sql splitter for different sql dialects. this.sqlSplitter = new SqlSplitter(); }
/**
 * Two semicolon-separated statements must produce two table results, and a ';' inside
 * a quoted literal must not be treated as a statement separator.
 */
@Test
void testDefaultSplitQuries() throws IOException, InterpreterException {
    Properties properties = new Properties();
    properties.setProperty("common.max_count", "1000");
    properties.setProperty("common.max_retry", "3");
    properties.setProperty("default.driver", "org.h2.Driver");
    properties.setProperty("default.url", getJdbcConnection());
    properties.setProperty("default.user", "");
    properties.setProperty("default.password", "");
    JDBCInterpreter t = new JDBCInterpreter(properties);
    t.open();
    String sqlQuery = "select * from test_table;" + "select * from test_table WHERE ID = ';';";
    InterpreterResult interpreterResult = t.interpret(sqlQuery, context);
    assertEquals(InterpreterResult.Code.SUCCESS, interpreterResult.code());
    List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();
    assertEquals(2, resultMessages.size());
    assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
    assertEquals("ID\tNAME\na\ta_name\nb\tb_name\nc\tnull\n", resultMessages.get(0).getData());
    assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(1).getType());
    // the second query matches no rows, so only the header is returned
    assertEquals("ID\tNAME\n", resultMessages.get(1).getData());
}
/**
 * Extracts a float[] from the list, avoiding a copy when possible: if the list's backing
 * array is exactly full (no spare capacity), it is returned directly; otherwise a
 * correctly sized copy is made. Note the zero-copy path exposes the backing array to
 * the caller, so mutations would be visible to the list.
 */
public static float[] toFloatArray(FloatArrayList floatArrayList) {
    float[] backing = floatArrayList.elements();
    if (backing.length == floatArrayList.size()) {
        return backing;
    }
    return floatArrayList.toFloatArray();
}
/** Covers the empty, single-element, and multi-element cases of toFloatArray. */
@Test
public void testToFloatArray() {
    // Test empty list
    FloatArrayList floatArrayList = new FloatArrayList();
    float[] floatArray = ArrayListUtils.toFloatArray(floatArrayList);
    assertEquals(floatArray.length, 0);
    // Test list with one element
    floatArrayList.add(1.0f);
    floatArray = ArrayListUtils.toFloatArray(floatArrayList);
    assertEquals(floatArray.length, 1);
    assertEquals(floatArray[0], 1.0f);
    // Test list with multiple elements
    floatArrayList.add(2.0f);
    floatArrayList.add(3.0f);
    floatArray = ArrayListUtils.toFloatArray(floatArrayList);
    assertEquals(floatArray.length, 3);
    assertEquals(floatArray[0], 1.0f);
    assertEquals(floatArray[1], 2.0f);
    assertEquals(floatArray[2], 3.0f);
}
/**
 * Creates a store supplier backed by an in-memory LRU cache bounded to maxCacheSize
 * entries. Each call to {@code get()} on the returned supplier produces a fresh store
 * instance.
 *
 * @param name         store name; must not be null
 * @param maxCacheSize maximum number of entries retained; must be non-negative
 * @throws NullPointerException     if {@code name} is null
 * @throws IllegalArgumentException if {@code maxCacheSize} is negative
 */
public static KeyValueBytesStoreSupplier lruMap(final String name, final int maxCacheSize) {
    Objects.requireNonNull(name, "name cannot be null");
    if (maxCacheSize < 0) {
        throw new IllegalArgumentException("maxCacheSize cannot be negative");
    }
    return new KeyValueBytesStoreSupplier() {
        @Override
        public String name() {
            return name;
        }

        @Override
        public KeyValueStore<Bytes, byte[]> get() {
            return new MemoryNavigableLRUCache(name, maxCacheSize);
        }

        @Override
        public String metricsScope() {
            // used to namespace the store's metrics
            return "in-memory-lru";
        }
    };
}
/** lruMap's supplier must produce a MemoryNavigableLRUCache instance. */
@Test
public void shouldCreateMemoryNavigableCache() {
    assertThat(Stores.lruMap("map", 10).get(), instanceOf(MemoryNavigableLRUCache.class));
}
@TpsControl(pointName = "ClusterConfigChangeNotify") @Override @ExtractorManager.Extractor(rpcExtractor = ConfigRequestParamExtractor.class) public ConfigChangeClusterSyncResponse handle(ConfigChangeClusterSyncRequest configChangeSyncRequest, RequestMeta meta) throws NacosException { DumpRequest dumpRequest = DumpRequest.create(configChangeSyncRequest.getDataId(), configChangeSyncRequest.getGroup(), configChangeSyncRequest.getTenant(), configChangeSyncRequest.getLastModified(), meta.getClientIp()); dumpRequest.setBeta(configChangeSyncRequest.isBeta()); dumpRequest.setBatch(configChangeSyncRequest.isBatch()); dumpRequest.setTag(configChangeSyncRequest.getTag()); dumpService.dump(dumpRequest); return new ConfigChangeClusterSyncResponse(); }
/** A well-formed cluster sync request must yield a SUCCESS response code. */
@Test
void testHandle() throws NacosException {
    ConfigChangeClusterSyncRequest configChangeSyncRequest = new ConfigChangeClusterSyncRequest();
    configChangeSyncRequest.setRequestId("");
    configChangeSyncRequest.setDataId("dataId");
    configChangeSyncRequest.setTag("tag");
    configChangeSyncRequest.setLastModified(1L);
    configChangeSyncRequest.setBeta(false);
    RequestMeta meta = new RequestMeta();
    meta.setClientIp("1.1.1.1");
    ConfigChangeClusterSyncResponse configChangeClusterSyncResponse = configChangeClusterSyncRequestHandler.handle(
            configChangeSyncRequest, meta);
    assertEquals(configChangeClusterSyncResponse.getResultCode(), ResponseCode.SUCCESS.getCode());
}
/**
 * CLI entry point: validates the config file's presence, loads the migration config,
 * and delegates to the overload that performs the work with a real ksqlDB client.
 *
 * @return 0 on success, 1 on missing/invalid config or downstream failure
 */
@Override
protected int command() {
    if (!validateConfigFilePresent()) {
        return 1;
    }
    final MigrationConfig config;
    try {
        config = MigrationConfig.load(getConfigFile());
    } catch (KsqlException | MigrationException e) {
        // log and convert to a non-zero exit code rather than propagate
        LOGGER.error(e.getMessage());
        return 1;
    }
    return command(config, MigrationsUtil::getKsqlClient);
}
/** The command must exit 0 when the migrations stream/table were never created. */
@Test
public void shouldSucceedIfNeverInitialized() throws Exception {
    // Given:
    givenMigrationsStreamDoesNotExist();
    givenMigrationsTableDoesNotExist();
    // When:
    final int status = command.command(config, cfg -> client);
    // Then:
    assertThat(status, is(0));
}
/**
 * Visitor-pattern dispatch: routes this COSString to the visitor's string handler.
 *
 * @throws IOException if the visitor fails while processing the string
 */
@Override
public void accept(ICOSVisitor visitor) throws IOException {
    visitor.visitFromString(this);
}
/**
 * Accepting a COSWriter visitor must serialize the string in PDF literal form
 * "(...)" by default, and in hex form "<...>" when the string is hex-flagged.
 */
@Override
@Test
void testAccept() throws IOException {
    ByteArrayOutputStream outStream = new ByteArrayOutputStream();
    ICOSVisitor visitor = new COSWriter(outStream);
    COSString testSubj = new COSString(ESC_CHAR_STRING);
    testSubj.accept(visitor);
    assertEquals("(" + ESC_CHAR_STRING_PDF_FORMAT + ")", outStream.toString());
    outStream.reset();
    COSString testSubjHex = new COSString(ESC_CHAR_STRING, true);
    testSubjHex.accept(visitor);
    assertEquals("<" + createHex(ESC_CHAR_STRING) + ">", outStream.toString());
}
/**
 * Computes the Euclidean (L2) norm of the vector: sqrt(sum of squared elements).
 * Returns 0 for an empty array.
 *
 * @param x the vector
 * @return the L2 norm of {@code x}
 */
public static float norm2(float[] x) {
    float sumOfSquares = 0.0F;
    for (int i = 0; i < x.length; i++) {
        sumOfSquares += x[i] * x[i];
    }
    return (float) Math.sqrt(sumOfSquares);
}
/**
 * Checks norm2 against a precomputed reference value.
 * NOTE(review): this exercises the double[] overload, not the float[] one — presumably
 * both overloads exist in MathEx; verify coverage of the float[] variant separately.
 */
@Test
public void testNorm2_doubleArr() {
    System.out.println("norm2");
    double[] x = {-2.1968219, -0.9559913, -0.0431738, 1.0567679, 0.3853515};
    assertEquals(2.647086, MathEx.norm2(x), 1E-6);
}
/**
 * Applies the topic config to the in-memory table and then persists it, so the change
 * survives a restart. Validation happens inside the in-memory update step.
 */
public void updateTopicConfig(final TopicConfig topicConfig) {
    updateSingleTopicConfigWithoutPersist(topicConfig);
    this.persist(topicConfig.getTopicName(), topicConfig);
}
/**
 * Creating a topic with an attribute key outside the declared supported set must be
 * rejected with an "unsupported key" error naming the offending key.
 */
@Test
public void testAddUnsupportedKeyOnCreating() {
    String unsupportedKey = "key4";
    supportAttributes(asList(
        new EnumAttribute("enum.key", true, newHashSet("enum-1", "enum-2", "enum-3"), "enum-1"),
        new BooleanAttribute("bool.key", false, false),
        new LongRangeAttribute("long.range.key", true, 10, 20, 15)
    ));
    Map<String, String> attributes = new HashMap<>();
    // '+' prefix marks an attribute addition
    attributes.put("+enum.key", "enum-2");
    attributes.put("+" + unsupportedKey, "value1");
    TopicConfig topicConfig = new TopicConfig();
    topicConfig.setTopicName("new-topic");
    topicConfig.setAttributes(attributes);
    RuntimeException runtimeException = Assert.assertThrows(RuntimeException.class, () -> topicConfigManager.updateTopicConfig(topicConfig));
    Assert.assertEquals("unsupported key: " + unsupportedKey, runtimeException.getMessage());
}
/**
 * Rewrites the column names of a group-by result schema into canonical form: the first
 * columns take the string form of the group-by expressions, the remaining columns take
 * the canonical result names of the (possibly filtered) aggregations. Column data types
 * are preserved unchanged.
 *
 * @throws IllegalStateException if the schema width does not match the query's
 *         group-by expression count plus aggregation count
 */
public static DataSchema canonicalizeDataSchemaForGroupBy(QueryContext queryContext, DataSchema dataSchema) {
    List<ExpressionContext> groupByExpressions = queryContext.getGroupByExpressions();
    List<Pair<AggregationFunction, FilterContext>> filteredAggregationFunctions = queryContext.getFilteredAggregationFunctions();
    assert groupByExpressions != null && filteredAggregationFunctions != null;
    int numGroupByExpression = groupByExpressions.size();
    int numAggregations = filteredAggregationFunctions.size();
    int numColumns = numGroupByExpression + numAggregations;
    String[] columnNames = new String[numColumns];
    Preconditions.checkState(dataSchema.size() == numColumns,
        "BUG: Expect same number of group-by expressions, aggregations and columns in data schema, got %s group-by "
            + "expressions, %s aggregations, %s columns in data schema", numGroupByExpression, numAggregations,
        dataSchema.size());
    // group-by columns come first, named by the expression's canonical string form
    for (int i = 0; i < numGroupByExpression; i++) {
        columnNames[i] = groupByExpressions.get(i).toString();
    }
    // aggregation columns follow, including any FILTER clause in the name
    for (int i = 0; i < numAggregations; i++) {
        Pair<AggregationFunction, FilterContext> pair = filteredAggregationFunctions.get(i);
        columnNames[numGroupByExpression + i] = AggregationFunctionUtils.getResultColumnName(pair.getLeft(), pair.getRight());
    }
    return new DataSchema(columnNames, dataSchema.getColumnDataTypes());
}
/**
 * Feeds canonicalizeDataSchemaForGroupBy schemas whose column names intentionally do
 * NOT match the canonical expression strings, and asserts the names are rewritten to
 * canonical form (plus(...), literal-quoted constants, FILTER clauses) while data types
 * are left untouched.
 */
@Test
public void testCanonicalizeDataSchemaForGroupBy() {
    QueryContext queryContext = QueryContextConverterUtils.getQueryContext(
        "SELECT SUM(col1 + col2) FROM testTable GROUP BY col3 + col4 ORDER BY col3 + col4");
    // Intentionally make data schema not matching the string representation of the expression
    DataSchema dataSchema = new DataSchema(new String[]{"add(col3+col4)", "sum(col1+col2)"},
        new ColumnDataType[]{ColumnDataType.DOUBLE, ColumnDataType.DOUBLE});
    DataSchema canonicalDataSchema = ReducerDataSchemaUtils.canonicalizeDataSchemaForGroupBy(queryContext, dataSchema);
    assertEquals(canonicalDataSchema, new DataSchema(new String[]{"plus(col3,col4)", "sum(plus(col1,col2))"},
        new ColumnDataType[]{ColumnDataType.DOUBLE, ColumnDataType.DOUBLE}));
    queryContext = QueryContextConverterUtils.getQueryContext(
        "SELECT SUM(col1 + 1), MIN(col2 + 2), col4 FROM testTable GROUP BY col3, col4");
    // Intentionally make data schema not matching the string representation of the expression
    dataSchema = new DataSchema(new String[]{"col3", "col4", "sum(col1+1)", "min(col2+2)"},
        new ColumnDataType[]{ColumnDataType.INT, ColumnDataType.LONG, ColumnDataType.DOUBLE, ColumnDataType.DOUBLE});
    canonicalDataSchema = ReducerDataSchemaUtils.canonicalizeDataSchemaForGroupBy(queryContext, dataSchema);
    // numeric literals are canonicalized into quoted form, e.g. plus(col1,'1')
    assertEquals(canonicalDataSchema,
        new DataSchema(new String[]{"col3", "col4", "sum(plus(col1,'1'))", "min(plus(col2,'2'))"}, new ColumnDataType[]{
            ColumnDataType.INT, ColumnDataType.LONG, ColumnDataType.DOUBLE, ColumnDataType.DOUBLE
        }));
    queryContext = QueryContextConverterUtils.getQueryContext(
        "SELECT col3 + col4, MAX(col1 + 1) FILTER(WHERE col3 > 0) - MIN(col2 + 2) FILTER(WHERE col4 > 0) FROM "
            + "testTable GROUP BY col3 + col4");
    // Intentionally make data schema not matching the string representation of the expression
    dataSchema = new DataSchema(new String[]{"add(col3+col4)", "max(col1+1)", "min(col2+2)"},
        new ColumnDataType[]{ColumnDataType.DOUBLE, ColumnDataType.DOUBLE, ColumnDataType.DOUBLE});
    canonicalDataSchema = ReducerDataSchemaUtils.canonicalizeDataSchemaForGroupBy(queryContext, dataSchema);
    // FILTER clauses become part of the canonical column name
    assertEquals(canonicalDataSchema, new DataSchema(new String[]{
        "plus(col3,col4)", "max(plus(col1,'1')) FILTER(WHERE col3 > '0')", "min(plus(col2,'2')) FILTER" + "(WHERE col4 > '0')"
    }, new ColumnDataType[]{ColumnDataType.DOUBLE, ColumnDataType.DOUBLE, ColumnDataType.DOUBLE}));
}
/**
 * Validates that the column name is non-null and contains only characters legal in a
 * database identifier.
 *
 * @param columnName the column name to validate
 * @return the same column name, unchanged
 * @throws NullPointerException if {@code columnName} is null
 */
public static String validateColumnName(@Nullable String columnName) {
    requireNonNull(columnName, "Column name cannot be null");
    checkDbIdentifierCharacters(columnName, "Column name");
    return columnName;
}
/** SQL reserved words such as "value" are still legal column identifiers here. */
@Test
public void accept_allowed_identifier_for_column_name_that_is_SQL_reserved_keyword() {
    assertThatCode(() -> validateColumnName("value"))
        .doesNotThrowAnyException();
}
/**
 * Associates the value with the key, holding the value only weakly so the garbage
 * collector may reclaim it when no strong references remain.
 *
 * @return the previous value for the key, resolved from its weak reference
 *         (may be null if there was none or if it has been collected)
 */
public V put(K key, V value) {
    return resolve(map.put(key, new WeakReference<>(value)));
}
/**
 * An unknown key (or one whose weak reference is empty) must be regenerated on demand
 * by the factory; an empty reference additionally increments the lost counter.
 */
@Test
public void testDemandCreateEntries() {
    // ask for an unknown key and expect a generated value
    assertMapEntryEquals(1, factory(1));
    assertMapSize(1);
    assertMapContainsKey(1);
    assertLostCount(0);
    // an empty ref has the same outcome
    referenceMap.put(2, null);
    assertMapEntryEquals(2, factory(2));
    // but the lost count goes up
    assertLostCount(1);
}
/**
 * Returns the note unchanged when its UTF-8 encoding fits within MAX_MESSAGE_LENGTH
 * bytes; otherwise truncates and appends ELLIPSIS so the result is exactly
 * MAX_MESSAGE_LENGTH bytes. Only single-byte (ASCII) content is supported: if the
 * character count differs from the byte count, a multibyte character is present and
 * truncation is refused.
 *
 * @param note the message to bound in size
 * @return the original or truncated message
 * @throws UnsupportedOperationException if the note contains multibyte characters
 */
String maybeTruncated(String note) throws UnsupportedOperationException {
    byte[] stringBytes = note.getBytes(UTF_8);
    if (note.length() != stringBytes.length) {
        throw new UnsupportedOperationException("Truncating messages containing multibyte characters isn't implemented");
    } else if (stringBytes.length <= MAX_MESSAGE_LENGTH) {
        return note;
    } else {
        // Fix: derive the cut point from the constants instead of the magic number 997,
        // so the total stays exactly MAX_MESSAGE_LENGTH bytes even if either constant changes.
        int cutoff = MAX_MESSAGE_LENGTH - ELLIPSIS.getBytes(UTF_8).length;
        return new String(stringBytes, 0, cutoff, UTF_8) + ELLIPSIS;
    }
}
/**
 * Messages under and at the 1000-byte limit pass through unchanged; longer messages
 * are cut to 997 bytes plus "..." for an exact 1000-byte result.
 */
@Test
void testTruncation() {
    String underOneThousandBytes = "1".repeat(999);
    String oneThousandBytes = "2".repeat(1000);
    String twoThousandBytes = "3".repeat(2000);
    assertThat(publisher.maybeTruncated(underOneThousandBytes).getBytes(UTF_8).length, is(999));
    assertThat(publisher.maybeTruncated(underOneThousandBytes), is(underOneThousandBytes));
    assertThat(publisher.maybeTruncated(oneThousandBytes).getBytes(UTF_8).length, is(1000));
    assertThat(publisher.maybeTruncated(oneThousandBytes), is(oneThousandBytes));
    assertThat(publisher.maybeTruncated(twoThousandBytes).getBytes(UTF_8).length, is(1000));
    assertThat(publisher.maybeTruncated(twoThousandBytes), is("3".repeat(997) + "..."));
}
/**
 * Resolves the dependency list for a host, first normalizing the name to an IP address
 * and consulting a cache before falling back to the raw (script-based) mapping.
 * A null result from the raw mapping is returned to the caller but NOT cached, so it
 * will be re-queried on the next call.
 *
 * @return the dependency list, an empty list if the name cannot be normalized, or
 *         null if the raw mapping produced no result
 */
@Override
public List<String> getDependency(String name) {
    // normalize all input names to be in the form of IP addresses
    name = NetUtils.normalizeHostName(name);
    if (name == null) {
        return Collections.emptyList();
    }
    List<String> dependencies = dependencyCache.get(name);
    if (dependencies == null) {
        // not cached
        dependencies = getRawMapping().getDependency(name);
        if (dependencies != null) {
            dependencyCache.put(name, dependencies);
        }
    }
    return dependencies;
}
/**
 * When the script arg-count configuration is invalid, both resolve() and
 * getDependency() must yield null rather than fabricated results.
 */
@Test
public void testNoArgsMeansNoResult() {
    Configuration conf = new Configuration();
    conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY, ScriptBasedMapping.MIN_ALLOWABLE_ARGS - 1);
    conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "any-filename-1");
    conf.set(ScriptBasedMappingWithDependency.DEPENDENCY_SCRIPT_FILENAME_KEY, "any-filename-2");
    conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY, 10);
    ScriptBasedMappingWithDependency mapping = createMapping(conf);
    List<String> names = new ArrayList<String>();
    names.add("some.machine.name");
    names.add("other.machine.name");
    List<String> result = mapping.resolve(names);
    assertNull("Expected an empty list for resolve", result);
    result = mapping.getDependency("some.machine.name");
    assertNull("Expected an empty list for getDependency", result);
}
/** Returns this session's identifier. */
public long getSessionId() {
    return mSessionId;
}
/**
 * Non-positive session ids must be rejected by the constructor with a RuntimeException.
 * NOTE: because the exception is expected, only the first loop iteration actually runs;
 * the loop documents the intended id range rather than exercising all of it.
 */
@Test(expected = RuntimeException.class)
public void constructorWithException() {
    for (int k = 0; k >= -1000; k -= DELTA) {
        SessionInfo tSessionInfo = new SessionInfo(k, SESSION_TIMEOUT_MS);
        assertEquals(k, tSessionInfo.getSessionId());
        fail("SessionId " + k + " should be invalid.");
    }
}
/**
 * Scans the handler object (or the bean named by a String handler) for request-mapped
 * methods, builds plugin-scoped mapping info for each, registers the invocable methods,
 * and records the mappings under the plugin id so they can be unregistered later.
 *
 * @param pluginId id of the plugin owning these handler methods
 * @param handler  handler instance, or a bean name resolved via the application context
 */
public void registerHandlerMethods(String pluginId, Object handler) {
    Class<?> handlerType = (handler instanceof String beanName ?
        obtainApplicationContext().getType(beanName) : handler.getClass());
    if (handlerType != null) {
        // strip CGLIB/proxy subclasses back to the user-declared class
        final Class<?> userType = ClassUtils.getUserClass(handlerType);
        Map<Method, RequestMappingInfo> methods = MethodIntrospector.selectMethods(userType,
            (MethodIntrospector.MetadataLookup<RequestMappingInfo>) method ->
                getPluginMappingForMethod(pluginId, method, userType));
        if (logger.isTraceEnabled()) {
            logger.trace(formatMappings(userType, methods));
        } else if (mappingsLogger.isDebugEnabled()) {
            mappingsLogger.debug(formatMappings(userType, methods));
        }
        methods.forEach((method, mapping) -> {
            // ensure the method is invocable even through AOP proxies
            Method invocableMethod = AopUtils.selectInvocableMethod(method, userType);
            registerHandlerMethod(handler, invocableMethod, mapping);
            pluginMappingInfo.add(pluginId, mapping);
        });
    }
}
/**
 * Registering a controller for a plugin must expose exactly its mapped method, with
 * the path rewritten to be scoped under the plugin's API prefix.
 */
@Test
void registerHandlerMethods() {
    assertThat(handlerMapping.getMappings("fakePlugin")).isEmpty();
    UserController userController = mock(UserController.class);
    handlerMapping.registerHandlerMethods("fakePlugin", userController);
    List<RequestMappingInfo> mappings = handlerMapping.getMappings("fakePlugin");
    assertThat(mappings).hasSize(1);
    assertThat(mappings.get(0).toString()).isEqualTo(
        "{GET /apis/api.plugin.halo.run/v1alpha1/plugins/fakePlugin/user/{id}}");
}
/**
 * Creates a metaverse file node for the given external resource, using the
 * resource name as the file path and this analyzer's descriptor.
 */
@Override
public IMetaverseNode createResourceNode( IExternalResourceInfo resource ) throws MetaverseException {
    return createFileNode( resource.getName(), descriptor );
}
// The node created for a file resource must be non-null and typed as a file node.
@Test
public void testCreateResourceNode() throws Exception {
    IExternalResourceInfo res = mock( IExternalResourceInfo.class );
    when( res.getName() ).thenReturn( "file:///Users/home/tmp/xyz.ktr" );
    IMetaverseNode resourceNode = analyzer.createResourceNode( res );
    assertNotNull( resourceNode );
    assertEquals( DictionaryConst.NODE_TYPE_FILE, resourceNode.getType() );
}
// Maps an internal Throwable to an AWS proxy response: known request/server
// exceptions become a 500 with an internal-error body; anything else is
// reported as a 502 (gateway timeout) body.
@Override
public AwsProxyResponse handle(Throwable ex) {
    log.error("Called exception handler for:", ex);

    // adding a print stack trace in case we have no appender or we are running inside SAM local, where need the
    // output to go to the stderr.
    ex.printStackTrace();
    if (ex instanceof InvalidRequestEventException || ex instanceof InternalServerErrorException) {
        return new AwsProxyResponse(500, HEADERS, getErrorJson(INTERNAL_SERVER_ERROR));
    } else {
        return new AwsProxyResponse(502, HEADERS, getErrorJson(GATEWAY_TIMEOUT_ERROR));
    }
}
// A mocked InternalServerErrorException must produce a response body equal to
// the serialized internal-server-error model.
@Test
void typedHandle_InternalServerErrorException_responseString() throws JsonProcessingException {
    InternalServerErrorException mockInternalServerErrorException = Mockito.mock(InternalServerErrorException.class);
    Mockito.when(mockInternalServerErrorException.getMessage()).thenReturn(INTERNAL_SERVER_ERROR_MESSAGE);
    AwsProxyResponse resp = exceptionHandler.handle(mockInternalServerErrorException);
    assertNotNull(resp);
    String body = objectMapper.writeValueAsString(new ErrorModel(AwsProxyExceptionHandler.INTERNAL_SERVER_ERROR));
    assertEquals(body, resp.getBody());
}
/** Attempts to acquire a single permit without blocking. */
@Override
public boolean tryAcquire() {
    return tryAcquire(1);
}
// Exhausts a 1-permit-per-5-seconds rate limiter, then verifies that further
// acquisition attempts (timed and immediate) fail.
@Test
@Timeout(2)
public void testTryAcquire() {
    RRateLimiter rr = redisson.getRateLimiter("acquire");
    assertThat(rr.trySetRate(RateType.OVERALL, 1, 5, RateIntervalUnit.SECONDS)).isTrue();

    assertThat(rr.tryAcquire(1, 1, TimeUnit.SECONDS)).isTrue();
    assertThat(rr.tryAcquire(1, 1, TimeUnit.SECONDS)).isFalse();
    assertThat(rr.tryAcquire()).isFalse();
}
/**
 * Returns the cached {@link DoFnSignature} for the given DoFn class,
 * parsing and caching it on first use.
 */
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
    return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
// A no-arg @OnWindowExpiration callback must be parsed with zero extra parameters.
@Test
public void testOnWindowExpirationNoParam() {
    DoFnSignature sig =
        DoFnSignatures.getSignature(
            new DoFn<String, String>() {
                @ProcessElement
                public void process(ProcessContext c) {}

                @OnWindowExpiration
                public void bar() {}
            }.getClass());
    assertThat(sig.onWindowExpiration().extraParameters().size(), equalTo(0));
}
// Deletes a stored file: validates the record exists, removes the blob from
// its backing file storage client, then deletes the database record.
@Override
public void deleteFile(Long id) throws Exception {
    // Validate the record exists.
    FileDO file = validateFileExists(id);

    // Remove the blob from the configured file client.
    FileClient client = fileConfigService.getFileClient(file.getConfigId());
    Assert.notNull(client, "客户端({}) 不能为空", file.getConfigId());
    client.delete(file.getPath());

    // Delete the database row.
    fileMapper.deleteById(id);
}
// Deleting an existing file must remove the DB row and delete the blob via
// the file client for the file's config.
@Test
public void testDeleteFile_success() throws Exception {
    // Mock data: insert an existing record first.
    FileDO dbFile = randomPojo(FileDO.class, o -> o.setConfigId(10L).setPath("tudou.jpg"));
    fileMapper.insert(dbFile);// @Sql: 先插入出一条存在的数据
    // Mock the master file client.
    FileClient client = mock(FileClient.class);
    when(fileConfigService.getFileClient(eq(10L))).thenReturn(client);
    // Prepare arguments.
    Long id = dbFile.getId();

    // Invoke.
    fileService.deleteFile(id);

    // Verify the record is gone.
    assertNull(fileMapper.selectById(id));
    // Verify the storage deletion happened.
    verify(client).delete(eq("tudou.jpg"));
}
// Parses a date string with a cached pattern-keyed formatter, rejecting any
// input whose parsed instant is not midnight-aligned (i.e. carries a time part).
// Returns null when either argument is null.
@Udf(description = "Converts a string representation of a date in the given format"
    + " into a DATE value.")
public Date parseDate(
    @UdfParameter(
        description = "The string representation of a date.") final String formattedDate,
    @UdfParameter(
        description = "The format pattern should be in the format expected by"
            + " java.text.SimpleDateFormat.") final String formatPattern) {
  // Null-in/null-out for either argument.
  if (formattedDate == null || formatPattern == null) {
    return null;
  }
  try {
    final long time = formatters.get(formatPattern).parse(formattedDate).getTime();
    if (time % MILLIS_IN_DAY != 0) {
      // NOTE(review): this exception is a RuntimeException, so the catch below
      // re-wraps it inside the generic "Failed to parse" message — confirm that
      // double-wrapping is intended.
      throw new KsqlFunctionException("Date format contains time field.");
    }
    return new Date(time);
  } catch (final ExecutionException | RuntimeException | ParseException e) {
    throw new KsqlFunctionException("Failed to parse date '" + formattedDate
        + "' with formatter '" + formatPattern
        + "': " + e.getMessage(), e);
  }
}
// A null format pattern must yield a null result instead of throwing.
@Test
public void shouldHandleNullDateFormat() {
  // When:
  final Date result = udf.parseDate("2021-12-01", null);

  // Then:
  assertThat(result, is(nullValue()));
}
/** Registers the given listener on the wrapped AsyncContext. */
public void addListener(AsyncListener asyncListener) {
    asyncContext.addListener(asyncListener);
}
// The listener must be forwarded verbatim to the underlying AsyncContext.
@Test
public void addListener_addsListener() {
    AsyncListener mock = mock(AsyncListener.class);

    underTest.addListener(mock);

    verify(asyncContext).addListener(mock);
}
/**
 * Sets the registry protocol and returns this builder for chaining.
 */
public RegistryBuilder protocol(String protocol) {
    this.protocol = protocol;
    return getThis();
}
// The protocol set on the builder must survive into the built registry config.
@Test
void protocol() {
    RegistryBuilder builder = new RegistryBuilder();
    builder.protocol("protocol");
    Assertions.assertEquals("protocol", builder.build().getProtocol());
}
// Initializes the servlet from ServletContext attributes: a required
// HealthCheckRegistry, plus optional executor, filter, ObjectMapper and
// HTTP-status-indicator flag, each with a sensible default.
@Override
public void init(ServletConfig config) throws ServletException {
    super.init(config);
    final ServletContext context = config.getServletContext();
    // Registry may have been injected via constructor; otherwise it must be
    // present in the servlet context or init fails.
    if (null == registry) {
        final Object registryAttr = context.getAttribute(HEALTH_CHECK_REGISTRY);
        if (registryAttr instanceof HealthCheckRegistry) {
            this.registry = (HealthCheckRegistry) registryAttr;
        } else {
            throw new ServletException("Couldn't find a HealthCheckRegistry instance.");
        }
    }

    // Optional executor for running health checks.
    final Object executorAttr = context.getAttribute(HEALTH_CHECK_EXECUTOR);
    if (executorAttr instanceof ExecutorService) {
        this.executorService = (ExecutorService) executorAttr;
    }

    // Optional filter; defaults to running all checks.
    final Object filterAttr = context.getAttribute(HEALTH_CHECK_FILTER);
    if (filterAttr instanceof HealthCheckFilter) {
        filter = (HealthCheckFilter) filterAttr;
    }
    if (filter == null) {
        filter = HealthCheckFilter.ALL;
    }

    // Optional ObjectMapper; defaults to a fresh instance. Either way the
    // health-check serialization module is registered on it.
    final Object mapperAttr = context.getAttribute(HEALTH_CHECK_MAPPER);
    if (mapperAttr instanceof ObjectMapper) {
        this.mapper = (ObjectMapper) mapperAttr;
    } else {
        this.mapper = new ObjectMapper();
    }
    this.mapper.registerModule(new HealthCheckModule());

    // Whether the HTTP status code should reflect check results (default true).
    final Object httpStatusIndicatorAttr = context.getAttribute(HEALTH_CHECK_HTTP_STATUS_INDICATOR);
    if (httpStatusIndicatorAttr instanceof Boolean) {
        this.httpStatusIndicator = (Boolean) httpStatusIndicatorAttr;
    } else {
        this.httpStatusIndicator = true;
    }
}
// A non-registry object stored under the registry attribute key must cause
// init to fail with a ServletException.
@Test(expected = ServletException.class)
public void constructorWithRegistryAsArgumentUsesServletConfigWhenNullButWrongTypeInContext() throws Exception {
    final ServletContext servletContext = mock(ServletContext.class);
    final ServletConfig servletConfig = mock(ServletConfig.class);
    when(servletConfig.getServletContext()).thenReturn(servletContext);
    when(servletContext.getAttribute(eq(io.dropwizard.metrics.servlets.HealthCheckServlet.HEALTH_CHECK_REGISTRY)))
        .thenReturn("IRELLEVANT_STRING");

    final io.dropwizard.metrics.servlets.HealthCheckServlet healthCheckServlet = new HealthCheckServlet(null);
    healthCheckServlet.init(servletConfig);
}
/**
 * Fulfills the promise with the given value, then runs the post-fulfillment hook.
 */
@Override
public void fulfill(T value) {
    super.fulfill(value);
    postFulfillment();
}
// A promise fulfilled before get() must return the value without waiting.
@Test
void fetchingAnAlreadyFulfilledPromiseReturnsTheFulfilledValueImmediately() throws ExecutionException {
    var promise = new Promise<Integer>();
    promise.fulfill(NumberCrunchingTask.CRUNCHED_NUMBER);

    Integer result = promise.get(1000, TimeUnit.SECONDS);
    assertEquals(NumberCrunchingTask.CRUNCHED_NUMBER, result);
}
/**
 * Sets the resource-name prefix; the prefix must not be null.
 */
public void setPrefix(String prefix) {
    AssertUtil.notNull(prefix, "prefix cannot be null");
    this.prefix = prefix;
}
// A null prefix must be rejected with an IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void testConfigSetPrefix() {
    SentinelApacheHttpClientConfig config = new SentinelApacheHttpClientConfig();
    config.setPrefix(null);
}
// Monte Carlo estimate of the multivariate normal CDF at x, sampling the
// coordinates sequentially through the Cholesky factor sigmaL (appears to
// follow Genz's sequential-conditioning algorithm — TODO confirm). Iterates
// until the estimated 99.9%-confidence error drops below errMax or Nmax
// samples have been drawn.
@Override
public double cdf(double[] x) {
    if (x.length != dim) {
        throw new IllegalArgumentException("Sample has different dimension.");
    }

    int Nmax = 10000;       // hard cap on Monte Carlo iterations
    double alph = GaussianDistribution.getInstance().quantile(0.999);
    double errMax = 0.001;  // target error half-width

    // Center the evaluation point at the mean.
    double[] v = x.clone();
    MathEx.sub(v, mu);

    double p = 0.0;
    double varSum = 0.0;

    // d is always zero
    double[] e = new double[dim];
    double[] f = new double[dim];
    e[0] = GaussianDistribution.getInstance().cdf(v[0] / sigmaL.get(0, 0));
    f[0] = e[0];

    double[] y = new double[dim];

    double err = 2 * errMax;
    int N;
    for (N = 1; err > errMax && N <= Nmax; N++) {
        double[] w = MathEx.random(dim - 1);
        for (int i = 1; i < dim; i++) {
            // Draw the conditional coordinate and fold its probability into
            // the running product f.
            y[i - 1] = GaussianDistribution.getInstance().quantile(w[i - 1] * e[i - 1]);
            double q = 0.0;
            for (int j = 0; j < i; j++) {
                q += sigmaL.get(i, j) * y[j];
            }

            e[i] = GaussianDistribution.getInstance().cdf((v[i] - q) / sigmaL.get(i, i));
            f[i] = e[i] * f[i - 1];
        }

        // Incremental mean/variance update; err is the confidence half-width.
        double del = (f[dim - 1] - p) / N;
        p += del;
        varSum = (N - 2) * varSum / N + del * del;
        err = alph * Math.sqrt(varSum);
    }

    return p;
}
// Compares the Monte Carlo CDF estimate against a reference value computed in R.
@Test
public void testCdf2() {
    System.out.println("cdf2");
    double[][] S = {
        {3.260127902272362, 2.343938296424249, 0.1409050254343716, -0.1628775438743266},
        {2.343938296424249, 4.213034991388330, 1.3997210599608563, 0.3373448510018783},
        {0.1409050254343716, 1.3997210599608563, 4.6042485263677939, 0.0807267064408651},
        {-0.1628775438743266, 0.3373448510018783, 0.0807267064408651, 5.4950949215890672}
    };
    double[] M = {-0.683477474844462, 1.480296478403701, 1.008431991316523, 0.448404211078558};
    double[] X = {0.713919336274493, 0.584408785741822, 0.263119200077829, 0.732513610871908};

    MultivariateGaussianDistribution instance = new MultivariateGaussianDistribution(M, Matrix.of(S));
    // The expected value is based on R
    assertEquals(0.0904191282120575, instance.cdf(X), 1E-3);
}
/** Builds a search source for the given search configuration. */
public SearchSourceBuilder create(SearchesConfig config) {
    return create(SearchCommand.from(config));
}
// The generated search request must carry the requested time range as a
// timestamp range filter.
@Test
void searchIncludesTimerange() {
    final SearchSourceBuilder search = this.searchRequestFactory.create(ChunkCommand.builder()
        .indices(Collections.singleton("graylog_0"))
        .range(RANGE)
        .build());
    assertJsonPath(search, request -> {
        request.jsonPathAsListOf("$.query.bool.filter..range.timestamp.from", String.class)
            .containsExactly("2020-07-23 11:03:32.243");
        request.jsonPathAsListOf("$.query.bool.filter..range.timestamp.to", String.class)
            .containsExactly("2020-07-23 11:08:32.243");
    });
}
// Validates a service-method invocation: assembles the active validation
// groups (Default, the service interface, an optional per-method marker class
// and any @MethodValidated groups), validates a synthesized parameter bean
// plus every argument, and throws if any constraint is violated.
@Override
public void validate(String methodName, Class<?>[] parameterTypes, Object[] arguments) throws Exception {
    List<Class<?>> groups = new ArrayList<>();
    Class<?> methodClass = methodClass(methodName);
    if (methodClass != null) {
        groups.add(methodClass);
    }
    Method method = clazz.getMethod(methodName, parameterTypes);
    Class<?>[] methodClasses;
    if (method.isAnnotationPresent(MethodValidated.class)) {
        methodClasses = method.getAnnotation(MethodValidated.class).value();
        groups.addAll(Arrays.asList(methodClasses));
    }
    // add into default group
    groups.add(0, Default.class);
    groups.add(1, clazz);

    // convert list to array
    Class<?>[] classGroups = groups.toArray(new Class[0]);

    Set<ConstraintViolation<?>> violations = new HashSet<>();
    // Validate method parameters as a bean when one can be synthesized.
    Object parameterBean = getMethodParameterBean(clazz, method, arguments);
    if (parameterBean != null) {
        violations.addAll(validator.validate(parameterBean, classGroups));
    }

    // Validate each argument individually (including nested collections).
    for (Object arg : arguments) {
        validate(violations, arg, classGroups);
    }

    if (!violations.isEmpty()) {
        logger.info("Failed to validate service: " + clazz.getName() + ", method: " + methodName
            + ", cause: " + violations);
        throw new ConstraintViolationException(
            "Failed to validate service: " + clazz.getName() + ", method: " + methodName
                + ", cause: " + violations,
            violations);
    }
}
// A constraint-violating argument must make validation fail with a ValidationException.
@Test
void testItWhenItViolatedConstraint() {
    Assertions.assertThrows(ValidationException.class, () -> {
        URL url = URL.valueOf(
            "test://test:11/org.apache.dubbo.validation.support.jvalidation.mock.JValidatorTestTarget");
        JValidator jValidator = new JValidator(url);
        jValidator.validate(
            "someMethod2", new Class<?>[] {ValidationParameter.class}, new Object[] {new ValidationParameter()
            });
    });
}
// Decides whether an incoming service-unit-state event ("to") should be
// discarded in favor of the currently-known state ("from"): checks version
// ordering (with overflow wrap handling), forced overrides, state-machine
// transition validity, and optionally broker-field consistency.
@Override
public boolean shouldKeepLeft(ServiceUnitStateData from, ServiceUnitStateData to) {
    // A null candidate never wins; tombstones are handled elsewhere.
    if (to == null) {
        return false;
    }
    if (from != null) {
        if (from.versionId() == Long.MAX_VALUE && to.versionId() == Long.MIN_VALUE) {
            // overflow
        } else if (from.versionId() >= to.versionId()) {
            // Stale or duplicate event: keep the existing state.
            return true;
        } else if (from.versionId() < to.versionId() - 1) {
            // Compacted
            return false;
        }
        // else from.versionId() == to.versionId() - 1
        // continue to check further
    }

    // Forced events always win.
    if (to.force()) {
        return false;
    }

    ServiceUnitState prevState = state(from);
    ServiceUnitState state = state(to);
    if (!ServiceUnitState.isValidTransition(prevState, state)) {
        return true;
    }

    if (checkBrokers) {
        // NOTE(review): the outer switch cases below carry no break statements,
        // so a prevState whose inner switch does not return falls through into
        // the following case's inner switch — confirm this fall-through is
        // intentional before restructuring.
        switch (prevState) {
            case Owned:
                switch (state) {
                    case Splitting:
                        return isNotBlank(to.dstBroker()) || !from.dstBroker().equals(to.sourceBroker());
                    case Releasing:
                        return invalidUnload(from, to);
                }
            case Assigning:
                switch (state) {
                    case Owned:
                        return notEquals(from, to);
                }
            case Releasing:
                switch (state) {
                    case Assigning:
                        return isBlank(to.dstBroker()) || notEquals(from, to);
                    case Free:
                        return notEquals(from, to);
                }
            case Splitting:
                switch (state) {
                    case Deleted:
                        return notEquals(from, to);
                }
            case Free:
                switch (state) {
                    case Assigning:
                        return isNotBlank(to.sourceBroker()) || isBlank(to.dstBroker());
                }
        }
    }
    return false;
}
// A null candidate (tombstone) must never be kept left, whatever the current state.
@Test
public void testTombstone() {
    assertFalse(strategy.shouldKeepLeft(data(Init), null));
    assertFalse(strategy.shouldKeepLeft(data(Assigning), null));
    assertFalse(strategy.shouldKeepLeft(data(Owned), null));
    assertFalse(strategy.shouldKeepLeft(data(Releasing), null));
    assertFalse(strategy.shouldKeepLeft(data(Splitting), null));
    assertFalse(strategy.shouldKeepLeft(data(Free), null));
    assertFalse(strategy.shouldKeepLeft(data(Deleted), null));
}
// Lists the devices linked to the authenticated account as DTOs.
@GET
@Produces(MediaType.APPLICATION_JSON)
public DeviceInfoList getDevices(@ReadOnly @Auth AuthenticatedDevice auth) {
    List<DeviceInfo> devices = new LinkedList<>();

    for (Device device : auth.getAccount().getDevices()) {
        devices.add(new DeviceInfo(device.getId(), device.getName(), device.getLastSeen(),
            device.getCreated()));
    }

    return new DeviceInfoList(devices);
}
// Linking a device atomically with account attributes that omit the required
// device capabilities must be rejected with 422 Unprocessable Entity.
@Test
void linkDeviceAtomicMissingCapabilities() {
    final ECSignedPreKey aciSignedPreKey;
    final ECSignedPreKey pniSignedPreKey;
    final KEMSignedPreKey aciPqLastResortPreKey;
    final KEMSignedPreKey pniPqLastResortPreKey;

    final ECKeyPair aciIdentityKeyPair = Curve.generateKeyPair();
    final ECKeyPair pniIdentityKeyPair = Curve.generateKeyPair();

    aciSignedPreKey = KeysHelper.signedECPreKey(1, aciIdentityKeyPair);
    pniSignedPreKey = KeysHelper.signedECPreKey(2, pniIdentityKeyPair);
    aciPqLastResortPreKey = KeysHelper.signedKEMPreKey(3, aciIdentityKeyPair);
    pniPqLastResortPreKey = KeysHelper.signedKEMPreKey(4, pniIdentityKeyPair);

    when(accountsManager.getByAccountIdentifier(AuthHelper.VALID_UUID)).thenReturn(Optional.of(AuthHelper.VALID_ACCOUNT));

    final Device existingDevice = mock(Device.class);
    when(existingDevice.getId()).thenReturn(Device.PRIMARY_ID);
    when(AuthHelper.VALID_ACCOUNT.getDevices()).thenReturn(List.of(existingDevice));

    // Obtain a provisioning verification code for the new device.
    VerificationCode deviceCode = resources.getJerseyTest()
        .target("/v1/devices/provisioning/code")
        .request()
        .header("Authorization", AuthHelper.getAuthHeader(AuthHelper.VALID_UUID, AuthHelper.VALID_PASSWORD))
        .get(VerificationCode.class);

    when(account.getIdentityKey(IdentityType.ACI)).thenReturn(new IdentityKey(aciIdentityKeyPair.getPublicKey()));
    when(account.getIdentityKey(IdentityType.PNI)).thenReturn(new IdentityKey(pniIdentityKeyPair.getPublicKey()));

    // Account attributes deliberately omit device capabilities (final null).
    final LinkDeviceRequest request = new LinkDeviceRequest(deviceCode.verificationCode(),
        new AccountAttributes(true, 1234, 5678, null, null, true, null),
        new DeviceActivationRequest(aciSignedPreKey, pniSignedPreKey, aciPqLastResortPreKey, pniPqLastResortPreKey, Optional.empty(), Optional.empty()));

    try (final Response response = resources.getJerseyTest()
        .target("/v1/devices/link")
        .request()
        .header("Authorization", AuthHelper.getProvisioningAuthHeader(AuthHelper.VALID_NUMBER, "password1"))
        .put(Entity.entity(request, MediaType.APPLICATION_JSON_TYPE))) {

        assertEquals(422, response.getStatus());
    }
}
// Validates the package name and filter, then scans every classpath root
// containing the package for classes matching the filter.
private List<Class<?>> scanForClassesInPackage(String packageName, Predicate<Class<?>> classFilter) {
    requireValidPackageName(packageName);
    requireNonNull(classFilter, "classFilter must not be null");
    List<URI> rootUris = getUrisForPackage(getClassLoader(), packageName);
    return findClassesForUris(rootUris, packageName, classFilter);
}
// Resources served from an unsupported filesystem scheme must be skipped with
// a logged warning instead of failing the scan.
@Test
void scanForResourcesInUnsupportedFileSystem(LogRecordListener logRecordListener) throws IOException {
    ClassLoader classLoader = mock(ClassLoader.class);
    ClasspathScanner scanner = new ClasspathScanner(() -> classLoader);
    // A URL whose scheme has no installed FileSystemProvider.
    URLStreamHandler handler = new URLStreamHandler() {
        @Override
        protected URLConnection openConnection(URL u) {
            return null;
        }
    };
    URL resourceUrl = new URL(null, "bundle-resource:com/cucumber/bundle", handler);
    when(classLoader.getResources("com/cucumber/bundle")).thenReturn(enumeration(singletonList(resourceUrl)));
    assertThat(scanner.scanForClassesInPackage("com.cucumber.bundle"), empty());
    assertThat(logRecordListener.getLogRecords().get(0).getMessage(),
        containsString("Failed to find resources for 'bundle-resource:com/cucumber/bundle'"));
}
// Appends a consume-queue extension unit to the mapped-file queue and returns
// its decorated logical address. Returns 1 (an invalid ext address) when the
// unit is oversized, the store has hit its maximum offset, or the append
// cannot be completed within the retry budget.
public long put(final CqExtUnit cqExtUnit) {
    final int retryTimes = 3;
    try {
        int size = cqExtUnit.calcUnitSize();
        if (size > CqExtUnit.MAX_EXT_UNIT_SIZE) {
            log.error("Size of cq ext unit is greater than {}, {}", CqExtUnit.MAX_EXT_UNIT_SIZE, cqExtUnit);
            return 1;
        }
        if (this.mappedFileQueue.getMaxOffset() + size > MAX_REAL_OFFSET) {
            log.warn("Capacity of ext is maximum!{}, {}", this.mappedFileQueue.getMaxOffset(), size);
            return 1;
        }

        // unit size maybe change.but, the same most of the time.
        if (this.tempContainer == null || this.tempContainer.capacity() < size) {
            this.tempContainer = ByteBuffer.allocate(size);
        }

        for (int i = 0; i < retryTimes; i++) {
            MappedFile mappedFile = this.mappedFileQueue.getLastMappedFile();

            if (mappedFile == null || mappedFile.isFull()) {
                mappedFile = this.mappedFileQueue.getLastMappedFile(0);
            }

            if (mappedFile == null) {
                log.error("Create mapped file when save consume queue extend, {}", cqExtUnit);
                continue;
            }
            final int wrotePosition = mappedFile.getWrotePosition();
            final int blankSize = this.mappedFileSize - wrotePosition - END_BLANK_DATA_LENGTH;

            // check whether has enough space.
            if (size > blankSize) {
                // Pad the remainder of this file and retry on a fresh one.
                fullFillToEnd(mappedFile, wrotePosition);
                log.info("No enough space(need:{}, has:{}) of file {}, so fill to end",
                    size, blankSize, mappedFile.getFileName());
                continue;
            }

            if (mappedFile.appendMessage(cqExtUnit.write(this.tempContainer), 0, size)) {
                return decorate(wrotePosition + mappedFile.getFileFromOffset());
            }
        }
    } catch (Throwable e) {
        log.error("Save consume queue extend error, " + cqExtUnit, e);
    }

    return 1;
}
// Smoke test: writing ext units into the consume-queue-ext store succeeds,
// with the store files cleaned up afterwards.
@Test
public void testPut() {
    ConsumeQueueExt consumeQueueExt = genExt();

    try {
        putSth(consumeQueueExt, true, false, UNIT_COUNT);
    } finally {
        consumeQueueExt.destroy();
        UtilAll.deleteFile(new File(STORE_PATH));
    }
}
// Validates a new capacity-scheduler configuration by initializing a scratch
// scheduler with the old config and reinitializing it with the new one; any
// invalid setting surfaces as an exception from reinitialize. Returns true on
// success; the scratch scheduler is always stopped.
public static boolean validateCSConfiguration(
    final Configuration oldConfParam, final Configuration newConf, final RMContext rmContext)
    throws IOException {
  // ensure that the oldConf is deep copied
  Configuration oldConf = new Configuration(oldConfParam);
  QueueMetrics.setConfigurationValidation(oldConf, true);
  QueueMetrics.setConfigurationValidation(newConf, true);
  CapacityScheduler liveScheduler = (CapacityScheduler) rmContext.getScheduler();
  CapacityScheduler newCs = new CapacityScheduler();
  try {
    //TODO: extract all the validation steps and replace reinitialize with
    //the specific validation steps
    newCs.setConf(oldConf);
    newCs.setRMContext(rmContext);
    newCs.init(oldConf);
    newCs.addNodes(liveScheduler.getAllNodes());
    newCs.reinitialize(newConf, rmContext, true);
    return true;
  } finally {
    // Always stop the scratch scheduler so its services do not leak.
    newCs.stop();
  }
}
// With the DefaultResourceCalculator in absolute-resource mode, vcore maximums
// are not enforced, so exceeding the parent's vcores must still validate.
@Test
public void testValidateCSConfigDefaultRCAbsoluteModeParentMaxVcoreExceeded() throws Exception {
    setUpMockRM(false);
    RMContext rmContext = mockRM.getRMContext();
    CapacitySchedulerConfiguration oldConfiguration = cs.getConfiguration();
    CapacitySchedulerConfiguration newConfiguration =
        new CapacitySchedulerConfiguration(cs.getConfiguration());
    newConfiguration.setMaximumResourceRequirement("", LEAF_A_FULL_PATH, VCORE_EXCEEDED_MAXRES);
    try {
        CapacitySchedulerConfigValidator
            .validateCSConfiguration(oldConfiguration, newConfiguration, rmContext);
    } catch (IOException e) {
        fail("In DefaultResourceCalculator vcore limits are not enforced");
    } finally {
        mockRM.stop();
    }
}
/** Returns the configured watcher delay in milliseconds (test hook). */
@VisibleForTesting
long getDelayMs() {
    return delayMs;
}
// The delay passed to the constructor must be retained verbatim.
@Test
public void create_instance_with_specified_delay() {
    long delayMs = new Random().nextLong();

    AbstractStopRequestWatcher underTest = new AbstractStopRequestWatcher(threadName, booleanSupplier, stopAction, delayMs) {
    };

    assertThat(underTest.getDelayMs()).isEqualTo(delayMs);
}
/** Creates a target for the given server URI with no additional request properties. */
public KsqlTarget target(final URI server) {
    return target(server, Collections.emptyMap());
}
// Decimal values in a streamed query response must keep their trailing zeros
// after deserialization.
@Test
public void shouldNotTrimTrailingZerosOnDecimalDeserialization() {
    // Given:
    server.setResponseBuffer(Buffer.buffer(""
        + "["
        + "{\"row\": {\"columns\": [1.000, 12.100]}}"
        + "]"
    ));

    // When:
    final KsqlTarget target = ksqlClient.target(serverUri);
    RestResponse<List<StreamedRow>> response = target.postQueryRequest(
        "some sql", Collections.emptyMap(), Optional.of(321L));

    // Then:
    assertThat(response.getResponse(), is(ImmutableList.of(
        StreamedRow.pushRow(GenericRow.genericRow(new BigDecimal("1.000"), new BigDecimal("12.100")))
    )));
}
/**
 * Extracts a numeric argument value for {@code argKey} from the message body.
 *
 * <p>Falls back to the argument's default value (or throws, via
 * {@code defaultOrThrow}) when the body is absent, the key is missing, the
 * value is JSON null, or a textual value is blank. Numeric nodes are read
 * directly; textual nodes are parsed as doubles. Any other node type is
 * rejected.
 *
 * @param arg         the argument descriptor supplying the default value
 * @param argKey      the body key to read
 * @param jsonNodeOpt the (possibly absent) parsed message body
 * @return the resolved argument value
 * @throws RuntimeException when the value cannot be converted to a double and
 *                          no default applies
 */
public static TbMathArgumentValue fromMessageBody(TbMathArgument arg, String argKey, Optional<ObjectNode> jsonNodeOpt) {
    Double defaultValue = arg.getDefaultValue();
    if (jsonNodeOpt.isEmpty()) {
        return defaultOrThrow(defaultValue, "Message body is empty!");
    }
    var json = jsonNodeOpt.get();
    if (!json.has(argKey)) {
        return defaultOrThrow(defaultValue, "Message body has no '" + argKey + "'!");
    }
    JsonNode valueNode = json.get(argKey);
    if (valueNode.isNull()) {
        return defaultOrThrow(defaultValue, "Message body has null '" + argKey + "'!");
    }
    double value;
    if (valueNode.isNumber()) {
        value = valueNode.doubleValue();
    } else if (valueNode.isTextual()) {
        var valueNodeText = valueNode.asText();
        if (StringUtils.isNotBlank(valueNodeText)) {
            try {
                // Reuse the already-extracted text instead of re-calling asText().
                value = Double.parseDouble(valueNodeText);
            } catch (NumberFormatException ne) {
                throw new RuntimeException("Can't convert value '" + valueNodeText + "' to double!");
            }
        } else {
            return defaultOrThrow(defaultValue, "Message value is empty for '" + argKey + "'!");
        }
    } else {
        // Objects, arrays and booleans are not convertible.
        throw new RuntimeException("Can't convert value '" + valueNode.toString() + "' to double!");
    }
    return new TbMathArgumentValue(value);
}
// An empty message body with no default value must raise a RuntimeException
// carrying a non-null message.
@Test
public void test_fromMessageBody_then_emptyBody() {
    TbMathArgument tbMathArgument = new TbMathArgument(TbMathArgumentType.MESSAGE_BODY, "TestKey");
    Throwable thrown = assertThrows(RuntimeException.class, () -> {
        TbMathArgumentValue result = TbMathArgumentValue.fromMessageBody(tbMathArgument, tbMathArgument.getKey(), Optional.empty());
    });
    Assertions.assertNotNull(thrown.getMessage());
}
// Decodes inbound objects into XMPP artifacts: a dom4j Element becomes a
// validated Packet; StAX XMLEvents for the stream envelope become
// XmppStreamOpen/XmppStreamClose markers.
@Override
protected void decode(ChannelHandlerContext channelHandlerContext, Object object, List out) throws Exception {
    if (object instanceof Element) {
        Element root = (Element) object;
        try {
            Packet packet = recognizeAndReturnXmppPacket(root);
            validate(packet);
            out.add(packet);
        } catch (UnsupportedStanzaTypeException e) {
            throw e;
        } catch (Exception e) {
            // Any other failure is surfaced as a generic validation error.
            throw new XmppValidationException(false);
        }
    } else if (object instanceof XMLEvent) {
        XMLEvent event = (XMLEvent) object;
        if (event.isStartElement()) {
            final StartElement element = event.asStartElement();
            if (element.getName().getLocalPart().equals(XmppConstants.STREAM_QNAME)) {
                // Rebuild the stream-open tag as a dom4j Element, copying the
                // qualified name, namespaces and attributes from the StAX event.
                DocumentFactory df = DocumentFactory.getInstance();
                QName qname = (element.getName().getPrefix() == null)
                    ? df.createQName(element.getName().getLocalPart(), element.getName().getNamespaceURI())
                    : df.createQName(element.getName().getLocalPart(), element.getName().getPrefix(), element.getName().getNamespaceURI());
                Element newElement = df.createElement(qname);
                Iterator nsIt = element.getNamespaces();
                // add all relevant XML namespaces to Element
                while (nsIt.hasNext()) {
                    Namespace ns = (Namespace) nsIt.next();
                    newElement.addNamespace(ns.getPrefix(), ns.getNamespaceURI());
                }
                Iterator attrIt = element.getAttributes();
                // add all attributes to Element
                while (attrIt.hasNext()) {
                    Attribute attr = (Attribute) attrIt.next();
                    newElement.addAttribute(attr.getName().getLocalPart(), attr.getValue());
                }
                XmppStreamOpen xmppStreamOpen = new XmppStreamOpen(newElement);
                validator.validateStream(xmppStreamOpen);
                out.add(xmppStreamOpen);
            }
        } else if (event.isEndElement()) {
            out.add(new XmppStreamClose());
        }
    }
}
// Decoding a stanza Element must yield a single IQ packet with the expected
// addressing and type.
@Test
public void testDecodeXmppStanza() throws Exception {
    // TODO: complete it
    List<Object> out = Lists.newArrayList();
    xmppDecoder.decode(mockChannelHandlerContext, xmppStanzaElement, out);
    assertThat(out.size(), is(1));
    assertThat(out.get(0), is(instanceOf(Packet.class)));
    assertThat(out.get(0), is(instanceOf(IQ.class)));
    IQ iq = (IQ) out.get(0);
    assertThat(iq.getElement(), is(notNullValue()));
    assertThat(iq.getFrom(), is(new JID("test@xmpp.org")));
    assertThat(iq.getTo(), is(new JID("xmpp.onosproject.org")));
    assertThat(iq.getType(), is(IQ.Type.set));
}
// Runs the external gdalinfo command on the input and mines its stdout for
// metadata and body content; a silent no-op when gdalinfo is not on the PATH.
@Override
public void parse(InputStream stream, ContentHandler handler, Metadata metadata,
                  ParseContext context) throws IOException, SAXException, TikaException {
    if (!ExternalParser.check("gdalinfo")) {
        return;
    }

    // first set up and run GDAL
    // process the command
    TemporaryResources tmp = new TemporaryResources();
    TikaInputStream tis = TikaInputStream.get(stream, tmp, metadata);

    String runCommand = processCommand(tis);
    String output = execCommand(new String[]{runCommand});

    // now extract the actual metadata params
    // from the GDAL output in the content stream
    // to do this, we need to literally process the output
    // from the invoked command b/c we can't read metadata and
    // output text from the handler in ExternalParser
    // at the same time, so for now, we can't use the
    // ExternalParser to do this and I've had to bring some of
    // that functionality directly into this class
    // TODO: investigate a way to do both using ExternalParser
    extractMetFromOutput(output, metadata);
    applyPatternsToOutput(output, metadata, getPatterns());

    // make the content handler and provide output there
    // now that we have metadata
    processOutput(handler, metadata, output);
}
// Parsing a NetCDF test document through gdalinfo must extract the expected
// driver, size and corner-coordinate metadata (skipped when gdalinfo absent).
@Test
public void testParseBasicInfo() {
    assumeTrue(canRun());
    final String expectedDriver = "netCDF/Network Common Data Format";
    final String expectedUpperRight = "512.0, 0.0";
    final String expectedUpperLeft = "0.0, 0.0";
    final String expectedLowerLeft = "0.0, 512.0";
    final String expectedLowerRight = "512.0, 512.0";
    final String expectedCoordinateSystem = "`'";
    final String expectedSize = "512, 512";

    GDALParser parser = new GDALParser();
    InputStream stream = TestGDALParser.class
        .getResourceAsStream("/test-documents/sresa1b_ncar_ccsm3_0_run1_200001.nc");
    Metadata met = new Metadata();
    BodyContentHandler handler = new BodyContentHandler();
    try {
        parser.parse(stream, handler, met, new ParseContext());
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }

    assertNotNull(met);
    assertNotNull(met.get("Driver"));
    assertEquals(expectedDriver, met.get("Driver"));
    assumeTrue(met.get("Files") != null);
    //recent version of gdalinfo doesn't include "Coordinate System":
    // GDAL 3.7.1, released 2023/07/06
    //assertNotNull(met.get("Coordinate System"));
    //assertEquals(expectedCoordinateSystem, met.get("Coordinate System"));
    assertNotNull(met.get("Size"));
    assertEquals(expectedSize, met.get("Size"));
    assertNotNull(met.get("Upper Right"));
    assertEquals(expectedUpperRight, met.get("Upper Right"));
    assertNotNull(met.get("Upper Left"));
    assertEquals(expectedUpperLeft, met.get("Upper Left"));
    assertNotNull(met.get("Upper Right"));
    assertEquals(expectedLowerRight, met.get("Lower Right"));
    assertNotNull(met.get("Upper Right"));
    assertEquals(expectedLowerLeft, met.get("Lower Left"));
}
/** Initializes the algorithm by deriving the secret key from the given properties. */
@Override
public void init(final Properties props) {
    secretKey = getSecretKey(props);
}
// An empty digest algorithm name must fail initialization.
@Test
void assertCreateNewInstanceWithEmptyDigestAlgorithm() {
    assertThrows(AlgorithmInitializationException.class,
        () -> cryptographicAlgorithm.init(PropertiesBuilder.build(new Property("aes-key-value", "123456abc"), new Property("digest-algorithm-name", ""))));
}
/** Returns the human-readable name of this ALM integration. */
@Override
public String getName() {
    return "Azure DevOps";
}
// The ALM display name must be "Azure DevOps".
@Test
public void getName() {
    assertThat(underTest.getName()).isEqualTo("Azure DevOps");
}
// Resolves and validates the mapping fields for Avro serialization: rejects
// top-level field paths, loads an inline or registry schema, and either
// derives fields from the schema (when none are declared) or validates the
// declared fields against it.
@Override
public Stream<MappingField> resolveAndValidateFields(
        boolean isKey,
        List<MappingField> userFields,
        Map<String, String> options,
        InternalSerializationService serializationService
) {
    Map<QueryPath, MappingField> fieldsByPath = extractFields(userFields, isKey);
    for (QueryPath path : fieldsByPath.keySet()) {
        if (path.isTopLevel()) {
            throw QueryException.error("Cannot use the '" + path + "' field with Avro serialization");
        }
    }
    Schema schema = getSchema(fieldsByPath, options, isKey);
    // Mixing an inline schema with a schema registry would be ambiguous.
    if (schema != null && options.containsKey("schema.registry.url")) {
        throw new IllegalArgumentException("Inline schema cannot be used with schema registry");
    }
    if (userFields.isEmpty()) {
        if (schema == null) {
            throw QueryException.error(
                    "Either a column list or an inline schema is required to create Avro-based mapping");
        }
        // Derive the field list from the schema.
        return resolveFields(schema, (name, type) ->
                new MappingField(name, type, new QueryPath(name, isKey).toString()));
    } else {
        if (schema != null) {
            validate(schema, getFields(fieldsByPath).collect(toList()));
        }
        return fieldsByPath.values().stream();
    }
}
// With an explicit column list and no schema options, the user-declared
// fields are returned unchanged.
@Test
public void test_resolveFields() {
    Stream<MappingField> fields = INSTANCE.resolveAndValidateFields(
        isKey,
        List.of(field("field", QueryDataType.INT)),
        emptyMap(),
        null
    );

    assertThat(fields).containsExactly(field("field", QueryDataType.INT));
}
/**
 * Base implementation: never drops a frame. Subclasses override this to
 * inject simulated loss.
 */
public boolean shouldDropFrame(final InetSocketAddress address, final UnsafeBuffer buffer, final int length) {
    return false;
}
// Loss must be injected only for the configured term (1) and the offset window
// starting at 2000 with length 1408 — i.e. exactly two frames in term 1.
@Test
void shouldOnlyDropInMatchingFrames() {
    final FixedLossGenerator fixedLossGenerator = new FixedLossGenerator(1, 2000, 1408);

    assertFalse(fixedLossGenerator.shouldDropFrame(null, null, 123, 456, 0, 0, 1408));
    assertFalse(fixedLossGenerator.shouldDropFrame(null, null, 123, 456, 0, 1408, 1408));
    assertFalse(fixedLossGenerator.shouldDropFrame(null, null, 123, 456, 0, 2 * 1408, 1408));
    assertFalse(fixedLossGenerator.shouldDropFrame(null, null, 123, 456, 0, 3 * 1408, 1408));
    assertFalse(fixedLossGenerator.shouldDropFrame(null, null, 123, 456, 1, 0, 1408));
    assertTrue(fixedLossGenerator.shouldDropFrame(null, null, 123, 456, 1, 1408, 1408));
    assertTrue(fixedLossGenerator.shouldDropFrame(null, null, 123, 456, 1, 2 * 1408, 1408));
    assertFalse(fixedLossGenerator.shouldDropFrame(null, null, 123, 456, 1, 3 * 1408, 1408));
    assertFalse(fixedLossGenerator.shouldDropFrame(null, null, 123, 456, 2, 0, 1408));
    assertFalse(fixedLossGenerator.shouldDropFrame(null, null, 123, 456, 2, 1408, 1408));
    assertFalse(fixedLossGenerator.shouldDropFrame(null, null, 123, 456, 2, 2 * 1408, 1408));
    assertFalse(fixedLossGenerator.shouldDropFrame(null, null, 123, 456, 2, 3 * 1408, 1408));
}
// Resolves the configured reject-tips message once dependencies are injected.
@PostConstruct
public void init() {
    rejectTips = RateLimitUtils.getRejectTips(polarisRateLimitProperties);
}
// init() must resolve the plain and HTML reject tips from the configured
// properties (field values inspected via reflection).
@Test
public void testInit() {
    quotaCheckServletFilter.init();
    try {
        Field rejectTips = QuotaCheckServletFilter.class.getDeclaredField("rejectTips");
        rejectTips.setAccessible(true);
        assertThat(rejectTips.get(quotaCheckServletFilter)).isEqualTo("RejectRequestTips提示消息");
    } catch (NoSuchFieldException | IllegalAccessException e) {
        fail("Exception encountered.", e);
    }

    quotaCheckWithHtmlRejectTipsServletFilter.init();
    try {
        Field rejectTips = QuotaCheckServletFilter.class.getDeclaredField("rejectTips");
        rejectTips.setAccessible(true);
        assertThat(rejectTips.get(quotaCheckWithHtmlRejectTipsServletFilter)).isEqualTo("<h1>RejectRequestTips提示消息</h1>");
    } catch (NoSuchFieldException | IllegalAccessException e) {
        fail("Exception encountered.", e);
    }

    quotaCheckWithRateLimiterLimitedFallbackFilter.init();
}
// Queries a broker for the subscription data of the given group/topic pair.
// Any non-success response code is surfaced as an MQBrokerException.
public SubscriptionData querySubscriptionByConsumer(final String addr, final String group, final String topic,
    final long timeoutMillis)
    throws RemotingConnectException, RemotingSendRequestException, RemotingTimeoutException,
    InterruptedException, MQBrokerException {
    QuerySubscriptionByConsumerRequestHeader requestHeader = new QuerySubscriptionByConsumerRequestHeader();
    requestHeader.setGroup(group);
    requestHeader.setTopic(topic);
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.QUERY_SUBSCRIPTION_BY_CONSUMER, requestHeader);
    // Route through the VIP channel when enabled on the client config.
    RemotingCommand response = this.remotingClient.invokeSync(
        MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr), request, timeoutMillis);
    switch (response.getCode()) {
        case ResponseCode.SUCCESS: {
            QuerySubscriptionResponseBody subscriptionResponseBody =
                QuerySubscriptionResponseBody.decode(response.getBody(), QuerySubscriptionResponseBody.class);
            return subscriptionResponseBody.getSubscriptionData();
        }
        default:
            break;
    }
    throw new MQBrokerException(response.getCode(), response.getRemark());
}
// Happy-path test: stubs the remoting layer to return a QuerySubscriptionResponseBody carrying
// a SubscriptionData for defaultTopic, then asserts the API surfaces that same data.
@Test
public void assertQuerySubscriptionByConsumer() throws RemotingException, InterruptedException, MQBrokerException {
    mockInvokeSync();
    // Build the body the mocked broker response will carry.
    QuerySubscriptionResponseBody responseBody = new QuerySubscriptionResponseBody();
    SubscriptionData subscriptionData = new SubscriptionData();
    subscriptionData.setTopic(defaultTopic);
    responseBody.setSubscriptionData(subscriptionData);
    setResponseBody(responseBody);
    SubscriptionData actual = mqClientAPI.querySubscriptionByConsumer(defaultBrokerAddr, group, defaultTopic, defaultTimeout);
    assertNotNull(actual);
    assertEquals(defaultTopic, actual.getTopic());
}
/**
 * Parses a full cruise-config XML document into an editable {@link CruiseConfig}.
 * <p>
 * The raw content's MD5 is computed first and stamped onto the parsed config (used elsewhere
 * for optimistic-concurrency checks — TODO confirm), and the config's origin is marked as the
 * config file.
 *
 * @param content the complete XML configuration document
 * @return the parsed configuration, with MD5 and file origin set
 * @throws Exception if the XML cannot be parsed or mapped onto {@link BasicCruiseConfig}
 */
public CruiseConfig deserializeConfig(String content) throws Exception {
    String md5 = md5Hex(content);
    // NOTE(review): getBytes() uses the platform default charset here; the md5 above is
    // computed from the String — verify both agree on encoding for non-ASCII configs.
    Element element = parseInputStream(new ByteArrayInputStream(content.getBytes()));
    LOGGER.debug("[Config Save] Updating config cache with new XML");
    CruiseConfig configForEdit = classParser(element, BasicCruiseConfig.class, configCache, new GoCipher(), registry, new ConfigReferenceElements()).parse();
    setMd5(configForEdit, md5);
    configForEdit.setOrigins(new FileConfigOrigin());
    return configForEdit;
}
// Verifies that an <exec> task command containing whitespace ("c:\program files\cmd.exe")
// survives XML deserialization intact and is not split on the space.
@Test
void shouldSupportCommandWithWhiteSpace() throws Exception {
    String jobWithCommand = """
            <job name="functional">
              <tasks>
                <exec command="c:\\program files\\cmd.exe" args="arguments" />
              </tasks>
            </job>
            """;
    String configWithCommand = withCommand(jobWithCommand);
    CruiseConfig cruiseConfig = xmlLoader.deserializeConfig(configWithCommand);

    // Navigate to the single task of the first job of pipeline1.
    Task task = cruiseConfig.pipelineConfigByName(new CaseInsensitiveString("pipeline1")).first().allBuildPlans().first().tasks().first();

    assertThat(task).isInstanceOf(ExecTask.class);
    assertThat(task).isEqualTo(new ExecTask("c:\\program files\\cmd.exe", "arguments", (String) null));
}
/**
 * Records {@code offset} for the given queue and freezes the entry so that subsequent
 * non-freezing updates cannot change it. A {@code null} queue is silently ignored.
 *
 * @param mq     the message queue whose offset is being pinned; may be {@code null}
 * @param offset the offset value to record and freeze
 */
@Override
public void updateAndFreezeOffset(MessageQueue mq, long offset) {
    if (mq == null) {
        return;
    }
    // Create the entry on first sight of this queue, then freeze it at the given offset.
    ControllableOffset controllableOffset =
        this.offsetTable.computeIfAbsent(mq, key -> new ControllableOffset(offset));
    controllableOffset.updateAndFreeze(offset);
}
// Verifies that once an offset is frozen at 1024, neither a plain update (1023) nor an
// increase-only update (1022, increaseOnly=true) can change the stored value.
@Test
public void testUpdateAndFreezeOffset() throws Exception {
    OffsetStore offsetStore = new RemoteBrokerOffsetStore(mQClientFactory, group);
    MessageQueue messageQueue = new MessageQueue(topic, brokerName, 1);
    offsetStore.updateAndFreezeOffset(messageQueue, 1024);
    assertThat(offsetStore.readOffset(messageQueue, ReadOffsetType.READ_FROM_MEMORY)).isEqualTo(1024);

    // A frozen offset must ignore later plain updates...
    offsetStore.updateOffset(messageQueue, 1023, false);
    assertThat(offsetStore.readOffset(messageQueue, ReadOffsetType.READ_FROM_MEMORY)).isEqualTo(1024);

    // ...and later increase-only updates as well.
    offsetStore.updateOffset(messageQueue, 1022, true);
    assertThat(offsetStore.readOffset(messageQueue, ReadOffsetType.READ_FROM_MEMORY)).isEqualTo(1024);
}
/**
 * Cancels the in-flight database statement when this step is asked to stop.
 * <p>
 * No-op if the step is already stopped or its data has been disposed. The cancellation is
 * guarded by {@code dbLock} and by the {@code isCanceled} flag so the statement is cancelled
 * at most once even under concurrent stop requests.
 *
 * @param smi step meta (unused here)
 * @param sdi step data holding the live DB connection and prepared statement
 * @throws KettleException if cancelling the statement fails
 */
public void stopRunning( StepMetaInterface smi, StepDataInterface sdi ) throws KettleException {
    // Fast exit without taking the lock when there is nothing to cancel.
    if ( this.isStopped() || sdi.isDisposed() ) {
        return;
    }
    final DatabaseJoinData data = (DatabaseJoinData) sdi;
    dbLock.lock();
    try {
        // Only cancel when a live connection exists and no prior cancel has happened.
        if ( data.db != null && data.db.getConnection() != null && !data.isCanceled ) {
            data.db.cancelStatement( data.pstmt );
            // Mark stopped *after* a successful cancel so a failed cancel can be retried.
            setStopped( true );
            data.isCanceled = true;
        }
    } finally {
        dbLock.unlock();
    }
}
// Verifies stopRunning() short-circuits when the step data is disposed: it must consult
// isStopped() and isDisposed() exactly once each and perform no cancellation work.
@Test
public void testStopRunningWhenStepDataInterfaceIsDisposed() throws KettleException {
    doReturn( false ).when( mockDatabaseJoin ).isStopped();
    doReturn( true ).when( mockStepDataInterface ).isDisposed();
    mockDatabaseJoin.stopRunning( mockStepMetaInterface, mockStepDataInterface );
    verify( mockDatabaseJoin, times( 1 ) ).isStopped();
    verify( mockStepDataInterface, times( 1 ) ).isDisposed();
}
/**
 * Deletes the on-disk archive file associated with the given backup, if any.
 * <p>
 * Completes immediately (without touching the filesystem) when the backup has no status or
 * no filename. The resolved path is validated against directory traversal before deletion,
 * and the blocking file I/O is shifted onto {@code scheduler}.
 *
 * @param backup the backup whose archive should be removed
 * @return a {@link Mono} that completes when the file is gone (or was never recorded),
 *         or errors with the underlying {@link IOException}
 */
@Override
public Mono<Void> cleanup(Backup backup) {
    return Mono.<Void>create(sink -> {
        var status = backup.getStatus();
        // Nothing was ever written for this backup — treat as already clean.
        if (status == null || !StringUtils.hasText(status.getFilename())) {
            sink.success();
            return;
        }
        var filename = status.getFilename();
        var backupsRoot = getBackupsRoot();
        var backupFile = backupsRoot.resolve(filename);
        try {
            // Reject filenames that would escape the backups root (e.g. "../../etc").
            checkDirectoryTraversal(backupsRoot, backupFile);
            deleteIfExists(backupFile);
            sink.success();
        } catch (IOException e) {
            sink.error(e);
        }
    }).subscribeOn(scheduler);
}
// A backup that never recorded a filename must clean up as a no-op: the Mono completes and
// no filesystem paths are even resolved (workDir / backupRoot untouched).
@Test
void cleanupBackupWithNoFilename() {
    var backup = createSucceededBackup("fake-backup", null);
    StepVerifier.create(migrationService.cleanup(backup))
        .verifyComplete();
    verify(haloProperties, never()).getWorkDir();
    verify(backupRoot, never()).get();
}
/**
 * Accepts a new point, delegating the real work to {@code doAccept}.
 * <p>
 * The delegate runs inside {@code parallelismDetector} to ensure this method is never
 * executed by multiple threads at the same time (presumably it fails fast on concurrent
 * entry — confirm against ParallelismDetector's contract).
 */
@Override
public void accept(Point newPoint) {
    //ensure this method is never called by multiple threads at the same time.
    parallelismDetector.run(
        () -> doAccept(newPoint)
    );
}
// Verifies the maxTrackAge constructor parameter controls when a track becomes "closeable":
// the same 5-point, ~1m20s flight closes a track when maxTrackAge is 1 minute, but not when
// it is 2 hours.
@Test
public void autoTrackClosureConstructorParameterIsRespected() {

    Duration maxPointDelta = Duration.ofSeconds(30);
    Duration maxTrackAge_small = Duration.ofMinutes(1); //way too small in real life
    Duration maxTrackAge_big = Duration.ofHours(2); //default value

    //5 points from the same flight that span 1m 20sec (manipulated times for testing)
    String p1 = "[RH],Center,ZDV,07-09-2019,02:49:54.000,SWA5423,B737,L,0564,380,414,229,37.3058,-101.8194,638,,,,,ZKC/21,,ZDV,,,,E0256,DEN,,IFR,,638,244060382,BWI,,380//380,,L,1,,,{RH}";
    String p2 = "[RH],Center,ZDV,07-09-2019,02:50:16.000,SWA5423,B737,L,0564,380,415,229,37.2911,-101.8403,638,,,,,ZKC/21,,ZDV,,,,E0256,DEN,,IFR,,638,244060652,BWI,,380//380,,L,1,,,{RH}";
    String p3 = "[RH],Center,ZDV,07-09-2019,02:50:39.000,SWA5423,B737,L,0564,380,415,229,37.2758,-101.8619,638,,,,,ZKC/21,,ZDV,,,,E0256,DEN,,IFR,,638,244060923,BWI,,380//380,,L,1,,,{RH}";
    String p4 = "[RH],Center,ZDV,07-09-2019,02:50:51.000,SWA5423,B737,L,0564,380,414,229,37.2606,-101.8833,638,,,,,ZKC/21,,ZDV,,,,E0256,DEN,,IFR,,638,244061193,BWI,,380//380,,L,1,,,{RH}";
    String p5 = "[RH],Center,ZDV,07-09-2019,02:51:14.000,SWA5423,B737,L,0564,380,414,229,37.2453,-101.9053,638,,,,,ZKC/21,,ZDV,,,,E0256,DEN,,IFR,,638,244061462,BWI,,380//380,,L,1,,,{RH}";

    // Small max age: the 5th point pushes the track past 1 minute, so it is emitted downstream.
    TestConsumer smallCounter = new TestConsumer();
    TrackMaker smallTrackMaker = new TrackMaker(maxPointDelta, maxTrackAge_small, smallCounter);

    smallTrackMaker.accept(NopHit.from(p1));
    smallTrackMaker.accept(NopHit.from(p2));
    smallTrackMaker.accept(NopHit.from(p3));
    smallTrackMaker.accept(NopHit.from(p4));
    smallTrackMaker.accept(NopHit.from(p5)); //this point should cause the track to be "closeable"

    assertThat(smallCounter.numCallsToAccept, is(1));

    //now do same thing...but with a bigger maxTrackAge
    TestConsumer bigCounter = new TestConsumer();
    TrackMaker bigTrackMaker = new TrackMaker(maxPointDelta, maxTrackAge_big, bigCounter);

    bigTrackMaker.accept(NopHit.from(p1));
    bigTrackMaker.accept(NopHit.from(p2));
    bigTrackMaker.accept(NopHit.from(p3));
    bigTrackMaker.accept(NopHit.from(p4));
    bigTrackMaker.accept(NopHit.from(p5)); //this point should NOT cause the track to be "closeable"

    assertThat(bigCounter.numCallsToAccept, is(0)); //nothing emitted yet
}
/**
 * Returns the epoch millisecond of the first instant (midnight, 00:00:00.000) of the day
 * after the day containing {@code now}, evaluated in the JVM's default time zone.
 *
 * @param now a point in time, in epoch milliseconds
 * @return the start of the following day, in epoch milliseconds
 */
public static long computeStartOfNextDay(long now) {
    final Calendar calendar = Calendar.getInstance();
    calendar.setTimeInMillis(now);
    // Advance to the next day first, then truncate every time-of-day field to midnight.
    calendar.add(Calendar.DAY_OF_MONTH, 1);
    calendar.set(Calendar.HOUR_OF_DAY, 0);
    calendar.set(Calendar.MINUTE, 0);
    calendar.set(Calendar.SECOND, 0);
    calendar.set(Calendar.MILLISECOND, 0);
    return calendar.getTimeInMillis();
}
// Checks computeStartOfNextDay against a fixed instant. Fixtures are GMT epoch millis,
// adjusted to the runner's zone by correctBasedOnTimeZone. The first assertion pins the gap
// between 18:05:17.522 and the next midnight: 5h 54m 42s and 478ms.
@Test
public void testDay() {
    // Mon Nov 20 18:05:17 GMT 2006
    long now = 1164045917522L;
    now = correctBasedOnTimeZone(now);
    // Tue Nov 21 00:00:00 GMT 2006
    long expected = 1164067200000L;
    expected = correctBasedOnTimeZone(expected);
    long computed = TimeUtil.computeStartOfNextDay(now);
    Assertions.assertEquals(expected - now, 1000 * (3600 * 5 + 60 * 54 + 42) + 478);
    Assertions.assertEquals(expected, computed);
}
/**
 * Validates a readwrite-splitting rule configuration for a database:
 * first the data source groups (against the actual data sources and other built rules),
 * then the configured load balancer algorithms.
 *
 * @param databaseName  name of the database being validated (used in error reporting)
 * @param ruleConfig    the readwrite-splitting rule configuration to validate
 * @param dataSourceMap actual data sources available to the database
 * @param builtRules    rules already built for this database
 */
@Override
public void check(final String databaseName, final ReadwriteSplittingRuleConfiguration ruleConfig,
                  final Map<String, DataSource> dataSourceMap, final Collection<ShardingSphereRule> builtRules) {
    checkDataSources(databaseName, ruleConfig.getDataSourceGroups(), dataSourceMap, builtRules);
    checkLoadBalancer(databaseName, ruleConfig);
}
// A WEIGHT load balancer that references a data source ("read_ds_2") not present in the
// read group (read_ds_0, read_ds_1) must fail validation with AlgorithmInitializationException.
@SuppressWarnings({"rawtypes", "unchecked"})
@Test
void assertCheckWeightLoadBalanceInvalidDataSourceName() {
    ReadwriteSplittingRuleConfiguration config = mock(ReadwriteSplittingRuleConfiguration.class);
    Collection<ReadwriteSplittingDataSourceGroupRuleConfiguration> configs =
        Collections.singleton(createDataSourceGroupRuleConfiguration("write_ds_0", Arrays.asList("read_ds_0", "read_ds_1")));
    when(config.getDataSourceGroups()).thenReturn(configs);
    // Weight entries name read_ds_2, which does not exist in the group above.
    AlgorithmConfiguration algorithm =
        new AlgorithmConfiguration("WEIGHT", PropertiesBuilder.build(new Property("read_ds_2", "1"), new Property("read_ds_1", "2")));
    when(config.getLoadBalancers()).thenReturn(Collections.singletonMap("weight_ds", algorithm));
    // Resolve the checker for this configuration type via ordered SPI.
    RuleConfigurationChecker checker =
        OrderedSPILoader.getServicesByClass(RuleConfigurationChecker.class, Collections.singleton(config.getClass())).get(config.getClass());
    assertThrows(AlgorithmInitializationException.class, () -> checker.check("test", config, mockDataSources(), Collections.emptyList()));
}
/**
 * Returns the key properties of the given MBean name, in their original declaration order,
 * caching the parsed result per {@link ObjectName}.
 *
 * @param mbeanName the MBean name whose key property list should be parsed
 * @return an insertion-ordered map of key property names to their (possibly quoted) values
 */
public LinkedHashMap<String, String> getKeyPropertyList(ObjectName mbeanName) {
    LinkedHashMap<String, String> cached = keyPropertiesPerBean.get(mbeanName);
    if (cached != null) {
        return cached;
    }
    final LinkedHashMap<String, String> parsed = new LinkedHashMap<>();
    String remaining = mbeanName.getKeyPropertyListString();
    Matcher matcher = PROPERTY_PATTERN.matcher(remaining);
    // lookingAt() anchors each match at the start of the unconsumed tail; consume one
    // key=value pair per iteration, plus the trailing comma separator when present.
    while (matcher.lookingAt()) {
        parsed.put(matcher.group(1), matcher.group(2));
        remaining = remaining.substring(matcher.end());
        if (remaining.startsWith(",")) {
            remaining = remaining.substring(1);
        }
        matcher.reset(remaining);
    }
    keyPropertiesPerBean.put(mbeanName, parsed);
    return parsed;
}
// Regression test (issue 52): a quoted property value containing an escaped '?' and an '&'
// must be kept as a single value, not split at those characters.
@Test
public void testIssue52() throws Throwable {
    JmxMBeanPropertyCache testCache = new JmxMBeanPropertyCache();
    LinkedHashMap<String, String> parameterList =
        testCache.getKeyPropertyList(
            new ObjectName(
                "org.apache.camel:context=ourinternalname,type=endpoints,name=\"seda://endpointName\\?concurrentConsumers=8&size=50000\""));
    // Order matters: keys must come back in declaration order with quotes/escapes preserved.
    assertSameElementsAndOrder(
        parameterList,
        "context", "ourinternalname",
        "type", "endpoints",
        "name", "\"seda://endpointName\\?concurrentConsumers=8&size=50000\"");
}
/**
 * Determines which network a base58 address string belongs to.
 *
 * @param address base58-encoded legacy address
 * @return the network parameters for the address's network
 * @throws AddressFormatException if the address is invalid or for no known network
 * @deprecated use an {@code AddressParser} and {@code NetworkParameters.fromAddress} directly
 */
@Deprecated
public static NetworkParameters getParametersFromAddress(String address) throws AddressFormatException {
    return NetworkParameters.fromAddress(AddressParser.getLegacy().parseAddress(address));
}
@Test @Deprecated // Test a deprecated method just to make sure we didn't break it public void getNetworkViaParameters() { NetworkParameters params = LegacyAddress.getParametersFromAddress("17kzeh4N8g49GFvdDzSf8PjaPfyoD1MndL"); assertEquals(MAINNET.id(), params.getId()); params = LegacyAddress.getParametersFromAddress("n4eA2nbYqErp7H6jebchxAN59DmNpksexv"); assertEquals(TESTNET.id(), params.getId()); }
/**
 * Finds the shared IPC publication for the given stream id, or {@code null} if none exists
 * (delegates to {@code findSharedIpcPublication} over the current {@code ipcPublications}).
 */
IpcPublication getSharedIpcPublication(final long streamId)
{
    return findSharedIpcPublication(ipcPublications, streamId);
}
// After adding then removing an IPC subscription and advancing time past the client liveness
// timeout, the shared IPC publication must have been cleaned up (lookup returns null).
@Test
void shouldBeAbleToAddAndRemoveSubscriptionToIpcPublication()
{
    final long idAdd = driverProxy.addSubscription(CHANNEL_IPC, STREAM_ID_1);
    driverProxy.removeSubscription(idAdd);

    // Drive the conductor until the liveness timeout has elapsed on the test clock.
    doWorkUntil(() -> nanoClock.nanoTime() >= CLIENT_LIVENESS_TIMEOUT_NS);

    final IpcPublication ipcPublication = driverConductor.getSharedIpcPublication(STREAM_ID_1);
    assertNull(ipcPublication);
}
/**
 * Evaluates every row against the given column pairs and returns the first non-empty result,
 * in row order, or {@link Optional#empty()} when no row matches.
 *
 * @param columnPairsMap column name/value pairs the rows are matched against
 * @param outputColumn   the column whose value is returned on a match
 * @param regexField     optional field governing regex matching (may be {@code null})
 * @return the first matching row's output value, if any
 */
public Optional<Object> evaluate(final Map<String, Object> columnPairsMap,
                                 final String outputColumn,
                                 final String regexField) {
    return rows.stream()
            .map(currentRow -> currentRow.evaluate(columnPairsMap, outputColumn, regexField))
            .filter(Optional::isPresent)
            .map(Optional::get)
            .findFirst();
}
// With three key/value pairs present, evaluating for output column "KEY-1-2" must return the
// matching row's value "VALUE-1-2".
@Test
void evaluateKeyFoundMultipleMatching() {
    KiePMMLInlineTable kiePMMLInlineTable = new KiePMMLInlineTable("name", Collections.emptyList(), ROWS);
    // Builds {KEY-1-0=VALUE-1-0, KEY-1-1=VALUE-1-1, KEY-1-2=VALUE-1-2}.
    Map<String, Object> columnPairsMap = IntStream.range(0, 3).boxed()
        .collect(Collectors.toMap(i -> "KEY-1-" + i, i -> "VALUE-1-" + i));
    Optional<Object> retrieved = kiePMMLInlineTable.evaluate(columnPairsMap, "KEY-1-2", null);
    assertThat(retrieved).isPresent();
    assertThat(retrieved.get()).isEqualTo("VALUE-1-2");
}
/**
 * Builds the full set of JVM memory gauges: combined totals, heap, non-heap, and one group
 * of gauges per memory pool. Returned map is unmodifiable; each gauge reads the MXBeans live.
 * <p>
 * Where a max is undefined the MXBean reports -1; "usage" ratios fall back to the committed
 * size in that case so the ratio stays meaningful.
 */
@Override
public Map<String, Metric> getMetrics() {
    final Map<String, Metric> gauges = new HashMap<>();

    // Totals: heap + non-heap combined. total.max propagates -1 when non-heap max is undefined.
    gauges.put("total.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit() +
            mxBean.getNonHeapMemoryUsage().getInit());
    gauges.put("total.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed() +
            mxBean.getNonHeapMemoryUsage().getUsed());
    gauges.put("total.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax() == -1 ?
            -1 : mxBean.getHeapMemoryUsage().getMax() + mxBean.getNonHeapMemoryUsage().getMax());
    gauges.put("total.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted() +
            mxBean.getNonHeapMemoryUsage().getCommitted());

    // Heap gauges; heap.usage is used/max (heap max is assumed defined here).
    gauges.put("heap.init", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getInit());
    gauges.put("heap.used", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getUsed());
    gauges.put("heap.max", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getMax());
    gauges.put("heap.committed", (Gauge<Long>) () -> mxBean.getHeapMemoryUsage().getCommitted());
    gauges.put("heap.usage", new RatioGauge() {
        @Override
        protected Ratio getRatio() {
            final MemoryUsage usage = mxBean.getHeapMemoryUsage();
            return Ratio.of(usage.getUsed(), usage.getMax());
        }
    });

    // Non-heap gauges; usage falls back to committed when max is -1 (undefined).
    gauges.put("non-heap.init", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getInit());
    gauges.put("non-heap.used", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getUsed());
    gauges.put("non-heap.max", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getMax());
    gauges.put("non-heap.committed", (Gauge<Long>) () -> mxBean.getNonHeapMemoryUsage().getCommitted());
    gauges.put("non-heap.usage", new RatioGauge() {
        @Override
        protected Ratio getRatio() {
            final MemoryUsage usage = mxBean.getNonHeapMemoryUsage();
            return Ratio.of(usage.getUsed(), usage.getMax() == -1 ?
                    usage.getCommitted() : usage.getMax());
        }
    });

    // Per-pool gauges under "pools.<name>.*"; pool names with whitespace are hyphenated.
    for (final MemoryPoolMXBean pool : memoryPools) {
        final String poolName = name("pools", WHITESPACE.matcher(pool.getName()).replaceAll("-"));
        gauges.put(name(poolName, "usage"), new RatioGauge() {
            @Override
            protected Ratio getRatio() {
                MemoryUsage usage = pool.getUsage();
                return Ratio.of(usage.getUsed(),
                        usage.getMax() == -1 ? usage.getCommitted() : usage.getMax());
            }
        });
        gauges.put(name(poolName, "max"), (Gauge<Long>) () -> pool.getUsage().getMax());
        gauges.put(name(poolName, "used"), (Gauge<Long>) () -> pool.getUsage().getUsed());
        gauges.put(name(poolName, "committed"), (Gauge<Long>) () -> pool.getUsage().getCommitted());

        // Only register GC usage metrics if the memory pool supports usage statistics.
        if (pool.getCollectionUsage() != null) {
            gauges.put(name(poolName, "used-after-gc"), (Gauge<Long>) () ->
                    pool.getCollectionUsage().getUsed());
        }

        gauges.put(name(poolName, "init"), (Gauge<Long>) () -> pool.getUsage().getInit());
    }

    return Collections.unmodifiableMap(gauges);
}
// The "non-heap.max" gauge must report the mocked non-heap MemoryUsage max (4L in this fixture).
@Test
public void hasAGaugeForNonHeapMax() {
    final Gauge gauge = (Gauge) gauges.getMetrics().get("non-heap.max");
    assertThat(gauge.getValue())
        .isEqualTo(4L);
}
/**
 * Returns the job's submission time (units as stored in {@code jobSubmissionTime};
 * presumably epoch milliseconds — confirm against the producing code).
 */
public final long getJobSubmissionTime() {
    return jobSubmissionTime;
}
// Builds a ResourceSkyline over two intervals and checks the submission time passed to the
// constructor (third argument, 0) is returned unchanged.
@Test
public final void testGetJobSubmissionTime() {
    // The field starts unset until this test constructs it below.
    Assert.assertNull(resourceSkyline);
    ReservationInterval riAdd = new ReservationInterval(0, 10);
    skylineList.addInterval(riAdd, resource1);
    riAdd = new ReservationInterval(10, 20);
    skylineList.addInterval(riAdd, resource1);
    resourceSkyline = new ResourceSkyline("1", 1024.5, 0, 20, resource1, skylineList);
    Assert.assertEquals(0, resourceSkyline.getJobSubmissionTime());
}
/**
 * Reads entries for a cursor on a compacted topic, transparently choosing between the
 * compacted store and the managed ledger.
 * <p>
 * If the read position is beyond the last compacted position (or nothing was compacted),
 * the read goes straight to the cursor (optionally waiting for new entries when {@code wait}
 * is set). Otherwise entries come from the compaction service; the cursor is seeked forward
 * past whatever was returned so the next read continues from the right place.
 * All failures are funneled to {@code callback.readEntriesFailed}.
 *
 * @param topicCompactionService source of compacted entries and the last compacted position
 * @param cursor                 the subscription cursor being read
 * @param maxEntries             maximum number of entries to return; must be positive
 * @param bytesToRead            soft byte budget for the read
 * @param maxReadPosition        upper bound position for cursor reads
 * @param readFromEarliest       when true, read from the earliest position instead of the
 *                               cursor's current read position
 * @param callback               receives the entries or the failure
 * @param wait                   when reading from the cursor, whether to wait for new entries
 * @param consumer               consumer carried in the read context; may be null
 */
@Beta
public static void asyncReadCompactedEntries(TopicCompactionService topicCompactionService,
                                             ManagedCursor cursor,
                                             int maxEntries,
                                             long bytesToRead,
                                             Position maxReadPosition,
                                             boolean readFromEarliest,
                                             AsyncCallbacks.ReadEntriesCallback callback,
                                             boolean wait,
                                             @Nullable Consumer consumer) {
    Objects.requireNonNull(topicCompactionService);
    Objects.requireNonNull(cursor);
    checkArgument(maxEntries > 0);
    Objects.requireNonNull(callback);

    final Position readPosition;
    if (readFromEarliest) {
        readPosition = PositionFactory.EARLIEST;
    } else {
        readPosition = cursor.getReadPosition();
    }

    // TODO: redeliver epoch link https://github.com/apache/pulsar/issues/13690
    PersistentDispatcherSingleActiveConsumer.ReadEntriesCtx readEntriesCtx =
            PersistentDispatcherSingleActiveConsumer.ReadEntriesCtx.create(consumer, DEFAULT_CONSUMER_EPOCH);

    CompletableFuture<Position> lastCompactedPositionFuture = topicCompactionService.getLastCompactedPosition();

    lastCompactedPositionFuture.thenCompose(lastCompactedPosition -> {
        // Past the compacted range (or nothing compacted): read from the managed ledger.
        if (lastCompactedPosition == null
                || readPosition.compareTo(
                lastCompactedPosition.getLedgerId(), lastCompactedPosition.getEntryId()) > 0) {
            if (wait) {
                cursor.asyncReadEntriesOrWait(maxEntries, bytesToRead, callback, readEntriesCtx, maxReadPosition);
            } else {
                cursor.asyncReadEntries(maxEntries, bytesToRead, callback, readEntriesCtx, maxReadPosition);
            }
            return CompletableFuture.completedFuture(null);
        }

        // Within the compacted range: read from the compaction service.
        ManagedCursorImpl managedCursor = (ManagedCursorImpl) cursor;
        int numberOfEntriesToRead = managedCursor.applyMaxSizeCap(maxEntries, bytesToRead);
        return topicCompactionService.readCompactedEntries(readPosition, numberOfEntriesToRead)
                .thenAccept(entries -> {
                    if (CollectionUtils.isEmpty(entries)) {
                        // Everything up to the last compacted position was compacted away;
                        // skip the cursor forward (never backwards) and report an empty read.
                        Position seekToPosition = lastCompactedPosition.getNext();
                        if (readPosition.compareTo(seekToPosition.getLedgerId(), seekToPosition.getEntryId()) > 0) {
                            seekToPosition = readPosition;
                        }
                        cursor.seek(seekToPosition);
                        callback.readEntriesComplete(Collections.emptyList(), readEntriesCtx);
                        return;
                    }

                    long entriesSize = 0;
                    for (Entry entry : entries) {
                        entriesSize += entry.getLength();
                    }
                    managedCursor.updateReadStats(entries.size(), entriesSize);

                    // Advance the cursor past the last returned entry before completing.
                    Entry lastEntry = entries.get(entries.size() - 1);
                    cursor.seek(lastEntry.getPosition().getNext(), true);
                    callback.readEntriesComplete(entries, readEntriesCtx);
                });
    }).exceptionally((exception) -> {
        exception = FutureUtil.unwrapCompletionException(exception);
        callback.readEntriesFailed(ManagedLedgerException.getManagedLedgerException(exception), readEntriesCtx);
        return null;
    });
}
// When the compaction service returns no entries for a read position inside the compacted
// range, the helper must complete with an empty list, raise no error, and seek the cursor to
// the position just after the last compacted position.
@Test
public void testReadCompactedEntriesWithEmptyEntries() throws ExecutionException, InterruptedException {
    Position lastCompactedPosition = PositionFactory.create(1, 100);
    TopicCompactionService service = Mockito.mock(TopicCompactionService.class);
    Mockito.doReturn(CompletableFuture.completedFuture(Collections.emptyList()))
        .when(service).readCompactedEntries(Mockito.any(), Mockito.intThat(argument -> argument > 0));
    Mockito.doReturn(CompletableFuture.completedFuture(lastCompactedPosition)).when(service)
        .getLastCompactedPosition();

    // Cursor read position (1, 91) is before the last compacted position (1, 100).
    Position initPosition = PositionFactory.create(1, 90);
    AtomicReference<Position> readPositionRef = new AtomicReference<>(initPosition.getNext());
    ManagedCursorImpl cursor = Mockito.mock(ManagedCursorImpl.class);
    Mockito.doReturn(readPositionRef.get()).when(cursor).getReadPosition();
    Mockito.doReturn(1).when(cursor).applyMaxSizeCap(Mockito.anyInt(), Mockito.anyLong());
    // Capture where the helper seeks the cursor to.
    Mockito.doAnswer(invocation -> {
        readPositionRef.set(invocation.getArgument(0));
        return null;
    }).when(cursor).seek(Mockito.any());

    CompletableFuture<List<Entry>> completableFuture = new CompletableFuture<>();
    final AtomicReference<Throwable> throwableRef = new AtomicReference<>();
    AsyncCallbacks.ReadEntriesCallback readEntriesCallback = new AsyncCallbacks.ReadEntriesCallback() {
        @Override
        public void readEntriesComplete(List<Entry> entries, Object ctx) {
            completableFuture.complete(entries);
        }

        @Override
        public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
            completableFuture.completeExceptionally(exception);
            throwableRef.set(exception);
        }
    };

    CompactedTopicUtils.asyncReadCompactedEntries(service, cursor, 1, 100,
        PositionFactory.LATEST, false, readEntriesCallback, false, null);

    List<Entry> entries = completableFuture.get();
    Assert.assertTrue(entries.isEmpty());
    Assert.assertNull(throwableRef.get());
    Assert.assertEquals(readPositionRef.get(), lastCompactedPosition.getNext());
}
/**
 * Returns a {@link Metric} with the given name and {@code COUNT} unit
 * (delegates to {@code MetricsImpl.metric}).
 */
public static Metric metric(String name) {
    return MetricsImpl.metric(name, Unit.COUNT);
}
// With JobConfig.setMetricsEnabled(false), a user metric incremented inside the pipeline must
// be absent from job.getMetrics() even while the job is producing output.
@Test
public void when_metricsDisabled_then_unavailableDuringJobExecution() {
    int generatedItems = 1000;

    pipeline.readFrom(TestSources.itemStream(1_000))
        .withIngestionTimestamps()
        .filter(l -> l.sequence() < generatedItems)
        .map(t -> {
            // User metric incremented per item; should be invisible when metrics are disabled.
            Metrics.metric("total").increment();
            return t;
        })
        .writeTo(Sinks.list("sink"));

    Job job = instance.getJet().newJob(pipeline, new JobConfig().setMetricsEnabled(false));
    List<Object> list = instance.getList("sink");
    // Wait until the streaming job has demonstrably started emitting.
    assertTrueEventually(() -> assertFalse(list.isEmpty()));
    assertTrue(job.getMetrics().get("total").isEmpty());
}