focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@GetMapping public String getHealth() { // TODO UP DOWN WARN StringBuilder sb = new StringBuilder(); String dbStatus = dataSourceService.getHealth(); boolean addressServerHealthy = isAddressServerHealthy(); if (dbStatus.contains(HEALTH_UP) && addressServerHealthy && ServerMemberManager.isInIpList()) { sb.append(HEALTH_UP); } else if (dbStatus.contains(HEALTH_WARN) && addressServerHealthy && ServerMemberManager.isInIpList()) { sb.append("WARN:"); sb.append("slave db (").append(dbStatus.split(":")[1]).append(") down. "); } else { sb.append("DOWN:"); if (dbStatus.contains(HEALTH_DOWN)) { sb.append("master db (").append(dbStatus.split(":")[1]).append(") down. "); } if (!addressServerHealthy) { sb.append("address server down. "); } if (!ServerMemberManager.isInIpList()) { sb.append("server ip ").append(InetUtils.getSelfIP()) .append(" is not in the serverList of address server. "); } } return sb.toString(); }
// Verifies the health endpoint reports "DOWN:address server down. " when the member
// lookup advertises an unhealthy address server, even though the DB status is UP.
@Test void testGetHealthWhenTheLoopUpInfoIsDown() throws Exception { when(dataSourceService.getHealth()).thenReturn("UP"); when(memberManager.getLookup()).thenReturn(memberLookup); when(memberLookup.useAddressServer()).thenReturn(true); final HashMap<String, Object> info = new HashMap<>(); info.put("addressServerHealth", "false"); when(memberLookup.info()).thenReturn(info); MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.get(Constants.HEALTH_CONTROLLER_PATH); String actualValue = mockmvc.perform(builder).andReturn().getResponse().getContentAsString(); assertEquals("DOWN:address server down. ", actualValue); }
/**
 * Sets the named property on the given instance to the supplied value.
 *
 * @param instance target object whose property is written
 * @param pn property name
 * @param pv new property value
 * @throws NoSuchPropertyException if the instance has no property named {@code pn}
 * @throws IllegalArgumentException if the value is not assignable to the property
 */
public abstract void setPropertyValue(Object instance, String pn, Object pv) throws NoSuchPropertyException, IllegalArgumentException;
// Writing a null-named property on a plain Object wrapper must raise NoSuchPropertyException.
@Test void testSetPropertyValue() throws Exception { Assertions.assertThrows(NoSuchPropertyException.class, () -> { Wrapper w = Wrapper.getWrapper(Object.class); w.setPropertyValue(null, null, null); }); }
/** Creates the executor with the default name {@code "stateChangeExecutor"}. */
public StateChangeExecutor() { this("stateChangeExecutor"); }
// Drives every supported FrontendNodeType transition through runOne, covering
// INIT/UNKNOWN/FOLLOWER/OBSERVER origins moving to LEADER/FOLLOWER/UNKNOWN.
@Test public void testStateChangeExecutor() { // INIT -> LEADER runOne("StateChangeExecutor_initTOleader", FrontendNodeType.INIT, FrontendNodeType.LEADER); // INIT -> FOLLOWER runOne("StateChangeExecutor_initTOfollower", FrontendNodeType.INIT, FrontendNodeType.FOLLOWER); // UNKNOWN -> LEADER runOne("StateChangeExecutor_unknownTOleader", FrontendNodeType.UNKNOWN, FrontendNodeType.LEADER); // UNKNOWN -> FOLLOWER runOne("StateChangeExecutor_unknownTOfollower", FrontendNodeType.UNKNOWN, FrontendNodeType.FOLLOWER); // FOLLOWER -> LEADER runOne("StateChangeExecutor_followerTOleader", FrontendNodeType.FOLLOWER, FrontendNodeType.LEADER); // FOLLOWER -> UNKNOWN runOne("StateChangeExecutor_followerTOunknown", FrontendNodeType.FOLLOWER, FrontendNodeType.UNKNOWN); // OBSERVER -> UNKNOWN runOne("StateChangeExecutor_observerTOunknown", FrontendNodeType.OBSERVER, FrontendNodeType.UNKNOWN); }
/**
 * Registers a global state store together with its dedicated source node and
 * state-update processor.
 *
 * <p>Validates the arguments and that the topic is not already registered,
 * then records (in order): the global topic, the source node factory, the
 * optional reprocess-on-restore factory, the source-to-topic mapping, and the
 * processor node wired to the store. Finally the store is connected to its
 * topic and the cached node groups are invalidated.</p>
 *
 * <p>When {@code reprocessOnRestore} is true, the supplier and deserializers
 * are wrapped in a {@code ReprocessFactory} so records are re-processed during
 * restore; otherwise an empty Optional is stored.</p>
 */
public final <KIn, VIn> void addGlobalStore(final StoreFactory storeFactory, final String sourceName, final TimestampExtractor timestampExtractor, final Deserializer<KIn> keyDeserializer, final Deserializer<VIn> valueDeserializer, final String topic, final String processorName, final ProcessorSupplier<KIn, VIn, Void, Void> stateUpdateSupplier, final boolean reprocessOnRestore) { Objects.requireNonNull(storeFactory, "store builder must not be null"); ApiUtils.checkSupplier(stateUpdateSupplier); validateGlobalStoreArguments(sourceName, topic, processorName, stateUpdateSupplier, storeFactory.name(), storeFactory.loggingEnabled()); validateTopicNotAlreadyRegistered(topic); final String[] topics = {topic}; final String[] predecessors = {sourceName}; final ProcessorNodeFactory<KIn, VIn, Void, Void> nodeFactory = new ProcessorNodeFactory<>( processorName, predecessors, stateUpdateSupplier ); globalTopics.add(topic); nodeFactories.put(sourceName, new SourceNodeFactory<>( sourceName, topics, null, timestampExtractor, keyDeserializer, valueDeserializer) ); storeNameToReprocessOnRestore.put(storeFactory.name(), reprocessOnRestore ? Optional.of(new ReprocessFactory<>(stateUpdateSupplier, keyDeserializer, valueDeserializer)) : Optional.empty()); nodeToSourceTopics.put(sourceName, Arrays.asList(topics)); nodeGrouper.add(sourceName); nodeFactory.addStateStore(storeFactory.name()); nodeFactories.put(processorName, nodeFactory); nodeGrouper.add(processorName); nodeGrouper.unite(processorName, predecessors); globalStateBuilders.put(storeFactory.name(), storeFactory); connectSourceStoreAndTopic(storeFactory.name(), topic); nodeGroups = null; }
// A supplier that returns the same processor instance on every call must be
// rejected with an IllegalArgumentException mentioning the #get() contract.
@Test public void testAddGlobalStoreWithBadSupplier() { final org.apache.kafka.streams.processor.api.Processor<?, ?, Void, Void> processor = new MockApiProcessorSupplier<Object, Object, Void, Void>().get(); final IllegalArgumentException exception = assertThrows( IllegalArgumentException.class, () -> builder.addGlobalStore( new MockKeyValueStoreBuilder("global-store", false).asFactory().withLoggingDisabled(), "globalSource", null, null, null, "globalTopic", "global-processor", () -> processor, false ) ); assertThat(exception.getMessage(), containsString("#get() must return a new object each time it is called.")); }
/**
 * Forces the named field of {@code object} to {@code newValue}, even when the
 * field is non-public.
 *
 * @param protectedField name of the field to write
 * @param object instance whose field is overwritten
 * @param newValue value to assign
 * @throws RuntimeException wrapping any reflection failure
 */
public static void setProtectedFieldValue(String protectedField, Object object, Object newValue) {
    try {
        // Acegi would silently fail to write to final fields.
        // FieldUtils.writeField(Object, field, true) only sets accessible on *non* public fields
        // and then fails with IllegalAccessException (even if you make the field accessible in the interim!)
        // For backwards compatibility we need to use a few explicit steps instead.
        Field field = org.apache.commons.lang.reflect.FieldUtils.getField(object.getClass(), protectedField, true);
        field.setAccessible(true);
        field.set(object, newValue);
    } catch (Exception x) {
        throw new RuntimeException(x);
    }
}
// Writing a field that does not exist on the target class must throw.
@Test public void setNonExistingField_Should_Fail() { InnerClassWithProtectedField sut = new InnerClassWithProtectedField(); assertThrows(Exception.class, () -> FieldUtils.setProtectedFieldValue("bogus", sut, "whatever")); }
/**
 * Updates the consumer to the requested version by delegating to the
 * {@link HollowConsumer.VersionInfo} overload.
 *
 * @param requestedVersion version to transition to
 * @return the result of the delegated update
 * @throws Throwable propagated from the delegated update
 */
public synchronized boolean updateTo(long requestedVersion) throws Throwable { return updateTo(new HollowConsumer.VersionInfo(requestedVersion)); }
// Updating to a version for which no update plan can be built must throw
// IllegalArgumentException carrying the explanatory message.
@Test public void testUpdateTo_updateToArbitraryVersionButNoVersionsRetrieved_throwsException() throws Throwable { long v = Long.MAX_VALUE - 1; expectedException.expect(IllegalArgumentException.class); expectedException.expectMessage(String.format("Could not create an update plan for version %s, because that " + "version or any qualifying previous versions could not be retrieved.", v)); subject.updateTo(v); }
/**
 * Merges the given labels into the telemetry provider's resource labels.
 *
 * @param labels label key/value pairs to apply
 */
public synchronized void updateMetricsLabels(Map<String, String> labels) { telemetryProvider.updateLabels(labels); }
// Labels added via updateMetricsLabels must accumulate in the provider resource,
// and re-sending an existing key must update its value in place.
@Test public void testUpdateMetricsLabels() { clientTelemetryReporter.configure(configs); clientTelemetryReporter.contextChange(metricsContext); assertTrue(clientTelemetryReporter.telemetryProvider().resource().getAttributesList().isEmpty()); clientTelemetryReporter.updateMetricsLabels(Collections.singletonMap("key1", "value1")); assertEquals(1, clientTelemetryReporter.telemetryProvider().resource().getAttributesList().size()); assertEquals("key1", clientTelemetryReporter.telemetryProvider().resource().getAttributesList().get(0).getKey()); assertEquals("value1", clientTelemetryReporter.telemetryProvider().resource().getAttributesList().get(0).getValue().getStringValue()); clientTelemetryReporter.updateMetricsLabels(Collections.singletonMap("key2", "value2")); assertEquals(2, clientTelemetryReporter.telemetryProvider().resource().getAttributesList().size()); clientTelemetryReporter.telemetryProvider().resource().getAttributesList().forEach(attribute -> { if (attribute.getKey().equals("key1")) { assertEquals("value1", attribute.getValue().getStringValue()); } else { assertEquals("key2", attribute.getKey()); assertEquals("value2", attribute.getValue().getStringValue()); } }); clientTelemetryReporter.updateMetricsLabels(Collections.singletonMap("key2", "valueUpdated")); assertEquals(2, clientTelemetryReporter.telemetryProvider().resource().getAttributesList().size()); clientTelemetryReporter.telemetryProvider().resource().getAttributesList().forEach(attribute -> { if (attribute.getKey().equals("key1")) { assertEquals("value1", attribute.getValue().getStringValue()); } else { assertEquals("key2", attribute.getKey()); assertEquals("valueUpdated", attribute.getValue().getStringValue()); } }); }
/**
 * Converts message-selector tokens of the form {@code name='value'} into a
 * single SQL92 conjunction, e.g. {@code (a='1') AND (b LIKE '2%')}.
 *
 * <p>Tokens whose quoted value contains the shell-style wildcards {@code *}
 * or {@code ?} are rewritten to SQL92 {@code LIKE} form, mapping {@code *}
 * to {@code %} and {@code ?} to {@code _}.</p>
 *
 * @param tokens individual {@code field='value'} comparison tokens
 * @return the tokens joined with {@code AND}, each wrapped in parentheses;
 *         an empty string when {@code tokens} is empty
 */
public String convertToSQL92(List<String> tokens) {
    StringBuilder selector = new StringBuilder();
    boolean isFirstToken = true;
    // Enhanced for loop over the typed list; the previous raw Iterator plus
    // next().toString() discarded the generic type for no benefit.
    for (String token : tokens) {
        if (token.matches("^[^=]*='.*[\\*\\?].*'$")) {
            // Wildcard value: translate to LIKE syntax. Only the first '=' is
            // replaced; the regex guarantees it is the comparison operator.
            token = token.replace('?', '_')
                    .replace('*', '%')
                    .replaceFirst("=", " LIKE ");
        }
        if (isFirstToken) {
            isFirstToken = false;
        } else {
            selector.append(" AND ");
        }
        selector.append('(')
                .append(token)
                .append(')');
    }
    return selector.toString();
}
// Parameterized check: each token list (datum) must convert to the expected SQL92 string.
@Test public void testConvertToSQL92() { System.out.print("testTokens = " + datum); System.out.println(" output = " + expected); PurgeCommand pc = new PurgeCommand(); Assert.assertEquals(expected, pc.convertToSQL92(datum)); }
/**
 * Builds a recording segment file name: {@code <recordingId>-<segmentBasePosition>}
 * followed by the configured segment suffix, e.g. {@code 1-131072.rec}.
 */
static String segmentFileName(final long recordingId, final long segmentBasePosition) {
    return new StringBuilder()
        .append(recordingId)
        .append('-')
        .append(segmentBasePosition)
        .append(Configuration.RECORDING_SEGMENT_SUFFIX)
        .toString();
}
// Segment file name must be "<recordingId>-<segmentPosition>.rec".
@Test void shouldGenerateRecordingName() { final long recordingId = 1L; final long segmentPosition = 2 * 64 * 1024; final String expected = "1-" + (2 * 64 * 1024) + ".rec"; final String actual = Archive.segmentFileName(recordingId, segmentPosition); assertEquals(expected, actual); }
/**
 * Runs the field-content alert check: searches the stream for messages whose
 * {@code field} equals {@code value} over the last alert-check-interval
 * seconds, newest first. When a backlog size is configured, up to that many
 * result messages are attached as summaries. Returns a positive
 * {@code CheckResult} when any match is found, a {@code NegativeCheckResult}
 * otherwise, and {@code null} if the relative range is rejected (treated as
 * unreachable here).
 *
 * NOTE(review): the local variable {@code query} shadows a field of the same
 * name from its declaration onward; the {@code buildQueryFilter} call on the
 * preceding statement still reads the field value — confirm this is intended.
 */
@Override public CheckResult runCheck() { String filter = buildQueryFilter(stream.getId(), query); String query = field + ":\"" + value + "\""; Integer backlogSize = getBacklog(); boolean backlogEnabled = false; int searchLimit = 1; if(backlogSize != null && backlogSize > 0) { backlogEnabled = true; searchLimit = backlogSize; } try { SearchResult result = searches.search( query, filter, RelativeRange.create(configuration.getAlertCheckInterval()), searchLimit, 0, new Sorting(Message.FIELD_TIMESTAMP, Sorting.Direction.DESC) ); final List<MessageSummary> summaries; if (backlogEnabled) { summaries = Lists.newArrayListWithCapacity(result.getResults().size()); for (ResultMessage resultMessage : result.getResults()) { final Message msg = resultMessage.getMessage(); summaries.add(new MessageSummary(resultMessage.getIndex(), msg)); } } else { summaries = Collections.emptyList(); } final long count = result.getTotalResults(); final String resultDescription = "Stream received messages matching <" + query + "> " + "(Current grace time: " + grace + " minutes)"; if (count > 0) { LOG.debug("Alert check <{}> found [{}] messages.", id, count); return new CheckResult(true, this, resultDescription, Tools.nowUTC(), summaries); } else { LOG.debug("Alert check <{}> returned no results.", id); return new NegativeCheckResult(); } } catch (InvalidRangeParametersException e) { // cannot happen lol LOG.error("Invalid timerange.", e); return null; } }
// The alert check must invoke the search with a RelativeRange built from the
// configured alert-check interval (verified via the stubbed eq(relativeRange)).
@Test public void testCorrectUsageOfRelativeRange() throws Exception { final Stream stream = mock(Stream.class); final Searches searches = mock(Searches.class); final Configuration configuration = mock(Configuration.class); final SearchResult searchResult = mock(SearchResult.class); final int alertCheckInterval = 42; final RelativeRange relativeRange = RelativeRange.create(alertCheckInterval); when(stream.getId()).thenReturn("stream-id"); when(configuration.getAlertCheckInterval()).thenReturn(alertCheckInterval); when(searches.search(anyString(), anyString(), eq(relativeRange), anyInt(), anyInt(), any(Sorting.class))).thenReturn(searchResult); final FieldContentValueAlertCondition alertCondition = new FieldContentValueAlertCondition(searches, configuration, stream, null, DateTime.now(DateTimeZone.UTC), "mockuser", ImmutableMap.<String,Object>of("field", "test", "value", "test"), "Field Content Value Test COndition"); final AbstractAlertCondition.CheckResult result = alertCondition.runCheck(); }
@GetMapping("/state")
public ResponseEntity<Map<String, String>> serverState() {
    // Flatten every module's state entries into one string-valued map;
    // null state values are kept as null rather than stringified.
    final Map<String, String> states = new HashMap<>(4);
    for (ModuleState moduleState : ModuleStateHolder.getInstance().getAllModuleStates()) {
        moduleState.getStates().forEach((key, value) -> states.put(key, value == null ? null : value.toString()));
    }
    return ResponseEntity.ok().body(states);
}
// /state must return 200 with startup mode, "null" function mode and the Nacos version.
@Test void serverState() throws Exception { MockHttpServletRequestBuilder builder = MockMvcRequestBuilders.get(CONSOLE_URL); MockHttpServletResponse response = mockmvc.perform(builder).andReturn().getResponse(); assertEquals(200, response.getStatus()); ObjectNode responseContent = JacksonUtils.toObj(response.getContentAsByteArray(), ObjectNode.class); assertEquals(EnvUtil.STANDALONE_MODE_CLUSTER, responseContent.get(Constants.STARTUP_MODE_STATE).asText()); assertEquals("null", responseContent.get(Constants.FUNCTION_MODE_STATE).asText()); assertEquals(VersionUtils.version, responseContent.get(Constants.NACOS_VERSION).asText()); }
/**
 * Wraps the event in an {@code EventNotificationExecutionJob.Data} payload so it
 * can be carried by a job trigger.
 */
@Override @JsonIgnore public JobTriggerData toJobTriggerData(EventDto dto) { return EventNotificationExecutionJob.Data.builder().eventDto(dto).build(); }
// The produced trigger data must carry the job type name and the event's processing timestamp.
@Test public void toJobTriggerData() { final DateTime now = DateTime.parse("2019-01-01T00:00:00.000Z"); final ImmutableList<String> keyTuple = ImmutableList.of("a", "b"); final EventDto eventDto = EventDto.builder() .id("01DF119QKMPCR5VWBXS8783799") .eventDefinitionType("aggregation-v1") .eventDefinitionId("54e3deadbeefdeadbeefaffe") .originContext("urn:graylog:message:es:graylog_0:199a616d-4d48-4155-b4fc-339b1c3129b2") .eventTimestamp(now) .processingTimestamp(now) .streams(ImmutableSet.of("000000000000000000000002")) .sourceStreams(ImmutableSet.of("000000000000000000000001")) .message("Test message") .source("source") .keyTuple(keyTuple) .key(String.join("|", keyTuple)) .priority(4) .alert(false) .fields(ImmutableMap.of("hello", "world")) .build(); final TeamsEventNotificationConfig teamsEventNotificationConfig = TeamsEventNotificationConfig.builder().build(); EventNotificationExecutionJob.Data data = (EventNotificationExecutionJob.Data) teamsEventNotificationConfig.toJobTriggerData(eventDto); assertEquals(data.type(), EventNotificationExecutionJob.TYPE_NAME); assertEquals(data.eventDto().processingTimestamp(), now); }
/**
 * Returns the cell at the given 1-based column index (JDBC convention).
 *
 * @param columnIndex 1-based column index; must be within [1, data.length]
 * @return the stored cell value
 */
public Object getCell(final int columnIndex) {
    final int zeroBased = columnIndex - 1;
    Preconditions.checkArgument(zeroBased >= 0 && zeroBased < data.length);
    return data[zeroBased];
}
// Index 2 is out of range for the fixture row, so getCell must throw IllegalArgumentException.
@Test void assertGetCellWithColumnIndexOutOfRange() { assertThrows(IllegalArgumentException.class, () -> memoryResultSetRow.getCell(2)); }
/**
 * Batch-deletes the tags with the given ids.
 *
 * @param ids non-empty list of non-blank tag ids
 * @return success result carrying the number of deleted tags
 */
@DeleteMapping("/batchDelete")
public ShenyuAdminResult deleteTags(@RequestBody @NotEmpty final List<@NotBlank String> ids) {
    return ShenyuAdminResult.success(ShenyuResultMessage.DELETE_SUCCESS, tagService.delete(ids));
}
// Batch delete must respond 200 with the success message and the deleted count (2).
@Test public void testDeleteTags() throws Exception { List<String> ids = new ArrayList<>(); ids.add("123"); ids.add("456"); given(tagService.delete(ids)).willReturn(ids.size()); this.mockMvc.perform(MockMvcRequestBuilders.delete("/tag/batchDelete") .contentType(MediaType.APPLICATION_JSON) .content(GsonUtils.getInstance().toJson(ids))) .andExpect(status().isOk()) .andExpect(jsonPath("$.message", is(ShenyuResultMessage.DELETE_SUCCESS))) .andExpect(jsonPath("$.data", is(2))) .andReturn(); }
/**
 * Creates a tracker over the given window, with the maximum tracked range
 * derived as {@code timeRangeInSeconds * MAX_TIME_RANGE_FACTOR}.
 */
public MaxHitRateTracker(int timeRangeInSeconds) { this(timeRangeInSeconds, timeRangeInSeconds * MAX_TIME_RANGE_FACTOR); }
// Exercises per-bucket max and total hit counts as simulated time advances,
// including idle gaps and a gap longer than the whole window.
@Test public void testMaxHitRateTracker() { int timeInSec = 60; MaxHitRateTracker hitCounter = new MaxHitRateTracker(timeInSec); long currentTimestamp = System.currentTimeMillis(); for (int i = 0; i < timeInSec; i++) { for (int j = 0; j < 5; j++) { hitCounter.hit(currentTimestamp + i * 1000); } } long latestTimeStamp = currentTimestamp + (timeInSec - 1) * 1000; Assert.assertNotNull(hitCounter); Assert.assertEquals(5, hitCounter.getMaxCountPerBucket(latestTimeStamp)); Assert.assertEquals(5 * 60, hitCounter.getHitCount(latestTimeStamp)); // 2 seconds have passed, the hit counter should return 5 as well since the count in the last bucket could increase. latestTimeStamp = latestTimeStamp + 2000L; Assert.assertEquals(5, hitCounter.getMaxCountPerBucket(latestTimeStamp)); Assert.assertEquals(5 * (60 - 2), hitCounter.getHitCount(latestTimeStamp)); // This time it should return 0 as the internal lastAccessTimestamp has already been updated and there is no more // hits between the gap. latestTimeStamp = latestTimeStamp + 2000L; Assert.assertEquals(0, hitCounter.getMaxCountPerBucket(latestTimeStamp)); Assert.assertEquals(5 * (60 - 4), hitCounter.getHitCount(latestTimeStamp)); // Increment the hit in this second and we should see the result becomes 1. hitCounter.hit(latestTimeStamp); latestTimeStamp = latestTimeStamp + 2000L; Assert.assertEquals(1, hitCounter.getMaxCountPerBucket(latestTimeStamp)); Assert.assertEquals(5 * (60 - 6) + 1, hitCounter.getHitCount(latestTimeStamp)); // More than a time range period has passed and the hit counter should return 0 as there is no hits. hitCounter.hit(latestTimeStamp); latestTimeStamp = latestTimeStamp + timeInSec * 2 * 1000L + 2000L; Assert.assertEquals(0, hitCounter.getMaxCountPerBucket(latestTimeStamp)); Assert.assertEquals(0, hitCounter.getHitCount(latestTimeStamp)); }
/**
 * Returns whether this URI carries a real authority component.
 * A URI without one holds a {@code NoAuthority} placeholder.
 */
public boolean hasAuthority() {
    if (mUri.getAuthority() instanceof NoAuthority) {
        return false;
    }
    return true;
}
// Authority must be absent for path-only and file:/ URIs and present when
// host (and optionally port) is given, whether parsed or constructed.
@Test public void hasAuthorityTests() { assertFalse(new AlluxioURI(".").hasAuthority()); assertFalse(new AlluxioURI("/").hasAuthority()); assertFalse(new AlluxioURI("file:/").hasAuthority()); assertFalse(new AlluxioURI("file:///test").hasAuthority()); assertTrue(new AlluxioURI("file://localhost/").hasAuthority()); assertTrue(new AlluxioURI("file://localhost:8080/").hasAuthority()); assertTrue(new AlluxioURI(null, Authority.fromString("localhost:8080"), "/").hasAuthority()); assertTrue(new AlluxioURI(null, Authority.fromString("localhost"), "/").hasAuthority()); }
/**
 * Completes the oldest in-flight request for the node (tail of the per-node
 * deque) and decrements the global in-flight counter.
 */
public NetworkClient.InFlightRequest completeNext(String node) {
    final NetworkClient.InFlightRequest completed = requestQueue(node).pollLast();
    inFlightRequestCount.decrementAndGet();
    return completed;
}
// completeNext must return requests in FIFO order and decrement the in-flight count each time.
@Test public void testCompleteNext() { int correlationId1 = addRequest(dest); int correlationId2 = addRequest(dest); assertEquals(2, inFlightRequests.count()); assertEquals(correlationId1, inFlightRequests.completeNext(dest).header.correlationId()); assertEquals(1, inFlightRequests.count()); assertEquals(correlationId2, inFlightRequests.completeNext(dest).header.correlationId()); assertEquals(0, inFlightRequests.count()); }
// Resolves the acknowledge type from the command-line flags:
// --reject takes precedence over --release; the default is ACCEPT.
AcknowledgeType acknowledgeType() {
    if (options.has(rejectOpt)) {
        return AcknowledgeType.REJECT;
    }
    return options.has(releaseOpt) ? AcknowledgeType.RELEASE : AcknowledgeType.ACCEPT;
}
// Passing --reject must resolve to AcknowledgeType.REJECT.
@Test public void testRejectOption() throws IOException { String[] args = new String[]{ "--bootstrap-server", "localhost:9092", "--topic", "test", "--reject" }; ConsoleShareConsumerOptions config = new ConsoleShareConsumerOptions(args); assertEquals(AcknowledgeType.REJECT, config.acknowledgeType()); }
/**
 * Converts an IEEE 754 single-precision float to half-precision bits using
 * round-to-nearest-even. Handles NaN/infinity (exponent 0x1f), overflow to
 * infinity, subnormal fp16 results, and flush-to-zero for magnitudes below
 * the smallest fp16 subnormal.
 *
 * Note on the return expression: '+' binds tighter than '|', so outM is added
 * to (outE << EXPONENT_SHIFT) before the sign bit is OR-ed in. That is
 * deliberate — the rounding increments of outM above may carry into the
 * exponent field, which yields the correct rounded result (see the inline
 * comments).
 */
static short toFloat16(float f) { int bits = Float.floatToRawIntBits(f); int s = (bits >>> FP32_SIGN_SHIFT); int e = (bits >>> FP32_EXPONENT_SHIFT) & FP32_SHIFTED_EXPONENT_MASK; int m = (bits) & FP32_SIGNIFICAND_MASK; int outE = 0; int outM = 0; if (e == 0xff) { // Infinite or NaN outE = 0x1f; outM = m != 0 ? 0x200 : 0; } else { e = e - FP32_EXPONENT_BIAS + EXPONENT_BIAS; if (e >= 0x1f) { // Overflow outE = 0x1f; } else if (e <= 0) { // Underflow if (e < -10) { // The absolute fp32 value is less than MIN_VALUE, flush to +/-0 } else { // The fp32 value is a normalized float less than MIN_NORMAL, // we convert to a denorm fp16 m = m | 0x800000; int shift = 14 - e; outM = m >> shift; int lowm = m & ((1 << shift) - 1); int hway = 1 << (shift - 1); // if above halfway or exactly halfway and outM is odd if (lowm + (outM & 1) > hway) { // Round to nearest even // Can overflow into exponent bit, which surprisingly is OK. // This increment relies on the +outM in the return statement below outM++; } } } else { outE = e; outM = m >> 13; // if above halfway or exactly halfway and outM is odd if ((m & 0x1fff) + (outM & 0x1) > 0x1000) { // Round to nearest even // Can overflow into exponent bit, which surprisingly is OK. // This increment relies on the +outM in the return statement below outM++; } } } // The outM is added here as the +1 increments for outM above can // cause an overflow in the exponent bit which is OK. return (short) ((s << SIGN_SHIFT) | (outE << EXPONENT_SHIFT) + outM); }
// Exhaustive spot-checks of float-to-half conversion: zeros, NaN, infinities,
// known values, subnormals, flush-to-zero, mantissa-to-exponent rounding
// overflow, saturation near 65520, round-to-nearest-even, and sign/exponent
// boundary values.
@Test public void testFloatToFloat16() { // Zeroes, NaN and infinities assertEquals(POSITIVE_ZERO, Float16.toFloat16(0.0f)); assertEquals(NEGATIVE_ZERO, Float16.toFloat16(-0.0f)); assertEquals(NaN, Float16.toFloat16(Float.NaN)); assertEquals(POSITIVE_INFINITY, Float16.toFloat16(Float.POSITIVE_INFINITY)); assertEquals(NEGATIVE_INFINITY, Float16.toFloat16(Float.NEGATIVE_INFINITY)); // Known values assertEquals((short) 0x3c01, Float16.toFloat16(1.0009765625f)); assertEquals((short) 0xc000, Float16.toFloat16(-2.0f)); assertEquals((short) 0x0400, Float16.toFloat16(6.10352e-5f)); assertEquals((short) 0x7bff, Float16.toFloat16(65504.0f)); assertEquals((short) 0x3555, Float16.toFloat16(1.0f / 3.0f)); // Subnormals assertEquals((short) 0x03ff, Float16.toFloat16(6.09756e-5f)); assertEquals(MIN_VALUE, Float16.toFloat16(5.96046e-8f)); assertEquals((short) 0x83ff, Float16.toFloat16(-6.09756e-5f)); assertEquals((short) 0x8001, Float16.toFloat16(-5.96046e-8f)); // Subnormals (flushed to +/-0) assertEquals(POSITIVE_ZERO, Float16.toFloat16(5.96046e-9f)); assertEquals(NEGATIVE_ZERO, Float16.toFloat16(-5.96046e-9f)); // Test for values that overflow the mantissa bits into exp bits assertEquals((short) 0x1000, Float16.toFloat16(Float.intBitsToFloat(0x39fff000))); assertEquals((short) 0x0400, Float16.toFloat16(Float.intBitsToFloat(0x387fe000))); // Floats with absolute value above +/-65519 are rounded to +/-inf // when using round-to-even assertEquals((short) 0x7bff, Float16.toFloat16(65519.0f)); assertEquals((short) 0x7bff, Float16.toFloat16(65519.9f)); assertEquals(POSITIVE_INFINITY, Float16.toFloat16(65520.0f)); assertEquals(NEGATIVE_INFINITY, Float16.toFloat16(-65520.0f)); // Check if numbers are rounded to nearest even when they // cannot be accurately represented by Half assertEquals((short) 0x6800, Float16.toFloat16(2049.0f)); assertEquals((short) 0x6c00, Float16.toFloat16(4098.0f)); assertEquals((short) 0x7000, Float16.toFloat16(8196.0f)); assertEquals((short) 0x7400, 
Float16.toFloat16(16392.0f)); assertEquals((short) 0x7800, Float16.toFloat16(32784.0f)); // Miscellaneous values. In general, they're chosen to test the sign/exponent and // exponent/mantissa boundaries assertEquals((short) 0x101c, Float16.toFloat16(+0.00050163269043f)); assertEquals((short) 0x901c, Float16.toFloat16(-0.00050163269043f)); assertEquals((short) 0x101d, Float16.toFloat16(+0.000502109527588f)); assertEquals((short) 0x901d, Float16.toFloat16(-0.000502109527588f)); assertEquals((short) 0x121c, Float16.toFloat16(+0.00074577331543f)); assertEquals((short) 0x921c, Float16.toFloat16(-0.00074577331543f)); assertEquals((short) 0x141c, Float16.toFloat16(+0.00100326538086f)); assertEquals((short) 0x941c, Float16.toFloat16(-0.00100326538086f)); assertEquals((short) 0x501c, Float16.toFloat16(+32.875f)); assertEquals((short) 0xd01c, Float16.toFloat16(-32.875f)); // A few subnormals for good measure assertEquals((short) 0x001c, Float16.toFloat16(+1.66893005371e-06f)); assertEquals((short) 0x801c, Float16.toFloat16(-1.66893005371e-06f)); assertEquals((short) 0x021c, Float16.toFloat16(+3.21865081787e-05f)); assertEquals((short) 0x821c, Float16.toFloat16(-3.21865081787e-05f)); }
/**
 * Resolves the web root directory as the grandparent of the classpath
 * directory, or {@code null} when the classpath cannot be determined.
 */
public static File getWebRoot() {
    final String classPath = ClassUtil.getClassPath();
    if (StrUtil.isNotBlank(classPath)) {
        final File classDir = file(classPath);
        // Two levels up from the classes directory.
        return getParent(classDir, 2);
    }
    return null;
}
// The resolved web root must exist and be the "hutool-core" module directory.
@Test public void getWebRootTest() { final File webRoot = FileUtil.getWebRoot(); assertNotNull(webRoot); assertEquals("hutool-core", webRoot.getName()); }
/** Catalog metadata is not provided by this implementation; always returns {@code null}. */
@Override public ResultSet getCatalogs() { return null; }
// getCatalogs is unsupported here and must return null.
@Test void assertGetCatalogs() { assertNull(metaData.getCatalogs()); }
/**
 * Resolves the output schema for the given execution step by dispatching to
 * the handler registered for the step's concrete class.
 *
 * @param step execution step whose output schema is required
 * @param schema the input schema passed to the handler
 * @return the schema produced by the matching handler
 * @throws IllegalStateException if no handler is registered for the step class
 */
public LogicalSchema resolve(final ExecutionStep<?> step, final LogicalSchema schema) { return Optional.ofNullable(HANDLERS.get(step.getClass())) .map(h -> h.handle(this, schema, step)) .orElseThrow(() -> new IllegalStateException("Unhandled step class: " + step.getClass())); }
// Resolving a TableSource step must yield the schema with pseudo and key columns in the value.
@Test public void shouldResolveSchemaForTableSource() { // Given: final TableSource step = new TableSource( PROPERTIES, "foo", formats, Optional.empty(), SCHEMA, SystemColumns.CURRENT_PSEUDOCOLUMN_VERSION_NUMBER, formats ); // When: final LogicalSchema result = resolver.resolve(step, SCHEMA); // Then: assertThat(result, is(SCHEMA.withPseudoAndKeyColsInValue(false))); }
/**
 * Records a finished task run, indexing it under both its task name and its
 * query id so either key can be used for lookup.
 */
public synchronized void addHistory(TaskRunStatus status) {
    taskName2Status.put(status.getTaskName(), status);
    historyTaskRunMap.put(status.getQueryId(), status);
}
// End-to-end check of the history table repository SQL: addHistory must emit the
// expected INSERT, and each lookup variant (by db, state, task name, query id,
// and by task-name set) must emit the matching SELECT with the right predicate.
@Test public void testCRUD(@Mocked RepoExecutor repo) { TaskRunStatus status = new TaskRunStatus(); status.setQueryId("aaa"); status.setTaskName("t1"); status.setState(Constants.TaskRunState.SUCCESS); new MockUp<TableKeeper>() { @Mock public boolean isReady() { return true; } }; String jsonString = StringEscapeUtils.escapeJava(GsonUtils.GSON.toJson(status)); new Expectations() { { repo.executeDML(String.format("INSERT INTO _statistics_.task_run_history (task_id, task_run_id, task_name, " + "task_state, create_time, finish_time, expire_time, history_content_json) " + "VALUES(0, 'aaa', 't1', 'SUCCESS', '1970-01-01 08:00:00', " + "'1970-01-01 08:00:00', '1970-01-01 08:00:00', " + "'%s')", jsonString)); } }; TaskRunHistoryTable history = new TaskRunHistoryTable(); history.addHistory(status); // lookup by params TGetTasksParams params = new TGetTasksParams(); new Expectations() { { repo.executeDQL("SELECT history_content_json FROM _statistics_.task_run_history WHERE TRUE AND " + "get_json_string(history_content_json, 'dbName') = 'default_cluster:d1'"); } }; params.setDb("d1"); history.lookup(params); new Expectations() { { repo.executeDQL("SELECT history_content_json FROM _statistics_.task_run_history WHERE TRUE AND " + "task_state = 'SUCCESS'"); } }; params.setDb(null); params.setState("SUCCESS"); history.lookup(params); new Expectations() { { repo.executeDQL("SELECT history_content_json FROM _statistics_.task_run_history WHERE TRUE AND " + "task_name = 't1'"); } }; params.setDb(null); params.setState(null); params.setTask_name("t1"); history.lookup(params); new Expectations() { { repo.executeDQL("SELECT history_content_json FROM _statistics_.task_run_history WHERE TRUE AND " + "task_run_id = 'q1'"); } }; params.setDb(null); params.setState(null); params.setTask_name(null); params.setQuery_id("q1"); history.lookup(params); // lookup by task names String dbName = ""; Set<String> taskNames = Set.of("t1", "t2"); new Expectations() { { repo.executeDQL("SELECT 
history_content_json FROM _statistics_.task_run_history WHERE TRUE AND " + "task_name IN ('t1','t2')"); } }; history.lookupByTaskNames(dbName, taskNames); }
/**
 * Creates the BLOB storage directory.
 *
 * <p>When no storage directory is configured ({@code BlobServerOptions.STORAGE_DIRECTORY}
 * blank), the fallback directory is created (if needed) and returned as-is, keeping its
 * original ownership. Otherwise a fresh {@code blobStore-<uuid>} subdirectory is created
 * under the configured base path, retrying up to 10 times in case of UUID collisions,
 * and returned as an owned reference.</p>
 *
 * @param configuration source of the configured base path
 * @param fallbackStorageDirectory optional fallback used when no base path is configured
 * @return reference to the usable storage directory
 * @throws IOException if no directory could be created, or neither a base path nor a
 *         fallback was provided
 */
static Reference<File> createBlobStorageDirectory( Configuration configuration, @Nullable Reference<File> fallbackStorageDirectory) throws IOException { final String basePath = configuration.get(BlobServerOptions.STORAGE_DIRECTORY); File baseDir = null; if (StringUtils.isNullOrWhitespaceOnly(basePath)) { if (fallbackStorageDirectory != null) { baseDir = fallbackStorageDirectory.deref(); if (baseDir.mkdirs() || baseDir.exists()) { return fallbackStorageDirectory; } } } else { baseDir = new File(basePath); File storageDir; // NOTE: although we will be using UUIDs, there may be collisions int maxAttempts = 10; for (int attempt = 0; attempt < maxAttempts; attempt++) { storageDir = new File(baseDir, String.format("blobStore-%s", UUID.randomUUID())); // Create the storage dir if it doesn't exist. Only return it when the operation was // successful. if (storageDir.mkdirs()) { return Reference.owned(storageDir); } } } if (baseDir != null) { throw new IOException( "Could not create storage directory for BLOB store in '" + baseDir + "'."); } else { throw new IOException( String.format( "Could not create storage directory for BLOB store because no storage directory has " + "been specified under %s and no fallback storage directory provided.", BlobServerOptions.STORAGE_DIRECTORY.key())); } }
// With no storage directory configured, the provided fallback directory must be used verbatim.
@Test void testTaskManagerFallbackBlobStorageDirectory1() throws IOException { Configuration config = new Configuration(); final File fallbackDirectory = TempDirUtils.newFile(tempDir, "foobar"); File dir = BlobUtils.createBlobStorageDirectory(config, Reference.borrowed(fallbackDirectory)) .deref(); assertThat(dir).isEqualTo(fallbackDirectory); }
/**
 * Returns the member names as a live key-set view backed by the internal map:
 * iteration order follows insertion, and removals through the view's iterator
 * remove the corresponding members.
 */
public Set<String> keySet() { return members.keySet(); }
// keySet must grow with additions, preserve insertion order, and support
// removal of members through its iterator.
@Test public void testKeySet() { JsonObject a = new JsonObject(); assertThat(a.keySet()).hasSize(0); a.add("foo", new JsonArray()); a.add("bar", new JsonObject()); assertThat(a.size()).isEqualTo(2); assertThat(a.keySet()).hasSize(2); assertThat(a.keySet()).containsExactly("foo", "bar").inOrder(); a.addProperty("1", true); a.addProperty("2", false); // Insertion order should be preserved by keySet() Deque<String> expectedKeys = new ArrayDeque<>(Arrays.asList("foo", "bar", "1", "2")); // Note: Must wrap in ArrayList because Deque implementations do not implement `equals` assertThat(new ArrayList<>(a.keySet())).isEqualTo(new ArrayList<>(expectedKeys)); Iterator<String> iterator = a.keySet().iterator(); // Remove keys one by one for (int i = a.size(); i >= 1; i--) { assertThat(iterator.hasNext()).isTrue(); assertThat(iterator.next()).isEqualTo(expectedKeys.getFirst()); iterator.remove(); expectedKeys.removeFirst(); assertThat(a.size()).isEqualTo(i - 1); assertThat(new ArrayList<>(a.keySet())).isEqualTo(new ArrayList<>(expectedKeys)); } }
/**
 * Reads the attributes of the given path from the local filesystem backing
 * this session, mapping any {@link IOException} to a backend exception.
 *
 * @param file path to inspect
 * @param listener unused here; part of the interface contract
 * @return the converted attributes
 * @throws BackgroundException when reading the local attributes fails
 */
@Override public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException { try { return this.toAttributes(session.toPath(file)); } catch(IOException e) { throw new LocalExceptionMappingService().map("Failure to read attributes of {0}", e, file); } }
// On a POSIX filesystem, the attributes returned by the finder must match the
// java.nio.file POSIX attributes (permissions, size, timestamps) of a real file,
// including after permission changes.
@Test public void testConvert() throws Exception { final LocalSession session = new LocalSession(new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname())); if(session.isPosixFilesystem()) { session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback()); session.login(new DisabledLoginCallback(), new DisabledCancelCallback()); final Path file = new Path(new LocalHomeFinderFeature().find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.file)); new LocalTouchFeature(session).touch(file, new TransferStatus()); final java.nio.file.Path local = session.toPath(file); final PosixFileAttributes posixAttributes = Files.readAttributes(local, PosixFileAttributes.class); final LocalAttributesFinderFeature finder = new LocalAttributesFinderFeature(session); assertEquals(PosixFilePermissions.toString(posixAttributes.permissions()), finder.find(file).getPermission().getSymbol()); Files.setPosixFilePermissions(local, PosixFilePermissions.fromString("rw-------")); assertEquals("rw-------", finder.find(file).getPermission().getSymbol()); Files.setPosixFilePermissions(local, PosixFilePermissions.fromString("rwxrwxrwx")); assertEquals("rwxrwxrwx", finder.find(file).getPermission().getSymbol()); Files.setPosixFilePermissions(local, PosixFilePermissions.fromString("rw-rw----")); assertEquals("rw-rw----", finder.find(file).getPermission().getSymbol()); assertEquals(posixAttributes.size(), finder.find(file).getSize()); assertEquals(posixAttributes.lastModifiedTime().toMillis(), finder.find(file).getModificationDate()); assertEquals(posixAttributes.creationTime().toMillis(), finder.find(file).getCreationDate()); assertEquals(posixAttributes.lastAccessTime().toMillis(), finder.find(file).getAccessedDate()); new LocalDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback()); } }
/**
 * Reports whether nothing is tracked: true when the range map has no entries,
 * or when every stored bitset contains no set bits.
 */
@Override
public boolean isEmpty() {
    if (rangeBitSetMap.isEmpty()) {
        return true;
    }
    // Non-empty map: the set is empty only if every bitset is empty.
    return rangeBitSetMap.values().stream().allMatch(BitSet::isEmpty);
}
// Verifies isEmpty(): a fresh set is empty, adding a degenerate (-1, -1] range
// keeps it empty, and adding a real range makes it non-empty.
@Test
public void testIsEmpty() {
    OpenLongPairRangeSet<LongPair> set = new OpenLongPairRangeSet<>(consumer);
    assertTrue(set.isEmpty());
    // lowerValueOpen and upperValue are both -1 so that an empty set will be added
    set.addOpenClosed(0, -1, 0, -1);
    assertTrue(set.isEmpty());
    set.addOpenClosed(1, 1, 1, 5);
    assertFalse(set.isEmpty());
}
/**
 * Combines a partial aggregation state into the target state: adopts the
 * incoming histogram when the target holds none yet, otherwise merges into
 * the existing histogram in place.
 */
@CombineFunction
public static void merge(@AggregationState State state, State other) {
    NumericHistogram incoming = other.get();
    NumericHistogram current = state.get();
    if (current != null) {
        // Target already has a histogram: fold the incoming one into it.
        current.mergeWith(incoming);
    }
    else {
        // First contribution: take ownership of the incoming histogram.
        state.set(incoming);
    }
}
// Verifies combine: merging the same intermediate twice doubles every
// histogram bucket value relative to a single-step aggregation.
@Test
public void testMerge() {
    Accumulator singleStep = factory.createAccumulator(UpdateMemory.NOOP);
    singleStep.addInput(input);
    Block singleStepResult = getFinalBlock(singleStep);
    Accumulator partialStep = factory.createAccumulator(UpdateMemory.NOOP);
    partialStep.addInput(input);
    Block intermediate = getIntermediateBlock(partialStep);
    Accumulator finalStep = factory.createAccumulator(UpdateMemory.NOOP);
    // Add the identical intermediate twice so every value is counted twice
    finalStep.addIntermediate(intermediate);
    finalStep.addIntermediate(intermediate);
    Block actual = getFinalBlock(finalStep);
    Map<Double, Double> expected = Maps.transformValues(extractSingleValue(singleStepResult), value -> value * 2);
    assertEquals(extractSingleValue(actual), expected);
}
/**
 * Narrows the agent identities to the one whose comment matches the selected
 * private key when public-key authentication is configured; otherwise returns
 * the full list unchanged.
 *
 * @param credentials login credentials, possibly carrying a selected identity file
 * @param identities  identities offered by the SSH agent
 * @return a singleton list with the matching identity, or all identities if none matched
 */
protected Collection<Identity> filter(final Credentials credentials, final Collection<Identity> identities) {
    if(credentials.isPublicKeyAuthentication()) {
        final Local selected = credentials.getIdentity();
        for(Identity identity : identities) {
            if(identity.getComment() != null) {
                // Agent comments conventionally carry the key file path as UTF-8 bytes
                final String candidate = new String(identity.getComment(), StandardCharsets.UTF_8);
                if(selected.getAbsolute().equals(candidate)) {
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Matching identity %s found", candidate));
                    }
                    return Collections.singletonList(identity);
                }
            }
        }
    }
    return identities;
}
// Verifies that filter() keeps only the agent identity whose comment equals
// the configured key path, discarding non-matching identities.
@Test
public void filterIdentitiesMatch() {
    final SFTPAgentAuthentication authentication = new SFTPAgentAuthentication(new SSHClient(), new OpenSSHAgentAuthenticator(new AgentProxy(null)));
    final Credentials credentials = new Credentials("user").withIdentity(new Local("mykey") {
        @Override
        public boolean exists() {
            return true;
        }
    });
    final List<Identity> identities = new ArrayList<>();
    final Identity nomatch = mock(Identity.class);
    when(nomatch.getComment()).thenReturn(StringUtils.getBytes("mykey2", StandardCharsets.UTF_8));
    final Identity match = mock(Identity.class);
    when(match.getComment()).thenReturn(StringUtils.getBytes("mykey", StandardCharsets.UTF_8));
    identities.add(nomatch);
    identities.add(match);
    final Collection<Identity> filtered = authentication.filter(credentials, identities);
    assertEquals(1, filtered.size());
    assertArrayEquals(match.getComment(), filtered.iterator().next().getComment());
}
/**
 * Returns a new resource whose value is the sum of this resource and {@code other}.
 * Both operands must be of the same concrete type and carry the same name.
 *
 * @param other resource to add; must not be null
 * @return a freshly created resource holding the summed value
 * @throws NullPointerException     if {@code other} is null
 * @throws IllegalArgumentException if types or names differ
 */
public T merge(T other) {
    checkNotNull(other, "Cannot merge with null resources");
    checkArgument(getClass() == other.getClass(), "Merge with different resource type");
    checkArgument(name.equals(other.getName()), "Merge with different resource name");
    return create(value.add(other.getValue()));
}
@Test void testMergeErrorOnDifferentTypes() { final Resource v1 = new TestResource(0.1); final Resource v2 = new CPUResource(0.1); // v1.merge(v2); assertThatThrownBy(() -> v1.merge(v2)).isInstanceOf(IllegalArgumentException.class); }
/**
 * Substitutes ${...} variables in {@code val} using the given property container.
 * Convenience overload delegating with a null secondary container.
 *
 * @throws ScanException if the value cannot be parsed
 */
public static String substVars(String val, PropertyContainer pc1) throws ScanException {
    return substVars(val, pc1, null);
}
// Verifies that an undefined variable expands to its name plus the
// "_IS_UNDEFINED" marker suffix rather than failing.
@Test
public void testUndefinedValues() throws ScanException {
    String withUndefinedValues = "${axyz}";
    String result = OptionHelper.substVars(withUndefinedValues, context);
    assertEquals("axyz" + OptionHelper._IS_UNDEFINED, result);
}
/**
 * Combined transaction logs are supported only in shared-data run mode, when the
 * feature flag is enabled, and for loading-type transaction sources.
 */
public static boolean supportCombinedTxnLog(TransactionState.LoadJobSourceType sourceType) {
    return RunMode.isSharedDataMode() && Config.lake_use_combined_txn_log && isLoadingTransaction(sourceType);
}
// Verifies the combined-txn-log gate: loading source types qualify only while
// the config flag is on; non-loading types never qualify.
@Test
public void testSupportCombinedTxnLog() throws Exception {
    Config.lake_use_combined_txn_log = true;
    assertTrue(LakeTableHelper.supportCombinedTxnLog(TransactionState.LoadJobSourceType.BACKEND_STREAMING));
    assertTrue(LakeTableHelper.supportCombinedTxnLog(TransactionState.LoadJobSourceType.ROUTINE_LOAD_TASK));
    assertTrue(LakeTableHelper.supportCombinedTxnLog(TransactionState.LoadJobSourceType.INSERT_STREAMING));
    assertTrue(LakeTableHelper.supportCombinedTxnLog(TransactionState.LoadJobSourceType.BATCH_LOAD_JOB));
    assertFalse(LakeTableHelper.supportCombinedTxnLog(TransactionState.LoadJobSourceType.LAKE_COMPACTION));
    assertFalse(LakeTableHelper.supportCombinedTxnLog(TransactionState.LoadJobSourceType.FRONTEND_STREAMING));
    assertFalse(LakeTableHelper.supportCombinedTxnLog(TransactionState.LoadJobSourceType.BYPASS_WRITE));
    assertFalse(LakeTableHelper.supportCombinedTxnLog(TransactionState.LoadJobSourceType.DELETE));
    assertFalse(LakeTableHelper.supportCombinedTxnLog(TransactionState.LoadJobSourceType.MV_REFRESH));
    // Flag off: even loading sources are rejected
    Config.lake_use_combined_txn_log = false;
    assertFalse(LakeTableHelper.supportCombinedTxnLog(TransactionState.LoadJobSourceType.BACKEND_STREAMING));
}
/**
 * Updates the fetch position of an assigned partition.
 * NOTE(review): assignedState(tp) appears to throw if the partition is not
 * assigned — confirmed by the companion test expecting IllegalStateException.
 */
public synchronized void position(TopicPartition tp, FetchPosition position) {
    assignedState(tp).position(position);
}
// Verifies that setting a position on a partition that is not assigned
// fails with IllegalStateException.
@Test
public void cantChangePositionForNonAssignedPartition() {
    assertThrows(IllegalStateException.class, () -> state.position(tp0,
        new SubscriptionState.FetchPosition(1, Optional.empty(), leaderAndEpoch)));
}
/**
 * Creates an Elasticsearch index, tracking it for later cleanup.
 *
 * @param indexName name of the index to create
 * @return false if the index already exists, otherwise the acknowledgement flag
 * @throws ElasticsearchResourceManagerException on any failure (including invalid names)
 */
public synchronized boolean createIndex(String indexName) throws ElasticsearchResourceManagerException {
    LOG.info("Creating index using name '{}'.", indexName);
    try {
        // Check to see if the index exists
        if (indexExists(indexName)) {
            return false;
        }
        // Remember the index so the manager can delete it on teardown
        managedIndexNames.add(indexName);
        return elasticsearchClient
            .indices()
            .create(new CreateIndexRequest(indexName), RequestOptions.DEFAULT)
            .isAcknowledged();
    } catch (Exception e) {
        throw new ElasticsearchResourceManagerException("Error creating index.", e);
    }
}
// Verifies that an index name rejected by Elasticsearch surfaces as an
// ElasticsearchResourceManagerException.
@Test
public void testCreateIndexShouldThrowErrorWhenCollectionNameIsInvalid() {
    assertThrows(
        ElasticsearchResourceManagerException.class,
        () -> testManager.createIndex("invalid#name"));
}
/**
 * Computes the 64-bit MetroHash of {@code data} with the library's default
 * seed of 1337. Use the two-argument overload to supply a custom seed.
 */
public static long hash64(byte[] data) {
    return hash64(data, 1337);
}
// Verifies the MetroHash64 known-answer value for empty input with seed 0.
@Test
public void testEmpty() {
    assertEquals("31290877cceaea29", HexUtil.toHex(MetroHash.hash64(StrUtil.utf8Bytes(""), 0)));
}
/**
 * Returns this field's value for the given trace context, or null when no
 * context is supplied. Note: {@code this.context} is the field's own storage,
 * distinct from the {@code context} parameter.
 */
@Nullable
public String getValue(@Nullable TraceContext context) {
    if (context == null) return null;
    return this.context.getValue(this, context);
}
// Verifies that reading the field with no current trace context yields null.
@Test
void getValue_current_nothingCurrent() {
    assertThat(AMZN_TRACE_ID.getValue())
        .isNull();
}
/**
 * Initiates shutdown and blocks until the event handler thread terminates.
 *
 * @throws InterruptedException if interrupted while joining the handler thread
 */
@Override
public void close() throws InterruptedException {
    beginShutdown("KafkaEventQueue#close");
    eventHandlerThread.join();
    log.info("closed event queue.");
}
// Verifies that appending to a closed queue routes a RejectedExecutionException
// to the event's handleException callback.
@Test
public void testRejectedExecutionException() throws Exception {
    KafkaEventQueue queue = new KafkaEventQueue(Time.SYSTEM, logContext, "testRejectedExecutionException");
    queue.close();
    CompletableFuture<Void> future = new CompletableFuture<>();
    queue.append(new EventQueue.Event() {
        @Override
        public void run() {
            future.complete(null);
        }
        @Override
        public void handleException(Throwable e) {
            future.completeExceptionally(e);
        }
    });
    assertEquals(RejectedExecutionException.class, assertThrows(
        ExecutionException.class, () -> future.get()).getCause().getClass());
}
/**
 * Stub implementation: validates that the only supported column index is 1
 * and reports a display size of 0.
 */
@Override
public int getColumnDisplaySize(final int column) {
    Preconditions.checkArgument(1 == column);
    return 0;
}
// Verifies the stub metadata reports a display size of 0 for column 1.
@Test
void assertGetColumnDisplaySize() throws SQLException {
    assertThat(actualMetaData.getColumnDisplaySize(1), is(0));
}
/**
 * Translates a client black/white-list configuration into a ClientSelector and
 * applies it to the client engine.
 *
 * @param configDTO filtering config; mode and entries must be non-null
 * @throws NullPointerException     if the config or its required parts are null
 * @throws IllegalArgumentException if the mode is unrecognized
 */
public void applyConfig(ClientBwListDTO configDTO) {
    requireNonNull(configDTO, "Client filtering config must not be null");
    requireNonNull(configDTO.mode, "Config mode must not be null");
    requireNonNull(configDTO.entries, "Config entries must not be null");
    ClientSelector selector;
    switch (configDTO.mode) {
        case DISABLED:
            // No filtering: accept every client
            selector = ClientSelectors.any();
            break;
        case WHITELIST:
            selector = createSelector(configDTO.entries);
            break;
        case BLACKLIST:
            // Blacklist is the complement of the selector built from the entries
            selector = ClientSelectors.inverse(createSelector(configDTO.entries));
            break;
        default:
            throw new IllegalArgumentException("Unknown client B/W list mode: " + configDTO.mode);
    }
    clientEngine.applySelector(selector);
}
// Verifies that an empty whitelist rejects all clients.
@Test
public void testApplyConfig_emptyWhitelist() {
    ClientBwListDTO config = createConfig(Mode.WHITELIST);
    handler.applyConfig(config);
    Client client = createClient("127.0.0.1", "a_name");
    assertFalse(clientEngine.isClientAllowed(client));
}
/**
 * Returns the fixed group name regardless of input; the argument is ignored.
 */
@Override
public String group(String s) {
    return group;
}
// Verifies permission resolution both from a plain FileStatus and from an
// AclStatus carrying an access ACL entry; both resolve to the same 0755 bits.
@Test
public void testAclResolution() {
    long perm;
    FsPermission p1 = new FsPermission((short)0755);
    FileStatus fileStatus = file("dingo", "dingo", p1);
    perm = ugi.getPermissionsProto(fileStatus, null);
    match(perm, p1);
    AclEntry aclEntry = new AclEntry.Builder()
        .setType(AclEntryType.USER)
        .setScope(AclEntryScope.ACCESS)
        .setPermission(FsAction.ALL)
        .setName("dingo")
        .build();
    AclStatus aclStatus = new AclStatus.Builder()
        .owner("dingo")
        .group(("dingo"))
        .addEntry(aclEntry)
        .setPermission(p1)
        .build();
    // Null FileStatus: permissions must come from the ACL status alone
    perm = ugi.getPermissionsProto(null, aclStatus);
    match(perm, p1);
}
/**
 * Registers a server-side HazelcastInstance for OOME handling.
 * NOTE(review): register(...) appears to reject null instances —
 * confirmed by the companion test expecting IllegalArgumentException.
 */
public static void registerServer(HazelcastInstance instance) {
    register(SERVER_INSTANCES_REF, instance);
}
// Verifies that registering a null server instance is rejected.
@Test(expected = IllegalArgumentException.class)
public void register_whenNull() {
    OutOfMemoryErrorDispatcher.registerServer(null);
}
/**
 * Creates a MemberMap at version 0 from the given members.
 * Convenience overload of {@code createNew(version, members)}.
 */
static MemberMap createNew(MemberImpl... members) {
    return createNew(0, members);
}
// Verifies that two members sharing a UUID cannot coexist in one MemberMap.
@Test(expected = IllegalArgumentException.class)
public void create_failsWithDuplicateUuid() {
    MemberImpl member1 = newMember(5000);
    MemberImpl member2 = new MemberImpl.Builder(newAddress(5001))
        .version(VERSION)
        .uuid(member1.getUuid()).build();
    MemberMap.createNew(member1, member2);
}
/**
 * Formats the given instant as a human-readable relative time.
 * A null argument is treated as "now".
 *
 * @param then instant to describe, may be null
 * @return the formatted relative-time phrase
 */
public String format(Date then) {
    // Avoid reassigning the parameter: bind the effective reference instant once.
    final Date reference = (then == null) ? now() : then;
    return format(approximateDuration(reference));
}
// Verifies formatting of a future instant three decades ahead.
@Test
public void testDecadesFromNow() throws Exception {
    PrettyTime t = new PrettyTime(now);
    Assert.assertEquals("3 decades from now", t.format(now.plus(3, ChronoUnit.DECADES)));
}
/**
 * Streams the hits from this predicate search; returns an empty stream when
 * there are no posting lists to walk.
 */
public Stream<Hit> stream() {
    if (nPostingLists == 0) {
        return Stream.empty();
    }
    // Sequential stream backed by the custom spliterator over posting lists
    return StreamSupport.stream(new PredicateSpliterator(), false);
}
// Verifies that a search over an empty document set yields no hits.
@Test
void requireThatEmptyPostingListsWork() {
    PredicateSearch search = createPredicateSearch(
        new byte[0],
        postingList(SubqueryBitmap.ALL_SUBQUERIES));
    assertEquals(List.of().toString(), search.stream().toList().toString());
}
/**
 * Shuts the manager down: snapshots and clears the registered storages and
 * storage views under the lock, removes the JVM shutdown hook, then releases
 * everything outside the lock. Idempotent — later calls are no-ops.
 */
public void shutdown() {
    HashMap<JobID, Optional<StateChangelogStorage<?>>> toReleaseStorage;
    HashMap<JobID, StateChangelogStorageView<ChangelogStateHandleStreamImpl>> toReleaseStorageView;
    synchronized (lock) {
        if (closed) {
            return;
        }
        closed = true;
        // Copy under the lock; actual release happens after the lock is dropped
        toReleaseStorage = new HashMap<>(changelogStoragesByJobId);
        toReleaseStorageView = new HashMap<>(changelogStorageViewsByJobId);
        changelogStoragesByJobId.clear();
        changelogStorageViewsByJobId.clear();
    }
    ShutdownHookUtil.removeShutdownHook(shutdownHook, getClass().getSimpleName(), LOG);
    LOG.info("Shutting down TaskExecutorStateChangelogStoragesManager.");
    for (Map.Entry<JobID, Optional<StateChangelogStorage<?>>> entry :
            toReleaseStorage.entrySet()) {
        entry.getValue().ifPresent(this::doRelease);
    }
    for (Map.Entry<JobID, StateChangelogStorageView<ChangelogStateHandleStreamImpl>> entry :
            toReleaseStorageView.entrySet()) {
        doRelease(entry.getValue());
    }
}
// Verifies that shutdown() closes every changelog storage the manager handed out.
@Test
void testShutdown() throws IOException {
    StateChangelogStorageLoader.initialize(TestStateChangelogStorageFactory.pluginManager);
    TaskExecutorStateChangelogStoragesManager manager =
            new TaskExecutorStateChangelogStoragesManager();
    Configuration configuration = new Configuration();
    configuration.set(
            StateChangelogOptions.STATE_CHANGE_LOG_STORAGE,
            TestStateChangelogStorageFactory.identifier);
    JobID jobId1 = new JobID(1L, 1L);
    StateChangelogStorage<?> storage1 =
            manager.stateChangelogStorageForJob(
                    jobId1,
                    configuration,
                    createUnregisteredTaskManagerJobMetricGroup(),
                    TestLocalRecoveryConfig.disabled());
    assertThat(storage1).isInstanceOf(TestStateChangelogStorage.class);
    assertThat(((TestStateChangelogStorage) storage1).closed).isFalse();
    JobID jobId2 = new JobID(1L, 2L);
    StateChangelogStorage<?> storage2 =
            manager.stateChangelogStorageForJob(
                    jobId1,
                    configuration,
                    createUnregisteredTaskManagerJobMetricGroup(),
                    TestLocalRecoveryConfig.disabled());
    assertThat(storage2).isInstanceOf(TestStateChangelogStorage.class);
    assertThat(((TestStateChangelogStorage) storage2).closed).isFalse();
    manager.shutdown();
    // Both storages must be closed after the manager shuts down
    assertThat(((TestStateChangelogStorage) storage1).closed).isTrue();
    assertThat(((TestStateChangelogStorage) storage2).closed).isTrue();
    StateChangelogStorageLoader.initialize(null);
}
/**
 * Returns true if every element of {@code c} is present, located via binary
 * search over the comparator-ordered backing structure.
 *
 * @param c elements to look for; each must be comparable by the set's comparator
 */
@Override
public boolean containsAll(Collection<?> c) {
    checkComparator();
    for (Object object : c) {
        // Negative index from binarySearch means the element is absent
        if (binarySearch((V) object).getIndex() < 0) {
            return false;
        }
    }
    return true;
}
// Verifies containsAll over a populated priority queue: full subset matches,
// any missing element makes it false.
@Test
public void testContainsAll() {
    RPriorityQueue<Integer> set = redisson.getPriorityQueue("set");
    for (int i = 0; i < 200; i++) {
        set.add(i);
    }
    Assertions.assertTrue(set.containsAll(Arrays.asList(30, 11)));
    Assertions.assertFalse(set.containsAll(Arrays.asList(30, 711, 11)));
}
/**
 * Parses the configured aggregation name. Falls back to NONE when the name is
 * empty or the fetch mode is not FETCH_MODE_ALL.
 *
 * @throws IllegalArgumentException if the name is not a valid Aggregation constant
 */
Aggregation parseAggregationConfig(String aggName) {
    if (StringUtils.isEmpty(aggName) || !fetchMode.equals(TbGetTelemetryNodeConfiguration.FETCH_MODE_ALL)) {
        return Aggregation.NONE;
    }
    return Aggregation.valueOf(aggName);
}
// Verifies that an unknown aggregation name is rejected by Enum.valueOf.
@Test
public void givenAggregationIncorrect_whenParseAggregation_thenException() {
    Assertions.assertThrows(IllegalArgumentException.class, () -> {
        node.parseAggregationConfig("TOP");
    });
}
@Override public void open() { super.open(); for (String propertyKey : properties.stringPropertyNames()) { LOGGER.debug("propertyKey: {}", propertyKey); String[] keyValue = propertyKey.split("\\.", 2); if (2 == keyValue.length) { LOGGER.debug("key: {}, value: {}", keyValue[0], keyValue[1]); Properties prefixProperties; if (basePropertiesMap.containsKey(keyValue[0])) { prefixProperties = basePropertiesMap.get(keyValue[0]); } else { prefixProperties = new Properties(); basePropertiesMap.put(keyValue[0].trim(), prefixProperties); } prefixProperties.put(keyValue[1].trim(), getProperty(propertyKey)); } } Set<String> removeKeySet = new HashSet<>(); for (String key : basePropertiesMap.keySet()) { if (!COMMON_KEY.equals(key)) { Properties properties = basePropertiesMap.get(key); if (!properties.containsKey(DRIVER_KEY) || !properties.containsKey(URL_KEY)) { LOGGER.error("{} will be ignored. {}.{} and {}.{} is mandatory.", key, DRIVER_KEY, key, key, URL_KEY); removeKeySet.add(key); } } } for (String key : removeKeySet) { basePropertiesMap.remove(key); } LOGGER.debug("JDBC PropertiesMap: {}", basePropertiesMap); setMaxLineResults(); setMaxRows(); //TODO(zjffdu) Set different sql splitter for different sql dialects. this.sqlSplitter = new SqlSplitter(); }
// Verifies that the SQL splitter honors escaped backslashes, semicolons inside
// string literals, and doubled single quotes: four statements, four tables.
@Test
void testQueryWithEscapedCharacters() throws IOException, InterpreterException {
    String sqlQuery = "select '\\n', ';';" +
        "select replace('A\\;B', '\\', 'text');" +
        "select '\\', ';';" +
        "select '''', ';'";
    Properties properties = new Properties();
    properties.setProperty("common.max_count", "1000");
    properties.setProperty("common.max_retry", "3");
    properties.setProperty("default.driver", "org.h2.Driver");
    properties.setProperty("default.url", getJdbcConnection());
    properties.setProperty("default.user", "");
    properties.setProperty("default.password", "");
    properties.setProperty("default.splitQueries", "true");
    JDBCInterpreter t = new JDBCInterpreter(properties);
    t.open();
    InterpreterResult interpreterResult = t.interpret(sqlQuery, context);
    assertEquals(InterpreterResult.Code.SUCCESS, interpreterResult.code());
    List<InterpreterResultMessage> resultMessages = context.out.toInterpreterResultMessage();
    assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(0).getType());
    assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(1).getType());
    assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(2).getType());
    assertEquals(InterpreterResult.Type.TABLE, resultMessages.get(3).getType());
    assertEquals("'\\n'\t';'\n\\n\t;\n", resultMessages.get(0).getData());
    assertEquals("'Atext;B'\nAtext;B\n", resultMessages.get(1).getData());
    assertEquals("'\\'\t';'\n\\\t;\n", resultMessages.get(2).getData());
    assertEquals("''''\t';'\n'\t;\n", resultMessages.get(3).getData());
}
/**
 * Creates a system user inside a transaction: enforces the tenant's account
 * quota, validates uniqueness/references, inserts the user with an encoded
 * password, links the selected posts, and records the operation-log context.
 *
 * @param createReqVO creation request
 * @return the new user's id
 */
@Override
@Transactional(rollbackFor = Exception.class)
@LogRecord(type = SYSTEM_USER_TYPE, subType = SYSTEM_USER_CREATE_SUB_TYPE, bizNo = "{{#user.id}}",
        success = SYSTEM_USER_CREATE_SUCCESS)
public Long createUser(UserSaveReqVO createReqVO) {
    // 1.1 Validate the tenant's account quota
    tenantService.handleTenantInfo(tenant -> {
        long count = userMapper.selectCount();
        if (count >= tenant.getAccountCount()) {
            throw exception(USER_COUNT_MAX, tenant.getAccountCount());
        }
    });
    // 1.2 Validate correctness (username/mobile/email uniqueness, dept and posts exist)
    validateUserForCreateOrUpdate(null, createReqVO.getUsername(),
            createReqVO.getMobile(), createReqVO.getEmail(), createReqVO.getDeptId(), createReqVO.getPostIds());
    // 2.1 Insert the user
    AdminUserDO user = BeanUtils.toBean(createReqVO, AdminUserDO.class);
    user.setStatus(CommonStatusEnum.ENABLE.getStatus()); // Enabled by default
    user.setPassword(encodePassword(createReqVO.getPassword())); // Encode the password
    userMapper.insert(user);
    // 2.2 Insert the associated posts
    if (CollectionUtil.isNotEmpty(user.getPostIds())) {
        userPostMapper.insertBatch(convertList(user.getPostIds(),
                postId -> new UserPostDO().setUserId(user.getId()).setPostId(postId)));
    }
    // 3. Record the operation-log context
    LogRecordContext.putVariable("user", user);
    return user.getId();
}
// Verifies the happy path of createUser: the user row is persisted with the
// encoded password and ENABLE status, and both posts are linked.
@Test
public void testCreatUser_success() {
    // Prepare the request
    UserSaveReqVO reqVO = randomPojo(UserSaveReqVO.class, o -> {
        o.setSex(RandomUtil.randomEle(SexEnum.values()).getSex());
        o.setMobile(randomString());
        o.setPostIds(asSet(1L, 2L));
    }).setId(null); // Keep id unset so it is generated
    // Mock a tenant with sufficient account quota
    TenantDO tenant = randomPojo(TenantDO.class, o -> o.setAccountCount(1));
    doNothing().when(tenantService).handleTenantInfo(argThat(handler -> {
        handler.handle(tenant);
        return true;
    }));
    // Mock deptService
    DeptDO dept = randomPojo(DeptDO.class, o -> {
        o.setId(reqVO.getDeptId());
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
    });
    when(deptService.getDept(eq(dept.getId()))).thenReturn(dept);
    // Mock postService
    List<PostDO> posts = CollectionUtils.convertList(reqVO.getPostIds(),
        postId -> randomPojo(PostDO.class, o -> {
            o.setId(postId);
            o.setStatus(CommonStatusEnum.ENABLE.getStatus());
        }));
    when(postService.getPostList(eq(reqVO.getPostIds()), isNull())).thenReturn(posts);
    // Mock passwordEncoder
    when(passwordEncoder.encode(eq(reqVO.getPassword()))).thenReturn("yudaoyuanma");
    // Call
    Long userId = userService.createUser(reqVO);
    // Assert the persisted user
    AdminUserDO user = userMapper.selectById(userId);
    assertPojoEquals(reqVO, user, "password", "id");
    assertEquals("yudaoyuanma", user.getPassword());
    assertEquals(CommonStatusEnum.ENABLE.getStatus(), user.getStatus());
    // Assert the linked posts
    List<UserPostDO> userPosts = userPostMapper.selectListByUserId(user.getId());
    assertEquals(1L, userPosts.get(0).getPostId());
    assertEquals(2L, userPosts.get(1).getPostId());
}
/**
 * Deletes a file configuration by id. The master configuration cannot be
 * deleted; the cache entry is evicted afterwards.
 *
 * @param id configuration id to delete
 */
@Override
public void deleteFileConfig(Long id) {
    // Validate the config exists
    FileConfigDO config = validateFileConfigExists(id);
    if (Boolean.TRUE.equals(config.getMaster())) {
        throw exception(FILE_CONFIG_DELETE_FAIL_MASTER);
    }
    // Delete
    fileConfigMapper.deleteById(id);
    // Evict the cache entry
    clearCache(id, null);
}
// Verifies a non-master file config is deleted from the DB and evicted from cache.
@Test
public void testDeleteFileConfig_success() {
    // Mock data
    FileConfigDO dbFileConfig = randomFileConfigDO().setMaster(false);
    fileConfigMapper.insert(dbFileConfig); // @Sql: insert an existing row first
    // Prepare the parameter
    Long id = dbFileConfig.getId();
    // Call
    fileConfigService.deleteFileConfig(id);
    // Verify the row is gone
    assertNull(fileConfigMapper.selectById(id));
    // Verify the cache entry is gone
    assertNull(fileConfigService.getClientCache().getIfPresent(id));
}
/**
 * Returns the number of elements in the given array, or null for a null array.
 *
 * @param array the array whose length is requested
 * @return the element count, or null when the input is null
 */
@Udf
public <T> Integer calcArrayLength(
    @UdfParameter(description = "The array") final List<T> array
) {
    // Null propagates as null rather than 0, matching SQL NULL semantics.
    return (array == null) ? null : array.size();
}
// Verifies that a null input array yields a null length.
@Test
public void shouldReturnNullForNullArray() {
    assertThat(udf.calcArrayLength(null), is(nullValue()));
}
/**
 * Records the most recent emit delay; the stored value backs a metrics gauge.
 */
public void recordEmitDelay(long emitDelay) {
    this.emitDelay = emitDelay;
}
// Verifies that the recorded emit delay is exposed via the event-time-lag gauge.
@Test
public void testEmitEventTimeLagTracking() {
    sourceMetrics.recordEmitDelay(3L);
    assertGauge(metricListener, CURRENT_EMIT_EVENT_TIME_LAG, 3L);
}
/**
 * Finds the primitive/boxed pair matching the given class, whether it is the
 * primitive side or the boxed side; empty for classes that are neither.
 */
public static Optional<PrimitiveBoxed> getKiePMMLPrimitiveBoxed(Class<?> c) {
    return KIE_PMML_PRIMITIVE_BOXEDS.stream().filter(pBoxed -> c.equals(pBoxed.getPrimitive()) || c.equals(pBoxed.getBoxed())).findFirst();
}
// Verifies that every primitive and its boxed counterpart resolve to a pair,
// while a non-primitive class like String does not.
@Test
void getKiePMMLPrimitiveBoxed() {
    for (int i = 0; i < types; i++) {
        assertThat(PrimitiveBoxedUtils.getKiePMMLPrimitiveBoxed(primitives[i]).isPresent()).isTrue();
        assertThat(PrimitiveBoxedUtils.getKiePMMLPrimitiveBoxed(boxeds[i]).isPresent()).isTrue();
    }
    assertThat(PrimitiveBoxedUtils.getKiePMMLPrimitiveBoxed(String.class)).isNotPresent();
}
/**
 * Returns an index reviser for the table when it is a sharding table (looked
 * up by actual table name); empty otherwise.
 */
@Override
public Optional<ShardingIndexReviser> getIndexReviser(final ShardingRule rule, final String tableName) {
    return rule.findShardingTableByActualTable(tableName).map(ShardingIndexReviser::new);
}
// Verifies that an actual sharding table name resolves to a ShardingIndexReviser.
@Test
void assertGetIndexReviser() {
    Optional<ShardingIndexReviser> indexReviser = reviseEntry.getIndexReviser(rule, "t_order0");
    assertTrue(indexReviser.isPresent());
    assertThat(indexReviser.get().getClass(), is(ShardingIndexReviser.class));
}
/**
 * Signals a rescale whenever any vertex's parallelism differs between the
 * current and the proposed assignment; equal assignments require no rescale.
 */
@Override
public boolean shouldRescale(
        VertexParallelism currentParallelism, VertexParallelism newParallelism) {
    boolean changed = false;
    for (JobVertexID vertex : currentParallelism.getVertices()) {
        final int before = currentParallelism.getParallelism(vertex);
        final int after = newParallelism.getParallelism(vertex);
        if (before != after) {
            changed = true;
            break;
        }
    }
    return changed;
}
// Verifies that identical current and proposed parallelism never triggers a rescale.
@Test
void testNoScaleOnSameParallelism() {
    final RescalingController rescalingController = new EnforceParallelismChangeRescalingController();
    assertThat(rescalingController.shouldRescale(forParallelism(2), forParallelism(2)))
        .isFalse();
}
/**
 * Initializes the replacer from properties. At least one password source
 * (password file, user properties, or network interface) must be configured.
 *
 * @throws IllegalArgumentException if no password source is available
 */
@Override
public void init(Properties properties) {
    super.init(properties);
    passwordFile = properties.getProperty(PROPERTY_PASSWORD_FILE);
    // User-properties source defaults to enabled
    passwordUserProperties = Boolean.parseBoolean(properties.getProperty(PROPERTY_PASSWORD_USER_PROPERTIES, "true"));
    passwordNetworkInterface = properties.getProperty(PROPERTY_PASSWORD_NETWORK_INTERFACE);
    checkFalse(passwordFile == null && passwordNetworkInterface == null && !passwordUserProperties,
        "At least one of the properties used to generate encryption password has to be configured");
}
// Verifies init() fails when every password source is disabled or unset.
@Test(expected = IllegalArgumentException.class)
public void testNoPasswordInputProvided() {
    EncryptionReplacer replacer = new EncryptionReplacer();
    Properties properties = new Properties();
    properties.setProperty(EncryptionReplacer.PROPERTY_PASSWORD_USER_PROPERTIES, "false");
    replacer.init(properties);
}
/**
 * "Keep existing" merge policy: the already-present value wins; the merging
 * value is only adopted when nothing exists yet.
 */
@Override
public Object merge(T mergingValue, T existingValue) {
    // Prefer the existing entry; fall back to the merging one only when absent.
    final T winner = (existingValue == null) ? mergingValue : existingValue;
    return winner.getRawValue();
}
// Verifies the policy keeps the existing value when one is present.
@Test
public void merge_existingValuePresent() {
    MapMergeTypes existing = mergingValueWithGivenValue(EXISTING);
    MapMergeTypes merging = mergingValueWithGivenValue(MERGING);
    assertEquals(EXISTING, mergePolicy.merge(merging, existing));
}
/**
 * Stub implementation: always reports database minor version 0.
 */
@Override
public int getDatabaseMinorVersion() {
    return 0;
}
// Verifies the stub metadata reports minor version 0.
@Test
void assertGetDatabaseMinorVersion() {
    assertThat(metaData.getDatabaseMinorVersion(), is(0));
}
/**
 * Removal is not supported by this iterator.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public void remove() {
    throw new UnsupportedOperationException();
}
// Verifies that the outline iterator rejects remove().
@Test
void removeUnsupported() {
    PDOutlineItemIterator pdOutlineItemIterator = new PDOutlineItemIterator(new PDOutlineItem());
    assertThrows(UnsupportedOperationException.class, () -> pdOutlineItemIterator.remove());
}
/**
 * Single-byte writes are not supported by this stream; callers must use the
 * array-based write methods.
 *
 * @throws IOException always, wrapping UnsupportedOperationException
 */
@Override
public void write(final int b) throws IOException {
    throw new IOException(new UnsupportedOperationException());
}
// Round-trip test: encrypt a payload one byte larger than a chunk, decrypt it,
// and confirm the cleartext is recovered unchanged.
@Test
public void testWriteLargeChunk() throws Exception {
    final CryptoVault vault = this.getVault();
    final ByteArrayOutputStream cipherText = new ByteArrayOutputStream();
    final FileHeader header = vault.getFileHeaderCryptor().create();
    final CryptoOutputStream stream = new CryptoOutputStream(
        new ProxyOutputStream(cipherText), vault.getFileContentCryptor(), header,
        new RandomNonceGenerator(vault.getNonceSize()), 0);
    // One byte more than a single chunk forces multi-chunk handling
    final byte[] cleartext = RandomUtils.nextBytes(vault.getFileContentCryptor().cleartextChunkSize() + 1);
    stream.write(cleartext, 0, cleartext.length);
    stream.close();
    final byte[] read = new byte[cleartext.length];
    final CryptoInputStream cryptoInputStream = new CryptoInputStream(new ByteArrayInputStream(cipherText.toByteArray()),
        vault.getFileContentCryptor(), header, 0);
    IOUtils.readFully(cryptoInputStream, read);
    cryptoInputStream.close();
    assertArrayEquals(cleartext, read);
}
/**
 * Inner-joins this table with another on the shared key; delegates to the
 * internal doJoin with inner-join semantics (no outer flags) and no name.
 */
@Override
public <V1, R> KTable<K, R> join(final KTable<K, V1> other, final ValueJoiner<? super V, ? super V1, ? extends R> joiner) {
    return doJoin(other, joiner, NamedInternal.empty(), null, false, false);
}
// Verifies that joining with a null table is rejected.
@Test
public void shouldNotAllowNullOtherTableOnJoin() {
    assertThrows(NullPointerException.class, () -> table.join(null, MockValueJoiner.TOSTRING_JOINER));
}
/**
 * REST endpoint returning the topics actively used by a connector.
 * Responds 403 when topic tracking is disabled in the worker config.
 */
@GET
@Path("/{connector}/topics")
@Operation(summary = "Get the list of topics actively used by the specified connector")
public Response getConnectorActiveTopics(final @PathParam("connector") String connector) {
    if (isTopicTrackingDisabled) {
        throw new ConnectRestException(Response.Status.FORBIDDEN.getStatusCode(),
            "Topic tracking is disabled.");
    }
    ActiveTopicsInfo info = herder.connectorActiveTopics(connector);
    // Body shape: { "<connector>": { ...active topics info... } }
    return Response.ok(Collections.singletonMap(info.connector(), info)).build();
}
// Verifies the active-topics endpoint returns 200 with the connector's name
// and topic set when topic tracking is enabled.
@Test
public void testConnectorActiveTopics() {
    when(serverConfig.topicTrackingEnabled()).thenReturn(true);
    when(serverConfig.topicTrackingResetEnabled()).thenReturn(true);
    when(herder.connectorActiveTopics(CONNECTOR_NAME))
        .thenReturn(new ActiveTopicsInfo(CONNECTOR_NAME, CONNECTOR_ACTIVE_TOPICS));
    connectorsResource = new ConnectorsResource(herder, serverConfig, restClient, REQUEST_TIMEOUT);
    Response response = connectorsResource.getConnectorActiveTopics(CONNECTOR_NAME);
    assertEquals(Response.Status.OK.getStatusCode(), response.getStatus());
    Map<String, Map<String, Object>> body = (Map<String, Map<String, Object>>) response.getEntity();
    assertEquals(CONNECTOR_NAME, ((ActiveTopicsInfo) body.get(CONNECTOR_NAME)).connector());
    assertEquals(new HashSet<>(CONNECTOR_ACTIVE_TOPICS),
        ((ActiveTopicsInfo) body.get(CONNECTOR_NAME)).topics());
}
/**
 * Blocks until a connection is established, with a default 45-second timeout.
 */
protected Connection connectNow() {
    return connectNow(Duration.ofSeconds(45));
}
// Verifies that a dispose timeout whose millis overflow long arithmetic
// surfaces as ArithmeticException instead of silently wrapping.
@Test
void testDisposeTimeoutLongOverflow() {
    assertThatExceptionOfType(ArithmeticException.class)
        .isThrownBy(() -> new TestClientTransport(Mono.just(EmbeddedChannel::new)).connectNow().disposeNow(Duration.ofMillis(Long.MAX_VALUE)));
}
/**
 * Fluent setter for the response code; returns this for chaining.
 */
public ModelApiResponse code(Integer code) {
    this.code = code;
    return this;
}
// Generated placeholder test for the code property — no assertions yet.
@Test
public void codeTest() {
    // TODO: test code
}
/**
 * Commits the DistCp job: concatenates chunked files if chunking was used,
 * runs the superclass commit, removes temp files, then applies post-copy
 * actions (delete-missing / atomic commit / track-missing) and finally
 * preserves directory attributes before cleanup.
 *
 * @param jobContext job context carrying the DistCp configuration
 * @throws IOException on any commit-phase failure
 */
@Override
public void commitJob(JobContext jobContext) throws IOException {
    Configuration conf = jobContext.getConfiguration();
    syncFolder = conf.getBoolean(DistCpConstants.CONF_LABEL_SYNC_FOLDERS, false);
    overwrite = conf.getBoolean(DistCpConstants.CONF_LABEL_OVERWRITE, false);
    updateRoot =
        conf.getBoolean(CONF_LABEL_UPDATE_ROOT, false);
    targetPathExists = conf.getBoolean(
        DistCpConstants.CONF_LABEL_TARGET_PATH_EXISTS, true);
    ignoreFailures = conf.getBoolean(
        DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false);
    if (blocksPerChunk > 0) {
        // Chunked copy: stitch file chunks back together before commit
        concatFileChunks(conf);
    }
    super.commitJob(jobContext);
    cleanupTempFiles(jobContext);
    try {
        if (conf.getBoolean(DistCpConstants.CONF_LABEL_DELETE_MISSING, false)) {
            deleteMissing(conf);
        } else if (conf.getBoolean(DistCpConstants.CONF_LABEL_ATOMIC_COPY, false)) {
            commitData(conf);
        } else if (conf.get(CONF_LABEL_TRACK_MISSING) != null) {
            // save missing information to a directory
            trackMissing(conf);
        }
        // for HDFS-14621, should preserve status after -delete
        String attributes = conf.get(DistCpConstants.CONF_LABEL_PRESERVE_STATUS);
        final boolean preserveRawXattrs = conf.getBoolean(
            DistCpConstants.CONF_LABEL_PRESERVE_RAWXATTRS, false);
        if ((attributes != null && !attributes.isEmpty()) || preserveRawXattrs) {
            preserveFileAttributesForDirectories(conf);
        }
        taskAttemptContext.setStatus("Commit Successful");
    } finally {
        cleanup(conf);
    }
}
// Verifies -delete semantics on interleaved source/target file sets: after
// commit the target mirrors the source (extra target files removed), and a
// second commit is idempotent.
@Test
public void testDeleteMissingFlatInterleavedFiles() throws IOException {
    TaskAttemptContext taskAttemptContext = getTaskAttemptContext(config);
    JobContext jobContext = new JobContextImpl(taskAttemptContext.getConfiguration(),
        taskAttemptContext.getTaskAttemptID().getJobID());
    Configuration conf = jobContext.getConfiguration();
    String sourceBase;
    String targetBase;
    FileSystem fs = null;
    try {
        OutputCommitter committer = new CopyCommitter(null, taskAttemptContext);
        fs = FileSystem.get(conf);
        sourceBase = "/tmp1/" + String.valueOf(rand.nextLong());
        targetBase = "/tmp1/" + String.valueOf(rand.nextLong());
        createFile(fs, sourceBase + "/1");
        createFile(fs, sourceBase + "/3");
        createFile(fs, sourceBase + "/4");
        createFile(fs, sourceBase + "/5");
        createFile(fs, sourceBase + "/7");
        createFile(fs, sourceBase + "/8");
        createFile(fs, sourceBase + "/9");
        createFile(fs, targetBase + "/2");
        createFile(fs, targetBase + "/4");
        createFile(fs, targetBase + "/5");
        createFile(fs, targetBase + "/7");
        createFile(fs, targetBase + "/9");
        createFile(fs, targetBase + "/A");
        final DistCpOptions options = new DistCpOptions.Builder(
            Collections.singletonList(new Path(sourceBase)), new Path("/out"))
            .withSyncFolder(true).withDeleteMissing(true).build();
        options.appendToConf(conf);
        final DistCpContext context = new DistCpContext(options);
        CopyListing listing = new GlobbedCopyListing(conf, CREDENTIALS);
        Path listingFile = new Path("/tmp1/" + String.valueOf(rand.nextLong()));
        listing.buildListing(listingFile, context);
        conf.set(CONF_LABEL_TARGET_WORK_PATH, targetBase);
        conf.set(CONF_LABEL_TARGET_FINAL_PATH, targetBase);
        committer.commitJob(jobContext);
        verifyFoldersAreInSync(fs, targetBase, sourceBase);
        Assert.assertEquals(4, fs.listStatus(new Path(targetBase)).length);
        //Test for idempotent commit
        committer.commitJob(jobContext);
        verifyFoldersAreInSync(fs, targetBase, sourceBase);
        Assert.assertEquals(4, fs.listStatus(new Path(targetBase)).length);
    } finally {
        TestDistCpUtils.delete(fs, "/tmp1");
        conf.set(DistCpConstants.CONF_LABEL_DELETE_MISSING, "false");
    }
}
/**
 * Converts source data to an Iceberg record without a schema-update consumer.
 * Convenience overload of {@code convert(data, consumer)}.
 */
Record convert(Object data) {
    return convert(data, null);
}
// Verifies that a Connect Struct converts to a Record with the expected field values.
@Test
public void testStructConvert() {
    Table table = mock(Table.class);
    when(table.schema()).thenReturn(SCHEMA);
    RecordConverter converter = new RecordConverter(table, config);
    Struct data = createStructData();
    Record record = converter.convert(data);
    assertRecordValues(record);
}
/**
 * Prepares a share response for an entity: resolves which grantees and shares
 * the acting user may modify, then assembles available capabilities, selected
 * capabilities, dependency-permission gaps, and request validation.
 *
 * @param ownedEntity    entity being shared (non-null)
 * @param request        requested grantee/capability assignments (non-null)
 * @param sharingUser    user performing the share (non-null)
 * @param sharingSubject security subject of that user (non-null)
 * @return the fully populated share response
 */
public EntityShareResponse prepareShare(GRN ownedEntity,
                                        EntityShareRequest request,
                                        User sharingUser,
                                        Subject sharingSubject) {
    requireNonNull(ownedEntity, "ownedEntity cannot be null");
    requireNonNull(request, "request cannot be null");
    requireNonNull(sharingUser, "sharingUser cannot be null");
    requireNonNull(sharingSubject, "sharingSubject cannot be null");
    final GRN sharingUserGRN = grnRegistry.ofUser(sharingUser);
    final Set<Grantee> modifiableGrantees = getModifiableGrantees(sharingUser, sharingUserGRN, ownedEntity);
    final Set<GRN> modifiableGranteeGRNs = modifiableGrantees.stream().map(Grantee::grn).collect(Collectors.toSet());
    // Only shares whose grantee the user may modify are considered active here
    final ImmutableSet<ActiveShare> modifiableActiveShares = getActiveShares(ownedEntity, sharingUser, modifiableGranteeGRNs);
    return EntityShareResponse.builder()
        .entity(ownedEntity.toString())
        .sharingUser(sharingUserGRN)
        .availableGrantees(modifiableGrantees)
        .availableCapabilities(getAvailableCapabilities())
        .activeShares(modifiableActiveShares)
        .selectedGranteeCapabilities(getSelectedGranteeCapabilities(modifiableActiveShares, request))
        .missingPermissionsOnDependencies(checkMissingPermissionsOnDependencies(ownedEntity, sharingUserGRN, modifiableActiveShares, request))
        .validationResult(validateRequest(ownedEntity, request, sharingUser, modifiableGranteeGRNs))
        .build();
}
// Verifies that sharing an entity with no current owner passes validation.
@DisplayName("Validates we can modify ownerless entitites")
@Test
void validateOwnerless() {
    final GRN entity = grnRegistry.newGRN(GRNTypes.DASHBOARD, "54e3deadbeefdeadbeefaffe");
    final GRN horst = grnRegistry.newGRN(GRNTypes.USER, "horst");
    final EntityShareRequest shareRequest = EntityShareRequest.create(ImmutableMap.of(horst, Capability.MANAGE));
    final User user = createMockUser("hans");
    final Subject subject = mock(Subject.class);
    final EntityShareResponse entityShareResponse = entitySharesService.prepareShare(entity, shareRequest, user, subject);
    assertThat(entityShareResponse.validationResult()).satisfies(validationResult -> {
        assertThat(validationResult.failed()).isFalse();
        assertThat(validationResult.getErrors()).isEmpty();
    });
}
/**
 * Deploys the given application data by initializing the client and then writing, in order,
 * the application package, the per-version file registries, and the allocated hosts.
 *
 * @throws IOException if any of the writes fail
 */
public void deploy(ApplicationPackage applicationPackage, Map<Version, FileRegistry> fileRegistryMap, AllocatedHosts allocatedHosts) throws IOException {
    // Initialization must happen before any write.
    client.initialize();
    client.writeApplicationPackage(applicationPackage);
    client.write(fileRegistryMap);
    client.write(allocatedHosts);
}
// Verifies that deploy() can be invoked repeatedly against the same curator once the
// server definitions directory and def file have been created.
@Test
public void require_that_deployer_is_initialized() throws IOException {
    Curator curator = new MockCurator();
    File serverdbDir = folder.newFolder("serverdb");
    File defsDir = new File(serverdbDir, "serverdefs");
    try {
        IOUtils.createWriter(new File(defsDir, defFile), true);
    } catch (IOException e) {
        e.printStackTrace();
        fail();
    }
    deploy(FilesApplicationPackage.fromFile(new File("src/test/apps/content")), curator, 1);
    deploy(FilesApplicationPackage.fromFile(new File("src/test/apps/content")), curator, 2);
}
/**
 * Copies each upstream in {@code upstreamList} into a fresh {@link CommonUpstream}
 * carrying the same protocol, host, URL, status and timestamp.
 *
 * @param upstreamList source upstreams; may be {@code null}
 * @return a new mutable list of copies; empty (never {@code null}) for null/empty input
 */
public static List<CommonUpstream> convertCommonUpstreamList(final List<? extends CommonUpstream> upstreamList) {
    // Normalize a null input to an empty source so the pipeline below always runs.
    final List<? extends CommonUpstream> source = upstreamList == null ? Collections.emptyList() : upstreamList;
    return source.stream()
            .map(item -> new CommonUpstream(item.getProtocol(), item.getUpstreamHost(),
                    item.getUpstreamUrl(), item.isStatus(), item.getTimestamp()))
            .collect(Collectors.toList());
}
// Verifies that conversion preserves protocol and URL, and that null or empty input
// yields a non-null (empty) result.
@Test
public void convertCommonUpstreamList() {
    List<DivideUpstream> existDivideUpstreams = new ArrayList<>();
    DivideUpstream divideUpstream = CommonUpstreamUtils.buildDivideUpstream("http", HOST, PORT);
    existDivideUpstreams.add(divideUpstream);
    List<CommonUpstream> commonUpstreams = CommonUpstreamUtils.convertCommonUpstreamList(existDivideUpstreams);
    Assert.assertNotNull(commonUpstreams);
    Assert.assertEquals(1, commonUpstreams.size());
    Assert.assertEquals("http", commonUpstreams.get(0).getProtocol());
    Assert.assertEquals(HOST + ":" + PORT, commonUpstreams.get(0).getUpstreamUrl());
    Assert.assertNotNull(CommonUpstreamUtils.convertCommonUpstreamList(null));
    Assert.assertNotNull(CommonUpstreamUtils.convertCommonUpstreamList(new ArrayList<>()));
}
/**
 * Asserts multimap equality per {@link Multimap#equals}. On failure, produces a more
 * descriptive message: mixed ListMultimap/SetMultimap comparisons get a type-mismatch
 * explanation, and same-kind comparisons are delegated to containsExactlyEntriesIn
 * (in order for lists) so the diff shows missing/unexpected entries.
 */
@Override
public final void isEqualTo(@Nullable Object other) {
    @SuppressWarnings("UndefinedEquals") // the contract of this method is to follow Multimap.equals
    boolean isEqual = Objects.equal(actual, other);
    if (isEqual) {
        return;
    }
    // Fail but with a more descriptive message:
    if ((actual instanceof ListMultimap && other instanceof SetMultimap)
            || (actual instanceof SetMultimap && other instanceof ListMultimap)) {
        // A ListMultimap and a SetMultimap can only be equal when both are empty.
        String actualType = (actual instanceof ListMultimap) ? "ListMultimap" : "SetMultimap";
        String otherType = (other instanceof ListMultimap) ? "ListMultimap" : "SetMultimap";
        failWithoutActual(
                fact("expected", other),
                fact("an instance of", otherType),
                fact("but was", actualCustomStringRepresentationForPackageMembersToCall()),
                fact("an instance of", actualType),
                simpleFact(
                        lenientFormat(
                                "a %s cannot equal a %s if either is non-empty", actualType, otherType)));
    } else if (actual instanceof ListMultimap) {
        // Lists care about entry order; sets do not.
        containsExactlyEntriesIn((Multimap<?, ?>) checkNotNull(other)).inOrder();
    } else if (actual instanceof SetMultimap) {
        containsExactlyEntriesIn((Multimap<?, ?>) checkNotNull(other));
    } else {
        super.isEqualTo(other);
    }
}
// Verifies the failure message disambiguates entries whose toString is identical but whose
// key types differ (Long vs Integer), by annotating each side with its entry type.
@Test
public void isEqualTo_failsWithSameToString() {
    expectFailureWhenTestingThat(ImmutableMultimap.of(1, "a", 1, "b", 2, "c"))
            .isEqualTo(ImmutableMultimap.of(1L, "a", 1L, "b", 2L, "c"));
    assertFailureKeys("missing", "unexpected", "---", "expected", "but was");
    assertFailureValue("missing", "[1=a, 1=b, 2=c] (Map.Entry<java.lang.Long, java.lang.String>)");
    assertFailureValue(
            "unexpected", "[1=a, 1=b, 2=c] (Map.Entry<java.lang.Integer, java.lang.String>)");
    assertFailureValue("expected", "{1=[a, b], 2=[c]}");
    assertFailureValue("but was", "{1=[a, b], 2=[c]}");
}
/**
 * On startup, warns if the removed worker-count property is still present in the
 * configuration; the value itself is ignored.
 */
@Override
public void start() {
    configuration.get(PROPERTY_SONAR_CE_WORKER_COUNT)
            .ifPresent(workerCount -> LOG.warn("Property {} is not supported anymore and will be ignored."
                    + " Remove it from sonar.properties to remove this warning.", PROPERTY_SONAR_CE_WORKER_COUNT));
}
// Verifies the deprecation warning is emitted even when the property is present but empty.
@Test
public void start_logs_a_warning_if_property_workerCount_exists_and_empty() {
    settings.setProperty(PROPERTY_SONAR_CE_WORKER_COUNT, "");
    underTest.start();
    verifyWarnMessage();
}
/** Returns the set of compressed MIME types; may be {@code null}. */
@JsonProperty
@Nullable
public Set<String> getCompressedMimeTypes() {
    return compressedMimeTypes;
}
// Verifies the getter exposes the single configured compressed MIME type.
@Test
void hasCompressedMimeTypes() {
    assertThat(gzip.getCompressedMimeTypes())
            .isEqualTo(Collections.singleton("text/plain"));
}
/**
 * Computes the conjunction of the Boolean elements of {@code list}.
 * A null list yields {@code true}; null elements are skipped; any non-null,
 * non-Boolean element produces an invalid-parameters error.
 */
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list) {
    if (list == null) {
        return FEELFnResult.ofResult(true);
    }
    boolean conjunction = true;
    for (final Object item : list) {
        if (item == null) {
            // Null elements do not contribute to the result.
            continue;
        }
        if (!(item instanceof Boolean)) {
            return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not a Boolean"));
        }
        conjunction = conjunction && (Boolean) item;
    }
    return FEELFnResult.ofResult(conjunction);
}
// Verifies the conjunction is false when any element is FALSE, with nulls ignored.
@Test
void invokeListParamReturnFalse() {
    FunctionTestUtil.assertResult(nnAllFunction.invoke(Arrays.asList(Boolean.TRUE, Boolean.FALSE)), false);
    FunctionTestUtil.assertResult(nnAllFunction.invoke(Arrays.asList(Boolean.TRUE, null, Boolean.FALSE)), false);
}
/** No-op: incoming intents are deliberately ignored by this component. */
@Override
public void onNewIntent(Intent intent) {
    // Intentionally empty.
}
// Placeholder: onNewIntent is a no-op, so there is no observable behavior to assert.
@Test
public void onNewIntent() {
}
/**
 * Stores a context property, rejecting null keys or values with
 * {@link IllegalArgumentException}.
 */
@Override
public void storeProperty(String key, String value) {
    if (key == null) {
        throw new IllegalArgumentException("Key of context property must not be null");
    }
    if (value == null) {
        throw new IllegalArgumentException("Value of context property must not be null");
    }
    contextProperties.put(key, value);
}
// Verifies the null-key precondition and its exact error message.
@Test
public void storeProperty_throws_IAE_if_key_is_null() {
    assertThatThrownBy(() -> underTest.storeProperty(null, "bar"))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage("Key of context property must not be null");
}
/**
 * Tokenizes every MAIN-type Xoo input file in the context's file system.
 */
@Override
public void execute(SensorContext context) {
    FilePredicates p = context.fileSystem().predicates();
    // Restrict to Xoo-language files of type MAIN.
    for (InputFile file : context.fileSystem().inputFiles(p.and(p.hasLanguages(Xoo.KEY), p.hasType(Type.MAIN)))) {
        tokenize(file, context);
    }
}
// Verifies CPD tokenization of a small source file: tokens are grouped per line with
// the expected start/end unit indices.
@Test
public void testExecution() throws IOException {
    String content = "public class Foo {\n\n}";
    createSourceFile(content);
    sensor.execute(context);
    assertThat(context.cpdTokens("foo:src/foo.xoo")).extracting("value", "startLine", "startUnit", "endUnit")
            .containsExactly(
                    tuple("publicclassFoo{", 1, 1, 4),
                    tuple("}", 3, 5, 5));
}
/**
 * Produces heartbeat requests for the consumer group protocol.
 * Skips entirely when there is no coordinator or the member should not heartbeat;
 * on poll-timer expiry sends a best-effort leave heartbeat; otherwise sends a heartbeat
 * either when the member state demands one immediately or when the interval has elapsed.
 */
@Override
public NetworkClientDelegate.PollResult poll(long currentTimeMs) {
    if (!coordinatorRequestManager.coordinator().isPresent() || membershipManager.shouldSkipHeartbeat()) {
        membershipManager.onHeartbeatRequestSkipped();
        return NetworkClientDelegate.PollResult.EMPTY;
    }
    pollTimer.update(currentTimeMs);
    if (pollTimer.isExpired() && !membershipManager.isLeavingGroup()) {
        logger.warn("Consumer poll timeout has expired. This means the time between " +
                "subsequent calls to poll() was longer than the configured max.poll.interval.ms, " +
                "which typically implies that the poll loop is spending too much time processing " +
                "messages. You can address this either by increasing max.poll.interval.ms or by " +
                "reducing the maximum size of batches returned in poll() with max.poll.records.");
        membershipManager.transitionToSendingLeaveGroup(true);
        NetworkClientDelegate.UnsentRequest leaveHeartbeat = makeHeartbeatRequest(currentTimeMs, true);
        // We can ignore the leave response because we can join before or after receiving the response.
        heartbeatRequestState.reset();
        heartbeatState.reset();
        return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs, Collections.singletonList(leaveHeartbeat));
    }
    // Case 1: The member is leaving
    boolean heartbeatNow = membershipManager.state() == MemberState.LEAVING ||
            // Case 2: The member state indicates it should send a heartbeat without waiting for the interval, and there is no heartbeat request currently in-flight
            (membershipManager.shouldHeartbeatNow() && !heartbeatRequestState.requestInFlight());
    if (!heartbeatRequestState.canSendRequest(currentTimeMs) && !heartbeatNow) {
        // Too early to heartbeat: report only the time until the next attempt.
        return new NetworkClientDelegate.PollResult(heartbeatRequestState.timeToNextHeartbeatMs(currentTimeMs));
    }
    NetworkClientDelegate.UnsentRequest request = makeHeartbeatRequest(currentTimeMs, false);
    return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs, Collections.singletonList(request));
}
// Verifies that after a FENCED_MEMBER_EPOCH response the member stops heartbeating
// (shouldSkipHeartbeat == true) and resumes once the flag clears.
@Test
public void testFencedMemberStopHeartbeatUntilItReleasesAssignmentToRejoin() {
    heartbeatRequestManager = createHeartbeatRequestManager(
            coordinatorRequestManager,
            membershipManager,
            heartbeatState,
            heartbeatRequestState,
            backgroundEventHandler);
    time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS);
    NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds());
    assertEquals(1, result.unsentRequests.size());
    // Receive HB response fencing member
    when(subscriptions.hasAutoAssignedPartitions()).thenReturn(true);
    ClientResponse response = createHeartbeatResponse(result.unsentRequests.get(0), Errors.FENCED_MEMBER_EPOCH);
    result.unsentRequests.get(0).handler().onComplete(response);
    verify(membershipManager).transitionToFenced();
    verify(heartbeatRequestState).onFailedAttempt(anyLong());
    verify(heartbeatRequestState).reset();
    when(membershipManager.shouldSkipHeartbeat()).thenReturn(true);
    result = heartbeatRequestManager.poll(time.milliseconds());
    assertEquals(0, result.unsentRequests.size(), "Member should not send heartbeats while FENCED");
    when(membershipManager.shouldSkipHeartbeat()).thenReturn(false);
    result = heartbeatRequestManager.poll(time.milliseconds());
    assertEquals(1, result.unsentRequests.size(), "Fenced member should resume heartbeat after transitioning to JOINING");
}
/**
 * Creates a {@link FlatMapElements} transform from an {@link InferableFunction} that
 * returns an {@code Iterable<OutputT>}, extracting the element type descriptor from the
 * function's Iterable output descriptor and the input descriptor directly from the function.
 */
public static <InputT, OutputT> FlatMapElements<InputT, OutputT> via(
        InferableFunction<? super InputT, ? extends Iterable<OutputT>> fn) {
    // Unwrap OutputT from the function's Iterable<OutputT> output descriptor.
    TypeDescriptor<OutputT> outputType =
            TypeDescriptors.extractFromTypeParameters(
                    (TypeDescriptor<Iterable<OutputT>>) fn.getOutputTypeDescriptor(),
                    Iterable.class,
                    new TypeDescriptors.TypeVariableExtractor<Iterable<OutputT>, OutputT>() {});
    TypeDescriptor<InputT> inputType = (TypeDescriptor<InputT>) fn.getInputTypeDescriptor();
    return new FlatMapElements<>(fn, inputType, outputType);
}
// Verifies the output PCollection's type descriptor (and its coder) is the element type
// String, not the Set<String> returned by the SimpleFunction.
@Test
@Category(NeedsRunner.class)
public void testFlatMapFnOutputTypeDescriptor() throws Exception {
    PCollection<String> output =
            pipeline
                    .apply(Create.of("hello"))
                    .apply(
                            FlatMapElements.via(
                                    new SimpleFunction<String, Set<String>>() {
                                        @Override
                                        public Set<String> apply(String input) {
                                            return ImmutableSet.copyOf(input.split(""));
                                        }
                                    }));
    assertThat(
            output.getTypeDescriptor(),
            equalTo((TypeDescriptor<String>) new TypeDescriptor<String>() {}));
    assertThat(
            pipeline.getCoderRegistry().getCoder(output.getTypeDescriptor()),
            equalTo(pipeline.getCoderRegistry().getCoder(new TypeDescriptor<String>() {})));
    // Make sure the pipeline runs
    pipeline.run();
}
/**
 * Creates a {@link Materialization} by converting the materialization info's transform
 * descriptions into executable transforms (via a visitor carrying the query id and
 * context stacker) and delegating construction to the materialization factory.
 */
public Materialization create(
    final StreamsMaterialization delegate,
    final MaterializationInfo info,
    final QueryId queryId,
    final QueryContext.Stacker contextStacker
) {
  final TransformVisitor visitor = new TransformVisitor(queryId, contextStacker);
  final List<Transform> transformList = info.getTransforms()
      .stream()
      .map(transformInfo -> transformInfo.visit(visitor))
      .collect(Collectors.toList());
  return materializationFactory.create(delegate, info.getSchema(), transformList);
}
// Verifies that a transform backed by a predicate returning empty yields an empty result.
@Test
public void shouldBuildMaterializationWithNegativePredicateTransform() {
    // Given:
    factory.create(materialization, info, queryId, contextStacker);
    when(predicate.transform(any(), any(), any())).thenReturn(Optional.empty());
    final Transform transform = getTransform(1);

    // When:
    final Optional<GenericRow> result = transform.apply(keyIn, rowIn, ctx);

    // Then:
    assertThat(result, is(Optional.empty()));
}
/**
 * Replaces every filtered character in {@code content[offset, offset+length)} with
 * {@code REPLACEMENT_CHAR}, in place.
 *
 * @return {@code true} if at least one character was replaced; {@code false} for a
 *         {@code null} array or when nothing matched
 */
public boolean filter(char[] content, int offset, int length) {
    if (content == null) {
        return false;
    }
    boolean replacedAny = false;
    final int end = offset + length;
    for (int index = offset; index < end; index++) {
        if (isFiltered(content[index])) {
            content[index] = REPLACEMENT_CHAR;
            replacedAny = true;
        }
    }
    if (replacedAny) {
        // Logged once per call, not per character.
        LOG.warn("Identified and replaced non-XML chars");
    }
    return replacedAny;
}
// Verifies the String overload delegates to the char[] overload and returns the
// buffer content as mutated by that delegate.
@Test
public void testFilter1ArgFiltered() {
    when(nonXmlCharFiltererMock.filter(anyString())).thenCallRealMethod();
    when(nonXmlCharFiltererMock.filter(eq(new char[] { 'a', 'b', 'c' }), anyInt(), anyInt()))
            .thenAnswer(new Answer<Boolean>() {
                public Boolean answer(InvocationOnMock invocation) {
                    // Mutate the buffer in place, as the real char[] filter would.
                    char[] buffer = (char[]) invocation.getArguments()[0];
                    buffer[0] = 'i';
                    buffer[1] = 'o';
                    return true;
                }
            });
    String result = nonXmlCharFiltererMock.filter("abc");
    verify(nonXmlCharFiltererMock).filter(any(char[].class), eq(0), eq(3));
    assertEquals("ioc", result, "Should have returned filtered string");
}
/**
 * Reads {@code parameterValueLength} bytes from the payload and decodes them as a
 * string array. The decoder's second flag is set when the data does not start with
 * {@code '{'}.
 */
@Override
public Object read(final PostgreSQLPacketPayload payload, final int parameterValueLength) {
    final byte[] raw = new byte[parameterValueLength];
    payload.getByteBuf().readBytes(raw);
    final boolean notStartingWithBrace = raw[0] != '{';
    return ARRAY_PARAMETER_DECODER.decodeStringArray(raw, notStartingWithBrace);
}
// Verifies reading a brace-delimited quoted array value decodes to the element strings
// and consumes exactly the declared number of bytes.
@Test
void assertRead() {
    String parameterValue = "{\"a\",\"b\"}";
    int expectedLength = 4 + parameterValue.length();
    ByteBuf byteBuf = ByteBufTestUtils.createByteBuf(expectedLength);
    byteBuf.writeInt(parameterValue.length());
    byteBuf.writeCharSequence(parameterValue, StandardCharsets.ISO_8859_1);
    byteBuf.readInt();
    PostgreSQLPacketPayload payload = new PostgreSQLPacketPayload(byteBuf, StandardCharsets.UTF_8);
    Object actual = newInstance().read(payload, parameterValue.length());
    assertThat(actual, is(new String[]{"a", "b"}));
    assertThat(byteBuf.readerIndex(), is(expectedLength));
}
/**
 * Parses a serialized group key of the form {@code dataId+group} or
 * {@code dataId+group+tenant} back into its parts. The escapes {@code %2B} (for '+')
 * and {@code %25} (for '%') are decoded.
 *
 * @param groupKey the serialized key
 * @return {@code {dataId, group, tenant}}; {@code dataId}/{@code tenant} are {@code null}
 *         when absent
 * @throws IllegalArgumentException if the key has too many '+' separators, an invalid or
 *         truncated '%' escape, or an empty group
 */
public static String[] parseKey(String groupKey) {
    StringBuilder sb = new StringBuilder();
    String dataId = null;
    String group = null;
    String tenant = null;
    for (int i = 0; i < groupKey.length(); ++i) {
        char c = groupKey.charAt(i);
        if ('+' == c) {
            if (null == dataId) {
                dataId = sb.toString();
                sb.setLength(0);
            } else if (null == group) {
                group = sb.toString();
                sb.setLength(0);
            } else {
                throw new IllegalArgumentException("invalid groupkey:" + groupKey);
            }
        } else if ('%' == c) {
            // A '%' introduces a two-character escape; reject keys truncated mid-escape
            // instead of letting charAt(++i) throw StringIndexOutOfBoundsException.
            if (i + 2 >= groupKey.length()) {
                throw new IllegalArgumentException("invalid groupkey:" + groupKey);
            }
            char next = groupKey.charAt(++i);
            char nextnext = groupKey.charAt(++i);
            if ('2' == next && 'B' == nextnext) {
                sb.append('+');
            } else if ('2' == next && '5' == nextnext) {
                sb.append('%');
            } else {
                throw new IllegalArgumentException("invalid groupkey:" + groupKey);
            }
        } else {
            sb.append(c);
        }
    }
    // With fewer than two '+' separators the remaining buffer is the group;
    // otherwise it is the tenant.
    if (StringUtils.isBlank(group)) {
        group = sb.toString();
    } else {
        tenant = sb.toString();
    }
    if (group.length() == 0) {
        throw new IllegalArgumentException("invalid groupkey:" + groupKey);
    }
    return new String[] {dataId, group, tenant};
}
// Verifies that a key with no '+' separators is parsed as a bare group name.
@Test
void testParseKeyBySingleCharacter() {
    // Act
    final String[] actual = GroupKey2.parseKey("/");

    // Assert result
    assertArrayEquals(new String[] {null, "/", null}, actual);
}
@Override public BasicTypeDefine<MysqlType> reconvert(Column column) { BasicTypeDefine.BasicTypeDefineBuilder builder = BasicTypeDefine.<MysqlType>builder() .name(column.getName()) .nullable(column.isNullable()) .comment(column.getComment()) .defaultValue(column.getDefaultValue()); switch (column.getDataType().getSqlType()) { case NULL: builder.nativeType(MysqlType.NULL); builder.columnType(MYSQL_NULL); builder.dataType(MYSQL_NULL); break; case BOOLEAN: builder.nativeType(MysqlType.BOOLEAN); builder.columnType(String.format("%s(%s)", MYSQL_TINYINT, 1)); builder.dataType(MYSQL_TINYINT); builder.length(1L); break; case TINYINT: builder.nativeType(MysqlType.TINYINT); builder.columnType(MYSQL_TINYINT); builder.dataType(MYSQL_TINYINT); break; case SMALLINT: builder.nativeType(MysqlType.SMALLINT); builder.columnType(MYSQL_SMALLINT); builder.dataType(MYSQL_SMALLINT); break; case INT: builder.nativeType(MysqlType.INT); builder.columnType(MYSQL_INT); builder.dataType(MYSQL_INT); break; case BIGINT: builder.nativeType(MysqlType.BIGINT); builder.columnType(MYSQL_BIGINT); builder.dataType(MYSQL_BIGINT); break; case FLOAT: builder.nativeType(MysqlType.FLOAT); builder.columnType(MYSQL_FLOAT); builder.dataType(MYSQL_FLOAT); break; case DOUBLE: builder.nativeType(MysqlType.DOUBLE); builder.columnType(MYSQL_DOUBLE); builder.dataType(MYSQL_DOUBLE); break; case DECIMAL: DecimalType decimalType = (DecimalType) column.getDataType(); long precision = decimalType.getPrecision(); int scale = decimalType.getScale(); if (precision <= 0) { precision = DEFAULT_PRECISION; scale = DEFAULT_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is precision less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (precision > MAX_PRECISION) { scale = (int) Math.max(0, scale - (precision - MAX_PRECISION)); precision = MAX_PRECISION; log.warn( "The decimal column {} 
type decimal({},{}) is out of range, " + "which exceeds the maximum precision of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_PRECISION, precision, scale); } if (scale < 0) { scale = 0; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which is scale less than 0, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), precision, scale); } else if (scale > MAX_SCALE) { scale = MAX_SCALE; log.warn( "The decimal column {} type decimal({},{}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to decimal({},{})", column.getName(), decimalType.getPrecision(), decimalType.getScale(), MAX_SCALE, precision, scale); } builder.nativeType(MysqlType.DECIMAL); builder.columnType(String.format("%s(%s,%s)", MYSQL_DECIMAL, precision, scale)); builder.dataType(MYSQL_DECIMAL); builder.precision(precision); builder.scale(scale); break; case BYTES: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.nativeType(MysqlType.VARBINARY); builder.columnType( String.format("%s(%s)", MYSQL_VARBINARY, MAX_VARBINARY_LENGTH / 2)); builder.dataType(MYSQL_VARBINARY); } else if (column.getColumnLength() < MAX_VARBINARY_LENGTH) { builder.nativeType(MysqlType.VARBINARY); builder.columnType( String.format("%s(%s)", MYSQL_VARBINARY, column.getColumnLength())); builder.dataType(MYSQL_VARBINARY); } else if (column.getColumnLength() < POWER_2_24) { builder.nativeType(MysqlType.MEDIUMBLOB); builder.columnType(MYSQL_MEDIUMBLOB); builder.dataType(MYSQL_MEDIUMBLOB); } else { builder.nativeType(MysqlType.LONGBLOB); builder.columnType(MYSQL_LONGBLOB); builder.dataType(MYSQL_LONGBLOB); } break; case STRING: if (column.getColumnLength() == null || column.getColumnLength() <= 0) { builder.nativeType(MysqlType.LONGTEXT); builder.columnType(MYSQL_LONGTEXT); builder.dataType(MYSQL_LONGTEXT); } 
else if (column.getColumnLength() < POWER_2_8) { builder.nativeType(MysqlType.VARCHAR); builder.columnType( String.format("%s(%s)", MYSQL_VARCHAR, column.getColumnLength())); builder.dataType(MYSQL_VARCHAR); } else if (column.getColumnLength() < POWER_2_16) { builder.nativeType(MysqlType.TEXT); builder.columnType(MYSQL_TEXT); builder.dataType(MYSQL_TEXT); } else if (column.getColumnLength() < POWER_2_24) { builder.nativeType(MysqlType.MEDIUMTEXT); builder.columnType(MYSQL_MEDIUMTEXT); builder.dataType(MYSQL_MEDIUMTEXT); } else { builder.nativeType(MysqlType.LONGTEXT); builder.columnType(MYSQL_LONGTEXT); builder.dataType(MYSQL_LONGTEXT); } break; case DATE: builder.nativeType(MysqlType.DATE); builder.columnType(MYSQL_DATE); builder.dataType(MYSQL_DATE); break; case TIME: builder.nativeType(MysqlType.TIME); builder.dataType(MYSQL_TIME); if (version.isAtOrBefore(MySqlVersion.V_5_5)) { builder.columnType(MYSQL_TIME); } else if (column.getScale() != null && column.getScale() > 0) { int timeScale = column.getScale(); if (timeScale > MAX_TIME_SCALE) { timeScale = MAX_TIME_SCALE; log.warn( "The time column {} type time({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to time({})", column.getName(), column.getScale(), MAX_SCALE, timeScale); } builder.columnType(String.format("%s(%s)", MYSQL_TIME, timeScale)); builder.scale(timeScale); } else { builder.columnType(MYSQL_TIME); } break; case TIMESTAMP: builder.nativeType(MysqlType.DATETIME); builder.dataType(MYSQL_DATETIME); if (version.isAtOrBefore(MySqlVersion.V_5_5)) { builder.columnType(MYSQL_DATETIME); } else if (column.getScale() != null && column.getScale() > 0) { int timestampScale = column.getScale(); if (timestampScale > MAX_TIMESTAMP_SCALE) { timestampScale = MAX_TIMESTAMP_SCALE; log.warn( "The timestamp column {} type timestamp({}) is out of range, " + "which exceeds the maximum scale of {}, " + "it will be converted to timestamp({})", column.getName(), column.getScale(), 
MAX_TIMESTAMP_SCALE, timestampScale); } builder.columnType(String.format("%s(%s)", MYSQL_DATETIME, timestampScale)); builder.scale(timestampScale); } else { builder.columnType(MYSQL_DATETIME); } break; default: throw CommonError.convertToConnectorTypeError( DatabaseIdentifier.MYSQL, column.getDataType().getSqlType().name(), column.getName()); } return builder.build(); }
// Verifies a SHORT column maps to MySQL SMALLINT for native, column, and data types.
@Test
public void testReconvertShort() {
    Column column = PhysicalColumn.builder().name("test").dataType(BasicType.SHORT_TYPE).build();
    BasicTypeDefine<MysqlType> typeDefine = MySqlTypeConverter.DEFAULT_INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(MysqlType.SMALLINT, typeDefine.getNativeType());
    Assertions.assertEquals(MySqlTypeConverter.MYSQL_SMALLINT, typeDefine.getColumnType());
    Assertions.assertEquals(MySqlTypeConverter.MYSQL_SMALLINT, typeDefine.getDataType());
}
/**
 * Stores {@code key -> val} in the thread-local read/write map, creating the map lazily
 * for this thread, and invalidates the derived read-only map.
 *
 * @throws IllegalArgumentException if {@code key} is null
 */
public void put(String key, String val) throws IllegalArgumentException {
    if (key == null) {
        throw new IllegalArgumentException("key cannot be null");
    }
    Map<String, String> map = readWriteThreadLocalMap.get();
    if (map == null) {
        map = new HashMap<>();
        readWriteThreadLocalMap.set(map);
    }
    map.put(key, val);
    nullifyReadOnlyThreadLocalMap();
}
// Verifies that a child thread inheriting a large MDC map can put concurrently with the
// parent without ConcurrentModificationException.
@Test
public void nearSimultaneousPutsShouldNotCauseConcurrentModificationException() throws InterruptedException {
    // For the weirdest reason, modifications to mdcAdapter must be done
    // before the definition anonymous ChildThread class below. Otherwise, the
    // map in the child thread, the one contained in
    // mdcAdapter.copyOnInheritThreadLocal,
    // is null. How strange is that?

    // let the map have lots of elements so that copying it takes time
    for (int i = 0; i < 2048; i++) {
        mdcAdapter.put("k" + i, "v" + i);
    }
    ChildThread childThread = new ChildThread(mdcAdapter, null, null) {
        @Override
        public void run() {
            for (int i = 0; i < 16; i++) {
                mdcAdapter.put("ck" + i, "cv" + i);
                Thread.yield();
            }
            successful = true;
        }
    };
    childThread.start();
    Thread.sleep(1);
    for (int i = 0; i < 16; i++) {
        mdcAdapter.put("K" + i, "V" + i);
    }
    childThread.join();
    Assertions.assertTrue(childThread.successful);
}
/** Renders the permission's numeric mode as an octal string. */
private static String toString(final FsPermission permission) {
    final short mode = permission.toShort();
    return String.format("%o", mode);
}
// Verifies HdfsFileStatus: a non-symlink status throws on getSymlink(), and toString()
// renders all fields (including the octal permission as symbolic "r----x--t").
@Test
public void testHdfsFileStatus() throws Exception {
    HdfsFileStatus hdfsFileStatus = new HdfsFileStatus.Builder()
            .replication(1)
            .blocksize(1024)
            .perm(new FsPermission((short) 777))
            .owner("owner")
            .group("group")
            .symlink(new byte[0])
            .path(new byte[0])
            .fileId(1010)
            .isdir(true)
            .build();
    Assert.assertFalse(hdfsFileStatus.isSymlink());
    LambdaTestUtils.intercept(IOException.class,
            "Path " + hdfsFileStatus.getPath() + " is not a symbolic link",
            () -> hdfsFileStatus.getSymlink());
    String expectString = new StringBuilder()
            .append("HdfsLocatedFileStatus")
            .append("{")
            .append("path=" + null)
            .append("; isDirectory=" + true)
            .append("; modification_time=" + 0)
            .append("; access_time=" + 0)
            .append("; owner=" + "owner")
            .append("; group=" + "group")
            .append("; permission=" + "r----x--t")
            .append("; isSymlink=" + false)
            .append("; hasAcl=" + false)
            .append("; isEncrypted=" + false)
            .append("; isErasureCoded=" + false)
            .append("}")
            .toString();
    Assert.assertEquals(expectString, hdfsFileStatus.toString());
}
/**
 * Formats {@code amount} as Chinese numerals, delegating to the three-argument overload
 * with its final flag fixed to {@code false}.
 */
public static String format(double amount, boolean isUseTraditional) {
    return format(amount, isUseTraditional, false);
}
// Verifies Chinese-numeral formatting around the 亿 (10^8) boundary, including the
// "零" (zero) filler placement between magnitude groups.
@Test
public void formatHundredMillionLongTest() {
    String f = NumberChineseFormatter.format(1_0000_0000L, false);
    assertEquals("一亿", f);
    f = NumberChineseFormatter.format(1_0000_0001L, false);
    assertEquals("一亿零一", f);
    f = NumberChineseFormatter.format(1_0000_1000L, false);
    assertEquals("一亿零一千", f);
    f = NumberChineseFormatter.format(1_0001_0000L, false);
    assertEquals("一亿零一万", f);
    f = NumberChineseFormatter.format(1_0010_0000L, false);
    assertEquals("一亿零一十万", f);
    f = NumberChineseFormatter.format(1_0010_0000L, false);
    assertEquals("一亿零一十万", f);
    f = NumberChineseFormatter.format(1_0100_0000L, false);
    assertEquals("一亿零一百万", f);
    f = NumberChineseFormatter.format(1_1000_0000L, false);
    assertEquals("一亿一千万", f);
    f = NumberChineseFormatter.format(10_1000_0000L, false);
    assertEquals("一十亿零一千万", f);
    f = NumberChineseFormatter.format(100_1000_0000L, false);
    assertEquals("一百亿零一千万", f);
    f = NumberChineseFormatter.format(1000_1000_0000L, false);
    assertEquals("一千亿零一千万", f);
    f = NumberChineseFormatter.format(1100_1000_0000L, false);
    assertEquals("一千一百亿零一千万", f);
    f = NumberChineseFormatter.format(9999_0000_0000L, false);
    assertEquals("九千九百九十九亿", f);
}
/** Renders the target and destination address byte arrays for diagnostics. */
@Override
public String toString() {
    return toStringHelper(getClass())
            .add("targetAddress", Arrays.toString(targetAddress))
            .add("destinationAddress", Arrays.toString(destinationAddress))
            .toString();
}
// Smoke-tests that deserializing a Redirect packet and calling toString() does not throw;
// field-level assertions are still pending (see TODO).
@Test
public void testToStringRedirect() throws Exception {
    Redirect rd = deserializer.deserialize(bytePacket, 0, bytePacket.length);
    String str = rd.toString();

    // TODO: need to handle TARGET_ADDRESS and DESTINATION_ADDRESS
}
/**
 * Handles an OffsetCommit request: validates it against the group, keeps classic-group
 * sessions alive when appropriate, and for each requested partition either rejects
 * over-large metadata or emits an offset-commit record plus a success partition response.
 *
 * @return the per-partition response together with the records to persist
 * @throws ApiException if the commit is not valid for the group
 */
public CoordinatorResult<OffsetCommitResponseData, CoordinatorRecord> commitOffset(
    RequestContext context,
    OffsetCommitRequestData request
) throws ApiException {
    Group group = validateOffsetCommit(context, request);

    // In the old consumer group protocol, the offset commits maintain the session if
    // the group is in Stable or PreparingRebalance state.
    if (group.type() == Group.GroupType.CLASSIC) {
        ClassicGroup classicGroup = (ClassicGroup) group;
        if (classicGroup.isInState(ClassicGroupState.STABLE) || classicGroup.isInState(ClassicGroupState.PREPARING_REBALANCE)) {
            groupMetadataManager.rescheduleClassicGroupMemberHeartbeat(
                classicGroup,
                classicGroup.member(request.memberId())
            );
        }
    }

    final OffsetCommitResponseData response = new OffsetCommitResponseData();
    final List<CoordinatorRecord> records = new ArrayList<>();
    final long currentTimeMs = time.milliseconds();
    final OptionalLong expireTimestampMs = expireTimestampMs(request.retentionTimeMs(), currentTimeMs);

    request.topics().forEach(topic -> {
        final OffsetCommitResponseTopic topicResponse = new OffsetCommitResponseTopic().setName(topic.name());
        response.topics().add(topicResponse);

        topic.partitions().forEach(partition -> {
            if (isMetadataInvalid(partition.committedMetadata())) {
                // Metadata too large: fail this partition only.
                topicResponse.partitions().add(new OffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.OFFSET_METADATA_TOO_LARGE.code()));
            } else {
                log.debug("[GroupId {}] Committing offsets {} for partition {}-{} from member {} with leader epoch {}.",
                    request.groupId(), partition.committedOffset(), topic.name(), partition.partitionIndex(),
                    request.memberId(), partition.committedLeaderEpoch());
                topicResponse.partitions().add(new OffsetCommitResponsePartition()
                    .setPartitionIndex(partition.partitionIndex())
                    .setErrorCode(Errors.NONE.code()));
                final OffsetAndMetadata offsetAndMetadata = OffsetAndMetadata.fromRequest(
                    partition,
                    currentTimeMs,
                    expireTimestampMs
                );
                records.add(GroupCoordinatorRecordHelpers.newOffsetCommitRecord(
                    request.groupId(),
                    topic.name(),
                    partition.partitionIndex(),
                    offsetAndMetadata,
                    metadataImage.features().metadataVersion()
                ));
            }
        });
    });

    if (!records.isEmpty()) {
        metrics.record(GroupCoordinatorMetrics.OFFSET_COMMITS_SENSOR_NAME, records.size());
    }

    return new CoordinatorResult<>(records, response);
}
// Verifies that committing with a group instance id unknown to the group is rejected
// with UnknownMemberIdException.
@Test
public void testGenericGroupOffsetCommitWithUnknownInstanceId() {
    OffsetMetadataManagerTestContext context = new OffsetMetadataManagerTestContext.Builder().build();
    // Create an empty group.
    ClassicGroup group = context.groupMetadataManager.getOrMaybeCreateClassicGroup(
        "foo",
        true
    );
    // Add member without static id.
    group.add(mkGenericMember("member", Optional.empty()));

    // Verify that the request is rejected with the correct exception.
    assertThrows(UnknownMemberIdException.class, () -> context.commitOffset(
        new OffsetCommitRequestData()
            .setGroupId("foo")
            .setMemberId("member")
            .setGroupInstanceId("instanceid")
            .setGenerationIdOrMemberEpoch(10)
            .setTopics(Collections.singletonList(
                new OffsetCommitRequestData.OffsetCommitRequestTopic()
                    .setName("bar")
                    .setPartitions(Collections.singletonList(
                        new OffsetCommitRequestData.OffsetCommitRequestPartition()
                            .setPartitionIndex(0)
                            .setCommittedOffset(100L)
                    ))
            ))
        )
    );
}
/** Identifies this plugin as a client-side EXCEPTION plugin. */
@Override
public EnhancedPluginType getType() {
    return EnhancedPluginType.Client.EXCEPTION;
}
// Verifies the reporter advertises the client EXCEPTION plugin type.
@Test
public void testType() {
    assertThat(exceptionCircuitBreakerReporter.getType()).isEqualTo(EnhancedPluginType.Client.EXCEPTION);
}
/**
 * Formats queued CE tasks into their WS representation, building one shared DTO cache
 * for all rows up front. Returns an unmodifiable list (Stream.toList).
 */
public List<Ce.Task> formatQueue(DbSession dbSession, List<CeQueueDto> dtos) {
    DtoCache cache = DtoCache.forQueueDtos(dbClient, dbSession, dtos);
    return dtos.stream().map(input -> formatQueue(input, cache)).toList();
}
// Verifies that formatting an in-progress queued task resolves component fields,
// submitter login, and execution-time presence flags.
@Test
public void formatQueue_with_component_and_other_fields() {
    String uuid = "COMPONENT_UUID";
    db.components().insertPrivateProject((t) -> t.setUuid(uuid).setKey("COMPONENT_KEY").setName("Component Name")).getMainBranchComponent();
    UserDto user = db.users().insertUser();
    CeQueueDto dto = new CeQueueDto();
    dto.setUuid("UUID");
    dto.setTaskType("TYPE");
    dto.setStatus(CeQueueDto.Status.PENDING);
    dto.setCreatedAt(1_450_000_000_000L);
    dto.setComponentUuid(uuid);
    dto.setSubmitterUuid(user.getUuid());
    db.getDbClient().ceQueueDao().insert(db.getSession(), dto);
    makeInProgress(db.getSession(), "workerUuid", 1_958_000_000_000L, dto);
    CeQueueDto inProgress = db.getDbClient().ceQueueDao().selectByUuid(db.getSession(), dto.getUuid()).get();
    Ce.Task wsTask = underTest.formatQueue(db.getSession(), inProgress);
    assertThat(wsTask.getType()).isEqualTo("TYPE");
    assertThat(wsTask.getId()).isEqualTo("UUID");
    assertThat(wsTask.getComponentId()).isEqualTo(uuid);
    assertThat(wsTask.getComponentKey()).isEqualTo("COMPONENT_KEY");
    assertThat(wsTask.getComponentName()).isEqualTo("Component Name");
    assertThat(wsTask.getComponentQualifier()).isEqualTo("TRK");
    assertThat(wsTask.getStatus()).isEqualTo(Ce.TaskStatus.IN_PROGRESS);
    assertThat(wsTask.getSubmitterLogin()).isEqualTo(user.getLogin());
    assertThat(wsTask.hasExecutionTimeMs()).isTrue();
    assertThat(wsTask.hasExecutedAt()).isFalse();
    assertThat(wsTask.hasScannerContext()).isFalse();
}
/**
 * Returns all grants whose grantee is one of {@code grantees} OR the global user grantee.
 */
public ImmutableSet<GrantDTO> getForGranteesOrGlobal(Set<GRN> grantees) {
    return streamQuery(DBQuery.or(
            DBQuery.in(GrantDTO.FIELD_GRANTEE, grantees),
            DBQuery.is(GrantDTO.FIELD_GRANTEE, GRNRegistry.GLOBAL_USER_GRN.toString())
    )).collect(ImmutableSet.toImmutableSet());
}
// Verifies the per-user grant counts from the fixture (global grants included in both).
@Test
@MongoDBFixtures("grants.json")
public void getForGranteesOrGlobal() {
    final GRN jane = grnRegistry.newGRN("user", "jane");
    final GRN john = grnRegistry.newGRN("user", "john");
    assertThat(dbService.getForGranteesOrGlobal(Collections.singleton(jane))).hasSize(4);
    assertThat(dbService.getForGranteesOrGlobal(Collections.singleton(john))).hasSize(3);
}
/** Sets whether JVM metrics are enabled; returns this builder for chaining. */
public MetricsBuilder enableJvm(Boolean enableJvm) {
    this.enableJvm = enableJvm;
    return getThis();
}
// Verifies the builder flag is carried into the built config.
@Test
void enableJvm() {
    MetricsBuilder builder = MetricsBuilder.newBuilder();
    builder.enableJvm(true);
    Assertions.assertTrue(builder.build().getEnableJvm());
}
/**
 * Returns an iterator over all entries across every underlying store instance, rewrapping
 * InvalidStateStoreException with a rediscovery hint since the store may have migrated.
 */
@Override
public KeyValueIterator<K, V> all() {
    final NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>> nextIteratorFunction = new NextIteratorFunction<K, V, ReadOnlyKeyValueStore<K, V>>() {
        @Override
        public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) {
            try {
                return store.all();
            } catch (final InvalidStateStoreException e) {
                // The store may have been migrated; tell callers to re-discover it.
                throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata.");
            }
        }
    };
    final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType);
    return new DelegatingPeekingKeyValueIterator<>(
            storeName,
            new CompositeKeyValueIterator<>(stores.iterator(), nextIteratorFunction));
}
// The iterator returned by all() must be read-only: remove() is unsupported.
@Test
public void shouldThrowUnsupportedOperationExceptionWhileRemove() {
    // try-with-resources ensures the store iterator is closed even on failure.
    try (final KeyValueIterator<String, String> keyValueIterator = theStore.all()) {
        assertThrows(UnsupportedOperationException.class, keyValueIterator::remove);
    }
}
/**
 * Returns the identifier of the tenant that owns this virtual network.
 *
 * @return the tenant id
 */
@Override
public TenantId tenantId() {
    return tenantId;
}
// Equality contract: networks with the same (networkId, tenantId) are equal;
// a different tenant or a different network id breaks equality. Guava's
// EqualsTester also verifies hashCode consistency across the groups.
@Test
public void testEquality() {
    // Same id + same tenant -> one equality group.
    DefaultVirtualNetwork network1 = new DefaultVirtualNetwork(NetworkId.networkId(0), TenantId.tenantId(tenantIdValue1));
    DefaultVirtualNetwork network2 = new DefaultVirtualNetwork(NetworkId.networkId(0), TenantId.tenantId(tenantIdValue1));
    // Same id, different tenant.
    DefaultVirtualNetwork network3 = new DefaultVirtualNetwork(NetworkId.networkId(0), TenantId.tenantId(tenantIdValue2));
    // Different id, same tenant as network3.
    DefaultVirtualNetwork network4 = new DefaultVirtualNetwork(NetworkId.networkId(1), TenantId.tenantId(tenantIdValue2));

    new EqualsTester().addEqualityGroup(network1, network2).addEqualityGroup(network3)
            .addEqualityGroup(network4).testEquals();
}
/**
 * Writes a 4-byte int at the current position and advances the position by
 * {@code INT_SIZE_IN_BYTES}. Byte order follows this output's
 * {@code isBigEndian} flag.
 *
 * @param v the value to write
 * @throws IOException if the backing buffer cannot be grown to fit the value
 */
@Override
public void writeInt(final int v) throws IOException {
    // Ensure capacity before touching the buffer, then write and advance.
    ensureAvailable(INT_SIZE_IN_BYTES);
    Bits.writeInt(buffer, pos, v, isBigEndian);
    pos += INT_SIZE_IN_BYTES;
}
// Writes an int at an explicit position with an explicit LITTLE_ENDIAN order,
// then reads the raw bytes back via Bits.readIntL to verify the byte layout.
@Test
public void testWriteIntForPositionVByteOrder() throws Exception {
    int expected = 100;

    out.writeInt(2, expected, LITTLE_ENDIAN);
    int actual = Bits.readIntL(out.buffer, 2);

    assertEquals(expected, actual);
}
/**
 * Tokenizes the given text with the Lucene analyzer configured for the
 * language, stem mode and accent-removal setting.
 *
 * @param input the text to tokenize; an empty string yields no tokens
 * @param language the language used to select the analyzer
 * @param stemMode the stemming mode used to select the analyzer
 * @param removeAccents whether the analyzer should strip accents
 * @return the produced tokens, possibly empty
 */
@Override
public Iterable<Token> tokenize(String input, Language language, StemMode stemMode, boolean removeAccents) {
    if (input.isEmpty()) {
        // Nothing to tokenize: skip the analyzer lookup entirely.
        return List.of();
    }
    List<Token> result = textToTokens(input, analyzerFactory.getAnalyzer(language, stemMode, removeAccents));
    log.log(Level.FINEST, () -> "Tokenized '" + language + "' text='" + input + "' into: n=" + result.size() + ", tokens=" + result);
    return result;
}
// Analyzer config keys may be composite ("<language>/<stemMode>"): a custom
// analyzer registered under "en/ALL" must be used only when both language AND
// stem mode match; otherwise the default analysis applies.
@Test
public void compositeConfigKey() {
    String reversingAnalyzerKey = Language.ENGLISH.languageCode() + "/" + StemMode.ALL;
    // Register a string-reversing token filter under the composite key.
    LuceneAnalysisConfig enConfig = new LuceneAnalysisConfig.Builder()
            .analysis(
                Map.of(reversingAnalyzerKey,
                    new LuceneAnalysisConfig.Analysis.Builder().tokenFilters(List.of(
                        new LuceneAnalysisConfig
                                .Analysis
                                .TokenFilters
                                .Builder()
                                .name("reverseString"))))
            ).build();
    LuceneLinguistics linguistics = new LuceneLinguistics(enConfig, new ComponentRegistry<>());
    // Matching StemMode
    Iterable<Token> tokens = linguistics
            .getTokenizer()
            .tokenize("Dogs and Cats", Language.ENGLISH, StemMode.ALL, false);
    assertEquals(List.of("sgoD", "dna", "staC"), tokenStrings(tokens));
    // StemMode is different
    Iterable<Token> stemModeTokens = linguistics
            .getTokenizer()
            .tokenize("Dogs and Cats", Language.ENGLISH, StemMode.BEST, false);
    assertEquals(List.of("dog", "cat"), tokenStrings(stemModeTokens));
}
T getFunction(final List<SqlArgument> arguments) { // first try to get the candidates without any implicit casting Optional<T> candidate = findMatchingCandidate(arguments, false); if (candidate.isPresent()) { return candidate.get(); } else if (!supportsImplicitCasts) { throw createNoMatchingFunctionException(arguments); } // if none were found (candidate isn't present) try again with implicit casting candidate = findMatchingCandidate(arguments, true); if (candidate.isPresent()) { return candidate.get(); } throw createNoMatchingFunctionException(arguments); }
// Two generic candidates both match (INTEGER, INTEGER, INTEGER) via implicit
// casting, so resolution must fail with an "ambiguous" error rather than
// silently picking one of them.
@Test
public void shouldThrowOnAmbiguousImplicitCastWithGenerics() {
    // Given:
    givenFunctions(
        function(FIRST_FUNC, -1, LONG, GenericType.of("A"), GenericType.of("B")),
        function(SECOND_FUNC, -1, DOUBLE, GenericType.of("A"), GenericType.of("B"))
    );

    // When:
    final KsqlException e = assertThrows(KsqlException.class, () -> udfIndex
        .getFunction(ImmutableList
            .of(SqlArgument.of(INTEGER), SqlArgument.of(INTEGER), SqlArgument.of(INTEGER))));

    // Then:
    assertThat(e.getMessage(), containsString("Function 'name' cannot be resolved due "
        + "to ambiguous method parameters "
        + "(INTEGER, INTEGER, INTEGER)"));
}
/**
 * Returns true if any registered frontend refers to the same host as
 * {@code ipOrFqdn}, matching either by resolved IP or by non-empty FQDN.
 *
 * <p>If a frontend's own host cannot be resolved, it is compared literally
 * against the target's FQDN instead of being skipped outright.
 *
 * @param ipOrFqdn an IP address or fully-qualified domain name
 * @return true if a matching frontend exists
 * @throws UnknownHostException if {@code ipOrFqdn} itself cannot be resolved
 */
protected boolean checkFeExistByIpOrFqdn(String ipOrFqdn) throws UnknownHostException {
    // Pair layout per NetUtils.getIpAndFqdnByHost: first = IP, second = FQDN
    // (presumably "" when no FQDN is available — see the isNullOrEmpty guards).
    Pair<String, String> targetIpAndFqdn = NetUtils.getIpAndFqdnByHost(ipOrFqdn);
    for (Frontend fe : frontends.values()) {
        Pair<String, String> curIpAndFqdn;
        try {
            curIpAndFqdn = NetUtils.getIpAndFqdnByHost(fe.getHost());
        } catch (UnknownHostException e) {
            LOG.warn("failed to get right ip by fqdn {}", fe.getHost(), e);
            // Resolution failed: fall back to a literal comparison between the
            // target's FQDN and the frontend's configured host string.
            if (targetIpAndFqdn.second.equals(fe.getHost())
                    && !Strings.isNullOrEmpty(targetIpAndFqdn.second)) {
                return true;
            }
            continue;
        }
        // target, cur has same ip
        if (targetIpAndFqdn.first.equals(curIpAndFqdn.first)) {
            return true;
        }
        // target, cur has same fqdn and both of them are not equal ""
        if (targetIpAndFqdn.second.equals(curIpAndFqdn.second)
                && !Strings.isNullOrEmpty(targetIpAndFqdn.second)) {
            return true;
        }
    }
    return false;
}
// An unresolvable target host must propagate UnknownHostException to the
// caller (thrown while resolving the target itself) rather than returning false.
@Test(expected = UnknownHostException.class)
public void testCheckFeExistByIpOrFqdnException() throws UnknownHostException {
    NodeMgr nodeMgr = new NodeMgr();
    nodeMgr.checkFeExistByIpOrFqdn("not-exist-host.com");
}
// Creates or updates administration settings (SYS_ADMIN only). Side effects:
// "mail"/"sms" settings trigger an immediate reconfiguration of the respective
// service, and mail credentials are stripped from the response payload.
@ApiOperation(value = "Creates or Updates the Administration Settings (saveAdminSettings)",
        notes = "Creates or Updates the Administration Settings. Platform generates random Administration Settings Id during settings creation. "
                + "The Administration Settings Id will be present in the response. Specify the Administration Settings Id when you would like to update the Administration Settings. "
                + "Referencing non-existing Administration Settings Id will cause an error." + SYSTEM_AUTHORITY_PARAGRAPH)
@PreAuthorize("hasAuthority('SYS_ADMIN')")
@RequestMapping(value = "/settings", method = RequestMethod.POST)
@ResponseBody
public AdminSettings saveAdminSettings(
        @Parameter(description = "A JSON value representing the Administration Settings.")
        @RequestBody AdminSettings adminSettings) throws ThingsboardException {
    accessControlService.checkPermission(getCurrentUser(), Resource.ADMIN_SETTINGS, Operation.WRITE);
    adminSettings.setTenantId(getTenantId());
    // Settings are persisted under the system tenant.
    adminSettings = checkNotNull(adminSettingsService.saveAdminSettings(TenantId.SYS_TENANT_ID, adminSettings));
    if (adminSettings.getKey().equals("mail")) {
        // Apply the new mail configuration and never echo credentials back.
        mailService.updateMailConfiguration();
        ((ObjectNode) adminSettings.getJsonValue()).remove("password");
        ((ObjectNode) adminSettings.getJsonValue()).remove("refreshToken");
    } else if (adminSettings.getKey().equals("sms")) {
        smsService.updateSmsConfiguration();
    }
    return adminSettings;
}
// Round-trips the "general" admin settings: updates baseUrl via POST, verifies
// the change is visible through GET, then restores the original value so other
// tests are unaffected.
@Test
public void testSaveAdminSettings() throws Exception {
    loginSysAdmin();
    AdminSettings adminSettings = doGet("/api/admin/settings/general", AdminSettings.class);

    // Change baseUrl and save.
    JsonNode jsonValue = adminSettings.getJsonValue();
    ((ObjectNode) jsonValue).put("baseUrl", "http://myhost.org");
    adminSettings.setJsonValue(jsonValue);
    doPost("/api/admin/settings", adminSettings).andExpect(status().isOk());

    // The updated value must be returned on a subsequent read.
    doGet("/api/admin/settings/general")
            .andExpect(status().isOk())
            .andExpect(content().contentType(contentType))
            .andExpect(jsonPath("$.jsonValue.baseUrl", is("http://myhost.org")));

    // Restore the default baseUrl.
    ((ObjectNode) jsonValue).put("baseUrl", "http://localhost:8080");
    adminSettings.setJsonValue(jsonValue);
    doPost("/api/admin/settings", adminSettings)
            .andExpect(status().isOk());
}
/**
 * Starts and returns a client span for the request, using {@code parent} as
 * the parent trace context (a null parent starts a new trace), regardless of
 * what is in the current trace context.
 *
 * @param request the outgoing RPC client request; must not be null
 * @param parent the explicit parent context, or null to start a new trace
 * @return the started client span
 * @throws NullPointerException if {@code request} is null
 */
public Span handleSendWithParent(RpcClientRequest request, @Nullable TraceContext parent) {
    if (request == null) {
        throw new NullPointerException("request == null");
    }
    brave.Span nextSpan = tracer.nextSpanWithParent(sampler, request, parent);
    return handleSend(request, nextSpan);
}
// Even when the current trace context has been explicitly cleared (scoped to
// null), the parent passed to handleSendWithParent() must win: the resulting
// span is a child of `context`, not a new root.
@Test
void handleSendWithParent_overrideNull() {
    try (Scope scope = httpTracing.tracing.currentTraceContext().newScope(null)) {
        brave.Span span = handler.handleSendWithParent(request, context);

        // If the overwrite was successful, we have a child span.
        assertThat(span.context().parentIdAsLong()).isEqualTo(context.spanId());
    }
}