focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Returns whether this process queue still holds any messages in its tree map.
 * <p>
 * Acquires the tree-map read lock interruptibly. If the calling thread is
 * interrupted while waiting for the lock, the interrupt status is restored and
 * {@code true} is returned as the conservative answer (emptiness could not be
 * verified).
 */
public boolean hasTempMessage() {
    try {
        this.treeMapLock.readLock().lockInterruptibly();
        try {
            return !this.msgTreeMap.isEmpty();
        } finally {
            this.treeMapLock.readLock().unlock();
        }
    } catch (InterruptedException e) {
        // Fix: restore the interrupt flag instead of silently swallowing the
        // exception, so callers and executors can observe the interruption.
        Thread.currentThread().interrupt();
    }
    return true;
}
@Test
public void testHasTempMessage() {
    // A freshly created queue must report that it holds no pending messages.
    final ProcessQueue queue = createProcessQueue();
    assertFalse(queue.hasTempMessage());
}
public static Calendar toCalendar(String date) throws IOException { Calendar retval = null; if ((date != null) && (!date.trim().isEmpty())) { date = date.trim(); // these are the default values int month = 1; int day = 1; int hour = 0; int minute = 0; int second = 0; // first string off the prefix if it exists try { SimpleTimeZone zone = null; if (Pattern.matches("^\\d{4}-\\d{2}-\\d{2}T.*", date)) { // Assuming ISO860 date string return fromISO8601(date); } else if (date.startsWith("D:")) { date = date.substring(2); } date = date.replaceAll("[-:T]", ""); if (date.length() < 4) { throw new IOException("Error: Invalid date format '" + date + "'"); } int year = Integer.parseInt(date.substring(0, 4)); if (date.length() >= 6) { month = Integer.parseInt(date.substring(4, 6)); } if (date.length() >= 8) { day = Integer.parseInt(date.substring(6, 8)); } if (date.length() >= 10) { hour = Integer.parseInt(date.substring(8, 10)); } if (date.length() >= 12) { minute = Integer.parseInt(date.substring(10, 12)); } int timeZonePos = 12; if (date.length() - 12 > 5 || (date.length() - 12 == 3 && date.endsWith("Z"))) { second = Integer.parseInt(date.substring(12, 14)); timeZonePos = 14; } if (date.length() >= (timeZonePos + 1)) { char sign = date.charAt(timeZonePos); if (sign == 'Z') { zone = new SimpleTimeZone(0, "Unknown"); } else { int hours = 0; int minutes = 0; if (date.length() >= (timeZonePos + 3)) { if (sign == '+') { // parseInt cannot handle the + sign hours = Integer.parseInt(date.substring((timeZonePos + 1), (timeZonePos + 3))); } else { hours = -Integer.parseInt(date.substring(timeZonePos, (timeZonePos + 2))); } } if (sign == '+') { if (date.length() >= (timeZonePos + 5)) { minutes = Integer.parseInt(date.substring((timeZonePos + 3), (timeZonePos + 5))); } } else { if (date.length() >= (timeZonePos + 4)) { minutes = Integer.parseInt(date.substring((timeZonePos + 2), (timeZonePos + 4))); } } zone = new SimpleTimeZone(hours * 60 * 60 * 1000 + minutes * 60 * 1000, 
"Unknown"); } } if (zone == null) { retval = new GregorianCalendar(); } else { updateZoneId(zone); retval = new GregorianCalendar(zone); } retval.clear(); retval.set(year, month - 1, day, hour, minute, second); } catch (NumberFormatException e) { // remove the arbitrary : in the timezone. SimpleDateFormat can't handle it if (date.charAt(date.length() - 3) == ':' && (date.charAt(date.length() - 6) == '+' || date.charAt(date.length() - 6) == '-')) { // that's a timezone string, remove the : date = date.substring(0, date.length() - 3) + date.substring(date.length() - 2); } for (int i = 0; (retval == null) && (i < POTENTIAL_FORMATS.length); i++) { try { Date utilDate = POTENTIAL_FORMATS[i].parse(date); retval = new GregorianCalendar(); retval.setTime(utilDate); } catch (ParseException pe) { // ignore and move to next potential format } } if (retval == null) { // we didn't find a valid date format so throw an exception throw new IOException("Error converting date:" + date, e); } } } return retval; }
// Exercises DateConverter.toCalendar against partial dates, missing-seconds
// forms and a range of time-zone suffixes, cross-checking against java.time.
@Test
void testDateConversion() throws Exception {
    // Test partial dates
    Calendar convDate = DateConverter.toCalendar("2015-02-02");
    assertEquals(2015, convDate.get(Calendar.YEAR));
    //Test missing seconds
    assertEquals(DateConverter.toCalendar("2015-12-08T12:07:00-05:00"),
            DateConverter.toCalendar("2015-12-08T12:07-05:00"));
    assertEquals(DateConverter.toCalendar("2011-11-20T10:09:00Z"),
            DateConverter.toCalendar("2011-11-20T10:09Z"));
    // Test some time zone offsets
    String testString1 = "";
    String testString2 = "";
    DateTimeFormatter dateTimeFormatter = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm[:ss][.SSS][XXX]");
    //Test missing seconds
    testString1 = "2015-12-08T12:07:00-05:00";
    testString2 = "2015-12-08T12:07-05:00";
    assertEquals(DateConverter.toCalendar(testString1), DateConverter.toCalendar(testString2));
    assertEquals(DateConverter.toCalendar(testString1).toInstant(), ZonedDateTime.parse(testString1, dateTimeFormatter).toInstant());
    assertEquals(DateConverter.toCalendar(testString2).toInstant(), ZonedDateTime.parse(testString2, dateTimeFormatter).toInstant());
    // Test some time zone offsets
    testString1 = "2015-02-02T16:37:19.192Z";
    testString2 = "2015-02-02T16:37:19.192Z";
    assertEquals(DateConverter.toCalendar(testString2).toInstant(), ZonedDateTime.parse(testString1, dateTimeFormatter).toInstant());
    testString1 = "2015-02-02T16:37:19.192+00:00";
    testString2 = "2015-02-02T16:37:19.192Z";
    assertEquals(DateConverter.toCalendar(testString2).toInstant(), ZonedDateTime.parse(testString1, dateTimeFormatter).toInstant());
    testString1 = "2015-02-02T16:37:19.192+02:00";
    testString2 = "2015-02-02T16:37:19.192+02:00";
    assertEquals(DateConverter.toCalendar(testString2).toInstant(), ZonedDateTime.parse(testString1, dateTimeFormatter).toInstant());
    // Named-zone suffixes: PST and Europe/Berlin should map to the same instants.
    testString1 = "2015-02-02T16:37:19.192Z";
    testString2 = "2015-02-02T08:37:19.192PST";
    assertEquals(DateConverter.toCalendar(testString2).toInstant(), ZonedDateTime.parse(testString1, dateTimeFormatter).toInstant());
    testString1 = "2015-02-02T16:37:19.192+01:00";
    testString2 = "2015-02-02T16:37:19.192Europe/Berlin";
    assertEquals(DateConverter.toCalendar(testString2).toInstant(), ZonedDateTime.parse(testString1, dateTimeFormatter).toInstant());
    // PDFBOX-4902: half-hour TZ
    testString1 = "2015-02-02T16:37:19.192+05:30";
    assertEquals(DateConverter.toCalendar(testString1).toInstant(), ZonedDateTime.parse(testString1, dateTimeFormatter).toInstant());
    testString1 = "2015-02-02T16:37:19.192-05:30";
    assertEquals(DateConverter.toCalendar(testString1).toInstant(), ZonedDateTime.parse(testString1, dateTimeFormatter).toInstant());
    testString1 = "2015-02-02T16:37:19.192+10:30";
    assertEquals(DateConverter.toCalendar(testString1).toInstant(), ZonedDateTime.parse(testString1, dateTimeFormatter).toInstant());
}
/**
 * Applies the requested mastership role for the given node/device pair by
 * delegating to the store, then posts the resulting mastership event.
 * Unknown roles are logged and ignored.
 */
@Override
public CompletableFuture<Void> setRole(NodeId nodeId, DeviceId deviceId, MastershipRole role) {
    checkNotNull(nodeId, NODE_ID_NULL);
    checkNotNull(deviceId, DEVICE_ID_NULL);
    checkNotNull(role, ROLE_NULL);

    final CompletableFuture<MastershipEvent> storeFuture;
    switch (role) {
        case MASTER:
            storeFuture = store.setMaster(nodeId, deviceId);
            break;
        case STANDBY:
            storeFuture = store.setStandby(nodeId, deviceId);
            break;
        case NONE:
            storeFuture = store.relinquishRole(nodeId, deviceId);
            break;
        default:
            log.info("Unknown role; ignoring");
            return CompletableFuture.completedFuture(null);
    }
    // Publish the mastership event before completing the caller's future.
    return storeFuture.thenAccept(this::post).thenApply(v -> null);
}
// Setting mastership for a foreign node must not make the local node master;
// a subsequent local request then promotes the local node.
@Test
public void setRole() {
    mgr.setRole(NID_OTHER, DEV_MASTER, MASTER);
    assertEquals("wrong local role:", NONE, mgr.getLocalRole(DEV_MASTER));
    assertEquals("wrong obtained role:", STANDBY, Futures.getUnchecked(mgr.requestRoleFor(DEV_MASTER)));

    //set to master
    mgr.setRole(NID_LOCAL, DEV_MASTER, MASTER);
    assertEquals("wrong local role:", MASTER, mgr.getLocalRole(DEV_MASTER));
}
public boolean isShortestPossiblePushData() { checkState(isPushData()); if (data == null) return true; // OP_N if (data.length == 0) return opcode == OP_0; if (data.length == 1) { byte b = data[0]; if (b >= 0x01 && b <= 0x10) return opcode == OP_1 + b - 1; if ((b & 0xFF) == 0x81) return opcode == OP_1NEGATE; } if (data.length < OP_PUSHDATA1) return opcode == data.length; if (data.length < 256) return opcode == OP_PUSHDATA1; if (data.length < 65536) return opcode == OP_PUSHDATA2; // can never be used, but implemented for completeness return opcode == OP_PUSHDATA4; }
// Builder-produced pushes must always use the shortest encoding; hand-built
// non-standard chunks must be detected as non-minimal.
@Test
public void testShortestPossibleDataPush() {
    assertTrue("empty push", new ScriptBuilder().data(new byte[0]).build().chunks().get(0)
            .isShortestPossiblePushData());

    for (byte i = -1; i < 127; i++)
        assertTrue("push of single byte " + i, new ScriptBuilder().data(new byte[] { i }).build().chunks()
                .get(0).isShortestPossiblePushData());

    for (int len = 2; len < Script.MAX_SCRIPT_ELEMENT_SIZE; len++)
        assertTrue("push of " + len + " bytes", new ScriptBuilder().data(new byte[len]).build().chunks().get(0)
                .isShortestPossiblePushData());

    // non-standard chunks
    for (byte i = 1; i <= 16; i++)
        assertFalse("push of smallnum " + i, new ScriptChunk(1, new byte[] { i }).isShortestPossiblePushData());
    assertFalse("push of 75 bytes", new ScriptChunk(OP_PUSHDATA1, new byte[75]).isShortestPossiblePushData());
    assertFalse("push of 255 bytes", new ScriptChunk(OP_PUSHDATA2, new byte[255]).isShortestPossiblePushData());
    assertFalse("push of 65535 bytes", new ScriptChunk(OP_PUSHDATA4, new byte[65535]).isShortestPossiblePushData());
}
/**
 * Fetches the event's message backlog and trims it to the configured size.
 * A non-positive configured size (or a null backlog) is passed through as-is.
 */
@VisibleForTesting
List<MessageSummary> getMessageBacklog(EventNotificationContext ctx, TeamsEventNotificationConfigV2 config) {
    final List<MessageSummary> backlog = notificationCallbackService.getBacklogForEvent(ctx);
    if (backlog == null || config.backlogSize() <= 0) {
        return backlog;
    }
    return backlog.stream()
            .limit(config.backlogSize())
            .collect(Collectors.toList());
}
// A per-notification backlogSize of 5 must cap the returned backlog at 5.
@Test
public void testBacklogMessageLimitWhenBacklogSizeIsFive() {
    TeamsEventNotificationConfigV2 config = TeamsEventNotificationConfigV2.builder()
            .backlogSize(5)
            .build();

    // Global setting is at 50 and the message override is 5 then the backlog size = 5
    List<MessageSummary> messageSummaries = teamsEventNotification.getMessageBacklog(eventNotificationContext, config);
    assertThat(messageSummaries.size()).isEqualTo(5);
}
// Delegates to the underlying gauge source for the current long value.
@Override
public long read() {
    return gaugeSource.read();
}
// After the dynamic provider is deregistered and a collection pass runs,
// the gauge must fall back to its default value.
@Test
public void whenNotVisitedWithCachedMetricSourceReadsDefault() {
    SomeObject someObject = new SomeObject();
    someObject.longField = 42;
    metricsRegistry.registerDynamicMetricsProvider(someObject);
    LongGaugeImpl longGauge = metricsRegistry.newLongGauge("foo.longField");

    // needed to collect dynamic metrics and update the gauge created from them
    metricsRegistry.collect(mock(MetricsCollector.class));
    assertEquals(42, longGauge.read());

    // clears the cached metric source
    metricsRegistry.deregisterDynamicMetricsProvider(someObject);
    metricsRegistry.collect(mock(MetricsCollector.class));
    assertEquals(LongGaugeImpl.DEFAULT_VALUE, longGauge.read());
}
// Sends the finished JobExecution to the configured endpoint, tagging the
// message with the AFTER event-type header so consumers can distinguish it
// from before-job events.
@Override
public void afterJob(JobExecution jobExecution) {
    LOG.debug("sending after job execution event [{}]...", jobExecution);
    producerTemplate.sendBodyAndHeader(endpointUri, jobExecution, EventType.HEADER_KEY, EventType.AFTER.name());
    LOG.debug("sent after job execution event");
}
@Test public void shouldSetAfterJobEventHeader() throws Exception { // When jobExecutionListener.afterJob(jobExecution); // Then Exchange beforeJobEvent = consumer().receive("seda:eventQueue"); assertEquals(CamelJobExecutionListener.EventType.AFTER.name(), beforeJobEvent.getIn().getHeader(CamelJobExecutionListener.EventType.HEADER_KEY)); }
@Override public Long createSocialClient(SocialClientSaveReqVO createReqVO) { // 校验重复 validateSocialClientUnique(null, createReqVO.getUserType(), createReqVO.getSocialType()); // 插入 SocialClientDO client = BeanUtils.toBean(createReqVO, SocialClientDO.class); socialClientMapper.insert(client); return client.getId(); }
// Creating a client from a random request must return a generated id and
// persist a record whose fields match the request.
@Test
public void testCreateSocialClient_success() {
    // Prepare the request parameters
    SocialClientSaveReqVO reqVO = randomPojo(SocialClientSaveReqVO.class, o -> o.setSocialType(randomEle(SocialTypeEnum.values()).getType())
            .setUserType(randomEle(UserTypeEnum.values()).getValue())
            .setStatus(randomCommonStatus()))
            .setId(null); // keep id unset so the database assigns it
    // Invoke the service
    Long socialClientId = socialClientService.createSocialClient(reqVO);
    // Assert an id was generated
    assertNotNull(socialClientId);
    // Verify the persisted record's fields match the request (except id)
    SocialClientDO socialClient = socialClientMapper.selectById(socialClientId);
    assertPojoEquals(reqVO, socialClient, "id");
}
// Delegates to the repository to return every stored team.
@Override
public List<BaseballTeam> findAll() {
    return baseballTeamRepository.findAll();
}
// findAll must return both fixture teams with their expected ids and names.
@Test
void findAll_은_모든_구단을_반환한다() {
    // given
    // when
    List<BaseballTeam> teams = baseballTeamReadService.findAll();

    // then
    assertAll(
            () -> assertThat(teams).hasSize(2),
            () -> assertThat(teams)
                    .anyMatch(
                            team -> team.getId().equals(1L) && team.getName().equals("두산 베어스")),
            () -> assertThat(teams)
                    .anyMatch(
                            team -> team.getId().equals(2L) && team.getName().equals("SSG 랜더스")));
}
/**
 * Chooses the outbound observer factory based on pipeline experiments:
 * a buffered factory when "beam_fn_api_buffered_stream" is enabled
 * (honoring an explicit buffer size when one is configured), otherwise a
 * direct factory.
 */
public static OutboundObserverFactory fromOptions(PipelineOptions options) {
    List<String> experiments = options.as(ExperimentalOptions.class).getExperiments();
    boolean buffered = experiments != null && experiments.contains("beam_fn_api_buffered_stream");
    if (!buffered) {
        return OutboundObserverFactory.clientDirect();
    }
    int bufferSize = getBufferSize(experiments);
    if (bufferSize > 0) {
        return OutboundObserverFactory.clientBuffered(
                options.as(ExecutorOptions.class).getScheduledExecutorService(), bufferSize);
    }
    return OutboundObserverFactory.clientBuffered(
            options.as(ExecutorOptions.class).getScheduledExecutorService());
}
// Enabling the buffered-stream experiment must yield a BufferingStreamObserver.
@Test
public void testBufferedStreamInstantiation() {
    StreamObserver<String> observer =
            HarnessStreamObserverFactories.fromOptions(
                            PipelineOptionsFactory.fromArgs(
                                            new String[] {"--experiments=beam_fn_api_buffered_stream"})
                                    .create())
                    .outboundObserverFor(this::fakeFactory, mockRequestObserver);
    assertThat(observer, instanceOf(BufferingStreamObserver.class));
}
public static String toCamelCase(String input) { if (input == null || input.isEmpty()) { return ""; } StringBuilder result = new StringBuilder(); boolean toUpperCase = false; for (int i = 0; i < input.length(); i++) { char c = input.charAt(i); if (c == '_') { // Set flag to uppercase next valid character toUpperCase = true; } else { // Append character either in uppercase or lowercase based on the flag if (toUpperCase) { result.append(Character.toUpperCase(c)); toUpperCase = false; } else { result.append(Character.toLowerCase(c)); } } } return result.toString(); }
// Covers plain snake_case, no-underscore input, double underscores and
// fully upper-case input.
@Test
void toCamelCase() {
    assertEquals("teacherStatics", StringUtils.toCamelCase("teacher_statics"));
    assertEquals("teacherstatics", StringUtils.toCamelCase("teacherstatics"));
    assertEquals("teacherStatics", StringUtils.toCamelCase("teacher__Statics"));
    assertEquals("teacherStatics", StringUtils.toCamelCase("teacher_Statics"));
    assertEquals("teacherStatics", StringUtils.toCamelCase("TEACHER_STATICS"));
}
// Convenience overload: forwards to the full factory with the configured
// files-to-stage (empty list when unset) and the Flink conf directory.
@VisibleForTesting
static StreamExecutionEnvironment createStreamExecutionEnvironment(FlinkPipelineOptions options) {
    return createStreamExecutionEnvironment(
            options,
            MoreObjects.firstNonNull(options.getFilesToStage(), Collections.emptyList()),
            options.getFlinkConfDir());
}
// With no explicit parallelism configured, both the options and the created
// environment must report the default parallelism of 1.
@Test
public void shouldFallbackToDefaultParallelismStreaming() {
    FlinkPipelineOptions options = getDefaultPipelineOptions();
    options.setRunner(TestFlinkRunner.class);
    options.setFlinkMaster("host:80");

    StreamExecutionEnvironment sev =
            FlinkExecutionEnvironments.createStreamExecutionEnvironment(options);

    assertThat(options.getParallelism(), is(1));
    assertThat(sev.getParallelism(), is(1));
}
/**
 * Reads a classpath template as UTF-8 text. Both a missing resource
 * (IllegalArgumentException from getResource) and a read failure
 * (IOException) surface uniformly as IllegalStateException.
 */
String extractTemplate(String templateLocation) {
    try {
        return Resources.toString(Resources.getResource(templateLocation), StandardCharsets.UTF_8);
    } catch (IOException | IllegalArgumentException e) {
        throw new IllegalStateException("Cannot read the template " + templateLocation, e);
    }
}
// A missing classpath resource must surface as IllegalStateException.
@Test
public void extract_nonexistant_template() {
    assertThrows(IllegalStateException.class, () -> underTest.extractTemplate("not-there"));
}
/**
 * Groups the matched rules by Prometheus metric name and converts each group
 * into one metric snapshot in the returned collection.
 */
public static MetricSnapshots convert(List<MatchedRule> matchedRules) {
    Map<String, List<MatchedRule>> rulesByPrometheusMetricName = new HashMap<>();
    for (MatchedRule matchedRule : matchedRules) {
        List<MatchedRule> matchedRulesWithSameName =
                rulesByPrometheusMetricName.computeIfAbsent(
                        matchedRule.name, name -> new ArrayList<>());
        matchedRulesWithSameName.add(matchedRule);
    }
    if (LOGGER.isLoggable(Level.FINE)) {
        rulesByPrometheusMetricName
                .values()
                .forEach(
                        matchedRules1 ->
                                matchedRules1.forEach(
                                        matchedRule ->
                                                // Fix: java.util.logging substitutes
                                                // MessageFormat-style "{0}" placeholders,
                                                // not printf-style "%s" — the rule was
                                                // never interpolated into the message.
                                                LOGGER.log(
                                                        Level.FINE,
                                                        "matchedRule {0}",
                                                        matchedRule)));
    }
    MetricSnapshots.Builder result = MetricSnapshots.builder();
    for (List<MatchedRule> rulesWithSameName : rulesByPrometheusMetricName.values()) {
        result.metricSnapshot(convertRulesWithSameName(rulesWithSameName));
    }
    return result.build();
}
// Two matched rules sharing one metric name must collapse into a single
// snapshot carrying two data points.
@Test
public void testMatchedRuleAggregation() {
    List<MatchedRule> matchedRules = new ArrayList<>();
    matchedRules.add(
            new MatchedRule(
                    "jvm_memory_committed_bytes",
                    "java.lang<type=Memory><HeapMemoryUsage>committed: 16252928",
                    "UNKNOWN",
                    "java.lang.management.MemoryUsage"
                            + " java.lang:name=null,type=Memory,attribute=committed",
                    of("area"),
                    of("heap"),
                    1.6252928E7,
                    1.0));
    matchedRules.add(
            new MatchedRule(
                    "jvm_memory_committed_bytes",
                    "java.lang<type=Memory><NonHeapMemoryUsage>committed: 17170432",
                    "UNKNOWN",
                    "java.lang.management.MemoryUsage"
                            + " java.lang:name=null,type=Memory,attribute=committed",
                    of("area"),
                    of("nonheap"),
                    2.1757952E7,
                    1.0));

    MetricSnapshots metricSnapshots = MatchedRuleToMetricSnapshotsConverter.convert(matchedRules);
    assertThat(metricSnapshots).hasSize(1);
    metricSnapshots.forEach(
            metricSnapshot -> {
                MetricMetadata metricMetadata = metricSnapshot.getMetadata();
                assertThat(metricMetadata.getName()).isEqualTo("jvm_memory_committed_bytes");
                assertThat(metricMetadata.getPrometheusName())
                        .isEqualTo("jvm_memory_committed_bytes");
                List<? extends DataPointSnapshot> dataPointSnapshots = metricSnapshot.getDataPoints();
                assertThat(dataPointSnapshots).hasSize(2);
            });
}
/**
 * Resolves the server URI from the first configured listener. An empty
 * listener list or an unparseable address raises ConfigException.
 */
public static URI getServerAddress(final KsqlRestConfig restConfig) {
    final List<String> listeners = restConfig.getList(KsqlRestConfig.LISTENERS_CONFIG);
    if (listeners.isEmpty()) {
        throw new ConfigException(KsqlRestConfig.LISTENERS_CONFIG, listeners, "value cannot be empty");
    }
    final String address = listeners.get(0).trim();
    try {
        return new URL(address).toURI();
    } catch (final Exception e) {
        throw new ConfigException(KsqlRestConfig.LISTENERS_CONFIG, listeners, e.getMessage());
    }
}
// An unparseable listener address must raise ConfigException.
@Test(expected = ConfigException.class)
public void shouldThrowConfigExceptionIfInvalidServerAddress() {
    // Given:
    final KsqlRestConfig restConfig = new KsqlRestConfig(
            Collections.singletonMap(KsqlRestConfig.LISTENERS_CONFIG, "invalid"));

    // Then:
    ServerUtil.getServerAddress(restConfig);
}
// Compiles the supplied CEL filter and evaluates it against a synthetic
// message built from the request fields. Compilation and execution failures
// are reported in the result DTO's error field rather than thrown.
public static SmartFilterTestExecutionResultDTO execSmartFilterTest(SmartFilterTestExecutionDTO execData) {
    Predicate<TopicMessageDTO> predicate;
    try {
        predicate = MessageFilters.celScriptFilter(execData.getFilterCode());
    } catch (Exception e) {
        log.info("Smart filter '{}' compilation error", execData.getFilterCode(), e);
        return new SmartFilterTestExecutionResultDTO()
                .error("Compilation error : " + e.getMessage());
    }
    try {
        // Assemble a message mirroring the request; the epoch-millis timestamp
        // (when present) is converted to a UTC OffsetDateTime.
        var result = predicate.test(
                new TopicMessageDTO()
                        .key(execData.getKey())
                        .content(execData.getValue())
                        .headers(execData.getHeaders())
                        .offset(execData.getOffset())
                        .partition(execData.getPartition())
                        .timestamp(
                                Optional.ofNullable(execData.getTimestampMs())
                                        .map(ts -> OffsetDateTime.ofInstant(Instant.ofEpochMilli(ts), ZoneOffset.UTC))
                                        .orElse(null))
        );
        return new SmartFilterTestExecutionResultDTO()
                .result(result);
    } catch (Exception e) {
        log.info("Smart filter {} execution error", execData, e);
        return new SmartFilterTestExecutionResultDTO()
                .error("Execution error : " + e.getMessage());
    }
}
// Invalid CEL syntax must yield a compilation-error result, not a thrown exception.
@Test
void execSmartFilterTestReturnsErrorOnFilterCompilationError() {
    var result = execSmartFilterTest(
            new SmartFilterTestExecutionDTO()
                    .filterCode("this is invalid CEL syntax = 1")
    );
    assertThat(result.getResult()).isNull();
    assertThat(result.getError()).containsIgnoringCase("Compilation error");
}
/**
 * Splits the input into individual statements on statement and block
 * delimiters (';', newline, '{', '}'), dropping empty fragments and
 * line comments.
 */
public static List<String> splitStatementsAcrossBlocks(CharSequence string) {
    return codeAwareSplitOnChar(string, false, true, ';', '\n', '{', '}').stream()
            .filter(stmt -> !stmt.isEmpty() && !stmt.startsWith("//"))
            .collect(Collectors.toList());
}
// An if-block must split into its header plus the enclosed statements,
// with braces and semicolons stripped.
@Test
public void splitStatementsAcrossBlocksIf() {
    String text =
            "if (true) {\n" +
            "  $fact.value1 = 2;\n" +
            "  drools.update($fact);\n" +
            "}";
    List<String> statements = splitStatementsAcrossBlocks(text);
    assertThat(statements.get(0)).isEqualTo("if (true)");
    assertThat(statements.get(1)).isEqualTo("$fact.value1 = 2");
    assertThat(statements.get(2)).isEqualTo("drools.update($fact)");
}
/**
 * Compiles a native SQL fragment into a SpEL-backed evaluator. Positional
 * '?' placeholders are rewritten to named variables _arg0, _arg1, ... which
 * are bound from the args array at evaluation time. Evaluation failures are
 * logged and yield null; after more than 1024 consecutive failures the
 * evaluator short-circuits to null without evaluating.
 */
@Override
protected Function3<EntityColumnMapping, Object[], Map<String, Object>, Object> compile(String sql) {
    // Rewrite '?' placeholders into named SpEL variables (_arg0, _arg1, ...).
    StringBuilder builder = new StringBuilder(sql.length());
    int argIndex = 0;
    for (int i = 0; i < sql.length(); i++) {
        char c = sql.charAt(i);
        if (c == '?') {
            builder.append("_arg").append(argIndex++);
        } else {
            builder.append(c);
        }
    }
    try {
        SpelExpressionParser parser = new SpelExpressionParser();
        Expression expression = parser.parseExpression(builder.toString());
        AtomicLong errorCount = new AtomicLong();
        return (mapping, args, object) -> {
            // Circuit breaker: stop evaluating after repeated failures.
            if (errorCount.get() > 1024) {
                return null;
            }
            object = createArguments(mapping, object);
            if (args != null && args.length != 0) {
                int index = 0;
                for (Object parameter : args) {
                    // Fix: the index was never incremented, so every positional
                    // argument overwrote "_arg0" and _arg1..n were left unbound.
                    object.put("_arg" + index++, parameter);
                }
            }
            StandardEvaluationContext context = SHARED_CONTEXT.get();
            try {
                context.setRootObject(object);
                Object val = expression.getValue(context);
                // A successful evaluation resets the failure counter.
                errorCount.set(0);
                return val;
            } catch (Throwable err) {
                log.warn("invoke native sql [{}] value error", sql, err);
                errorCount.incrementAndGet();
            } finally {
                // The context is shared; always detach the root object.
                context.setRootObject(null);
            }
            return null;
        };
    } catch (Throwable error) {
        return spelError(sql, error);
    }
}
// coalesce(name, ?) must fall back to the bound positional argument when
// "name" is absent, and prefer "name" when it is present.
@Test
void testFunction() {
    SpelSqlExpressionInvoker invoker = new SpelSqlExpressionInvoker();
    EntityColumnMapping mapping = Mockito.mock(EntityColumnMapping.class);
    Function3<EntityColumnMapping, Object[], Map<String, Object>, Object> func = invoker.compile("coalesce(name,?)");
    // name is absent -> the positional argument (bound as _arg0) wins
    assertEquals(2, func.apply(mapping, new Object[]{2}, Collections.emptyMap()));
    // name is present -> it wins; a null args array must be tolerated
    assertEquals(3, func.apply(mapping, null, Collections.singletonMap("name", 3)));
}
// Looks up (or lazily creates) the state cell for the given namespace and
// tag, using a null state context.
@Override
public <T extends State> T state(StateNamespace namespace, StateTag<T> address) {
    return workItemState.get(namespace, address, StateContexts.nullContext());
}
// putIfAbsent must return the existing value and leave it unchanged, both for
// locally-written entries and for entries resolved from the backing store.
@Test
public void testMapPutIfAbsentFails() throws Exception {
    StateTag<MapState<String, Integer>> addr =
            StateTags.map("map", StringUtf8Coder.of(), VarIntCoder.of());
    MapState<String, Integer> mapState = underTest.state(NAMESPACE, addr);

    final String tag1 = "tag1";
    mapState.put(tag1, 1);
    // tag1 already written locally: putIfAbsent is a no-op returning 1.
    assertEquals(1, (int) mapState.putIfAbsent(tag1, 42).read());
    assertEquals(1, (int) mapState.get(tag1).read());

    final String tag2 = "tag2";
    SettableFuture<Integer> future = SettableFuture.create();
    when(mockReader.valueFuture(
            protoKeyFromUserKey(tag2, StringUtf8Coder.of()), STATE_FAMILY, VarIntCoder.of()))
            .thenReturn(future);
    // tag2 exists in the backing store (value 2 arrives asynchronously):
    // putIfAbsent must still surface the stored value, not 42.
    waitAndSet(future, 2, 50);
    assertEquals(2, (int) mapState.putIfAbsent(tag2, 42).read());
    assertEquals(2, (int) mapState.get(tag2).read());
}
// Factory wrapper around the FlagSet constructor.
public static <E extends Enum<E>> FlagSet<E> createFlagSet(
        final Class<E> enumClass,
        final String prefix,
        final EnumSet<E> flags) {
    return new FlagSet<>(enumClass, prefix, flags);
}
// A null enum class must be rejected with NullPointerException.
@Test
public void testCreateNullEnumClass() throws Throwable {
    intercept(NullPointerException.class, () ->
            createFlagSet(null, KEYDOT, SimpleEnum.a));
}
// Invokes the method reflectively, translating checked reflection exceptions
// into unchecked ones: the target's own exception becomes
// InvocationTargetRuntimeException, access failures become UtilException.
public static <T> T invoke(Object obj, Method method, Object... args) throws InvocationTargetRuntimeException, UtilException {
    try {
        return invokeRaw(obj, method, args);
    } catch (InvocationTargetException e) {
        throw new InvocationTargetRuntimeException(e);
    } catch (IllegalAccessException e) {
        throw new UtilException(e);
    }
}
// Invoking the setter by name must update the target object's field.
@Test
public void invokeTest() {
    final TestClass testClass = new TestClass();
    ReflectUtil.invoke(testClass, "setA", 10);
    assertEquals(10, testClass.getA());
}
// Binds every environment property under the given prefix into a Properties
// object via the Spring binder.
public static Properties getPropertiesWithPrefix(Environment environment, String prefix) {
    return handleSpringBinder(environment, prefix, Properties.class);
}
// The fixture environment holds three properties under "nacos.prefix".
@Test
void testGetPropertiesWithPrefix() {
    Properties actual = PropertiesUtil.getPropertiesWithPrefix(environment, "nacos.prefix");
    assertEquals(3, actual.size());
}
// Forwards the request to the operator coordinator when the scheduler is in
// a state that has an execution graph; otherwise completes exceptionally
// with a FlinkException reporting the missing coordinator.
@Override
public CompletableFuture<CoordinationResponse> deliverCoordinationRequestToCoordinator(
        OperatorID operator, CoordinationRequest request) throws FlinkException {
    return state.tryCall(
                    StateWithExecutionGraph.class,
                    stateWithExecutionGraph ->
                            stateWithExecutionGraph.deliverCoordinationRequestToCoordinator(
                                    operator, request),
                    "deliverCoordinationRequestToCoordinator")
            .orElseGet(
                    () ->
                            FutureUtils.completedExceptionally(
                                    new FlinkException(
                                            "Coordinator of operator "
                                                    + operator
                                                    + " does not exist")));
}
// A freshly built scheduler (no execution graph yet) must fail the request
// with a FlinkException cause.
@Test
void testDeliverCoordinationRequestToCoordinatorFailsInIllegalState() throws Exception {
    final AdaptiveScheduler scheduler =
            new AdaptiveSchedulerBuilder(
                            createJobGraph(), mainThreadExecutor, EXECUTOR_RESOURCE.getExecutor())
                    .build();

    assertThatFuture(
                    scheduler.deliverCoordinationRequestToCoordinator(
                            new OperatorID(), new CoordinationRequest() {}))
            .eventuallyFailsWith(ExecutionException.class)
            .withCauseInstanceOf(FlinkException.class);
}
// Renders a logging event as a log4j-compatible XML <log4j:event> element:
// message, optional throwable stack trace, optional caller location, and
// optional MDC properties. Output strings are emitted verbatim (including
// the \r\n line endings) for log4j tooling compatibility.
public String doLayout(ILoggingEvent event) {
    // Reset working buffer. If the buffer is too large, then we need a new
    // one in order to avoid the penalty of creating a large array.
    if (buf.capacity() > UPPER_LIMIT) {
        buf = new StringBuilder(DEFAULT_SIZE);
    } else {
        buf.setLength(0);
    }
    // We yield to the \r\n heresy.
    buf.append("<log4j:event logger=\"");
    buf.append(Transform.escapeTags(event.getLoggerName()));
    buf.append("\"\r\n");
    buf.append(" timestamp=\"");
    buf.append(event.getTimeStamp());
    buf.append("\" level=\"");
    buf.append(event.getLevel());
    buf.append("\" thread=\"");
    buf.append(Transform.escapeTags(event.getThreadName()));
    buf.append("\">\r\n");
    buf.append(" <log4j:message>");
    buf.append(Transform.escapeTags(event.getFormattedMessage()));
    buf.append("</log4j:message>\r\n");
    // logback does not support NDC
    // String ndc = event.getNDC();
    IThrowableProxy tp = event.getThrowableProxy();
    if (tp != null) {
        // Emit the proxied stack trace inside a CDATA section.
        StackTraceElementProxy[] stepArray = tp.getStackTraceElementProxyArray();
        buf.append(" <log4j:throwable><![CDATA[");
        for (StackTraceElementProxy step : stepArray) {
            buf.append(CoreConstants.TAB);
            buf.append(step.toString());
            buf.append("\r\n");
        }
        buf.append("]]></log4j:throwable>\r\n");
    }
    if (locationInfo) {
        // Caller location is only available when caller data was captured.
        StackTraceElement[] callerDataArray = event.getCallerData();
        if (callerDataArray != null && callerDataArray.length > 0) {
            StackTraceElement immediateCallerData = callerDataArray[0];
            buf.append(" <log4j:locationInfo class=\"");
            buf.append(immediateCallerData.getClassName());
            buf.append("\"\r\n");
            buf.append(" method=\"");
            buf.append(Transform.escapeTags(immediateCallerData.getMethodName()));
            buf.append("\" file=\"");
            buf.append(Transform.escapeTags(immediateCallerData.getFileName()));
            buf.append("\" line=\"");
            buf.append(immediateCallerData.getLineNumber());
            buf.append("\"/>\r\n");
        }
    }
    /*
     * <log4j:properties> <log4j:data name="name" value="value"/>
     * </log4j:properties>
     */
    if (this.getProperties()) {
        Map<String, String> propertyMap = event.getMDCPropertyMap();
        if ((propertyMap != null) && (propertyMap.size() != 0)) {
            Set<Entry<String, String>> entrySet = propertyMap.entrySet();
            buf.append(" <log4j:properties>");
            for (Entry<String, String> entry : entrySet) {
                buf.append("\r\n <log4j:data");
                buf.append(" name=\"" + Transform.escapeTags(entry.getKey()) + "\"");
                buf.append(" value=\"" + Transform.escapeTags(entry.getValue()) + "\"");
                buf.append(" />");
            }
            buf.append("\r\n </log4j:properties>");
        }
    }
    buf.append("\r\n</log4j:event>\r\n\r\n");
    return buf.toString();
}
// Wraps the layout output in an eventSet document, parses it as XML, and
// verifies the event element, the message text and the MDC property entry.
@Test
public void testDoLayout() throws Exception {
    ILoggingEvent le = createLoggingEvent();
    String result = DOCTYPE + "<log4j:eventSet xmlns:log4j='http://jakarta.apache.org/log4j/'>";
    if (layout.getFileHeader() != null) {
        result += layout.getFileHeader();
    }
    if (layout.getPresentationHeader() != null) {
        result += layout.getPresentationHeader();
    }
    result += layout.doLayout(le);
    if (layout.getPresentationFooter() != null) {
        result += layout.getPresentationFooter();
    }
    if (layout.getFileFooter() != null) {
        result += layout.getFileFooter();
    }
    result += "</log4j:eventSet>";
    Document document = parse(result);
    XPath xpath = this.newXPath();
    // Test log4j:event:
    NodeList eventNodes = (NodeList) xpath.compile("//log4j:event").evaluate(document, XPathConstants.NODESET);
    Assert.assertEquals(1, eventNodes.getLength());
    // Test log4g:message:
    Assert.assertEquals(MESSAGE, xpath.compile("//log4j:message").evaluate(document, XPathConstants.STRING));
    // Test log4j:data:
    NodeList dataNodes = (NodeList) xpath.compile("//log4j:data").evaluate(document, XPathConstants.NODESET);
    boolean foundMdc = false;
    for (int i = 0; i != dataNodes.getLength(); ++i) {
        Node dataNode = dataNodes.item(i);
        if (dataNode.getAttributes().getNamedItem("name").getNodeValue().equals(MDC_KEY)) {
            foundMdc = true;
            Assert.assertEquals(MDC_VALUE, dataNode.getAttributes().getNamedItem("value").getNodeValue());
            break;
        }
    }
    Assert.assertTrue(foundMdc);
}
// Enqueues a play request into the circular pending buffer. If the same
// stream is already pending, its request is merged by keeping the louder
// volume instead of enqueuing a duplicate.
public void playSound(AudioInputStream stream, float volume) {
    init();
    // Walk the pending requests.
    for (var i = headIndex; i != tailIndex; i = (i + 1) % MAX_PENDING) {
        var playMessage = getPendingAudio()[i];
        if (playMessage.getStream() == stream) {
            // Use the larger of the two volumes.
            playMessage.setVolume(Math.max(volume, playMessage.getVolume()));

            // Don't need to enqueue.
            return;
        }
    }
    // Append at the tail of the ring buffer (wraps modulo MAX_PENDING).
    getPendingAudio()[tailIndex] = new PlayMessage(stream, volume);
    tailIndex = (tailIndex + 1) % MAX_PENDING;
}
// Playing a sound must start the audio service; stopping it afterwards must
// report the service as no longer running.
@Test
void testPlaySound() throws UnsupportedAudioFileException, IOException, InterruptedException {
    audio.playSound(audio.getAudioStream("./etc/Bass-Drum-1.wav"), -10.0f);
    // test that service is started
    assertTrue(audio.isServiceRunning());
    // adding a small pause to be sure that the sound is ended
    Thread.sleep(5000);
    audio.stopService();
    // test that service is finished
    assertFalse(audio.isServiceRunning());
}
// Maps a binlog event-type code to its human-readable name, mirroring the
// names used by MySQL tooling. Unrecognized codes yield "Unknown type:<n>".
public static String getTypeName(final int type) {
    switch (type) {
        case START_EVENT_V3:
            return "Start_v3";
        case STOP_EVENT:
            return "Stop";
        case QUERY_EVENT:
            return "Query";
        case ROTATE_EVENT:
            return "Rotate";
        case INTVAR_EVENT:
            return "Intvar";
        case LOAD_EVENT:
            return "Load";
        case NEW_LOAD_EVENT:
            return "New_load";
        case SLAVE_EVENT:
            return "Slave";
        case CREATE_FILE_EVENT:
            return "Create_file";
        case APPEND_BLOCK_EVENT:
            return "Append_block";
        case DELETE_FILE_EVENT:
            return "Delete_file";
        case EXEC_LOAD_EVENT:
            return "Exec_load";
        case RAND_EVENT:
            return "RAND";
        case XID_EVENT:
            return "Xid";
        case USER_VAR_EVENT:
            return "User var";
        case FORMAT_DESCRIPTION_EVENT:
            return "Format_desc";
        case TABLE_MAP_EVENT:
            return "Table_map";
        case PRE_GA_WRITE_ROWS_EVENT:
            return "Write_rows_event_old";
        case PRE_GA_UPDATE_ROWS_EVENT:
            return "Update_rows_event_old";
        case PRE_GA_DELETE_ROWS_EVENT:
            return "Delete_rows_event_old";
        case WRITE_ROWS_EVENT_V1:
            return "Write_rows_v1";
        case UPDATE_ROWS_EVENT_V1:
            return "Update_rows_v1";
        case DELETE_ROWS_EVENT_V1:
            return "Delete_rows_v1";
        case BEGIN_LOAD_QUERY_EVENT:
            return "Begin_load_query";
        case EXECUTE_LOAD_QUERY_EVENT:
            return "Execute_load_query";
        case INCIDENT_EVENT:
            return "Incident";
        // Both heartbeat variants share one display name.
        case HEARTBEAT_LOG_EVENT:
        case HEARTBEAT_LOG_EVENT_V2:
            return "Heartbeat";
        case IGNORABLE_LOG_EVENT:
            return "Ignorable";
        case ROWS_QUERY_LOG_EVENT:
            return "Rows_query";
        case WRITE_ROWS_EVENT:
            return "Write_rows";
        case UPDATE_ROWS_EVENT:
            return "Update_rows";
        case DELETE_ROWS_EVENT:
            return "Delete_rows";
        case GTID_LOG_EVENT:
            return "Gtid";
        case ANONYMOUS_GTID_LOG_EVENT:
            return "Anonymous_Gtid";
        case PREVIOUS_GTIDS_LOG_EVENT:
            return "Previous_gtids";
        case PARTIAL_UPDATE_ROWS_EVENT:
            return "Update_rows_partial";
        case TRANSACTION_CONTEXT_EVENT :
            return "Transaction_context";
        case VIEW_CHANGE_EVENT :
            return "view_change";
        case XA_PREPARE_LOG_EVENT :
            return "Xa_prepare";
        case TRANSACTION_PAYLOAD_EVENT :
            return "transaction_payload";
        default:
            return "Unknown type:" + type;
    }
}
// Event-type code 4 must map to "Rotate".
@Test
public void getTypeNameInputPositiveOutputNotNull10() {
    // Arrange
    final int type = 4;

    // Act
    final String actual = LogEvent.getTypeName(type);

    // Assert result
    Assert.assertEquals("Rotate", actual);
}
// Adds the value unless already present; nulls are rejected up front with
// NullPointerException.
@Override
public boolean add(T newValue) {
    Objects.requireNonNull(newValue);
    return snapshottableAddUnlessPresent(new TimelineHashSetEntry<>(newValue));
}
@Test public void testNullsForbidden() { SnapshotRegistry registry = new SnapshotRegistry(new LogContext()); TimelineHashSet<String> set = new TimelineHashSet<>(registry, 1); assertThrows(NullPointerException.class, () -> set.add(null)); }
@Override public String builder(final String paramName, final ServerWebExchange exchange) { return RequestUrlUtils.getRewrittenRawPath(exchange); }
@Test public void testBuilder() { assertEquals("/uri/path", uriParameterData.builder(null, exchange)); }
public boolean hasGrantFor(GRN grantee, Capability capability, GRN target) { return db.findOne(DBQuery.and( DBQuery.is(GrantDTO.FIELD_GRANTEE, grantee), DBQuery.is(GrantDTO.FIELD_CAPABILITY, capability), DBQuery.is(GrantDTO.FIELD_TARGET, target) )) != null; }
@Test @MongoDBFixtures("grants.json") public void hasGrantFor() { final GRN jane = grnRegistry.parse("grn::::user:jane"); final GRN dashboard1 = grnRegistry.parse("grn::::dashboard:54e3deadbeefdeadbeef0000"); final GRN stream1 = grnRegistry.parse("grn::::stream:54e3deadbeefdeadbeef0000"); final GRN stream2 = grnRegistry.parse("grn::::stream:54e3deadbeefdeadbeef0001"); assertThat(dbService.hasGrantFor(jane, Capability.VIEW, stream1)).isTrue(); assertThat(dbService.hasGrantFor(jane, Capability.MANAGE, stream2)).isTrue(); assertThat(dbService.hasGrantFor(jane, Capability.OWN, dashboard1)).isTrue(); assertThat(dbService.hasGrantFor(jane, Capability.MANAGE, stream1)).isFalse(); assertThat(dbService.hasGrantFor(jane, Capability.VIEW, dashboard1)).isFalse(); }
@Override public ClusterClientProvider<String> deployApplicationCluster( final ClusterSpecification clusterSpecification, final ApplicationConfiguration applicationConfiguration) throws ClusterDeploymentException { if (client.getService(ExternalServiceDecorator.getExternalServiceName(clusterId)) .isPresent()) { throw new ClusterDeploymentException( "The Flink cluster " + clusterId + " already exists."); } checkNotNull(clusterSpecification); checkNotNull(applicationConfiguration); final KubernetesDeploymentTarget deploymentTarget = KubernetesDeploymentTarget.fromConfig(flinkConfig); if (KubernetesDeploymentTarget.APPLICATION != deploymentTarget) { throw new ClusterDeploymentException( "Couldn't deploy Kubernetes Application Cluster." + " Expected deployment.target=" + KubernetesDeploymentTarget.APPLICATION.getName() + " but actual one was \"" + deploymentTarget + "\""); } applicationConfiguration.applyToConfiguration(flinkConfig); // No need to do pipelineJars validation if it is a PyFlink job. if (!(PackagedProgramUtils.isPython(applicationConfiguration.getApplicationClassName()) || PackagedProgramUtils.isPython(applicationConfiguration.getProgramArguments()))) { final List<URI> pipelineJars = KubernetesUtils.checkJarFileForApplicationMode(flinkConfig); Preconditions.checkArgument(pipelineJars.size() == 1, "Should only have one jar"); } try { artifactUploader.uploadAll(flinkConfig); } catch (Exception ex) { throw new ClusterDeploymentException(ex); } final ClusterClientProvider<String> clusterClientProvider = deployClusterInternal( KubernetesApplicationClusterEntrypoint.class.getName(), clusterSpecification, false); try (ClusterClient<String> clusterClient = clusterClientProvider.getClusterClient()) { LOG.info( "Create flink application cluster {} successfully, JobManager Web Interface: {}", clusterId, clusterClient.getWebInterfaceURL()); } return clusterClientProvider; }
@Test void testDeployApplicationCluster() { flinkConfig.set( PipelineOptions.JARS, Collections.singletonList("local:///path/of/user.jar")); flinkConfig.set(DeploymentOptions.TARGET, KubernetesDeploymentTarget.APPLICATION.getName()); try { descriptor.deployApplicationCluster(clusterSpecification, appConfig); } catch (Exception ignored) { } mockExpectedServiceFromServerSide(loadBalancerSvc); final ClusterClient<String> clusterClient = descriptor.retrieve(CLUSTER_ID).getClusterClient(); checkClusterClient(clusterClient); checkUpdatedConfigAndResourceSetting(); }
public static String schemaToPdl(DataSchema schema, EncodingStyle encodingStyle) { StringWriter writer = new StringWriter(); SchemaToPdlEncoder encoder = new SchemaToPdlEncoder(writer); encoder.setEncodingStyle(encodingStyle); try { encoder.encode(schema); } catch (IOException e) { throw new IllegalStateException(e); } return writer.toString(); }
@Test public void testNamespaceAndCommentsOnNestedSchemas() throws IOException { String inputSchema = String.join("\n", "namespace com.linkedin.test.RecordDataSchema", "/**", "* some schema doc string", "*/", "record A {", "", " b:", " {", " namespace com.linkedin.test.RecordDataSchema.A", " /**", " * some inner schema doc string", " */", " record B {", " b1: string", "", " }", " }", "}"); DataSchema schema = TestUtil.dataSchemaFromPdlString(inputSchema); String indentedSchema = SchemaToPdlEncoder.schemaToPdl(schema, SchemaToPdlEncoder.EncodingStyle.INDENTED); assertEquals(String.join("\n", "namespace com.linkedin.test.RecordDataSchema", "", "/**", " * some schema doc string", " */", "record A {", "", " b: ", " {", " namespace com.linkedin.test.RecordDataSchema.A", " /**", " * some inner schema doc string", " */", " record B {", " b1: string", " }", " }", "}"), indentedSchema); }
public static synchronized void join(Mapper mapper) { if (Objects.isNull(mapper)) { return; } putMapper(mapper); LOGGER.info("[MapperManager] join successfully."); }
@Test void testJoin() { MapperManager.join(new AbstractMapperByMysql() { @Override public String getTableName() { return "test"; } @Override public String getDataSource() { return DataSourceConstant.MYSQL; } }); MapperManager instance = MapperManager.instance(false); Mapper mapper = instance.findMapper(DataSourceConstant.MYSQL, "test"); assertNotNull(mapper); }
@Override public void onGrantLeadership(UUID leaderSessionID) { runInLeaderEventThread( LEADER_ACQUISITION_EVENT_LOG_NAME, () -> onGrantLeadershipInternal(leaderSessionID)); }
@Test void testOnGrantLeadershipAsyncDoesNotBlock() throws Exception { testNonBlockingCall( latch -> TestingGenericLeaderContender.newBuilder() .setGrantLeadershipConsumer( ignoredSessionID -> latch.awaitQuietly()) .build(), (leadershipGranted, listener) -> { leadershipGranted.set(true); listener.onGrantLeadership(UUID.randomUUID()); }); }
@Override public int calculateCurrentConcurrentTaskNum() throws MetaNotFoundException { SystemInfoService systemInfoService = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo(); // TODO: need to refactor after be split into cn + dn int aliveNodeNum = systemInfoService.getAliveBackendNumber(); if (RunMode.isSharedDataMode()) { aliveNodeNum = 0; List<Long> computeIds = GlobalStateMgr.getCurrentState().getWarehouseMgr().getAllComputeNodeIds(warehouseId); for (long nodeId : computeIds) { ComputeNode node = GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo().getBackendOrComputeNode(nodeId); if (node != null && node.isAlive()) { ++aliveNodeNum; } } } int partitionNum = currentKafkaPartitions.size(); if (partitionNum == 0) { // In non-stop states (NEED_SCHEDULE/RUNNING), having `partitionNum` as 0 is equivalent // to `currentKafkaPartitions` being uninitialized. When `currentKafkaPartitions` is // uninitialized, it indicates that the job has just been created and hasn't been scheduled yet. // At this point, the user-specified number of partitions is used. partitionNum = customKafkaPartitions.size(); if (partitionNum == 0) { // If the user hasn't specified partition information, then we no longer take the `partition` // variable into account when calculating concurrency. partitionNum = Integer.MAX_VALUE; } } if (desireTaskConcurrentNum == 0) { desireTaskConcurrentNum = Config.max_routine_load_task_concurrent_num; } LOG.debug("current concurrent task number is min" + "(partition num: {}, desire task concurrent num: {}, alive be num: {}, config: {})", partitionNum, desireTaskConcurrentNum, aliveNodeNum, Config.max_routine_load_task_concurrent_num); currentTaskConcurrentNum = Math.min(Math.min(partitionNum, Math.min(desireTaskConcurrentNum, aliveNodeNum)), Config.max_routine_load_task_concurrent_num); return currentTaskConcurrentNum; }
@Test public void testBeNumMin(@Mocked GlobalStateMgr globalStateMgr, @Mocked SystemInfoService systemInfoService, @Mocked Database database, @Mocked RoutineLoadDesc routineLoadDesc) throws MetaNotFoundException { List<Integer> partitionList1 = Lists.newArrayList(1, 2); List<Integer> partitionList2 = Lists.newArrayList(1, 2, 3); List<Integer> partitionList3 = Lists.newArrayList(1, 2, 3, 4); List<Integer> partitionList4 = Lists.newArrayList(1, 2, 3, 4, 5, 6, 7); List<Long> beIds1 = Lists.newArrayList(1L); List<Long> beIds2 = Lists.newArrayList(1L, 2L, 3L, 4L); new Expectations() { { GlobalStateMgr.getCurrentState().getNodeMgr().getClusterInfo(); minTimes = 0; result = systemInfoService; systemInfoService.getBackendIds(true); result = beIds2; minTimes = 0; systemInfoService.getAliveBackendNumber(); result = beIds2.size(); minTimes = 0; systemInfoService.getTotalBackendNumber(); result = beIds2.size(); minTimes = 0; } }; // 3 partitions, 4 be RoutineLoadJob routineLoadJob = new KafkaRoutineLoadJob(1L, "kafka_routine_load_job", 1L, 1L, "127.0.0.1:9020", "topic1"); Deencapsulation.setField(routineLoadJob, "currentKafkaPartitions", partitionList2); Assert.assertEquals(3, routineLoadJob.calculateCurrentConcurrentTaskNum()); // 4 partitions, 4 be routineLoadJob = new KafkaRoutineLoadJob(1L, "kafka_routine_load_job", 1L, 1L, "127.0.0.1:9020", "topic1"); Deencapsulation.setField(routineLoadJob, "currentKafkaPartitions", partitionList3); Assert.assertEquals(4, routineLoadJob.calculateCurrentConcurrentTaskNum()); // 7 partitions, 4 be routineLoadJob = new KafkaRoutineLoadJob(1L, "kafka_routine_load_job", 1L, 1L, "127.0.0.1:9020", "topic1"); Deencapsulation.setField(routineLoadJob, "currentKafkaPartitions", partitionList4); Assert.assertEquals(4, routineLoadJob.calculateCurrentConcurrentTaskNum()); }
public static Cli build( final Long streamedQueryRowLimit, final Long streamedQueryTimeoutMs, final OutputFormat outputFormat, final KsqlRestClient restClient ) { final Console console = Console.build(outputFormat); return new Cli(streamedQueryRowLimit, streamedQueryTimeoutMs, restClient, console); }
@Test public void testSelectProject() { final Map<GenericKey, GenericRow> expectedResults = ImmutableMap .<GenericKey, GenericRow>builder() .put(genericKey("ORDER_1"), genericRow( "ITEM_1", 10.0, ImmutableList.of(100.0, 110.99, 90.0))) .put(genericKey("ORDER_2"), genericRow( "ITEM_2", 20.0, ImmutableList.of(10.0, 10.99, 9.0))) .put(genericKey("ORDER_3"), genericRow( "ITEM_3", 30.0, ImmutableList.of(10.0, 10.99, 91.0))) .put(genericKey("ORDER_4"), genericRow( "ITEM_4", 40.0, ImmutableList.of(10.0, 140.99, 94.0))) .put(genericKey("ORDER_5"), genericRow( "ITEM_5", 50.0, ImmutableList.of(160.0, 160.99, 98.0))) .put(genericKey("ORDER_6"), genericRow( "ITEM_8", 80.0, ImmutableList.of(1100.0, 1110.99, 970.0))) .build(); final PhysicalSchema resultSchema = PhysicalSchema.from( LogicalSchema.builder() .keyColumn(ColumnName.of("ORDERID"), SqlTypes.STRING) .valueColumn(ColumnName.of("ITEMID"), SqlTypes.STRING) .valueColumn(ColumnName.of("ORDERUNITS"), SqlTypes.DOUBLE) .valueColumn(ColumnName.of("PRICEARRAY"), SqlTypes.array(SqlTypes.DOUBLE)) .build(), SerdeFeatures.of(), SerdeFeatures.of() ); testCreateStreamAsSelect( "SELECT ORDERID, ITEMID, ORDERUNITS, PRICEARRAY " + "FROM " + ORDER_DATA_PROVIDER.sourceName() + ";", resultSchema, expectedResults ); }
String getProviderURL(LdapName baseDN) throws NamingException { StringBuffer ldapURL = new StringBuffer(); try { for ( String host : hosts ) { // Create a correctly-encoded ldap URL for the PROVIDER_URL final URI uri = new URI(sslEnabled ? "ldaps" : "ldap", null, host, port, "/" + baseDN.toString(), null, null); ldapURL.append(uri.toASCIIString()); ldapURL.append(" "); } return ldapURL.toString().trim(); } catch ( Exception e ) { Log.error( "Unable to generate provider URL for baseDN: '{}'.", baseDN, e ); throw new NamingException( "Unable to generate provider URL for baseDN: '"+baseDN+"': " + e.getMessage() ); } }
@Test public void testGetProviderURLTwoHosts() throws Exception { // Setup fixture. final Map<String, String> properties = new HashMap<>(); properties.put("ldap.host", "localhost example.org"); properties.put("ldap.port", "389"); final LdapManager manager = new LdapManager( properties ); final LdapName name = new LdapName("ou=people,dc=example,dc=org"); // Execute system under test. final String result = manager.getProviderURL( name ); // Verify result. assertEquals("ldaps://localhost:389/ou=people,dc=example,dc=org ldaps://example.org:389/ou=people,dc=example,dc=org", result); }
@Override public ComponentCreationData createProjectAndBindToDevOpsPlatform(DbSession dbSession, CreationMethod creationMethod, Boolean monorepo, @Nullable String projectKey, @Nullable String projectName) { String key = Optional.ofNullable(projectKey).orElse(generateUniqueProjectKey()); boolean isManaged = devOpsPlatformSettings.isProvisioningEnabled(); Boolean shouldProjectBePrivate = shouldProjectBePrivate(devOpsProjectCreationContext.isPublic()); ComponentCreationData componentCreationData = projectCreator.createProject(dbSession, key, getProjectName(projectName), devOpsProjectCreationContext.defaultBranchName(), creationMethod, shouldProjectBePrivate, isManaged); ProjectDto projectDto = Optional.ofNullable(componentCreationData.projectDto()).orElseThrow(); createProjectAlmSettingDto(dbSession, projectDto, devOpsProjectCreationContext.almSettingDto(), monorepo); addScanPermissionToCurrentUser(dbSession, projectDto); BranchDto mainBranchDto = Optional.ofNullable(componentCreationData.mainBranchDto()).orElseThrow(); if (isManaged) { syncProjectPermissionsWithDevOpsPlatform(projectDto, mainBranchDto); } return componentCreationData; }
@Test void createProjectAndBindToDevOpsPlatformFromApi_whenAutoProvisioningOnAndRepoPrivate_successfullyCreatesProject() { // given String projectKey = "customProjectKey"; mockGeneratedProjectKey(); ComponentCreationData componentCreationData = mockProjectCreation(projectKey); ProjectAlmSettingDao projectAlmSettingDao = mock(); when(dbClient.projectAlmSettingDao()).thenReturn(projectAlmSettingDao); when(devOpsPlatformSettings.isProvisioningEnabled()).thenReturn(true); // when ComponentCreationData actualComponentCreationData = defaultDevOpsProjectCreator.createProjectAndBindToDevOpsPlatform(dbClient.openSession(true), ALM_IMPORT_API, false, projectKey, null); // then assertThat(actualComponentCreationData).isEqualTo(componentCreationData); ComponentCreationParameters componentCreationParameters = componentCreationParametersCaptor.getValue(); assertComponentCreationParametersContainsCorrectInformation(componentCreationParameters, projectKey, ALM_IMPORT_API); assertThat(componentCreationParameters.isManaged()).isTrue(); assertThat(componentCreationParameters.newComponent().isPrivate()).isTrue(); verifyScanPermissionWasAddedToUser(actualComponentCreationData); verifyProjectSyncTaskWasCreated(actualComponentCreationData); verify(projectAlmSettingDao).insertOrUpdate(any(), projectAlmSettingDtoCaptor.capture(), eq(ALM_SETTING_KEY), eq(REPOSITORY_NAME), eq(projectKey)); ProjectAlmSettingDto projectAlmSettingDto = projectAlmSettingDtoCaptor.getValue(); assertAlmSettingsDtoContainsCorrectInformation(almSettingDto, requireNonNull(componentCreationData.projectDto()), projectAlmSettingDto); }
@Override public void checkSubjectAccess( final KsqlSecurityContext securityContext, final String subjectName, final AclOperation operation ) { checkAccess(new CacheKey(securityContext, AuthObjectType.SUBJECT, subjectName, operation)); }
@Test public void shouldThrowAuthorizationExceptionWhenBackendSubjectValidatorIsDenied() { // Given doThrow(KsqlSchemaAuthorizationException.class).when(backendValidator) .checkSubjectAccess(securityContext, SUBJECT_1, AclOperation.READ); // When: assertThrows( KsqlSchemaAuthorizationException.class, () -> cache.checkSubjectAccess(securityContext, SUBJECT_1, AclOperation.READ) ); }
public static List<String> splitMarkdownParagraphs( List<String> lines, int maxTokensPerParagraph) { return internalSplitTextParagraphs( lines, maxTokensPerParagraph, (text) -> internalSplitLines( text, maxTokensPerParagraph, false, s_markdownSplitOptions)); }
@Test public void canSplitMarkdownParagraphs() { List<String> input = Arrays.asList( "This is a test of the emergency broadcast system. This is only a test.", "We repeat, this is only a test. A unit test."); List<String> expected = Arrays.asList( "This is a test of the emergency broadcast system.", "This is only a test.", "We repeat, this is only a test. A unit test."); List<String> result = TextChunker.splitMarkdownParagraphs(input, 13); Assertions.assertEquals(expected, result); }
@Override public PageResult<AdminUserDO> getUserPage(UserPageReqVO reqVO) { return userMapper.selectPage(reqVO, getDeptCondition(reqVO.getDeptId())); }
@Test public void testGetUserPage() { // mock 数据 AdminUserDO dbUser = initGetUserPageData(); // 准备参数 UserPageReqVO reqVO = new UserPageReqVO(); reqVO.setUsername("tu"); reqVO.setMobile("1560"); reqVO.setStatus(CommonStatusEnum.ENABLE.getStatus()); reqVO.setCreateTime(buildBetweenTime(2020, 12, 1, 2020, 12, 24)); reqVO.setDeptId(1L); // 其中,1L 是 2L 的父部门 // mock 方法 List<DeptDO> deptList = newArrayList(randomPojo(DeptDO.class, o -> o.setId(2L))); when(deptService.getChildDeptList(eq(reqVO.getDeptId()))).thenReturn(deptList); // 调用 PageResult<AdminUserDO> pageResult = userService.getUserPage(reqVO); // 断言 assertEquals(1, pageResult.getTotal()); assertEquals(1, pageResult.getList().size()); assertPojoEquals(dbUser, pageResult.getList().get(0)); }
@Transactional(readOnly = true) public void validateCouponExisted(final Long couponId) { if (!couponRepository.isExistedById(couponId)) { throw new CouponNotFoundException(); } }
@Test void 쿠폰이_존재하지_않으면_예외를_발생시킨다() { // when & then assertThatThrownBy(() -> couponService.validateCouponExisted(-1L)) .isInstanceOf(CouponNotFoundException.class); }
public double[][] test(DataFrame data) { DataFrame x = formula.x(data); int n = x.nrow(); int ntrees = trees.length; double[][] prediction = new double[ntrees][n]; for (int j = 0; j < n; j++) { Tuple xj = x.get(j); double base = b; for (int i = 0; i < ntrees; i++) { base += shrinkage * trees[i].predict(xj); prediction[i][j] = base; } } return prediction; }
@Test public void test2DPlanesHuber() { test(Loss.huber(0.9), "2dplanes", Planes.formula, Planes.data, 1.1080); }
/**
 * Lists the GitHub organizations visible to the current user's credential.
 *
 * <p>Must be called via HTTP POST (mutating semantics: it uses the stored access
 * token). Preserves GitHub's organization ordering, and additionally exposes the
 * user's own login as a pseudo-organization when GitHub does not already report it.
 *
 * @throws ServiceException.MethodNotAllowedException for non-POST requests
 * @throws ServiceException.PreconditionRequired for a 401 (bad token) or 403
 *         (missing scopes) from GitHub
 * @throws ServiceException.UnexpectedErrorException for other I/O failures
 */
@Override
public Container<ScmOrganization> getOrganizations() {
    StaplerRequest request = Stapler.getCurrentRequest();
    Objects.requireNonNull(request, "This request must be made in HTTP context");
    String method = request.getMethod();
    if (!"POST".equalsIgnoreCase(method)) {
        throw new ServiceException.MethodNotAllowedException(String.format("Request method %s is not allowed", method));
    }
    StandardUsernamePasswordCredentials credential = getCredential();
    String accessToken = credential.getPassword().getPlainText();
    checkPermission();
    try {
        GitHub github = GitHubFactory.connect(accessToken, getUri());
        final Link link = getLink().rel("organizations");
        Map<String, ScmOrganization> orgMap = new LinkedHashMap<>(); // preserve the same order that github org api returns
        for(Map.Entry<String, GHOrganization> entry: github.getMyOrganizations().entrySet()){
            orgMap.put(entry.getKey(),
                    new GithubOrganization(GithubScm.this, entry.getValue(), credential, link));
        }
        GHMyself user = github.getMyself();
        if(orgMap.get(user.getLogin()) == null){
            // this is to take care of case if/when github starts reporting user login as org later on
            orgMap = new HashMap<>(orgMap);
            orgMap.put(user.getLogin(), new GithubUserOrganization(user, credential, this));
        }
        final Map<String, ScmOrganization> orgs = orgMap;
        return new Container<ScmOrganization>() {
            @Override
            public ScmOrganization get(String name) {
                ScmOrganization org = orgs.get(name);
                if(org == null){
                    throw new ServiceException.NotFoundException(String.format("GitHub organization %s not found", name));
                }
                return org;
            }

            @Override
            public Link getLink() {
                return link;
            }

            @Override
            public Iterator<ScmOrganization> iterator() {
                return orgs.values().iterator();
            }
        };
    } catch (IOException e) {
        if(e instanceof HttpException) {
            HttpException ex = (HttpException) e;
            if (ex.getResponseCode() == 401) {
                throw new ServiceException
                        .PreconditionRequired("Invalid GitHub accessToken", ex);
            }else if(ex.getResponseCode() == 403){
                throw new ServiceException
                        .PreconditionRequired("GitHub accessToken does not have required scopes. Expected scopes 'user:email, repo'", ex);
            }
        }
        throw new ServiceException.UnexpectedErrorException(e.getMessage(), e);
    }
}
@Test public void getOrganizations() { try (MockedStatic<Stapler> staplerMockedStatic = mockStatic(Stapler.class)) { StaplerRequest staplerRequest = mock(StaplerRequest.class); when(Stapler.getCurrentRequest()).thenReturn(staplerRequest); } }
@Override public Collection<RedisServer> masters() { List<Map<String, String>> masters = connection.sync(StringCodec.INSTANCE, RedisCommands.SENTINEL_MASTERS); return toRedisServersList(masters); }
@Test public void testMasters() { Collection<RedisServer> masters = connection.masters(); assertThat(masters).hasSize(1); }
public boolean createTopic(NewTopic topic) { if (topic == null) return false; Set<String> newTopicNames = createTopics(topic); return newTopicNames.contains(topic.name()); }
@Test public void createShouldReturnFalseWhenSuppliedNullTopicDescription() { Cluster cluster = createCluster(1); try (TopicAdmin admin = new TopicAdmin(new MockAdminClient(cluster.nodes(), cluster.nodeById(0)))) { boolean created = admin.createTopic(null); assertFalse(created); } }
@Override public void setConfig( final String storeName, final Options options, final Map<String, Object> configs) { if (!configured.get()) { throw new IllegalStateException( "Cannot use KsqlBoundedMemoryRocksDBConfigSetter before it's been configured."); } final BlockBasedTableConfig tableConfig = (BlockBasedTableConfig)options.tableFormatConfig(); tableConfig.setBlockCache(cache); tableConfig.setCacheIndexAndFilterBlocks(true); options.setWriteBufferManager(writeBufferManager); options.setCompactionStyle(compactionStyle); options.setCompressionType(compressionType); if (maxNumConcurrentJobs != -1) { options.setMaxBackgroundJobs(maxNumConcurrentJobs); } if (compactionStyle.equals(CompactionStyle.UNIVERSAL)) { if (options.compactionOptionsUniversal() == null) { final CompactionOptionsUniversal compactionOptionsUniversal = new CompactionOptionsUniversal(); compactionOptionsUniversal.setAllowTrivialMove(allowTrivialMove); options.setCompactionOptionsUniversal(compactionOptionsUniversal); } else { options.compactionOptionsUniversal().setAllowTrivialMove(allowTrivialMove); } } tableConfig.setCacheIndexAndFilterBlocksWithHighPriority(true); tableConfig.setPinTopLevelIndexAndFilter(true); options.setStatsDumpPeriodSec(0); options.setTableFormatConfig(tableConfig); }
@Test public void shouldFailWithoutConfigure() { // Expect: // When: final Exception e = assertThrows( IllegalStateException.class, () -> rocksDBConfig.setConfig("store_name", rocksOptions, emptyMap()) ); // Then: assertThat(e.getMessage(), containsString("Cannot use KsqlBoundedMemoryRocksDBConfigSetter before it's been configured.")); }
@Override public Long time(RedisClusterNode node) { RedisClient entry = getEntry(node); RFuture<Long> f = executorService.readAsync(entry, LongCodec.INSTANCE, RedisCommands.TIME_LONG); return syncFuture(f); }
@Test public void testTime() { RedisClusterNode master = getFirstMaster(); Long time = connection.time(master); assertThat(time).isGreaterThan(1000); }
@GetMapping("/status") public EmailStatusResult getEmailStatus(@RequestHeader(MijnDigidSession.MIJN_DIGID_SESSION_HEADER) String mijnDigiDsessionId){ MijnDigidSession mijnDigiDSession = retrieveMijnDigiDSession(mijnDigiDsessionId); return accountService.getEmailStatus(mijnDigiDSession.getAccountId()); }
@Test public void validEmailStatusVerified() { EmailStatusResult result = new EmailStatusResult(); result.setStatus(Status.OK); result.setError("error"); result.setEmailStatus(EmailStatus.VERIFIED); result.setEmailAddress("address"); result.setActionNeeded(true); when(accountService.getEmailStatus(anyLong())).thenReturn(result); EmailStatusResult emailStatus = emailController.getEmailStatus(mijnDigiDSession.getId()); assertEquals(Status.OK, emailStatus.getStatus()); assertEquals("error", emailStatus.getError()); assertEquals(EmailStatus.VERIFIED, emailStatus.getEmailStatus()); assertEquals("address", emailStatus.getEmailAddress()); }
@Udf public List<Integer> generateSeriesInt( @UdfParameter(description = "The beginning of the series") final int start, @UdfParameter(description = "Marks the end of the series (inclusive)") final int end ) { return generateSeriesInt(start, end, end - start > 0 ? 1 : -1); }
@Test public void shouldComputeIntRangeWithNegativeEvenStepInt() { final List<Integer> range = rangeUdf.generateSeriesInt(9, 0, -2); assertThat(range, hasSize(5)); int val = 9; for (final int i : range) { assertThat(val, is(i)); val -= 2; } }
public static Getter newMethodGetter(Object object, Getter parent, Method method, String modifier) throws Exception { return newGetter(object, parent, modifier, method.getReturnType(), method::invoke, (t, et) -> new MethodGetter(parent, method, modifier, t, et)); }
@Test public void newMethodGetter_whenExtractingFromNull_Collection_AndReducerSuffixInNotEmpty_thenReturnNullGetter() throws Exception { OuterObject object = OuterObject.nullInner("name"); Getter getter = GetterFactory.newMethodGetter(object, null, innersCollectionMethod, "[any]"); assertSame(NullMultiValueGetter.NULL_MULTIVALUE_GETTER, getter); }
static void validate( final LogicalSchema schema, final GenericKey key, final GenericRow value ) { final int expectedKeyCount = schema.key().size(); final int actualKeyCount = key.values().size(); if (actualKeyCount != expectedKeyCount) { throw new IllegalArgumentException("key column count mismatch." + " expected: " + expectedKeyCount + ", got: " + actualKeyCount ); } final int expectedValueCount = schema.value().size(); final int actualValueCount = value.size(); if (expectedValueCount != actualValueCount) { throw new IllegalArgumentException("value column count mismatch." + " expected: " + expectedValueCount + ", got: " + actualValueCount ); } }
@Test public void shouldNotThrowOnMatching() { TableRowValidation.validate(SCHEMA, A_KEY, A_VALUE); }
@Override public HttpResponseOutputStream<Void> write(final Path file, final TransferStatus status, final ConnectionCallback callback) throws BackgroundException { try { return this.write(file, this.toHeaders(file, status, expect), status); } catch(ConflictException e) { if(expect) { if(null != status.getLockId()) { // Handle 412 Precondition Failed with expired token log.warn(String.format("Retry failure %s with lock id %s removed", e, status.getLockId())); return this.write(file, this.toHeaders(file, status.withLockId(null), expect), status); } } throw e; } catch(InteroperabilityException e) { if(expect) { // Handle 417 Expectation Failed log.warn(String.format("Retry failure %s with Expect: Continue removed", e)); return this.write(file, this.toHeaders(file, status.withLockId(null), false), status); } throw e; } }
@Test(expected = AccessDeniedException.class) @Ignore public void testWriteZeroBytesAccessDenied() throws Exception { final Path test = new Path(new DefaultHomeFinderService(session).find().getAbsolute() + "/nosuchdirectory/" + new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); final HttpResponseOutputStream<Void> write = new DAVWriteFeature(session).write(test, new TransferStatus(), new DisabledConnectionCallback()); try { write.close(); write.getStatus(); } catch(IOException e) { throw (Exception) e.getCause(); } }
public static boolean isAlpha(String str) { if (str == null) { return false; } int sz = str.length(); for (int i = 0; i < sz; i++) { if (!Character.isLetter(str.charAt(i))) { return false; } } return true; }
@Test public void testIsAlpha() { assertTrue("Reported hello as non-alpha string", StringUtils.isAlpha("hello")); assertFalse("Reported hello1 as alpha string", StringUtils.isAlpha("hello1")); }
@Override @SuppressWarnings("unchecked") public void build(T instance) { super.build(instance); if (actives != null) { instance.setActives(actives); } if (async != null) { instance.setAsync(async); } if (!StringUtils.isEmpty(cache)) { instance.setCache(cache); } if (forks != null) { instance.setForks(forks); } if (!StringUtils.isEmpty(loadbalance)) { instance.setLoadbalance(loadbalance); } if (!StringUtils.isEmpty(merger)) { instance.setMerger(merger); } if (!StringUtils.isEmpty(mock)) { instance.setMock(mock); } if (retries != null) { instance.setRetries(retries); } if (sent != null) { instance.setSent(sent); } if (timeout != null) { instance.setTimeout(timeout); } if (!StringUtils.isEmpty(validation)) { instance.setValidation(validation); } if (parameters != null) { instance.setParameters(parameters); } }
@Test void build() { MethodBuilder builder = new MethodBuilder(); builder.id("id") .timeout(1) .retries(2) .actives(3) .loadbalance("mockloadbalance") .async(true) .sent(false) .mock("mock") .merger("merger") .cache("cache") .validation("validation") .appendParameter("default.num", "one"); MethodConfig config = builder.build(); MethodConfig config2 = builder.build(); Assertions.assertEquals("id", config.getId()); Assertions.assertEquals(1, config.getTimeout()); Assertions.assertEquals(2, config.getRetries()); Assertions.assertEquals(3, config.getActives()); Assertions.assertEquals("mockloadbalance", config.getLoadbalance()); Assertions.assertTrue(config.isAsync()); Assertions.assertFalse(config.getSent()); Assertions.assertEquals("mock", config.getMock()); Assertions.assertEquals("merger", config.getMerger()); Assertions.assertEquals("cache", config.getCache()); Assertions.assertEquals("validation", config.getValidation()); Assertions.assertTrue(config.getParameters().containsKey("default.num")); Assertions.assertEquals("one", config.getParameters().get("default.num")); Assertions.assertNotSame(config, config2); }
@Nonnull @Override public Iterable<EntityDescriptor> resolve(CriteriaSet criteria) { List<EntityDescriptor> matchedEntityDescriptors = new ArrayList<>(); if (entitiesDescriptor != null) { for (EntityDescriptor entityDescriptorLocal : entitiesDescriptor.getEntityDescriptors()) { if (criteria.contains(new EntityIdCriterion(entityDescriptorLocal.getEntityID()))) { matchedEntityDescriptors.add(entityDescriptorLocal); } } } if (entityDescriptor != null && criteria.contains(new EntityIdCriterion(entityDescriptor.getEntityID()))) { matchedEntityDescriptors.add(entityDescriptor); } return matchedEntityDescriptors; }
@Test public void resolveTest() throws UnmarshallingException, ResolverException { stringMetadataResolver.addMetadataString(metadata); CriteriaSet criteria = new CriteriaSet(new EntityIdCriterion("urn:nl-eid-gdi:1:0:entities:00000009999999999001")); EntityDescriptor entityDescriptor = stringMetadataResolver.resolveSingle(criteria); assertNotNull(entityDescriptor); }
public static Configuration loadConfiguration() { return loadConfiguration(new Configuration()); }
@Test void testFailIfNotLoaded() { assertThatThrownBy( () -> GlobalConfiguration.loadConfiguration( "/some/path/" + UUID.randomUUID())) .isInstanceOf(IllegalConfigurationException.class); }
public static UriTemplate create(String template, Charset charset) { return new UriTemplate(template, true, charset); }
@Test void simpleTemplate() { String template = "https://www.example.com/foo/{bar}"; UriTemplate uriTemplate = UriTemplate.create(template, Util.UTF_8); /* verify that the template has 1 variables names foo */ List<String> uriTemplateVariables = uriTemplate.getVariables(); assertThat(uriTemplateVariables).contains("bar").hasSize(1); /* expand the template */ Map<String, Object> variables = new LinkedHashMap<>(); variables.put("bar", "bar"); String expandedTemplate = uriTemplate.expand(variables); assertThat(expandedTemplate).isEqualToIgnoringCase("https://www.example.com/foo/bar"); assertThat(URI.create(expandedTemplate)).isNotNull(); }
// Replays one coordinator record into the appropriate in-memory manager,
// dispatching on the record key's version:
//   0-1  -> offset-commit records (offset manager; producer id/epoch carried for
//           transactional commits),
//   2    -> classic group metadata,
//   3-8  -> consumer-group records,
//   9-14 -> share-group records.
// A tombstone (null record value) is forwarded as null via Utils.messageOrNull.
// Any other key version is a corrupt/unknown record and raises IllegalStateException.
@Override public void replay( long offset, long producerId, short producerEpoch, CoordinatorRecord record ) throws RuntimeException { ApiMessageAndVersion key = record.key(); ApiMessageAndVersion value = record.value(); switch (key.version()) { case 0: case 1: offsetMetadataManager.replay( offset, producerId, (OffsetCommitKey) key.message(), (OffsetCommitValue) Utils.messageOrNull(value) ); break; case 2: groupMetadataManager.replay( (GroupMetadataKey) key.message(), (GroupMetadataValue) Utils.messageOrNull(value) ); break; case 3: groupMetadataManager.replay( (ConsumerGroupMetadataKey) key.message(), (ConsumerGroupMetadataValue) Utils.messageOrNull(value) ); break; case 4: groupMetadataManager.replay( (ConsumerGroupPartitionMetadataKey) key.message(), (ConsumerGroupPartitionMetadataValue) Utils.messageOrNull(value) ); break; case 5: groupMetadataManager.replay( (ConsumerGroupMemberMetadataKey) key.message(), (ConsumerGroupMemberMetadataValue) Utils.messageOrNull(value) ); break; case 6: groupMetadataManager.replay( (ConsumerGroupTargetAssignmentMetadataKey) key.message(), (ConsumerGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value) ); break; case 7: groupMetadataManager.replay( (ConsumerGroupTargetAssignmentMemberKey) key.message(), (ConsumerGroupTargetAssignmentMemberValue) Utils.messageOrNull(value) ); break; case 8: groupMetadataManager.replay( (ConsumerGroupCurrentMemberAssignmentKey) key.message(), (ConsumerGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value) ); break; case 9: groupMetadataManager.replay( (ShareGroupPartitionMetadataKey) key.message(), (ShareGroupPartitionMetadataValue) Utils.messageOrNull(value) ); break; case 10: groupMetadataManager.replay( (ShareGroupMemberMetadataKey) key.message(), (ShareGroupMemberMetadataValue) Utils.messageOrNull(value) ); break; case 11: groupMetadataManager.replay( (ShareGroupMetadataKey) key.message(), (ShareGroupMetadataValue) Utils.messageOrNull(value) ); break; case 12: 
groupMetadataManager.replay( (ShareGroupTargetAssignmentMetadataKey) key.message(), (ShareGroupTargetAssignmentMetadataValue) Utils.messageOrNull(value) ); break; case 13: groupMetadataManager.replay( (ShareGroupTargetAssignmentMemberKey) key.message(), (ShareGroupTargetAssignmentMemberValue) Utils.messageOrNull(value) ); break; case 14: groupMetadataManager.replay( (ShareGroupCurrentMemberAssignmentKey) key.message(), (ShareGroupCurrentMemberAssignmentValue) Utils.messageOrNull(value) ); break; default: throw new IllegalStateException("Received an unknown record type " + key.version() + " in " + record); } }
// A record whose key has version 5 must be routed to
// GroupMetadataManager.replay with the ConsumerGroupMemberMetadata key/value pair.
@Test public void testReplayConsumerGroupMemberMetadata() { GroupMetadataManager groupMetadataManager = mock(GroupMetadataManager.class); OffsetMetadataManager offsetMetadataManager = mock(OffsetMetadataManager.class); CoordinatorMetrics coordinatorMetrics = mock(CoordinatorMetrics.class); CoordinatorMetricsShard metricsShard = mock(CoordinatorMetricsShard.class); GroupCoordinatorShard coordinator = new GroupCoordinatorShard( new LogContext(), groupMetadataManager, offsetMetadataManager, Time.SYSTEM, new MockCoordinatorTimer<>(Time.SYSTEM), mock(GroupCoordinatorConfig.class), coordinatorMetrics, metricsShard ); ConsumerGroupMemberMetadataKey key = new ConsumerGroupMemberMetadataKey(); ConsumerGroupMemberMetadataValue value = new ConsumerGroupMemberMetadataValue(); coordinator.replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, new CoordinatorRecord( new ApiMessageAndVersion(key, (short) 5), new ApiMessageAndVersion(value, (short) 0) )); verify(groupMetadataManager, times(1)).replay(key, value); }
@Override
public boolean isDetected() {
    // Azure Pipelines / TFS agents export TF_BUILD (typically "True");
    // the case-insensitive compare also handles a null (unset) variable.
    final String tfBuild = system.envVariable("TF_BUILD");
    return "true".equalsIgnoreCase(tfBuild);
}
// TF_BUILD must be matched case-insensitively; when it is unset the detector
// must report false even if other CI-related variables are present.
@Test public void isDetected() { setEnvVariable("TF_BUILD", "True"); assertThat(underTest.isDetected()).isTrue(); setEnvVariable("TF_BUILD", "true"); assertThat(underTest.isDetected()).isTrue(); setEnvVariable("CI", "true"); setEnvVariable("APPVEYOR", null); setEnvVariable("TF_BUILD", null); assertThat(underTest.isDetected()).isFalse(); }
static void abortMultipartUploadsUsingLifecycleConfig() { Collection<LifecycleRule> lifeCycleRules = List.of(LifecycleRule.builder() .abortIncompleteMultipartUpload(b -> b. daysAfterInitiation(7)) .status("Enabled") .filter(SdkBuilder::build) // Filter element is required. .build()); // If the action is successful, the service sends back an HTTP 200 response with an empty HTTP body. PutBucketLifecycleConfigurationResponse response = s3Client.putBucketLifecycleConfiguration(b -> b .bucket(bucketName) .lifecycleConfiguration(b1 -> b1.rules(lifeCycleRules))); if (response.sdkHttpResponse().isSuccessful()) { logger.info("Rule to abort incomplete multipart uploads added to bucket."); } else { logger.error("Unsuccessfully applied rule. HTTP status code is [{}]", response.sdkHttpResponse().statusCode()); } }
// Integration test: after applying the configuration, the bucket must report a
// lifecycle rule aborting incomplete multipart uploads after 7 days.
@Test @Tag("IntegrationTest") void testAbortMultipartUploadsUsingLifecycleConfigHasRule(){ AbortMultipartUploadExamples.abortMultipartUploadsUsingLifecycleConfig(); GetBucketLifecycleConfigurationResponse response = AbortMultipartUploadExamples.s3Client.getBucketLifecycleConfiguration(b -> b .bucket(AbortMultipartUploadExamples.bucketName)); assertEquals(7, (int) response.rules().get(0).abortIncompleteMultipartUpload().daysAfterInitiation()); }
// Formats a float into ASCII bytes with at most maxFractionDigits fraction
// digits, writing into asciiBuffer from offset 0 and returning the number of
// bytes written. Returns -1 for NaN/Infinity, for values outside the long
// range, and when maxFractionDigits exceeds MAX_FRACTION_DIGITS. The fraction
// is rounded half-up (+0.5d), with an explicit carry into the integer part
// when rounding overflows the fraction range. A fraction of zero suppresses
// the '.' entirely.
// NOTE(review): relies on POWER_OF_TENS, formatPositiveNumber and getExponent
// declared elsewhere in this class; the asymmetric bound (value > MAX, value <= MIN)
// looks deliberate around (long) truncation — confirm before changing.
public static int formatFloatFast(float value, int maxFractionDigits, byte[] asciiBuffer) { if (Float.isNaN(value) || Float.isInfinite(value) || value > Long.MAX_VALUE || value <= Long.MIN_VALUE || maxFractionDigits > MAX_FRACTION_DIGITS) { return -1; } int offset = 0; long integerPart = (long) value; //handle sign if (value < 0) { asciiBuffer[offset++] = '-'; integerPart = -integerPart; } //extract fraction part long fractionPart = (long) ((Math.abs((double)value) - integerPart) * POWER_OF_TENS[maxFractionDigits] + 0.5d); //Check for rounding to next integer if (fractionPart >= POWER_OF_TENS[maxFractionDigits]) { integerPart++; fractionPart -= POWER_OF_TENS[maxFractionDigits]; } //format integer part offset = formatPositiveNumber(integerPart, getExponent(integerPart), false, asciiBuffer, offset); if (fractionPart > 0 && maxFractionDigits > 0) { asciiBuffer[offset++] = '.'; offset = formatPositiveNumber(fractionPart, maxFractionDigits - 1, true, asciiBuffer, offset); } return offset; }
// Non-finite inputs and values outside the signed-long range must be rejected with -1.
@Test void testFormatOfRealValuesReturnsMinusOneIfItCannotBeFormatted() { assertEquals(-1, NumberFormatUtil.formatFloatFast(Float.NaN, 5, buffer), "NaN should not be formattable"); assertEquals(-1, NumberFormatUtil.formatFloatFast(Float.POSITIVE_INFINITY, 5, buffer), "+Infinity should not be formattable"); assertEquals(-1, NumberFormatUtil.formatFloatFast(Float.NEGATIVE_INFINITY, 5, buffer), "-Infinity should not be formattable"); assertEquals(-1, NumberFormatUtil.formatFloatFast(((float) Long.MAX_VALUE) + 1000000000000f, 5, buffer), "Too big number should not be formattable"); assertEquals(-1, NumberFormatUtil.formatFloatFast(Long.MIN_VALUE, 5, buffer), "Too big negative number should not be formattable"); }
/**
 * Registers this collector with the default Prometheus registry.
 *
 * @return this collector, for call chaining
 */
public JmxCollector register() { return register(PrometheusRegistry.defaultRegistry); }
// A pattern rule with two capture groups must emit a sample whose labels are
// filled from the captured context/route and whose value matches the MBean's
// timestamp (exported in seconds).
@Test public void testCamelLastExchangeFailureTimestamp() throws Exception { String rulePattern = "\n" + "---\n" + "rules:\n" + "- pattern: 'org.apache.camel<context=([^,]+), type=routes," + " name=\"([^\"]+)\"><>LastExchangeFailureTimestamp'\n" + " name: org.apache.camel.LastExchangeFailureTimestamp\n" + " help: Exchanges Last Failure Timestamps\n" + " type: UNTYPED\n" + " labels:\n" + " context: \"$1\"\n" + " route: \"$2\"\n" + " type: routes"; JmxCollector jc = new JmxCollector(rulePattern).register(prometheusRegistry); Double actual = getSampleValue( "org_apache_camel_LastExchangeFailureTimestamp", new String[] {"context", "route", "type"}, new String[] {"my-camel-context", "my-route-name", "routes"}); assertEquals(Camel.EXPECTED_SECONDS, actual, 0); }
/**
 * Checks that the subject contains none of the given elements.
 * Two required parameters precede the varargs tail so a caller cannot
 * accidentally pass a single collection where elements were intended;
 * all arguments are accumulated and delegated to {@code containsNoneIn}.
 */
public final void containsNoneOf( @Nullable Object firstExcluded, @Nullable Object secondExcluded, @Nullable Object @Nullable ... restOfExcluded) { containsNoneIn(accumulate(firstExcluded, secondExcluded, restOfExcluded)); }
// Elements duplicated in the subject must be reported only once in the failure message.
@Test public void iterableContainsNoneOfFailureWithDuplicateInSubject() { expectFailureWhenTestingThat(asList(1, 2, 2, 3)).containsNoneOf(1, 2, 4); assertFailureValue("but contained", "[1, 2]"); }
/**
 * Updates the min-share gauges from the given resource: memory and vcores
 * always; custom resource types only when a custom-resource tracker exists.
 */
public void setMinShare(Resource resource) {
    final long memorySize = resource.getMemorySize();
    final int virtualCores = resource.getVirtualCores();
    minShareMB.set(memorySize);
    minShareVCores.set(virtualCores);
    if (customResources != null) {
        customResources.setMinShare(resource);
    }
}
// setMinShare must propagate memory/vcores and custom resource values; a second
// call without the custom resource must reset that custom value to 0.
@Test public void testSetMinShare() { FSQueueMetrics metrics = setupMetrics(RESOURCE_NAME); Resource res = Resource.newInstance(2048L, 4, ImmutableMap.of(RESOURCE_NAME, 20L)); metrics.setMinShare(res); assertEquals(getErrorMessage("minShareMB"), 2048L, metrics.getMinShareMB()); assertEquals(getErrorMessage("minShareVcores"), 4L, metrics.getMinShareVirtualCores()); assertEquals(getErrorMessage("minShareMB"), 2048L, metrics.getMinShare().getMemorySize()); assertEquals(getErrorMessage("minShareVcores"), 4L, metrics.getMinShare().getVirtualCores()); assertEquals(getErrorMessage("minShare for resource: " + RESOURCE_NAME), 20L, metrics.getMinShare().getResourceValue(RESOURCE_NAME)); res = Resource.newInstance(2049L, 5); metrics.setMinShare(res); assertEquals(getErrorMessage("minShareMB"), 2049L, metrics.getMinShareMB()); assertEquals(getErrorMessage("minShareVcores"), 5L, metrics.getMinShareVirtualCores()); assertEquals(getErrorMessage("minShareMB"), 2049L, metrics.getMinShare().getMemorySize()); assertEquals(getErrorMessage("minShareVcores"), 5L, metrics.getMinShare().getVirtualCores()); assertEquals(getErrorMessage("minShare for resource: " + RESOURCE_NAME), 0, metrics.getMinShare().getResourceValue(RESOURCE_NAME)); }
// CLI command: prints per-partition write statistics for one commit instant —
// files added/updated, records inserted/updated, bytes written and write errors —
// optionally consulting the archived timeline. A write stat whose previous commit
// is NULL_COMMIT counts as a newly added file; otherwise it is an update (and its
// update-write count is accumulated). Bytes-written values are rendered
// human-readable via the converter map. Output honors sort/limit/header options
// and can be stored as a named view.
@ShellMethod(key = "commit showpartitions", value = "Show partition level details of a commit") public String showCommitPartitions( @ShellOption(value = {"--createView"}, help = "view name to store output table", defaultValue = "") final String exportTableName, @ShellOption(value = {"--commit"}, help = "Commit to show") final String instantTime, @ShellOption(value = {"--limit"}, help = "Limit commits", defaultValue = "-1") final Integer limit, @ShellOption(value = {"--sortBy"}, help = "Sorting Field", defaultValue = "") final String sortByField, @ShellOption(value = {"--desc"}, help = "Ordering", defaultValue = "false") final boolean descending, @ShellOption(value = {"--headeronly"}, help = "Print Header Only", defaultValue = "false") final boolean headerOnly, @ShellOption(value = {"--includeArchivedTimeline"}, help = "Include archived commits as well", defaultValue = "false") final boolean includeArchivedTimeline) throws Exception { HoodieDefaultTimeline defaultTimeline = getTimeline(HoodieCLI.getTableMetaClient(), includeArchivedTimeline); HoodieTimeline timeline = defaultTimeline.getCommitsTimeline().filterCompletedInstants(); Option<HoodieInstant> hoodieInstantOption = getCommitForInstant(timeline, instantTime); Option<HoodieCommitMetadata> commitMetadataOptional = getHoodieCommitMetadata(timeline, hoodieInstantOption); if (!commitMetadataOptional.isPresent()) { return "Commit " + instantTime + " not found in Commits " + timeline; } HoodieCommitMetadata meta = commitMetadataOptional.get(); List<Comparable[]> rows = new ArrayList<>(); for (Map.Entry<String, List<HoodieWriteStat>> entry : meta.getPartitionToWriteStats().entrySet()) { String action = hoodieInstantOption.get().getAction(); String path = entry.getKey(); List<HoodieWriteStat> stats = entry.getValue(); long totalFilesAdded = 0; long totalFilesUpdated = 0; long totalRecordsUpdated = 0; long totalRecordsInserted = 0; long totalBytesWritten = 0; long totalWriteErrors = 0; for (HoodieWriteStat stat : 
stats) { if (stat.getPrevCommit().equals(HoodieWriteStat.NULL_COMMIT)) { totalFilesAdded += 1; } else { totalFilesUpdated += 1; totalRecordsUpdated += stat.getNumUpdateWrites(); } totalRecordsInserted += stat.getNumInserts(); totalBytesWritten += stat.getTotalWriteBytes(); totalWriteErrors += stat.getTotalWriteErrors(); } rows.add(new Comparable[] {action, path, totalFilesAdded, totalFilesUpdated, totalRecordsInserted, totalRecordsUpdated, totalBytesWritten, totalWriteErrors}); } Map<String, Function<Object, String>> fieldNameToConverterMap = new HashMap<>(); fieldNameToConverterMap.put(HoodieTableHeaderFields.HEADER_TOTAL_BYTES_WRITTEN, entry -> NumericUtils.humanReadableByteCount((Long.parseLong(entry.toString())))); TableHeader header = new TableHeader().addTableHeaderField(HoodieTableHeaderFields.HEADER_ACTION) .addTableHeaderField(HoodieTableHeaderFields.HEADER_PARTITION_PATH) .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_ADDED) .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_UPDATED) .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_RECORDS_INSERTED) .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_RECORDS_UPDATED) .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_BYTES_WRITTEN) .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_ERRORS); return HoodiePrintHelper.print(header, fieldNameToConverterMap, sortByField, descending, limit, headerOnly, rows, exportTableName); }
// The command output must match a table rebuilt from the same expected rows
// (0 added / 1 updated per partition, since the generated stats carry a
// non-null previous commit); comparison ignores whitespace/punctuation.
@Test public void testShowCommitPartitions() throws Exception { Map<String, Integer[]> data = generateData(); String commitInstant = "101"; Object result = shell.evaluate(() -> String.format("commit showpartitions --commit %s", commitInstant)); assertTrue(ShellEvaluationResultUtil.isSuccess(result)); Integer[] value = data.get(commitInstant); List<Comparable[]> rows = new ArrayList<>(); // prevCommit not null, so add 0, update 1 Arrays.asList(HoodieTestDataGenerator.DEFAULT_FIRST_PARTITION_PATH, HoodieTestDataGenerator.DEFAULT_SECOND_PARTITION_PATH).stream().forEach(partition -> rows.add(new Comparable[] {HoodieTimeline.COMMIT_ACTION, partition, 0, 1, 0, value[1], HoodieTestCommitMetadataGenerator.DEFAULT_TOTAL_WRITE_BYTES, 0}) ); Map<String, Function<Object, String>> fieldNameToConverterMap = new HashMap<>(); fieldNameToConverterMap.put(HoodieTableHeaderFields.HEADER_TOTAL_BYTES_WRITTEN, entry -> NumericUtils.humanReadableByteCount((Long.parseLong(entry.toString())))); TableHeader header = new TableHeader().addTableHeaderField(HoodieTableHeaderFields.HEADER_ACTION) .addTableHeaderField(HoodieTableHeaderFields.HEADER_PARTITION_PATH) .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_ADDED) .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_FILES_UPDATED) .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_RECORDS_INSERTED) .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_RECORDS_UPDATED) .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_BYTES_WRITTEN) .addTableHeaderField(HoodieTableHeaderFields.HEADER_TOTAL_ERRORS); String expected = HoodiePrintHelper.print(header, fieldNameToConverterMap, "", false, -1, false, rows); expected = removeNonWordAndStripSpace(expected); String got = removeNonWordAndStripSpace(result.toString()); assertEquals(expected, got); }
/**
 * Builds a random string of the requested length, drawing each character
 * uniformly from the CHARACTER alphabet.
 */
public String generate(int length) {
    final StringBuilder builder = new StringBuilder(length);
    random.ints(length, 0, CHARACTER.length())
            .forEach(index -> builder.append(CHARACTER.charAt(index)));
    return builder.toString();
}
// Test name (Korean): "generates a random string matching the given length".
// The result must be alphanumeric and exactly `length` characters long.
@Test void 주어진_길이에_맞는_랜덤한_문자열을_생성한다() { // given int length = 8; RandomCodeGenerator randomCodeGenerator = new RandomCodeGenerator(); // when String actual = randomCodeGenerator.generate(length); // then assertThat(actual).matches("[a-zA-Z0-9]{%d}".formatted(length)); }
/**
 * Injects defaults into CREATE SOURCE statements; every other statement kind
 * passes through unchanged.
 */
@SuppressWarnings("unchecked")
@Override
public <T extends Statement> ConfiguredStatement<T> inject(
    final ConfiguredStatement<T> statement
) {
    // Guard clause: only CREATE SOURCE statements need handling.
    if (!(statement.getStatement() instanceof CreateSource)) {
        return statement;
    }
    return handleCreateSource((ConfiguredStatement<CreateSource>) statement);
}
// With no default key format configured, the injector must add KEY_FORMAT='KAFKA'.
@Test public void shouldDefaultToKafkaIfNoExplicitDefaultKeyFormat() { // Given givenConfig(ImmutableMap.of()); givenSourceProps(ImmutableMap.of( "VALUE_FORMAT", new StringLiteral("JSON") )); // When final ConfiguredStatement<?> result = injector.inject(csStatement); // Then assertThat(result.getMaskedStatementText(), containsString("KEY_FORMAT='KAFKA'")); }
/**
 * Returns the metric-prefix label for a denomination scale; unrecognized
 * scales fall back to a generic "Fixed (n) " label.
 */
private static String prefixLabel(int scale) {
    switch (scale) {
        case COIN_SCALE:
            return "Coin-";
        case 1:
            return "Decicoin-";
        case 2:
            return "Centicoin-";
        case MILLICOIN_SCALE:
            return "Millicoin-";
        case MICROCOIN_SCALE:
            return "Microcoin-";
        case -1:
            return "Dekacoin-";
        case -2:
            return "Hectocoin-";
        case -3:
            return "Kilocoin-";
        case -6:
            return "Megacoin-";
        default:
            // String concatenation converts the int directly; String.valueOf is redundant.
            return "Fixed (" + scale + ") ";
    }
}
// The formatter's toString must be "<prefix>format <pattern>" where the prefix
// comes from the expected denomination label.
@Test public void testPrefixLabel() { assertEquals(String.format("%s%s %s", expectedOutput, "format", btcFixedFormat.pattern()), btcFixedFormat.toString()); }
/**
 * Returns a redacted string form; the token information is deliberately
 * rendered as "[hidden]" so it never leaks into logs or error messages.
 */
@Override public String toString() { return "DelegationTokenData" + "(tokenInformation=" + "[hidden]" + ")"; }
// toString must keep the token information redacted as "[hidden]".
@Test public void testToString() { assertEquals("DelegationTokenData" + "(tokenInformation=" + "[hidden]" + ")", DELEGATIONTOKENDATA.get(0).toString()); }
// Opens a raw (unfiltered) writer that replaces this stream's backing buffer.
// Only one writer may be open at a time (guarded by isWriting). The returned
// wrapper overrides write(byte[],int,int) to bypass FilterOutputStream's
// byte-at-a-time copy, and on close() records the final buffer length into the
// /LENGTH entry and clears the writing flag.
// NOTE(review): length is narrowed to int when stored — presumably streams never
// exceed Integer.MAX_VALUE bytes; confirm upstream.
public OutputStream createRawOutputStream() throws IOException { checkClosed(); if (isWriting) { throw new IllegalStateException("Cannot have more than one open stream writer."); } if (randomAccess != null) randomAccess.clear(); else randomAccess = getStreamCache().createBuffer(); OutputStream out = new RandomAccessOutputStream(randomAccess); isWriting = true; return new FilterOutputStream(out) { @Override public void write(byte[] b, int off, int len) throws IOException { this.out.write(b, off, len); } @Override public void close() throws IOException { super.close(); setInt(COSName.LENGTH, (int)randomAccess.length()); isWriting = false; } }; }
// Writing pre-encoded (FlateDecode) bytes through the raw stream and then
// setting the FILTER entry must decode back to the original plain text.
@Test void testCompressedStream1Decode() throws IOException { byte[] testString = "This is a test string to be used as input for TestCOSStream".getBytes(StandardCharsets.US_ASCII); byte[] testStringEncoded = encodeData(testString, COSName.FLATE_DECODE); COSStream stream = new COSStream(); try (OutputStream output = stream.createRawOutputStream()) { output.write(testStringEncoded); } stream.setItem(COSName.FILTER, COSName.FLATE_DECODE); validateDecoded(stream, testString); }
/**
 * Returns whether the given result row satisfies the HAVING clause,
 * delegating to the pre-built row matcher.
 */
public boolean isMatch(Object[] row) { return _rowMatcher.isMatch(row); }
// With null handling enabled, "HAVING col1 IS NULL" must match only rows whose
// col1 is an actual null — not Integer.MIN_VALUE null-placeholder values.
@Test public void testIsNullWhenNullHandlingEnabled() { QueryContext queryContext = QueryContextConverterUtils.getQueryContext( "SELECT col1, COUNT(col2) FROM testTable GROUP BY col1 HAVING col1 IS NULL OPTION(enableNullHandling=true)"); DataSchema dataSchema = new DataSchema(new String[]{"col1", "count(col2)"}, new ColumnDataType[]{ ColumnDataType.INT, ColumnDataType.INT }); PostAggregationHandler postAggregationHandler = new PostAggregationHandler(queryContext, dataSchema); HavingFilterHandler havingFilterHandler = new HavingFilterHandler(queryContext.getHavingFilter(), postAggregationHandler, true); assertTrue(havingFilterHandler.isMatch(new Object[]{null, 1})); assertFalse(havingFilterHandler.isMatch(new Object[]{1, 1})); assertFalse(havingFilterHandler.isMatch(new Object[]{Integer.MIN_VALUE, 1})); }
// JMH benchmark (16 threads): enters and leaves three nested execution states
// 1000 times per invocation against the runners-core state tracker. The loop
// counter is fed to the Blackhole so the JIT cannot elide the sampled region;
// states are closed in reverse order of entry.
@Benchmark @Threads(16) public void testLargeBundleRunnersCoreStateSampler( RunnersCoreStateTracker trackerState, Blackhole bh) throws Exception { ExecutionStateTracker tracker = trackerState.tracker; Closeable c = tracker.activate(); for (int i = 0; i < 1000; ) { Closeable close1 = tracker.enterState(trackerState.state1); Closeable close2 = tracker.enterState(trackerState.state2); Closeable close3 = tracker.enterState(trackerState.state3); // trival code that is being sampled for this state i += 1; bh.consume(i); close3.close(); close2.close(); close1.close(); } c.close(); }
// Smoke test: sets up the benchmark state, runs the benchmark body once
// end-to-end without error, then tears the sampler down.
@Test public void testLargeBundleRunnersCoreStateSampler() throws Exception { RunnersCoreStateSampler state = new RunnersCoreStateSampler(); RunnersCoreStateTracker threadState = new RunnersCoreStateTracker(); state.setup(); threadState.setup(state); new ExecutionStateSamplerBenchmark() .testLargeBundleRunnersCoreStateSampler(threadState, blackhole); state.tearDown(); }
@CheckForNull static BundleParams getBundleParameters(String restOfPath) { if (restOfPath == null || restOfPath.length() == 0) { return null; } String[] pathTokens = restOfPath.split("/"); List<String> bundleParameters = new ArrayList<>(); for (String pathToken : pathTokens) { if (pathToken.length() > 0) { bundleParameters.add(urlDecode(pathToken)); } } // Path should be prefixed with /blue/rest/i18n. // Let's remove those. if (bundleParameters.get(0).equals("blue")) { bundleParameters.remove(0); } if (bundleParameters.get(0).equals("rest")) { bundleParameters.remove(0); } if (bundleParameters.get(0).equals("i18n")) { bundleParameters.remove(0); } if (bundleParameters.size() != 3 && bundleParameters.size() != 4) { return null; } BundleParams bundleParams = new BundleParams( bundleParameters.get(0), bundleParameters.get(1), bundleParameters.get(2) ); if (bundleParameters.size() == 4) { // https://www.w3.org/International/questions/qa-lang-priorities // in case we have regions/countries in the language query parameter String locale = bundleParameters.get(3); String[] localeTokens = locale.split("-|_"); bundleParams.language = localeTokens[0]; if (localeTokens.length > 1) { bundleParams.country = localeTokens[1]; if (localeTokens.length > 2) { bundleParams.variant = localeTokens[2]; } } } return bundleParams; }
// Locale segments using '-' or '_' separators must both parse, including the
// three-part legacy locale ja_JP_JP (which Java maps to the Japanese calendar
// extension), with or without the /blue/rest/i18n prefix.
@Test public void test_getBundleParameters_locale() { BlueI18n.BundleParams bundleParameters; bundleParameters = BlueI18n.getBundleParameters("/blue/rest/i18n/pluginx/1.0.0/pluginx.bundle/ja_JP_JP"); Assert.assertEquals("ja_JP_JP_#u-ca-japanese", bundleParameters.getLocale().toString()); bundleParameters = BlueI18n.getBundleParameters("pluginx/1.0.0/pluginx.bundle/ja-JP-JP"); Assert.assertEquals("ja_JP_JP_#u-ca-japanese", bundleParameters.getLocale().toString()); bundleParameters = BlueI18n.getBundleParameters("/blue/rest/i18n/pluginx/1.0.0/pluginx.bundle/ja_JP"); Assert.assertEquals("ja_JP", bundleParameters.getLocale().toString()); bundleParameters = BlueI18n.getBundleParameters("/blue/rest/i18n/pluginx/1.0.0/pluginx.bundle/ja-JP"); Assert.assertEquals("ja_JP", bundleParameters.getLocale().toString()); bundleParameters = BlueI18n.getBundleParameters("/blue/rest/i18n/pluginx/1.0.0/pluginx.bundle/ja"); Assert.assertEquals("ja", bundleParameters.getLocale().toString()); bundleParameters = BlueI18n.getBundleParameters("/blue/rest/i18n/pluginx/1.0.0/pluginx.bundle/ja"); Assert.assertEquals("ja", bundleParameters.getLocale().toString()); }
/**
 * Rebuilds the search index (soft update) when search is enabled, evicting the
 * cached average review ratings and logging the elapsed time. Retried on
 * transient data-access failures.
 */
@Retryable(DataAccessResourceFailureException.class)
@CacheEvict(value = CACHE_AVERAGE_REVIEW_RATING, allEntries = true)
public void updateSearchIndex() {
    if (isEnabled()) {
        StopWatch timer = new StopWatch();
        timer.start();
        updateSearchIndex(false);
        timer.stop();
        logger.info("Updated search index in " + timer.getTotalTimeMillis() + " ms");
    }
}
// A soft update against a missing index must create it (not delete anything)
// and index all known extensions.
@Test public void testSoftUpdateNotExists() { var index = mockIndex(false); mockExtensions(); search.updateSearchIndex(false); assertThat(index.created).isTrue(); assertThat(index.deleted).isFalse(); assertThat(index.entries).hasSize(3); }
/**
 * Hash code is derived solely from the component key.
 * NOTE(review): equals() is outside this view — confirm it also compares by
 * key so the equals/hashCode contract holds.
 */
@Override public int hashCode() { return key.hashCode(); }
// Components with the same key must be equal and share a hash code; a
// different key must break both equality and (here) the hash code.
@Test public void test_equals_and_hashcode() { ComponentImpl component = new ComponentImpl("Project1", Component.Type.PROJECT, null); ComponentImpl sameComponent = new ComponentImpl("Project1", Component.Type.PROJECT, null); ComponentImpl anotherComponent = new ComponentImpl("Project2", Component.Type.PROJECT, null); assertThat(component) .isEqualTo(component) .isEqualTo(sameComponent) .isNotEqualTo(anotherComponent) .isNotNull() .hasSameHashCodeAs(component) .hasSameHashCodeAs(sameComponent); assertThat(component.hashCode()).isNotEqualTo(anotherComponent.hashCode()); }
/**
 * Validates that {@code n >= expected}, returning {@code n} unchanged.
 *
 * @param n        the value under validation
 * @param expected the inclusive lower bound
 * @param name     the parameter name used in the exception message
 * @return {@code n}, when it satisfies the bound
 * @throws IllegalArgumentException if {@code n} is below the bound
 */
public static int checkGreaterThanOrEqual(int n, int expected, String name) {
    // Happy path first: return immediately when the bound is satisfied.
    if (n >= expected) {
        return n;
    }
    throw new IllegalArgumentException(name + ": " + n + " (expected: >= " + expected + ')');
}
// An argument strictly greater than the bound must be returned unchanged.
@Test public void checkGreaterThanOrEqualMustPassIfArgumentIsGreaterThanExpected() { final int n = 1; final int actual = RangeUtil.checkGreaterThanOrEqual(n, 0, "var"); assertThat(actual, is(equalTo(n))); }
/**
 * Returns the current result value without blocking.
 * NOTE(review): whether completion is guaranteed before this is called is a
 * caller responsibility not visible here — confirm against the interface contract.
 */
@Override public V get() { return result; }
// After set(), get() must return the stored value.
@Test public void testGet() throws Exception { promise.set("Done"); assertThat(promise.get()).isEqualTo("Done"); }
/**
 * Returns the 16-bit little-endian value at {@code index} as an unsigned int
 * in the range [0, 65535]; the mask discards the sign extension of the short.
 */
@Override public int getUnsignedShortLE(int index) { return getShortLE(index) & 0xFFFF; }
// Reading from a released buffer must throw IllegalReferenceCountException.
@Test public void testGetUnsignedShortLEAfterRelease() { assertThrows(IllegalReferenceCountException.class, new Executable() { @Override public void execute() { releasedBuffer().getUnsignedShortLE(0); } }); }
public static boolean isOverlap(Date realStartTime, Date realEndTime, Date startTime, Date endTime) { // x>b||a>y 无交集 // 则有交集的逻辑为 !(x>b||a>y) // 根据德摩根公式,可化简为 x<=b && a<=y 即 realStartTime<=endTime && startTime<=realEndTime return realStartTime.compareTo(endTime) <=0 && startTime.compareTo(realEndTime) <= 0; }
// Interval overlap must be inclusive at the boundaries (touching endpoints
// overlap, including zero-length intervals) and symmetric in its argument pairs.
@Test @SuppressWarnings("ConstantConditions") public void isOverlapTest() { final DateTime oneStartTime = DateUtil.parse("2022-01-01 10:10:10"); final DateTime oneEndTime = DateUtil.parse("2022-01-01 11:10:10"); final DateTime oneStartTime2 = DateUtil.parse("2022-01-01 11:20:10"); final DateTime oneEndTime2 = DateUtil.parse("2022-01-01 11:30:10"); final DateTime oneStartTime3 = DateUtil.parse("2022-01-01 11:40:10"); final DateTime oneEndTime3 = DateUtil.parse("2022-01-01 11:50:10"); //真实请假数据 final DateTime realStartTime = DateUtil.parse("2022-01-01 11:49:10"); final DateTime realEndTime = DateUtil.parse("2022-01-01 12:00:10"); final DateTime realStartTime1 = DateUtil.parse("2022-03-01 08:00:00"); final DateTime realEndTime1 = DateUtil.parse("2022-03-01 10:00:00"); final DateTime startTime = DateUtil.parse("2022-03-23 05:00:00"); final DateTime endTime = DateUtil.parse("2022-03-23 13:00:00"); assertFalse(DateUtil.isOverlap(oneStartTime, oneEndTime, realStartTime, realEndTime)); assertFalse(DateUtil.isOverlap(oneStartTime2, oneEndTime2, realStartTime, realEndTime)); assertTrue(DateUtil.isOverlap(oneStartTime3, oneEndTime3, realStartTime, realEndTime)); assertFalse(DateUtil.isOverlap(realStartTime1,realEndTime1,startTime,endTime)); assertFalse(DateUtil.isOverlap(startTime,endTime,realStartTime1,realEndTime1)); assertTrue(DateUtil.isOverlap(startTime,startTime,startTime,startTime)); assertTrue(DateUtil.isOverlap(startTime,startTime,startTime,endTime)); assertFalse(DateUtil.isOverlap(startTime,startTime,endTime,endTime)); assertTrue(DateUtil.isOverlap(startTime,endTime,endTime,endTime)); }
/**
 * Deprecated flatTransform overload without a {@link Named}: validates the
 * supplier, generates a unique processor name and delegates to the Named variant.
 */
@Override
@Deprecated
public <K1, V1> KStream<K1, V1> flatTransform(
        final org.apache.kafka.streams.kstream.TransformerSupplier<? super K, ? super V, Iterable<KeyValue<K1, V1>>> transformerSupplier,
        final String... stateStoreNames) {
    Objects.requireNonNull(transformerSupplier, "transformerSupplier can't be null");
    final String processorName = builder.newProcessorName(TRANSFORM_NAME);
    return flatTransform(transformerSupplier, Named.as(processorName), stateStoreNames);
}
// A null transformer supplier must be rejected with the documented NPE message.
@Test @SuppressWarnings("deprecation") public void shouldNotAllowNullTransformerSupplierOnFlatTransform() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.flatTransform(null)); assertThat(exception.getMessage(), equalTo("transformerSupplier can't be null")); }
// Masks sensitive properties (credentials) inside RPC argument objects before
// they are logged. Only MountPOptions and the proto messages that embed it
// (MountPRequest, UfsInfo, GetUfsInfoPResponse, UpdateMountPRequest) are
// handled: each branch rebuilds the message, clears its properties map and
// refills it via copyAndMaskProperties. Any other argument passes through
// unchanged. If a new proto message starts referring (directly or indirectly)
// to MountPOptions, a matching branch must be added here.
@Override public Object[] maskObjects(Logger logger, Object... args) { /** * This function is to mask MountPOption, and those who are referring to it. * If something else need be masked, extra code change is required. * And also if a new proto message referring direct/indirect to MountPOption, * extra code should be added here. */ Object [] objects = new Object[args.length]; for (int i = 0; i < args.length; i++) { if (args[i] instanceof MountPOptions) { MountPOptions.Builder newMP = MountPOptions.newBuilder((MountPOptions) args[i]); newMP.clearProperties(); copyAndMaskProperties(newMP, ((MountPOptions) args[i]).getPropertiesMap()); objects[i] = newMP.build(); } else if (args[i] instanceof MountPRequest) { MountPRequest.Builder mpR = MountPRequest.newBuilder((MountPRequest) args[i]); MountPOptions.Builder newMP = mpR.getOptionsBuilder(); newMP.clearProperties(); copyAndMaskProperties(newMP, ((MountPRequest) args[i]).getOptions().getPropertiesMap()); objects[i] = mpR.build(); } else if (args[i] instanceof UfsInfo) { UfsInfo.Builder ufsInfo = UfsInfo.newBuilder((UfsInfo) args[i]); MountPOptions.Builder newMP = ufsInfo.getPropertiesBuilder(); newMP.clearProperties(); copyAndMaskProperties(newMP, ((UfsInfo) args[i]).getProperties().getPropertiesMap()); objects[i] = ufsInfo.build(); } else if (args[i] instanceof GetUfsInfoPResponse) { GetUfsInfoPResponse.Builder getUfsInfoResponse = GetUfsInfoPResponse.newBuilder((GetUfsInfoPResponse) args[i]); MountPOptions.Builder newMP = getUfsInfoResponse.getUfsInfoBuilder().getPropertiesBuilder(); newMP.clearProperties(); copyAndMaskProperties(newMP, ((GetUfsInfoPResponse) args[i]).getUfsInfo().getProperties().getPropertiesMap()); objects[i] = getUfsInfoResponse.build(); } else if (args[i] instanceof UpdateMountPRequest) { UpdateMountPRequest.Builder updateMountPRequest = UpdateMountPRequest.newBuilder((UpdateMountPRequest) args[i]); MountPOptions.Builder newMP = updateMountPRequest.getOptionsBuilder(); newMP.clearProperties(); 
copyAndMaskProperties(newMP, ((UpdateMountPRequest) args[i]) .getOptions().getPropertiesMap()); objects[i] = updateMountPRequest.build(); } else { objects[i] = args[i]; } } return objects; }
// For every supported message type, the masked output must keep ordinary
// key/value properties, replace the S3 credential value with "Masked", and
// leave non-proto arguments (plain strings) untouched.
@Test public void maskObjectsAll() { { MountPOptions.Builder mpb = MountPOptions.newBuilder(); mpb.putProperties("key1", "value1"); mpb.putProperties(PropertyKey.Name.S3A_ACCESS_KEY, "mycredential"); String result = String.format("{%s}", RpcSensitiveConfigMask.CREDENTIAL_FIELD_MASKER.maskObjects(null, mpb.build())); Assert.assertEquals(true, result.contains("key1")); Assert.assertEquals(true, result.contains("value1")); Assert.assertEquals(true, result.contains("Masked")); Assert.assertEquals(true, result.contains(PropertyKey.Name.S3A_ACCESS_KEY)); Assert.assertEquals(false, result.contains("mycredential")); } { MountPRequest.Builder obj = MountPRequest.newBuilder(); MountPOptions.Builder mpb = obj.getOptionsBuilder(); mpb.putProperties("key1", "value1"); mpb.putProperties(PropertyKey.Name.S3A_ACCESS_KEY, "mycredential"); String result = String.format("{%s}", RpcSensitiveConfigMask.CREDENTIAL_FIELD_MASKER.maskObjects(null, obj.build())); Assert.assertEquals(true, result.contains("key1")); Assert.assertEquals(true, result.contains("value1")); Assert.assertEquals(true, result.contains("Masked")); Assert.assertEquals(true, result.contains(PropertyKey.Name.S3A_ACCESS_KEY)); Assert.assertEquals(false, result.contains("mycredential")); } { UfsInfo.Builder obj = UfsInfo.newBuilder(); MountPOptions.Builder mpb = obj.getPropertiesBuilder(); mpb.putProperties("key1", "value1"); mpb.putProperties(PropertyKey.Name.S3A_ACCESS_KEY, "mycredential"); String result = String.format("{%s}", RpcSensitiveConfigMask.CREDENTIAL_FIELD_MASKER.maskObjects(null, obj.build())); Assert.assertEquals(true, result.contains("key1")); Assert.assertEquals(true, result.contains("value1")); Assert.assertEquals(true, result.contains("Masked")); Assert.assertEquals(true, result.contains(PropertyKey.Name.S3A_ACCESS_KEY)); Assert.assertEquals(false, result.contains("mycredential")); } { GetUfsInfoPResponse.Builder obj = GetUfsInfoPResponse.newBuilder(); MountPOptions.Builder mpb = 
obj.getUfsInfoBuilder().getPropertiesBuilder(); mpb.putProperties("key1", "value1"); mpb.putProperties(PropertyKey.Name.S3A_ACCESS_KEY, "mycredential"); String result = String.format("{%s}", RpcSensitiveConfigMask.CREDENTIAL_FIELD_MASKER.maskObjects(null, obj.build())); Assert.assertEquals(true, result.contains("key1")); Assert.assertEquals(true, result.contains("value1")); Assert.assertEquals(true, result.contains("Masked")); Assert.assertEquals(true, result.contains(PropertyKey.Name.S3A_ACCESS_KEY)); Assert.assertEquals(false, result.contains("mycredential")); } { UpdateMountPRequest.Builder obj = UpdateMountPRequest.newBuilder(); MountPOptions.Builder mpb = obj.getOptionsBuilder(); mpb.putProperties("key1", "value1"); mpb.putProperties(PropertyKey.Name.S3A_ACCESS_KEY, "mycredential"); String result = String.format("{%s}", RpcSensitiveConfigMask.CREDENTIAL_FIELD_MASKER.maskObjects(null, obj.build())); Assert.assertEquals(true, result.contains("key1")); Assert.assertEquals(true, result.contains("value1")); Assert.assertEquals(true, result.contains("Masked")); Assert.assertEquals(true, result.contains(PropertyKey.Name.S3A_ACCESS_KEY)); Assert.assertEquals(false, result.contains("mycredential")); } { String astr = "astr"; String result = String.format("{%s}", RpcSensitiveConfigMask.CREDENTIAL_FIELD_MASKER.maskObjects(null, astr)); Assert.assertEquals(false, result.contains("mycredential")); Assert.assertEquals(false, result.contains("Masked")); Assert.assertEquals(true, result.contains(astr)); } }
/**
 * Subtracts {@code that}'s counts from this instance in place — both the
 * namespace/storage-space counters and the per-storage-type counters.
 *
 * @param that the counts to subtract
 * @return {@code this}, for call chaining
 */
public QuotaCounts subtract(QuotaCounts that) {
  // modify(...) presumably returns a (possibly copied) counter with the lambda
  // applied — TODO confirm against the modify() implementation.
  nsSsCounts = modify(nsSsCounts, ec -> ec.subtract(that.nsSsCounts));
  tsCounts = modify(tsCounts, ec -> ec.subtract(that.tsCounts));
  return this;
}
@Test
public void testSubtract() throws Exception {
  // Subtracting non-zero counts from an all-zero instance must yield the
  // negated values across namespace, storage space and every storage type.
  QuotaCounts qc1 = new QuotaCounts.Builder().build();
  QuotaCounts qc2 = new QuotaCounts.Builder().nameSpace(1).storageSpace(512)
      .typeSpaces(5).build();
  qc1.subtract(qc2);
  assertEquals(-1, qc1.getNameSpace());
  assertEquals(-512, qc1.getStorageSpace());
  for (StorageType type : StorageType.values()) {
    assertEquals(-5, qc1.getTypeSpace(type));
  }
}
/** Returns the HTTP method this endpoint uses: {@code PUT}. */
@Override
public String getHttpMethod() {
  return HttpMethods.PUT;
}
@Test
public void testGetHttpMethod() {
  // The manifest pusher must upload via HTTP PUT.
  Assert.assertEquals("PUT", testManifestPusher.getHttpMethod());
}
/**
 * Converts SDF-based Read expansions into primitive Read transforms unless the
 * pipeline options explicitly opt into SDF-based reads. The deprecated-read
 * experiments take precedence: when either is set, conversion happens even if
 * an SDF-read experiment is also set.
 */
public static void convertReadBasedSplittableDoFnsToPrimitiveReadsIfNecessary(Pipeline pipeline) {
  boolean sdfReadRequested =
      ExperimentalOptions.hasExperiment(pipeline.getOptions(), "use_sdf_read")
          || ExperimentalOptions.hasExperiment(pipeline.getOptions(), "use_unbounded_sdf_wrapper");
  boolean deprecatedReadRequested =
      ExperimentalOptions.hasExperiment(pipeline.getOptions(), "beam_fn_api_use_deprecated_read")
          || ExperimentalOptions.hasExperiment(pipeline.getOptions(), "use_deprecated_read");
  if (!sdfReadRequested || deprecatedReadRequested) {
    convertReadBasedSplittableDoFnsToPrimitiveReads(pipeline);
  }
}
@Test
public void testConvertIsSkippedWhenUsingUseSDFRead() {
  PipelineOptions pipelineOptions = PipelineOptionsFactory.create();
  pipelineOptions.setRunner(CrashingRunner.class);
  // Opting into "use_sdf_read" must leave the SDF expansion in place.
  ExperimentalOptions.addExperiment(
      pipelineOptions.as(ExperimentalOptions.class), "use_sdf_read");
  Pipeline sdfRead = Pipeline.create(pipelineOptions);
  sdfRead.apply(Read.from(new FakeBoundedSource()));
  sdfRead.apply(Read.from(new BoundedToUnboundedSourceAdapter<>(new FakeBoundedSource())));
  SplittableParDo.convertReadBasedSplittableDoFnsToPrimitiveReadsIfNecessary(sdfRead);
  // Verify no primitive Read transform appears anywhere in the pipeline graph.
  sdfRead.traverseTopologically(
      new Defaults() {
        @Override
        public void visitPrimitiveTransform(Node node) {
          assertThat(
              node.getTransform(),
              not(instanceOf(SplittableParDo.PrimitiveBoundedRead.class)));
          assertThat(
              node.getTransform(),
              not(instanceOf(SplittableParDo.PrimitiveUnboundedRead.class)));
        }
      });
}
/**
 * Parses the given date string with the given format into a {@link DateTime}.
 *
 * @param dateStr    date string to parse
 * @param dateFormat format to parse with
 * @return a new {@link DateTime} built from the string and format
 */
public static DateTime parse(CharSequence dateStr, DateFormat dateFormat) {
  return new DateTime(dateStr, dateFormat);
}
@Test
public void parseToDateTimeTest1() {
  // The same calendar date written with different separators
  // (dash, slash, dot, Chinese date markers) must parse to equal DateTimes.
  final String dateStr1 = "2017-02-01";
  final String dateStr2 = "2017/02/01";
  final String dateStr3 = "2017.02.01";
  final String dateStr4 = "2017年02月01日";
  final DateTime dt1 = DateUtil.parse(dateStr1);
  final DateTime dt2 = DateUtil.parse(dateStr2);
  final DateTime dt3 = DateUtil.parse(dateStr3);
  final DateTime dt4 = DateUtil.parse(dateStr4);
  assertEquals(dt1, dt2);
  assertEquals(dt2, dt3);
  assertEquals(dt3, dt4);
}
/**
 * Distributes {@code elements} round-robin into {@code numBuckets} lists.
 *
 * @param elements   the elements to distribute; assignment follows iteration order
 * @param numBuckets the number of buckets, must be positive
 * @param <T>        element type
 * @return the non-empty buckets (a bucket that receives no element is absent)
 * @throws IllegalArgumentException if {@code numBuckets} is not positive
 */
public static <T> Collection<List<T>> partition(Collection<T> elements, int numBuckets) {
    if (numBuckets <= 0) {
        // Previously a non-positive bucket count surfaced as an ArithmeticException
        // (division by zero) or silently produced negative bucket indices; fail fast
        // with a clear message instead.
        throw new IllegalArgumentException("numBuckets must be positive, but was " + numBuckets);
    }
    Map<Integer, List<T>> buckets = newHashMapWithExpectedSize(numBuckets);
    // Pre-size each bucket with the average occupancy to limit list resizing.
    int initialCapacity = elements.size() / numBuckets;
    int index = 0;
    for (T element : elements) {
        int bucket = index % numBuckets;
        buckets.computeIfAbsent(bucket, key -> new ArrayList<>(initialCapacity)).add(element);
        index++;
    }
    return buckets.values();
}
@Test
void testPartition() {
    // 4 elements into 4 buckets must yield exactly one element per bucket.
    List<Integer> list = Arrays.asList(1, 2, 3, 4);
    Collection<List<Integer>> partitioned = CollectionUtil.partition(list, 4);
    // Fixed typo in the assertion description ("into the an" -> "into an").
    assertThat(partitioned)
            .as("List partitioned into an incorrect number of partitions")
            .hasSize(4);
    assertThat(partitioned).allSatisfy(partition -> assertThat(partition).hasSize(1));
}
/**
 * Returns the detected Flink version string, computing it once and caching it
 * in {@code VERSION}. Falls back to {@code FLINK_UNKNOWN_VERSION} when the
 * version cannot be determined from the jar or detection throws.
 */
public static String version() {
  if (null == VERSION.get()) {
    String detectedVersion;
    try {
      detectedVersion = versionFromJar();
      // use unknown version in case exact implementation version can't be found from the jar
      // (this can happen if the DataStream class appears multiple times in the same classpath
      // such as with shading)
      detectedVersion = detectedVersion != null ? detectedVersion : FLINK_UNKNOWN_VERSION;
    } catch (Exception e) {
      // Any detection failure degrades to the unknown-version sentinel.
      detectedVersion = FLINK_UNKNOWN_VERSION;
    }
    VERSION.set(detectedVersion);
  }
  return VERSION.get();
}
@Test
public void testVersion() {
  // Pinned to the Flink version this module is built against;
  // must be bumped together with dependency upgrades.
  assertThat(FlinkPackage.version()).isEqualTo("1.20.0");
}
/**
 * Adds JSR-303 {@code @DecimalMin}/{@code @DecimalMax} annotations for the
 * schema's "minimum"/"maximum" keywords when JSR-303 annotation generation is
 * enabled and the field's type supports them. Returns the field unchanged
 * otherwise.
 */
@Override
public JFieldVar apply(String nodeName, JsonNode node, JsonNode parent, JFieldVar field, Schema currentSchema) {
    if (ruleFactory.getGenerationConfig().isIncludeJsr303Annotations() && isApplicableType(field)) {
        // The min and max cases only differ in the schema keyword and the
        // annotation pair; share one helper instead of duplicating the logic.
        annotateBound(field, node, "minimum", DecimalMin.class, javax.validation.constraints.DecimalMin.class);
        annotateBound(field, node, "maximum", DecimalMax.class, javax.validation.constraints.DecimalMax.class);
    }
    return field;
}

/**
 * Annotates {@code field} with the jakarta or javax flavor of the bound
 * annotation (selected by configuration) when {@code node} carries
 * {@code property}, using the keyword's textual value as the annotation value.
 */
private void annotateBound(JFieldVar field, JsonNode node, String property,
        Class<? extends Annotation> jakartaAnnotation, Class<? extends Annotation> javaxAnnotation) {
    if (node.has(property)) {
        final Class<? extends Annotation> annotationClass =
                ruleFactory.getGenerationConfig().isUseJakartaValidation() ? jakartaAnnotation : javaxAnnotation;
        JAnnotationUse annotation = field.annotate(annotationClass);
        annotation.param("value", node.get(property).asText());
    }
}
@Test
public void testMinimum() {
  when(config.isIncludeJsr303Annotations()).thenReturn(true);
  // A random value verifies the exact text is carried into the annotation param.
  final String minValue = Integer.toString(new Random().nextInt());
  when(subNode.asText()).thenReturn(minValue);
  when(node.get("minimum")).thenReturn(subNode);
  when(fieldVar.annotate(decimalMinClass)).thenReturn(annotationMin);
  when(node.has("minimum")).thenReturn(true);
  when(fieldVar.type().boxify().fullName()).thenReturn(fieldClass.getTypeName());
  JFieldVar result = rule.apply("node", node, null, fieldVar, null);
  // The rule always returns the same field instance it was given.
  assertSame(fieldVar, result);
  // DecimalMin is applied only when the field type is applicable
  // (isApplicable presumably comes from a parameterized fixture — see class setup).
  verify(fieldVar, times(isApplicable ? 1 : 0)).annotate(decimalMinClass);
  verify(annotationMin, times(isApplicable ? 1 : 0)).param("value", minValue);
  // No "maximum" keyword was stubbed, so DecimalMax must never be applied.
  verify(fieldVar, never()).annotate(decimalMaxClass);
  verify(annotationMax, never()).param(eq("value"), anyString());
}
/**
 * Loads properties from {@code fileName} via the given class loaders with both
 * boolean options of the four-argument overload disabled; see
 * {@code loadProperties(Set, String, boolean, boolean)} for their meaning.
 */
public static Properties loadProperties(Set<ClassLoader> classLoaders, String fileName) {
  return loadProperties(classLoaders, fileName, false, false);
}
@Test
void testLoadPropertiesMultiFileNotRootPath() throws Exception {
  // Loading a non-root SPI descriptor path must merge entries from every
  // matching resource on the classpath into one Properties object.
  Properties p = ConfigUtils.loadProperties(
      Collections.emptySet(),
      "META-INF/dubbo/internal/org.apache.dubbo.common.status.StatusChecker",
      true);
  Properties expected = new Properties();
  expected.put("memory", "org.apache.dubbo.common.status.support.MemoryStatusChecker");
  expected.put("load", "org.apache.dubbo.common.status.support.LoadStatusChecker");
  expected.put("aa", "12");
  assertEquals(expected, p);
}
/**
 * Returns an order value {@code offset} steps higher in precedence (i.e.
 * numerically smaller) than {@code orderSource}, clamped at
 * {@link Ordered#HIGHEST_PRECEDENCE}. A {@code null} source is treated as
 * {@link Ordered#LOWEST_PRECEDENCE}.
 *
 * @throws IllegalArgumentException if {@code offset} is not positive
 */
public static int higher(Integer orderSource, int offset) {
    if (offset <= 0) {
        throw new IllegalArgumentException("offset must be greater than 0");
    }
    int base = (orderSource != null) ? orderSource : Ordered.LOWEST_PRECEDENCE;
    // Clamp when shifting would pass the highest precedence.
    return (base < Ordered.HIGHEST_PRECEDENCE + offset)
            ? Ordered.HIGHEST_PRECEDENCE
            : base - offset;
}
@Test
public void test_higher() {
  // Plain shift: one step higher precedence.
  assertThat(OrderUtil.higher(1, 1)).isEqualTo(0);
  // Shifts past the highest precedence are clamped.
  assertThat(OrderUtil.higher(Ordered.HIGHEST_PRECEDENCE + 1, 2)).isEqualTo(Ordered.HIGHEST_PRECEDENCE);
  assertThat(OrderUtil.higher(Ordered.HIGHEST_PRECEDENCE, 1)).isEqualTo(Ordered.HIGHEST_PRECEDENCE);
  // Non-positive offsets are rejected.
  Assertions.assertThrows(IllegalArgumentException.class, () -> OrderUtil.higher(1, -1));
}
static int getQueryFlags(Query query) { int flags = 0; flags |= query.properties().getBoolean(Model.ESTIMATE) ? 0x00000080 : 0; flags |= (query.getRanking().getFreshness() != null) ? 0x00002000 : 0; flags |= 0x00008000; flags |= query.getNoCache() ? 0x00010000 : 0; flags |= 0x00020000; // was PARALLEL flags |= query.properties().getBoolean(Ranking.RANKFEATURES,false) ? 0x00040000 : 0; return flags; }
@Test
void testGetQueryFlags() {
  // Base flags 0x8000 | 0x20000 are always present.
  assertEquals(0x00028000, StreamingVisitor.getQueryFlags(new Query("/?query=test")));
  // hitcountestimate adds 0x80, rankfeatures adds 0x40000.
  assertEquals(0x00028080, StreamingVisitor.getQueryFlags(new Query("/?query=test&hitcountestimate=true")));
  assertEquals(0x00068000, StreamingVisitor.getQueryFlags(new Query("/?query=test&rankfeatures=true")));
  assertEquals(0x00068080, StreamingVisitor.getQueryFlags(new Query("/?query=test&hitcountestimate=true&rankfeatures=true")));
  Query query = new Query("/?query=test");
  assertEquals(0x00028000, StreamingVisitor.getQueryFlags(query));
  // noCache adds 0x10000; a freshness setting adds 0x2000.
  query.setNoCache(true);
  assertEquals(0x00038000, StreamingVisitor.getQueryFlags(query));
  query.getRanking().setFreshness("now");
  assertEquals(0x0003a000, StreamingVisitor.getQueryFlags(query));
}
/**
 * Returns {@code url} without a single trailing {@code '/'}, if one is present;
 * otherwise returns {@code url} unchanged. Only one slash is stripped.
 */
public static String removeTrailingSlash(String url) {
    return url.endsWith("/") ? url.substring(0, url.length() - 1) : url;
}
@Test
void testRemoveTrailingSlash() {
  // A single trailing slash is stripped from the URL.
  final String url = "http://localhost/";
  final String result = FeedUtils.removeTrailingSlash(url);
  Assertions.assertEquals("http://localhost", result);
}
/**
 * Notifies every registered start handler that the server has started,
 * invoking them in registration order.
 */
public void notifyStart() {
  LOG.debug("Notify {} handlers...", ServerStartHandler.class.getSimpleName());
  for (ServerStartHandler h : startHandlers) {
    h.onServerStart(server);
  }
}
@Test
public void notifyOnStart() {
  ServerLifecycleNotifier notifier =
      new ServerLifecycleNotifier(server, new ServerStartHandler[] {start1, start2}, new ServerStopHandler[] {stop2});
  notifier.notifyStart();
  // Every registered start handler fires; stop handlers must stay untouched.
  verify(start1).onServerStart(server);
  verify(start2).onServerStart(server);
  verify(stop1, never()).onServerStop(server);
}
/**
 * Serves a namespace's configuration rendered in properties format.
 * Delegates to {@code queryConfig} and returns 404 when no config is found.
 *
 * @param dataCenter  optional client data center (passed through to config resolution)
 * @param clientIp    optional client ip (passed through to config resolution)
 * @param clientLabel optional client label (passed through to config resolution)
 */
@GetMapping(value = "/{appId}/{clusterName}/{namespace:.+}")
public ResponseEntity<String> queryConfigAsProperties(@PathVariable String appId,
    @PathVariable String clusterName,
    @PathVariable String namespace,
    @RequestParam(value = "dataCenter", required = false) String dataCenter,
    @RequestParam(value = "ip", required = false) String clientIp,
    @RequestParam(value = "label", required = false) String clientLabel,
    HttpServletRequest request,
    HttpServletResponse response) throws IOException {
  String result = queryConfig(ConfigFileOutputFormat.PROPERTIES, appId, clusterName, namespace,
      dataCenter, clientIp, clientLabel, request, response);
  if (result == null) {
    return NOT_FOUND_RESPONSE;
  }
  return new ResponseEntity<>(result, propertiesResponseHeaders, HttpStatus.OK);
}
@Test
public void testQueryConfigAsProperties() throws Exception {
  String someKey = "someKey";
  String someValue = "someValue";
  String anotherKey = "anotherKey";
  String anotherValue = "anotherValue";
  String someWatchKey = "someWatchKey";
  String anotherWatchKey = "anotherWatchKey";
  Set<String> watchKeys = Sets.newHashSet(someWatchKey, anotherWatchKey);
  String cacheKey = configFileController
      .assembleCacheKey(ConfigFileController.ConfigFileOutputFormat.PROPERTIES, someAppId,
          someClusterName, someNamespace, someDataCenter);
  Map<String, String> configurations =
      ImmutableMap.of(someKey, someValue, anotherKey, anotherValue);
  ApolloConfig someApolloConfig = mock(ApolloConfig.class);
  when(someApolloConfig.getConfigurations()).thenReturn(configurations);
  when(configController
      .queryConfig(someAppId, someClusterName, someNamespace, someDataCenter, "-1",
          someClientIp, someClientLabel, null, someRequest, someResponse)).thenReturn(someApolloConfig);
  when(watchKeysUtil
      .assembleAllWatchKeys(someAppId, someClusterName, someNamespace, someDataCenter))
      .thenReturn(watchKeys);
  ResponseEntity<String> response = configFileController
      .queryConfigAsProperties(someAppId, someClusterName, someNamespace, someDataCenter,
          someClientIp, someClientLabel, someRequest, someResponse);
  // Both watch keys must be indexed against the cache key, in both directions.
  assertEquals(2, watchedKeys2CacheKey.size());
  assertEquals(2, cacheKey2WatchedKeys.size());
  assertTrue(watchedKeys2CacheKey.containsEntry(someWatchKey, cacheKey));
  assertTrue(watchedKeys2CacheKey.containsEntry(anotherWatchKey, cacheKey));
  assertTrue(cacheKey2WatchedKeys.containsEntry(cacheKey, someWatchKey));
  assertTrue(cacheKey2WatchedKeys.containsEntry(cacheKey, anotherWatchKey));
  assertEquals(HttpStatus.OK, response.getStatusCode());
  // Body is rendered as key=value properties lines.
  assertTrue(response.getBody().contains(String.format("%s=%s", someKey, someValue)));
  assertTrue(response.getBody().contains(String.format("%s=%s", anotherKey, anotherValue)));
  // Second identical request must be served from the cache: equal response and
  // only one call through to the underlying config controller.
  ResponseEntity<String> anotherResponse = configFileController
      .queryConfigAsProperties(someAppId, someClusterName, someNamespace, someDataCenter,
          someClientIp, someClientLabel, someRequest, someResponse);
  assertEquals(response, anotherResponse);
  verify(configController, times(1))
      .queryConfig(someAppId, someClusterName, someNamespace, someDataCenter, "-1",
          someClientIp, someClientLabel, null, someRequest, someResponse);
}
/**
 * Executes {@code query} against the partitions selected by {@code target}.
 * ALL_NODES fans out over every partition, LOCAL_NODE over this member's
 * partitions, and PARTITION_OWNER either runs directly on a single partition's
 * owner or fans out over the explicitly given partition set.
 */
@SuppressWarnings("unchecked")
@Override
public Result execute(Query query, Target target) {
  Query adjustedQuery = adjustQuery(query);
  switch (target.mode()) {
    case ALL_NODES:
      // Fan out over every partition in the cluster.
      adjustedQuery = Query.of(adjustedQuery).partitionIdSet(getAllPartitionIds()).build();
      return runOnGivenPartitions(adjustedQuery, adjustedQuery.getPartitionIdSet(), TargetMode.ALL_NODES);
    case LOCAL_NODE:
      // Restrict to the partitions owned by this member.
      adjustedQuery = Query.of(adjustedQuery).partitionIdSet(getLocalPartitionIds()).build();
      return runOnGivenPartitions(adjustedQuery, adjustedQuery.getPartitionIdSet(), TargetMode.LOCAL_NODE);
    case PARTITION_OWNER:
      // solePartition() presumably returns a negative value when more than one
      // partition is targeted — TODO confirm against PartitionIdSet.
      int solePartition = target.partitions().solePartition();
      adjustedQuery = Query.of(adjustedQuery).partitionIdSet(target.partitions()).build();
      if (solePartition >= 0) {
        // Exactly one partition targeted: run directly on it.
        return runOnGivenPartition(adjustedQuery, solePartition);
      } else {
        // Multiple explicit partitions: fan out over the given set.
        return runOnGivenPartitions(adjustedQuery, adjustedQuery.getPartitionIdSet(), TargetMode.ALL_NODES);
      }
    default:
      throw new IllegalArgumentException("Illegal target " + target);
  }
}
@Test
public void runQueryOnAllPartitions_key() {
  Predicate<Object, Object> predicate = Predicates.equal("this", value);
  Query query = Query.of().mapName(map.getName()).predicate(predicate).iterationType(KEY).build();
  QueryResult result = queryEngine.execute(query, Target.ALL_NODES);
  // Exactly one entry matches, and with KEY iteration only keys are returned.
  assertEquals(1, result.size());
  assertEquals(key, toObject(result.iterator().next().getKey()));
}
/**
 * Returns the SCM info for {@code component}, caching lookups per component.
 * Only FILE components carry SCM info; everything else yields empty.
 */
@Override
public Optional<ScmInfo> getScmInfo(Component component) {
  requireNonNull(component, "Component cannot be null");
  if (component.getType() == Component.Type.FILE) {
    return scmInfoCache.computeIfAbsent(component, this::getScmInfoForComponent);
  }
  return Optional.empty();
}
@Test
public void read_from_report() {
  addChangesetInReport("john", DATE_1, "rev-1");
  ScmInfo scmInfo = underTest.getScmInfo(FILE).get();
  assertThat(scmInfo.getAllChangesets()).hasSize(1);
  Changeset changeset = scmInfo.getChangesetForLine(1);
  assertThat(changeset.getAuthor()).isEqualTo("john");
  assertThat(changeset.getDate()).isEqualTo(DATE_1);
  assertThat(changeset.getRevision()).isEqualTo("rev-1");
  assertThat(logTester.logs(TRACE)).containsOnly("Reading SCM info from report for file 'FILE_KEY'");
  // Report data is authoritative: no fallback to the DB, file statuses or diff.
  verifyNoInteractions(dbLoader);
  verifyNoInteractions(fileStatuses);
  verifyNoInteractions(diff);
}
public static List<AclEntry> filterDefaultAclEntries( List<AclEntry> existingAcl) throws AclException { ArrayList<AclEntry> aclBuilder = Lists.newArrayListWithCapacity(MAX_ENTRIES); for (AclEntry existingEntry: existingAcl) { if (existingEntry.getScope() == DEFAULT) { // Default entries sort after access entries, so we can exit early. break; } aclBuilder.add(existingEntry); } return buildAndValidateAcl(aclBuilder); }
@Test
public void testFilterDefaultAclEntriesUnchanged() throws AclException {
  // An ACL containing only ACCESS-scoped entries must pass through unchanged.
  List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
      .add(aclEntry(ACCESS, USER, ALL))
      .add(aclEntry(ACCESS, USER, "bruce", ALL))
      .add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
      .add(aclEntry(ACCESS, GROUP, "sales", ALL))
      .add(aclEntry(ACCESS, MASK, ALL))
      .add(aclEntry(ACCESS, OTHER, NONE))
      .build();
  assertEquals(existing, filterDefaultAclEntries(existing));
}
/**
 * Returns the configured client id, or — when none is configured — a generated
 * id of the form {@code adminclient-<n>} with a monotonically increasing n.
 */
static String generateClientId(AdminClientConfig config) {
    String configuredId = config.getString(AdminClientConfig.CLIENT_ID_CONFIG);
    if (configuredId.isEmpty()) {
        return "adminclient-" + ADMIN_CLIENT_ID_SEQUENCE.getAndIncrement();
    }
    return configuredId;
}
@Test
public void testGenerateClientId() {
  Set<String> ids = new HashSet<>();
  // With an empty configured id, generated ids must be unique across calls.
  for (int i = 0; i < 10; i++) {
    String id = KafkaAdminClient.generateClientId(newConfMap(AdminClientConfig.CLIENT_ID_CONFIG, ""));
    assertFalse(ids.contains(id), "Got duplicate id " + id);
    ids.add(id);
  }
  // A non-empty configured id is returned verbatim.
  assertEquals("myCustomId", KafkaAdminClient.generateClientId(newConfMap(AdminClientConfig.CLIENT_ID_CONFIG, "myCustomId")));
}
/**
 * Computes the timestamp range of the given index and wraps it — together with
 * the calculation timestamp and duration — into a {@code MongoIndexRange}.
 * Fails fast when the index has not recovered to a healthy state.
 */
@Override
public IndexRange calculateRange(String index) {
  // An unhealthy index would yield incomplete stats, so abort up front.
  checkIfHealthy(indices.waitForRecovery(index),
      (status) -> new RuntimeException("Unable to calculate range for index <" + index + ">, index is unhealthy: " + status));
  final DateTime now = DateTime.now(DateTimeZone.UTC);
  final Stopwatch sw = Stopwatch.createStarted();
  final IndexRangeStats stats = indices.indexRangeStatsOfIndex(index);
  // Saturating cast keeps an absurdly long run from overflowing the int field.
  final int duration = Ints.saturatedCast(sw.stop().elapsed(TimeUnit.MILLISECONDS));
  LOG.info("Calculated range of [{}] in [{}ms].", index, duration);
  return MongoIndexRange.create(index, stats.min(), stats.max(), now, duration, stats.streamIds());
}
// The exception thrown while waiting for recovery must propagate unchanged.
@Test(expected = ElasticsearchException.class)
public void calculateRangeFailsIfIndexIsNotHealthy() throws Exception {
  final String index = "graylog";
  when(indices.waitForRecovery(index)).thenThrow(new ElasticsearchException("TEST"));
  indexRangeService.calculateRange(index);
}
/**
 * Returns {@code string} with its first character lower-cased using
 * {@link Locale#ROOT} (locale-independent). {@code null} and empty strings are
 * returned as-is.
 *
 * @param string the string to decapitalize, may be {@code null}
 * @return the decapitalized string, or the input when {@code null} or empty
 */
public static String decapitalize(String string) {
    if ( string == null ) {
        return null;
    }
    if ( string.isEmpty() ) {
        // Guard: substring(0, 1) on an empty string would throw
        // StringIndexOutOfBoundsException.
        return string;
    }
    return string.substring( 0, 1 ).toLowerCase( Locale.ROOT ) + string.substring( 1 );
}
@Test
@DefaultLocale("en")
public void decapitalizeEnglish() {
  // With an English default locale the first letter is simply lower-cased.
  String international = Strings.decapitalize( "International" );
  assertThat( international ).isEqualTo( "international" );
}