focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Derives a JavaBeans-style property name from the given string.
 * <p>
 * Mirrors the Introspector decapitalization convention: a leading upper-case
 * letter is lowered only when the second character is not also upper case
 * (so "URL" and "XAxis" are left untouched).
 *
 * @param inputString the candidate name; may be {@code null}
 * @return the decapitalized property name, or {@code null} when the input is {@code null}
 */
public static String getValidPropertyName(String inputString) {
    if (inputString == null) {
        return null;
    }
    // Zero/one character: lower-case the whole thing.
    if (inputString.length() < 2) {
        return inputString.toLowerCase(Locale.US);
    }
    final boolean firstUpper = Character.isUpperCase(inputString.charAt(0));
    final boolean secondUpper = Character.isUpperCase(inputString.charAt(1));
    if (firstUpper && !secondUpper) {
        // Only the first letter changes; the rest is preserved verbatim.
        return inputString.substring(0, 1).toLowerCase(Locale.US) + inputString.substring(1);
    }
    return inputString;
}
// Exercises the JavaBeans decapitalization rules: all-caps and
// second-letter-upper-case names are preserved, otherwise the first letter is lowered.
@Test
void testGetValidPropertyName() {
    assertEquals("eMail", getValidPropertyName("eMail")); //$NON-NLS-1$ //$NON-NLS-2$
    assertEquals("firstName", getValidPropertyName("firstName")); //$NON-NLS-1$ //$NON-NLS-2$
    assertEquals("URL", getValidPropertyName("URL")); //$NON-NLS-1$ //$NON-NLS-2$
    assertEquals("XAxis", getValidPropertyName("XAxis")); //$NON-NLS-1$ //$NON-NLS-2$
    assertEquals("a", getValidPropertyName("a")); //$NON-NLS-1$ //$NON-NLS-2$
    assertEquals("b", getValidPropertyName("B")); //$NON-NLS-1$ //$NON-NLS-2$
    assertEquals("yaxis", getValidPropertyName("Yaxis")); //$NON-NLS-1$ //$NON-NLS-2$
    assertEquals("i_PARAM_INT_1", getValidPropertyName("I_PARAM_INT_1")); //$NON-NLS-1$ //$NON-NLS-2$
    assertEquals("_fred", getValidPropertyName("_fred")); //$NON-NLS-1$ //$NON-NLS-2$
    assertEquals("accountType", getValidPropertyName("AccountType")); //$NON-NLS-1$ //$NON-NLS-2$
}
@Description("Inverse of Beta cdf given a, b parameters and probability")
@ScalarFunction
@SqlType(StandardTypes.DOUBLE)
public static double inverseBetaCdf(
        @SqlType(StandardTypes.DOUBLE) double a,
        @SqlType(StandardTypes.DOUBLE) double b,
        @SqlType(StandardTypes.DOUBLE) double p)
{
    // Validate arguments up front so callers see INVALID_FUNCTION_ARGUMENT with a
    // clear message instead of an opaque error from the distribution library.
    // Note: a NaN p fails the range check and is rejected here as well.
    checkCondition(p >= 0 && p <= 1, INVALID_FUNCTION_ARGUMENT, "inverseBetaCdf Function: p must be in the interval [0, 1]");
    checkCondition(a > 0, INVALID_FUNCTION_ARGUMENT, "inverseBetaCdf Function: a must be > 0");
    checkCondition(b > 0, INVALID_FUNCTION_ARGUMENT, "inverseBetaCdf Function: b must be > 0");
    // The null RNG is fine here: only the inverse CDF is evaluated, no sampling occurs.
    BetaDistribution distribution = new BetaDistribution(null, a, b, BetaDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
    return distribution.inverseCumulativeProbability(p);
}
// Covers the boundary probabilities (0 and 1), two interior values, and each
// argument-validation failure message of inverse_beta_cdf.
@Test
public void testInverseBetaCdf() {
    assertFunction("inverse_beta_cdf(3, 3.6, 0.0)", DOUBLE, 0.0);
    assertFunction("inverse_beta_cdf(3, 3.6, 1.0)", DOUBLE, 1.0);
    assertFunction("inverse_beta_cdf(3, 3.6, 0.3)", DOUBLE, 0.3469675485440618);
    assertFunction("inverse_beta_cdf(3, 3.6, 0.95)", DOUBLE, 0.7600272463100223);
    assertInvalidFunction("inverse_beta_cdf(0, 3, 0.5)", "inverseBetaCdf Function: a must be > 0");
    assertInvalidFunction("inverse_beta_cdf(3, 0, 0.5)", "inverseBetaCdf Function: b must be > 0");
    assertInvalidFunction("inverse_beta_cdf(3, 5, -0.1)", "inverseBetaCdf Function: p must be in the interval [0, 1]");
    assertInvalidFunction("inverse_beta_cdf(3, 5, 1.1)", "inverseBetaCdf Function: p must be in the interval [0, 1]");
}
/**
 * Asynchronously notifies every registered publish interceptor of an MQTT PUBLISH.
 * Reference counting: the message is retained here (before the async hand-off) and
 * released in the task's finally block; each handler receives its own
 * retainedDuplicate so it may hold the payload beyond this call.
 */
@Override
public void notifyTopicPublished(final MqttPublishMessage msg, final String clientID, final String username) {
    // Retain before scheduling: the caller may release its reference before the task runs.
    msg.retain();
    executor.execute(() -> {
        try {
            int messageId = msg.variableHeader().messageId();
            String topic = msg.variableHeader().topicName();
            for (InterceptHandler handler : handlers.get(InterceptPublishMessage.class)) {
                LOG.debug("Notifying MQTT PUBLISH message to interceptor. CId={}, messageId={}, topic={}, " + "interceptorId={}", clientID, messageId, topic, handler.getID());
                // Sending to the outside, make a retainedDuplicate.
                handler.onPublish(new InterceptPublishMessage(msg.retainedDuplicate(), clientID, username));
            }
        } finally {
            // Balances the retain() above even if a handler throws.
            ReferenceCountUtil.release(msg);
        }
    });
}
// Publishes one message through the interceptor and waits (interval()) for the async
// notification; the interceptor manages its own retain/release, so the test releases
// only its own payload reference at the end.
@Test
public void testNotifyTopicPublished() throws Exception {
    final ByteBuf payload = Unpooled.copiedBuffer("Hello".getBytes(UTF_8));
    // Internal function call, will not release buffers.
    interceptor.notifyTopicPublished(
        MqttMessageBuilders.publish().qos(MqttQoS.AT_MOST_ONCE)
            .payload(payload).build(), "cli1234", "cli1234");
    interval();
    assertEquals(60, n.get());
    payload.release();
}
@Udf(description = "Converts a TIMESTAMP value from one timezone to another")
public Timestamp convertTz(
    @UdfParameter(
        description = "The TIMESTAMP value.") final Timestamp timestamp,
    @UdfParameter(
        description = "The fromTimeZone in java.util.TimeZone ID format. For example: \"UTC\","
            + " \"America/Los_Angeles\", \"PST\", \"Europe/London\"") final String fromTimeZone,
    @UdfParameter(
        description = "The toTimeZone in java.util.TimeZone ID format. For example: \"UTC\","
            + " \"America/Los_Angeles\", \"PST\", \"Europe/London\"") final String toTimeZone
) {
    // SQL null semantics: any null argument yields a null result.
    if (timestamp == null || fromTimeZone == null || toTimeZone == null) {
        return null;
    }
    try {
        // Shift by the difference between the two zones' UTC offsets at this instant.
        final long epochMillis = timestamp.getTime();
        final int toOffset = TimeZone.getTimeZone(ZoneId.of(toTimeZone)).getOffset(epochMillis);
        final int fromOffset = TimeZone.getTimeZone(ZoneId.of(fromTimeZone)).getOffset(epochMillis);
        return new Timestamp(epochMillis + (long) toOffset - fromOffset);
    } catch (DateTimeException e) {
        // ZoneId.of rejects unknown/malformed zone ids.
        throw new KsqlFunctionException("Invalid time zone: " + e.getMessage());
    }
}
// A null fromTimeZone must propagate as a null result (SQL null semantics).
@Test
public void shouldReturnNullForNullFromTimeZone() {
    // When:
    final Object result = udf.convertTz(Timestamp.valueOf("2000-01-01 00:00:00"), null, "America/New_York");
    // Then:
    assertNull(result);
}
/**
 * Writes a MySQL length-encoded byte string: the length as a length-encoded
 * integer followed by the raw bytes.
 *
 * @param value bytes to write; must not be null
 */
public void writeBytesLenenc(final byte[] value) {
    if (0 == value.length) {
        // Empty value is a single 0x00 length byte with no payload.
        byteBuf.writeByte(0);
        return;
    }
    writeIntLenenc(value.length);
    byteBuf.writeBytes(value);
}
// An empty byte array must be encoded as exactly one zero length byte.
@Test
void assertWriteBytesLenencWithEmpty() {
    new MySQLPacketPayload(byteBuf, StandardCharsets.UTF_8).writeBytesLenenc("".getBytes());
    verify(byteBuf).writeByte(0);
}
/**
 * Loads the evaluated values of one parameter across all iterations of a foreach step.
 *
 * @param foreachInlineWorkflowId inline workflow id of the foreach step
 * @param stepId                  step id within that workflow
 * @param paramName               name of the evaluated parameter to fetch
 * @return map from iteration/row id to the evaluated payload; rows with a null
 *         payload are skipped. Retried on transient failures and logged on error.
 */
public Map<Long, String> getEvaluatedResultsFromForeach(
    String foreachInlineWorkflowId, String stepId, String paramName) {
  Map<Long, String> idParams = new HashMap<>();
  return withMetricLogError(
      () ->
          withRetryableQuery(
              GET_EVALUATED_RESULTS_FROM_FOREACH_TEMPLATE,
              // Bind order must match the template's placeholders: param, workflow, step.
              stmt -> {
                int idx = 0;
                stmt.setString(++idx, paramName);
                stmt.setString(++idx, foreachInlineWorkflowId);
                stmt.setString(++idx, stepId);
              },
              result -> {
                while (result.next()) {
                  String val = result.getString(PAYLOAD_COLUMN);
                  if (val != null) {
                    idParams.put(result.getLong(ID_COLUMN), val);
                  }
                }
                return idParams;
              }),
      "getEvaluatedResultsFromForeach",
      "Failed to get the evaluated results of param [{}] from foreach step [{}][{}]",
      paramName,
      foreachInlineWorkflowId,
      stepId);
}
// Persists a succeeded step-instance fixture, then verifies the evaluated foreach
// result for "sleep_seconds" is read back keyed by iteration id.
@Test
public void testGetEvaluatedResultsFromForeach() throws Exception {
    si = loadObject("fixtures/instances/sample-step-instance-succeeded.json", StepInstance.class);
    si.setStepAttemptId(10);
    stepDao.insertOrUpsertStepInstance(si, false);
    assertEquals(
        Collections.singletonMap(1L, "15"),
        stepDao.getEvaluatedResultsFromForeach("sample-dag-test-3", "job1", "sleep_seconds"));
}
/**
 * Returns the comparator used to order {@link Schedulable}s under this policy.
 * NOTE(review): implementations are expected to define a consistent total order
 * (transitive, antisymmetric) — confirm against each policy's tests.
 */
public abstract Comparator<Schedulable> getComparator();
// The fair-share comparator must be transitive, otherwise sorting schedulables
// with it is undefined behavior.
@Test
public void testFairShareComparatorTransitivity() {
    final Comparator<Schedulable> comparator = new FairSharePolicy().getComparator();
    new FairShareComparatorTester(comparator).testTransitivity();
}
@Override public PermissionTicket createTicket(ResourceSet resourceSet, Set<String> scopes) { // check to ensure that the scopes requested are a subset of those in the resource set if (!scopeService.scopesMatch(resourceSet.getScopes(), scopes)) { throw new InsufficientScopeException("Scopes of resource set are not enough for requested permission."); } Permission perm = new Permission(); perm.setResourceSet(resourceSet); perm.setScopes(scopes); PermissionTicket ticket = new PermissionTicket(); ticket.setPermission(perm); ticket.setTicket(UUID.randomUUID().toString()); ticket.setExpiration(new Date(System.currentTimeMillis() + permissionExpirationSeconds * 1000L)); return repository.save(ticket); }
// Two tickets created from the same resource set and scopes must still carry
// distinct (random) ticket values.
@Test
public void testCreate_differentTicketsSameClient() {
    PermissionTicket perm1 = permissionService.createTicket(rs1, scopes1);
    PermissionTicket perm2 = permissionService.createTicket(rs1, scopes1);
    assertNotNull(perm1.getTicket());
    assertNotNull(perm2.getTicket());
    // make sure these are different from each other
    assertThat(perm1.getTicket(), not(equalTo(perm2.getTicket())));
}
/**
 * Returns the current session credentials, refreshing them first when less than
 * {@code REFRESH_INTERVAL} remains until their expiry (including when already expired).
 */
@Override
public AWSCredentials getCredentials() {
    Credentials current = credentials.get();
    // expiry < now + REFRESH_INTERVAL  <=>  less than REFRESH_INTERVAL left.
    if (clock.instant().plus(REFRESH_INTERVAL).isAfter(current.expiry)) {
        refresh();
        current = credentials.get();
    }
    return current;
}
// Drives the provider through its refresh states: still-valid credentials are
// served stale, refresh kicks in under 30 minutes to expiry, and long-expired
// credentials are refreshed on the next request.
@Test
void refreshes_credentials() throws IOException {
    Instant originalExpiry = clock.instant().plus(Duration.ofHours(12));
    writeCredentials(credentialsPath, originalExpiry);
    VespaAwsCredentialsProvider credentialsProvider = new VespaAwsCredentialsProvider(credentialsPath, clock);
    AWSCredentials credentials = credentialsProvider.getCredentials();
    assertExpiryEquals(originalExpiry, credentials);
    Instant updatedExpiry = clock.instant().plus(Duration.ofHours(24));
    writeCredentials(credentialsPath, updatedExpiry);
    // File updated, but old credentials still valid
    credentials = credentialsProvider.getCredentials();
    assertExpiryEquals(originalExpiry, credentials);
    // Credentials refreshes when it is < 30 minutes left until expiry
    clock.advance(Duration.ofHours(11).plus(Duration.ofMinutes(31)));
    credentials = credentialsProvider.getCredentials();
    assertExpiryEquals(updatedExpiry, credentials);
    // Credentials refreshes when they are long expired (since noone asked for them for a long time)
    updatedExpiry = clock.instant().plus(Duration.ofDays(12));
    writeCredentials(credentialsPath, updatedExpiry);
    clock.advance(Duration.ofDays(11));
    credentials = credentialsProvider.getCredentials();
    assertExpiryEquals(updatedExpiry, credentials);
}
/**
 * Renders one logging event as a single-line JSON object followed by the platform
 * line separator. Field order: nodename (if set), process, MDC entries (minus
 * exclusions), timestamp, severity, logger, message, optional stacktrace array.
 */
@Override
public String doLayout(ILoggingEvent event) {
    StringWriter output = new StringWriter();
    try (JsonWriter json = new JsonWriter(output)) {
        json.beginObject();
        // nodename is omitted entirely when not configured.
        if (!"".equals(nodeName)) {
            json.name("nodename").value(nodeName);
        }
        json.name("process").value(processKey);
        // MDC values are copied through, skipping nulls and excluded keys.
        for (Map.Entry<String, String> entry : event.getMDCPropertyMap().entrySet()) {
            if (entry.getValue() != null && !exclusions.contains(entry.getKey())) {
                json.name(entry.getKey()).value(entry.getValue());
            }
        }
        json
            .name("timestamp").value(DATE_FORMATTER.format(Instant.ofEpochMilli(event.getTimeStamp())))
            .name("severity").value(event.getLevel().toString())
            // Newlines in the message are normalized (replaced with \r) to keep one event per line.
            .name("logger").value(event.getLoggerName())
            .name("message").value(NEWLINE_REGEXP.matcher(event.getFormattedMessage()).replaceAll("\r"));
        IThrowableProxy tp = event.getThrowableProxy();
        if (tp != null) {
            // Walk the cause chain, indenting one level per cause.
            json.name("stacktrace").beginArray();
            int nbOfTabs = 0;
            while (tp != null) {
                printFirstLine(json, tp, nbOfTabs);
                render(json, tp, nbOfTabs);
                tp = tp.getCause();
                nbOfTabs++;
            }
            json.endArray();
        }
        json.endObject();
    } catch (Exception e) {
        e.printStackTrace();
        throw new IllegalStateException("BUG - fail to create JSON", e);
    }
    output.write(System.lineSeparator());
    return output.toString();
}
// MDC entries present at layout time must appear as top-level JSON fields;
// MDC is cleared in finally so other tests are unaffected.
@Test
public void test_log_with_MDC() {
    try {
        LoggingEvent event = new LoggingEvent("org.foundation.Caller", (Logger) LoggerFactory.getLogger("the.logger"), Level.WARN, "the message", null, new Object[0]);
        MDC.put("fromMdc", "foo");
        String log = underTest.doLayout(event);
        JsonLog json = new Gson().fromJson(log, JsonLog.class);
        assertThat(json.fromMdc).isEqualTo("foo");
    } finally {
        MDC.clear();
    }
}
@Override @DataPermission(enable = false) // 发送短信时,无需考虑数据权限 public Long sendSingleSmsToAdmin(String mobile, Long userId, String templateCode, Map<String, Object> templateParams) { // 如果 mobile 为空,则加载用户编号对应的手机号 if (StrUtil.isEmpty(mobile)) { AdminUserDO user = adminUserService.getUser(userId); if (user != null) { mobile = user.getMobile(); } } // 执行发送 return sendSingleSms(mobile, userId, UserTypeEnum.ADMIN.getValue(), templateCode, templateParams); }
// End-to-end mock test: a null mobile is resolved via adminUserService, the template,
// channel and log services are stubbed, and the producer must receive the rendered
// template parameters in declaration order.
@Test
public void testSendSingleSmsToAdmin() {
    // Prepare arguments.
    Long userId = randomLongId();
    String templateCode = randomString();
    Map<String, Object> templateParams = MapUtil.<String, Object>builder().put("code", "1234")
        .put("op", "login").build();
    // Mock adminUserService: supplies the mobile number looked up for the null-mobile case.
    AdminUserDO user = randomPojo(AdminUserDO.class, o -> o.setMobile("15601691300"));
    when(adminUserService.getUser(eq(userId))).thenReturn(user);
    // Mock SmsTemplateService: enabled template with two parameters.
    SmsTemplateDO template = randomPojo(SmsTemplateDO.class, o -> {
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
        o.setContent("验证码为{code}, 操作为{op}");
        o.setParams(Lists.newArrayList("code", "op"));
    });
    when(smsTemplateService.getSmsTemplateByCodeFromCache(eq(templateCode))).thenReturn(template);
    String content = randomString();
    when(smsTemplateService.formatSmsTemplateContent(eq(template.getContent()), eq(templateParams)))
        .thenReturn(content);
    // Mock SmsChannelService: enabled channel.
    SmsChannelDO smsChannel = randomPojo(SmsChannelDO.class, o -> o.setStatus(CommonStatusEnum.ENABLE.getStatus()));
    when(smsChannelService.getSmsChannel(eq(template.getChannelId()))).thenReturn(smsChannel);
    // Mock SmsLogService: returns the log id the send path should propagate.
    Long smsLogId = randomLongId();
    when(smsLogService.createSmsLog(eq(user.getMobile()), eq(userId), eq(UserTypeEnum.ADMIN.getValue()), eq(Boolean.TRUE),
        eq(template), eq(content), eq(templateParams))).thenReturn(smsLogId);
    // Invoke with a null mobile to force the user lookup.
    Long resultSmsLogId = smsSendService.sendSingleSmsToAdmin(null, userId, templateCode, templateParams);
    // Assert the returned log id.
    assertEquals(smsLogId, resultSmsLogId);
    // Assert the producer was called with the resolved mobile and rendered params.
    verify(smsProducer).sendSmsSendMessage(eq(smsLogId), eq(user.getMobile()),
        eq(template.getChannelId()), eq(template.getApiTemplateId()),
        eq(Lists.newArrayList(new KeyValue<>("code", "1234"), new KeyValue<>("op", "login"))));
}
/**
 * Long-polling endpoint: clients report the notification ids they have seen per
 * namespace; the request parks in a DeferredResult until a newer release message
 * arrives (or the long-poll timeout fires).
 *
 * @throws BadRequestException when the notifications payload is malformed or
 *         filters down to nothing
 */
@GetMapping
public DeferredResult<ResponseEntity<List<ApolloConfigNotification>>> pollNotification(
    @RequestParam(value = "appId") String appId,
    @RequestParam(value = "cluster") String cluster,
    @RequestParam(value = "notifications") String notificationsAsString,
    @RequestParam(value = "dataCenter", required = false) String dataCenter,
    @RequestParam(value = "ip", required = false) String clientIp) {
  List<ApolloConfigNotification> notifications = null;
  try {
    notifications = gson.fromJson(notificationsAsString, notificationsTypeReference);
  } catch (Throwable ex) {
    // Malformed JSON is logged and surfaces as the empty-notifications error below.
    Tracer.logError(ex);
  }
  if (CollectionUtils.isEmpty(notifications)) {
    throw BadRequestException.invalidNotificationsFormat(notificationsAsString);
  }
  Map<String, ApolloConfigNotification> filteredNotifications = filterNotifications(appId, notifications);
  if (CollectionUtils.isEmpty(filteredNotifications)) {
    throw BadRequestException.invalidNotificationsFormat(notificationsAsString);
  }
  DeferredResultWrapper deferredResultWrapper = new DeferredResultWrapper(bizConfig.longPollingTimeoutInMilli());
  Set<String> namespaces = Sets.newHashSetWithExpectedSize(filteredNotifications.size());
  Map<String, Long> clientSideNotifications = Maps.newHashMapWithExpectedSize(filteredNotifications.size());
  for (Map.Entry<String, ApolloConfigNotification> notificationEntry : filteredNotifications.entrySet()) {
    String normalizedNamespace = notificationEntry.getKey();
    ApolloConfigNotification notification = notificationEntry.getValue();
    namespaces.add(normalizedNamespace);
    clientSideNotifications.put(normalizedNamespace, notification.getNotificationId());
    // Remember the original -> normalized namespace mapping so the response can
    // echo the client's original namespace name.
    if (!Objects.equals(notification.getNamespaceName(), normalizedNamespace)) {
      deferredResultWrapper.recordNamespaceNameNormalizedResult(notification.getNamespaceName(), normalizedNamespace);
    }
  }
  Multimap<String, String> watchedKeysMap = watchKeysUtil.assembleAllWatchKeys(appId, cluster, namespaces, dataCenter);
  Set<String> watchedKeys = Sets.newHashSet(watchedKeysMap.values());
  /*
   * Step 1: register the deferredResult BEFORE checking for new releases, to avoid
   * a missed-wakeup race: if we checked first, a notification delivered by
   * handleMessage between the check and the registration would be lost.
   */
  deferredResultWrapper
      .onTimeout(() -> logWatchedKeys(watchedKeys, "Apollo.LongPoll.TimeOutKeys"));
  deferredResultWrapper.onCompletion(() -> {
    // Unregister all keys once the request completes (result set or timed out).
    for (String key : watchedKeys) {
      deferredResults.remove(key, deferredResultWrapper);
    }
    logWatchedKeys(watchedKeys, "Apollo.LongPoll.CompletedKeys");
  });
  // Register all keys.
  for (String key : watchedKeys) {
    this.deferredResults.put(key, deferredResultWrapper);
  }
  logWatchedKeys(watchedKeys, "Apollo.LongPoll.RegisteredKeys");
  logger.debug("Listening {} from appId: {}, cluster: {}, namespace: {}, datacenter: {}", watchedKeys, appId, cluster, namespaces, dataCenter);
  /*
   * Step 2: check whether a newer release already exists for any watched key.
   */
  List<ReleaseMessage> latestReleaseMessages = releaseMessageService.findLatestReleaseMessagesGroupByMessages(watchedKeys);
  /*
   * Manually close the entity manager.
   * Since for async request, Spring won't do so until the request is finished,
   * which is unacceptable since we are doing long polling - means the db connection would be hold
   * for a very long time.
   */
  entityManagerUtil.closeEntityManager();
  List<ApolloConfigNotification> newNotifications = getApolloConfigNotifications(namespaces, clientSideNotifications, watchedKeysMap, latestReleaseMessages);
  if (!CollectionUtils.isEmpty(newNotifications)) {
    // New releases already available: answer immediately instead of parking.
    deferredResultWrapper.setResult(newNotifications);
  }
  return deferredResultWrapper.getResult();
}
// Polls with three namespaces (default as .properties, a public namespace, and a
// public .xml namespace that stays un-normalized) and verifies every assembled
// watch key gets a registered deferred result.
@Test
public void testPollNotificationWithMultipleNamespaces() throws Exception {
    String defaultNamespaceAsFile = defaultNamespace + ".properties";
    String somePublicNamespaceAsFile = somePublicNamespace + ".xml";
    when(namespaceUtil.filterNamespaceName(defaultNamespaceAsFile)).thenReturn(defaultNamespace);
    when(namespaceUtil.filterNamespaceName(somePublicNamespaceAsFile)).thenReturn(somePublicNamespaceAsFile);
    when(namespaceUtil.normalizeNamespace(someAppId, somePublicNamespaceAsFile)).thenReturn(somePublicNamespaceAsFile);
    String someWatchKey = "someKey";
    String anotherWatchKey = "anotherKey";
    String somePublicWatchKey = "somePublicWatchKey";
    String somePublicFileWatchKey = "somePublicFileWatchKey";
    Multimap<String, String> watchKeysMap =
        assembleMultiMap(defaultNamespace, Lists.newArrayList(someWatchKey, anotherWatchKey));
    watchKeysMap
        .putAll(assembleMultiMap(somePublicNamespace, Lists.newArrayList(somePublicWatchKey)));
    watchKeysMap
        .putAll(assembleMultiMap(somePublicNamespaceAsFile, Lists.newArrayList(somePublicFileWatchKey)));
    String notificationAsString =
        transformApolloConfigNotificationsToString(defaultNamespaceAsFile, someNotificationId,
            somePublicNamespace, someNotificationId, somePublicNamespaceAsFile,
            someNotificationId);
    when(watchKeysUtil
        .assembleAllWatchKeys(someAppId, someCluster,
            Sets.newHashSet(defaultNamespace, somePublicNamespace, somePublicNamespaceAsFile),
            someDataCenter)).thenReturn(
        watchKeysMap);
    DeferredResult<ResponseEntity<List<ApolloConfigNotification>>> deferredResult = controller
        .pollNotification(someAppId, someCluster, notificationAsString, someDataCenter, someClientIp);
    assertEquals(watchKeysMap.size(), deferredResults.size());
    assertWatchKeys(watchKeysMap, deferredResult);
    verify(watchKeysUtil, times(1)).assembleAllWatchKeys(someAppId, someCluster,
        Sets.newHashSet(defaultNamespace, somePublicNamespace, somePublicNamespaceAsFile),
        someDataCenter);
}
/**
 * Finds all applications capable of opening the given filename, keyed by its
 * extension, with results memoized in {@code defaultApplicationListCache}.
 * The system default application is always included even when the type-based
 * lookup misses it. Returns an empty list when the filename has no extension.
 */
@Override
public List<Application> findAll(final String filename) {
    final String extension = Path.getExtension(filename);
    if(StringUtils.isEmpty(extension)) {
        return Collections.emptyList();
    }
    // NOTE(review): contains/put is not atomic — concurrent callers may both
    // compute the list; presumably acceptable since the result is deterministic.
    if(!defaultApplicationListCache.contains(extension)) {
        final List<Application> applications = new ArrayList<Application>();
        for(String identifier : this.findAllForType(extension)) {
            applications.add(this.getDescription(identifier));
        }
        // Because of the different API used the default opening application may not be included
        // in the above list returned. Always add the default application anyway.
        final Application defaultApplication = this.find(filename);
        if(this.isInstalled(defaultApplication)) {
            if(!applications.contains(defaultApplication)) {
                applications.add(defaultApplication);
            }
        }
        defaultApplicationListCache.put(extension, applications);
    }
    return defaultApplicationListCache.get(extension);
}
// Smoke test against the live LaunchServices registry: .txt must at least
// resolve to TextEdit on a macOS host.
@Test
public void testFindAll() {
    ApplicationFinder f = new LaunchServicesApplicationFinder();
    final List<Application> applications = f.findAll("file.txt");
    assertFalse(applications.isEmpty());
    assertTrue(applications.contains(new Application("com.apple.TextEdit", "T")));
    // assertTrue(applications.contains(new Application("com.macromates.textmate", "T")));
}
/**
 * Selects one healthy instance of the given service across all clusters.
 * Delegates to the cluster-aware overload with an empty cluster list
 * (a mutable ArrayList is passed deliberately, in case the callee mutates it).
 */
@Override
public Instance selectOneHealthyInstance(String serviceName) throws NacosException {
    return selectOneHealthyInstance(serviceName, new ArrayList<>());
}
// Verifies that a cluster-scoped selection (subscribe=false) queries the proxy
// once with the comma-joined cluster list and the default group.
@Test
void testSelectOneHealthyInstance7() throws NacosException {
    //given
    Instance healthyInstance = new Instance();
    healthyInstance.setIp("1.1.1.1");
    healthyInstance.setPort(1000);
    List<Instance> hosts = new ArrayList<>();
    hosts.add(healthyInstance);
    ServiceInfo infoWithHealthyInstance = new ServiceInfo();
    infoWithHealthyInstance.setHosts(hosts);
    when(proxy.queryInstancesOfService(anyString(), anyString(), anyString(), anyBoolean())).thenReturn(
        infoWithHealthyInstance);
    String serviceName = "service1";
    List<String> clusterList = Arrays.asList("cluster1", "cluster2");
    //when
    client.selectOneHealthyInstance(serviceName, clusterList, false);
    //then
    verify(proxy, times(1)).queryInstancesOfService(serviceName, Constants.DEFAULT_GROUP, "cluster1,cluster2", false);
}
/**
 * Jackson-facing factory for {@code WindowInfo}.
 *
 * @param type         the window type (required in JSON)
 * @param size         window size; absent for session windows
 * @param emitStrategy optional output refinement (e.g. FINAL/CHANGES)
 */
@JsonCreator
public static WindowInfo of(
    @JsonProperty(value = "type", required = true) final WindowType type,
    @JsonProperty(value = "size") final Optional<Duration> size,
    @JsonProperty(value = "emitStrategy") final Optional<OutputRefinement> emitStrategy) {
    return new WindowInfo(type, size, emitStrategy);
}
// Equality groups: type and size participate in equals/hashCode; the last group
// shows emitStrategy is deliberately ignored by equality.
@Test
public void shouldImplementEquals() {
    new EqualsTester()
        .addEqualityGroup(
            WindowInfo.of(SESSION, Optional.empty(), Optional.empty()),
            WindowInfo.of(SESSION, Optional.empty(), Optional.empty())
        )
        .addEqualityGroup(
            WindowInfo.of(TUMBLING, Optional.of(Duration.ofMillis(19)), Optional.empty()),
            WindowInfo.of(TUMBLING, Optional.of(Duration.ofMillis(19)), Optional.empty())
        )
        .addEqualityGroup(
            WindowInfo.of(HOPPING, Optional.of(Duration.ofMillis(19)), Optional.empty()),
            WindowInfo.of(HOPPING, Optional.of(Duration.ofMillis(19)), Optional.empty())
        )
        .addEqualityGroup(
            WindowInfo.of(TUMBLING, Optional.of(Duration.ofMillis(1010)), Optional.empty()),
            WindowInfo.of(TUMBLING, Optional.of(Duration.ofMillis(1010)), Optional.of(OutputRefinement.CHANGES)),
            WindowInfo.of(TUMBLING, Optional.of(Duration.ofMillis(1010)), Optional.of(OutputRefinement.FINAL))
        )
        .testEquals();
}
/**
 * Returns the index of the first set bit at or after {@code fromIndex},
 * or -1 when no such bit exists. Reads a snapshot of the segment array;
 * concurrent modifications may or may not be observed.
 *
 * @throws IndexOutOfBoundsException when {@code fromIndex} is negative
 */
public int nextSetBit(int fromIndex) {
    if (fromIndex < 0)
        throw new IndexOutOfBoundsException("fromIndex < 0: " + fromIndex);
    int segmentPosition = fromIndex >>> log2SegmentSize; /// which segment -- div by num bits per segment
    ThreadSafeBitSetSegments segments = this.segments.get();
    if(segmentPosition >= segments.numSegments())
        return -1;
    int longPosition = (fromIndex >>> 6) & segmentMask; /// which long in the segment -- remainder of div by num bits per segment
    int bitPosition = fromIndex & 0x3F; /// which bit in the long -- remainder of div by num bits in long (64)
    AtomicLongArray segment = segments.getSegment(segmentPosition);
    // Mask off bits below fromIndex in the first word so the scan starts exactly there.
    long word = segment.get(longPosition) & (0xffffffffffffffffL << bitPosition);
    while (true) {
        if (word != 0)
            // Reassemble the absolute bit index from segment, word and bit offsets.
            return (segmentPosition << (log2SegmentSize)) + (longPosition << 6) + Long.numberOfTrailingZeros(word);
        // Advance to the next word, rolling over into the next segment when needed.
        if (++longPosition > segmentMask) {
            segmentPosition++;
            if(segmentPosition >= segments.numSegments())
                return -1;
            segment = segments.getSegment(segmentPosition);
            longPosition = 0;
        }
        word = segment.get(longPosition);
    }
}
// Scans across word and segment boundaries: adjacent bits, sparse far-apart bits,
// exhausted ranges (-1), and behavior after clearAll().
@Test
public void testNextSetBit() {
    ThreadSafeBitSet set1 = new ThreadSafeBitSet();
    set1.set(100);
    set1.set(101);
    set1.set(103);
    set1.set(100000);
    set1.set(1000000);
    Assert.assertEquals(100, set1.nextSetBit(0));
    Assert.assertEquals(101, set1.nextSetBit(101));
    Assert.assertEquals(103, set1.nextSetBit(102));
    Assert.assertEquals(100000, set1.nextSetBit(104));
    Assert.assertEquals(1000000, set1.nextSetBit(100001));
    Assert.assertEquals(-1, set1.nextSetBit(1000001));
    Assert.assertEquals(-1, set1.nextSetBit(1015809));
    set1.clearAll();
    set1.set(555555);
    Assert.assertEquals(555555, set1.nextSetBit(0));
    Assert.assertEquals(-1, set1.nextSetBit(555556));
}
/**
 * Completes an unfinished SendRequest: selects inputs, computes fee and change,
 * optionally empties the wallet, shuffles outputs, signs, and marks the request
 * completed. The statement order below is significant (selection must precede
 * signing, size check must follow signing, etc.).
 *
 * @throws InsufficientMoneyException when the wallet cannot cover outputs plus fee
 * @throws CompletionException        for dusty sends, multiple OP_RETURNs,
 *                                    oversize transactions, or failed downward adjustment
 */
public void completeTx(SendRequest req) throws InsufficientMoneyException, CompletionException {
    lock.lock();
    try {
        checkArgument(!req.completed, () -> "given SendRequest has already been completed");
        log.info("Completing send tx with {} outputs totalling {} and a fee of {}/vkB", req.tx.getOutputs().size(), req.tx.getOutputSum().toFriendlyString(), req.feePerKb.toFriendlyString());
        // Calculate a list of ALL potential candidates for spending and then ask a coin selector to provide us
        // with the actual outputs that'll be used to gather the required amount of value. In this way, users
        // can customize coin selection policies. The call below will ignore immature coinbases and outputs
        // we don't have the keys for.
        List<TransactionOutput> prelimCandidates = calculateAllSpendCandidates(true, req.missingSigsMode == MissingSigsMode.THROW);
        // Connect (add a value amount) unconnected inputs
        List<TransactionInput> inputs = connectInputs(prelimCandidates, req.tx.getInputs());
        req.tx.clearInputs();
        inputs.forEach(req.tx::addInput);
        // Warn if there are remaining unconnected inputs whose value we do not know
        // TODO: Consider throwing if there are inputs that we don't have a value for
        if (req.tx.getInputs().stream()
                .map(TransactionInput::getValue)
                .anyMatch(Objects::isNull))
            log.warn("SendRequest transaction already has inputs but we don't know how much they are worth - they will be added to fee.");
        // If any inputs have already been added, we don't need to get their value from wallet
        Coin totalInput = req.tx.getInputSum();
        // Calculate the amount of value we need to import.
        Coin valueNeeded = req.tx.getOutputSum().subtract(totalInput);
        // Enforce the OP_RETURN limit
        if (req.tx.getOutputs().stream()
                .filter(o -> ScriptPattern.isOpReturn(o.getScriptPubKey()))
                .count() > 1) // Only 1 OP_RETURN per transaction allowed.
            throw new MultipleOpReturnRequested();
        // Check for dusty sends
        if (req.ensureMinRequiredFee && !req.emptyWallet) { // Min fee checking is handled later for emptyWallet.
            if (req.tx.getOutputs().stream().anyMatch(TransactionOutput::isDust))
                throw new DustySendRequested();
        }
        // Filter out candidates that are already included in the transaction inputs
        // NOTE(review): the predicate KEEPS outputs for which alreadyIncluded(...) is
        // true — verify alreadyIncluded's polarity matches the "filter out" intent.
        List<TransactionOutput> candidates = prelimCandidates.stream()
                .filter(output -> alreadyIncluded(req.tx.getInputs(), output))
                .collect(StreamUtils.toUnmodifiableList());
        CoinSelection bestCoinSelection;
        TransactionOutput bestChangeOutput = null;
        List<Coin> updatedOutputValues = null;
        if (!req.emptyWallet) {
            // This can throw InsufficientMoneyException.
            FeeCalculation feeCalculation = calculateFee(req, valueNeeded, req.ensureMinRequiredFee, candidates);
            bestCoinSelection = feeCalculation.bestCoinSelection;
            bestChangeOutput = feeCalculation.bestChangeOutput;
            updatedOutputValues = feeCalculation.updatedOutputValues;
        } else {
            // We're being asked to empty the wallet. What this means is ensuring "tx" has only a single output
            // of the total value we can currently spend as determined by the selector, and then subtracting the fee.
            checkState(req.tx.getOutputs().size() == 1, () -> "empty wallet TX must have a single output only");
            CoinSelector selector = req.coinSelector == null ? coinSelector : req.coinSelector;
            bestCoinSelection = selector.select((Coin) network.maxMoney(), candidates);
            candidates = null; // Selector took ownership and might have changed candidates. Don't access again.
            req.tx.getOutput(0).setValue(bestCoinSelection.totalValue());
            log.info("  emptying {}", bestCoinSelection.totalValue().toFriendlyString());
        }
        bestCoinSelection.outputs()
                .forEach(req.tx::addInput);
        if (req.emptyWallet) {
            // Shrink the single output to leave room for the fee; bail out if it
            // would drop below the dust/min-fee threshold.
            if (!adjustOutputDownwardsForFee(req.tx, bestCoinSelection, req.feePerKb, req.ensureMinRequiredFee))
                throw new CouldNotAdjustDownwards();
        }
        if (updatedOutputValues != null) {
            for (int i = 0; i < updatedOutputValues.size(); i++) {
                req.tx.getOutput(i).setValue(updatedOutputValues.get(i));
            }
        }
        if (bestChangeOutput != null) {
            req.tx.addOutput(bestChangeOutput);
            log.info("  with {} change", bestChangeOutput.getValue().toFriendlyString());
        }
        // Now shuffle the outputs to obfuscate which is the change.
        if (req.shuffleOutputs)
            req.tx.shuffleOutputs();
        // Now sign the inputs, thus proving that we are entitled to redeem the connected outputs.
        if (req.signInputs)
            signTransaction(req);
        // Check size.
        final int size = req.tx.messageSize();
        if (size > Transaction.MAX_STANDARD_TX_SIZE)
            throw new ExceededMaxTransactionSize();
        // Label the transaction as being self created. We can use this later to spend its change output even before
        // the transaction is confirmed. We deliberately won't bother notifying listeners here as there's not much
        // point - the user isn't interested in a confidence transition they made themselves.
        getConfidence(req.tx).setSource(TransactionConfidence.Source.SELF);
        // Label the transaction as being a user requested payment. This can be used to render GUI wallet
        // transaction lists more appropriately, especially when the wallet starts to generate transactions itself
        // for internal purposes.
        req.tx.setPurpose(Transaction.Purpose.USER_PAYMENT);
        // Record the exchange rate that was valid when the transaction was completed.
        req.tx.setExchangeRate(req.exchangeRate);
        req.tx.setMemo(req.memo);
        req.completed = true;
        log.info("  completed: {}", req.tx);
    } finally {
        lock.unlock();
    }
}
// Tests sending transaction where there are 2 attempts to write OP_RETURN scripts
// - this should fail and throw MultipleOpReturnRequested regardless of the
// ensureMinRequiredFee/emptyWallet combination (parameterized over all four).
@Test(expected = Wallet.MultipleOpReturnRequested.class)
@Parameters({"false, false", "false, true", "true, false", "true, true"})
public void twoOpReturnsPerTransactionTest(boolean ensureMinRequiredFee, boolean emptyWallet) throws Exception {
    receiveATransaction(wallet, myAddress);
    Transaction tx = new Transaction();
    Coin messagePrice = Coin.ZERO;
    Script script1 = ScriptBuilder.createOpReturnScript("hello world 1!".getBytes());
    Script script2 = ScriptBuilder.createOpReturnScript("hello world 2!".getBytes());
    tx.addOutput(messagePrice, script1);
    tx.addOutput(messagePrice, script2);
    SendRequest request = SendRequest.forTx(tx);
    request.ensureMinRequiredFee = ensureMinRequiredFee;
    request.emptyWallet = emptyWallet;
    wallet.completeTx(request);
}
/**
 * Applies a new ZooKeeper ensemble configuration, retrying with backoff until it
 * succeeds or the overall timeout elapses. On final failure the server is shut
 * down and, depending on {@code haltOnFailure}, the process is killed or the
 * last ReconfigException is rethrown.
 */
private void reconfigure(ZookeeperServerConfig newConfig) {
    Instant reconfigTriggered = Instant.now();
    String newServers = servers(newConfig);
    log.log(Level.INFO, "Will reconfigure ZooKeeper cluster." +
                        "\nServers in active config:" + servers(activeConfig) +
                        "\nServers in new config:" + newServers);
    String connectionSpec = vespaZooKeeperAdmin.localConnectionSpec(activeConfig);
    Instant now = Instant.now();
    // For reconfig to succeed, the current and resulting ensembles must have a majority. When an ensemble grows and
    // the joining servers outnumber the existing ones, we have to wait for enough of them to start to have a majority.
    Instant end = now.plus(timeout);
    // Loop reconfiguring since we might need to wait until another reconfiguration is finished before we can succeed
    for (int attempt = 1; ; attempt++) {
        try {
            Instant reconfigStarted = Instant.now();
            vespaZooKeeperAdmin.reconfigure(connectionSpec, newServers);
            Instant reconfigEnded = Instant.now();
            log.log(Level.INFO, "Reconfiguration completed in " +
                                Duration.between(reconfigTriggered, reconfigEnded) +
                                ", after " + attempt + " attempt(s). ZooKeeper reconfig call took " +
                                Duration.between(reconfigStarted, reconfigEnded));
            // Only mark the new config active after ZooKeeper accepted it.
            activeConfig = newConfig;
            return;
        } catch (ReconfigException e) {
            Duration delay = backoff.delay(attempt);
            now = Instant.now();
            if (now.isBefore(end)) {
                log.log(Level.INFO, "Reconfiguration attempt " + attempt + " failed. Retrying in " + delay +
                                    ", time left " + Duration.between(now, end) + ": " +
                                    Exceptions.toMessageString(e));
                sleeper.sleep(delay);
            } else {
                log.log(Level.SEVERE, "Reconfiguration attempt " + attempt + " failed, and was failing for " +
                                      timeout + "; giving up now: " + Exceptions.toMessageString(e));
                shutdown();
                if (haltOnFailure)
                    Process.logAndDie("Reconfiguration did not complete within timeout " + timeout + ". Forcing container shutdown.");
                else
                    throw e;
            }
        }
    }
}
// Exercises the reconfigurer through a sequence of topology changes: start, grow,
// shrink, and a membership change where a node leaves while another joins (shuffling
// server indices). After each step the active config, the connection spec used for
// the reconfig call, the resulting server list, and the reconfiguration count are checked.
@Test
public void testReconfigure() {
    ZookeeperServerConfig initialConfig = createConfig(3, true);
    reconfigurer.startOrReconfigure(initialConfig);
    assertSame(initialConfig, reconfigurer.activeConfig());

    // Cluster grows
    ZookeeperServerConfig nextConfig = createConfig(5, true);
    reconfigurer.startOrReconfigure(nextConfig);
    assertEquals("node1:2181", reconfigurer.connectionSpec());
    assertEquals("server.0=node0:2182:2183;2181,server.1=node1:2182:2183;2181,server.2=node2:2182:2183;2181,server.3=node3:2182:2183;2181,server.4=node4:2182:2183;2181",
                 reconfigurer.servers());
    assertEquals(2, reconfigurer.reconfigurations());
    assertSame(nextConfig, reconfigurer.activeConfig());

    // Cluster shrinks
    nextConfig = createConfig(3, true);
    reconfigurer.startOrReconfigure(nextConfig);
    assertEquals(3, reconfigurer.reconfigurations());
    assertEquals("node1:2181", reconfigurer.connectionSpec());
    assertEquals("server.0=node0:2182:2183;2181,server.1=node1:2182:2183;2181,server.2=node2:2182:2183;2181",
                 reconfigurer.servers());
    assertSame(nextConfig, reconfigurer.activeConfig());

    // Cluster loses node1, but node3 joins. Indices are shuffled.
    nextConfig = createConfig(3, true, 1);
    reconfigurer.startOrReconfigure(nextConfig);
    assertEquals(4, reconfigurer.reconfigurations());
    assertEquals("server.0=node0:2182:2183;2181,server.1=node2:2182:2183;2181,server.2=node3:2182:2183;2181",
                 reconfigurer.servers());
    assertSame(nextConfig, reconfigurer.activeConfig());
}
/**
 * Convenience overload: updates the permission checkboxes from the given permission set.
 * Delegates to {@code updateCheckboxes(false, permissionEnumSet)}; the semantics of the
 * boolean flag are defined by the two-argument overload.
 *
 * @param permissionEnumSet the repository file permissions to reflect in the checkboxes
 */
public void updateCheckboxes( EnumSet<RepositoryFilePermission> permissionEnumSet ) {
    updateCheckboxes( false, permissionEnumSet );
}
// With the "appropriate" flag false and all four permissions granted, every checkbox
// must be rendered as checked AND disabled (visible but not editable).
@Test
public void testUpdateCheckboxesManagePermissionsAppropriateFalse() {
    permissionsCheckboxHandler.updateCheckboxes( false,
        EnumSet.of( RepositoryFilePermission.ACL_MANAGEMENT, RepositoryFilePermission.DELETE,
            RepositoryFilePermission.WRITE, RepositoryFilePermission.READ ) );
    verify( readCheckbox, times( 1 ) ).setChecked( true );
    verify( writeCheckbox, times( 1 ) ).setChecked( true );
    verify( deleteCheckbox, times( 1 ) ).setChecked( true );
    verify( manageCheckbox, times( 1 ) ).setChecked( true );
    verify( readCheckbox, times( 1 ) ).setDisabled( true );
    verify( writeCheckbox, times( 1 ) ).setDisabled( true );
    verify( deleteCheckbox, times( 1 ) ).setDisabled( true );
    verify( manageCheckbox, times( 1 ) ).setDisabled( true );
}
static ApiError validateQuotaKeyValue( Map<String, ConfigDef.ConfigKey> validKeys, String key, double value ) { // Ensure we have an allowed quota key ConfigDef.ConfigKey configKey = validKeys.get(key); if (configKey == null) { return new ApiError(Errors.INVALID_REQUEST, "Invalid configuration key " + key); } if (value <= 0.0) { return new ApiError(Errors.INVALID_REQUEST, "Quota " + key + " must be greater than 0"); } // Ensure the quota value is valid switch (configKey.type()) { case DOUBLE: return ApiError.NONE; case SHORT: if (value > Short.MAX_VALUE) { return new ApiError(Errors.INVALID_REQUEST, "Proposed value for " + key + " is too large for a SHORT."); } return getErrorForIntegralQuotaValue(value, key); case INT: if (value > Integer.MAX_VALUE) { return new ApiError(Errors.INVALID_REQUEST, "Proposed value for " + key + " is too large for an INT."); } return getErrorForIntegralQuotaValue(value, key); case LONG: { if (value > Long.MAX_VALUE) { return new ApiError(Errors.INVALID_REQUEST, "Proposed value for " + key + " is too large for a LONG."); } return getErrorForIntegralQuotaValue(value, key); } default: return new ApiError(Errors.UNKNOWN_SERVER_ERROR, "Unexpected config type " + configKey.type() + " should be Long or Double"); } }
// A fractional consumer_byte_rate must still validate successfully because the
// key's config type is DOUBLE (no integrality check applies).
@Test
public void testValidateQuotaKeyValueForValidConsumerByteRate2() {
    assertEquals(ApiError.NONE, ClientQuotaControlManager.validateQuotaKeyValue(
        VALID_CLIENT_ID_QUOTA_KEYS, "consumer_byte_rate", 1235.0000001));
}
/**
 * Registers a remote service implementation with a single worker.
 * Delegates to {@code register(remoteInterface, object, 1)}.
 *
 * @param remoteInterface the service interface exposed to remote callers
 * @param object          the implementation handling invocations
 */
@Override
public <T> void register(Class<T> remoteInterface, T object) {
    register(remoteInterface, object, 1);
}
// End-to-end check of the async remote-service facade: one Redisson instance registers
// the implementation, a second obtains the async interface and invokes a void method
// and a result-returning method.
@Test
public void testAsync() {
    RedissonClient r1 = createInstance();
    r1.getRemoteService().register(RemoteInterface.class, new RemoteImpl());

    RedissonClient r2 = createInstance();
    RemoteInterfaceAsync ri = r2.getRemoteService().get(RemoteInterfaceAsync.class);

    RFuture<Void> f = ri.voidMethod("someName", 100L);
    f.toCompletableFuture().join();
    RFuture<Long> resFuture = ri.resultMethod(100L);
    // NOTE(review): the result future is joined twice (once here, once in the
    // assertion); join() on a completed future is idempotent, so this is redundant
    // but harmless.
    resFuture.toCompletableFuture().join();
    assertThat(resFuture.toCompletableFuture().join()).isEqualTo(200);

    r1.shutdown();
    r2.shutdown();
}
/**
 * Lists the contents of a Dropbox directory, following the listFolder/listFolderContinue
 * cursor pagination until all entries have been collected.
 *
 * @param directory the folder to list
 * @param listener  notified of progress while pages are parsed
 * @return all children of {@code directory}
 * @throws BackgroundException when the Dropbox API call fails (mapped from DbxException)
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    try {
        final AttributedList<Path> children = new AttributedList<>();
        ListFolderResult result;
        // First page.
        this.parse(directory, listener, children, result = new DbxUserFilesRequests(session.getClient(directory)).listFolder(containerService.getKey(directory)));
        // If true, then there are more entries available. Pass the cursor to list_folder/continue to retrieve the rest.
        while(result.getHasMore()) {
            this.parse(directory, listener, children, result = new DbxUserFilesRequests(session.getClient(directory)).listFolderContinue(result.getCursor()));
        }
        return children;
    }
    catch(DbxException e) {
        throw new DropboxExceptionMappingService().map("Listing directory {0} failed", e, directory);
    }
}
// Files and folders whose names contain a colon must round-trip through create/list:
// both appear in the listing of the home folder with the correct parent, then are deleted.
@Test
public void testFilenameColon() throws Exception {
    final Path home = new DefaultHomeFinderService(session).find();
    final Path file = new DropboxTouchFeature(session).touch(new Path(home,
        String.format("%s:name", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file)), new TransferStatus());
    final Path folder = new DropboxDirectoryFeature(session).mkdir(new Path(home,
        String.format("%s:name", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.directory)), new TransferStatus());
    final AttributedList<Path> list = new DropboxListService(session).list(home, new DisabledListProgressListener());
    assertNotSame(AttributedList.emptyList(), list);
    assertFalse(list.isEmpty());
    assertTrue(list.contains(file));
    assertSame(home, list.get(file).getParent());
    assertTrue(list.contains(folder));
    assertSame(home, list.get(folder).getParent());
    new DropboxDeleteFeature(session).delete(Arrays.asList(file, folder), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Validates the configured default OCI image against the daemon's image allow-list.
 *
 * <p>When no allow-list ({@code STORM_OCI_ALLOWED_IMAGES}) is configured, validation is
 * skipped entirely. Otherwise the default image ({@code STORM_OCI_IMAGE}) is checked via
 * {@code validateImage}, which is expected to reject images outside the allow-list.
 *
 * @param conf the daemon configuration map
 */
public static void validateImageInDaemonConf(Map<String, Object> conf) {
    List<String> allowedImages = getAllowedImages(conf, true);
    // No allow-list configured: nothing to validate.
    if (allowedImages.isEmpty()) {
        LOG.debug("{} is not configured; skip image validation", DaemonConfig.STORM_OCI_ALLOWED_IMAGES);
        return;
    }
    String defaultImage = (String) conf.get(DaemonConfig.STORM_OCI_IMAGE);
    validateImage(allowedImages, defaultImage, DaemonConfig.STORM_OCI_IMAGE);
}
// A default image whose tag is not in the allow-list must make daemon-conf
// validation throw IllegalArgumentException.
@Test
public void validateImageInDaemonConfNotInAllowedList() {
    assertThrows(IllegalArgumentException.class, () -> {
        Map<String, Object> conf = new HashMap<>();
        List<String> allowedImages = new ArrayList<>();
        allowedImages.add("storm/rhel7:dev_test");
        conf.put(DaemonConfig.STORM_OCI_ALLOWED_IMAGES, allowedImages);
        conf.put(DaemonConfig.STORM_OCI_IMAGE, "storm/rhel7:wow");
        OciUtils.validateImageInDaemonConf(conf);
    });
}
/**
 * Extracts listener parameters from the "Listening-Configs" request parameter.
 *
 * <p>The raw value is URL-decoded, then split into lines on {@code LINE_SEPARATOR_CHAR};
 * each line is split into words on {@code WORD_SEPARATOR_CHAR}. A line must contain
 * 2-4 words: word 0 is the dataId, word 1 the group, and (only when 4 words are present)
 * word 3 the namespace id.
 * NOTE(review): a 3-word line is accepted but its third word is ignored — confirm
 * whether that is intentional.
 *
 * @return one ParamInfo per probe line; empty list when the parameter is blank
 * @throws NacosRuntimeException    when URL-decoding fails
 * @throws IllegalArgumentException when a line has fewer than 2 or more than 4 words
 */
@Override
public List<ParamInfo> extractParam(HttpServletRequest request) throws NacosRuntimeException {
    ArrayList<ParamInfo> paramInfos = new ArrayList<>();
    String listenConfigs = request.getParameter("Listening-Configs");
    if (StringUtils.isBlank(listenConfigs)) {
        return paramInfos;
    }
    try {
        listenConfigs = URLDecoder.decode(listenConfigs, Constants.ENCODE);
    } catch (UnsupportedEncodingException e) {
        // Wrap into the project's runtime exception; the configured encoding should always exist.
        throw new NacosRuntimeException(ErrorCode.UnKnowError.getCode(), e);
    }
    if (StringUtils.isBlank(listenConfigs)) {
        return paramInfos;
    }
    String[] lines = listenConfigs.split(Character.toString(LINE_SEPARATOR_CHAR));
    for (String line : lines) {
        ParamInfo paramInfo = new ParamInfo();
        String[] words = line.split(Character.toString(WORD_SEPARATOR_CHAR));
        if (words.length < 2 || words.length > 4) {
            throw new IllegalArgumentException("invalid probeModify");
        }
        paramInfo.setDataId(words[0]);
        paramInfo.setGroup(words[1]);
        if (words.length == 4) {
            paramInfo.setNamespaceId(words[3]);
        }
        paramInfos.add(paramInfo);
    }
    return paramInfos;
}
// Malformed "Listening-Configs" input must be rejected with IllegalArgumentException.
// Rewritten to use assertThrows instead of the try/catch + assertTrue(false) anti-pattern:
// assertThrows fails with a clear message both when nothing is thrown and when the wrong
// exception type is thrown, and there is no need to print the stack trace of the expected
// exception. Fully qualified to avoid requiring a new static import.
@Test
void testError() {
    String listenerConfigsString = getErrorListenerConfigsString();
    Mockito.when(httpServletRequest.getParameter(eq("Listening-Configs"))).thenReturn(listenerConfigsString);
    configListenerHttpParamExtractor = new ConfigListenerHttpParamExtractor();
    org.junit.jupiter.api.Assertions.assertThrows(IllegalArgumentException.class,
            () -> configListenerHttpParamExtractor.extractParam(httpServletRequest));
}
/**
 * Rewrites the given expression by running it through an ExpressionTreeRewriter
 * driven by {@code OperatorPlugin#process}; no rewrite context is supplied (null).
 *
 * @param expression the expression to rewrite
 * @return the rewritten expression
 */
public Expression rewrite(final Expression expression) {
    return new ExpressionTreeRewriter<>(new OperatorPlugin()::process)
        .rewrite(expression, null);
}
// A comparison written with the string literal on the left ("literal < ROWTIME")
// must still be rewritten, producing the epoch-millis constant on the left side.
@Test
public void shouldReplaceComparisonInReverseOrder() {
    // Given:
    final Expression predicate = getPredicate(
        "SELECT * FROM orders where '2017-01-01T00:00:00.000' < ROWTIME;");

    // When:
    final Expression rewritten = rewriter.rewrite(predicate);

    // Then:
    assertThat(rewritten.toString(), is(String.format("(%d < ORDERS.ROWTIME)", A_TIMESTAMP)));
}
/**
 * Evaluates the expression with an empty variable map.
 * Delegates to {@code analytical(expression, new HashMap<>(), language)}.
 *
 * @param expression the expression template to evaluate
 * @param language   the expression language identifier (e.g. "spel")
 * @return the evaluated result
 * @throws Exception propagated from the three-argument overload
 */
public static String analytical(String expression, String language) throws Exception {
    return analytical(expression, new HashMap<>(), language);
}
// Verifies that a SpEL expression interpolates a List variable using the list's
// standard toString() rendering ("[1, 2, 3]").
@Test
public void testArray() {
    String expression = ExpressionUtils.analytical("test-${array}",
            Collections.singletonMap("array", Arrays.asList(1, 2, 3)), "spel");
    // Fix: JUnit's Assert.assertEquals takes (expected, actual) in that order; the
    // arguments were swapped, which yields a misleading failure message on regression.
    Assert.assertEquals("test-[1, 2, 3]", expression);
}
/**
 * Varargs convenience overload. Wraps the patterns with {@code Arrays.asList} and
 * delegates to the collection-based overload; with zero patterns the delegate
 * receives an empty list.
 *
 * @param values   the collection to filter in place
 * @param patterns patterns that determine which values are retained
 */
public static void retainMatching(Collection<String> values, String... patterns) {
    retainMatching(values, Arrays.asList(patterns));
}
// Calling retainMatching with no patterns must leave the collection unchanged
// (nothing is filtered out).
@Test
public void testRetainMatchingWithNoPatterns() throws Exception {
    Collection<String> values = stringToList("A");
    StringCollectionUtil.retainMatching(values);
    assertTrue(values.contains("A"));
}
/**
 * Creates a post via {@code POST /posts} using the reactive WebClient.
 *
 * <p>On HTTP 201 (Created) the response is converted to a body-less entity so the
 * caller can inspect headers such as {@code Location}; any other status is turned
 * into an error signal via {@code createError()}.
 *
 * @param post the payload to send as JSON
 * @return a Mono emitting the (body-less) response entity, or an error on non-201
 */
Mono<ResponseEntity<Void>> save(Post post) {
    return client.post()
        .uri("/posts")
        .contentType(MediaType.APPLICATION_JSON)
        .bodyValue(post)
        .exchangeToMono(response -> {
            if (response.statusCode().equals(HttpStatus.CREATED)) {
                return response.toBodilessEntity();
            }
            return response.createError();
        });
}
// Stubs POST /posts with WireMock to return 201 + Location, then verifies that the
// client surfaces the Location header and status code, and that the outbound request
// carried the expected JSON body and content type.
@SneakyThrows
@Test
public void testCreatePost() {
    var id = UUID.randomUUID();
    var data = new Post(null, "title1", "content1", Status.DRAFT, null);
    stubFor(post("/posts")
        .willReturn(
            aResponse()
                .withHeader("Location", "/posts/" + id)
                .withStatus(201)
        )
    );

    postClient.save(data)
        .as(StepVerifier::create)
        .consumeNextWith(
            entity -> {
                assertThat(entity.getHeaders().getLocation().toString()).isEqualTo("/posts/" + id);
                assertThat(entity.getStatusCode().value()).isEqualTo(201);
            }
        )
        .verifyComplete();

    // Verify the request the client actually sent.
    verify(postRequestedFor(urlEqualTo("/posts"))
        .withHeader("Content-Type", equalTo("application/json"))
        .withRequestBody(equalToJson(Json.write(data)))
    );
}
/**
 * Encodes binary data as Base64 with default options: no chunking, standard (non
 * URL-safe) alphabet, and an effectively unlimited result size (Integer.MAX_VALUE).
 * Delegates to the four-argument overload.
 *
 * @param binaryData the bytes to encode
 * @return the Base64-encoded bytes
 */
public static byte[] encodeBase64(byte[] binaryData) {
    return encodeBase64(binaryData, false, false, Integer.MAX_VALUE);
}
// Encoding input whose Base64 output would exceed the given maxResultSize (10)
// must fail fast with IllegalArgumentException rather than truncate.
@Test
void testEncodeOverMaxLength() {
    assertThrows(IllegalArgumentException.class, () -> {
        String a = "very large characters to test chunk encoding and see if the result is expected or not";
        Base64.encodeBase64(a.getBytes(StandardCharsets.UTF_8), false, false, 10);
    });
}
/**
 * Builds an IssuesChangesNotification for the given issues, attributing all changes to
 * the current analysis (AnalysisChange carrying the analysis date).
 *
 * <p>For each issue a ChangedIssue is created with its assignee (resolved from
 * {@code assigneesByUuid}), its new status, an IssueStatus derived from status+resolution
 * (only when status is non-null), its rule, and the current project. The assembled
 * builder is serialized by {@code issuesChangesSerializer}.
 *
 * @param issues          the changed issues to notify about
 * @param assigneesByUuid lookup of assignee UserDto by uuid
 * @return the serialized notification
 */
public IssuesChangesNotification newIssuesChangesNotification(Set<DefaultIssue> issues, Map<String, UserDto> assigneesByUuid) {
    AnalysisChange change = new AnalysisChange(analysisMetadataHolder.getAnalysisDate());
    Set<ChangedIssue> changedIssues = issues.stream()
        .map(issue -> new ChangedIssue.Builder(issue.key())
            .setAssignee(getAssignee(issue.assignee(), assigneesByUuid))
            .setNewStatus(issue.status())
            // Guard against a null status: IssueStatus.of is only called when status is present.
            .setNewIssueStatus(issue.status() != null ? IssueStatus.of(issue.status(), issue.resolution()) : null)
            .setRule(getRuleByRuleKey(issue.ruleKey()))
            .setProject(getProject())
            .build())
        .collect(Collectors.toSet());
    return issuesChangesSerializer.serialize(new IssuesChangesNotificationBuilder(changedIssues, change));
}
// The rule attached to a ChangedIssue must be resolved through the RuleRepository:
// both the key and the repository-provided name must match.
@Test
public void newIssuesChangesNotification_creates_rule_from_RuleRepository() {
    RuleKey ruleKey = RuleKey.of("foo", "bar");
    DefaultIssue issue = new DefaultIssue()
        .setRuleKey(ruleKey)
        .setKey("issueKey")
        .setStatus(STATUS_OPEN);
    Map<String, UserDto> assigneesByUuid = nonEmptyAssigneesByUuid();
    ReportComponent project = ReportComponent.builder(PROJECT, 1).build();
    String branchName = randomAlphabetic(12);
    ruleRepository.add(ruleKey);
    treeRootHolder.setRoot(project);
    analysisMetadata.setAnalysisDate(new Random().nextLong());
    analysisMetadata.setBranch(newNonMainBranch(BranchType.BRANCH, branchName));
    IssuesChangesNotification expected = mock(IssuesChangesNotification.class);
    when(issuesChangesSerializer.serialize(any(IssuesChangesNotificationBuilder.class))).thenReturn(expected);

    IssuesChangesNotification notification = underTest.newIssuesChangesNotification(ImmutableSet.of(issue), assigneesByUuid);

    assertThat(notification).isSameAs(expected);
    IssuesChangesNotificationBuilder builder = verifyAndCaptureIssueChangeNotificationBuilder();
    assertThat(builder.getIssues()).hasSize(1);
    ChangedIssue changeIssue = builder.getIssues().iterator().next();
    assertThat(changeIssue.getRule().getKey()).isEqualTo(ruleKey);
    assertThat(changeIssue.getRule().getName()).isEqualTo(ruleRepository.getByKey(ruleKey).getName());
}
/**
 * Returns the id of the single content cluster that has a service instance on the given
 * host within the application.
 *
 * @throws IllegalArgumentException when zero or more than one content cluster matches
 */
public static ClusterId getContentClusterName(ApplicationInstance application, HostName hostName) {
    // A cluster matches when it is a content cluster AND has an instance on the host.
    Set<ClusterId> matchingClusterIds = application.serviceClusters().stream()
            .filter(cluster -> isContent(cluster) && clusterHasInstanceOnHost(cluster, hostName))
            .map(ServiceCluster::clusterId)
            .collect(Collectors.toSet());

    if (matchingClusterIds.size() == 1) {
        return matchingClusterIds.iterator().next();
    }
    throw new IllegalArgumentException("Expected exactly one content cluster within application " +
            application.applicationInstanceId() + " and host " + hostName + ", but found " +
            matchingClusterIds.size() + ": " + matchingClusterIds +
            ", application: " + application);
}
// The content cluster hosting distributor0 must resolve to the known content cluster id.
@Test
public void testGetContentClusterName() {
    ClusterId contentClusterName = VespaModelUtil.getContentClusterName(application, distributor0.hostName());
    assertEquals(CONTENT_CLUSTER_ID, contentClusterName);
}
/**
 * Converts a user-facing {@link SinkConfig} (plus details extracted from the sink
 * archive) into the internal protobuf {@code FunctionDetails} representation.
 *
 * <p>Assembly proceeds in strict order: top-level function fields, then the source spec
 * (input topics/specs, subscription settings), then retry details, then the sink spec,
 * resources and runtime flags. The finished details are passed through
 * {@code FunctionConfigUtils.validateFunctionDetails}, which may reject inconsistent
 * combinations (e.g. autoAck=false with ATMOST_ONCE).
 *
 * @param sinkConfig  the user-provided sink configuration
 * @param sinkDetails class names and type argument extracted from the sink package
 * @return the validated FunctionDetails
 * @throws IOException declared for archive-related processing
 */
public static FunctionDetails convert(SinkConfig sinkConfig, ExtractedSinkDetails sinkDetails) throws IOException {
    FunctionDetails.Builder functionDetailsBuilder = FunctionDetails.newBuilder();

    // A "builtin://" archive selects a connector bundled with the broker.
    boolean isBuiltin = !org.apache.commons.lang3.StringUtils.isEmpty(sinkConfig.getArchive()) && sinkConfig.getArchive()
            .startsWith(org.apache.pulsar.common.functions.Utils.BUILTIN);

    // --- top-level function identity and behavior ---
    if (sinkConfig.getTenant() != null) {
        functionDetailsBuilder.setTenant(sinkConfig.getTenant());
    }
    if (sinkConfig.getNamespace() != null) {
        functionDetailsBuilder.setNamespace(sinkConfig.getNamespace());
    }
    if (sinkConfig.getName() != null) {
        functionDetailsBuilder.setName(sinkConfig.getName());
    }
    if (sinkConfig.getLogTopic() != null) {
        functionDetailsBuilder.setLogTopic(sinkConfig.getLogTopic());
    }
    functionDetailsBuilder.setRuntime(FunctionDetails.Runtime.JAVA);
    if (sinkConfig.getParallelism() != null) {
        functionDetailsBuilder.setParallelism(sinkConfig.getParallelism());
    } else {
        functionDetailsBuilder.setParallelism(1); // default: single instance
    }
    // The transform function class, or the identity function when none is configured.
    if (sinkDetails.getFunctionClassName() != null) {
        functionDetailsBuilder.setClassName(sinkDetails.getFunctionClassName());
    } else {
        functionDetailsBuilder.setClassName(IdentityFunction.class.getName());
    }
    if (sinkConfig.getTransformFunctionConfig() != null) {
        functionDetailsBuilder.setUserConfig(sinkConfig.getTransformFunctionConfig());
    }
    if (sinkConfig.getProcessingGuarantees() != null) {
        functionDetailsBuilder.setProcessingGuarantees(
                convertProcessingGuarantee(sinkConfig.getProcessingGuarantees()));
    } else {
        functionDetailsBuilder.setProcessingGuarantees(Function.ProcessingGuarantees.ATLEAST_ONCE);
    }

    // set source spec
    // source spec classname should be empty so that the default pulsar source will be used
    Function.SourceSpec.Builder sourceSpecBuilder = Function.SourceSpec.newBuilder();
    sourceSpecBuilder.setSubscriptionType(Function.SubscriptionType.SHARED);
    // Plain input topics: default consumer spec, non-regex.
    if (sinkConfig.getInputs() != null) {
        sinkConfig.getInputs().forEach(topicName ->
                sourceSpecBuilder.putInputSpecs(topicName,
                        Function.ConsumerSpec.newBuilder()
                                .setIsRegexPattern(false)
                                .build()));
    }
    // A topics pattern is stored as a single regex input spec.
    if (!StringUtils.isEmpty(sinkConfig.getTopicsPattern())) {
        sourceSpecBuilder.putInputSpecs(sinkConfig.getTopicsPattern(),
                Function.ConsumerSpec.newBuilder()
                        .setIsRegexPattern(true)
                        .build());
    }
    // Per-topic serde class names.
    if (sinkConfig.getTopicToSerdeClassName() != null) {
        sinkConfig.getTopicToSerdeClassName().forEach((topicName, serde) -> {
            sourceSpecBuilder.putInputSpecs(topicName,
                    Function.ConsumerSpec.newBuilder()
                            .setSerdeClassName(serde == null ? "" : serde)
                            .setIsRegexPattern(false)
                            .build());
        });
    }
    // Per-topic schema types.
    if (sinkConfig.getTopicToSchemaType() != null) {
        sinkConfig.getTopicToSchemaType().forEach((topicName, schemaType) -> {
            sourceSpecBuilder.putInputSpecs(topicName,
                    Function.ConsumerSpec.newBuilder()
                            .setSchemaType(schemaType == null ? "" : schemaType)
                            .setIsRegexPattern(false)
                            .build());
        });
    }
    // Fully-specified input specs; schemaType wins over serdeClassName when both are set.
    // NOTE: later putInputSpecs calls overwrite earlier entries for the same topic.
    if (sinkConfig.getInputSpecs() != null) {
        sinkConfig.getInputSpecs().forEach((topic, spec) -> {
            Function.ConsumerSpec.Builder bldr = Function.ConsumerSpec.newBuilder()
                    .setIsRegexPattern(spec.isRegexPattern());
            if (StringUtils.isNotBlank(spec.getSchemaType())) {
                bldr.setSchemaType(spec.getSchemaType());
            } else if (StringUtils.isNotBlank(spec.getSerdeClassName())) {
                bldr.setSerdeClassName(spec.getSerdeClassName());
            }
            if (spec.getReceiverQueueSize() != null) {
                bldr.setReceiverQueueSize(Function.ConsumerSpec.ReceiverQueueSize.newBuilder()
                        .setValue(spec.getReceiverQueueSize()).build());
            }
            if (spec.getCryptoConfig() != null) {
                bldr.setCryptoSpec(CryptoUtils.convert(spec.getCryptoConfig()));
            }
            bldr.putAllConsumerProperties(spec.getConsumerProperties());
            bldr.setPoolMessages(spec.isPoolMessages());
            sourceSpecBuilder.putInputSpecs(topic, bldr.build());
        });
    }

    if (sinkDetails.getTypeArg() != null) {
        sourceSpecBuilder.setTypeClassName(sinkDetails.getTypeArg());
    }
    if (isNotBlank(sinkConfig.getSourceSubscriptionName())) {
        sourceSpecBuilder.setSubscriptionName(sinkConfig.getSourceSubscriptionName());
    }

    // Set subscription type
    // Ordering requirements dictate the subscription type: total order -> FAILOVER,
    // per-key order -> KEY_SHARED, otherwise SHARED. This overwrites the initial SHARED default.
    Function.SubscriptionType subType;
    if ((sinkConfig.getRetainOrdering() != null && sinkConfig.getRetainOrdering())
            || FunctionConfig.ProcessingGuarantees.EFFECTIVELY_ONCE.equals(sinkConfig.getProcessingGuarantees())) {
        subType = Function.SubscriptionType.FAILOVER;
    } else if (sinkConfig.getRetainKeyOrdering() != null && sinkConfig.getRetainKeyOrdering()) {
        subType = Function.SubscriptionType.KEY_SHARED;
    } else {
        subType = Function.SubscriptionType.SHARED;
    }
    sourceSpecBuilder.setSubscriptionType(subType);

    if (sinkConfig.getAutoAck() != null) {
        functionDetailsBuilder.setAutoAck(sinkConfig.getAutoAck());
    } else {
        functionDetailsBuilder.setAutoAck(true);
    }
    if (sinkConfig.getTimeoutMs() != null) {
        sourceSpecBuilder.setTimeoutMs(sinkConfig.getTimeoutMs());
    }
    if (sinkConfig.getCleanupSubscription() != null) {
        sourceSpecBuilder.setCleanupSubscription(sinkConfig.getCleanupSubscription());
    } else {
        sourceSpecBuilder.setCleanupSubscription(true);
    }
    if (sinkConfig.getNegativeAckRedeliveryDelayMs() != null && sinkConfig.getNegativeAckRedeliveryDelayMs() > 0) {
        sourceSpecBuilder.setNegativeAckRedeliveryDelayMs(sinkConfig.getNegativeAckRedeliveryDelayMs());
    }

    if (sinkConfig.getSourceSubscriptionPosition() == SubscriptionInitialPosition.Earliest) {
        sourceSpecBuilder.setSubscriptionPosition(Function.SubscriptionPosition.EARLIEST);
    } else {
        sourceSpecBuilder.setSubscriptionPosition(Function.SubscriptionPosition.LATEST);
    }

    functionDetailsBuilder.setSource(sourceSpecBuilder);

    if (sinkConfig.getRetainKeyOrdering() != null) {
        functionDetailsBuilder.setRetainKeyOrdering(sinkConfig.getRetainKeyOrdering());
    }
    if (sinkConfig.getRetainOrdering() != null) {
        functionDetailsBuilder.setRetainOrdering(sinkConfig.getRetainOrdering());
    }

    // Retry/dead-letter configuration, only when retries are enabled.
    if (sinkConfig.getMaxMessageRetries() != null && sinkConfig.getMaxMessageRetries() > 0) {
        Function.RetryDetails.Builder retryDetails =
                Function.RetryDetails.newBuilder();
        retryDetails.setMaxMessageRetries(sinkConfig.getMaxMessageRetries());
        if (StringUtils.isNotBlank(sinkConfig.getDeadLetterTopic())) {
            retryDetails.setDeadLetterTopic(sinkConfig.getDeadLetterTopic());
        }
        functionDetailsBuilder.setRetryDetails(retryDetails);
    }

    // set up sink spec
    Function.SinkSpec.Builder sinkSpecBuilder = Function.SinkSpec.newBuilder();
    if (sinkDetails.getSinkClassName() != null) {
        sinkSpecBuilder.setClassName(sinkDetails.getSinkClassName());
    }

    if (isBuiltin) {
        // Strip the "builtin://" scheme to obtain the connector name.
        String builtin = sinkConfig.getArchive().replaceFirst("^builtin://", "");
        sinkSpecBuilder.setBuiltin(builtin);
    }
    // A builtin transform function is recorded on the function details, not the sink spec.
    if (!isEmpty(sinkConfig.getTransformFunction())
            && sinkConfig.getTransformFunction().startsWith(org.apache.pulsar.common.functions.Utils.BUILTIN)) {
        functionDetailsBuilder.setBuiltin(sinkConfig.getTransformFunction().replaceFirst("^builtin://", ""));
    }

    if (sinkConfig.getConfigs() != null) {
        sinkSpecBuilder.setConfigs(new Gson().toJson(sinkConfig.getConfigs()));
    }

    if (sinkConfig.getSecrets() != null && !sinkConfig.getSecrets().isEmpty()) {
        functionDetailsBuilder.setSecretsMap(new Gson().toJson(sinkConfig.getSecrets()));
    }

    if (sinkDetails.getTypeArg() != null) {
        sinkSpecBuilder.setTypeClassName(sinkDetails.getTypeArg());
    }

    functionDetailsBuilder.setSink(sinkSpecBuilder);

    // use default resources if resources not set
    Resources resources = Resources.mergeWithDefault(sinkConfig.getResources());

    Function.Resources.Builder bldr = Function.Resources.newBuilder();
    bldr.setCpu(resources.getCpu());
    bldr.setRam(resources.getRam());
    bldr.setDisk(resources.getDisk());
    functionDetailsBuilder.setResources(bldr);

    if (isNotBlank(sinkConfig.getRuntimeFlags())) {
        functionDetailsBuilder.setRuntimeFlags(sinkConfig.getRuntimeFlags());
    }

    functionDetailsBuilder.setComponentType(FunctionDetails.ComponentType.SINK);

    if (!StringUtils.isEmpty(sinkConfig.getCustomRuntimeOptions())) {
        functionDetailsBuilder.setCustomRuntimeOptions(sinkConfig.getCustomRuntimeOptions());
    }

    // Final cross-field validation before handing the details back.
    return
            FunctionConfigUtils.validateFunctionDetails(functionDetailsBuilder.build());
}
// autoAck=false combined with ATMOST_ONCE processing is an invalid configuration:
// convert() must reject it with IllegalArgumentException (raised by the final
// validateFunctionDetails step).
@Test
public void testAutoAckConvertFailed() throws IOException {
    SinkConfig sinkConfig = new SinkConfig();
    sinkConfig.setAutoAck(false);
    sinkConfig.setProcessingGuarantees(FunctionConfig.ProcessingGuarantees.ATMOST_ONCE);

    assertThrows(IllegalArgumentException.class, () -> {
        SinkConfigUtils.convert(sinkConfig, new SinkConfigUtils.ExtractedSinkDetails(null, null, null));
    });
}
/**
 * Initializes the Nacos naming service from the discovery configuration.
 *
 * <p>Idempotent at the happy-path level: when a naming service already exists the call
 * is a no-op. Credentials and namespace are read from the config properties with empty
 * defaults; the group name defaults to "SHENYU_GROUP".
 * NOTE(review): the null check on {@code namingService} is not synchronized, so two
 * concurrent init() calls could both create a client — confirm callers serialize init.
 *
 * @param config the discovery configuration (server list + properties)
 * @throws ShenyuException when the Nacos naming service cannot be created
 */
@Override
public void init(final DiscoveryConfig config) {
    if (this.namingService != null) {
        LOGGER.info("Nacos naming service already registered");
        return;
    }
    Properties properties = config.getProps();
    Properties nacosProperties = new Properties();
    this.groupName = properties.getProperty("groupName", "SHENYU_GROUP");
    String serverAddr = config.getServerList();
    nacosProperties.put(PropertyKeyConst.SERVER_ADDR, serverAddr);
    nacosProperties.put(PropertyKeyConst.NAMESPACE, properties.getProperty(NAMESPACE, ""));
    // Security credentials default to empty strings when not configured.
    nacosProperties.put(PropertyKeyConst.USERNAME, properties.getProperty(PropertyKeyConst.USERNAME, ""));
    nacosProperties.put(PropertyKeyConst.PASSWORD, properties.getProperty(PropertyKeyConst.PASSWORD, ""));
    nacosProperties.put(PropertyKeyConst.ACCESS_KEY, properties.getProperty(PropertyKeyConst.ACCESS_KEY, ""));
    nacosProperties.put(PropertyKeyConst.SECRET_KEY, properties.getProperty(PropertyKeyConst.SECRET_KEY, ""));
    try {
        this.namingService = NamingFactory.createNamingService(nacosProperties);
        LOGGER.info("Nacos naming service initialized success");
    } catch (NacosException e) {
        LOGGER.error("Error initializing Nacos naming service", e);
        throw new ShenyuException(e);
    }
}
// First init() call creates and stores the naming service; a second call (even when
// creation would fail) must be a no-op because the service is already registered.
@Test
void testInit() throws NoSuchFieldException, IllegalAccessException {
    // Set the discovery config
    setField(nacosDiscoveryServiceUnderTest.getClass(), "namingService", null);
    DiscoveryConfig config = new DiscoveryConfig();
    Properties properties = new Properties();
    config.setServerList("127.0.0.1:8848");
    properties.setProperty("groupName", "SHENYU_GROUP");
    config.setProps(properties);

    try (MockedStatic<NamingFactory> mockedNamingFactory = mockStatic(NamingFactory.class)) {
        // Mock the successful creation of NamingService
        mockedNamingFactory.when(() -> NamingFactory.createNamingService(any(Properties.class)))
                .thenReturn(namingService);
        nacosDiscoveryServiceUnderTest.init(config);
        mockedNamingFactory.verify(() -> NamingFactory.createNamingService(any(Properties.class)));
        assertEquals(namingService, getField(nacosDiscoveryServiceUnderTest, "namingService"));

        // Mock the situation where NamingService fails to be created and throws an exception
        mockedNamingFactory.when(() -> NamingFactory.createNamingService(any(Properties.class)))
                .thenThrow(new NacosException());
        // Already initialized, so no new creation is attempted and no exception escapes.
        assertDoesNotThrow(() -> nacosDiscoveryServiceUnderTest.init(config));
    }
}
/**
 * @return the byte order this stream uses for multi-byte values
 */
@Override
public ByteOrder getByteOrder() {
    return byteOrder;
}
// The output stream must report the same byte order as the serialization service
// that created it.
@Test
public void testGetByteOrder() throws Exception {
    ByteOrder byteOrderActual = dataOutputStream.getByteOrder();
    assertEquals(serializationService.getByteOrder(), byteOrderActual);
}
/**
 * Reports whether this aspect extension can decorate a method with the given return
 * type: only Reactor's two publisher types, {@code Flux} and {@code Mono}, are supported.
 *
 * @param returnType the declared return type of the intercepted method
 * @return true when the type is (a subtype of) Flux or Mono
 */
@Override
public boolean canHandleReturnType(Class<?> returnType) {
    boolean isFlux = Flux.class.isAssignableFrom(returnType);
    boolean isMono = Mono.class.isAssignableFrom(returnType);
    return isFlux || isMono;
}
// Both Reactor publisher types must be accepted by the aspect's return-type check.
@Test
public void testCheckTypes() {
    assertThat(reactorTimeLimiterAspectExt.canHandleReturnType(Mono.class)).isTrue();
    assertThat(reactorTimeLimiterAspectExt.canHandleReturnType(Flux.class)).isTrue();
}
/**
 * Reacts to intent events: when cleanup is enabled and the event signals a CORRUPT
 * intent, resubmits that intent immediately — but only on the node that is master
 * for the intent's key, to avoid duplicate resubmission across the cluster.
 */
@Override
public void event(IntentEvent event) {
    // this is the fast path for CORRUPT intents, retry on event notification.
    //TODO we might consider using the timer to back off for subsequent retries
    if (enabled && event.type() == IntentEvent.Type.CORRUPT) {
        Key key = event.subject().key();
        if (store.isMaster(key)) {
            IntentData data = store.getIntentData(event.subject().key());
            resubmitCorrupt(data, true);
        }
    }
}
// Simulates an intent whose processing marks it CORRUPT: the store delegate flips the
// state to CORRUPT and forwards the resulting event to the cleanup component, which
// must trigger exactly one resubmission (observed as one submit on the service).
@Test
public void corruptEvent() {
    IntentStoreDelegate mockDelegate = new IntentStoreDelegate() {
        @Override
        public void process(IntentData intentData) {
            // Force the CORRUPT state so the cleanup fast path fires.
            intentData.setState(CORRUPT);
            store.write(intentData);
        }

        @Override
        public void notify(IntentEvent event) {
            cleanup.event(event);
        }
    };
    store.setDelegate(mockDelegate);
    Intent intent = new MockIntent(1L);
    IntentData data = new IntentData(intent, INSTALL_REQ, null);
    store.addPending(data);
    assertEquals("Expect number of submits incorrect", 1, service.submitCounter());
}
/**
 * Tests a candidate attribute value against this pre-compiled LIKE condition.
 *
 * <p>A null value never matches. Prefix/suffix matches additionally honor a fixed
 * total length ({@code length != -1}) derived from the original pattern; equality,
 * substring and regex matches ignore it.
 *
 * @param attributeValue the value to test, may be null
 * @return true when the value satisfies the condition
 */
@Override
public boolean match(String attributeValue) {
    if (attributeValue == null) {
        return false;
    }
    // Shared length constraint for the anchored (StartsWith/EndsWith) cases.
    boolean lengthAccepted = length == -1 || length == attributeValue.length();
    switch (type) {
        case Equals:
            return attributeValue.equals(value);
        case StartsWith:
            return lengthAccepted && attributeValue.startsWith(value);
        case EndsWith:
            return lengthAccepted && attributeValue.endsWith(value);
        case Contains:
            return attributeValue.contains(value);
        case Regexp:
            return regexPattern.matcher(attributeValue).matches();
        default:
            throw new IllegalStateException("Unexpected type " + type);
    }
}
// Regex metacharacters appearing literally in a LIKE pattern must be escaped when the
// pattern is translated to a regex: each symbol matches itself, not its regex meaning.
@Test
public void testGeneralMetacharEscaping() {
    assertTrue(new LikeCondition("a%(b").match("aaa(b"));
    assertTrue(new LikeCondition("a%)b").match("aaa)b"));
    assertTrue(new LikeCondition("a%[b").match("aaa[b"));
    assertTrue(new LikeCondition("a%]b").match("aaa]b"));
    assertTrue(new LikeCondition("a%{b").match("aaa{b"));
    assertTrue(new LikeCondition("a%}b").match("aaa}b"));
    assertTrue(new LikeCondition("a%$b").match("aaa$b"));
    assertTrue(new LikeCondition("a%^b").match("aaa^b"));
    assertTrue(new LikeCondition("a%.b").match("aaa.b"));
    assertTrue(new LikeCondition("a%|b").match("aaa|b"));
    assertTrue(new LikeCondition("a%\\b").match("aaa\\b"));
}
/**
 * Reads the main class configured for the maven-jar-plugin, if any.
 *
 * <p>Looks up the plugin's {@code <archive><manifest><mainClass>} configuration value.
 *
 * @return the configured main class, or null when the plugin or the value is absent
 */
@Nullable
@Override
public String getMainClassFromJarPlugin() {
    Plugin mavenJarPlugin = project.getPlugin("org.apache.maven.plugins:maven-jar-plugin");
    if (mavenJarPlugin != null) {
        return getChildValue((Xpp3Dom) mavenJarPlugin.getConfiguration(), "archive", "manifest", "mainClass")
            .orElse(null);
    }
    return null;
}
// With a jar-plugin configuration containing archive/manifest/mainClass, the value
// must be extracted verbatim.
@Test
public void testGetMainClassFromJar_success() {
    when(mockMavenProject.getPlugin("org.apache.maven.plugins:maven-jar-plugin"))
        .thenReturn(mockPlugin);
    when(mockPlugin.getConfiguration()).thenReturn(pluginConfiguration);

    // Build the nested archive > manifest > mainClass DOM structure.
    Xpp3Dom archive = new Xpp3Dom("archive");
    Xpp3Dom manifest = new Xpp3Dom("manifest");
    pluginConfiguration.addChild(archive);
    archive.addChild(manifest);
    manifest.addChild(newXpp3Dom("mainClass", "some.main.class"));

    assertThat(mavenProjectProperties.getMainClassFromJarPlugin()).isEqualTo("some.main.class");
}
/**
 * Precise (equality) sharding: routes a single sharding value by delegating to the
 * range implementation with a singleton range and returning its first (only) target.
 *
 * @return the matching target name, or null when no target matches
 * @throws NullShardingValueException when the sharding value is null
 */
@Override
public String doSharding(final Collection<String> availableTargetNames, final PreciseShardingValue<Comparable<?>> shardingValue) {
    ShardingSpherePreconditions.checkNotNull(shardingValue.getValue(), NullShardingValueException::new);
    return doSharding(availableTargetNames, Range.singleton(shardingValue.getValue())).stream().findFirst().orElse(null);
}
// With a 2-Years sharding step, the range [2016-01, 2020-01] over monthly tables
// must resolve to exactly 3 targets (one per 2-year interval covering the range).
@Test
void assertRangeDoShardingByYearsInYearMonth() {
    Collection<String> availableTargetNames = new LinkedList<>();
    // Generate monthly target tables t_order_201601 .. t_order_202112.
    for (int i = 2016; i <= 2021; i++) {
        for (int j = 1; j <= 12; j++) {
            availableTargetNames.add(String.format("t_order_%04d%02d", i, j));
        }
    }
    Collection<String> actualAsYearMonth = createAlgorithm("yyyy-MM", "2016-01", "2021-12", "yyyyMM", 2, "Years")
            .doSharding(availableTargetNames, createShardingValue(YearMonth.of(2016, 1), YearMonth.of(2020, 1)));
    assertThat(actualAsYearMonth.size(), is(3));
}
/**
 * Loads and parses an OpenAPI v3 document from the given location (path or URL).
 *
 * @param apiDoc the location of the specification
 * @return the parsed OpenAPI model
 * @throws IllegalArgumentException when the document cannot be loaded or parsed
 */
OpenAPI loadOpenApiModel(String apiDoc) throws Exception {
    final OpenAPIV3Parser openApiParser = new OpenAPIV3Parser();
    final SwaggerParseResult openApi = openApiParser.readLocation(apiDoc, null, null);
    if (openApi != null && openApi.getOpenAPI() != null) {
        return openApi.getOpenAPI();
    }
    // In theory there should be a message in the parse result but it has disappeared...
    throw new IllegalArgumentException(
            "The given OpenApi specification could not be loaded from `" + apiDoc + "`.");
}
// The bundled pet-store v3 specification must parse into a non-null OpenAPI model.
@Test
public void shouldLoadOpenApiPetStoreModelV3() throws Exception {
    OpenApiRestProducerFactory factory = new OpenApiRestProducerFactory();
    assertNotNull(factory.loadOpenApiModel("petstore-v3.json"));
}
/**
 * Sets whether the service should be registered with the registry.
 *
 * @param register the register flag
 * @return this builder, for chaining
 */
public B register(Boolean register) {
    this.register = register;
    return getThis();
}
// The register flag set on the builder must be reflected by the built service,
// for both true and false.
@Test
void register() {
    ServiceBuilder builder = new ServiceBuilder();
    builder.register(true);
    Assertions.assertTrue(builder.build().isRegister());
    builder.register(false);
    Assertions.assertFalse(builder.build().isRegister());
}
/**
 * Attempts to acquire the lock described by the configuration.
 *
 * @param lockConfiguration the lock to acquire
 * @return a StorageLock wrapped in Optional when acquisition succeeds, otherwise empty
 */
@Override
public Optional<SimpleLock> lock(LockConfiguration lockConfiguration) {
    // Only wrap a lock handle when the underlying storage reports a successful acquisition.
    if (!doLock(lockConfiguration)) {
        return Optional.empty();
    }
    return Optional.of(new StorageLock(lockConfiguration, storageAccessor));
}
// When neither the insert nor the update of the lock record succeeds, the provider
// must return an empty Optional (no lock acquired).
@Test
void doNotReturnLockIfUpdatedZeroRows() {
    when(storageAccessor.insertRecord(LOCK_CONFIGURATION)).thenReturn(false);
    when(storageAccessor.updateRecord(LOCK_CONFIGURATION)).thenReturn(false);
    assertThat(lockProvider.lock(LOCK_CONFIGURATION)).isEmpty();
}
/**
 * Encodes a log record: writes the standard log header (capture length, total length,
 * timestamp) followed by {@code captureLength} bytes copied from the source buffer.
 *
 * @param encodingBuffer destination buffer
 * @param offset         write offset in the destination buffer
 * @param captureLength  number of payload bytes actually captured/copied
 * @param length         original (possibly larger) payload length recorded in the header
 * @param srcBuffer      payload source
 * @param srcOffset      read offset in the source buffer
 * @return total bytes written (header + captured payload)
 */
static int encode(
    final UnsafeBuffer encodingBuffer,
    final int offset,
    final int captureLength,
    final int length,
    final DirectBuffer srcBuffer,
    final int srcOffset)
{
    final int encodedLength = encodeLogHeader(encodingBuffer, offset, captureLength, length);

    encodingBuffer.putBytes(offset + encodedLength, srcBuffer, srcOffset, captureLength);

    return encodedLength + captureLength;
}
// Encoding a payload that fits entirely: the header must record both lengths as the
// payload size, carry a non-zero timestamp, and every payload byte must be copied intact.
@Test
void encodeBufferSmallerThanMaxCaptureSize() {
    final UnsafeBuffer srcBuffer = new UnsafeBuffer(new byte[256]);
    final int offset = 24;
    final int srcOffset = 20;
    final int length = 128;
    srcBuffer.setMemory(srcOffset, length, (byte)111);

    final int encodedLength = encode(buffer, offset, length, length, srcBuffer, srcOffset);

    assertEquals(LOG_HEADER_LENGTH + length, encodedLength);
    assertEquals(length, buffer.getInt(offset, LITTLE_ENDIAN));
    assertEquals(length, buffer.getInt(offset + SIZE_OF_INT, LITTLE_ENDIAN));
    // Timestamp field: exact value unknown, but it must have been written.
    assertNotEquals(0, buffer.getLong(offset + SIZE_OF_INT * 2, LITTLE_ENDIAN));
    for (int i = 0; i < length; i++) {
        assertEquals(111, buffer.getByte(offset + LOG_HEADER_LENGTH + i));
    }
}
/**
 * Convenience overload: converts the given data with a null second argument
 * (delegates to {@code convert(data, null)}; the two-argument overload defines
 * that argument's semantics).
 */
Record convert(Object data) {
    return convert(data, null);
}
// Converting a map whose values are structs with unmapped fields must surface exactly one
// AddColumn rooted at the map's value ("stma.value") containing the expected struct type.
@Test public void testMissingColumnDetectionStructMapValue() { Table table = mock(Table.class); when(table.schema()).thenReturn(STRUCT_IN_MAP_BASIC_SCHEMA); RecordConverter converter = new RecordConverter(table, config); Struct nestedData = createNestedStructData(); Struct struct = new Struct(CONNECT_STRUCT_IN_MAP_SCHEMA) .put("stma", ImmutableMap.of("key1", nestedData, "key2", nestedData)); SchemaUpdate.Consumer consumer = new SchemaUpdate.Consumer(); converter.convert(struct, consumer); Collection<AddColumn> addCols = consumer.addColumns(); assertThat(addCols).hasSize(1); AddColumn addCol = addCols.iterator().next(); assertThat(addCol.parentName()).isEqualTo("stma.value"); assertThat(addCol.name()).isEqualTo("st"); StructType nestedValueType = addCol.type().asStructType(); assertThat(nestedValueType.fields()).hasSize(MAPPED_CNT); assertTypesAddedFromStruct(col -> nestedValueType.field(col).type()); }
// Resolves the master entry owning the given slot, then issues CLUSTER COUNTKEYSINSLOT
// against it, blocking until the async read completes.
@Override public Long clusterCountKeysInSlot(int slot) { RedisClusterNode node = clusterGetNodeForSlot(slot); MasterSlaveEntry entry = executorService.getConnectionManager().getEntry(new InetSocketAddress(node.getHost(), node.getPort())); RFuture<Long> f = executorService.readAsync(entry, StringCodec.INSTANCE, RedisCommands.CLUSTER_COUNTKEYSINSLOT, slot); return syncFuture(f); }
// An empty slot must report a key count of zero.
@Test public void testClusterCountKeysInSlot() { Long t = connection.clusterCountKeysInSlot(1); assertThat(t).isZero(); }
// Starts the interactive REPL with no pre-seeded statement.
public void executeInInteractiveMode() { executeInInteractiveMode(null); }
// A SIGINT raised while a statement is running must cancel it and leave the CLI
// responsive (the subsequent HELP command's output appears on the terminal).
@Test
void testCancelExecutionInteractiveMode() throws Exception {
    final MockExecutor mockExecutor = new MockExecutor(new SqlParserHelper(), true);
    Path historyFilePath = historyTempFile();
    InputStream inputStream = new ByteArrayInputStream("SELECT 1;\nHELP;\n ".getBytes());
    OutputStream outputStream = new ByteArrayOutputStream(248);
    try (Terminal terminal = TerminalUtils.createDumbTerminal(inputStream, outputStream);
            CliClient client = new CliClient(() -> terminal, mockExecutor, historyFilePath, null)) {
        CheckedThread thread = new CheckedThread() {
            @Override
            public void go() {
                client.executeInInteractiveMode();
            }
        };
        thread.start();
        // Busy-wait until the mock executor is blocked inside the statement.
        while (!mockExecutor.isAwait) {
            Thread.sleep(10);
        }
        terminal.raise(Terminal.Signal.INT);
        CommonTestUtils.waitUntilCondition(
                () -> outputStream.toString().contains(CliStrings.MESSAGE_HELP));
        // Prevent NPE when closing the terminal. See FLINK-33116 for more information.
        thread.sync();
    }
}
// Serializes this message into the buffer via the shared writer instance.
@Override public void writeTo(ByteBuf byteBuf) throws LispWriterException { WRITER.writeTo(byteBuf, this); }
// Round-trip: writing an info-reply then reading it back must produce an equal object.
@Test public void testSerialization() throws LispReaderException, LispWriterException, LispParseError { ByteBuf byteBuf = Unpooled.buffer(); InfoReplyWriter writer = new InfoReplyWriter(); writer.writeTo(byteBuf, reply1); InfoReplyReader reader = new InfoReplyReader(); LispInfoReply deserialized = reader.readFrom(byteBuf); new EqualsTester() .addEqualityGroup(reply1, deserialized).testEquals(); }
/**
 * Builds the container runtime spec for the allocated GPUs.
 *
 * For the Docker runtime this sets the "nvidia" runtime and exposes the devices'
 * minor numbers through the NVIDIA_VISIBLE_DEVICES environment variable.
 * For any other runtime no spec is needed and null is returned.
 */
@Override
public DeviceRuntimeSpec onDevicesAllocated(Set<Device> allocatedDevices, YarnRuntimeType yarnRuntime) throws Exception {
    LOG.debug("Generating runtime spec for allocated devices: {}, {}", allocatedDevices, yarnRuntime.getName());
    if (yarnRuntime == YarnRuntimeType.RUNTIME_DOCKER) {
        String nvidiaRuntime = "nvidia";
        String nvidiaVisibleDevices = "NVIDIA_VISIBLE_DEVICES";
        // Join minor numbers with "," and no trailing separator. The previous
        // append-then-substring(0, length - 1) approach threw
        // StringIndexOutOfBoundsException for an empty device set.
        StringBuilder gpuMinorNumbersSB = new StringBuilder();
        for (Device device : allocatedDevices) {
            if (gpuMinorNumbersSB.length() > 0) {
                gpuMinorNumbersSB.append(',');
            }
            gpuMinorNumbersSB.append(device.getMinorNumber());
        }
        String minorNumbers = gpuMinorNumbersSB.toString();
        LOG.info("Nvidia Docker v2 assigned GPU: " + minorNumbers);
        return DeviceRuntimeSpec.Builder.newInstance()
            .addEnv(nvidiaVisibleDevices, minorNumbers)
            .setContainerRuntime(nvidiaRuntime)
            .build();
    }
    // Non-Docker runtimes need no device runtime spec.
    return null;
}
// Default runtime yields no spec; Docker runtime must expose allocated minor
// numbers as a comma-separated NVIDIA_VISIBLE_DEVICES value ("0", then "0,1").
@Test
public void testOnDeviceAllocated() throws Exception {
    NvidiaGPUPluginForRuntimeV2 plugin = new NvidiaGPUPluginForRuntimeV2();
    Set<Device> allocatedDevices = new TreeSet<>();
    DeviceRuntimeSpec spec = plugin.onDevicesAllocated(allocatedDevices, YarnRuntimeType.RUNTIME_DEFAULT);
    Assert.assertNull(spec);
    // allocate one device
    allocatedDevices.add(Device.Builder.newInstance()
        .setId(0).setHealthy(true)
        .setBusID("00000000:04:00.0")
        .setDevPath("/dev/nvidia0")
        .setMajorNumber(195)
        .setMinorNumber(0).build());
    spec = plugin.onDevicesAllocated(allocatedDevices, YarnRuntimeType.RUNTIME_DOCKER);
    Assert.assertEquals("nvidia", spec.getContainerRuntime());
    Assert.assertEquals("0", spec.getEnvs().get("NVIDIA_VISIBLE_DEVICES"));
    // two device allowed
    allocatedDevices.add(Device.Builder.newInstance()
        .setId(0).setHealthy(true)
        .setBusID("00000000:82:00.0")
        .setDevPath("/dev/nvidia1")
        .setMajorNumber(195)
        .setMinorNumber(1).build());
    spec = plugin.onDevicesAllocated(allocatedDevices, YarnRuntimeType.RUNTIME_DOCKER);
    Assert.assertEquals("nvidia", spec.getContainerRuntime());
    Assert.assertEquals("0,1", spec.getEnvs().get("NVIDIA_VISIBLE_DEVICES"));
}
public static List<WeightedHostAddress> prioritize(WeightedHostAddress[] records) { final List<WeightedHostAddress> result = new LinkedList<>(); // sort by priority (ascending) SortedMap<Integer, Set<WeightedHostAddress>> byPriority = new TreeMap<>(); for(final WeightedHostAddress record : records) { if (byPriority.containsKey(record.getPriority())) { byPriority.get(record.getPriority()).add(record); } else { final Set<WeightedHostAddress> set = new HashSet<>(); set.add(record); byPriority.put(record.getPriority(), set); } } // now, randomize each priority set by weight. for(Map.Entry<Integer, Set<WeightedHostAddress>> weights : byPriority.entrySet()) { List<WeightedHostAddress> zeroWeights = new LinkedList<>(); int totalWeight = 0; final Iterator<WeightedHostAddress> i = weights.getValue().iterator(); while (i.hasNext()) { final WeightedHostAddress next = i.next(); if (next.weight == 0) { // set aside, as these should be considered last according to the RFC. zeroWeights.add(next); i.remove(); continue; } totalWeight += next.getWeight(); } int iterationWeight = totalWeight; Iterator<WeightedHostAddress> iter = weights.getValue().iterator(); while (iter.hasNext()) { int needle = new Random().nextInt(iterationWeight); while (true) { final WeightedHostAddress record = iter.next(); needle -= record.getWeight(); if (needle <= 0) { result.add(record); iter.remove(); iterationWeight -= record.getWeight(); break; } } iter = weights.getValue().iterator(); } // finally, append the hosts with zero priority (shuffled) Collections.shuffle(zeroWeights); result.addAll(zeroWeights); } return result; }
// All hosts share weight 1, so ordering is fully determined by ascending priority: 0, 1, 2.
@Test
public void testZeroPriority() throws Exception {
    // setup
    final DNSUtil.WeightedHostAddress hostA = new DNSUtil.WeightedHostAddress("hostA", 5222, false, 0, 1);
    final DNSUtil.WeightedHostAddress hostB = new DNSUtil.WeightedHostAddress("hostB", 5222, false, 2, 1);
    final DNSUtil.WeightedHostAddress hostC = new DNSUtil.WeightedHostAddress("hostC", 5222, false, 1, 1);
    // do magic
    final List<DNSUtil.WeightedHostAddress> result = DNSUtil.prioritize(new DNSUtil.WeightedHostAddress[]{hostA, hostB, hostC});
    // verify
    assertEquals(3, result.size());
    assertEquals(hostA, result.get(0));
    assertEquals(hostC, result.get(1));
    assertEquals(hostB, result.get(2));
}
public static boolean isInvalidStanzaSentPriorToResourceBinding(final Packet stanza, final ClientSession session) { // Openfire sets 'authenticated' only after resource binding. if (session.getStatus() == Session.Status.AUTHENTICATED) { return false; } // Beware, the 'to' address in the stanza will have been overwritten by the final JID intendedRecipient = stanza.getTo(); final JID serverDomain = new JID(XMPPServer.getInstance().getServerInfo().getXMPPDomain()); // If there's no 'to' address, then the stanza is implicitly addressed at the user itself. if (intendedRecipient == null) { return false; } // TODO: after authentication (but prior to resource binding), it should be possible to verify that the // intended recipient's bare JID corresponds with the authorized user. Openfire currently does not have an API // that can be used to obtain the authorized username, prior to resource binding. if (intendedRecipient.equals(serverDomain)) { return false; } return true; }
// A stanza addressed to a third-party user before resource binding must be flagged invalid.
@Test
public void testIsInvalid_addressedAtThirdPartyUser_unauthenticated() throws Exception {
    // Setup test fixture.
    final Packet stanza = new Message();
    stanza.setTo(new JID("foobar123", XMPPServer.getInstance().getServerInfo().getXMPPDomain(), "test123"));
    final LocalClientSession session = mock(LocalClientSession.class, withSettings().strictness(Strictness.LENIENT));
    when(session.getStatus()).thenReturn(Session.Status.CONNECTED); // Openfire sets 'AUTHENTICATED' only after resource binding has been done.
    // Execute system under test.
    final boolean result = SessionPacketRouter.isInvalidStanzaSentPriorToResourceBinding(stanza, session);
    // Verify results.
    assertTrue(result);
}
/**
 * Authenticates a user by name or e-mail address.
 * Returns the user on success; empty for null input, unknown account,
 * disabled account, or a password mismatch.
 */
public Optional<User> login(String nameOrEmail, String password) {
    if (nameOrEmail == null || password == null) {
        return Optional.empty();
    }
    // Try the login name first, then fall back to the e-mail address.
    User account = userDAO.findByName(nameOrEmail);
    if (account == null) {
        account = userDAO.findByEmail(nameOrEmail);
    }
    if (account == null || account.isDisabled()) {
        return Optional.empty();
    }
    if (!encryptionService.authenticate(password, account.getPassword(), account.getSalt())) {
        return Optional.empty();
    }
    performPostLoginActivities(account);
    return Optional.of(account);
}
// A null name/email must short-circuit to an empty Optional.
@Test void callingLoginShouldNotReturnUserObjectWhenGivenNullNameOrEmail() { Optional<User> user = userService.login(null, "password"); Assertions.assertFalse(user.isPresent()); }
public boolean isScheduled() {
    // Scheduled means a pending future exists that has not yet completed.
    if (future == null) {
        return false;
    }
    return !future.isDone();
}
// An incomplete future means work is still in flight, so isScheduled() must be true.
@Test public void isScheduled_inFlight() { pacer.future = new CompletableFuture<>(); assertThat(pacer.isScheduled()).isTrue(); }
/**
 * Searches favorites by user id and/or app id; at least one must be supplied.
 * Searching by user id is silently restricted to the logged-in user's own favorites.
 */
public List<Favorite> search(String userId, String appId, Pageable page) {
    boolean isUserIdEmpty = Strings.isNullOrEmpty(userId);
    boolean isAppIdEmpty = Strings.isNullOrEmpty(appId);
    if (isAppIdEmpty && isUserIdEmpty) {
        throw new BadRequestException("user id and app id can't be empty at the same time");
    }
    if (!isUserIdEmpty) {
        UserInfo loginUser = userInfoHolder.getUser();
        // user can only search his own favorite app
        if (!Objects.equals(loginUser.getUserId(), userId)) {
            userId = loginUser.getUserId();
        }
    }
    // search by userId
    if (isAppIdEmpty && !isUserIdEmpty) {
        return favoriteRepository.findByUserIdOrderByPositionAscDataChangeCreatedTimeAsc(userId, page);
    }
    // search by appId
    if (!isAppIdEmpty && isUserIdEmpty) {
        return favoriteRepository.findByAppIdOrderByPositionAscDataChangeCreatedTimeAsc(appId, page);
    }
    // search by userId and appId
    return Collections.singletonList(favoriteRepository.findByUserIdAndAppId(userId, appId));
}
// Searching with both user id and app id must return the single matching favorite from the fixture.
@Test @Sql(scripts = "/sql/favorites/favorites.sql", executionPhase = Sql.ExecutionPhase.BEFORE_TEST_METHOD) @Sql(scripts = "/sql/cleanup.sql", executionPhase = Sql.ExecutionPhase.AFTER_TEST_METHOD) public void testSearchByAppIdAndUserId() { List<Favorite> favorites = favoriteService.search(testUser, "test0621-04", PageRequest.of(0, 10)); Assert.assertEquals(1, favorites.size()); }
// Thread-safe snapshot of the topology structure, delegated to the internal builder.
public synchronized TopologyDescription describe() { return internalTopologyBuilder.describe(); }
// A windowed zero-arg count() must keep the expected source->aggregate topology shape and a persistent local store.
@Test public void slidingWindowZeroArgCountShouldPreserveTopologyStructure() { final StreamsBuilder builder = new StreamsBuilder(); builder.stream("input-topic") .groupByKey() .windowedBy(TimeWindows.of(ofMillis(1))) .count(); final Topology topology = builder.build(); final TopologyDescription describe = topology.describe(); assertEquals( "Topologies:\n" + "   Sub-topology: 0\n" + "    Source: KSTREAM-SOURCE-0000000000 (topics: [input-topic])\n" + "      --> KSTREAM-AGGREGATE-0000000002\n" + "    Processor: KSTREAM-AGGREGATE-0000000002 (stores: [KSTREAM-AGGREGATE-STATE-STORE-0000000001])\n" + "      --> none\n" + "      <-- KSTREAM-SOURCE-0000000000\n\n", describe.toString() ); topology.internalTopologyBuilder.setStreamsConfig(streamsConfig); assertThat(topology.internalTopologyBuilder.setApplicationId("test").buildTopology().hasPersistentLocalStore(), is(true)); }
// Serializes a Connect value to JSON bytes; wraps it in a schema envelope only when
// schemas are enabled. Null schema with null value short-circuits to null output.
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { if (schema == null && value == null) { return null; } JsonNode jsonValue = config.schemasEnabled() ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } }
// Heterogeneous (non-string) map keys under a null schema are serialized as an array
// of [key, value] pairs; iteration order is unspecified, hence the set comparison.
@Test
public void nullSchemaAndMapNonStringKeysToJson() {
    // This still needs to do conversion of data, null schema means "anything goes". Make sure we mix and match
    // types to verify conversion still works.
    Map<Object, Object> input = new HashMap<>();
    input.put("string", 12);
    input.put(52, "string");
    input.put(false, true);
    JsonNode converted = parse(converter.fromConnectData(TOPIC, null, input));
    validateEnvelopeNullSchema(converted);
    assertTrue(converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME).isNull());
    assertTrue(converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).isArray());
    ArrayNode payload = (ArrayNode) converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME);
    assertEquals(3, payload.size());
    Set<JsonNode> payloadEntries = new HashSet<>();
    for (JsonNode elem : payload)
        payloadEntries.add(elem);
    assertEquals(new HashSet<>(Arrays.asList(JsonNodeFactory.instance.arrayNode().add("string").add(12),
            JsonNodeFactory.instance.arrayNode().add(52).add("string"),
            JsonNodeFactory.instance.arrayNode().add(false).add(true))),
            payloadEntries
    );
}
public String getCustomError(HttpRequestWrapper req, HttpResponseWrapper res) {
    // First matcher wins; no match yields null so the caller can fall back
    // to the default error handling.
    return matchersAndLogs.stream()
            .filter(entry -> entry.getMatcher().matchResponse(req, res))
            .findFirst()
            .map(entry -> entry.getCustomError().customError(req, res))
            .orElse(null);
}
// A rule registered for HTTP 403 must produce its custom message for a 403 response.
@Test public void testMatchesCode() throws IOException { HttpRequestWrapper request = createHttpRequest(BQ_TABLES_LIST_URL); HttpResponseWrapper response = createHttpResponse(403); CustomHttpErrors.Builder builder = new CustomHttpErrors.Builder(); builder.addErrorForCode(403, "Custom Error Msg"); CustomHttpErrors customErrors = builder.build(); String errorMessage = customErrors.getCustomError(request, response); assertEquals("Custom Error Msg", errorMessage); }
@Override
public boolean contains(PipelineConfig o) {
    // Membership is delegated to each merged part; true if any part holds the pipeline.
    return this.parts.stream().anyMatch(part -> part.contains(o));
}
// A pipeline absent from every merged part must not be reported as contained.
@Test public void shouldReturnFalseWhenDoesNotContainPipeline() { PipelineConfig pipe1 = PipelineConfigMother.pipelineConfig("pipeline1"); PipelineConfigs group = new MergePipelineConfigs( new BasicPipelineConfigs(pipe1), new BasicPipelineConfigs()); assertThat(group.contains(PipelineConfigMother.pipelineConfig("pipeline2")), is(false)); }
/**
 * Deep-copies the given object, choosing the cheapest safe strategy:
 * immutable Strings are returned as-is; collections/maps holding
 * non-Serializable members and JsonNodes use JSON-based copying; plain
 * Serializable objects use Java serialization, falling back to JSON cloning
 * if the object graph is not fully serializable.
 */
@Override
public <T> T clone(T object) {
    if (object instanceof String) {
        // Strings are immutable; the same reference is a valid "copy".
        return object;
    } else if (object instanceof Collection) {
        Object firstElement = findFirstNonNullElement((Collection) object);
        if (firstElement != null && !(firstElement instanceof Serializable)) {
            // Non-Serializable elements: round-trip through JSON with an explicit
            // parametric type so the concrete collection can be rebuilt.
            JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), firstElement.getClass());
            return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
        }
    } else if (object instanceof Map) {
        Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
        if (firstEntry != null) {
            Object key = firstEntry.getKey();
            Object value = firstEntry.getValue();
            if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
                // Same JSON round-trip, parameterized by key and value types.
                JavaType type = TypeFactory.defaultInstance().constructParametricType(object.getClass(), key.getClass(), value.getClass());
                return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
            }
        }
    } else if (object instanceof JsonNode) {
        return (T) ((JsonNode) object).deepCopy();
    }
    if (object instanceof Serializable) {
        try {
            return (T) SerializationHelper.clone((Serializable) object);
        } catch (SerializationException e) {
            // it is possible that object itself implements java.io.Serializable, but underlying structure does not
            // in this case we switch to the other JSON marshaling strategy which doesn't use the Java serialization
        }
    }
    return jsonClone(object);
}
// A map containing a null value must be cloned to an equal but distinct instance.
@Test public void should_clone_map_with_null_value() { Map<String, Object> original = new HashMap<>(); original.put("null", null); Object cloned = serializer.clone(original); assertEquals(original, cloned); assertNotSame(original, cloned); }
/**
 * Builds a KsqlSecurityContext for the request: a user-specific service context
 * when a user context provider and a principal are both present, otherwise a
 * shared default service context.
 */
@Override
public KsqlSecurityContext provide(final ApiSecurityContext apiSecurityContext) {
    final Optional<KsqlPrincipal> principal = apiSecurityContext.getPrincipal();
    final Optional<String> authHeader = apiSecurityContext.getAuthHeader();
    final List<Entry<String, String>> requestHeaders = apiSecurityContext.getRequestHeaders();

    // A user context is not necessary if a user context provider is not present or the user
    // principal is missing. If a failed authentication attempt results in a missing principle,
    // then the authentication plugin will have already failed the connection before calling
    // this method. Therefore, if we've reached this method with a missing principle, then this
    // must be a valid connection that does not require authentication.
    // For these cases, we create a default service context that the missing user can use.
    final boolean requiresUserContext = securityExtension != null
            && securityExtension.getUserContextProvider().isPresent()
            && principal.isPresent();

    if (!requiresUserContext) {
        return new KsqlSecurityContext(
                principal,
                defaultServiceContextFactory.create(
                        ksqlConfig,
                        authHeader,
                        schemaRegistryClientFactory,
                        connectClientFactory,
                        sharedClient,
                        requestHeaders,
                        principal)
        );
    }
    // Provider and principal are both present here, so .get() calls are safe.
    return securityExtension.getUserContextProvider()
            .map(provider -> new KsqlSecurityContext(
                    principal,
                    userServiceContextFactory.create(
                            ksqlConfig,
                            authHeader,
                            provider.getKafkaClientSupplier(principal.get()),
                            provider.getSchemaRegistryClientFactory(principal.get()),
                            connectClientFactory,
                            sharedClient,
                            requestHeaders,
                            principal)))
            .get();
}
// The API security context's auth header must be forwarded verbatim to the
// user service-context factory when a user context provider is present.
@Test
public void shouldPassAuthHeaderToUserFactory() {
    // Given:
    when(securityExtension.getUserContextProvider()).thenReturn(Optional.of(userContextProvider));
    when(apiSecurityContext.getAuthHeader()).thenReturn(Optional.of("some-auth"));
    // When:
    ksqlSecurityContextProvider.provide(apiSecurityContext);
    // Then:
    verify(userServiceContextFactory)
            .create(any(), eq(Optional.of("some-auth")), any(), any(), any(), any(), any(), any());
}
// Closes by delegating to the uninterruptible variant.
@Override public void close() { closeUninterruptibly(); }
// After close() the guard reports closed and acquiring a resource must fail with IOException.
@Test void testClose() { ResourceGuard resourceGuard = new ResourceGuard(); assertThat(resourceGuard.isClosed()).isFalse(); resourceGuard.close(); assertThat(resourceGuard.isClosed()).isTrue(); assertThatThrownBy(resourceGuard::acquireResource).isInstanceOf(IOException.class); }
// Parses SQL text into statements by delegating to the primary execution context.
@Override public List<ParsedStatement> parse(final String sql) { return primaryContext.parse(sql); }
// A single statement spread over multiple physical lines must still parse as one statement.
@Test public void shouldHandleCommandsSpreadOverMultipleLines() { setupKsqlEngineWithSharedRuntimeEnabled(); final String runScriptContent = "CREATE STREAM S1 \n" + "(COL1 BIGINT, COL2 VARCHAR)\n" + " WITH \n" + "(KAFKA_TOPIC = 's1_topic', VALUE_FORMAT = 'JSON', KEY_FORMAT = 'KAFKA');\n"; final List<?> parsedStatements = ksqlEngine.parse(runScriptContent); assertThat(parsedStatements, hasSize(1)); }
// Returns the future's value only if it already completed successfully; otherwise null.
// join() cannot block here because isReady implies the future is done.
@SuppressWarnings("NullAway") static @Nullable <V> V getIfReady(@Nullable CompletableFuture<V> future) { return isReady(future) ? future.join() : null; }
// Unsuccessful futures (pending, failed, cancelled) must yield null instead of a value or a throw.
@Test(dataProvider = "unsuccessful") public void getIfReady_fails(CompletableFuture<Integer> future) { assertThat(Async.getIfReady(future)).isNull(); }
// Widening cast TINYINT -> DOUBLE; always exact, no precision loss possible.
@ScalarOperator(CAST) @SqlType(StandardTypes.DOUBLE) public static double castToDouble(@SqlType(StandardTypes.TINYINT) long value) { return value; }
// TINYINT literals must cast to exact double values.
@Test public void testCastToDouble() { assertFunction("cast(TINYINT'37' as double)", DOUBLE, 37.0); assertFunction("cast(TINYINT'17' as double)", DOUBLE, 17.0); }
public static boolean isShadow(final HintShadowAlgorithm<Comparable<?>> shadowAlgorithm, final ShadowDetermineCondition shadowCondition, final ShadowRule shadowRule) {
    // Shadow routing applies as soon as any note shadow value matches the algorithm.
    return createNoteShadowValues(shadowCondition).stream()
            .anyMatch(each -> shadowAlgorithm.isShadow(shadowRule.getAllShadowTableNames(), each));
}
// The SQL_HINT algorithm must report shadow for a matching determine condition and rule.
@SuppressWarnings({"rawtypes", "unchecked"}) @Test void assertIsShadow() { HintShadowAlgorithm hintShadowAlgorithm = (HintShadowAlgorithm) TypedSPILoader.getService(ShadowAlgorithm.class, "SQL_HINT", new Properties()); assertTrue(HintShadowAlgorithmDeterminer.isShadow(hintShadowAlgorithm, createShadowDetermineCondition(), new ShadowRule(createShadowRuleConfiguration()))); }
// JSON array length UDF: returns the element count of a JSON array string, or null
// for null input, missing/unparseable JSON, or JSON that is not an array.
@Udf public Integer length(@UdfParameter final String jsonArray) { if (jsonArray == null) { return null; } final JsonNode node = UdfJsonMapper.parseJson(jsonArray); if (node.isMissingNode() || !node.isArray()) { return null; } return node.size(); }
// Null input must map to null output (SQL null-propagation semantics).
@Test
public void shouldReturnNullForNull() {
    // When:
    final Integer result = udf.length(null);
    // Then:
    assertNull(result);
}
// Reports whether the ES node is up. Caches a positive answer in nodeOperational and,
// once operational, stops the connector (no further health checks are needed).
// If the health check is interrupted, the interrupt flag is restored and the cached state returned.
@Override public boolean isOperational() { if (nodeOperational) { return true; } boolean flag = false; try { flag = checkOperational(); } catch (InterruptedException e) { LOG.trace("Interrupted while checking ES node is operational", e); Thread.currentThread().interrupt(); } finally { if (flag) { esConnector.stop(); nodeOperational = true; } } return nodeOperational; }
// A first unreachable health probe followed by GREEN must still end up operational (retry behavior).
@Test public void isOperational_should_retry_if_Elasticsearch_is_unreachable() { EsConnector esConnector = mock(EsConnector.class); when(esConnector.getClusterHealthStatus()) .thenReturn(Optional.empty()) .thenReturn(Optional.of(ClusterHealthStatus.GREEN)); EsManagedProcess underTest = new EsManagedProcess(mock(Process.class), ProcessId.ELASTICSEARCH, esConnector, WAIT_FOR_UP_TIMEOUT); assertThat(underTest.isOperational()).isTrue(); }
// Reflectively writes 'value' into the named (possibly private) field of 'object'.
static void setFieldValue(Object object, String fieldName, Object value) throws IllegalAccessException { getAccessibleField(object, fieldName).set(object, value); }
// Round-trips a private field: reads "sqlCounter" then writes the same value back without error.
@Test
public void testSetFieldValue() throws IllegalAccessException {
    // sqlCounter is a private field that exists like any other
    final Object value = JdbcWrapperHelper.getFieldValue(JdbcWrapper.SINGLETON, "sqlCounter");
    JdbcWrapperHelper.setFieldValue(JdbcWrapper.SINGLETON, "sqlCounter", value);
}
// Annotates array-like fields with JSR-303/Jakarta @Size(min/max) when the schema
// declares minItems/maxItems and validation annotation generation is enabled.
@Override public JFieldVar apply(String nodeName, JsonNode node, JsonNode parent, JFieldVar field, Schema currentSchema) { if (ruleFactory.getGenerationConfig().isIncludeJsr303Annotations() && (node.has("minItems") || node.has("maxItems")) && isApplicableType(field)) { final Class<? extends Annotation> sizeClass = ruleFactory.getGenerationConfig().isUseJakartaValidation() ? Size.class : javax.validation.constraints.Size.class; JAnnotationUse annotation = field.annotate(sizeClass); if (node.has("minItems")) { annotation.param("min", node.get("minItems").asInt()); } if (node.has("maxItems")) { annotation.param("max", node.get("maxItems").asInt()); } } return field; }
// Without minItems/maxItems in the schema, no @Size annotation must be emitted.
@Test public void testNotUsed() { when(config.isIncludeJsr303Annotations()).thenReturn(true); when(node.has("minItems")).thenReturn(false); when(node.has("maxItems")).thenReturn(false); when(fieldVar.type().boxify().fullName()).thenReturn(fieldClass.getTypeName()); JFieldVar result = rule.apply("node", node, null, fieldVar, null); assertSame(fieldVar, result); verify(fieldVar, never()).annotate(sizeClass); verify(annotation, never()).param(anyString(), anyInt()); }
// Factory method; rejects nulls and the NO_VALUE status (callers must use the
// dedicated ConditionStatus.NO_VALUE_STATUS constant instead).
public static ConditionStatus create(EvaluationStatus status, String value) { requireNonNull(status, "status can not be null"); checkArgument(status != EvaluationStatus.NO_VALUE, "EvaluationStatus 'NO_VALUE' can not be used with this method, use constant ConditionStatus.NO_VALUE_STATUS instead."); requireNonNull(value, "value can not be null"); return new ConditionStatus(status, value); }
// Passing NO_VALUE must raise IllegalArgumentException with the documented guidance message.
@Test public void create_throws_IAE_if_status_argument_is_NO_VALUE() { assertThatThrownBy(() -> ConditionStatus.create(NO_VALUE, SOME_VALUE)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("EvaluationStatus 'NO_VALUE' can not be used with this method, use constant ConditionStatus.NO_VALUE_STATUS instead."); }
/**
 * Decides whether a method's parameters/return need the TripleWrapper envelope.
 * Generic ($invoke/$invokeAsync) and $echo calls are always wrapped; pure-protobuf
 * signatures are not; mixed protobuf/POJO signatures are rejected with
 * IllegalStateException. Stream/reactive/stub signatures are special-cased below.
 */
static boolean needWrap(MethodDescriptor methodDescriptor, Class<?>[] parameterClasses, Class<?> returnClass) {
    String methodName = methodDescriptor.getMethodName();
    // generic call must be wrapped
    if (CommonConstants.$INVOKE.equals(methodName) || CommonConstants.$INVOKE_ASYNC.equals(methodName)) {
        return true;
    }
    // echo must be wrapped
    if ($ECHO.equals(methodName)) {
        return true;
    }
    boolean returnClassProtobuf = isProtobufClass(returnClass);
    // Response foo()
    if (parameterClasses.length == 0) {
        return !returnClassProtobuf;
    }
    int protobufParameterCount = 0;
    int javaParameterCount = 0;
    int streamParameterCount = 0;
    boolean secondParameterStream = false;
    // count normal and protobuf param
    for (int i = 0; i < parameterClasses.length; i++) {
        Class<?> parameterClass = parameterClasses[i];
        if (isProtobufClass(parameterClass)) {
            protobufParameterCount++;
        } else {
            if (isStreamType(parameterClass)) {
                if (i == 1) {
                    secondParameterStream = true;
                }
                streamParameterCount++;
            } else {
                javaParameterCount++;
            }
        }
    }
    // more than one stream param
    if (streamParameterCount > 1) {
        throw new IllegalStateException("method params error: more than one Stream params. method=" + methodName);
    }
    // protobuf only support one param
    if (protobufParameterCount >= 2) {
        throw new IllegalStateException("method params error: more than one protobuf params. method=" + methodName);
    }
    // server stream support one normal param and one stream param
    if (streamParameterCount == 1) {
        if (javaParameterCount + protobufParameterCount > 1) {
            throw new IllegalStateException(
                    "method params error: server stream does not support more than one normal param."
                            + " method=" + methodName);
        }
        // server stream: void foo(Request, StreamObserver<Response>)
        if (!secondParameterStream) {
            throw new IllegalStateException(
                    "method params error: server stream's second param must be StreamObserver."
                            + " method=" + methodName);
        }
    }
    if (methodDescriptor.getRpcType() != MethodDescriptor.RpcType.UNARY) {
        if (MethodDescriptor.RpcType.SERVER_STREAM == methodDescriptor.getRpcType()) {
            if (!secondParameterStream) {
                throw new IllegalStateException(
                        "method params error:server stream's second param must be StreamObserver."
                                + " method=" + methodName);
            }
        }
        // param type must be consistent
        if (returnClassProtobuf) {
            if (javaParameterCount > 0) {
                throw new IllegalStateException(
                        "method params error: both normal and protobuf param found. method=" + methodName);
            }
        } else {
            if (protobufParameterCount > 0) {
                throw new IllegalStateException("method params error method=" + methodName);
            }
        }
    } else {
        if (streamParameterCount > 0) {
            throw new IllegalStateException(
                    "method params error: unary method should not contain any StreamObserver."
                            + " method=" + methodName);
        }
        // pure protobuf unary signature: no wrapper needed
        if (protobufParameterCount > 0 && returnClassProtobuf) {
            return false;
        }
        // handler reactor or rxjava only consider gen by proto
        if (isMono(returnClass) || isRx(returnClass)) {
            return false;
        }
        // pure POJO unary signature: wrapper needed
        if (protobufParameterCount <= 0 && !returnClassProtobuf) {
            return true;
        }
        // handle grpc stub only consider gen by proto
        if (GRPC_ASYNC_RETURN_CLASS.equalsIgnoreCase(returnClass.getName()) && protobufParameterCount == 1) {
            return false;
        }
        // handle dubbo generated method
        if (TRI_ASYNC_RETURN_CLASS.equalsIgnoreCase(returnClass.getName())) {
            // Inspect the CompletableFuture's type argument to classify the real return type.
            Class<?> actualReturnClass = (Class<?>) ((ParameterizedType) methodDescriptor.getMethod().getGenericReturnType()).getActualTypeArguments()[0];
            boolean actualReturnClassProtobuf = isProtobufClass(actualReturnClass);
            if (actualReturnClassProtobuf && protobufParameterCount == 1) {
                return false;
            }
            if (!actualReturnClassProtobuf && protobufParameterCount == 0) {
                return true;
            }
        }
        // todo remove this in future
        boolean ignore = checkNeedIgnore(returnClass);
        if (ignore) {
            return protobufParameterCount != 1;
        }
        throw new IllegalStateException("method params error method=" + methodName);
    }
    // java param should be wrapped
    return javaParameterCount > 0;
}
// Malformed server-stream signatures (misplaced StreamObserver, extra params) must all raise IllegalStateException.
@Test void testErrorServerStream() throws Exception { Method method = DescriptorService.class.getMethod("testErrorServerStream", StreamObserver.class, HelloReply.class); assertThrows(IllegalStateException.class, () -> { MethodDescriptor descriptor = new ReflectionMethodDescriptor(method); needWrap(descriptor); }); Method method2 = DescriptorService.class.getMethod( "testErrorServerStream2", HelloReply.class, HelloReply.class, StreamObserver.class); assertThrows(IllegalStateException.class, () -> { MethodDescriptor descriptor = new ReflectionMethodDescriptor(method2); needWrap(descriptor); }); Method method3 = DescriptorService.class.getMethod("testErrorServerStream3", String.class, StreamObserver.class); assertThrows(IllegalStateException.class, () -> { MethodDescriptor descriptor = new ReflectionMethodDescriptor(method3); needWrap(descriptor); }); Method method4 = DescriptorService.class.getMethod( "testErrorServerStream4", String.class, String.class, StreamObserver.class); assertThrows(IllegalStateException.class, () -> { MethodDescriptor descriptor = new ReflectionMethodDescriptor(method4); needWrap(descriptor); }); }
public static Set<Long> getLongSetOrNull(String property, JsonNode node) {
    // An absent or JSON-null property maps to Java null rather than an empty set.
    return node.hasNonNull(property) ? getLongSet(property, node) : null;
}
// Missing/null property -> null; a string element in the array -> IllegalArgumentException; valid longs -> parsed set.
@Test public void getLongSetOrNull() throws JsonProcessingException { assertThat(JsonUtil.getLongSetOrNull("items", JsonUtil.mapper().readTree("{}"))).isNull(); assertThat(JsonUtil.getLongSetOrNull("items", JsonUtil.mapper().readTree("{\"items\": null}"))) .isNull(); assertThatThrownBy( () -> JsonUtil.getLongSetOrNull( "items", JsonUtil.mapper().readTree("{\"items\": [13, \"23\"]}"))) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot parse long from non-long value in items: \"23\""); assertThat( JsonUtil.getLongSetOrNull("items", JsonUtil.mapper().readTree("{\"items\": [23, 45]}"))) .containsExactlyElementsOf(Arrays.asList(23L, 45L)); }
// Like activeSessionOf, but fails fast with IllegalArgumentException when no active session exists.
public long requireActiveSessionOf(ApplicationId applicationId) { return activeSessionOf(applicationId) .orElseThrow(() -> new IllegalArgumentException("Application '" + applicationId + "' has no active session.")); }
// An application node that exists but carries no session data must trigger IllegalArgumentException.
@Test(expected = IllegalArgumentException.class)
public void require_that_requesting_session_for_empty_application_throws_exception() throws Exception {
    ApplicationId baz = createApplicationId("baz");
    // No data in node
    curatorFramework.create().creatingParentsIfNeeded()
            .forPath(TenantRepository.getApplicationsPath(tenantName).append(baz.serializedForm()).getAbsolute());
    TenantApplications repo = createZKAppRepo();
    repo.requireActiveSessionOf(baz);
}
/**
 * Recursively deletes a file or directory tree. When {@code ignoreFailures}
 * is false, a failed {@code File.delete()} is retried via {@code Files.delete}
 * so the resulting IOException (with its cause) is surfaced, rethrown unchecked.
 */
public static void delete(final File file, final boolean ignoreFailures) {
    if (!file.exists()) {
        return;
    }
    if (file.isDirectory()) {
        final File[] children = file.listFiles();
        if (null != children) {
            for (final File child : children) {
                delete(child, ignoreFailures);
            }
        }
    }
    if (file.delete() || ignoreFailures) {
        return;
    }
    try {
        // Retry through NIO purely to obtain a descriptive IOException.
        Files.delete(file.toPath());
    } catch (final IOException ex) {
        LangUtil.rethrowUnchecked(ex);
    }
}
// Deleting a populated directory tree must remove everything without invoking the error handler.
@Test void deleteErrorHandlerDirectory() throws IOException { final ErrorHandler errorHandler = mock(ErrorHandler.class); final Path dir2 = tempDir.resolve("dir1").resolve("dir2"); Files.createDirectories(dir2); Files.createFile(dir2.resolve("file2.txt")); Files.createFile(dir2.getParent().resolve("file1.txt")); final File dir = dir2.getParent().toFile(); IoUtil.delete(dir, errorHandler); assertFalse(dir.exists()); assertFalse(Files.exists(dir2)); verifyNoInteractions(errorHandler); }
/**
 * Parses a GRIB2 file: sets the GRIB2 content type, copies the input into a
 * private temp directory, extracts global attributes as metadata and emits
 * the dimensions and variables as XHTML.
 */
public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context) throws IOException, SAXException, TikaException {
    // Report the canonical GRIB2 MIME type.
    metadata.set(Metadata.CONTENT_TYPE, GRIB_MIME_TYPE);
    // The GRIB library was not cleaning up its temp files no matter what we tried;
    // as a workaround the full input is copied into a private temp directory which
    // is deleted in the finally block.
    Path tmpDir = Files.createTempDirectory("tika-grib-");
    try {
        XHTMLContentHandler xhtml;
        Path gribFile = Files.createTempFile(tmpDir, "tika-file", ".grib2");
        Files.copy(stream, gribFile, StandardCopyOption.REPLACE_EXISTING);
        try (NetcdfFile ncFile = NetcdfDataset.openFile(gribFile.toString(), null)) {
            // First emit the set of global attributes as Tika metadata.
            for (Attribute attr : ncFile.getGlobalAttributes()) {
                Property property = resolveMetadataKey(attr.getFullName());
                if (attr.getDataType().isString()) {
                    metadata.add(property, attr.getStringValue());
                } else if (attr.getDataType().isNumeric()) {
                    int value = attr.getNumericValue().intValue();
                    metadata.add(property, String.valueOf(value));
                }
            }
            xhtml = new XHTMLContentHandler(handler, metadata);
            xhtml.startDocument();
            xhtml.newline();
            // List the file's dimensions.
            xhtml.startElement("ul");
            xhtml.characters("dimensions:");
            xhtml.newline();
            for (Dimension dim : ncFile.getDimensions()) {
                xhtml.element("li", dim.getFullName() + "=" + String.valueOf(dim.getLength()) + ";");
                xhtml.newline();
            }
            // List the file's variables together with their attributes.
            xhtml.startElement("ul");
            xhtml.characters("variables:");
            xhtml.newline();
            for (Variable var : ncFile.getVariables()) {
                xhtml.element("p", String.valueOf(var.getDataType()) + var.getNameAndDimensions() + ";");
                for (Attribute element : var.getAttributes()) {
                    xhtml.element("li", " :" + element + ";");
                    xhtml.newline();
                }
            }
        }
        // xhtml is definitely assigned here: the try-with-resources block completed
        // normally; the NetcdfFile is already closed at this point.
        xhtml.endElement("ul");
        xhtml.endElement("ul");
        xhtml.endDocument();
    } catch (IOException e) {
        throw new TikaException("NetCDF parse error", e);
    } finally {
        // Always remove the private temp directory together with the copied input.
        FileUtils.deleteDirectory(tmpDir.toFile());
    }
}
@Test
public void testParseGlobalMetadata() throws Exception {
    Parser parser = new GribParser();
    Metadata metadata = new Metadata();
    ContentHandler handler = new BodyContentHandler();
    // Parse a bundled GRIB2 sample document.
    try (InputStream stream = GribParser.class
            .getResourceAsStream("/test-documents/gdas1.forecmwf.2014062612.grib2")) {
        parser.parse(stream, handler, metadata, new ParseContext());
    }
    assertNotNull(metadata);
    String content = handler.toString();
    // The extracted body lists the file's dimensions and variables.
    assertTrue(content.contains("dimensions:"));
    assertTrue(content.contains("variables:"));
}
/**
 * Records an {@code int} sample by widening it and delegating to
 * {@link #update(long)}.
 *
 * @param value the sample to record
 */
public void update(int value) {
    final long widened = value;
    update(widened);
}
@Test
public void updatesTheReservoir() throws Exception {
    // An int update must be forwarded to the backing reservoir.
    histogram.update(1);
    verify(reservoir).update(1);
}
/**
 * Converts a key/value map into a list of query {@link Term}s.
 * <p>
 * Keys starting with {@code $nest}/{@code $orNest} produce nested term lists;
 * all other keys become plain column terms. An {@code $or$} prefix (or the
 * {@code $orNest} form) marks the term as OR-joined.
 */
public static List<Term> parse(Map<String, Object> map) {
    if (MapUtils.isEmpty(map)) {
        return Collections.emptyList();
    }
    List<Term> terms = new ArrayList<>(map.size());
    for (Map.Entry<String, Object> entry : map.entrySet()) {
        String key = entry.getKey();
        Object value = entry.getValue();
        boolean isOr = false;
        Term term = new Term();
        // Nested terms: the value is either a sub-map or an expression string.
        // Note: the $orNest check assigns isOr as a side effect of the condition.
        if (key.startsWith("$nest") || (isOr = key.startsWith("$orNest"))) {
            @SuppressWarnings("all")
            List<Term> nest = value instanceof Map ? parse(((Map<String, Object>) value)) : parse(String.valueOf(value));
            term.setTerms(nest);
        }
        // Plain column term; strip the "$or$" prefix if present.
        else {
            if (key.startsWith("$or$")) {
                isOr = true;
                key = key.substring(4);
            }
            term.setColumn(key);
            term.setValue(value);
        }
        if (isOr) {
            term.setType(Term.Type.or);
        }
        terms.add(term);
    }
    return terms;
}
@Test
public void testUrl(){
    // "%20" (URL-encoded space) separates the tokens of the expression.
    List<Term> terms = TermExpressionParser.parse("type=email%20and%20provider=test");
    assertEquals(terms.get(0).getTermType(), TermType.eq);
    assertEquals(terms.get(0).getColumn(), "type");
    assertEquals(terms.get(0).getValue(), "email");
    assertEquals(terms.get(1).getTermType(), TermType.eq);
    assertEquals(terms.get(1).getColumn(), "provider");
    assertEquals(terms.get(1).getValue(), "test");
}
/**
 * Matches {@code length} bytes of {@code sequence} starting at {@code start}
 * against the FST, beginning from {@code node}, and stores the outcome in
 * {@code reuse} (which is also returned).
 */
public MatchResult match(MatchResult reuse, byte[] sequence, int start, int length, int node) {
    if (node == 0) {
        // Node 0 denotes "no node": nothing can match from here.
        reuse.reset(MatchResult.NO_MATCH, start, node);
        return reuse;
    }
    final FST fst = _fst;
    final int end = start + length;
    for (int i = start; i < end; i++) {
        final int arc = fst.getArc(node, sequence[i]);
        if (arc != 0) {
            if (i + 1 == end && fst.isArcFinal(arc)) {
                /* The automaton has an exact match of the input sequence. */
                reuse.reset(MatchResult.EXACT_MATCH, i, node);
                return reuse;
            }
            if (fst.isArcTerminal(arc)) {
                /* The automaton contains a prefix of the input sequence. */
                reuse.reset(MatchResult.AUTOMATON_HAS_PREFIX, i + 1, node);
                return reuse;
            }
            // Make a transition along the arc.
            node = fst.getEndNode(arc);
        } else {
            // No outgoing arc for this byte.
            if (i > start) {
                // Some bytes were consumed: a proper prefix of the input is in the automaton.
                reuse.reset(MatchResult.AUTOMATON_HAS_PREFIX, i, node);
            } else {
                reuse.reset(MatchResult.NO_MATCH, i, node);
            }
            return reuse;
        }
    }
    /* The sequence is a prefix of at least one sequence in the automaton. */
    reuse.reset(MatchResult.SEQUENCE_IS_A_PREFIX, 0, node);
    return reuse;
}
@Test
public void testAutomatonHasPrefixBug() throws Exception {
    // Build an FST over a small dictionary and round-trip it through serialization.
    FST fst = FSTBuilder.build(
        Arrays.asList("a".getBytes(UTF_8), "ab".getBytes(UTF_8), "abc".getBytes(UTF_8), "ad".getBytes(UTF_8),
            "bcd".getBytes(UTF_8), "bce".getBytes(UTF_8)),
        new int[]{10, 11, 12, 13, 14, 15});
    byte[] fstData = new FSTSerializerImpl().withNumbers().serialize(fst, new ByteArrayOutputStream()).toByteArray();
    fst = FST.read(new ByteArrayInputStream(fstData), ImmutableFST.class, true);
    FSTTraversal fstTraversal = new FSTTraversal(fst);
    // Dictionary entries are exact matches.
    assertEquals(fstTraversal.match("a".getBytes(UTF_8))._kind, EXACT_MATCH);
    assertEquals(fstTraversal.match("ab".getBytes(UTF_8))._kind, EXACT_MATCH);
    assertEquals(fstTraversal.match("abc".getBytes(UTF_8))._kind, EXACT_MATCH);
    assertEquals(fstTraversal.match("ad".getBytes(UTF_8))._kind, EXACT_MATCH);
    // Proper prefixes of dictionary entries.
    assertEquals(fstTraversal.match("b".getBytes(UTF_8))._kind, SEQUENCE_IS_A_PREFIX);
    assertEquals(fstTraversal.match("bc".getBytes(UTF_8))._kind, SEQUENCE_IS_A_PREFIX);
    // Inputs whose strict prefix is in the automaton: _index marks the matched length.
    MatchResult m = fstTraversal.match("abcd".getBytes(UTF_8));
    assertEquals(m._kind, AUTOMATON_HAS_PREFIX);
    assertEquals(m._index, 3);
    m = fstTraversal.match("ade".getBytes(UTF_8));
    assertEquals(m._kind, AUTOMATON_HAS_PREFIX);
    assertEquals(m._index, 2);
    m = fstTraversal.match("ax".getBytes(UTF_8));
    assertEquals(m._kind, AUTOMATON_HAS_PREFIX);
    assertEquals(m._index, 1);
    assertEquals(fstTraversal.match("d".getBytes(UTF_8))._kind, NO_MATCH);
}
/**
 * Builds a human-readable description of this event failure.
 *
 * @param epoch               the controller epoch at the time of the failure
 * @param deltaUs             processing time in microseconds; empty if the
 *                            event never started processing
 * @param isActiveController  whether this controller was active
 * @param lastCommittedOffset the offset reverted to on failover
 */
public String failureMessage(
    int epoch,
    OptionalLong deltaUs,
    boolean isActiveController,
    long lastCommittedOffset
) {
    StringBuilder bld = new StringBuilder();
    // An empty deltaUs means the event never started processing.
    if (deltaUs.isPresent()) {
        bld.append("event failed with ");
    } else {
        bld.append("event unable to start processing because of ");
    }
    bld.append(internalException.getClass().getSimpleName());
    if (externalException.isPresent()) {
        bld.append(" (treated as ").
            append(externalException.get().getClass().getSimpleName()).append(")");
    }
    if (causesFailover()) {
        bld.append(" at epoch ").append(epoch);
    }
    if (deltaUs.isPresent()) {
        bld.append(" in ").append(deltaUs.getAsLong()).append(" microseconds");
    }
    if (causesFailover()) {
        if (isActiveController) {
            bld.append(". Renouncing leadership and reverting to the last committed offset ");
            bld.append(lastCommittedOffset);
        } else {
            bld.append(". The controller is already in standby mode");
        }
    }
    bld.append(".");
    // Faults keep the message generic; non-fault exceptions may expose their message.
    if (!isFault && internalException.getMessage() != null) {
        bld.append(" Exception message: ");
        bld.append(internalException.getMessage());
    }
    return bld.toString();
}
@Test
public void testInterruptedExceptionFailureMessageWhenActive() {
    // An empty deltaUs means the event never started processing.
    assertEquals("event unable to start processing because of InterruptedException (treated as " +
        "UnknownServerException) at epoch 123. Renouncing leadership and reverting to the " +
        "last committed offset 456.",
        INTERRUPTED.failureMessage(123, OptionalLong.empty(), true, 456L));
}
public R execute(Retryable<R> retryable) throws ExecutionException { long endMs = time.milliseconds() + retryBackoffMaxMs; int currAttempt = 0; ExecutionException error = null; while (time.milliseconds() <= endMs) { currAttempt++; try { return retryable.call(); } catch (UnretryableException e) { // We've deemed this error to not be worth retrying, so collect the error and // fail immediately. if (error == null) error = new ExecutionException(e); break; } catch (ExecutionException e) { log.warn("Error during retry attempt {}", currAttempt, e); if (error == null) error = e; long waitMs = retryBackoffMs * (long) Math.pow(2, currAttempt - 1); long diff = endMs - time.milliseconds(); waitMs = Math.min(waitMs, diff); if (waitMs <= 0) break; String message = String.format("Attempt %d to make call resulted in an error; sleeping %d ms before retrying", currAttempt, waitMs); log.warn(message, e); time.sleep(waitMs); } } if (error == null) // Really shouldn't ever get to here, but... error = new ExecutionException(new IllegalStateException("Exhausted all retry attempts but no attempt returned value or encountered exception")); throw error; }
@Test
public void test() throws ExecutionException {
    // Three transient failures followed by success.
    Exception[] attempts = new Exception[] {
        new IOException("pretend connect error"),
        new IOException("pretend timeout error"),
        new IOException("pretend read error"),
        null // success!
    };
    long retryWaitMs = 1000;
    long maxWaitMs = 10000;
    Retryable<String> call = createRetryable(attempts);
    Time time = new MockTime(0, 0, 0);
    assertEquals(0L, time.milliseconds());
    Retry<String> r = new Retry<>(time, retryWaitMs, maxWaitMs);
    r.execute(call);
    // Exponential backoff: 1s, then 2s, then 4s of simulated sleeping.
    long secondWait = retryWaitMs * 2;
    long thirdWait = retryWaitMs * 4;
    long totalWait = retryWaitMs + secondWait + thirdWait;
    assertEquals(totalWait, time.milliseconds());
}
/**
 * Sleeps for the given number of milliseconds without throwing.
 * A {@code null} duration is treated as "nothing to do" and reports success.
 *
 * @param millis sleep duration in milliseconds, may be {@code null}
 * @return {@code true} on success (including the {@code null} case)
 */
public static boolean safeSleep(Number millis) {
    return millis == null || safeSleep(millis.longValue());
}
@Test
public void safeSleepTest() {
    // Random sleep duration; verify at least that much wall-clock time elapsed.
    final long sleepMillis = RandomUtil.randomLong(1, 1000);
    final long l = System.currentTimeMillis();
    ThreadUtil.safeSleep(sleepMillis);
    assertTrue(System.currentTimeMillis() - l >= sleepMillis);
}
/**
 * Returns all users belonging to any of the given departments.
 *
 * @param deptIds department ids; an empty or {@code null} collection yields an empty list
 */
@Override
public List<AdminUserDO> getUserListByDeptIds(Collection<Long> deptIds) {
    // Short-circuit to avoid an unnecessary (and invalid) empty IN-clause query.
    if (deptIds == null || deptIds.isEmpty()) {
        return Collections.emptyList();
    }
    return userMapper.selectListByDeptIds(deptIds);
}
@Test
public void testGetUserListByDeptIds() {
    // Mock data: one user in dept 1.
    AdminUserDO dbUser = randomAdminUserDO(o -> o.setDeptId(1L));
    userMapper.insert(dbUser);
    // A user in a non-matching dept must be filtered out.
    userMapper.insert(cloneIgnoreId(dbUser, o -> o.setDeptId(2L)));
    // Prepare arguments.
    Collection<Long> deptIds = singleton(1L);
    // Invoke.
    List<AdminUserDO> list = userService.getUserListByDeptIds(deptIds);
    // Assert: only the dept-1 user is returned.
    assertEquals(1, list.size());
    assertEquals(dbUser, list.get(0));
}
/**
 * Reads the ACL of a bucket or object. Returns {@link Acl#EMPTY} for
 * incomplete multipart uploads, for common prefixes with no placeholder
 * object, and for resources that reject the ACL call (delete markers).
 */
@Override
public Acl getPermission(final Path file) throws BackgroundException {
    try {
        if(file.getType().contains(Path.Type.upload)) {
            // Incomplete multipart upload has no ACL set
            return Acl.EMPTY;
        }
        final Path bucket = containerService.getContainer(file);
        final Acl acl;
        if(containerService.isContainer(file)) {
            // This method can be performed by anonymous services, but can only succeed if the
            // bucket's existing ACL already allows write access by the anonymous user.
            // In general, you can only access the ACL of a bucket if the ACL already in place
            // for that bucket (in S3) allows you to do so.
            acl = this.toAcl(session.getClient().getBucketAcl(bucket.isRoot() ? StringUtils.EMPTY : bucket.getName()));
        }
        else {
            // Object ACLs are read per version id.
            acl = this.toAcl(session.getClient().getVersionedObjectAcl(file.attributes().getVersionId(),
                bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
        }
        if(this.isBucketOwnerEnforced(bucket)) {
            // Bucket-owner-enforced buckets do not allow editing ACLs.
            acl.setEditable(false);
        }
        return acl;
    }
    catch(ServiceException e) {
        final BackgroundException failure = new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
        if(file.isDirectory()) {
            if(failure instanceof NotfoundException) {
                // No placeholder file may exist, but we just have a common prefix
                return Acl.EMPTY;
            }
        }
        if(failure instanceof InteroperabilityException) {
            // The specified method is not allowed against this resource. The case for delete markers in versioned buckets.
            return Acl.EMPTY;
        }
        throw failure;
    }
}
@Test
public void testReadWithDelimiter() throws Exception {
    final Path container = new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume));
    // Create a file nested one level below the bucket root (key contains the delimiter).
    final Path test = new S3TouchFeature(session, new S3AccessControlListFeature(session)).touch(
        new Path(new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory)),
            new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus());
    final S3AccessControlListFeature f = new S3AccessControlListFeature(session);
    // Reading the ACL for a delimited key must succeed.
    assertNotNull(f.getPermission(test));
    new S3DefaultDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * {@inheritDoc}
 *
 * @return always {@code false}; GROUP BY on columns beyond the SELECT list is
 *         reported as unsupported
 */
@Override
public boolean supportsGroupByBeyondSelect() {
    return false;
}
@Test
void assertSupportsGroupByBeyondSelect() {
    // The metadata must consistently report this capability as unsupported.
    assertFalse(metaData.supportsGroupByBeyondSelect());
}
/**
 * Registers an ephemeral instance for the given service on behalf of the
 * given client, then publishes the corresponding registration and instance
 * metadata events.
 *
 * @throws NacosRuntimeException if the target service is persistent or the client is illegal
 */
@Override
public void registerInstance(Service service, Instance instance, String clientId) throws NacosException {
    NamingUtils.checkInstanceIsLegal(instance);
    Service singleton = ServiceManager.getInstance().getSingleton(service);
    // Ephemeral instances may only be registered under ephemeral services.
    if (!singleton.isEphemeral()) {
        throw new NacosRuntimeException(NacosException.INVALID_PARAM,
                String.format("Current service %s is persistent service, can't register ephemeral instance.",
                        singleton.getGroupedServiceName()));
    }
    Client client = clientManager.getClient(clientId);
    checkClientIsLegal(client, clientId);
    // Attach the instance to the client and refresh its bookkeeping.
    InstancePublishInfo instanceInfo = getPublishInfo(instance);
    client.addServiceInstance(singleton, instanceInfo);
    client.setLastUpdatedTime();
    client.recalculateRevision();
    // Publish the registration event, then the instance-metadata event.
    NotifyCenter.publishEvent(new ClientOperationEvent.ClientRegisterServiceEvent(singleton, clientId));
    NotifyCenter
            .publishEvent(new MetadataEvent.InstanceMetadataEvent(singleton, instanceInfo.getMetadataId(), false));
}
@Test
void testDeRegisterWhenClientNull() throws NacosException {
    assertThrows(NacosRuntimeException.class, () -> {
        // First registration succeeds with a known client.
        ephemeralClientOperationServiceImpl.registerInstance(service, instance, ipPortBasedClientId);
        when(clientManager.getClient(anyString())).thenReturn(null);
        // Expected: a missing client makes the second registration fail.
        ephemeralClientOperationServiceImpl.registerInstance(service, instance, ipPortBasedClientId);
    });
}
/**
 * Streams query results to {@code out} as a JSON array: a header object,
 * one row object per result, and a final status message. Polls the row
 * queue until the connection closes, the query stops, the limit is reached,
 * or the query completes.
 */
@Override
public void write(final OutputStream out) {
    // CHECKSTYLE_RULES.ON: CyclomaticComplexity
    try {
        out.write("[".getBytes(StandardCharsets.UTF_8));
        write(out, buildHeader());
        final BlockingRowQueue rowQueue = queryMetadata.getRowQueue();
        while (!connectionClosed && queryMetadata.isRunning() && !limitReached && !complete) {
            final KeyValueMetadata<List<?>, GenericRow> row = rowQueue.poll(
                disconnectCheckInterval,
                TimeUnit.MILLISECONDS
            );
            if (row != null) {
                write(out, buildRow(row));
            } else {
                // If no new rows have been written, the user may have terminated the connection without
                // us knowing. Check by trying to write a single newline.
                out.write("\n".getBytes(StandardCharsets.UTF_8));
                out.flush();
            }
            drainAndThrowOnError(out);
        }
        if (connectionClosed) {
            return;
        }
        // Flush any rows still queued, then emit the terminal status message.
        drain(out);
        if (limitReached) {
            objectMapper.writeValue(out, StreamedRow.finalMessage("Limit Reached"));
        } else if (complete) {
            objectMapper.writeValue(out, StreamedRow.finalMessage("Query Completed"));
        }
        out.write("]\n".getBytes(StandardCharsets.UTF_8));
        out.flush();
    } catch (final EOFException exception) {
        // The user has terminated the connection; we can stop writing
        log.warn("Query terminated due to exception:" + exception.toString());
    } catch (final InterruptedException exception) {
        // The most likely cause of this is the server shutting down. Should just try to close
        // gracefully, without writing any more to the connection stream.
        log.warn("Interrupted while writing to connection stream");
    } catch (final Exception exception) {
        log.error("Exception occurred while writing to connection stream: ", exception);
        outputException(out, exception);
    } finally {
        close();
    }
}
@Test
public void shouldExitAndDrainIfQueryStopsRunning() {
    // Given: the query has already stopped and three rows remain queued.
    when(queryMetadata.isRunning()).thenReturn(false);
    doAnswer(streamRows("Row1", "Row2", "Row3"))
        .when(rowQueue).drainTo(any());
    createWriter();
    // When:
    writer.write(out);
    // Then: the queued rows are drained and the JSON array is closed.
    final List<String> lines = getOutput(out);
    assertThat(lines, is(Arrays.asList(
        "[{\"header\":{\"queryId\":\"id\",\"schema\":\"`col1` STRING\"}},",
        "{\"row\":{\"columns\":[\"Row1\"]}},",
        "{\"row\":{\"columns\":[\"Row2\"]}},",
        "{\"row\":{\"columns\":[\"Row3\"]}},",
        "]"
    )));
}
/**
 * Parses a boolean literal into this node's value.
 * Only the strings "true" and "false" (any letter case) are accepted.
 *
 * @param value the candidate boolean literal
 * @return {@code true} if the value was recognized and stored
 */
@Override
protected boolean doSetValue(String value) {
    final boolean recognized =
            value.equalsIgnoreCase("true") || value.equalsIgnoreCase("false");
    if (recognized) {
        this.value = Boolean.valueOf(value);
    }
    return recognized;
}
@Test
void testSetValue() {
    BooleanNode n = new BooleanNode();
    // Only case-insensitive "true"/"false" literals are accepted.
    assertTrue(n.doSetValue("true"));
    assertTrue(n.doSetValue("TRUE"));
    assertTrue(n.doSetValue("false"));
    assertTrue(n.doSetValue("FALSE"));
    assertFalse(n.doSetValue("FALSEa"));
    assertFalse(n.doSetValue("aFALSE"));
}
/**
 * Shuts down the runtime: closes the loader, timer and event processor,
 * transitions every coordinator to CLOSED, and closes the runtime metrics.
 * Subsequent calls are no-ops.
 */
public void close() throws Exception {
    // Only the first caller performs the shutdown.
    if (!isRunning.compareAndSet(true, false)) {
        log.warn("Coordinator runtime is already shutting down.");
        return;
    }
    log.info("Closing coordinator runtime.");
    Utils.closeQuietly(loader, "loader");
    Utils.closeQuietly(timer, "timer");
    // This close the processor, drain all the pending events and
    // reject any new events.
    Utils.closeQuietly(processor, "event processor");
    // Unload all the coordinators, holding each context's lock for the transition.
    coordinators.forEach((tp, context) -> {
        context.lock.lock();
        try {
            context.transitionTo(CoordinatorState.CLOSED);
        } finally {
            context.lock.unlock();
        }
    });
    coordinators.clear();
    Utils.closeQuietly(runtimeMetrics, "runtime metrics");
    log.info("Coordinator runtime closed.");
}
@Test
public void testClose() throws Exception {
    MockCoordinatorLoader loader = spy(new MockCoordinatorLoader());
    MockTimer timer = new MockTimer();
    CoordinatorRuntime<MockCoordinatorShard, String> runtime =
        new CoordinatorRuntime.Builder<MockCoordinatorShard, String>()
            .withTime(timer.time())
            .withTimer(timer)
            .withLoader(loader)
            .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT)
            .withEventProcessor(new DirectEventProcessor())
            .withPartitionWriter(new MockPartitionWriter())
            .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier())
            .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class))
            .withCoordinatorMetrics(mock(CoordinatorMetrics.class))
            .withSerializer(new StringSerializer())
            .build();
    // Loads the coordinator.
    runtime.scheduleLoadOperation(TP, 10);
    // Check initial state.
    CoordinatorRuntime<MockCoordinatorShard, String>.CoordinatorContext ctx = runtime.contextOrThrow(TP);
    assertEquals(0, ctx.coordinator.lastWrittenOffset());
    assertEquals(0, ctx.coordinator.lastCommittedOffset());
    // Write #1.
    CompletableFuture<String> write1 = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT,
        state -> new CoordinatorResult<>(Arrays.asList("record1", "record2"), "response1"));
    // Write #2.
    CompletableFuture<String> write2 = runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT,
        state -> new CoordinatorResult<>(Arrays.asList("record3", "record4"), "response2"));
    // Writes are inflight.
    assertFalse(write1.isDone());
    assertFalse(write2.isDone());
    // The coordinator timer should be empty.
    assertEquals(0, ctx.timer.size());
    // Timer #1. This is never executed.
    ctx.timer.schedule("timer-1", 10, TimeUnit.SECONDS, true,
        () -> new CoordinatorResult<>(Arrays.asList("record5", "record6"), null));
    // The coordinator timer should have one pending task.
    assertEquals(1, ctx.timer.size());
    // Close the runtime.
    runtime.close();
    // All the pending operations are completed with NotCoordinatorException.
    assertFutureThrows(write1, NotCoordinatorException.class);
    assertFutureThrows(write2, NotCoordinatorException.class);
    // Verify that the loader was closed.
    verify(loader).close();
    // The coordinator timer should be empty.
    assertEquals(0, ctx.timer.size());
}
/**
 * Validates that the given user exists and is allowed to receive push events
 * for all of the given projects.
 * <p>
 * Fix: the method previously opened two separate DB sessions for a single
 * logical read (one for the user lookup, one for the project lookup); both
 * reads now share one session.
 *
 * @param userUuid     uuid of the user requesting push events
 * @param projectUuids uuids of the projects the events relate to
 * @throws ForbiddenException if the user does not exist or lacks the required permissions
 */
public void validateUserCanReceivePushEventForProjectUuids(String userUuid, Set<String> projectUuids) {
    UserDto userDto;
    List<ProjectDto> projectDtos;
    try (DbSession dbSession = dbClient.openSession(false)) {
        userDto = dbClient.userDao().selectByUuid(dbSession, userUuid);
        if (userDto == null) {
            throw new ForbiddenException("User does not exist");
        }
        projectDtos = dbClient.projectDao().selectByUuids(dbSession, projectUuids);
    }
    UserSession userSession = userSessionFactory.create(userDto, false);
    validateProjectPermissions(userSession, projectDtos);
}
@Test
public void validate_givenUserActivatedAndWithRequiredPermissions_dontThrowException() {
    UserDto userDto = new UserDto();
    when(userDao.selectByUuid(any(), any())).thenReturn(userDto);
    when(userSession.isActive()).thenReturn(true);
    // An existing, active user with the required permissions passes validation.
    assertThatCode(() -> underTest.validateUserCanReceivePushEventForProjectUuids(USER_UUID, exampleProjectuuids))
        .doesNotThrowAnyException();
}
/**
 * Requests one work item from the Dataflow service, advertising the worker's
 * capabilities, and records the item's stage name and work id in the logging
 * MDC before returning it. Returns empty when no (valid) work is available.
 */
@Override
public Optional<WorkItem> getWorkItem() throws IOException {
    List<String> workItemTypes = ImmutableList.of(
        WORK_ITEM_TYPE_MAP_TASK,
        WORK_ITEM_TYPE_SEQ_MAP_TASK,
        WORK_ITEM_TYPE_REMOTE_SOURCE_TASK);
    // All remote sources require the "remote_source" capability. Dataflow's
    // custom sources are further tagged with the format "custom_source".
    List<String> capabilities =
        new ArrayList<String>(
            Arrays.asList(
                options.getWorkerId(),
                CAPABILITY_REMOTE_SOURCE,
                PropertyNames.CUSTOM_SOURCE_FORMAT));
    if (options.getWorkerPool() != null) {
        capabilities.add(options.getWorkerPool());
    }
    Optional<WorkItem> workItem = getWorkItemInternal(workItemTypes, capabilities);
    if (!workItem.isPresent()) {
        // Normal case, this means that the response contained no work, i.e. no work is available
        // at this time.
        return Optional.empty();
    }
    if (workItem.get().getId() == null) {
        // Items without an id are not usable.
        logger.debug("Discarding invalid work item {}", workItem.get());
        return Optional.empty();
    }
    WorkItem work = workItem.get();
    // Determine the stage name from whichever task type is populated.
    final String stage;
    if (work.getMapTask() != null) {
        stage = work.getMapTask().getStageName();
        logger.info("Starting MapTask stage {}", stage);
    } else if (work.getSeqMapTask() != null) {
        stage = work.getSeqMapTask().getStageName();
        logger.info("Starting SeqMapTask stage {}", stage);
    } else if (work.getSourceOperationTask() != null) {
        stage = work.getSourceOperationTask().getStageName();
        logger.info("Starting SourceOperationTask stage {}", stage);
    } else {
        stage = null;
    }
    // Propagate stage and work id into the logging MDC for downstream log lines.
    DataflowWorkerLoggingMDC.setStageName(stage);
    stageStartTime.set(DateTime.now());
    DataflowWorkerLoggingMDC.setWorkId(Long.toString(work.getId()));
    return workItem;
}
@Test
public void testCloudServiceCallSeqMapTaskStagePropagation() throws Exception {
    // Publish and acquire a seq map task work item, and verify we're now processing that stage.
    final String stageName = "test_stage_name";
    SeqMapTask seqMapTask = new SeqMapTask();
    seqMapTask.setStageName(stageName);
    WorkItem workItem = createWorkItem(PROJECT_ID, JOB_ID);
    workItem.setSeqMapTask(seqMapTask);
    MockLowLevelHttpResponse response = generateMockResponse(workItem);
    MockLowLevelHttpRequest request = new MockLowLevelHttpRequest().setResponse(response);
    MockHttpTransport transport = new MockHttpTransport.Builder().setLowLevelHttpRequest(request).build();
    DataflowWorkerHarnessOptions pipelineOptions = createPipelineOptionsWithTransport(transport);
    WorkUnitClient client = new DataflowWorkUnitClient(pipelineOptions, LOG);
    assertEquals(Optional.of(workItem), client.getWorkItem());
    // The stage name must be propagated into the logging MDC.
    assertEquals(stageName, DataflowWorkerLoggingMDC.getStageName());
}
/**
 * Validates that the request carries a body and an acceptable Content-Type header.
 *
 * @throws BadRequestException if data or the header is missing, or the MIME type is unsupported
 */
static void validateDataAndHeader(HttpRequest request, List<String> supportedContentTypes) {
    if (request.getData() == null) {
        throw new BadRequestException("Request contains no data");
    }
    String header = request.getHeader(ApplicationApiHandler.contentTypeHeader);
    if (header == null) {
        throw new BadRequestException("Request contains no " + ApplicationApiHandler.contentTypeHeader + " header");
    }
    // Only the MIME type matters; parameters such as charset/boundary are ignored.
    String mimeType = ContentType.parse(header).getMimeType();
    if (supportedContentTypes.contains(mimeType)) {
        return;
    }
    throw new BadRequestException("Request contains invalid " + ApplicationApiHandler.contentTypeHeader +
            " header (" + mimeType + "), only '[" + String.join(", ", supportedContentTypes) + "]' are supported");
}
@Test
public void require_that_content_type_is_parsed_correctly() throws FileNotFoundException {
    // Header carries charset and boundary parameters beside the MIME type.
    HttpRequest request = post(new ByteArrayInputStream("foo".getBytes(StandardCharsets.UTF_8)),
            Map.of("Content-Type", "multipart/form-data; charset=ISO-8859-1; boundary=g5gJAzUWl_t6"),
            Map.of());
    // Valid header should validate ok
    SessionCreateHandler.validateDataAndHeader(request, List.of(ContentType.MULTIPART_FORM_DATA.getMimeType()));
    // Accepting only application/json should fail:
    try {
        SessionCreateHandler.validateDataAndHeader(request, List.of(ContentType.APPLICATION_JSON.getMimeType()));
        fail("Request contained invalid content type, but validated ok");
    } catch (Exception expected) {}
}
/**
 * Extracts the per-column aggregation types from a merge task config.
 * Keys of the form {@code <column><AGGREGATION_TYPE_KEY_SUFFIX>} are mapped to
 * their resolved {@link AggregationFunctionType}.
 *
 * @throws IllegalArgumentException if a configured aggregation type is unsupported
 */
public static Map<String, AggregationFunctionType> getAggregationTypes(Map<String, String> taskConfig) {
    Map<String, AggregationFunctionType> columnToAggregationType = new HashMap<>();
    taskConfig.forEach((configKey, configValue) -> {
        if (configKey.endsWith(MergeTask.AGGREGATION_TYPE_KEY_SUFFIX)) {
            // Strip the suffix to recover the column name.
            String column = configKey.substring(0, configKey.length() - AGGREGATION_TYPE_KEY_SUFFIX_LENGTH);
            columnToAggregationType.put(column, AggregationFunctionType.getAggregationFunctionType(configValue));
        }
    });
    return columnToAggregationType;
}
@Test
public void testGetAggregationTypes() {
    Map<String, String> taskConfig = new HashMap<>();
    // Aggregation type values are matched case-insensitively.
    taskConfig.put("colA.aggregationType", "sum");
    taskConfig.put("colB.aggregationType", "Min");
    taskConfig.put("colC.aggregationType", "MaX");
    Map<String, AggregationFunctionType> aggregationTypes = MergeTaskUtils.getAggregationTypes(taskConfig);
    assertEquals(aggregationTypes.size(), 3);
    assertEquals(aggregationTypes.get("colA"), AggregationFunctionType.SUM);
    assertEquals(aggregationTypes.get("colB"), AggregationFunctionType.MIN);
    assertEquals(aggregationTypes.get("colC"), AggregationFunctionType.MAX);
    // An unsupported aggregation type must be rejected.
    taskConfig.put("colD.aggregationType", "unsupported");
    try {
        MergeTaskUtils.getAggregationTypes(taskConfig);
        fail();
    } catch (IllegalArgumentException e) {
        // Expected
    }
}
/**
 * Loads the quality profiles associated with the given project.
 *
 * @param projectKey key of the project whose profiles are requested
 */
@Override
public List<QualityProfile> load(String projectKey) {
    // The project key is passed as the URL-encoded "project" query parameter.
    StringBuilder url = new StringBuilder(WS_URL)
            .append("?project=")
            .append(encodeForUrl(projectKey));
    return handleErrors(url,
            () -> String.format("Failed to load the quality profiles of project '%s'", projectKey),
            true);
}
@Test
public void load_gets_all_profiles_for_specified_project() throws IOException {
    prepareCallWithResults();
    underTest.load("foo");
    // The project key must be passed as the "project" query parameter.
    verifyCalledPath("/api/qualityprofiles/search.protobuf?project=foo");
}
/**
 * Looks up a search by id and verifies the user may read it.
 *
 * @return the search, or empty if no search with the given id exists
 */
public Optional<Search> getForUser(String id, SearchUser searchUser) {
    Optional<Search> search = dbService.get(id);
    if (search.isPresent()) {
        // Permission check throws if the user may not read this search.
        checkPermission(searchUser, search.get());
    }
    return search;
}
@Test
public void loadsSearchIfSearchIsPermittedViaViews() {
    // The search is owned by someone else, but a readable view references it.
    final Search search = mockSearchWithOwner("someone else");
    final SearchUser searchUser = mock(SearchUser.class);
    final ViewDTO viewDTO = mock(ViewDTO.class);
    when(viewService.forSearch(anyString())).thenReturn(ImmutableList.of(viewDTO));
    when(searchUser.canReadView(viewDTO)).thenReturn(true);
    final Optional<Search> result = sut.getForUser(search.id(), searchUser);
    assertThat(result).isEqualTo(Optional.of(search));
}
/**
 * In hybrid discovery modes, reports plugins that lack ServiceLoader
 * manifests: logs a warning in HYBRID_WARN, throws in HYBRID_FAIL, and when
 * no plugins are missing suggests switching to SERVICE_LOAD.
 */
static void maybeReportHybridDiscoveryIssue(PluginDiscoveryMode discoveryMode, PluginScanResult serviceLoadingScanResult, PluginScanResult mergedResult) {
    // Plugins seen by scanning but not by ServiceLoader are missing manifests.
    SortedSet<PluginDesc<?>> missingPlugins = new TreeSet<>();
    mergedResult.forEach(missingPlugins::add);
    serviceLoadingScanResult.forEach(missingPlugins::remove);
    if (missingPlugins.isEmpty()) {
        if (discoveryMode == PluginDiscoveryMode.HYBRID_WARN || discoveryMode == PluginDiscoveryMode.HYBRID_FAIL) {
            log.warn("All plugins have ServiceLoader manifests, consider reconfiguring {}={}",
                WorkerConfig.PLUGIN_DISCOVERY_CONFIG, PluginDiscoveryMode.SERVICE_LOAD);
        }
    } else {
        // Tab-separated listing of each missing plugin's location, class, type and version.
        String message = String.format(
            "One or more plugins are missing ServiceLoader manifests may not be usable with %s=%s: %s%n"
                + "Read the documentation at %s for instructions on migrating your plugins "
                + "to take advantage of the performance improvements of %s mode.",
            WorkerConfig.PLUGIN_DISCOVERY_CONFIG,
            PluginDiscoveryMode.SERVICE_LOAD,
            missingPlugins.stream()
                .map(pluginDesc -> pluginDesc.location() + "\t" + pluginDesc.className() + "\t" + pluginDesc.type() + "\t" + pluginDesc.version())
                .collect(Collectors.joining("\n", "[\n", "\n]")),
            "https://kafka.apache.org/documentation.html#connect_plugindiscovery",
            PluginDiscoveryMode.SERVICE_LOAD
        );
        if (discoveryMode == PluginDiscoveryMode.HYBRID_WARN) {
            log.warn("{} To silence this warning, set {}={} in the worker config.",
                message, WorkerConfig.PLUGIN_DISCOVERY_CONFIG, PluginDiscoveryMode.ONLY_SCAN);
        } else if (discoveryMode == PluginDiscoveryMode.HYBRID_FAIL) {
            throw new ConnectException(String.format("%s To silence this error, set %s=%s in the worker config.",
                message, WorkerConfig.PLUGIN_DISCOVERY_CONFIG, PluginDiscoveryMode.HYBRID_WARN));
        }
    }
}
@Test
public void testHybridWarnMissingPlugins() {
    try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(Plugins.class)) {
        Plugins.maybeReportHybridDiscoveryIssue(PluginDiscoveryMode.HYBRID_WARN, empty, nonEmpty);
        // HYBRID_WARN with missing manifests must WARN, naming the plugin and config key.
        assertTrue(logCaptureAppender.getEvents().stream().anyMatch(e -> e.getLevel().equals("WARN")
            && e.getMessage().contains(missingPluginClass)
            && e.getMessage().contains(WorkerConfig.PLUGIN_DISCOVERY_CONFIG)
        ));
    }
}
/**
 * Returns the fully qualified class name described by this class's descriptor.
 */
@Override
@PublicAPI(usage = ACCESS)
public String getName() {
    return descriptor.getFullyQualifiedClassName();
}
@Test
public void predicate_simpleNameStartingWith() {
    JavaClass input = importClassWithContext(Parent.class);
    // Prefix matching on the simple name is case-sensitive.
    assertThat(simpleNameStartingWith("P")).accepts(input);
    assertThat(simpleNameStartingWith("Pa")).accepts(input);
    assertThat(simpleNameStartingWith("PA")).rejects(input);
    assertThat(simpleNameStartingWith(".P")).rejects(input);
    assertThat(simpleNameStartingWith("")).accepts(input);
    assertThat(simpleNameStartingWith("wrong")).rejects(input);
    // Full match test: the fully qualified name is not a simple-name prefix.
    assertThat(simpleNameStartingWith(input.getName())).rejects(input);
    assertThat(simpleNameStartingWith(input.getName().substring(0, 2))).rejects(input);
    assertThat(simpleNameStartingWith("Prefix")).hasDescription("simple name starting with 'Prefix'");
}
/**
 * Applies the given log level configuration to the root logger context:
 * property-driven levels first, then hardcoded levels, then the
 * "off unless trace" loggers.
 *
 * @throws IllegalArgumentException if the config's root logger name does not match
 */
public LoggerContext apply(LogLevelConfig logLevelConfig, Props props) {
    if (!ROOT_LOGGER_NAME.equals(logLevelConfig.getRootLoggerName())) {
        throw new IllegalArgumentException("Value of LogLevelConfig#rootLoggerName must be \"" + ROOT_LOGGER_NAME + "\"");
    }
    LoggerContext rootContext = getRootContext();
    logLevelConfig.getConfiguredByProperties().forEach((key, value) -> applyLevelByProperty(props, rootContext.getLogger(key), value));
    logLevelConfig.getConfiguredByHardcodedLevel().forEach((key, value) -> applyHardcodedLevel(rootContext, key, value));
    Level propertyValueAsLevel = getPropertyValueAsLevel(props, LOG_LEVEL.getKey());
    // "offUnlessTrace" loggers are only enabled when the global level is TRACE.
    boolean traceGloballyEnabled = propertyValueAsLevel == Level.TRACE;
    logLevelConfig.getOffUnlessTrace().forEach(logger -> applyHardUnlessTrace(rootContext, logger, traceGloballyEnabled));
    return rootContext;
}
@Test
public void apply_set_level_to_OFF_if_sonar_global_level_is_not_set() {
    LoggerContext context = underTest.apply(newLogLevelConfig().offUnlessTrace("fii").build(), new Props(new Properties()));
    // Without a global TRACE level, "offUnlessTrace" loggers are silenced.
    assertThat(context.getLogger("fii").getLevel()).isEqualTo(Level.OFF);
}
@Udf(description = "Converts a TIMESTAMP value from one timezone to another")
public Timestamp convertTz(
    @UdfParameter(
        description = "The TIMESTAMP value.") final Timestamp timestamp,
    @UdfParameter(
        description = "The fromTimeZone in java.util.TimeZone ID format. For example: \"UTC\","
            + " \"America/Los_Angeles\", \"PST\", \"Europe/London\"") final String fromTimeZone,
    @UdfParameter(
        description = "The toTimeZone in java.util.TimeZone ID format. For example: \"UTC\","
            + " \"America/Los_Angeles\", \"PST\", \"Europe/London\"") final String toTimeZone
) {
    // Null in, null out.
    if (timestamp == null || fromTimeZone == null || toTimeZone == null) {
        return null;
    }
    try {
        // Shift the epoch millis by the difference of the two zones' UTC offsets
        // at that instant.
        final long epochMs = timestamp.getTime();
        final int toOffsetMs = TimeZone.getTimeZone(ZoneId.of(toTimeZone)).getOffset(epochMs);
        final int fromOffsetMs = TimeZone.getTimeZone(ZoneId.of(fromTimeZone)).getOffset(epochMs);
        return new Timestamp(epochMs + (toOffsetMs - fromOffsetMs));
    } catch (DateTimeException e) {
        throw new KsqlFunctionException("Invalid time zone: " + e.getMessage());
    }
}
@Test public void shouldThrowOnInvalidTimezone() { // When: final KsqlFunctionException e = assertThrows( KsqlFunctionException.class, () -> udf.convertTz(Timestamp.valueOf("2000-01-01 00:00:00"), "wow", "amazing") ); // Then: assertThat(e.getMessage(), containsString("Invalid time zone")); }
public static PredicateTreeAnnotations createPredicateTreeAnnotations(Predicate predicate) { PredicateTreeAnalyzerResult analyzerResult = PredicateTreeAnalyzer.analyzePredicateTree(predicate); // The tree size is used as the interval range. int intervalEnd = analyzerResult.treeSize; AnnotatorContext context = new AnnotatorContext(intervalEnd, analyzerResult.sizeMap); assignIntervalLabels(predicate, Interval.INTERVAL_BEGIN, intervalEnd, false, context); return new PredicateTreeAnnotations( analyzerResult.minFeature, intervalEnd, context.intervals, context.intervalsWithBounds, context.featureConjunctions); }
// Verifies interval/bounds annotation for partitions at the extreme ends of the
// 64-bit range (Long.MAX_VALUE and Long.MIN_VALUE, as range and edge partitions).
@Test
void require_that_extreme_ranges_works() {
    Predicate p = and(
            range("max range", partition("max range=9223372036854775806-9223372036854775807")),
            range("max edge", edgePartition("max edge=9223372036854775807", 0, 0, 1)),
            range("min range", partition("min range=-9223372036854775807-9223372036854775806")),
            range("min edge", edgePartition("min edge=-9223372036854775808", 0, 0, 1)));
    PredicateTreeAnnotations r = PredicateTreeAnnotator.createPredicateTreeAnnotations(p);
    // Plain range partitions land in the interval map, edge partitions in the bounds map.
    assertEquals(4, r.minFeature);
    assertEquals(4, r.intervalEnd);
    assertEquals(2, r.intervalMap.size());
    assertEquals(2, r.boundsMap.size());
    // NOTE(review): hex values presumably encode (begin << 16 | end) interval labels —
    // confirm against the Interval encoding before changing these expectations.
    assertIntervalContains(r, "max range=9223372036854775806-9223372036854775807", 0x00010001);
    assertBoundsContains(r, "max edge=9223372036854775807", bound(0x00020002, 0x40000002));
    assertIntervalContains(r, "min range=-9223372036854775807-9223372036854775806", 0x00030003);
    assertBoundsContains(r, "min edge=-9223372036854775808", bound(0x00040004, 0x40000002));
}
/**
 * Builds a {@code CreateTableCommand} for the given sink node, optionally
 * stamping the emit strategy's output refinement onto a windowed key format.
 *
 * @param outputNode the sink node describing schema, topic and formats
 * @param emitStrategy optional refinement info whose output refinement is applied to the window
 * @return the DDL command for creating the sink table
 */
public CreateTableCommand createTableCommand(
    final KsqlStructuredDataOutputNode outputNode,
    final Optional<RefinementInfo> emitStrategy
) {
  final Optional<WindowInfo> sourceWindow =
      outputNode.getKsqlTopic().getKeyFormat().getWindowInfo();

  final Optional<WindowInfo> windowInfo;
  if (sourceWindow.isPresent() && emitStrategy.isPresent()) {
    // Rebuild the window info so it carries the emit strategy's output refinement.
    final WindowInfo info = sourceWindow.get();
    windowInfo = Optional.of(WindowInfo.of(
        info.getType(),
        info.getSize(),
        Optional.of(emitStrategy.get().getOutputRefinement())
    ));
  } else {
    windowInfo = sourceWindow;
  }

  return new CreateTableCommand(
      outputNode.getSinkName().get(),
      outputNode.getSchema(),
      outputNode.getTimestampColumn(),
      outputNode.getKsqlTopic().getKafkaTopicName(),
      Formats.from(outputNode.getKsqlTopic()),
      windowInfo,
      Optional.of(outputNode.getOrReplace()),
      Optional.of(false)
  );
}
@Test public void shouldThrowInCreateStreamOrReplaceOnSourceTables() { // Given: final SourceName existingTableName = SourceName.of("existingTableName"); final KsqlTable existingTable = mock(KsqlTable.class); when(existingTable.getDataSourceType()).thenReturn(DataSourceType.KTABLE); when(existingTable.isSource()).thenReturn(true); when(metaStore.getSource(existingTableName)).thenReturn(existingTable); final CreateTable ddlStatement = new CreateTable(existingTableName, TableElements.of( tableElement("COL1", new Type(BIGINT), PRIMARY_KEY_CONSTRAINT), tableElement("COL2", new Type(SqlTypes.STRING))), true, false, withProperties, false); // When: final Exception e = assertThrows( KsqlException.class, () -> createSourceFactory .createTableCommand(ddlStatement, ksqlConfig)); // Then: assertThat(e.getMessage(), containsString( "Cannot add table 'existingTableName': CREATE OR REPLACE is not supported on " + "source tables.")); }
/**
 * Returns the path of the location (classpath directory or jar) that the given
 * class was loaded from, or {@code null} if the location cannot be determined.
 *
 * @param clazz the class whose code-source path is wanted
 * @return the location path, or {@code null} when unavailable
 */
public static String getLocationPath(Class<?> clazz) {
    final URL location = getLocation(clazz);
    return location == null ? null : location.getPath();
}
@Test
public void getLocationPathTest() {
    // The location of a test class resolves to the module's test-classes output directory.
    final String path = Objects.requireNonNull(ClassUtil.getLocationPath(ClassUtilTest.class));
    assertTrue(path.endsWith("/hutool-core/target/test-classes/"));
}