Dataset schema:
  focal_method — Java source of the method under test; string lengths range 13 to 60.9k characters.
  test_case — Java source of the corresponding unit test; string lengths range 25 to 109k characters.
// [focal] Pure delegation: forwards every timer-firing argument unchanged to the wrapped `underlying` runner.
@Override public <KeyT> void onTimer( String timerId, String timerFamilyId, KeyT key, BoundedWindow window, Instant timestamp, Instant outputTimestamp, TimeDomain timeDomain) { underlying.onTimer(timerId, timerFamilyId, key, window, timestamp, outputTimestamp, timeDomain); }
// [test] Fires one event-time timer through the pushback runner and asserts the underlying runner recorded exactly that TimerData (manual recording because mocking clashes with annotation analysis, per the in-test comment).
@Test public void testOnTimerCalled() { PushbackSideInputDoFnRunner<Integer, Integer> runner = createRunner(ImmutableList.of()); String timerId = "fooTimer"; IntervalWindow window = new IntervalWindow(new Instant(4), new Instant(16)); Instant timestamp = new Instant(72); // Mocking is not easily compatible with annotation analysis, so we manually record // the method call. runner.onTimer(timerId, "", null, window, timestamp, timestamp, TimeDomain.EVENT_TIME); assertThat( underlying.firedTimers, contains( TimerData.of( timerId, StateNamespaces.window(IntervalWindow.getCoder(), window), timestamp, timestamp, TimeDomain.EVENT_TIME))); }
// [focal] Finalizes a commit for an active Kafka Streams task, keyed off the task state machine:
//   CREATED            -> skip checkpointing (would overwrite a valid checkpoint with empty offsets);
//   RESTORING/SUSPENDED -> always maybeCheckpoint;
//   RUNNING            -> checkpoint only when enforced or EOS is disabled;
//   CLOSED/unknown     -> IllegalStateException.
// Always clears per-commit statuses afterwards, regardless of branch taken.
@Override public void postCommit(final boolean enforceCheckpoint) { switch (state()) { case CREATED: // We should never write a checkpoint for a CREATED task as we may overwrite an existing checkpoint // with empty uninitialized offsets log.debug("Skipped writing checkpoint for {} task", state()); break; case RESTORING: case SUSPENDED: maybeCheckpoint(enforceCheckpoint); log.debug("Finalized commit for {} task with enforce checkpoint {}", state(), enforceCheckpoint); break; case RUNNING: if (enforceCheckpoint || !eosEnabled) { maybeCheckpoint(enforceCheckpoint); } log.debug("Finalized commit for {} task with eos {} enforce checkpoint {}", state(), eosEnabled, enforceCheckpoint); break; case CLOSED: throw new IllegalStateException("Illegal state " + state() + " while post committing active task " + id); default: throw new IllegalStateException("Unknown state " + state() + " while post committing active task " + id); } clearCommitStatuses(); }
// [test] Drives a stateless task to CLOSED (via SUSPENDED) and asserts postCommit(true) hits the CLOSED branch's IllegalStateException.
@Test public void shouldThrowIfPostCommittingOnIllegalState() { when(stateManager.taskType()).thenReturn(TaskType.ACTIVE); task = createStatelessTask(createConfig("100")); task.transitionTo(SUSPENDED); task.transitionTo(Task.State.CLOSED); assertThrows(IllegalStateException.class, () -> task.postCommit(true)); }
// [focal] Trivial accessor for the network's IP pool field.
@Override public KubevirtIpPool ipPool() { return ipPool; }
// [test] Asserts the pool exposed by network1 starts with 101 available and 0 allocated IPs
// (counts presumably derive from the fixture's CIDR setup — not visible here).
@Test public void testIpInitialization() { KubevirtIpPool ipPool1 = network1.ipPool(); assertEquals(101, ipPool1.availableIps().size()); assertEquals(0, ipPool1.allocatedIps().size()); }
// [focal] Convenience overload: difference between two Dates in the given unit, delegating with
// isAbs=true so the result is always non-negative (direction-insensitive).
public static long between(Date beginDate, Date endDate, DateUnit unit) { return between(beginDate, endDate, unit, true); }
// [test] Verifies symmetric (absolute) differences between 2017-03-01 22:34:23 and 2017-04-01 23:56:14
// in each unit, forward and reversed. Inline Chinese comments mark the unit per section:
// 相差月=month diff, 相差天=day diff, 相差小时=hour diff, 相差分=minute diff, 相差秒=second diff,
// 反向=reversed order. (The last section is actually milliseconds despite its 相差秒 label.)
@Test public void betweenTest() { final String dateStr1 = "2017-03-01 22:34:23"; final Date date1 = DateUtil.parse(dateStr1); final String dateStr2 = "2017-04-01 23:56:14"; final Date date2 = DateUtil.parse(dateStr2); // 相差月 long betweenMonth = DateUtil.betweenMonth(date1, date2, false); assertEquals(1, betweenMonth);// 相差一个月 // 反向 betweenMonth = DateUtil.betweenMonth(date2, date1, false); assertEquals(1, betweenMonth);// 相差一个月 // 相差天 long betweenDay = DateUtil.between(date1, date2, DateUnit.DAY); assertEquals(31, betweenDay);// 相差一个月,31天 // 反向 betweenDay = DateUtil.between(date2, date1, DateUnit.DAY); assertEquals(31, betweenDay);// 相差一个月,31天 // 相差小时 long betweenHour = DateUtil.between(date1, date2, DateUnit.HOUR); assertEquals(745, betweenHour); // 反向 betweenHour = DateUtil.between(date2, date1, DateUnit.HOUR); assertEquals(745, betweenHour); // 相差分 long betweenMinute = DateUtil.between(date1, date2, DateUnit.MINUTE); assertEquals(44721, betweenMinute); // 反向 betweenMinute = DateUtil.between(date2, date1, DateUnit.MINUTE); assertEquals(44721, betweenMinute); // 相差秒 long betweenSecond = DateUtil.between(date1, date2, DateUnit.SECOND); assertEquals(2683311, betweenSecond); // 反向 betweenSecond = DateUtil.between(date2, date1, DateUnit.SECOND); assertEquals(2683311, betweenSecond); // 相差秒 long betweenMS = DateUtil.between(date1, date2, DateUnit.MS); assertEquals(2683311000L, betweenMS); // 反向 betweenMS = DateUtil.between(date2, date1, DateUnit.MS); assertEquals(2683311000L, betweenMS); }
// [focal] ASN.1 deserialization: reads the @Asn1Entity annotation from the target type, builds the
// field set (honoring partial-entity mode), then populates a fresh instance from the input stream.
// NOTE(review): assumes `type` carries @Asn1Entity — a missing annotation would NPE on entity.partial().
@Override public Object deserialize(Asn1ObjectInputStream in, Class<? extends Object> type, Asn1ObjectMapper mapper) { final Asn1Entity entity = type.getAnnotation(Asn1Entity.class); final Fields fields = new FieldSet(entity.partial(), mapper.getFields(type)); return readFields(mapper, in, fields, ObjectUtils.newInstance(type)); }
// [test] Feeds fields in reverse tag order (0x82 before 0x81) and asserts the SET still deserializes
// to Set(1, 2) — i.e. field order in the encoding does not matter for SET types.
@Test public void shouldDeserializeDifferentOrder() { assertEquals(new Set(1, 2), deserialize( new SetConverter(), Set.class, new byte[] { (byte) 0x82, 1, 0x02, (byte) 0x81, 1, 0x01 } )); }
/**
 * Renders a map as a Zeppelin {@code %table} display string: a header row with
 * the given column names, then one {@code key<TAB>value} row per map entry,
 * rows separated by newlines.
 *
 * <p>Row order follows the iteration order of {@code rows}; pass a
 * {@code LinkedHashMap}/{@code TreeMap} when deterministic output matters.
 *
 * @param keyName   header label for the key column
 * @param valueName header label for the value column
 * @param rows      entries to render (keys/values rendered via String concatenation, i.e. toString)
 * @return the {@code %table}-formatted string
 */
public static String displayTableFromSimpleMap(String keyName, String valueName, Map<?, ?> rows) {
    // StringBuilder instead of repeated String '+=' — avoids quadratic copying on large maps.
    StringBuilder table = new StringBuilder("%table\n")
            .append(keyName).append('\t').append(valueName).append('\n');
    table.append(rows.entrySet().stream()
            .map(e -> e.getKey() + "\t" + e.getValue())
            .collect(Collectors.joining("\n")));
    return table.toString();
}
// [test] Renders a two-entry word-count map and compares against the expected TABLE_RESULT_1 constant.
// NOTE(review): uses HashMap, so this implicitly relies on a stable iteration order for these two
// keys — a TreeMap/LinkedHashMap fixture would be more robust.
@Test void testDisplayTableFromSimpleMapUtil() { Map<String, Long> counts = new HashMap<>(); counts.put("hello", 4L); counts.put("world", 5L); assertEquals( TABLE_RESULT_1, JavaInterpreterUtils.displayTableFromSimpleMap("Word", "Count", counts) ); }
// [focal] Maps low-level sshj/SFTP IOExceptions to typed BackgroundExceptions.
// Order matters: (1) unwrap a root-cause SSHException first; (2) build a message buffer from the
// exception message plus a distinct root-cause message; (3) translate SFTP status codes to domain
// exceptions (NO_MEDIA deliberately falls through to the generic wrap at the end); (4) then check
// auth, connection, buffer, and generic SSH failures; (5) finally fall back to wrap().
// Note: "CONNECITON_LOST" is the upstream library's own (misspelled) enum constant.
@Override public BackgroundException map(final IOException e) { if(ExceptionUtils.getRootCause(e) != e && ExceptionUtils.getRootCause(e) instanceof SSHException) { return this.map((SSHException) ExceptionUtils.getRootCause(e)); } final StringBuilder buffer = new StringBuilder(); this.append(buffer, e.getMessage()); if(ExceptionUtils.getRootCause(e) != e) { if(!StringUtils.equals(e.getMessage(), ExceptionUtils.getRootCause(e).getMessage())) { this.append(buffer, ExceptionUtils.getRootCause(e).getMessage()); } } if(e instanceof SFTPException) { final SFTPException failure = (SFTPException) e; final Response.StatusCode code = failure.getStatusCode(); switch(code) { case FILE_ALREADY_EXISTS: return new ConflictException(buffer.toString(),e); case NO_SUCH_FILE: case NO_SUCH_PATH: case INVALID_HANDLE: return new NotfoundException(buffer.toString(), e); case PERMISSION_DENIED: case WRITE_PROTECT: case CANNOT_DELETE: return new AccessDeniedException(buffer.toString(), e); case NO_CONNECTION: case CONNECITON_LOST: return new ConnectionRefusedException(buffer.toString(), e); case NO_MEDIA: break; case NO_SPACE_ON_FILESYSTEM: case QUOTA_EXCEEDED: return new QuotaException(buffer.toString(), e); case LOCK_CONFLICT: return new LockedException(buffer.toString(), e); default: return new InteroperabilityException(buffer.toString(), e); } } if(e instanceof UserAuthException) { return new LoginFailureException(buffer.toString(), e); } if(e instanceof ConnectionException) { return new ConnectionRefusedException(buffer.toString(), e); } if(e instanceof Buffer.BufferException) { return new InteroperabilityException(buffer.toString(), e); } if(e instanceof SSHException) { final SSHException failure = (SSHException) e; final DisconnectReason reason = failure.getDisconnectReason(); return this.map(e, buffer, reason); } return this.wrap(e, buffer); }
// [test] A plain SocketException matches none of the typed branches, so wrap() must keep it as the cause.
@Test public void testMapReadFailure() { assertEquals(SocketException.class, new SFTPExceptionMappingService().map(new SocketException("Unexpected end of sftp stream.")).getCause().getClass()); }
// [focal] Resizes the trailing spacer view ("bottom gap") to the requested pixel offset by mutating
// its LayoutParams height and re-applying them so the view re-lays-out.
@Override public void setBottomOffset(int offset) { // the extra padding is a child at the end of the list var lp = mBottomGap.getLayoutParams(); lp.height = offset; mBottomGap.setLayoutParams(lp); }
// [test] Asserts the spacer starts at height 0 and that setBottomOffset(11) lands in its LayoutParams
// (checks getLayoutParams().height, not getHeight(), since no layout pass runs in the test).
@Test public void testSetPadding() { var underTest = buildViewUnderTest(); var spacer = underTest.findViewById(R.id.bottom_gap_view); Assert.assertEquals(0, spacer.getHeight()); underTest.setBottomOffset(11); Assert.assertEquals(11, spacer.getLayoutParams().height); }
// [focal] CSV message-export endpoint. Pipeline: fill request defaults from the SearchUser; validate
// query/timerange/streams (first validation ERROR becomes a 400); enforce stream read permissions;
// build the export command; and stream results back as chunked output via the async runner.
// Note: validation reads the raw (pre-default) request, permission checks read the filled one.
@ApiOperation( value = "Export messages as CSV", notes = "Use this endpoint, if you want to configure export parameters freely instead of relying on an existing Search" ) @POST @Produces(MoreMediaTypes.TEXT_CSV) @NoAuditEvent("Has custom audit events") public ChunkedOutput<SimpleMessageChunk> retrieve(@ApiParam @Valid MessagesRequest rawrequest, @Context SearchUser searchUser) { final MessagesRequest request = fillInIfNecessary(rawrequest, searchUser); final ValidationRequest.Builder validationReq = ValidationRequest.builder(); Optional.ofNullable(rawrequest.queryString()).ifPresent(validationReq::query); Optional.ofNullable(rawrequest.timeRange()).ifPresent(validationReq::timerange); Optional.ofNullable(rawrequest.streams()).ifPresent(validationReq::streams); final ValidationResponse validationResponse = queryValidationService.validate(validationReq.build()); if (validationResponse.status().equals(ValidationStatus.ERROR)) { validationResponse.explanations().stream().findFirst().map(ValidationMessage::errorMessage).ifPresent(message -> { throw new BadRequestException("Request validation failed: " + message); }); } executionGuard.checkUserIsPermittedToSeeStreams(request.streams(), searchUser::canReadStream); ExportMessagesCommand command = commandFactory.buildFromRequest(request); return asyncRunner.apply(chunkConsumer -> exporter().export(command, chunkConsumer)); }
// [test] Captures the request handed to the command factory and asserts that a request with no
// streams was filled in with all streams the SearchUser may read.
@Test void appliesDefaultStreamsToRequestIfOmitted() { MessagesRequest request = validRequest(); final SearchUser searchUser = TestSearchUser.builder() .allowStream("stream-1") .allowStream("stream-2") .build(); ArgumentCaptor<MessagesRequest> captor = ArgumentCaptor.forClass(MessagesRequest.class); when(commandFactory.buildFromRequest(captor.capture())).thenReturn(ExportMessagesCommand.withDefaults()); sut.retrieve(request, searchUser); MessagesRequest value = captor.getValue(); assertThat(value.streams()) .containsExactly("stream-1", "stream-2"); }
// [focal] Prepares per-node fetch requests and sends them, wiring success/failure callbacks that
// re-synchronize on the Fetcher before handling responses (callbacks may fire on another thread).
// Returns the number of fetch requests created.
public synchronized int sendFetches() { final Map<Node, FetchSessionHandler.FetchRequestData> fetchRequests = prepareFetchRequests(); sendFetchesInternal( fetchRequests, (fetchTarget, data, clientResponse) -> { synchronized (Fetcher.this) { handleFetchSuccess(fetchTarget, data, clientResponse); } }, (fetchTarget, data, error) -> { synchronized (Fetcher.this) { handleFetchFailure(fetchTarget, data, error); } }); return fetchRequests.size(); }
// [test] Exercises incremental fetch sessions end-to-end (part 1): first response delivers records
// for tp0 and an empty set for tp1; positions advance only for tp0; a buffered record means the
// next sendFetches() returns 0.
@Test public void testConsumingViaIncrementalFetchRequests() { buildFetcher(2); assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); subscriptions.seekValidated(tp0, new SubscriptionState.FetchPosition(0, Optional.empty(), metadata.currentLeader(tp0))); subscriptions.seekValidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1))); // Fetch some records and establish an incremental fetch session. LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions1 = new LinkedHashMap<>(); partitions1.put(tidp0, new FetchResponseData.PartitionData() .setPartitionIndex(tp0.partition()) .setHighWatermark(2) .setLastStableOffset(2) .setLogStartOffset(0) .setRecords(records)); partitions1.put(tidp1, new FetchResponseData.PartitionData() .setPartitionIndex(tp1.partition()) .setHighWatermark(100) .setLogStartOffset(0) .setRecords(emptyRecords)); FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, 123, partitions1); client.prepareResponse(resp1); assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); consumerClient.poll(time.timer(0)); assertTrue(fetcher.hasCompletedFetches()); Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords = fetchRecords(); assertFalse(fetchedRecords.containsKey(tp1)); List<ConsumerRecord<byte[], byte[]>> records = fetchedRecords.get(tp0); assertEquals(2, records.size()); assertEquals(3L, subscriptions.position(tp0).offset); assertEquals(1L, subscriptions.position(tp1).offset); assertEquals(1, records.get(0).offset()); assertEquals(2, records.get(1).offset()); // There is still a buffered record. assertEquals(0, sendFetches()); fetchedRecords = fetchRecords(); assertFalse(fetchedRecords.containsKey(tp1)); records = fetchedRecords.get(tp0); assertEquals(1, records.size()); assertEquals(3, records.get(0).offset()); assertEquals(4L, subscriptions.position(tp0).offset); // The second response contains no new records. 
// [test, continued] Part 2: an empty incremental response leaves positions unchanged; a third
// response with new tp0 records advances tp0's position to 6 while tp1 stays at 1.
LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions2 = new LinkedHashMap<>(); FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, partitions2); client.prepareResponse(resp2); assertEquals(1, sendFetches()); consumerClient.poll(time.timer(0)); fetchedRecords = fetchRecords(); assertTrue(fetchedRecords.isEmpty()); assertEquals(4L, subscriptions.position(tp0).offset); assertEquals(1L, subscriptions.position(tp1).offset); // The third response contains some new records for tp0. LinkedHashMap<TopicIdPartition, FetchResponseData.PartitionData> partitions3 = new LinkedHashMap<>(); partitions3.put(tidp0, new FetchResponseData.PartitionData() .setPartitionIndex(tp0.partition()) .setHighWatermark(100) .setLastStableOffset(4) .setLogStartOffset(0) .setRecords(nextRecords)); FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, 123, partitions3); client.prepareResponse(resp3); assertEquals(1, sendFetches()); consumerClient.poll(time.timer(0)); fetchedRecords = fetchRecords(); assertFalse(fetchedRecords.containsKey(tp1)); records = fetchedRecords.get(tp0); assertEquals(2, records.size()); assertEquals(6L, subscriptions.position(tp0).offset); assertEquals(1L, subscriptions.position(tp1).offset); assertEquals(4, records.get(0).offset()); assertEquals(5, records.get(1).offset()); }
// [focal] Splits a code string into statements on ';' and '\n', delegating to a code-aware splitter
// (the two boolean flags' exact meaning is defined by codeAwareSplitOnChar — presumably trim/skip
// behavior; confirm against its declaration).
public static List<String> splitStatements(CharSequence string) { return codeAwareSplitOnChar(string, true, true, ';', '\n'); }
// [test] Three ';'-joined statements split into three entries with the trailing ';' removed; the
// quoted "'" inside the first statement must not confuse the splitter (code-aware).
@Test public void testSplitStatements() { String text = "System.out.println(\"'\");" + "$visaApplication.setValidation( Validation.FAILED );" + "drools.update($visaApplication);"; List<String> statements = splitStatements(text); assertThat(statements.size()).isEqualTo(3); assertThat(statements.get(0)).isEqualTo("System.out.println(\"'\")"); assertThat(statements.get(1)).isEqualTo("$visaApplication.setValidation( Validation.FAILED )"); assertThat(statements.get(2)).isEqualTo("drools.update($visaApplication)"); }
// [focal] Concurrent state-change match: accepts only when the base rule matches, the local job is
// exactly one version behind storage, and the local job has passed through a FailedState.
@Override public boolean matches(Job localJob, Job storageProviderJob) { return AllowedConcurrentStateChange.super.matches(localJob, storageProviderJob) && localJob.getVersion() == storageProviderJob.getVersion() - 1 && localJob.getLastJobStateOfType(FailedState.class).isPresent(); }
// [test] An enqueued local job vs. a succeeded storage copy must not match (no FailedState in the
// local history). NOTE(review): the test name says "ScheduledState" but the scenario uses
// SucceededState — the name looks copy-pasted.
@Test void ifStorageJobHasOtherThanScheduledStateItWillNotMatch() { final Job enqueuedJob = anEnqueuedJob().build(); final Job succeededJob = aCopyOf(enqueuedJob).withState(new SucceededState(ofMillis(10), ofMillis(6))).build(); boolean matchesAllowedStateChange = allowedStateChange.matches(enqueuedJob, succeededJob); assertThat(matchesAllowedStateChange).isFalse(); }
@Override public void open() throws InterruptedException, JournalException { // Open a new journal database or get last existing one as current journal database List<Long> dbNames; JournalException exception = null; for (int i = 0; i < RETRY_TIME; i++) { try { // sleep for retry if (i > 0) { Thread.sleep(SLEEP_INTERVAL_SEC * 1000L); } dbNames = bdbEnvironment.getDatabaseNamesWithPrefix(prefix); if (dbNames == null) { // bdb environment is closing throw new JournalException("fail to get dbNames while open bdbje journal. will exit"); } String dbName; if (dbNames.isEmpty()) { /* * This is the very first time to open. Usually, we will open a new database named "1". * But when we start cluster with an image file copied from other cluster, * here we should open database with name image max journal id + 1. * (default GlobalStateMgr.getCurrentState().getReplayedJournalId() is 0) */ if (prefix.isEmpty()) { dbName = getFullDatabaseName(GlobalStateMgr.getCurrentState().getReplayedJournalId() + 1); } else { dbName = getFullDatabaseName(StarMgrServer.getCurrentState().getReplayId() + 1); } LOG.info("the very first time to open bdb, dbname is {}", dbName); } else { // get last database as current journal database dbName = getFullDatabaseName(dbNames.get(dbNames.size() - 1)); } if (currentJournalDB != null) { currentJournalDB.close(); } currentJournalDB = bdbEnvironment.openDatabase(dbName); if (currentJournalDB == null) { LOG.warn("fail to open database {}. retried {} times", dbName, i); continue; } return; } catch (DatabaseException e) { String errMsg = String.format("catch exception after retried %d times", i + 1); LOG.warn(errMsg, e); exception = new JournalException(errMsg); exception.initCause(e); } } // failed after retry throw exception; }
// [test] Happy path: the environment reports databases [3, 23, 45]; open() must pick the last one
// ("45") and open it exactly once — verified via JMockit Expectations with times = 1.
@Test public void testOpenNormal(@Mocked CloseSafeDatabase database, @Mocked BDBEnvironment environment) throws Exception { new Expectations(environment) { { environment.getDatabaseNamesWithPrefix(""); times = 1; result = Arrays.asList(3L, 23L, 45L); environment.openDatabase("45"); times = 1; result = database; } }; BDBJEJournal journal = new BDBJEJournal(environment); journal.open(); }
// [focal] FEEL min() over a list: error for null/empty input; otherwise Collections.min with an
// interceptor comparator, converting the ClassCastException from non-comparable/mixed-type items
// into a typed InvalidParametersEvent error instead of propagating.
public FEELFnResult<Object> invoke(@ParameterName("list") List list) { if ( list == null || list.isEmpty() ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null or empty")); } else { try { return FEELFnResult.ofResult(Collections.min(list, new InterceptNotComparableComparator())); } catch (ClassCastException e) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "contains items that are not comparable")); } } }
// [test] A list mixing Integer, String and BigDecimal must yield an InvalidParametersEvent error.
@Test void invokeListWithHeterogenousTypes() { FunctionTestUtil.assertResultError(minFunction.invoke(Arrays.asList(1, "test", BigDecimal.valueOf(10.2))), InvalidParametersEvent.class); }
// [focal] Resolves the container app root: empty config falls back to the Jetty root for WAR
// projects or the JavaContainerBuilder default otherwise; a non-absolute/invalid path is rethrown
// as InvalidAppRootException (passing the offending value twice — as message and as the bad value).
@VisibleForTesting static AbsoluteUnixPath getAppRootChecked( RawConfiguration rawConfiguration, ProjectProperties projectProperties) throws InvalidAppRootException { String appRoot = rawConfiguration.getAppRoot(); if (appRoot.isEmpty()) { appRoot = projectProperties.isWarProject() ? DEFAULT_JETTY_APP_ROOT : JavaContainerBuilder.DEFAULT_APP_ROOT; } try { return AbsoluteUnixPath.get(appRoot); } catch (IllegalArgumentException ex) { throw new InvalidAppRootException(appRoot, appRoot, ex); } }
// [test] An explicitly configured absolute root is returned verbatim (no default substitution).
@Test public void testGetAppRootChecked() throws InvalidAppRootException { when(rawConfiguration.getAppRoot()).thenReturn("/some/root"); assertThat(PluginConfigurationProcessor.getAppRootChecked(rawConfiguration, projectProperties)) .isEqualTo(AbsoluteUnixPath.get("/some/root")); }
// [focal] Thin REST read endpoint: GET /{id} returning the service as JSON; lookup (and any
// not-found signaling) is entirely delegated to serviceService.
@Operation(summary = "Get single service") @GetMapping(value = "{id}", produces = "application/json") @ResponseBody public Service getById(@PathVariable("id") Long id) { return serviceService.getServiceById(id); }
// [test] Stubs the service layer to throw NotFoundException and asserts the controller lets it
// propagate (exception translation happens elsewhere, e.g. an @ExceptionHandler).
@Test public void serviceIdNotFound() { when(serviceServiceMock.getServiceById(anyLong())).thenThrow(NotFoundException.class); assertThrows(NotFoundException.class, () -> { controller.getById(anyLong()); }); }
// [focal] Rate-limited last-seen update. Derives a stable per-account offset in [0, 86400) seconds
// from the account UUID, truncates "now" to a per-user daily boundary, and only writes (plus
// records a days-since-last-seen metric) when the device's last-seen falls before that boundary —
// guaranteeing at most one write per account per day, spread across the day rather than at midnight.
@VisibleForTesting public Account updateLastSeen(Account account, Device device) { // compute a non-negative integer between 0 and 86400. long n = Util.ensureNonNegativeLong(account.getUuid().getLeastSignificantBits()); final long lastSeenOffsetSeconds = n % ChronoUnit.DAYS.getDuration().toSeconds(); // produce a truncated timestamp which is either today at UTC midnight // or yesterday at UTC midnight, based on per-user randomized offset used. final long todayInMillisWithOffset = Util.todayInMillisGivenOffsetFromNow(clock, Duration.ofSeconds(lastSeenOffsetSeconds).negated()); // only update the device's last seen time when it falls behind the truncated timestamp. // this ensures a few things: // (1) each account will only update last-seen at most once per day // (2) these updates will occur throughout the day rather than all occurring at UTC midnight. if (device.getLastSeen() < todayInMillisWithOffset) { Metrics.summary(DAYS_SINCE_LAST_SEEN_DISTRIBUTION_NAME, IS_PRIMARY_DEVICE_TAG, String.valueOf(device.isPrimary())) .record(Duration.ofMillis(todayInMillisWithOffset - device.getLastSeen()).toDays()); return accountsManager.updateDeviceLastSeen(account, device, Util.todayInMillis(clock)); } return account; }
// [test] With the clock pinned mid-day: acct1 (seen yesterday? — fixture decides which side of the
// boundary each account lands on) gets no write and the same instance back, while acct2 gets an
// updateDeviceLastSeen call and a different returned instance.
@Test void testUpdateLastSeenMiddleOfDay() { clock.pin(Instant.ofEpochMilli(currentTime)); final Device device1 = acct1.getDevices().stream().findFirst().get(); final Device device2 = acct2.getDevices().stream().findFirst().get(); final Account updatedAcct1 = accountAuthenticator.updateLastSeen(acct1, device1); final Account updatedAcct2 = accountAuthenticator.updateLastSeen(acct2, device2); verify(accountsManager, never()).updateDeviceLastSeen(eq(acct1), any(), anyLong()); verify(accountsManager).updateDeviceLastSeen(eq(acct2), eq(device2), anyLong()); assertThat(device1.getLastSeen()).isEqualTo(yesterday); assertThat(device2.getLastSeen()).isEqualTo(today); assertThat(acct1).isSameAs(updatedAcct1); assertThat(acct2).isNotSameAs(updatedAcct2); }
// [focal] Static factory for the fluent builder; validation of the algorithm and of later-supplied
// fingerprints happens inside the builder, not here.
public static FingerprintTrustManagerFactoryBuilder builder(String algorithm) { return new FingerprintTrustManagerFactoryBuilder(algorithm); }
// [test] A null element among the fingerprints must be rejected with IllegalArgumentException at
// build time (null-hostile varargs handling in the builder).
@Test public void testWithNullFingerprint() { assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() { FingerprintTrustManagerFactory .builder("SHA-256") .fingerprints(FIRST_CERT_SHA256_FINGERPRINT, null) .build(); } }); }
// [focal] Stages a list of blob blocks from the exchange body (accepts List<BlobBlock> or a single
// BlobBlock; anything else -> IllegalArgumentException). Each block is staged against the client,
// then — unless commitBlockListLater is set — the collected Block entries are placed back on the
// exchange body and committed immediately via commitBlobBlockList(); otherwise returns an empty
// response and leaves committing to a later operation.
public BlobOperationResponse stageBlockBlobList(final Exchange exchange) throws Exception { ObjectHelper.notNull(exchange, MISSING_EXCHANGE); final Object object = exchange.getIn().getMandatoryBody(); List<BlobBlock> blobBlocks = null; if (object instanceof List) { // noinspection unchecked blobBlocks = (List<BlobBlock>) object; } else if (object instanceof BlobBlock) { blobBlocks = Collections.singletonList((BlobBlock) object); } if (blobBlocks == null || blobBlocks.isEmpty()) { throw new IllegalArgumentException("Illegal storageBlocks payload"); } if (LOG.isTraceEnabled()) { LOG.trace("Putting a blob [{}] from blocks from exchange [{}]...", configurationProxy.getBlobName(exchange), exchange); } final BlobCommonRequestOptions commonRequestOptions = getCommonRequestOptions(exchange); final List<Block> blockEntries = new LinkedList<>(); blobBlocks.forEach(blobBlock -> { blockEntries.add(blobBlock.getBlockEntry()); client.stageBlockBlob(blobBlock.getBlockEntry().getName(), blobBlock.getBlockStream(), blobBlock.getBlockEntry().getSizeLong(), commonRequestOptions.getContentMD5(), commonRequestOptions.leaseId(), commonRequestOptions.getTimeout()); }); final boolean commitBlockListLater = configurationProxy.isCommitBlockListLater(exchange); if (!commitBlockListLater) { // let us commit now exchange.getIn().setBody(blockEntries); return commitBlobBlockList(exchange); } return BlobOperationResponse.createWithEmptyBody(); }
// [test] First asserts a String body is rejected as an illegal payload, then replaces the body with
// a real BlobBlock and (with COMMIT_BLOCK_LIST_LATER set) expects a non-null response with a true body.
@Test void testStageBlockBlobList() throws Exception { final HttpHeaders httpHeaders = new HttpHeaders().set("x-test-header", "123"); when(client.stageBlockBlob(anyString(), any(), anyLong(), any(), any(), any())).thenReturn(httpHeaders); final Exchange exchange = new DefaultExchange(context); exchange.getIn().setBody("test"); exchange.getIn().setHeader(BlobConstants.COMMIT_BLOCK_LIST_LATER, true); // test final BlobOperations operations = new BlobOperations(configuration, client); // in case of invalid payload assertThrows(IllegalArgumentException.class, () -> operations.stageBlockBlobList(exchange)); // in case of correct payload exchange.getIn().setBody(BlobBlock.createBlobBlock("1", new ByteArrayInputStream("test".getBytes()))); // test again final BlobOperationResponse response = operations.stageBlockBlobList(exchange); assertNotNull(response); assertTrue((boolean) response.getBody()); }
// [focal] Static factory: builds a CommandContext carrying only the command name.
public static CommandContext newInstance(String commandName) { return new CommandContext(commandName); }
// [test] Covers this single-arg factory, then additionally exercises the (commandName, args, isHttp)
// overload — the latter is defined elsewhere, not by the focal method above.
@Test void testNewInstance() { CommandContext context = CommandContextFactory.newInstance("test"); assertThat(context.getCommandName(), equalTo("test")); context = CommandContextFactory.newInstance("command", new String[] {"hello"}, true); assertThat(context.getCommandName(), equalTo("command")); assertThat(context.getArgs(), Matchers.arrayContaining("hello")); assertTrue(context.isHttp()); }
// [focal] Per-language CPD threshold: reads "sonar.cpd.<lang>.minimumTokens" from settings,
// defaulting to 100 when the property is absent.
int getMinimumTokens(String languageKey) { return settings.getInt("sonar.cpd." + languageKey + ".minimumTokens").orElse(100); }
// [test] Two languages with distinct configured values resolve independently (42 for java, 33 for php).
@Test public void minimumTokensByLanguage() { when(configuration.getInt("sonar.cpd.java.minimumTokens")).thenReturn(Optional.of(42)); when(configuration.getInt("sonar.cpd.php.minimumTokens")).thenReturn(Optional.of(33)); assertThat(cpdSettings.getMinimumTokens("java")).isEqualTo(42); assertThat(cpdSettings.getMinimumTokens("php")).isEqualTo(33); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap(); Iterator subIter = subscriptions.keySet().iterator(); //initialize subscription mappings for assignment while (subIter.hasNext()) { String memberId = (String) subIter.next(); assignment.put(memberId, new ArrayList()); } ArrayList<String> consumerList = new ArrayList(Utils.sorted(subscriptions.keySet())); Iterator partIter = this.allPartitionsSorted(partitionsPerTopic, subscriptions).iterator(); //assign partitions at random while (partIter.hasNext()) { TopicPartition partition = (TopicPartition) partIter.next(); String topic = partition.topic(); int rand = ThreadLocalRandom.current().nextInt(0, consumerList.size()); while (!((Subscription) subscriptions.get(consumerList.get(rand))).topics().contains(topic)) { rand = ThreadLocalRandom.current().nextInt(0, consumerList.size()); } (assignment.get(consumerList.get(rand))).add(partition); } return assignment; }
// [test] One consumer subscribed only to topic1 while partitionsPerTopic also lists topic2:
// the assignment must contain exactly topic1's three partitions — topic2 is never handed out.
@Test public void onlyPartitionsFromSubscribedTopicsAreAssigned() { String topic1 = "testTopic1"; String topic2 = "testTopic2"; List<String> topicList = new ArrayList<String>(); topicList.add(topic1); String consumerId = "testConsumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic1, 3); partitionsPerTopic.put(topic2, 3); Map<String, List<TopicPartition>> assignment = testAssignor.assign(partitionsPerTopic, Collections.singletonMap(consumerId, new Subscription(topicList))); List<TopicPartition> testAssignment = new ArrayList<>(); testAssignment.add(new TopicPartition(topic1, 0)); testAssignment.add(new TopicPartition(topic1, 1)); testAssignment.add(new TopicPartition(topic1, 2)); assertEquals(testAssignment, assignment.get(consumerId)); }
// [focal] Resolves the effective client address: trusts X-Forwarded-For only when the direct peer
// is a known proxy (ProxyServers.isProxyServer), and then takes the first (left-most, original
// client) entry of the comma-separated chain, falling back to the socket address if it is blank.
public static String getRemoteAddr(HttpServletRequest request) { String remoteAddr = request.getRemoteAddr(); String proxyHeader = request.getHeader("X-Forwarded-For"); if (proxyHeader != null && ProxyServers.isProxyServer(remoteAddr)) { final String clientAddr = proxyHeader.split(",")[0].trim(); if (!clientAddr.isEmpty()) { remoteAddr = clientAddr; } } return remoteAddr; }
// [test] With a trusted proxy, both a single-entry and a chained X-Forwarded-For resolve to the
// original client address (via a helper overload that builds the mock request).
@Test public void testRemoteAddrWithTrustedProxy() { assertEquals(clientAddr, getRemoteAddr(clientAddr, proxyAddr, true)); assertEquals(clientAddr, getRemoteAddr(chainedClientAddr, proxyAddr, true)); }
// [focal] POST /role: rejects creation of the reserved SUPER role with a dedicated error result;
// otherwise delegates create-or-update to roleService and wraps the row count in a success result.
@PostMapping("") @RequiresPermissions("system:role:add") public ShenyuAdminResult createRole(@Valid @RequestBody final RoleDTO roleDTO) { if (SUPER.equals(roleDTO.getRoleName())) { return ShenyuAdminResult.error(ShenyuResultMessage.ROLE_CREATE_ERROR); } return ShenyuAdminResult.success(ShenyuResultMessage.CREATE_SUCCESS, roleService.createOrUpdate(roleDTO)); }
// [test] MockMvc round-trips: a regular role yields CREATE_SUCCESS; the same DTO renamed to SUPER
// yields ROLE_CREATE_ERROR. Bean/mapper wiring is stubbed so @Valid existence checks pass.
@Test public void testCreateRole() throws Exception { RoleDTO roleDTO = buildRoleDTO(); given(roleService.createOrUpdate(roleDTO)).willReturn(1); SpringBeanUtils.getInstance().setApplicationContext(mock(ConfigurableApplicationContext.class)); when(SpringBeanUtils.getInstance().getBean(RoleMapper.class)).thenReturn(roleMapper); when(roleMapper.existed(roleDTO.getId())).thenReturn(true); this.mockMvc.perform(MockMvcRequestBuilders.post("/role") .contentType(MediaType.APPLICATION_JSON) .content(GsonUtils.getInstance().toJson(roleDTO))) .andExpect(status().isOk()) .andExpect(jsonPath("$.message", is(ShenyuResultMessage.CREATE_SUCCESS))) .andReturn(); roleDTO.setRoleName(SUPER); given(roleService.createOrUpdate(roleDTO)).willReturn(1); this.mockMvc.perform(MockMvcRequestBuilders.post("/role") .contentType(MediaType.APPLICATION_JSON) .content(GsonUtils.getInstance().toJson(roleDTO))) .andExpect(status().isOk()) .andExpect(jsonPath("$.message", is(ShenyuResultMessage.ROLE_CREATE_ERROR))) .andReturn(); }
// [focal] Resolves the symbol table for an outgoing request URI. Flow: bail out (null) on a foreign
// URI prefix; consult the per-service cache, where the shared empty table is the cached marker for a
// prior 404 (also returns null); otherwise fetch the remote table (404 tolerated as empty), cache it
// by service name, and — unless empty — also cache it under its extracted symbol-table name. A
// malformed constructed URI is logged and yields null rather than throwing.
@Override public SymbolTable getRequestSymbolTable(URI requestUri) { // If the URI prefix doesn't match, return null. if (!requestUri.toString().startsWith(_uriPrefix)) { return null; } String serviceName = LoadBalancerUtil.getServiceNameFromUri(requestUri); // First check the cache. SymbolTable symbolTable = _serviceNameToSymbolTableCache.getIfPresent(serviceName); if (symbolTable != null) { // If we got a 404, we will cache an empty symbol table. For such cases, just return null, so that no // symbol table is used. return symbolTable == EmptySymbolTable.SHARED ? null : symbolTable; } try { URI symbolTableUri = new URI(_uriPrefix + serviceName + "/" + RestLiSymbolTableRequestHandler.SYMBOL_TABLE_URI_PATH); // // Fetch remote symbol table, configuring the fetch to return an empty table on 404. This will ensure that // for services that don't have symbol tables enabled yet, we will not use any symbol tables when encoding. // symbolTable = fetchRemoteSymbolTable(symbolTableUri, Collections.emptyMap(), true); if (symbolTable != null) { // Cache the retrieved table. _serviceNameToSymbolTableCache.put(serviceName, symbolTable); // If this symbol table is not the shared empty table, also cache it by symbol table name, else return null // to not use any symbol tables when encoding. if (symbolTable != EmptySymbolTable.SHARED) { _symbolTableNameToSymbolTableCache.put( _symbolTableNameHandler.extractMetadata(symbolTable.getName()).getSymbolTableName(), symbolTable); } else { return null; } } return symbolTable; } catch (URISyntaxException ex) { LOGGER.error("Failed to construct symbol table URI from request URI " + requestUri, ex); } return null; }
// [test] A URI whose prefix differs from the provider's configured prefix short-circuits to null.
@Test public void testGetRemoteRequestSymbolTableDifferentUriPrefix() { Assert.assertNull(_provider.getRequestSymbolTable(URI.create("http://blah:100/bleh"))); }
// [focal] Redo task tick: skips the whole pass while the gRPC connection is down, otherwise replays
// pending instance and subscription redo data; any exception is logged and swallowed so the
// scheduled task keeps running on the next tick.
@Override public void run() { if (!redoService.isConnected()) { LogUtils.NAMING_LOGGER.warn("Grpc Connection is disconnect, skip current redo task"); return; } try { redoForInstances(); redoForSubscribes(); } catch (Exception e) { LogUtils.NAMING_LOGGER.warn("Redo task run with unexpected exception: ", e); } }
// [test] Seeds subscriber redo data flagged for deregistration and verifies run() drives exactly
// a doUnsubscribe call through the client proxy.
@Test void testRunRedoDeregisterSubscriber() throws NacosException { Set<SubscriberRedoData> mockData = generateMockSubscriberData(true, true, false); when(redoService.findSubscriberRedoData()).thenReturn(mockData); redoTask.run(); verify(clientProxy).doUnsubscribe(SERVICE, GROUP, CLUSTER); }
// [focal] Validation guard (named like a predicate but actually throws): rejects any topic that
// collides with a reserved system topic by raising MQClientException with a formatted message.
public static void isSystemTopic(String topic) throws MQClientException { if (TopicValidator.isSystemTopic(topic)) { throw new MQClientException( String.format("The topic[%s] is conflict with system topic.", topic), null); } }
// [test] Iterates every known system topic and asserts each one throws, with the default response
// code (-1, since no ResponseCode was supplied) and the exact formatted error message.
@Test public void testIsSystemTopic() { for (String topic : TopicValidator.getSystemTopicSet()) { try { Validators.isSystemTopic(topic); fail("excepted MQClientException for system topic"); } catch (MQClientException e) { assertThat(e.getResponseCode()).isEqualTo(-1); assertThat(e.getErrorMessage()).isEqualTo(String.format("The topic[%s] is conflict with system topic.", topic)); } } }
// [focal] Loads sink configuration from a YAML file via Jackson's YAML-backed ObjectMapper;
// IO/parse failures propagate to the caller as IOException.
public static HbaseSinkConfig load(String yamlFile) throws IOException { ObjectMapper mapper = new ObjectMapper(new YAMLFactory()); return mapper.readValue(new File(yamlFile), HbaseSinkConfig.class); }
// [test] Exercises the sibling Map-based load(Map) overload (not the YAML-file focal method above)
// and asserts every field round-trips from the map.
@Test public final void loadFromMapTest() throws IOException { Map<String, Object> map = new HashMap<String, Object>(); map.put("hbaseConfigResources", "hbase-site.xml"); map.put("zookeeperQuorum", "localhost"); map.put("zookeeperClientPort", "2181"); map.put("zookeeperZnodeParent", "/hbase"); map.put("tableName", "pulsar_hbase"); map.put("rowKeyName", "rowKey"); map.put("familyName", "info"); HbaseSinkConfig config = HbaseSinkConfig.load(map); assertNotNull(config); assertEquals("hbase-site.xml", config.getHbaseConfigResources()); assertEquals("localhost", config.getZookeeperQuorum()); assertEquals("2181", config.getZookeeperClientPort()); assertEquals("/hbase", config.getZookeeperZnodeParent()); assertEquals("pulsar_hbase", config.getTableName()); assertEquals("rowKey", config.getRowKeyName()); assertEquals("info", config.getFamilyName()); }
/**
 * Creates a MetricConsumer from the configured factories.
 * <p>With a single factory the consumer is returned directly (no wrapper);
 * with several, each factory's consumer is wrapped in a
 * ForwardingMetricConsumer that fans calls out to all of them.
 */
public MetricConsumer newInstance() {
    if (factories.length == 1) {
        return factories[0].newInstance();
    }
    MetricConsumer[] delegates = new MetricConsumer[factories.length];
    for (int i = 0; i < delegates.length; ++i) {
        delegates[i] = factories[i].newInstance();
    }
    return new ForwardingMetricConsumer(delegates);
}
/**
 * Verifies the single-factory fast path: newInstance() must return the
 * consumer itself (same reference), not a forwarding wrapper.
 */
@Test void requireThatSingleConsumerIsNotDelegated() { MetricConsumer consumer = Mockito.mock(MetricConsumer.class); MetricConsumerProvider provider = MetricConsumerProviders.newSingletonFactories(consumer); assertSame(consumer, provider.newInstance()); }
/**
 * Persists a detached live object, cascading with PERSIST semantics.
 *
 * @param detachedObject the plain object to persist
 * @return the attached live-object proxy
 */
@Override
public <T> T persist(T detachedObject) {
    // Identity map of objects already persisted during this call; breaks
    // reference cycles while cascading.
    Map<Object, Object> alreadyPersisted = new HashMap<>();
    return persist(detachedObject, alreadyPersisted, RCascadeType.PERSIST);
}
/**
 * Persists three indexed live objects and reassigns t1's object reference
 * from t2 to t3. There are no explicit assertions; the test presumably
 * verifies that re-pointing an indexed reference completes without error
 * (index update path) — confirm intent against the service implementation.
 */
@Test public void testIndexUpdate2() { RLiveObjectService s = redisson.getLiveObjectService(); TestIndexed t1 = new TestIndexed("1"); t1.setName1("test1"); t1 = s.persist(t1); TestIndexed t2 = new TestIndexed("2"); t2.setName1("test2"); t2 = s.persist(t2); t1.setObj(t2); TestIndexed t3 = new TestIndexed("3"); t3.setName1("test3"); t3 = s.persist(t3); t1.setObj(t3); }
/**
 * Executes one queued command from the command topic.
 * <p>Fails fast if the executor has not been configured yet, then
 * deserializes the command and its id and delegates to
 * handleStatementWithTerminatedQueries in EXECUTE mode (final boolean flag:
 * semantics not visible here — see that method's signature).
 */
void handleStatement(final QueuedCommand queuedCommand) { throwIfNotConfigured(); handleStatementWithTerminatedQueries( queuedCommand.getAndDeserializeCommand(commandDeserializer), queuedCommand.getAndDeserializeCommandId(), queuedCommand.getStatus(), Mode.EXECUTE, queuedCommand.getOffset(), false ); }
/**
 * A CREATE STREAM command without an attached plan (pre-0.8.0 format) must
 * be rejected with a KsqlException pointing the user at the upgrade guide.
 */
@Test public void shouldThrowOnCreateSourceWithoutPlan() { // Given: when(mockParser.parseSingleStatement("CREATE STREAM")) .thenReturn(PreparedStatement.of("CREATE STREAM", mock(CreateStream.class))); final Command command = new Command( "CREATE STREAM", emptyMap(), emptyMap(), Optional.empty()); // When final Exception e = assertThrows( KsqlException.class, () -> handleStatement(statementExecutorWithMocks, command, COMMAND_ID, Optional.empty(), 0L) ); // Then assertThat(e.getMessage(), containsString( "This version of ksqlDB does not support executing statements submitted prior to ksqlDB " + "0.8.0 or Confluent Platform ksqlDB 5.5. Please see the upgrading guide to upgrade.")); }
/**
 * Maps a JSON message body onto the DynamoDB component's headers.
 * <p>No-op when ITEM or KEY headers are already set (caller prepared them).
 * The operation is resolved with increasing precedence: "operation" field in
 * the JSON body, then the exchange property, then the OPERATION header.
 * The body's optional "key"/"item" nodes become the key/item attribute maps;
 * when absent, the whole body doubles as key, and the key doubles as item.
 * PutItem sets ITEM, UpdateItem sets KEY + UPDATE_VALUES, DeleteItem sets
 * KEY; each sets a RETURN_VALUES default only if not already present.
 *
 * @throws UnsupportedOperationException for any other operation name
 */
@Override public void transform(Message message, DataType fromType, DataType toType) { if (message.getHeaders().containsKey(Ddb2Constants.ITEM) || message.getHeaders().containsKey(Ddb2Constants.KEY)) { return; } JsonNode jsonBody = getBodyAsJsonNode(message); String operation = Optional.ofNullable(jsonBody.get("operation")).map(JsonNode::asText).orElse(Ddb2Operations.PutItem.name()); if (message.getExchange().hasProperties() && message.getExchange().getProperty("operation", String.class) != null) { operation = message.getExchange().getProperty("operation", String.class); } if (message.getHeaders().containsKey(Ddb2Constants.OPERATION)) { operation = message.getHeader(Ddb2Constants.OPERATION, Ddb2Operations.class).name(); } JsonNode key = jsonBody.get("key"); JsonNode item = jsonBody.get("item"); Map<String, Object> keyProps; if (key != null) { keyProps = dataFormat.getObjectMapper().convertValue(key, new TypeReference<>() { }); } else { keyProps = dataFormat.getObjectMapper().convertValue(jsonBody, new TypeReference<>() { }); } Map<String, Object> itemProps; if (item != null) { itemProps = dataFormat.getObjectMapper().convertValue(item, new TypeReference<>() { }); } else { itemProps = keyProps; } final Map<String, AttributeValue> keyMap = getAttributeValueMap(keyProps); switch (Ddb2Operations.valueOf(operation)) { case PutItem: message.setHeader(Ddb2Constants.OPERATION, Ddb2Operations.PutItem); message.setHeader(Ddb2Constants.ITEM, getAttributeValueMap(itemProps)); setHeaderIfNotPresent(Ddb2Constants.RETURN_VALUES, ReturnValue.ALL_OLD.toString(), message); break; case UpdateItem: message.setHeader(Ddb2Constants.OPERATION, Ddb2Operations.UpdateItem); message.setHeader(Ddb2Constants.KEY, keyMap); message.setHeader(Ddb2Constants.UPDATE_VALUES, getAttributeValueUpdateMap(itemProps)); setHeaderIfNotPresent(Ddb2Constants.RETURN_VALUES, ReturnValue.ALL_NEW.toString(), message); break; case DeleteItem: message.setHeader(Ddb2Constants.OPERATION, Ddb2Operations.DeleteItem); 
message.setHeader(Ddb2Constants.KEY, keyMap); setHeaderIfNotPresent(Ddb2Constants.RETURN_VALUES, ReturnValue.ALL_OLD.toString(), message); break; default: throw new UnsupportedOperationException(String.format("Unsupported operation '%s'", operation)); } }
/**
 * An empty JSON body ("{}") with a PutItem OPERATION header must still
 * transform cleanly: OPERATION normalized to the enum, RETURN_VALUES
 * defaulted to ALL_OLD, and an empty ITEM attribute map produced.
 */
@Test @SuppressWarnings("unchecked") void shouldMapEmptyJson() throws Exception { Exchange exchange = new DefaultExchange(camelContext); exchange.getMessage().setBody("{}"); exchange.getMessage().setHeader(Ddb2Constants.OPERATION, Ddb2Operations.PutItem.name()); transformer.transform(exchange.getMessage(), DataType.ANY, new DataType(AWS_2_DDB_APPLICATION_JSON_TRANSFORMER)); Assertions.assertTrue(exchange.getMessage().hasHeaders()); Assertions.assertEquals(Ddb2Operations.PutItem, exchange.getMessage().getHeader(Ddb2Constants.OPERATION)); Assertions.assertEquals(ReturnValue.ALL_OLD.toString(), exchange.getMessage().getHeader(Ddb2Constants.RETURN_VALUES)); Map<String, AttributeValue> attributeValueMap = exchange.getMessage().getHeader(Ddb2Constants.ITEM, Map.class); Assertions.assertEquals(0L, attributeValueMap.size()); }
/**
 * Deserializes a Bot API answer into a MenuButton, delegating to the shared
 * typed helper.
 *
 * @throws TelegramApiRequestException if the answer cannot be deserialized
 */
@Override public MenuButton deserializeResponse(String answer) throws TelegramApiRequestException { return deserializeResponse(answer, MenuButton.class); }
/**
 * Feeds deliberately malformed JSON (missing comma after "ok":false) to
 * deserializeResponse and expects a TelegramApiRequestException with the
 * generic "Unable to deserialize response" message; fail() guards the
 * no-exception path.
 */
@Test public void testGetChatMenuButtonDeserializeWithWrongObject() { String responseText = "{\"ok\":false\"error_code\": 404,\"description\": \"Error message\"}"; GetChatMenuButton getChatMenuButton = GetChatMenuButton .builder() .chatId("12345") .build(); try { getChatMenuButton.deserializeResponse(responseText); fail(); } catch (TelegramApiRequestException e) { assertEquals("Unable to deserialize response", e.getMessage()); } }
/**
 * Fans a PushTelemetry request out to every registered telemetry receiver.
 * The payload is built once and shared across receivers.
 */
public void exportMetrics(RequestContext context, PushTelemetryRequest request) {
    final DefaultClientTelemetryPayload payload = getPayLoad(request);
    receivers.forEach(receiver -> receiver.exportMetrics(context, payload));
}
/**
 * Registers a stub receiver, exports a PushTelemetry request carrying raw
 * metric bytes, and verifies the receiver was invoked exactly once with the
 * same byte payload.
 */
@Test public void testExportMetrics() throws UnknownHostException { assertTrue(clientMetricsReceiverPlugin.isEmpty()); clientMetricsReceiverPlugin.add(telemetryReceiver); assertFalse(clientMetricsReceiverPlugin.isEmpty()); assertEquals(0, telemetryReceiver.exportMetricsInvokedCount); assertTrue(telemetryReceiver.metricsData.isEmpty()); byte[] metrics = "test-metrics".getBytes(StandardCharsets.UTF_8); clientMetricsReceiverPlugin.exportMetrics(ClientMetricsTestUtils.requestContext(), new PushTelemetryRequest.Builder(new PushTelemetryRequestData().setMetrics(metrics), true).build()); assertEquals(1, telemetryReceiver.exportMetricsInvokedCount); assertEquals(1, telemetryReceiver.metricsData.size()); assertEquals(metrics, telemetryReceiver.metricsData.get(0).array()); }
/**
 * Returns the current gauge value by delegating to the underlying (possibly
 * swappable) gauge source.
 */
@Override public double read() { return gaugeSource.read(); }
/**
 * Verifies a DoubleGauge backed by a dynamic metrics provider keeps tracking
 * after the provider is swapped: collect() is needed after each
 * (de)registration because gauges are refreshed only during collection.
 */
@Test public void whenCacheDynamicMetricSourceReplacedWithConcreteValue() { SomeObject someObject = new SomeObject(); someObject.doubleField = 42.42D; metricsRegistry.registerDynamicMetricsProvider(someObject); DoubleGauge doubleGauge = metricsRegistry.newDoubleGauge("foo.doubleField"); // needed to collect dynamic metrics and update the gauge created from them metricsRegistry.collect(mock(MetricsCollector.class)); assertEquals(42.42D, doubleGauge.read(), 10E-6); metricsRegistry.deregisterDynamicMetricsProvider(someObject); metricsRegistry.registerDynamicMetricsProvider((descriptor, context) -> context.collect(descriptor.withPrefix("foo"), "doubleField", INFO, COUNT, 142.42D)); // needed to collect dynamic metrics and update the gauge created from them metricsRegistry.collect(mock(MetricsCollector.class)); assertEquals(142.42D, doubleGauge.read(), 10E-6); }
/**
 * Loads this step's configuration from the given XML node.
 * <p>The {@code databases} and {@code metaStore} parameters are part of the
 * StepMetaInterface contract but are unused here — all state comes from the
 * XML via readData.
 */
public void loadXML( Node stepnode, List<DatabaseMeta> databases, IMetaStore metaStore ) throws KettleXMLException { readData( stepnode ); }
/**
 * Round-trip check: parses a reference XML fragment (xmlOrig) into a fresh
 * meta instance and asserts equality with the programmatically-built
 * original; relies on LoadFileInputMeta implementing equals().
 */
@Test public void testLoadXML() throws Exception { LoadFileInputMeta origMeta = createMeta(); LoadFileInputMeta testMeta = new LoadFileInputMeta(); DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance(); DocumentBuilder db = dbf.newDocumentBuilder(); Document doc = db.parse( new InputSource( new StringReader( "<step>" + xmlOrig + "</step>" ) ) ); IMetaStore metaStore = null; testMeta.loadXML( doc.getFirstChild(), null, metaStore ); assertEquals( origMeta, testMeta ); }
/**
 * Builds a refresh-after-write cache of stream partition counts.
 * <p>First lookup (load): a failed fetch (null) falls back to 1 rather than
 * throwing, deliberately trading rate accuracy for availability.
 * Refresh (reload): a failed fetch keeps the previously cached value.
 */
@VisibleForTesting static LoadingCache<StreamConfig, Integer> buildCache(PartitionCountFetcher partitionCountFetcher, long duration, TimeUnit unit) { return CacheBuilder.newBuilder().refreshAfterWrite(duration, unit) .build(new CacheLoader<StreamConfig, Integer>() { @Override public Integer load(StreamConfig key) throws Exception { // this method is called the first time cache is used for the given streamConfig Integer count = partitionCountFetcher.fetch(key); // if the count cannot be fetched, don't throw exception; return 1. // The overall consumption rate will be higher, but we prefer that over not consuming at all. return count != null ? count : 1; } @Override public ListenableFuture<Integer> reload(StreamConfig key, Integer oldValue) throws Exception { // if partition count fetcher cannot fetch the value, old value is returned Integer count = partitionCountFetcher.fetch(key); return Futures.immediateFuture(count != null ? count : oldValue); } }); }
/**
 * End-to-end check of buildCache semantics with a 500ms refresh window:
 * initial load, cached reads, refresh after expiry, old-value retention on
 * failed reload, and the load-time fallback to 1 for a config whose first
 * fetch fails. Uses Thread.sleep to cross the refresh boundary, so it is
 * timing-sensitive.
 */
@Test public void testBuildCache() throws Exception { PartitionCountFetcher partitionCountFetcher = mock(PartitionCountFetcher.class); LoadingCache<StreamConfig, Integer> cache = buildCache(partitionCountFetcher, 500, TimeUnit.MILLISECONDS); when(partitionCountFetcher.fetch(STREAM_CONFIG_A)).thenReturn(10); when(partitionCountFetcher.fetch(STREAM_CONFIG_B)).thenReturn(20); assertEquals((int) cache.get(STREAM_CONFIG_A), 10); // call fetcher in load method assertEquals((int) cache.get(STREAM_CONFIG_A), 10); // use cache assertEquals((int) cache.get(STREAM_CONFIG_A), 10); // use cache assertEquals((int) cache.get(STREAM_CONFIG_B), 20); // call fetcher in load method assertEquals((int) cache.get(STREAM_CONFIG_B), 20); // use cache verify(partitionCountFetcher, times(1)).fetch(STREAM_CONFIG_A); // count changes verify(partitionCountFetcher, times(1)).fetch(STREAM_CONFIG_B); // count changes when(partitionCountFetcher.fetch(STREAM_CONFIG_A)).thenReturn(11); when(partitionCountFetcher.fetch(STREAM_CONFIG_B)).thenReturn(21); assertEquals((int) cache.get(STREAM_CONFIG_A), 10); // use cache assertEquals((int) cache.get(STREAM_CONFIG_B), 20); // use cache Thread.sleep(550); // wait till cache expires assertEquals((int) cache.get(STREAM_CONFIG_A), 11); // call fetcher in reload method assertEquals((int) cache.get(STREAM_CONFIG_A), 11); // use cache assertEquals((int) cache.get(STREAM_CONFIG_A), 11); // use cache assertEquals((int) cache.get(STREAM_CONFIG_B), 21); // call fetcher in reload method assertEquals((int) cache.get(STREAM_CONFIG_B), 21); // use cache verify(partitionCountFetcher, times(2)).fetch(STREAM_CONFIG_A); verify(partitionCountFetcher, times(2)).fetch(STREAM_CONFIG_B); when(partitionCountFetcher.fetch(STREAM_CONFIG_A)).thenReturn(null); // unsuccessful fetch when(partitionCountFetcher.fetch(STREAM_CONFIG_B)).thenReturn(22); Thread.sleep(550); // wait till cache expires assertEquals((int) cache.get(STREAM_CONFIG_A), 11); // call fetcher in reload method 
assertEquals((int) cache.get(STREAM_CONFIG_A), 11); // use cache assertEquals((int) cache.get(STREAM_CONFIG_A), 11); // use cache assertEquals((int) cache.get(STREAM_CONFIG_B), 22); // call fetcher in reload method assertEquals((int) cache.get(STREAM_CONFIG_B), 22); // use cache verify(partitionCountFetcher, times(3)).fetch(STREAM_CONFIG_A); verify(partitionCountFetcher, times(3)).fetch(STREAM_CONFIG_B); // unsuccessful fetch in the first call for config C when(partitionCountFetcher.fetch(STREAM_CONFIG_C)).thenReturn(null); // unsuccessful fetch assertEquals((int) cache.get(STREAM_CONFIG_C), 1); // call fetcher in load method assertEquals((int) cache.get(STREAM_CONFIG_C), 1); // use cache assertEquals((int) cache.get(STREAM_CONFIG_C), 1); // use cache verify(partitionCountFetcher, times(1)).fetch(STREAM_CONFIG_C); }
/**
 * Accepts a JobGraph submission on the dispatcher's main thread.
 * <p>Rejection order: (1) jobs that previously reached a globally-terminal
 * state (DuplicateJobSubmissionException.ofGloballyTerminated), (2) jobs
 * currently registered or awaiting termination (duplicate submission),
 * (3) graphs where only some vertices carry resource specs (unsupported
 * partial resource configuration). Otherwise delegates to internalSubmitJob.
 * Logging is wrapped in an MDC scope keyed by the job id.
 */
@Override public CompletableFuture<Acknowledge> submitJob(JobGraph jobGraph, Time timeout) { final JobID jobID = jobGraph.getJobID(); try (MdcCloseable ignored = MdcUtils.withContext(MdcUtils.asContextData(jobID))) { log.info("Received JobGraph submission '{}' ({}).", jobGraph.getName(), jobID); } return isInGloballyTerminalState(jobID) .thenComposeAsync( isTerminated -> { if (isTerminated) { log.warn( "Ignoring JobGraph submission '{}' ({}) because the job already " + "reached a globally-terminal state (i.e. {}) in a " + "previous execution.", jobGraph.getName(), jobID, Arrays.stream(JobStatus.values()) .filter(JobStatus::isGloballyTerminalState) .map(JobStatus::name) .collect(Collectors.joining(", "))); return FutureUtils.completedExceptionally( DuplicateJobSubmissionException.ofGloballyTerminated( jobID)); } else if (jobManagerRunnerRegistry.isRegistered(jobID) || submittedAndWaitingTerminationJobIDs.contains(jobID)) { // job with the given jobID is not terminated, yet return FutureUtils.completedExceptionally( DuplicateJobSubmissionException.of(jobID)); } else if (isPartialResourceConfigured(jobGraph)) { return FutureUtils.completedExceptionally( new JobSubmissionException( jobID, "Currently jobs is not supported if parts of the vertices " + "have resources configured. The limitation will be " + "removed in future versions.")); } else { return internalSubmitJob(jobGraph); } }, getMainThreadExecutor(jobID)); }
/**
 * Verifies PARALLELISM_OVERRIDES applied at submission: cluster-level
 * overrides update v1, omitted vertices (v2) keep their parallelism,
 * job-configuration overrides win over cluster ones (v3: 42 beats 21), and
 * overrides for unknown vertex ids are ignored.
 */
@Test public void testOverridingJobVertexParallelisms() throws Exception { JobVertex v1 = new JobVertex("v1"); v1.setParallelism(1); JobVertex v2 = new JobVertex("v2"); v2.setParallelism(2); JobVertex v3 = new JobVertex("v3"); v3.setParallelism(3); jobGraph = new JobGraph(jobGraph.getJobID(), "job", v1, v2, v3); configuration.set( PipelineOptions.PARALLELISM_OVERRIDES, ImmutableMap.of( v1.getID().toHexString(), "10", // v2 is omitted v3.getID().toHexString(), "21", // unknown vertex added new JobVertexID().toHexString(), "23")); jobGraph.getJobConfiguration() .set( PipelineOptions.PARALLELISM_OVERRIDES, ImmutableMap.of( // verifies that job graph configuration has higher priority v3.getID().toHexString(), "42", // unknown vertex added new JobVertexID().toHexString(), "25")); dispatcher = createAndStartDispatcher( heartbeatServices, haServices, new ExpectedJobIdJobManagerRunnerFactory(jobId)); DispatcherGateway dispatcherGateway = dispatcher.getSelfGateway(DispatcherGateway.class); Assert.assertEquals(jobGraph.findVertexByID(v1.getID()).getParallelism(), 1); Assert.assertEquals(jobGraph.findVertexByID(v2.getID()).getParallelism(), 2); Assert.assertEquals(jobGraph.findVertexByID(v3.getID()).getParallelism(), 3); dispatcherGateway.submitJob(jobGraph, TIMEOUT).get(); Assert.assertEquals(jobGraph.findVertexByID(v1.getID()).getParallelism(), 10); Assert.assertEquals(jobGraph.findVertexByID(v2.getID()).getParallelism(), 2); Assert.assertEquals(jobGraph.findVertexByID(v3.getID()).getParallelism(), 42); }
/**
 * Serializes all registered repository plugins (except the enterprise
 * repository, which is filtered out) to a JSON array of
 * {id, name, description} objects.
 *
 * @return JSON array string of repository plugin descriptors
 */
@SuppressWarnings( "unchecked" )
public String getPlugins() {
  List<PluginInterface> plugins = pluginRegistry.getPlugins( RepositoryPluginType.class );
  JSONArray list = new JSONArray();
  for ( PluginInterface pluginInterface : plugins ) {
    // Read the primary id once instead of re-indexing getIds() repeatedly;
    // constant-first equals() also tolerates a null id.
    String pluginId = pluginInterface.getIds()[ 0 ];
    if ( !"PentahoEnterpriseRepository".equals( pluginId ) ) {
      JSONObject repoJSON = new JSONObject();
      repoJSON.put( "id", pluginId );
      repoJSON.put( "name", pluginInterface.getName() );
      repoJSON.put( "description", pluginInterface.getDescription() );
      list.add( repoJSON );
    }
  }
  return list.toString();
}
/**
 * Asserts the JSON produced by getPlugins() for a single stubbed plugin.
 * NOTE(review): the string comparison depends on JSONObject's key ordering
 * being stable ("name","description","id") — fragile if the JSON library
 * changes.
 */
@Test public void testGetPlugins() throws Exception { String plugins = controller.getPlugins(); assertEquals( "[{\"name\":\"PLUGIN NAME\",\"description\":\"PLUGIN DESCRIPTION\",\"id\":\"ID\"}]", plugins ); }
/**
 * Parses google-java-format command-line options.
 * <p>Params-files (@file) are expanded first. The first token not starting
 * with '-' and everything after it are treated as file arguments. Each flag
 * may be "--flag=value" or "--flag value"; getValue pulls the value from
 * either form. Unknown flags raise IllegalArgumentException. A lone "-"
 * selects stdin mode.
 */
static CommandLineOptions parse(Iterable<String> options) { CommandLineOptions.Builder optionsBuilder = CommandLineOptions.builder(); List<String> expandedOptions = new ArrayList<>(); expandParamsFiles(options, expandedOptions); Iterator<String> it = expandedOptions.iterator(); while (it.hasNext()) { String option = it.next(); if (!option.startsWith("-")) { optionsBuilder.filesBuilder().add(option).addAll(it); break; } String flag; String value; int idx = option.indexOf('='); if (idx >= 0) { flag = option.substring(0, idx); value = option.substring(idx + 1); } else { flag = option; value = null; } // NOTE: update usage information in UsageException when new flags are added switch (flag) { case "-i": case "-r": case "-replace": case "--replace": optionsBuilder.inPlace(true); break; case "--lines": case "-lines": case "--line": case "-line": parseRangeSet(optionsBuilder.linesBuilder(), getValue(flag, it, value)); break; case "--offset": case "-offset": optionsBuilder.addOffset(parseInteger(it, flag, value)); break; case "--length": case "-length": optionsBuilder.addLength(parseInteger(it, flag, value)); break; case "--aosp": case "-aosp": case "-a": optionsBuilder.aosp(true); break; case "--version": case "-version": case "-v": optionsBuilder.version(true); break; case "--help": case "-help": case "-h": optionsBuilder.help(true); break; case "--fix-imports-only": optionsBuilder.fixImportsOnly(true); break; case "--skip-sorting-imports": optionsBuilder.sortImports(false); break; case "--skip-removing-unused-imports": optionsBuilder.removeUnusedImports(false); break; case "--skip-reflowing-long-strings": optionsBuilder.reflowLongStrings(false); break; case "--skip-javadoc-formatting": optionsBuilder.formatJavadoc(false); break; case "-": optionsBuilder.stdin(true); break; case "-n": case "--dry-run": optionsBuilder.dryRun(true); break; case "--set-exit-if-changed": optionsBuilder.setExitIfChanged(true); break; case "-assume-filename": case "--assume-filename": 
optionsBuilder.assumeFilename(getValue(flag, it, value)); break; default: throw new IllegalArgumentException("unexpected flag: " + flag); } } return optionsBuilder.build(); }
/**
 * Pins the default value of every CommandLineOptions field when no flags are
 * given: all booleans off except import sorting, unused-import removal,
 * long-string reflow and javadoc formatting, which default on.
 */
@Test public void defaults() { CommandLineOptions options = CommandLineOptionsParser.parse(ImmutableList.of()); assertThat(options.files()).isEmpty(); assertThat(options.stdin()).isFalse(); assertThat(options.aosp()).isFalse(); assertThat(options.help()).isFalse(); assertThat(options.lengths()).isEmpty(); assertThat(options.lines().asRanges()).isEmpty(); assertThat(options.offsets()).isEmpty(); assertThat(options.inPlace()).isFalse(); assertThat(options.version()).isFalse(); assertThat(options.sortImports()).isTrue(); assertThat(options.removeUnusedImports()).isTrue(); assertThat(options.dryRun()).isFalse(); assertThat(options.setExitIfChanged()).isFalse(); assertThat(options.reflowLongStrings()).isTrue(); assertThat(options.formatJavadoc()).isTrue(); }
/**
 * Asynchronously records the outcome of a job execution on its log row:
 * end time, duration, SUCCESS/FAILURE status and result text.
 * <p>Any persistence failure is logged (with stack trace) and swallowed so
 * the async executor thread is never killed by it.
 */
@Override
@Async
public void updateJobLogResultAsync(Long logId, LocalDateTime endTime, Integer duration, boolean success, String result) {
    try {
        JobLogDO updateObj = JobLogDO.builder().id(logId).endTime(endTime).duration(duration)
                .status(success ? JobLogStatusEnum.SUCCESS.getStatus() : JobLogStatusEnum.FAILURE.getStatus())
                .result(result).build();
        jobLogMapper.updateById(updateObj);
    } catch (Exception ex) {
        // Pass the exception as the final argument so SLF4J logs the stack
        // trace; the original call dropped it.
        log.error("[updateJobLogResultAsync][logId({}) endTime({}) duration({}) success({}) result({})]",
                logId, endTime, duration, success, result, ex);
    }
}
/**
 * Inserts a RUNNING job log, invokes the async update with success=true, and
 * verifies the persisted row carries the new end time, duration, SUCCESS
 * status and result text.
 */
@Test public void testUpdateJobLogResultAsync_success() { // mock 数据 JobLogDO log = randomPojo(JobLogDO.class, o -> { o.setExecuteIndex(1); o.setStatus(JobLogStatusEnum.RUNNING.getStatus()); }); jobLogMapper.insert(log); // 准备参数 Long logId = log.getId(); LocalDateTime endTime = randomLocalDateTime(); Integer duration = randomInteger(); boolean success = true; String result = randomString(); // 调用 jobLogService.updateJobLogResultAsync(logId, endTime, duration, success, result); // 校验记录的属性是否正确 JobLogDO dbLog = jobLogMapper.selectById(log.getId()); assertEquals(endTime, dbLog.getEndTime()); assertEquals(duration, dbLog.getDuration()); assertEquals(JobLogStatusEnum.SUCCESS.getStatus(), dbLog.getStatus()); assertEquals(result, dbLog.getResult()); }
/**
 * Checks only GraylogVersionConstraint instances against the server version
 * (suffix/build metadata cleared before matching). Constraints of any other
 * type are silently skipped — they do not appear in the returned set at all,
 * so callers must not treat absence as failure.
 */
@Override public Set<ConstraintCheckResult> checkConstraints(Collection<Constraint> requestedConstraints) { final ImmutableSet.Builder<ConstraintCheckResult> fulfilledConstraints = ImmutableSet.builder(); for (Constraint constraint : requestedConstraints) { if (constraint instanceof GraylogVersionConstraint) { final GraylogVersionConstraint versionConstraint = (GraylogVersionConstraint) constraint; final Requirement requiredVersion = versionConstraint.version(); final ConstraintCheckResult constraintCheckResult = ConstraintCheckResult.create(versionConstraint, requiredVersion.isSatisfiedBy(graylogVersion.withClearedSuffixAndBuild())); fulfilledConstraints.add(constraintCheckResult); } } return fulfilledConstraints.build(); }
/**
 * Passes one Graylog version constraint and one plugin constraint to the
 * checker; the plugin constraint is ignored by design, so the result
 * contains only the (fulfilled) Graylog constraint check and allMatch holds.
 */
@Test public void checkConstraints() { final GraylogVersionConstraintChecker constraintChecker = new GraylogVersionConstraintChecker("1.0.0"); final GraylogVersionConstraint graylogVersionConstraint = GraylogVersionConstraint.builder() .version("^1.0.0") .build(); final PluginVersionConstraint pluginVersionConstraint = PluginVersionConstraint.builder() .pluginId("unique-id") .version("^1.0.0") .build(); final ImmutableSet<Constraint> requiredConstraints = ImmutableSet.of(graylogVersionConstraint, pluginVersionConstraint); final Set<ConstraintCheckResult> result = constraintChecker.checkConstraints(requiredConstraints); assertThat(result.stream().allMatch(c -> c.fulfilled())).isTrue(); }
/**
 * Builds the cache key "urlencode(dataId)+urlencode(group)"; '+' separates
 * the two encoded parts.
 */
public static String getKey(String dataId, String group) {
    final StringBuilder key = new StringBuilder();
    GroupKey.urlEncode(dataId, key);
    key.append('+');
    GroupKey.urlEncode(group, key);
    return key.toString();
}
/**
 * Exercises the four-argument getKey overload (dataId, group, tenant,
 * identify — not the two-argument variant shown above): '+' in inputs must
 * be percent-encoded while the '+' separators between parts remain literal.
 */
@Test public void getKeyTenantIdentifySpecialTest() { String key = Md5ConfigUtil.getKey("DataId+", "Group+", "Tenant+", "Identify"); Assert.isTrue(Objects.equals("DataId%2B+Group%2B+Tenant%2B+Identify", key)); }
/**
 * Resolves a trim-type code string to its index in trimTypeCode
 * (case-insensitive). Null or unknown codes map to 0, the "none" type.
 */
public static final int getTrimTypeByCode( String tt ) {
  if ( tt == null ) {
    return 0;
  }
  int index = 0;
  for ( String code : trimTypeCode ) {
    if ( code.equalsIgnoreCase( tt ) ) {
      return index;
    }
    index++;
  }
  // Unknown code: fall back to the default (no trimming).
  return 0;
}
/**
 * Covers every known trim code plus the null/empty/unknown fallbacks, which
 * must all resolve to TRIM_TYPE_NONE. (assertEquals arguments are in
 * (actual, expected) order here — harmless but reversed from convention.)
 */
@Test public void testGetTrimTypeByCode() { assertEquals( ValueMetaBase.getTrimTypeByCode( "none" ), ValueMetaInterface.TRIM_TYPE_NONE ); assertEquals( ValueMetaBase.getTrimTypeByCode( "left" ), ValueMetaInterface.TRIM_TYPE_LEFT ); assertEquals( ValueMetaBase.getTrimTypeByCode( "right" ), ValueMetaInterface.TRIM_TYPE_RIGHT ); assertEquals( ValueMetaBase.getTrimTypeByCode( "both" ), ValueMetaInterface.TRIM_TYPE_BOTH ); assertEquals( ValueMetaBase.getTrimTypeByCode( null ), ValueMetaInterface.TRIM_TYPE_NONE ); assertEquals( ValueMetaBase.getTrimTypeByCode( "" ), ValueMetaInterface.TRIM_TYPE_NONE ); assertEquals( ValueMetaBase.getTrimTypeByCode( "fake" ), ValueMetaInterface.TRIM_TYPE_NONE ); }
/**
 * Creates the native input (with extractors) from a content-pack entity.
 * Only EntityV1 is supported; anything else is rejected.
 * NOTE(review): the error message says "Unsupported entity version" but
 * reports the Java class rather than the entity's version — confirm whether
 * a version accessor would be more informative here.
 */
@Override public NativeEntity<InputWithExtractors> createNativeEntity(Entity entity, Map<String, ValueReference> parameters, Map<EntityDescriptor, Object> nativeEntities, String username) { if (entity instanceof EntityV1) { return decode((EntityV1) entity, parameters, username); } else { throw new IllegalArgumentException("Unsupported entity version: " + entity.getClass()); } }
/**
 * (Currently @Ignore'd — requires extensive mocking.) Builds a raw-UDP input
 * entity, runs it through createNativeEntity, and asserts the saved input's
 * descriptor, title, type, scope and content-pack linkage.
 */
@Test @Ignore("Doesn't work without massive amount of mocks") public void createNativeEntity() { final Map<String, Object> configuration = new HashMap<>(); configuration.put("override_source", null); configuration.put("recv_buffer_size", 262144); configuration.put("bind_address", "127.0.0.1"); configuration.put("port", 5555); configuration.put("number_worker_threads", 8); final Entity entity = EntityV1.builder() .id(ModelId.of("5acc84f84b900a4ff290d9a7")) .type(ModelTypes.INPUT_V1) .data(objectMapper.convertValue(InputEntity.create( ValueReference.of("Local Raw UDP"), ReferenceMapUtils.toReferenceMap(configuration), Collections.emptyMap(), ValueReference.of("org.graylog2.inputs.raw.udp.RawUDPInput"), ValueReference.of(false), Collections.emptyList()), JsonNode.class)) .build(); final NativeEntity<InputWithExtractors> nativeEntity = facade.createNativeEntity(entity, Collections.emptyMap(), Collections.emptyMap(), "username"); final InputWithExtractors inputWithExtractors = nativeEntity.entity(); final Input savedInput = inputWithExtractors.input(); final String savedId = savedInput.getId(); assertThat(nativeEntity.descriptor()).isEqualTo(EntityDescriptor.create(savedId, ModelTypes.INPUT_V1)); assertThat(savedInput.getTitle()).isEqualTo("Local Raw UDP"); assertThat(savedInput.getType()).isEqualTo("org.graylog2.inputs.raw.udp.RawUDPInput"); assertThat(savedInput.isGlobal()).isFalse(); assertThat(savedInput.getContentPack()).isNull(); }
/**
 * Static factory: wraps the given TimeLimiter in a reactive operator that
 * can be applied via transformDeferred.
 */
public static <T> TimeLimiterOperator<T> of(TimeLimiter timeLimiter) { return new TimeLimiterOperator<>(timeLimiter); }
/**
 * A Flux that never emits must be cancelled by the operator after the 1ms
 * limit with a TimeoutException, and the limiter must be notified via
 * onError with that exception.
 */
@Test public void timeoutNeverUsingFlux() { given(timeLimiter.getTimeLimiterConfig()) .willReturn(toConfig(Duration.ofMillis(1))); Flux<?> flux = Flux.never() .transformDeferred(TimeLimiterOperator.of(timeLimiter)); StepVerifier.create(flux) .expectError(TimeoutException.class) .verify(Duration.ofMinutes(1)); then(timeLimiter).should() .onError(any(TimeoutException.class)); }
/**
 * Builds the shell command that compresses the given files into an archive.
 * <p>The archive name derives from the single file's path, or from the first
 * file's parent plus "Archive" when several files are selected. File paths
 * are relativized against the working directory and shell-escaped. The
 * command template comes from the "archive.command.create.&lt;id&gt;"
 * preference with {0}=archive, {1}=joined files, {2}=escaped workdir.
 */
public String getCompressCommand(final Path workdir, final List<Path> files) { final StringBuilder archive = new StringBuilder(); if(files.size() == 1) { archive.append(this.escape(files.get(0).getAbsolute())); } else { // Use default filename archive.append(this.escape(files.get(0).getParent().getAbsolute())).append(Path.DELIMITER).append("Archive"); } final List<String> command = new ArrayList<>(); for(Path path : files) { command.add(this.escape(PathRelativizer.relativize(workdir.getAbsolute(), path.getAbsolute()))); } return MessageFormat.format(preferences.getProperty(String.format("archive.command.create.%s", this.getIdentifier())), archive.toString(), StringUtils.join(command, " "), this.escape(workdir.getAbsolute())); }
/**
 * Pins the generated zip/tar command lines, including shell escaping of a
 * space in the filename ("b\ c") and the cd-into-workdir prefix.
 */
@Test public void testCommand() throws FactoryException { assertEquals("cd /a; zip -qr /a/b.zip b", Archive.ZIP.getCompressCommand(new Path("/a", EnumSet.of(Path.Type.directory)), Collections.singletonList(new Path("/a/b", EnumSet.of(Path.Type.file))))); assertEquals("cd /a; zip -qr /a/b\\ c.zip b\\ c", Archive.ZIP.getCompressCommand(new Path("/a", EnumSet.of(Path.Type.directory)), Collections.singletonList(new Path("/a/b c", EnumSet.of(Path.Type.file))))); assertEquals("cd /a; tar -cpPf /a/b.tar b", Archive.TAR.getCompressCommand(new Path("/a", EnumSet.of(Path.Type.directory)), Collections.singletonList(new Path("/a/b", EnumSet.of(Path.Type.file))))); }
static public boolean createMissingParentDirectories(File file) { File parent = file.getParentFile(); if (parent == null) { // Parent directory not specified, therefore it's a request to // create nothing. Done! ;) return true; } // File.mkdirs() creates the parent directories only if they don't // already exist; and it's okay if they do. parent.mkdirs(); return parent.exists(); }
/**
 * Verifies the full create path: parent absent before the call, method
 * returns true, parent present afterwards. Created file and directory are
 * queued in cleanupList for teardown.
 */
@Test public void checkParentCreationInquiryAndSubsequentCreation() { File file = new File(CoreTestConstants.OUTPUT_DIR_PREFIX+"/fu"+diff+"/testing.txt"); // these will be deleted later cleanupList.add(file); cleanupList.add(file.getParentFile()); assertFalse(file.getParentFile().exists()); assertTrue(FileUtil.createMissingParentDirectories(file)); assertTrue(file.getParentFile().exists()); }
/**
 * Appends the Oracle MERGE header: "MERGE INTO &lt;table&gt; TARGET USING
 * (SELECT ? as "f1", ? as "f2", ... FROM dual) SOURCE ON (&lt;pk join&gt;)".
 * One bind placeholder is emitted per table field; identifiers are quoted
 * through the dialect, and the primary-key join comes from
 * appendPrimaryKeys.
 */
void appendMergeClause(StringBuilder sb) { sb.append("MERGE INTO "); dialect.quoteIdentifier(sb, jdbcTable.getExternalNameList()); sb.append(" TARGET USING (SELECT"); Iterator<String> it = jdbcTable.dbFieldNames().iterator(); while (it.hasNext()) { String dbFieldName = it.next(); sb.append(" ? as "); dialect.quoteIdentifier(sb, dbFieldName); if (it.hasNext()) { sb.append(','); } } sb.append(" FROM dual) SOURCE"); sb.append(" ON ("); appendPrimaryKeys(sb); sb.append(")"); }
/**
 * Pins the exact MERGE header text for a two-field table with a composite
 * two-column primary key, including quoting and placeholder layout.
 */
@Test public void appendMergeClause() { OracleUpsertQueryBuilder builder = new OracleUpsertQueryBuilder(jdbcTable, dialect); StringBuilder sb = new StringBuilder(); builder.appendMergeClause(sb); String mergeClause = sb.toString(); assertThat(mergeClause).isEqualTo( "MERGE INTO \"table1\" TARGET " + "USING (SELECT ? as \"field1\", ? as \"field2\" FROM dual) " + "SOURCE ON (TARGET.\"pk1\" = SOURCE.\"pk1\" AND TARGET.\"pk2\" = SOURCE.\"pk2\")" ); }
/**
 * SQL function ip_prefix(varchar, bigint): casts the string to IPADDRESS and
 * delegates to the IPADDRESS-typed overload, which applies the subnet mask.
 */
@Description("IP prefix for a given IP address and subnet size") @ScalarFunction("ip_prefix") @LiteralParameters("x") @SqlType(StandardTypes.IPPREFIX) public static Slice stringIpPrefix(@SqlType("varchar(x)") Slice slice, @SqlType(StandardTypes.BIGINT) long subnetSize) { return ipPrefix(castFromVarcharToIpAddress(slice), subnetSize); }
/**
 * Covers IPv4 and IPv6 prefixes (including IPv4-mapped ::ffff: form and the
 * /0 and max-length edges), out-of-range subnet sizes for both families, and
 * cast failures for strings that are not valid IP addresses.
 */
@Test public void testStringIpPrefix() { assertFunction("IP_PREFIX('1.2.3.4', 24)", IPPREFIX, "1.2.3.0/24"); assertFunction("IP_PREFIX('1.2.3.4', 32)", IPPREFIX, "1.2.3.4/32"); assertFunction("IP_PREFIX('1.2.3.4', 0)", IPPREFIX, "0.0.0.0/0"); assertFunction("IP_PREFIX('::ffff:1.2.3.4', 24)", IPPREFIX, "1.2.3.0/24"); assertFunction("IP_PREFIX('64:ff9b::17', 64)", IPPREFIX, "64:ff9b::/64"); assertFunction("IP_PREFIX('64:ff9b::17', 127)", IPPREFIX, "64:ff9b::16/127"); assertFunction("IP_PREFIX('64:ff9b::17', 128)", IPPREFIX, "64:ff9b::17/128"); assertFunction("IP_PREFIX('64:ff9b::17', 0)", IPPREFIX, "::/0"); assertInvalidFunction("IP_PREFIX('::ffff:1.2.3.4', -1)", "IPv4 subnet size must be in range [0, 32]"); assertInvalidFunction("IP_PREFIX('::ffff:1.2.3.4', 33)", "IPv4 subnet size must be in range [0, 32]"); assertInvalidFunction("IP_PREFIX('64:ff9b::10', -1)", "IPv6 subnet size must be in range [0, 128]"); assertInvalidFunction("IP_PREFIX('64:ff9b::10', 129)", "IPv6 subnet size must be in range [0, 128]"); assertInvalidCast("IP_PREFIX('localhost', 24)", "Cannot cast value to IPADDRESS: localhost"); assertInvalidCast("IP_PREFIX('64::ff9b::10', 24)", "Cannot cast value to IPADDRESS: 64::ff9b::10"); assertInvalidCast("IP_PREFIX('64:face:book::10', 24)", "Cannot cast value to IPADDRESS: 64:face:book::10"); assertInvalidCast("IP_PREFIX('123.456.789.012', 24)", "Cannot cast value to IPADDRESS: 123.456.789.012"); }
/**
 * Map-aware equality assertion: passes on Objects.equal; on mismatch,
 * defers to the generic failure for null actual or non-Map expected, but for
 * two maps re-runs the entry-level comparison to produce a per-key
 * diff (wrong values / missing / unexpected) instead of a blob failure.
 */
@Override public final void isEqualTo(@Nullable Object other) { if (Objects.equal(actual, other)) { return; } // Fail but with a more descriptive message: if (actual == null || !(other instanceof Map)) { super.isEqualTo(other); return; } containsEntriesInAnyOrder((Map<?, ?>) other, /* allowUnexpected= */ false); }
/**
 * Exercises the descriptive failure path of isEqualTo when maps differ in
 * all three ways at once — a changed value (march), a missing key (april)
 * and an unexpected key (feb) — and pins the ordered failure facts.
 */
@Test public void isEqualToFailureExtraMissingAndDiffering() { ImmutableMap<String, Integer> actual = ImmutableMap.of("jan", 1, "feb", 2, "march", 3); ImmutableMap<String, Integer> expectedMap = ImmutableMap.of("jan", 1, "april", 4, "march", 5); expectFailureWhenTestingThat(actual).isEqualTo(expectedMap); assertFailureKeys( "keys with wrong values", "for key", "expected value", "but got value", "missing keys", "for key", "expected value", "unexpected keys", "for key", "unexpected value", "---", "expected", "but was"); assertFailureValueIndexed("for key", 0, "march"); assertFailureValueIndexed("expected value", 0, "5"); assertFailureValue("but got value", "3"); assertFailureValueIndexed("for key", 1, "april"); assertFailureValueIndexed("expected value", 1, "4"); assertFailureValueIndexed("for key", 2, "feb"); assertFailureValue("unexpected value", "2"); assertFailureValue("expected", "{jan=1, april=4, march=5}"); assertFailureValue("but was", "{jan=1, feb=2, march=3}"); }
/**
 * Not supported by this adapter: putAll is declared only to satisfy the
 * interface and always throws (the @MethodNotAvailable marker lets tests
 * discover unavailable operations reflectively).
 */
@Override @MethodNotAvailable public void putAll(Map<K, V> map) { throw new MethodNotAvailableException(); }
/**
 * putAll must throw MethodNotAvailableException on this adapter; the map
 * content is irrelevant, the call itself must be rejected.
 */
@Test(expected = MethodNotAvailableException.class) public void testPutAll() { Map<Integer, String> expectedResult = new HashMap<>(); expectedResult.put(23, "value-23"); expectedResult.put(42, "value-42"); adapter.putAll(expectedResult); }
/**
 * Tries each chained class loader in order and returns the first successful
 * result. A ClassNotFoundException from an individual loader is expected and
 * only trace-logged; if no loader knows the class, a fresh
 * ClassNotFoundException is thrown.
 */
@Override
protected Class<?> loadClass(String name, boolean resolve) throws ClassNotFoundException {
    for (ClassLoader delegate : classLoaders) {
        try {
            return delegate.loadClass(name);
        } catch (ClassNotFoundException e) {
            // Expected when this delegate doesn't know the class; try the next.
            LOG.trace("Class " + name + " not found", e);
        }
    }
    throw new ClassNotFoundException("Class " + name + " not found.");
}
// A class reachable through the parent loader must come back defined by that
// same parent, not re-defined by the chaining loader.
@Test
public void loadReturnsClassFromParentClassLoader() throws Exception {
    final ClassLoader parent = getClass().getClassLoader();
    final ChainingClassLoader chainingClassLoader = new ChainingClassLoader(parent);
    final String className = "org.graylog2.shared.plugins.ChainingClassLoaderTest$Dummy";
    final Class<?> aClass = chainingClassLoader.loadClass(className);
    assertThat(aClass).isNotNull();
    assertThat(aClass.getSimpleName()).isEqualTo("Dummy");
    assertThat(aClass.getClassLoader()).isSameAs(parent);
}
/**
 * Compiles a single-point-to-multi-point intent into one LinkCollectionIntent
 * covering the union of shortest paths from the ingress device to every egress
 * device, allocating bandwidth along the way when a bandwidth constraint is set.
 *
 * Throws IntentException when no egress is reachable, or when some egress is
 * unreachable and the intent does not allow partial failure.
 */
@Override
public List<Intent> compile(SinglePointToMultiPointIntent intent, List<Intent> installable) {
    Set<Link> links = new HashSet<>();
    final boolean allowMissingPaths = intentAllowsPartialFailure(intent);
    boolean hasPaths = false;
    boolean missingSomePaths = false;
    for (ConnectPoint egressPoint : intent.egressPoints()) {
        if (egressPoint.deviceId().equals(intent.ingressPoint().deviceId())) {
            // Do not need to look for paths, since ingress and egress
            // devices are the same.
            if (deviceService.isAvailable(egressPoint.deviceId())) {
                hasPaths = true;
            } else {
                missingSomePaths = true;
            }
            continue;
        }
        Path path = getPath(intent, intent.ingressPoint().deviceId(), egressPoint.deviceId());
        if (path != null) {
            hasPaths = true;
            links.addAll(path.links());
        } else {
            missingSomePaths = true;
        }
    }
    // Allocate bandwidth if a bandwidth constraint is set
    ConnectPoint ingressCP = intent.filteredIngressPoint().connectPoint();
    List<ConnectPoint> egressCPs = intent.filteredEgressPoints().stream()
            .map(fcp -> fcp.connectPoint())
            .collect(Collectors.toList());
    // Bandwidth is reserved on both endpoints of every path link, plus the
    // ingress and all egress connect points.
    List<ConnectPoint> pathCPs = links.stream()
            .flatMap(l -> Stream.of(l.src(), l.dst()))
            .collect(Collectors.toList());
    pathCPs.add(ingressCP);
    pathCPs.addAll(egressCPs);
    allocateBandwidth(intent, pathCPs);
    if (!hasPaths) {
        throw new IntentException("Cannot find any path between ingress and egress points.");
    } else if (!allowMissingPaths && missingSomePaths) {
        throw new IntentException("Missing some paths between ingress and egress points.");
    }
    Intent result = LinkCollectionIntent.builder()
            .appId(intent.appId())
            .key(intent.key())
            .selector(intent.selector())
            .treatment(intent.treatment())
            .links(links)
            .filteredIngressPoints(ImmutableSet.of(intent.filteredIngressPoint()))
            .filteredEgressPoints(intent.filteredEgressPoints())
            .priority(intent.priority())
            .applyTreatmentOnEgress(true)
            .constraints(intent.constraints())
            .resourceGroup(intent.resourceGroup())
            .build();
    return Collections.singletonList(result);
}
// Compiling an intent with a bandwidth constraint must reserve bandwidth on
// every connect point of the computed paths plus the ingress/egress points
// (nine allocations for this fixed MP2MP topology through DID_3).
@Test
public void testBandwidthConstrainedIntentAllocation() {
    final double bpsTotal = 1000.0;
    final double bpsToReserve = 100.0;
    ContinuousResource resourceSw1P1 =
            Resources.continuous(DID_1, PORT_1, Bandwidth.class).resource(bpsToReserve);
    ContinuousResource resourceSw1P2 =
            Resources.continuous(DID_1, PORT_2, Bandwidth.class).resource(bpsToReserve);
    ContinuousResource resourceSw2P1 =
            Resources.continuous(DID_2, PORT_1, Bandwidth.class).resource(bpsToReserve);
    ContinuousResource resourceSw2P2 =
            Resources.continuous(DID_2, PORT_2, Bandwidth.class).resource(bpsToReserve);
    ContinuousResource resourceSw3P1 =
            Resources.continuous(DID_3, PORT_1, Bandwidth.class).resource(bpsToReserve);
    ContinuousResource resourceSw3P2 =
            Resources.continuous(DID_3, PORT_2, Bandwidth.class).resource(bpsToReserve);
    ContinuousResource resourceSw3P3 =
            Resources.continuous(DID_3, PORT_3, Bandwidth.class).resource(bpsToReserve);
    ContinuousResource resourceSw4P1 =
            Resources.continuous(DID_4, PORT_1, Bandwidth.class).resource(bpsToReserve);
    ContinuousResource resourceSw4P2 =
            Resources.continuous(DID_4, PORT_2, Bandwidth.class).resource(bpsToReserve);
    String[] hops = {DID_3.toString()};
    final ResourceService resourceService =
            MockResourceService.makeCustomBandwidthResourceService(bpsTotal);
    final List<Constraint> constraints =
            Collections.singletonList(new BandwidthConstraint(Bandwidth.bps(bpsToReserve)));
    FilteredConnectPoint ingress = new FilteredConnectPoint(new ConnectPoint(DID_4, PORT_1));
    Set<FilteredConnectPoint> egress = ImmutableSet.of(
            new FilteredConnectPoint(new ConnectPoint(DID_1, PORT_2)),
            new FilteredConnectPoint(new ConnectPoint(DID_2, PORT_2)));
    TrafficSelector ipPrefixSelector = DefaultTrafficSelector.builder()
            .matchIPDst(IpPrefix.valueOf("192.168.100.0/24"))
            .build();
    SinglePointToMultiPointIntent intent = makeIntent(ingress, egress, ipPrefixSelector, constraints);
    SinglePointToMultiPointIntentCompiler compiler = makeCompiler(null,
            new IntentTestsMocks.FixedMP2MPMockPathService(hops), resourceService);
    compiler.compile(intent, null);
    Key intentKey = intent.key();
    ResourceAllocation rA1 = new ResourceAllocation(resourceSw1P1, intentKey);
    ResourceAllocation rA2 = new ResourceAllocation(resourceSw1P2, intentKey);
    ResourceAllocation rA3 = new ResourceAllocation(resourceSw2P1, intentKey);
    ResourceAllocation rA4 = new ResourceAllocation(resourceSw2P2, intentKey);
    ResourceAllocation rA5 = new ResourceAllocation(resourceSw3P1, intentKey);
    ResourceAllocation rA6 = new ResourceAllocation(resourceSw3P2, intentKey);
    ResourceAllocation rA7 = new ResourceAllocation(resourceSw3P3, intentKey);
    ResourceAllocation rA8 = new ResourceAllocation(resourceSw4P1, intentKey);
    ResourceAllocation rA9 = new ResourceAllocation(resourceSw4P2, intentKey);
    Set<ResourceAllocation> expectedResourceAllocations =
            ImmutableSet.of(rA1, rA2, rA3, rA4, rA5, rA6, rA7, rA8, rA9);
    Set<ResourceAllocation> resourceAllocations =
            ImmutableSet.copyOf(resourceService.getResourceAllocations(intentKey));
    assertThat(resourceAllocations, hasSize(9));
    assertEquals(expectedResourceAllocations, resourceAllocations);
}
/**
 * Handles a consumer-offset reset request.
 *
 * When server-side reset is enabled in the broker config, the reset is performed
 * locally via resetOffsetInner; otherwise the request is forwarded to the
 * consumer clients (with a flag marking C++ clients, which use a different
 * wire behavior — presumably; confirm against Broker2Client).
 */
public RemotingCommand resetOffset(ChannelHandlerContext ctx,
    RemotingCommand request) throws RemotingCommandException {
    final ResetOffsetRequestHeader requestHeader =
        (ResetOffsetRequestHeader) request.decodeCommandCustomHeader(ResetOffsetRequestHeader.class);
    LOGGER.info("[reset-offset] reset offset started by {}. topic={}, group={}, timestamp={}, isForce={}",
        RemotingHelper.parseChannelRemoteAddr(ctx.channel()), requestHeader.getTopic(),
        requestHeader.getGroup(), requestHeader.getTimestamp(), requestHeader.isForce());
    if (this.brokerController.getBrokerConfig().isUseServerSideResetOffset()) {
        String topic = requestHeader.getTopic();
        String group = requestHeader.getGroup();
        int queueId = requestHeader.getQueueId();
        long timestamp = requestHeader.getTimestamp();
        Long offset = requestHeader.getOffset();
        return resetOffsetInner(topic, group, queueId, timestamp, offset);
    }
    // Fallback path: delegate the reset to the connected consumers.
    boolean isC = false;
    LanguageCode language = request.getLanguage();
    switch (language) {
        case CPP:
            isC = true;
            break;
    }
    return this.brokerController.getBroker2Client().resetOffset(requestHeader.getTopic(),
        requestHeader.getGroup(), requestHeader.getTimestamp(), requestHeader.isForce(), isC);
}
// Walks the reset-offset request through its failure modes (unknown topic,
// unknown group) and then through the success and error paths once topic,
// group, queue id and offset are progressively supplied.
@Test
public void testResetOffset() throws RemotingCommandException {
    ResetOffsetRequestHeader requestHeader = createRequestHeader("topic", "group", -1, false, -1, -1);
    RemotingCommand request = RemotingCommand.createRequestCommand(RequestCode.INVOKE_BROKER_TO_RESET_OFFSET, requestHeader);
    request.makeCustomHeaderToNet();
    RemotingCommand response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.TOPIC_NOT_EXIST);
    this.brokerController.getTopicConfigManager().getTopicConfigTable().put("topic", new TopicConfig("topic"));
    response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUBSCRIPTION_GROUP_NOT_EXIST);
    this.brokerController.getSubscriptionGroupManager().getSubscriptionGroupTable().put("group", new SubscriptionGroupConfig());
    response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
    requestHeader.setQueueId(0);
    request = RemotingCommand.createRequestCommand(RequestCode.INVOKE_BROKER_TO_RESET_OFFSET, requestHeader);
    request.makeCustomHeaderToNet();
    response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SUCCESS);
    requestHeader.setOffset(2L);
    request = RemotingCommand.createRequestCommand(RequestCode.INVOKE_BROKER_TO_RESET_OFFSET, requestHeader);
    request.makeCustomHeaderToNet();
    response = adminBrokerProcessor.processRequest(handlerContext, request);
    assertThat(response.getCode()).isEqualTo(ResponseCode.SYSTEM_ERROR);
}
// Directory-marker suffix for GCS objects, taken from the UFS configuration
// (the test below expects "/" as the configured default).
@Override
protected String getFolderSuffix() {
    return mUfsConf.getString(PropertyKey.UNDERFS_GCS_DIRECTORY_SUFFIX);
}
// The default GCS directory suffix must be "/".
@Test
public void testGetFolderSuffix() {
    Assert.assertEquals("/", mGCSUnderFileSystem.getFolderSuffix());
}
static byte[] getBytes(ByteBuffer buffer) { int remaining = buffer.remaining(); if (buffer.hasArray() && buffer.arrayOffset() == 0) { // do not copy data if the ByteBuffer is a simple wrapper over an array byte[] array = buffer.array(); if (array.length == remaining) { return array; } } buffer.mark(); byte[] avroEncodedData = new byte[remaining]; buffer.get(avroEncodedData); buffer.reset(); return avroEncodedData; }
// A wrapper with arrayOffset 0 but remaining() shorter than the backing array
// must be copied, not returned as the backing array.
@Test
public void testGetBytesOffsetZeroDifferentLen() {
    byte[] originalArray = {1, 2, 3};
    ByteBuffer wrapped = ByteBuffer.wrap(originalArray, 1, 2);
    assertEquals(0, wrapped.arrayOffset());
    assertEquals(2, wrapped.remaining());
    byte[] result = ByteBufferSchemaWrapper.getBytes(wrapped);
    assertNotSame(result, originalArray);
    assertArrayEquals(result, new byte[] {2, 3});
}
/**
 * Minimizes a differentiable multivariate function with the BFGS quasi-Newton
 * method, starting from {@code x} (updated in place to the solution).
 *
 * @param func    function providing value and gradient via g(x, grad)
 * @param x       starting point; overwritten with the best point found
 * @param gtol    convergence tolerance on the (scaled) gradient; must be > 0
 * @param maxIter maximum number of iterations; must be > 0
 * @return the function value at the returned point
 */
public static double minimize(DifferentiableMultivariateFunction func, double[] x, double gtol, int maxIter) {
    if (gtol <= 0.0) {
        throw new IllegalArgumentException("Invalid gradient tolerance: " + gtol);
    }
    if (maxIter <= 0) {
        throw new IllegalArgumentException("Invalid maximum number of iterations: " + maxIter);
    }
    double den, fac, fad, fae, sumdg, sumxi, temp, test;
    int n = x.length;
    double[] dg = new double[n];
    double[] g = new double[n];
    double[] hdg = new double[n];
    double[] xnew = new double[n];
    double[] xi = new double[n];
    double[][] hessin = new double[n][n];
    // Calculate starting function value and gradient and initialize the
    // inverse Hessian to the unit matrix.
    double f = func.g(x, g);
    logger.info(String.format("BFGS: initial function value: %.5f", f));
    for (int i = 0; i < n; i++) {
        hessin[i][i] = 1.0;
        // Initialize line direction.
        xi[i] = -g[i];
    }
    double stpmax = STPMX * max(norm(x), n);
    for (int iter = 1; iter <= maxIter; iter++) {
        // The new function evaluation occurs in line search.
        f = linesearch(func, x, f, g, xi, xnew, stpmax);
        if (iter % 100 == 0) {
            logger.info(String.format("BFGS: the function value after %3d iterations: %.5f", iter, f));
        }
        // update the line direction and current point.
        for (int i = 0; i < n; i++) {
            xi[i] = xnew[i] - x[i];
            x[i] = xnew[i];
        }
        // Test for convergence on x.
        test = 0.0;
        for (int i = 0; i < n; i++) {
            temp = abs(xi[i]) / max(abs(x[i]), 1.0);
            if (temp > test) {
                test = temp;
            }
        }
        if (test < TOLX) {
            logger.info(String.format("BFGS converges on x after %d iterations: %.5f", iter, f));
            return f;
        }
        // Save the old gradient and compute the new one.
        System.arraycopy(g, 0, dg, 0, n);
        func.g(x, g);
        // Test for convergence on zero gradient.
        den = max(f, 1.0);
        test = 0.0;
        for (int i = 0; i < n; i++) {
            temp = abs(g[i]) * max(abs(x[i]), 1.0) / den;
            if (temp > test) {
                test = temp;
            }
        }
        if (test < gtol) {
            logger.info(String.format("BFGS converges on gradient after %d iterations: %.5f", iter, f));
            return f;
        }
        // dg = gradient difference; hdg = H * dg.
        for (int i = 0; i < n; i++) {
            dg[i] = g[i] - dg[i];
        }
        for (int i = 0; i < n; i++) {
            hdg[i] = 0.0;
            for (int j = 0; j < n; j++) {
                hdg[i] += hessin[i][j] * dg[j];
            }
        }
        fac = fae = sumdg = sumxi = 0.0;
        for (int i = 0; i < n; i++) {
            fac += dg[i] * xi[i];
            fae += dg[i] * hdg[i];
            sumdg += dg[i] * dg[i];
            sumxi += xi[i] * xi[i];
        }
        // Skip update if fac is not sufficiently positive.
        if (fac > sqrt(EPSILON * sumdg * sumxi)) {
            fac = 1.0 / fac;
            fad = 1.0 / fae;
            // The vector that makes BFGS different from DFP.
            for (int i = 0; i < n; i++) {
                dg[i] = fac * xi[i] - fad * hdg[i];
            }
            // BFGS updating formula.
            for (int i = 0; i < n; i++) {
                for (int j = i; j < n; j++) {
                    hessin[i][j] += fac * xi[i] * xi[j] - fad * hdg[i] * hdg[j] + fae * dg[i] * dg[j];
                    // Keep the inverse Hessian approximation symmetric.
                    hessin[j][i] = hessin[i][j];
                }
            }
        }
        // Calculate the next direction to go.
        Arrays.fill(xi, 0.0);
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < n; j++) {
                xi[i] -= hessin[i][j] * g[j];
            }
        }
    }
    logger.warn(String.format("BFGS reaches maximum %d iterations: %.5f", maxIter, f));
    return f;
}
// Exercises the bound-constrained L-BFGS-B overload of BFGS.minimize (not the
// unconstrained method above) on an alternating-bounds problem; the optimum
// value for this fixture is expected to be 2050.
@Test
public void testLBFGSB() {
    System.out.println("L-BFGS-B");
    double[] x = new double[100];
    double[] l = new double[100];
    double[] u = new double[100];
    for (int j = 1; j <= x.length; j += 2) {
        x[j - 1] = -1.2;
        x[j] = 1.2;
        l[j - 1] = 1.2;
        u[j - 1] = 2.5;
        l[j] = -2.5;
        u[j] = 0.8;
    }
    double result = BFGS.minimize(func, 5, x, l, u, 1E-8, 500);
    System.out.println(Arrays.toString(x));
    assertEquals(2050, result, 1E-7);
}
// Static factory: wraps the identifier in a StringName and builds the
// AutoValue implementation.
public static ULocalVarIdent create(CharSequence identifier) {
    return new AutoValue_ULocalVarIdent(StringName.of(identifier));
}
// Idents created from different identifiers must not be equal; same identifier
// implies equality (checked by EqualsTester's group semantics).
@Test
public void equality() {
    new EqualsTester()
        .addEqualityGroup(ULocalVarIdent.create("foo"))
        .addEqualityGroup(ULocalVarIdent.create("bar"))
        .testEquals();
}
// Convenience overload: load with no filter and no connection types.
List<Tree> load() {
    return load( null, null );
}
// Loading with the ALL_PROVIDERS filter should yield exactly the one
// registered test provider's tree.
@Test
public void testLoad() {
    List<Tree> trees = fileController.load( ProviderFilterType.ALL_PROVIDERS.toString() );
    Assert.assertEquals( 1, trees.size() );
    Assert.assertEquals( TestFileProvider.TYPE, trees.get( 0 ).getProvider() );
}
/**
 * Snaps the points of a round-trip request to the graph.
 *
 * Exactly one input point is required: intermediate via-points are generated by
 * a tour strategy (seeded, so deterministic per seed) and the start snap is
 * appended again at the end to close the loop.
 *
 * @throws IllegalArgumentException if more or fewer than one point is given
 * @throws PointNotFoundException   if the start point cannot be snapped
 */
public static List<Snap> lookup(List<GHPoint> points, EdgeFilter edgeFilter, LocationIndex locationIndex, Params params) {
    // todo: no snap preventions for round trip so far
    if (points.size() != 1)
        throw new IllegalArgumentException("For round trip calculation exactly one point is required");
    final GHPoint start = points.get(0);
    TourStrategy strategy = new MultiPointTour(new Random(params.seed), params.distanceInMeter, params.roundTripPointCount, params.initialHeading);
    List<Snap> snaps = new ArrayList<>(2 + strategy.getNumberOfGeneratedPoints());
    Snap startSnap = locationIndex.findClosest(start.lat, start.lon, edgeFilter);
    if (!startSnap.isValid())
        throw new PointNotFoundException("Cannot find point 0: " + start, 0);
    snaps.add(startSnap);
    GHPoint last = start;
    for (int i = 0; i < strategy.getNumberOfGeneratedPoints(); i++) {
        double heading = strategy.getHeadingForIteration(i);
        Snap result = generateValidPoint(last, strategy.getDistanceForIteration(i), heading, edgeFilter, locationIndex, params.maxRetries);
        last = result.getSnappedPoint();
        snaps.add(result);
    }
    // Close the loop by returning to the start.
    snaps.add(startSnap);
    return snaps;
}
// Two input points must be rejected: round trips take exactly one point.
@Test
public void lookup_throwsIfNumberOfPointsNotOne() {
    assertThrows(IllegalArgumentException.class, () -> RoundTripRouting.lookup(Arrays.asList(ghPoint1, ghPoint2),
            new FiniteWeightFilter(weighting), null, new RoundTripRouting.Params()));
}
/**
 * Bundles the per-task configuration: job conf, input/output key and value
 * classes, reporter, and the task attempt id. All parameters may be null
 * (the setters below can populate the classes later).
 */
public TaskContext(JobConf conf, Class<?> iKClass, Class<?> iVClass,
    Class<?> oKClass, Class<?> oVClass, TaskReporter reporter, TaskAttemptID id) {
    this.conf = conf;
    this.iKClass = iKClass;
    this.iVClass = iVClass;
    this.oKClass = oKClass;
    this.oVClass = oVClass;
    this.reporter = reporter;
    this.taskAttemptID = id;
}
// Round-trips each input/output key/value class through its setter and getter.
@Test
public void testTaskContext() {
    TaskContext context = new TaskContext(null, null, null, null, null, null, null);
    context.setInputKeyClass(IntWritable.class);
    Assert.assertEquals(IntWritable.class.getName(), context.getInputKeyClass().getName());
    context.setInputValueClass(Text.class);
    Assert.assertEquals(Text.class.getName(), context.getInputValueClass().getName());
    context.setOutputKeyClass(LongWritable.class);
    Assert.assertEquals(LongWritable.class.getName(), context.getOutputKeyClass().getName());
    context.setOutputValueClass(FloatWritable.class);
    Assert.assertEquals(FloatWritable.class.getName(), context.getOutputValueClass().getName());
}
/**
 * Initializes operator resources: the maintenance metric counters (rate-limiter,
 * concurrent-run, nothing-to-trigger, plus one trigger counter per task name),
 * the keyed/list state handles used across checkpoints, and the table loader.
 */
@Override
public void open(Configuration parameters) throws Exception {
    this.rateLimiterTriggeredCounter =
        getRuntimeContext()
            .getMetricGroup()
            .addGroup(
                TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
            .counter(TableMaintenanceMetrics.RATE_LIMITER_TRIGGERED);
    this.concurrentRunThrottledCounter =
        getRuntimeContext()
            .getMetricGroup()
            .addGroup(
                TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
            .counter(TableMaintenanceMetrics.CONCURRENT_RUN_THROTTLED);
    this.nothingToTriggerCounter =
        getRuntimeContext()
            .getMetricGroup()
            .addGroup(
                TableMaintenanceMetrics.GROUP_KEY, TableMaintenanceMetrics.GROUP_VALUE_DEFAULT)
            .counter(TableMaintenanceMetrics.NOTHING_TO_TRIGGER);
    // One TRIGGERED counter per maintenance task, grouped by task name.
    this.triggerCounters =
        taskNames.stream()
            .map(
                name ->
                    getRuntimeContext()
                        .getMetricGroup()
                        .addGroup(TableMaintenanceMetrics.GROUP_KEY, name)
                        .counter(TableMaintenanceMetrics.TRIGGERED))
            .collect(Collectors.toList());
    // Checkpointed state: next evaluation time, accumulated table changes,
    // and the last trigger time per task.
    this.nextEvaluationTimeState =
        getRuntimeContext()
            .getState(new ValueStateDescriptor<>("triggerManagerNextTriggerTime", Types.LONG));
    this.accumulatedChangesState =
        getRuntimeContext()
            .getListState(
                new ListStateDescriptor<>(
                    "triggerManagerAccumulatedChange", TypeInformation.of(TableChange.class)));
    this.lastTriggerTimesState =
        getRuntimeContext()
            .getListState(new ListStateDescriptor<>("triggerManagerLastTriggerTime", Types.LONG));
    tableLoader.open();
}
// With a posDeleteFileCount threshold of 3, each accumulated batch that reaches
// 3 positional delete files must emit a trigger; smaller accumulations must not.
@Test
void testPosDeleteFileCount() throws Exception {
    TriggerManager manager =
        manager(
            sql.tableLoader(TABLE_NAME),
            new TriggerEvaluator.Builder().posDeleteFileCount(3).build());
    try (KeyedOneInputStreamOperatorTestHarness<Boolean, TableChange, Trigger> testHarness =
        harness(manager)) {
      testHarness.open();
      addEventAndCheckResult(testHarness, TableChange.builder().posDeleteFileCount(1).build(), 0);
      addEventAndCheckResult(testHarness, TableChange.builder().posDeleteFileCount(2).build(), 1);
      addEventAndCheckResult(testHarness, TableChange.builder().posDeleteFileCount(3).build(), 2);
      addEventAndCheckResult(testHarness, TableChange.builder().posDeleteFileCount(10).build(), 3);
      // No trigger in this case
      addEventAndCheckResult(testHarness, TableChange.builder().posDeleteFileCount(1).build(), 3);
      addEventAndCheckResult(testHarness, TableChange.builder().posDeleteFileCount(1).build(), 3);
      addEventAndCheckResult(testHarness, TableChange.builder().posDeleteFileCount(1).build(), 4);
    }
}
/**
 * Convenience overload: aggregates all partition samples up to {@code now} with
 * the loosest completeness requirement (at least one valid window, no minimum
 * monitored-partition percentage).
 */
public MetricSampleAggregationResult<String, PartitionEntity> aggregate(Cluster cluster, long now, OperationProgress operationProgress)
    throws NotEnoughValidWindowsException {
    ModelCompletenessRequirements requirements = new ModelCompletenessRequirements(1, 0.0, false);
    return aggregate(cluster, -1L, now, requirements, operationProgress);
}
// A window that exists (because another partition has samples in it) but has no
// samples for the target partition should be extrapolated from its adjacent
// windows (AVG_ADJACENT), and exactly one such extrapolation is expected.
@Test
public void testFallbackToAvgAdjacent() throws NotEnoughValidWindowsException {
    KafkaCruiseControlConfig config = new KafkaCruiseControlConfig(getLoadMonitorProperties());
    TopicPartition anotherTopicPartition = new TopicPartition("AnotherTopic", 1);
    PartitionEntity anotherPartitionEntity = new PartitionEntity(anotherTopicPartition);
    Metadata metadata = getMetadata(Arrays.asList(TP, anotherTopicPartition));
    KafkaPartitionMetricSampleAggregator metricSampleAggregator =
        new KafkaPartitionMetricSampleAggregator(config, metadata);
    // Only give one sample to the aggregator for previous period.
    populateSampleAggregator(NUM_WINDOWS, MIN_SAMPLES_PER_WINDOW, metricSampleAggregator);
    // Give window (NUM_WINDOWS + 1) enough samples for the target partition.
    CruiseControlUnitTestUtils.populateSampleAggregator(1, MIN_SAMPLES_PER_WINDOW,
        metricSampleAggregator, PE, NUM_WINDOWS, WINDOW_MS,
        KafkaMetricDef.commonMetricDef());
    // Let a window exist but not containing samples for partition 0
    CruiseControlUnitTestUtils.populateSampleAggregator(1, MIN_SAMPLES_PER_WINDOW,
        metricSampleAggregator, anotherPartitionEntity, NUM_WINDOWS + 1, WINDOW_MS, KafkaMetricDef
            .commonMetricDef());
    // Let the rest of the window has enough samples.
    CruiseControlUnitTestUtils.populateSampleAggregator(2, MIN_SAMPLES_PER_WINDOW,
        metricSampleAggregator, PE, NUM_WINDOWS + 2, WINDOW_MS,
        KafkaMetricDef.commonMetricDef());
    MetricSampleAggregationResult<String, PartitionEntity> result =
        metricSampleAggregator.aggregate(metadata.fetch(), NUM_WINDOWS * WINDOW_MS * 2, new OperationProgress());
    int numWindows = result.valuesAndExtrapolations().get(PE).metricValues().length();
    assertEquals(NUM_WINDOWS, numWindows);
    int numExtrapolations = 0;
    for (Map.Entry<Integer, Extrapolation> entry : result.valuesAndExtrapolations().get(PE).extrapolations().entrySet()) {
        assertEquals(Extrapolation.AVG_ADJACENT, entry.getValue());
        numExtrapolations++;
    }
    assertEquals(1, numExtrapolations);
}
/**
 * Rotates the index set to a new write index: creates index N+1, waits for it
 * to become healthy, registers its deflector range, then repoints the write
 * alias from the old index (made read-only) to the new one. No-op for
 * non-writable index sets. Throws RuntimeException if the new index cannot be
 * created or does not become healthy.
 */
@Override
public void cycle() {
    if (!getConfig().isWritable()) {
        LOG.debug("Not cycling non-writable index set <{}> ({})", getConfig().id(), getConfig().title());
        return;
    }
    int oldTargetNumber;
    try {
        oldTargetNumber = getNewestIndexNumber();
    } catch (NoTargetIndexException ex) {
        // No index exists yet; -1 marks the "point only, nothing to re-point" case below.
        oldTargetNumber = -1;
    }
    final int newTargetNumber = oldTargetNumber + 1;
    final String newTarget = buildIndexName(newTargetNumber);
    final String oldTarget = buildIndexName(oldTargetNumber);
    if (oldTargetNumber == -1) {
        LOG.info("Cycling from <none> to <{}>.", newTarget);
    } else {
        LOG.info("Cycling from <{}> to <{}>.", oldTarget, newTarget);
    }
    // Create new index.
    LOG.info("Creating target index <{}>.", newTarget);
    if (!indices.create(newTarget, this)) {
        throw new RuntimeException("Could not create new target index <" + newTarget + ">.");
    }
    LOG.info("Waiting for allocation of index <{}>.", newTarget);
    final HealthStatus healthStatus = indices.waitForRecovery(newTarget);
    checkIfHealthy(healthStatus, (status) -> new RuntimeException("New target index did not become healthy (target index: <" + newTarget + ">)"));
    LOG.debug("Health status of index <{}>: {}", newTarget, healthStatus);
    addDeflectorIndexRange(newTarget);
    LOG.info("Index <{}> has been successfully allocated.", newTarget);
    // Point deflector to new index.
    final String indexAlias = getWriteIndexAlias();
    LOG.info("Pointing index alias <{}> to new index <{}>.", indexAlias, newTarget);
    final Activity activity = new Activity(IndexSet.class);
    if (oldTargetNumber == -1) {
        // Only pointing, not cycling.
        pointTo(newTarget);
        activity.setMessage("Cycled index alias <" + indexAlias + "> from <none> to <" + newTarget + ">.");
    } else {
        // Re-pointing from existing old index to the new one.
        LOG.debug("Switching over index alias <{}>.", indexAlias);
        pointTo(newTarget, oldTarget);
        // The retired index no longer receives writes; freeze it and compute its range.
        setIndexReadOnlyAndCalculateRange(oldTarget);
        activity.setMessage("Cycled index alias <" + indexAlias + "> from <" + oldTarget + "> to <" + newTarget + ">.");
    }
    LOG.info("Successfully pointed index alias <{}> to index <{}>.", indexAlias, newTarget);
    activityWriter.write(activity);
    auditEventSender.success(AuditActor.system(nodeId), ES_WRITE_INDEX_UPDATE, ImmutableMap.of("indexName", newTarget));
}
// After cycling, the retired write index must get a read-only/range job,
// submitted with the expected 30-second delay.
@Test
public void cycleSetsOldIndexToReadOnly() throws SystemJobConcurrencyException {
    final String newIndexName = "graylog_1";
    final String oldIndexName = "graylog_0";
    final Map<String, Set<String>> indexNameAliases = ImmutableMap.of(
            oldIndexName, Collections.singleton("graylog_deflector"));
    when(indices.getIndexNamesAndAliases(anyString())).thenReturn(indexNameAliases);
    when(indices.create(newIndexName, mongoIndexSet)).thenReturn(true);
    when(indices.waitForRecovery(newIndexName)).thenReturn(HealthStatus.Green);
    final SetIndexReadOnlyAndCalculateRangeJob rangeJob = mock(SetIndexReadOnlyAndCalculateRangeJob.class);
    when(jobFactory.create(oldIndexName)).thenReturn(rangeJob);
    final MongoIndexSet mongoIndexSet = createIndexSet(config);
    mongoIndexSet.cycle();
    verify(jobFactory, times(1)).create(oldIndexName);
    verify(systemJobManager, times(1)).submitWithDelay(rangeJob, 30L, TimeUnit.SECONDS);
}
/**
 * Builds a StateSerdes for the given topic using Kafka's built-in serdes for
 * the key and value classes (Serdes.serdeFrom throws for unsupported/null types).
 */
public static <K, V> StateSerdes<K, V> withBuiltinTypes(
    final String topic,
    final Class<K> keyClass,
    final Class<V> valueClass) {
    return new StateSerdes<>(topic, Serdes.serdeFrom(keyClass), Serdes.serdeFrom(valueClass));
}
// A null value class must be rejected with a NullPointerException.
@Test
public void shouldThrowIfValueClassIsNullForBuiltinTypes() {
    assertThrows(NullPointerException.class, () -> StateSerdes.withBuiltinTypes("anyName", byte[].class, null));
}
// Log lookup endpoint: narrows by task id first, then taskrun id (optionally a
// specific attempt), otherwise returns all logs of the execution. All queries
// are scoped to the resolved tenant and filtered by the optional min level.
@ExecuteOn(TaskExecutors.IO)
@Get(uri = "logs/{executionId}")
@Operation(tags = {"Logs"}, summary = "Get logs for a specific execution, taskrun or task")
public List<LogEntry> findByExecution(
    @Parameter(description = "The execution id") @PathVariable String executionId,
    @Parameter(description = "The min log level filter") @Nullable @QueryValue Level minLevel,
    @Parameter(description = "The taskrun id") @Nullable @QueryValue String taskRunId,
    @Parameter(description = "The task id") @Nullable @QueryValue String taskId,
    @Parameter(description = "The attempt number") @Nullable @QueryValue Integer attempt
) {
    if (taskId != null) {
        return logRepository.findByExecutionIdAndTaskId(tenantService.resolveTenant(), executionId, taskId, minLevel);
    } else if (taskRunId != null) {
        if (attempt != null) {
            return logRepository.findByExecutionIdAndTaskRunIdAndAttempt(tenantService.resolveTenant(), executionId, taskRunId, minLevel, attempt);
        }
        return logRepository.findByExecutionIdAndTaskRunId(tenantService.resolveTenant(), executionId, taskRunId, minLevel);
    } else {
        return logRepository.findByExecutionId(tenantService.resolveTenant(), executionId, minLevel);
    }
}
// The endpoint must return only the logs of the requested execution
// (two entries share log1's execution id; log3 belongs to another one).
@SuppressWarnings("unchecked")
@Test
void findByExecution() {
    LogEntry log1 = logEntry(Level.INFO);
    LogEntry log2 = log1.toBuilder().message("another message").build();
    LogEntry log3 = logEntry(Level.DEBUG);
    logRepository.save(log1);
    logRepository.save(log2);
    logRepository.save(log3);
    List<LogEntry> logs = client.toBlocking().retrieve(
        HttpRequest.GET("/api/v1/logs/" + log1.getExecutionId()),
        Argument.of(List.class, LogEntry.class)
    );
    assertThat(logs.size(), is(2));
    assertThat(logs.getFirst().getExecutionId(), is(log1.getExecutionId()));
    assertThat(logs.get(1).getExecutionId(), is(log1.getExecutionId()));
}
/**
 * Concatenates the given JSON documents: if every argument parses to a JSON
 * object the objects are merged left-to-right; otherwise every argument is
 * coerced to an array and the arrays are concatenated. Returns null if the
 * argument list, any argument, or any parse result is null/missing.
 */
@Udf
public String concat(@UdfParameter final String... jsonStrings) {
    if (jsonStrings == null) {
        return null;
    }
    final List<JsonNode> nodes = new ArrayList<>(jsonStrings.length);
    boolean allObjects = true;
    for (final String jsonString : jsonStrings) {
        if (jsonString == null) {
            return null;
        }
        final JsonNode node = UdfJsonMapper.parseJson(jsonString);
        if (node.isMissingNode()) {
            return null;
        }
        // One non-object input switches the whole call to array semantics.
        if (allObjects && !node.isObject()) {
            allObjects = false;
        }
        nodes.add(node);
    }
    JsonNode result = nodes.get(0);
    if (allObjects) {
        for (int i = 1; i < nodes.size(); i++) {
            result = concatObjects((ObjectNode) result, (ObjectNode) nodes.get(i));
        }
    } else {
        for (int i = 1; i < nodes.size(); i++) {
            result = concatArrays(toArrayNode(result), toArrayNode(nodes.get(i)));
        }
    }
    return UdfJsonMapper.writeValueAsJson(result);
}
// Array inputs are concatenated at the top level only; nesting is preserved.
@Test
public void shouldMergeNestedArrays() {
    // When:
    final String result = udf.concat("[1, [2]]", "[[[3]], [[[4]]]]");
    // Then:
    assertEquals("[1,[2],[[3]],[[[4]]]]", result);
}
/**
 * Translates the shell's "yarn" sub-command arguments into Flink CLI options:
 * keeps args[0], forces "-m yarn-cluster", and maps each recognized yarn option
 * (jm memory, name, queue, slots, tm memory) onto its CLI form. Prints help and
 * exits when -h is given.
 */
static List<String> parseYarn(String[] args) {
    String[] params = new String[args.length - 1];
    System.arraycopy(args, 1, params, 0, params.length);
    CommandLine commandLine = parse(YARN_OPTIONS, params);
    if (commandLine.hasOption(OPTION_HELP.getOpt())) {
        printYarnHelp();
        System.exit(0);
    }
    List<String> options = new ArrayList<>();
    options.add(args[0]);
    options.add("-m");
    options.add("yarn-cluster");
    constructYarnOption(options, OPTION_JM_MEMORY, commandLine);
    constructYarnOption(options, OPTION_NAME, commandLine);
    constructYarnOption(options, OPTION_QUEUE, commandLine);
    constructYarnOption(options, OPTION_SLOTS, commandLine);
    constructYarnOption(options, OPTION_TM_MEMORY, commandLine);
    return options;
}
// -jm/-tm must be rewritten to the Flink CLI's -yjm/-ytm forms, preceded by
// the forced "-m yarn-cluster" pair.
@Test
void testParseYarnWithOptions() {
    String[] args = {"yarn", "-jm", "1024m", "-tm", "4096m"};
    List<String> commandOptions = PythonShellParser.parseYarn(args);
    String[] expectedCommandOptions = {
        "yarn", "-m", "yarn-cluster", "-yjm", "1024m", "-ytm", "4096m"
    };
    assertThat(commandOptions.toArray()).isEqualTo(expectedCommandOptions);
}
/**
 * Recursively parses a restricted XPath expression into a Matcher. Supported
 * forms: /text(), /node(), /descendant::node() (and the legacy single-colon
 * spelling), /@* and /@name, /* (any child), // (subtree), and /prefix:name
 * steps. Unknown prefixes and unsupported syntax yield Matcher.FAIL.
 */
public Matcher parse(String xpath) {
    if (xpath.equals("/text()")) {
        return TextMatcher.INSTANCE;
    } else if (xpath.equals("/node()")) {
        return NodeMatcher.INSTANCE;
    } else if (xpath.equals("/descendant::node()")
            || xpath.equals("/descendant:node()")) { // for compatibility
        return new CompositeMatcher(
                TextMatcher.INSTANCE,
                new ChildMatcher(new SubtreeMatcher(NodeMatcher.INSTANCE)));
    } else if (xpath.equals("/@*")) {
        return AttributeMatcher.INSTANCE;
    } else if (xpath.length() == 0) {
        // Empty remainder: the current element itself matches.
        return ElementMatcher.INSTANCE;
    } else if (xpath.startsWith("/@")) {
        String name = xpath.substring(2);
        String prefix = null;
        int colon = name.indexOf(':');
        if (colon != -1) {
            prefix = name.substring(0, colon);
            name = name.substring(colon + 1);
        }
        if (prefixes.containsKey(prefix)) {
            return new NamedAttributeMatcher(prefixes.get(prefix), name);
        } else {
            return Matcher.FAIL;
        }
    } else if (xpath.startsWith("/*")) {
        return new ChildMatcher(parse(xpath.substring(2)));
    } else if (xpath.startsWith("///")) {
        return Matcher.FAIL;
    } else if (xpath.startsWith("//")) {
        return new SubtreeMatcher(parse(xpath.substring(1)));
    } else if (xpath.startsWith("/")) {
        int slash = xpath.indexOf('/', 1);
        if (slash == -1) {
            slash = xpath.length();
        }
        String name = xpath.substring(1, slash);
        String prefix = null;
        int colon = name.indexOf(':');
        if (colon != -1) {
            prefix = name.substring(0, colon);
            name = name.substring(colon + 1);
        }
        if (prefixes.containsKey(prefix)) {
            return new NamedElementMatcher(prefixes.get(prefix), name, parse(xpath.substring(slash)));
        } else {
            return Matcher.FAIL;
        }
    } else {
        return Matcher.FAIL;
    }
}
// "/*" matches any direct child element (after one descend step) but nothing
// else: no text, no attributes, and no grandchildren.
@Test
public void testAnyElement() {
    Matcher matcher = parser.parse("/*");
    assertFalse(matcher.matchesText());
    assertFalse(matcher.matchesElement());
    assertFalse(matcher.matchesAttribute(null, "name"));
    assertFalse(matcher.matchesAttribute(NS, "name"));
    assertFalse(matcher.matchesAttribute(NS, "eman"));
    matcher = matcher.descend(NS, "name");
    assertFalse(matcher.matchesText());
    assertTrue(matcher.matchesElement());
    assertFalse(matcher.matchesAttribute(null, "name"));
    assertFalse(matcher.matchesAttribute(NS, "name"));
    assertFalse(matcher.matchesAttribute(NS, "eman"));
    assertEquals(Matcher.FAIL, matcher.descend(NS, "name"));
}
/**
 * Ensures the (dictType, value) pair is unique. A conflict is raised when
 * another dict-data row with the same type and value already exists.
 */
@VisibleForTesting
public void validateDictDataValueUnique(Long id, String dictType, String value) {
    DictDataDO dictData = dictDataMapper.selectByDictTypeAndValue(dictType, value);
    if (dictData == null) {
        return;
    }
    // If id is null there is no "self" row to exclude, so any existing match is a duplicate.
    if (id == null) {
        throw exception(DICT_DATA_VALUE_DUPLICATE);
    }
    // A match belonging to a different row is a duplicate too.
    if (!dictData.getId().equals(id)) {
        throw exception(DICT_DATA_VALUE_DUPLICATE);
    }
}
@Test
public void testValidateDictDataValueUnique_success() {
    // Invoke; no existing row matches, so no exception is expected.
    dictDataService.validateDictDataValueUnique(randomLongId(), randomString(), randomString());
}
// Simple accessor for the kmodule package name computed at construction time.
@Override
public String getKModulePackageName() {
    return kModulePackageName;
}
// The constructor must propagate name and extensions, and derive the kmodule
// package name by sanitizing the model name.
@Test
void constructor() {
    assertThat(kiePMMLDroolsModel.getName()).isEqualTo(MODEL_NAME);
    assertThat(kiePMMLDroolsModel.getExtensions()).isEqualTo(EXTENSIONS);
    assertThat(kiePMMLDroolsModel.getKModulePackageName()).isEqualTo(getSanitizedPackageName(MODEL_NAME));
}
/**
 * Returns the decimal radix ({@code DEC_RADIX}) when {@code str} is a signed
 * decimal number (optional leading '-', digits, optional fractional part),
 * otherwise -1. Null and empty strings yield -1.
 */
public static int isDecimal(String str) {
    boolean decimal = str != null && !str.isEmpty() && str.matches("-?\\d+(\\.\\d+)?");
    return decimal ? DEC_RADIX : -1;
}
// A digit string yields radix 10; hex-looking input with letters is rejected.
@Test
public void isDecimal_Test() {
    Assertions.assertEquals(10, TbUtils.isDecimal("4567039"));
    Assertions.assertEquals(-1, TbUtils.isDecimal("C100110"));
}
/**
 * Populates the issue location from a SARIF physical location: resolves the
 * file by URI, attaches it, and maps the region onto a text range when one is
 * present. Returns false when the file cannot be found in the project.
 */
boolean fillIssueInFileLocation(NewIssueLocation newIssueLocation, Location location) {
    PhysicalLocation physicalLocation = location.getPhysicalLocation();
    String fileUri = getFileUriOrThrow(location);
    Optional<InputFile> file = findFile(fileUri);
    if (file.isEmpty()) {
        return false;
    }
    InputFile inputFile = file.get();
    newIssueLocation.on(inputFile);
    regionMapper.mapRegion(physicalLocation.getRegion(), inputFile).ifPresent(newIssueLocation::at);
    return true;
}
// A location whose artifact URI is null must be rejected with the dedicated
// "URI missing" IllegalArgumentException.
@Test
public void fillIssueInFileLocation_ifNullUri_throws() {
    when(location.getPhysicalLocation().getArtifactLocation().getUri()).thenReturn(null);
    assertThatIllegalArgumentException()
        .isThrownBy(() -> locationMapper.fillIssueInFileLocation(newIssueLocation, location))
        .withMessage(EXPECTED_MESSAGE_URI_MISSING);
}
/**
 * Loads schema metadata for the requested tables: columns first, then indexes
 * (skipped entirely when no columns were found), combined per table into a
 * single SchemaMetaData for the default schema.
 */
@Override
public Collection<SchemaMetaData> load(final MetaDataLoaderMaterial material) throws SQLException {
    Collection<TableMetaData> tableMetaDataList = new LinkedList<>();
    try (Connection connection = material.getDataSource().getConnection()) {
        Map<String, Collection<ColumnMetaData>> columnMetaDataMap = loadColumnMetaDataMap(connection, material.getActualTableNames());
        Map<String, Collection<IndexMetaData>> indexMetaDataMap = columnMetaDataMap.isEmpty()
                ? Collections.emptyMap()
                : loadIndexMetaData(connection, columnMetaDataMap.keySet());
        for (Entry<String, Collection<ColumnMetaData>> entry : columnMetaDataMap.entrySet()) {
            Collection<IndexMetaData> indexMetaDataList = indexMetaDataMap.getOrDefault(entry.getKey(), Collections.emptyList());
            tableMetaDataList.add(new TableMetaData(entry.getKey(), entry.getValue(), indexMetaDataList, Collections.emptyList()));
        }
    }
    return Collections.singleton(new SchemaMetaData(material.getDefaultSchemaName(), tableMetaDataList));
}
// Stubs the four H2 information-schema queries (columns, indexes, primary keys and
// generated-column info) with mock result sets, then verifies the loader assembles
// the expected schema/table meta data map.
@Test
void assertLoadWithoutTables() throws SQLException {
    DataSource dataSource = mockDataSource();
    ResultSet resultSet = mockTableMetaDataResultSet();
    when(dataSource.getConnection().prepareStatement(
            "SELECT TABLE_CATALOG, TABLE_NAME, COLUMN_NAME, DATA_TYPE, ORDINAL_POSITION, COALESCE(IS_VISIBLE, FALSE) IS_VISIBLE, IS_NULLABLE FROM INFORMATION_SCHEMA.COLUMNS"
                    + " WHERE TABLE_CATALOG=? AND TABLE_SCHEMA=? ORDER BY ORDINAL_POSITION")
            .executeQuery()).thenReturn(resultSet);
    ResultSet indexResultSet = mockIndexMetaDataResultSet();
    when(dataSource.getConnection().prepareStatement(
            "SELECT TABLE_CATALOG, TABLE_NAME, INDEX_NAME, INDEX_TYPE_NAME FROM INFORMATION_SCHEMA.INDEXES WHERE TABLE_CATALOG=? AND TABLE_SCHEMA=? AND UPPER(TABLE_NAME) IN ('TBL')")
            .executeQuery()).thenReturn(indexResultSet);
    ResultSet primaryKeys = mockPrimaryKeysMetaDataResultSet();
    when(dataSource.getConnection().prepareStatement(
            "SELECT TABLE_NAME, INDEX_NAME FROM INFORMATION_SCHEMA.INDEXES WHERE TABLE_CATALOG=? AND TABLE_SCHEMA=? AND INDEX_TYPE_NAME = 'PRIMARY KEY'").executeQuery()).thenReturn(primaryKeys);
    ResultSet generatedInfo = mockGeneratedInfoResultSet();
    when(dataSource.getConnection().prepareStatement(
            "SELECT C.TABLE_NAME TABLE_NAME, C.COLUMN_NAME COLUMN_NAME, COALESCE(I.IS_GENERATED, FALSE) IS_GENERATED FROM INFORMATION_SCHEMA.COLUMNS C RIGHT JOIN"
                    + " INFORMATION_SCHEMA.INDEXES I ON C.TABLE_NAME=I.TABLE_NAME WHERE C.TABLE_CATALOG=? AND C.TABLE_SCHEMA=?")
            .executeQuery()).thenReturn(generatedInfo);
    assertTableMetaDataMap(getDialectTableMetaDataLoader().load(new MetaDataLoaderMaterial(Collections.emptyList(), dataSource, new H2DatabaseType(), "sharding_db")));
}
/**
 * Returns a {@code FixedWindows} windowing of the given size with a zero offset,
 * i.e. windows aligned to multiples of {@code size} from the epoch.
 *
 * @param size duration of each window
 */
public static FixedWindows of(Duration size) {
    return new FixedWindows(size, Duration.ZERO);
}
// Mapping the global window onto a fixed-windows side input is invalid:
// the default window mapping function must throw IllegalArgumentException.
@Test
public void testDefaultWindowMappingFnGlobalWindow() {
    PartitioningWindowFn<?, ?> windowFn = FixedWindows.of(Duration.standardMinutes(20L));
    WindowMappingFn<?> mapping = windowFn.getDefaultWindowMappingFn();
    thrown.expect(IllegalArgumentException.class);
    mapping.getSideInputWindow(GlobalWindow.INSTANCE);
}
/**
 * Determines the transfer action for this download.
 * <p>
 * A requested resume always wins. Otherwise the action is read from preferences
 * (the reload preference when a reload was requested, the default otherwise).
 * When that action is {@code callback}, the user is prompted as soon as any root
 * item already exists locally with real content; if nothing exists yet,
 * {@code overwrite} is used without prompting.
 *
 * @param resumeRequested Force resume (user request or retry of a failed transfer)
 * @param reloadRequested Use the reload preference instead of the default download preference
 * @return Transfer action to apply
 */
@Override
public TransferAction action(final Session<?> source, final Session<?> destination, final boolean resumeRequested, final boolean reloadRequested,
                             final TransferPrompt prompt, final ListProgressListener listener) throws BackgroundException {
    if(log.isDebugEnabled()) {
        log.debug(String.format("Find transfer action with prompt %s", prompt));
    }
    if(resumeRequested) {
        // Force resume by user or retry of failed transfer
        return TransferAction.resume;
    }
    final TransferAction action;
    if(reloadRequested) {
        action = TransferAction.forName(
            PreferencesFactory.get().getProperty("queue.download.reload.action"));
    }
    else {
        // Use default
        action = TransferAction.forName(
            PreferencesFactory.get().getProperty("queue.download.action"));
    }
    if(action.equals(TransferAction.callback)) {
        for(TransferItem download : roots) {
            final Local local = download.local;
            if(local.exists()) {
                if(local.isDirectory()) {
                    if(local.list().isEmpty()) {
                        // Do not prompt for existing empty directories
                        continue;
                    }
                }
                if(local.isFile()) {
                    if(local.attributes().getSize() == 0) {
                        // Dragging a file to the local volume creates the file already
                        continue;
                    }
                }
                // Prompt user to choose a filter
                return prompt.prompt(download);
            }
        }
        // No files exist yet therefore it is most straightforward to use the overwrite action
        return TransferAction.overwrite;
    }
    return action;
}
// An existing, non-empty local directory among the roots must trigger the user
// prompt when the configured action resolves to callback.
@Test
public void testActionDirectoryExistsTrue() throws Exception {
    final Path root = new Path("t", EnumSet.of(Path.Type.directory));
    final Transfer t = new DownloadTransfer(new Host(new TestProtocol()), root, new NullLocal("p", "t") {
        @Override
        public boolean exists() {
            return true;
        }

        @Override
        public AttributedList<Local> list() {
            // Non-empty listing so the "empty directory" shortcut does not apply.
            return new AttributedList<>(Collections.singletonList(new NullLocal("p", "a")));
        }
    });
    final AtomicBoolean prompt = new AtomicBoolean();
    final NullSession session = new NullSession(new Host(new TestProtocol()));
    assertEquals(TransferAction.callback, t.action(session, null, false, false, new DisabledTransferPrompt() {
        @Override
        public TransferAction prompt(final TransferItem file) {
            prompt.set(true);
            return TransferAction.callback;
        }
    }, new DisabledListProgressListener()));
    assertTrue(prompt.get());
}
/**
 * Strips the wrapper layers added by {@link CompletionException} (CompletableFuture
 * stages) and {@link ExecutionException} (Future.get) and returns the underlying cause.
 * Any other throwable — including {@code null} — is returned unchanged.
 */
public static Throwable unwrapCompletionException(Throwable ex) {
    Throwable current = ex;
    // Wrappers may be nested in any combination; peel them one layer at a time.
    while (current instanceof CompletionException || current instanceof ExecutionException) {
        current = current.getCause();
    }
    return current;
}
// Verifies unwrapCompletionException recovers the original IllegalStateException from
// three wrapping paths: Future.get (ExecutionException), completeExceptionally, and
// an exception thrown inside a thenAccept stage (CompletionException).
@Test
public void testGetOriginalException() {
    CompletableFuture<Void> future = CompletableFuture.completedFuture(null);
    CompletableFuture<Void> exceptionFuture = future.thenAccept(__ -> {
        throw new IllegalStateException("Illegal state");
    });
    assertTrue(exceptionFuture.isCompletedExceptionally());
    try {
        exceptionFuture.get();
    } catch (InterruptedException | ExecutionException e) {
        Throwable originalException = FutureUtil.unwrapCompletionException(e);
        assertTrue(originalException instanceof IllegalStateException);
    }
    CompletableFuture<Object> exceptionFuture2 = new CompletableFuture<>();
    exceptionFuture2.completeExceptionally(new IllegalStateException("Completed exception"));
    final List<Throwable> future2Exception = Lists.newArrayList();
    exceptionFuture2.exceptionally(ex -> {
        future2Exception.add(FutureUtil.unwrapCompletionException(ex));
        return null;
    });
    Awaitility.await()
        .untilAsserted(() -> {
            assertEquals(future2Exception.size(), 1);
            assertTrue(future2Exception.get(0) instanceof IllegalStateException);
        });
    final List<Throwable> future3Exception = Lists.newArrayList();
    CompletableFuture.completedFuture(null)
        .thenAccept(__ -> {
            throw new IllegalStateException("Throw illegal exception");
        })
        .exceptionally(ex -> {
            future3Exception.add(FutureUtil.unwrapCompletionException(ex));
            return null;
        });
    Awaitility.await()
        .untilAsserted(() -> {
            assertEquals(future3Exception.size(), 1);
            assertTrue(future3Exception.get(0) instanceof IllegalStateException);
        });
}
/**
 * Whether the two URLs refer to the same service key: the {@code interface} parameter
 * must be equal, and group and version must match (isItemMatch handles wildcard and
 * absent values — see its implementation for the exact rules).
 *
 * @param pattern URL carrying the pattern to match against
 * @param value   URL to test
 * @return true if interface, group and version all match
 */
public static boolean isServiceKeyMatch(URL pattern, URL value) {
    String patternInterface = pattern.getParameter(INTERFACE_KEY);
    String valueInterface = value.getParameter(INTERFACE_KEY);
    // Null-safe comparison: the previous code dereferenced the pattern's interface
    // directly and threw NPE when the parameter was absent; two absent interfaces
    // are now treated as equal.
    boolean interfaceMatch = patternInterface == null ? valueInterface == null : patternInterface.equals(valueInterface);
    return interfaceMatch
            && isItemMatch(pattern.getGroup(), value.getGroup())
            && isItemMatch(pattern.getVersion(), value.getVersion());
}
// Service keys match on exact interface/group/version equality, and the "*" wildcard
// for group and version must also be accepted.
@Test
void testIsServiceKeyMatch() throws Exception {
    URL url = URL.valueOf("test://127.0.0.1");
    URL pattern = url.addParameter(GROUP_KEY, "test")
        .addParameter(INTERFACE_KEY, "test")
        .addParameter(VERSION_KEY, "test");
    URL value = pattern;
    assertTrue(UrlUtils.isServiceKeyMatch(pattern, value));
    pattern = pattern.addParameter(GROUP_KEY, "*");
    assertTrue(UrlUtils.isServiceKeyMatch(pattern, value));
    pattern = pattern.addParameter(VERSION_KEY, "*");
    assertTrue(UrlUtils.isServiceKeyMatch(pattern, value));
}
/**
 * Detects class path resources to stage for pipeline execution.
 * <p>
 * The detector configured in {@link PipelineResourcesOptions} produces candidate
 * resource paths for the given class loader; only candidates accepted by
 * {@code isStageable()} are returned.
 *
 * @param classLoader class loader to inspect for resources
 * @param options     pipeline options carrying the resource-detection configuration
 * @return stageable resource paths (possibly empty, never null)
 */
public static List<String> detectClassPathResourcesToStage(
    ClassLoader classLoader, PipelineOptions options) {
  PipelineResourcesOptions artifactsRelatedOptions = options.as(PipelineResourcesOptions.class);
  List<String> detectedResources =
      artifactsRelatedOptions.getPipelineResourcesDetector().detect(classLoader);
  return detectedResources.stream().filter(isStageable()).collect(Collectors.toList());
}
// A class loader whose URL points at a real temp file must yield at least one
// detected (stageable) resource.
@Test
public void testDetectsResourcesToStage() throws IOException {
    File file = tmpFolder.newFile("file");
    URLClassLoader classLoader = new URLClassLoader(new URL[] {file.toURI().toURL()});
    PipelineResourcesOptions options = PipelineOptionsFactory.create().as(PipelineResourcesOptions.class);
    List<String> detectedResources = PipelineResources.detectClassPathResourcesToStage(classLoader, options);
    assertThat(detectedResources, not(empty()));
}
/**
 * Opens an input stream for this file, attempting to acquire a lock first.
 * <p>
 * When no lock can be obtained — either {@code lock(false)} returns null or it fails
 * with {@link LocalAccessDeniedException} — the read falls back to an unlocked stream
 * (best effort rather than failure). When a lock is held, the returned stream releases
 * it on close via {@link LockReleaseProxyInputStream}.
 *
 * @throws AccessDeniedException if the underlying stream cannot be opened
 */
@Override
public InputStream getInputStream() throws AccessDeniedException {
    final NSURL resolved;
    try {
        resolved = this.lock(false);
        if(null == resolved) {
            // No lock obtained; read without one.
            return super.getInputStream();
        }
    }
    catch(LocalAccessDeniedException e) {
        // Best effort: log and fall back to an unlocked read.
        log.warn(String.format("Failure obtaining lock for %s. %s", this, e));
        return super.getInputStream();
    }
    // Stream over the resolved (locked) path; the proxy releases the lock on close.
    return new LockReleaseProxyInputStream(super.getInputStream(resolved.path()), resolved);
}
// Opening a stream for a file that does not exist must fail with LocalAccessDeniedException.
@Test(expected = LocalAccessDeniedException.class)
public void testReadNoFile() throws Exception {
    final String name = UUID.randomUUID().toString();
    FinderLocal l = new FinderLocal(System.getProperty("java.io.tmpdir"), name);
    l.getInputStream();
}
@VisibleForTesting void validateUsernameUnique(Long id, String username) { if (StrUtil.isBlank(username)) { return; } AdminUserDO user = userMapper.selectByUsername(username); if (user == null) { return; } // 如果 id 为空,说明不用比较是否为相同 id 的用户 if (id == null) { throw exception(USER_USERNAME_EXISTS); } if (!user.getId().equals(id)) { throw exception(USER_USERNAME_EXISTS); } }
// Creating (id == null) with a username that is already taken must raise USER_USERNAME_EXISTS.
@Test
public void testValidateUsernameUnique_usernameExistsForCreate() {
    // Prepare parameters
    String username = randomString();
    // Mock data
    userMapper.insert(randomAdminUserDO(o -> o.setUsername(username)));
    // Call and assert the expected service exception
    assertServiceException(() -> userService.validateUsernameUnique(null, username), USER_USERNAME_EXISTS);
}
/**
 * Loads the named properties file through the given class loaders.
 * Delegates to the four-argument overload with both boolean flags set to
 * {@code false} — see that overload for the flags' exact semantics
 * (presumably multi-file/optional handling; confirm against its implementation).
 */
public static Properties loadProperties(Set<ClassLoader> classLoaders, String fileName) {
    return loadProperties(classLoaders, fileName, false, false);
}
// A non-existent properties file must load as an empty Properties object,
// regardless of the third (boolean) argument.
@Test
void testLoadPropertiesNoFile() throws Exception {
    Properties p = ConfigUtils.loadProperties(Collections.emptySet(), "notExisted", true);
    Properties expected = new Properties();
    assertEquals(expected, p);

    p = ConfigUtils.loadProperties(Collections.emptySet(), "notExisted", false);
    assertEquals(expected, p);
}
/**
 * One iteration of the DetectNewPartitions (DNP) splittable DoFn.
 * <p>
 * The very first restriction value (0) performs initialization: it sets the initial
 * watermark and either resumes from a previous pipeline or generates the initial
 * partitions. Every later iteration reads the persisted watermark, stops once the
 * (optional) end time has been reached, periodically recomputes and persists the
 * watermark, outputs ready NewPartitions, and — on watermark-update runs only —
 * reconciles missing partitions and cleans up orphaned metadata.
 */
@VisibleForTesting
public ProcessContinuation run(
    RestrictionTracker<OffsetRange, Long> tracker,
    OutputReceiver<PartitionRecord> receiver,
    ManualWatermarkEstimator<Instant> watermarkEstimator,
    InitialPipelineState initialPipelineState)
    throws Exception {
  LOG.debug("DNP: Watermark: " + watermarkEstimator.getState());
  LOG.debug("DNP: CurrentTracker: " + tracker.currentRestriction().getFrom());
  if (tracker.currentRestriction().getFrom() == 0L) {
    if (!tracker.tryClaim(0L)) {
      LOG.error(
          "Could not claim initial DetectNewPartition restriction. No partitions are outputted.");
      return ProcessContinuation.stop();
    }
    watermarkEstimator.setWatermark(initialPipelineState.getStartTime());
    if (initialPipelineState.isResume()) {
      resumeFromPreviousPipelineAction.run(receiver);
    } else {
      generateInitialPartitionsAction.run(receiver, initialPipelineState.getStartTime());
    }
    return ProcessContinuation.resume();
  }
  // Create a new partition reconciler every run to reset the state each time.
  partitionReconciler = new PartitionReconciler(metadataTableDao, metrics);
  orphanedMetadataCleaner = new OrphanedMetadataCleaner();

  // Calculating the new value of watermark is a resource intensive process. We have to do a full
  // scan of the metadata table and then ensure we're not missing partitions and then calculate
  // the low watermark. This is usually a fairly fast process even with thousands of partitions.
  // However, sometimes this may take so long that the runner checkpoints before the watermark is
  // calculated. Because the checkpoint takes place before tryClaim, this forces the DoFn to
  // restart, wasting the resources spent calculating the watermark. On restart, we will try to
  // calculate the watermark again. The problem causing the slow watermark calculation can persist
  // leading to a crash loop. In order to ensure we persist the calculated watermark, we calculate
  // the watermark after successful tryClaim. Then we write to the metadata table the new
  // watermark. On the start of each run we read the watermark and update the DoFn's watermark.
  DetectNewPartitionsState detectNewPartitionsState =
      metadataTableDao.readDetectNewPartitionsState();
  if (detectNewPartitionsState != null) {
    watermarkEstimator.setWatermark(detectNewPartitionsState.getWatermark());
  }

  // Terminate if endTime <= watermark that means all partitions have read up to or beyond
  // watermark. We no longer need to manage splits and merges, we can terminate.
  if (endTime != null && !watermarkEstimator.currentWatermark().isBefore(endTime)) {
    tracker.tryClaim(tracker.currentRestriction().getTo());
    return ProcessContinuation.stop();
  }

  if (!tracker.tryClaim(tracker.currentRestriction().getFrom())) {
    LOG.warn("DNP: Checkpointing, stopping this run: " + tracker.currentRestriction());
    return ProcessContinuation.stop();
  }

  // Read StreamPartitions to calculate watermark.
  List<StreamPartitionWithWatermark> streamPartitionsWithWatermark = null;
  if (shouldUpdateWatermark(tracker.currentRestriction().getFrom(), detectNewPartitionsState)) {
    streamPartitionsWithWatermark = metadataTableDao.readStreamPartitionsWithWatermark();
  }

  // Process NewPartitions and track the ones successfully outputted.
  List<NewPartition> newPartitions = metadataTableDao.readNewPartitions();
  List<ByteStringRange> outputtedNewPartitions = new ArrayList<>();
  for (NewPartition newPartition : newPartitions) {
    if (processNewPartitionsAction.processNewPartition(newPartition, receiver)) {
      outputtedNewPartitions.add(newPartition.getPartition());
    } else if (streamPartitionsWithWatermark != null) {
      // streamPartitionsWithWatermark is not null on runs that we update watermark. We only run
      // reconciliation when we update watermark. Only add incompleteNewPartitions if
      // reconciliation is being run
      partitionReconciler.addIncompleteNewPartitions(newPartition);
      orphanedMetadataCleaner.addIncompleteNewPartitions(newPartition);
    }
  }

  // Process the watermark using read StreamPartitions and NewPartitions.
  if (streamPartitionsWithWatermark != null) {
    Optional<Instant> maybeWatermark =
        getNewWatermark(streamPartitionsWithWatermark, newPartitions);
    maybeWatermark.ifPresent(metadataTableDao::updateDetectNewPartitionWatermark);
    // Only start reconciling after the pipeline has been running for a while.
    if (tracker.currentRestriction().getFrom() > 50) {
      // Using NewPartitions and StreamPartitions, evaluate partitions that are possibly not being
      // streamed. This isn't perfect because there may be partitions moving between
      // StreamPartitions and NewPartitions while scanning the metadata table. Also, this does not
      // include NewPartitions marked as deleted from a previous DNP run not yet processed by
      // RCSP.
      List<ByteStringRange> existingPartitions =
          streamPartitionsWithWatermark.stream()
              .map(StreamPartitionWithWatermark::getPartition)
              .collect(Collectors.toList());
      existingPartitions.addAll(outputtedNewPartitions);
      List<ByteStringRange> missingStreamPartitions =
          getMissingPartitionsFromEntireKeySpace(existingPartitions);
      orphanedMetadataCleaner.addMissingPartitions(missingStreamPartitions);
      partitionReconciler.addMissingPartitions(missingStreamPartitions);
      processReconcilerPartitions(
          receiver, watermarkEstimator, initialPipelineState.getStartTime());
      cleanUpOrphanedMetadata();
    }
  }

  return ProcessContinuation.resume().withResumeDelay(Duration.millis(100));
}
// End-to-end reconciliation scenario: with [a, b) missing from the key space and its
// "missing since" entry old enough, the DNP run that next updates the watermark must
// output a reconciled PartitionRecord for [a, b); runs that do not update the
// watermark must leave the missing-partition entry untouched.
@Test
public void testMissingPartitionReconciled() throws Exception {
    // We only start reconciling after 50.
    // We advance watermark on every 2 restriction tracker advancement
    OffsetRange offsetRange = new OffsetRange(52, Long.MAX_VALUE);
    when(tracker.currentRestriction()).thenReturn(offsetRange);
    when(tracker.tryClaim(offsetRange.getFrom())).thenReturn(true);

    // Write 2 partitions to the table, missing [a, b)
    ByteStringRange partitionEmptyA = ByteStringRange.create("", "a");
    Instant watermarkEmptyA = endTime.plus(Duration.millis(100));
    PartitionRecord partitionRecordEmptyA =
        new PartitionRecord(
            partitionEmptyA,
            watermarkEmptyA,
            UniqueIdGenerator.getNextId(),
            watermarkEmptyA,
            Collections.emptyList(),
            null);
    metadataTableDao.lockAndRecordPartition(partitionRecordEmptyA);
    ByteStringRange partitionBEmpty = ByteStringRange.create("b", "");
    Instant watermarkBEmpty = endTime.plus(Duration.millis(1));
    PartitionRecord partitionRecordBEmpty =
        new PartitionRecord(
            partitionBEmpty,
            watermarkBEmpty,
            UniqueIdGenerator.getNextId(),
            watermarkBEmpty,
            Collections.emptyList(),
            null);
    metadataTableDao.lockAndRecordPartition(partitionRecordBEmpty);

    HashMap<ByteStringRange, Instant> missingPartitionDurations = new HashMap<>();
    ByteStringRange partitionAB = ByteStringRange.create("a", "b");
    // Partition missing for 10 minutes less 1 second.
    missingPartitionDurations.put(
        partitionAB, Instant.now().minus(Duration.standardSeconds(20 * 60 - 1)));
    metadataTableDao.writeDetectNewPartitionMissingPartitions(missingPartitionDurations);

    // Since there's no NewPartition corresponding to the missing partition, we can't reconcile with
    // continuation tokens. In order to reconcile without continuation tokens, the partition needs
    // to have been missing for more than 10 minutes.
    assertEquals(
        DoFn.ProcessContinuation.resume().withResumeDelay(Duration.millis(100)),
        action.run(
            tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false)));
    verify(receiver, never()).outputWithTimestamp(any(), any());
    assertEquals(1, metadataTableDao.readDetectNewPartitionMissingPartitions().size());

    // Sleep for more than 1 second, enough that the missing partition needs to be reconciled.
    Thread.sleep(1001);

    // We advance the restriction tracker by 1. Because it is not a multiple of 2, we don't
    // evaluate missing partitions, which means we don't perform reconciliation.
    offsetRange = new OffsetRange(53, Long.MAX_VALUE);
    when(tracker.currentRestriction()).thenReturn(offsetRange);
    when(tracker.tryClaim(offsetRange.getFrom())).thenReturn(true);
    assertEquals(
        DoFn.ProcessContinuation.resume().withResumeDelay(Duration.millis(100)),
        action.run(
            tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false)));
    verify(receiver, never()).outputWithTimestamp(any(), any());
    assertEquals(1, metadataTableDao.readDetectNewPartitionMissingPartitions().size());

    // Multiple of 2, reconciliation should happen.
    offsetRange = new OffsetRange(54, Long.MAX_VALUE);
    when(tracker.currentRestriction()).thenReturn(offsetRange);
    when(tracker.tryClaim(offsetRange.getFrom())).thenReturn(true);
    assertEquals(
        DoFn.ProcessContinuation.resume().withResumeDelay(Duration.millis(100)),
        action.run(
            tracker, receiver, watermarkEstimator, new InitialPipelineState(startTime, false)));
    assertEquals(0, metadataTableDao.readDetectNewPartitionMissingPartitions().size());
    verify(receiver, times(1))
        .outputWithTimestamp(partitionRecordArgumentCaptor.capture(), eq(Instant.EPOCH));
    assertEquals(partitionAB, partitionRecordArgumentCaptor.getValue().getPartition());
    assertEquals(
        watermarkEstimator.currentWatermark(),
        partitionRecordArgumentCaptor.getValue().getParentLowWatermark());
    assertEquals(endTime, partitionRecordArgumentCaptor.getValue().getEndTime());
    assertNotNull(partitionRecordArgumentCaptor.getValue().getStartTime());
    // The startTime should be the startTime of the pipeline because it's less than 1 hour before
    // the low watermark.
    assertEquals(startTime, partitionRecordArgumentCaptor.getValue().getStartTime());
}
/**
 * Best-effort lookup of an activity's toolbar title.
 * <p>
 * Tencent's AssistActivity is special-cased to use the activity title directly.
 * Otherwise the framework ActionBar title is preferred (API >= HONEYCOMB); when no
 * framework ActionBar exists, the AppCompat support action bar is queried via
 * reflection so the SDK does not need a compile-time AppCompat dependency.
 * Returns {@code null} when no non-empty title can be determined; never throws.
 */
public static String getToolbarTitle(Activity activity) {
    try {
        String canonicalName = SnapCache.getInstance().getCanonicalName(activity.getClass());
        if ("com.tencent.connect.common.AssistActivity".equals(canonicalName)) {
            if (!TextUtils.isEmpty(activity.getTitle())) {
                return activity.getTitle().toString();
            }
            return null;
        }
        ActionBar actionBar = null;
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB) {
            actionBar = activity.getActionBar();
        }
        if (actionBar != null) {
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.HONEYCOMB) {
                if (!TextUtils.isEmpty(actionBar.getTitle())) {
                    return actionBar.getTitle().toString();
                }
            }
        } else {
            try {
                // Reflectively query AppCompatActivity.getSupportActionBar().getTitle().
                Class<?> appCompatActivityClass = compatActivity();
                if (appCompatActivityClass != null && appCompatActivityClass.isInstance(activity)) {
                    Method method = activity.getClass().getMethod("getSupportActionBar");
                    Object supportActionBar = method.invoke(activity);
                    if (supportActionBar != null) {
                        method = supportActionBar.getClass().getMethod("getTitle");
                        CharSequence charSequence = (CharSequence) method.invoke(supportActionBar);
                        if (charSequence != null) {
                            return charSequence.toString();
                        }
                    }
                }
            } catch (Exception e) {
                // Reflection may fail when AppCompat is absent or obfuscated; title lookup
                // is best-effort, so fall through to the null return.
                //ignored
            }
        }
    } catch (Exception e) {
        SALog.printStackTrace(e);
    }
    return null;
}
// With a plain Robolectric activity the resolved toolbar title is the activity's
// default label, i.e. its fully-qualified class name.
@Test
public void getToolbarTitle() {
    TestActivity activity = Robolectric.setupActivity(TestActivity.class);
    String title = SensorsDataUtils.getToolbarTitle(activity);
    System.out.println("ActivityTitle = " + title);
    Assert.assertEquals("com.sensorsdata.analytics.android.sdk.util.SensorsDataUtilsTest$TestActivity", title);
}
/**
 * Validates the GitHub App settings carried by the given ALM setting DTO.
 * Delegates field-by-field to the five-argument overload, which performs the
 * individual checks and builds the resulting configuration.
 */
public GithubAppConfiguration validate(AlmSettingDto almSettingDto) {
    return validate(almSettingDto.getAppId(), almSettingDto.getClientId(), almSettingDto.getClientSecret(), almSettingDto.getPrivateKey(), almSettingDto.getUrl());
}
// A null appId must be rejected with IllegalArgumentException("Missing appId").
@Test
public void github_validation_checks_missing_appId() {
    AlmSettingDto almSettingDto = new AlmSettingDto();
    almSettingDto.setAppId(null);

    assertThatThrownBy(() -> underTest.validate(almSettingDto))
        .isInstanceOf(IllegalArgumentException.class)
        .hasMessage("Missing appId");
}
/**
 * Returns the filesystem paths of the Maven project's resolved dependency artifacts,
 * in the order {@code project.getArtifacts()} yields them.
 */
@Override
public List<Path> getDependencies() {
    return project.getArtifacts().stream()
        .map(artifact -> artifact.getFile().toPath())
        .collect(Collectors.toList());
}
// The dependency list must contain exactly the project's resolved artifact paths,
// including snapshot and repository-resolved artifacts, in order.
@Test
public void testGetDependencies() throws URISyntaxException {
    assertThat(mavenProjectProperties.getDependencies())
        .containsExactly(
            getResource("maven/application/dependencies/library.jarC.jar"),
            getResource("maven/application/dependencies/libraryB.jar"),
            getResource("maven/application/dependencies/libraryA.jar"),
            getResource("maven/application/dependencies/more/dependency-1.0.0.jar"),
            getResource("maven/application/dependencies/another/one/dependency-1.0.0.jar"),
            testRepository.artifactPathOnDisk("com.test", "dependency", "1.0.0"),
            testRepository.artifactPathOnDisk("com.test", "dependencyX", "1.0.0-SNAPSHOT"));
}
/**
 * Creates a thread factory producing (optionally daemon) threads named after the
 * given pattern. When the pattern contains {@code %d}, it is formatted with a
 * per-factory counter starting at 1; otherwise every thread gets the pattern verbatim.
 *
 * @param pattern thread name or {@link String#format} pattern with a single {@code %d}
 * @param daemon  whether produced threads are daemon threads
 */
public static ThreadFactory createThreadFactory(final String pattern, final boolean daemon) {
    return new ThreadFactory() {
        private final AtomicLong threadEpoch = new AtomicLong(0);

        @Override
        public Thread newThread(Runnable r) {
            final String name = pattern.contains("%d")
                ? String.format(pattern, threadEpoch.incrementAndGet())
                : pattern;
            final Thread thread = new Thread(r, name);
            thread.setDaemon(daemon);
            return thread;
        }
    };
}
// A factory with a numbered name pattern must produce daemon threads named with an
// incrementing 1-based counter.
@Test
public void testThreadNameWithNumberDemon() {
    ThreadFactory localThreadFactory = ThreadUtils.createThreadFactory(THREAD_NAME_WITH_NUMBER, true);
    Thread daemonThread1 = localThreadFactory.newThread(EMPTY_RUNNABLE);
    Thread daemonThread2 = localThreadFactory.newThread(EMPTY_RUNNABLE);
    try {
        assertEquals(THREAD_NAME + "1", daemonThread1.getName());
        assertTrue(daemonThread1.isDaemon());
    } finally {
        try {
            daemonThread1.join();
        } catch (InterruptedException e) {
            // can be ignored
        }
    }
    try {
        assertEquals(THREAD_NAME + "2", daemonThread2.getName());
        assertTrue(daemonThread2.isDaemon());
    } finally {
        try {
            daemonThread2.join();
        } catch (InterruptedException e) {
            // can be ignored
        }
    }
}
/**
 * Extracts the output type information of a {@link MapFunction} given its input type.
 * Delegates to the four-argument overload with no function name and the trailing flag
 * set to {@code false} — see that overload for the flag's exact semantics.
 */
@PublicEvolving
public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes(
    MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) {
  return getMapReturnTypes(mapInterface, inType, null, false);
}
// Extracting the return type of a String -> Boolean RichMapFunction must yield
// BOOLEAN_TYPE_INFO.
@Test
void testFunction() {
    RichMapFunction<String, Boolean> mapInterface =
        new RichMapFunction<String, Boolean>() {
            private static final long serialVersionUID = 1L;

            @Override
            public void setRuntimeContext(RuntimeContext t) {}

            @Override
            public void open(OpenContext openContext) throws Exception {}

            @Override
            public RuntimeContext getRuntimeContext() {
                return null;
            }

            @Override
            public void close() throws Exception {}

            @Override
            public Boolean map(String record) throws Exception {
                return null;
            }
        };

    TypeInformation<?> ti =
        TypeExtractor.getMapReturnTypes(mapInterface, BasicTypeInfo.STRING_TYPE_INFO);
    assertThat(ti).isEqualTo(BasicTypeInfo.BOOLEAN_TYPE_INFO);
}
/**
 * Builds an IP-typed {@code NetworkEndpoint} proto for the given literal address.
 *
 * @param ipAddress a literal IPv4 or IPv6 address string
 * @throws IllegalArgumentException if the string is not a literal IP address
 */
public static NetworkEndpoint forIp(String ipAddress) {
    checkArgument(InetAddresses.isInetAddress(ipAddress), "'%s' is not an IP address.", ipAddress);
    return NetworkEndpoint.newBuilder()
        .setType(NetworkEndpoint.Type.IP)
        .setIpAddress(
            IpAddress.newBuilder()
                .setAddressFamily(ipAddressFamily(ipAddress))
                .setAddress(ipAddress))
        .build();
}
// A non-IP string must be rejected with IllegalArgumentException.
@Test
public void forIp_withInvalidIp_throwsIllegalArgumentException() {
    assertThrows(IllegalArgumentException.class, () -> NetworkEndpointUtils.forIp("abc"));
}
/**
 * Fetches all windowed rows for the key whose window start and end fall inside the
 * given bounds, via a Kafka Streams interactive WindowRangeQuery against the state
 * store (optionally bound to a position for consistency).
 *
 * @throws MaterializationException if the partition's query result is a failure or
 *         any unexpected error occurs; NotUpToBoundException is rethrown as-is.
 */
@Override
public KsMaterializedQueryResult<WindowedRow> get(
    final GenericKey key,
    final int partition,
    final Range<Instant> windowStart,
    final Range<Instant> windowEnd,
    final Optional<Position> position
) {
  try {
    final WindowRangeQuery<GenericKey, GenericRow> query = WindowRangeQuery.withKey(key);

    StateQueryRequest<KeyValueIterator<Windowed<GenericKey>, GenericRow>>
        request = inStore(stateStore.getStateStoreName()).withQuery(query);
    if (position.isPresent()) {
      request = request.withPositionBound(PositionBound.at(position.get()));
    }

    final StateQueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>>
        result = stateStore.getKafkaStreams().query(request);
    final QueryResult<KeyValueIterator<Windowed<GenericKey>, GenericRow>> queryResult =
        result.getPartitionResults().get(partition);

    if (queryResult.isFailure()) {
      throw failedQueryException(queryResult);
    }

    try (KeyValueIterator<Windowed<GenericKey>, GenericRow> it = queryResult.getResult()) {
      final Builder<WindowedRow> builder = ImmutableList.builder();

      while (it.hasNext()) {
        final KeyValue<Windowed<GenericKey>, GenericRow> next = it.next();
        final Window wnd = next.key.window();

        // Filter out windows whose start/end fall outside the requested bounds.
        if (!windowStart.contains(wnd.startTime())) {
          continue;
        }
        if (!windowEnd.contains(wnd.endTime())) {
          continue;
        }

        // Row time is the window end (epoch millis).
        final long rowTime = wnd.end();

        final WindowedRow row = WindowedRow.of(
            stateStore.schema(),
            next.key,
            next.value,
            rowTime
        );

        builder.add(row);
      }

      return KsMaterializedQueryResult.rowIteratorWithPosition(
          builder.build().iterator(), queryResult.getPosition());
    }
  } catch (final NotUpToBoundException | MaterializationException e) {
    throw e;
  } catch (final Exception e) {
    throw new MaterializationException("Failed to get value from materialized table", e);
  }
}
// A session window whose start lies inside the queried start bounds must be returned,
// with row time equal to the window end and the query position propagated.
@Test
public void shouldReturnValueIfSessionStartsBetweenBounds() {
    // Given:
    final Instant wend = UPPER_INSTANT.plusMillis(5);
    givenSingleSession(LOWER_INSTANT.plusMillis(1), wend);

    // When:
    final KsMaterializedQueryResult<WindowedRow> result =
        table.get(A_KEY, PARTITION, WINDOW_START_BOUNDS, Range.all());

    // Then:
    final Iterator<WindowedRow> rowIterator = result.getRowIterator();
    assertThat(rowIterator.hasNext(), is(true));
    assertThat(rowIterator.next(), is(
        WindowedRow.of(
            SCHEMA,
            sessionKey(LOWER_INSTANT.plusMillis(1), wend),
            A_VALUE,
            wend.toEpochMilli()
        )
    ));
    assertThat(result.getPosition(), not(Optional.empty()));
    assertThat(result.getPosition().get(), is(POSITION));
}
/**
 * Deletes all indexed documents whose {@code id} field matches any of the given ids,
 * refreshes the searcher so deletions become visible, then commits the index.
 * I/O failures are rethrown as unchecked exceptions.
 */
@Override
public void deleteDocument(Iterable<String> haloDocIds) {
    // Collect the ids into a single set query so one delete pass covers them all.
    var terms = new LinkedList<BytesRef>();
    for (String haloDocId : haloDocIds) {
        terms.add(new BytesRef(haloDocId));
    }
    try {
        this.indexWriter.deleteDocuments(new TermInSetQuery("id", terms));
        this.searcherManager.maybeRefreshBlocking();
        this.indexWriter.commit();
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
// Deleting a document must issue a delete query to the index writer, refresh the
// searcher manager and commit the index.
@Test
void shouldDeleteDocument() throws IOException {
    this.searchEngine.deleteDocument(List.of("fake-id"));
    verify(this.indexWriter).deleteDocuments(any(Query.class));
    verify(this.searcherManager).maybeRefreshBlocking();
    verify(this.indexWriter).commit();
}
/**
 * Parses game/trade/spam/friends-chat messages and records kill counts, personal
 * bests, duel-arena stats, Hallowed Sepulchre times, hunter rumours, pet drops and
 * Guardians of the Rift completions into the player's profile configuration.
 * Each regex is tried in turn against the raw message text.
 */
@Subscribe
public void onChatMessage(ChatMessage chatMessage) {
    // Only message types that can carry the tracked announcements are considered.
    if (chatMessage.getType() != ChatMessageType.TRADE
        && chatMessage.getType() != ChatMessageType.GAMEMESSAGE
        && chatMessage.getType() != ChatMessageType.SPAM
        && chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION)
    {
        return;
    }

    String message = chatMessage.getMessage();
    Matcher matcher = KILLCOUNT_PATTERN.matcher(message);
    if (matcher.find())
    {
        final String boss = matcher.group("boss");
        final int kc = Integer.parseInt(matcher.group("kc"));
        final String pre = matcher.group("pre");
        final String post = matcher.group("post");

        if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post))
        {
            unsetKc(boss);
            return;
        }

        String renamedBoss = KILLCOUNT_RENAMES
            .getOrDefault(boss, boss)
            // The config service doesn't support keys with colons in them
            .replace(":", "");
        // NOTE(review): reference comparison — this relies on getOrDefault and
        // String.replace returning the same instance when nothing changed; confirm
        // this is intentional rather than using !boss.equals(renamedBoss).
        if (boss != renamedBoss)
        {
            // Unset old TOB kc
            unsetKc(boss);
            unsetPb(boss);
            unsetKc(boss.replace(":", "."));
            unsetPb(boss.replace(":", "."));
            // Unset old story mode
            unsetKc("Theatre of Blood Story Mode");
            unsetPb("Theatre of Blood Story Mode");
        }

        setKc(renamedBoss, kc);
        // We either already have the pb, or need to remember the boss for the upcoming pb
        if (lastPb > -1)
        {
            log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb);

            if (renamedBoss.contains("Theatre of Blood"))
            {
                // TOB team size isn't sent in the kill message, but can be computed from varbits
                int tobTeamSize = tobTeamSize();
                lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players");
            }
            else if (renamedBoss.contains("Tombs of Amascut"))
            {
                // TOA team size isn't sent in the kill message, but can be computed from varbits
                int toaTeamSize = toaTeamSize();
                lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players");
            }

            final double pb = getPb(renamedBoss);
            // If a raid with a team size, only update the pb if it is lower than the existing pb
            // so that the pb is the overall lowest of any team size
            if (lastTeamSize == null || pb == 0 || lastPb < pb)
            {
                log.debug("Setting overall pb (old: {})", pb);
                setPb(renamedBoss, lastPb);
            }
            if (lastTeamSize != null)
            {
                log.debug("Setting team size pb: {}", lastTeamSize);
                setPb(renamedBoss + " " + lastTeamSize, lastPb);
            }

            lastPb = -1;
            lastTeamSize = null;
        }
        else
        {
            // Remember the boss so an upcoming pb message can be attributed to it.
            lastBossKill = renamedBoss;
            lastBossTime = client.getTickCount();
        }

        return;
    }

    matcher = DUEL_ARENA_WINS_PATTERN.matcher(message);
    if (matcher.find())
    {
        final int oldWins = getKc("Duel Arena Wins");
        final int wins = matcher.group(2).equals("one") ? 1 :
            Integer.parseInt(matcher.group(2).replace(",", ""));
        final String result = matcher.group(1);
        int winningStreak = getKc("Duel Arena Win Streak");
        int losingStreak = getKc("Duel Arena Lose Streak");
        if (result.equals("won") && wins > oldWins)
        {
            losingStreak = 0;
            winningStreak += 1;
        }
        else if (result.equals("were defeated"))
        {
            losingStreak += 1;
            winningStreak = 0;
        }
        else
        {
            log.warn("unrecognized duel streak chat message: {}", message);
        }

        setKc("Duel Arena Wins", wins);
        setKc("Duel Arena Win Streak", winningStreak);
        setKc("Duel Arena Lose Streak", losingStreak);
    }

    matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message);
    if (matcher.find())
    {
        int losses = matcher.group(1).equals("one") ? 1 :
            Integer.parseInt(matcher.group(1).replace(",", ""));

        setKc("Duel Arena Losses", losses);
    }

    matcher = KILL_DURATION_PATTERN.matcher(message);
    if (matcher.find())
    {
        matchPb(matcher);
    }

    matcher = NEW_PB_PATTERN.matcher(message);
    if (matcher.find())
    {
        matchPb(matcher);
    }

    matcher = RAIDS_PB_PATTERN.matcher(message);
    if (matcher.find())
    {
        matchPb(matcher);
    }

    matcher = RAIDS_DURATION_PATTERN.matcher(message);
    if (matcher.find())
    {
        matchPb(matcher);
    }

    matcher = HS_PB_PATTERN.matcher(message);
    if (matcher.find())
    {
        int floor = Integer.parseInt(matcher.group("floor"));
        String floortime = matcher.group("floortime");
        String floorpb = matcher.group("floorpb");
        String otime = matcher.group("otime");
        String opb = matcher.group("opb");

        // Prefer the explicit pb group; fall back to the raw time.
        String pb = MoreObjects.firstNonNull(floorpb, floortime);
        setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb));
        if (otime != null)
        {
            pb = MoreObjects.firstNonNull(opb, otime);
            setPb("Hallowed Sepulchre", timeStringToSeconds(pb));
        }
    }

    matcher = HS_KC_FLOOR_PATTERN.matcher(message);
    if (matcher.find())
    {
        int floor = Integer.parseInt(matcher.group(1));
        int kc = Integer.parseInt(matcher.group(2).replaceAll(",", ""));
        setKc("Hallowed Sepulchre Floor " + floor, kc);
    }

    matcher = HS_KC_GHC_PATTERN.matcher(message);
    if (matcher.find())
    {
        int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
        setKc("Hallowed Sepulchre", kc);
    }

    matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message);
    if (matcher.find())
    {
        int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
        setKc("Hunter Rumours", kc);
    }

    // The remembered boss only pairs with a pb message on the same tick.
    if (lastBossKill != null && lastBossTime != client.getTickCount())
    {
        lastBossKill = null;
        lastBossTime = -1;
    }

    matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message);
    if (matcher.find())
    {
        String item = matcher.group(1);
        int petId = findPet(item);

        if (petId != -1)
        {
            final List<Integer> petList = new ArrayList<>(getPetList());
            if (!petList.contains(petId))
            {
                log.debug("New pet added: {}/{}", item, petId);
                petList.add(petId);
                setPetList(petList);
            }
        }
    }

    matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message);
    if (matcher.find())
    {
        int kc = Integer.parseInt(matcher.group(1));
        setKc("Guardians of the Rift", kc);
    }
}
@Test
public void testGuardiansOfTheRift()
{
	// The game reports the total rift count on closing a rift; the plugin
	// should persist it under the "killcount" config group.
	final String message = "Amount of rifts you have closed: <col=ff0000>167</col>.";
	final ChatMessage event = new ChatMessage(null, GAMEMESSAGE, "", message, null, 0);

	chatCommandsPlugin.onChatMessage(event);

	verify(configManager).setRSProfileConfiguration("killcount", "guardians of the rift", 167);
}
/**
 * Generates {@code --count} random records conforming to the given schema
 * ({@code --schema} inline or {@code --schema-file}) and writes them as an
 * Avro data file to the single positional argument (filename or '-' for stdout).
 *
 * <p>Fixes over the previous version: {@code --count} is validated before the
 * output target is opened, so an invalid invocation no longer creates or
 * truncates the destination file; and the writer is closed via
 * try-with-resources even if appending a datum throws.
 *
 * @return 0 on success, 1 on usage error
 */
@SuppressWarnings("unchecked")
@Override
public int run(InputStream stdin, PrintStream out, PrintStream err, List<String> args) throws Exception {
  OptionParser p = new OptionParser();
  OptionSpec<Integer> count = p.accepts("count", "Record Count").withRequiredArg().ofType(Integer.class);
  OptionSpec<String> codec = Util.compressionCodecOption(p);
  OptionSpec<Integer> level = Util.compressionLevelOption(p);
  OptionSpec<String> file = p.accepts("schema-file", "Schema File").withOptionalArg().ofType(String.class);
  OptionSpec<String> inschema = p.accepts("schema", "Schema").withOptionalArg().ofType(String.class);
  OptionSpec<Long> seedOpt = p.accepts("seed", "Seed for random").withOptionalArg().ofType(Long.class);
  OptionSet opts = p.parse(args.toArray(new String[0]));

  if (opts.nonOptionArguments().size() != 1) {
    err.println("Usage: outFile (filename or '-' for stdout)");
    p.printHelpOn(err);
    return 1;
  }
  args = (List<String>) opts.nonOptionArguments();

  String schemastr = inschema.value(opts);
  String schemafile = file.value(opts);
  Long seed = seedOpt.value(opts);
  if (schemastr == null && schemafile == null) {
    err.println("Need input schema (--schema-file) or (--schema)");
    p.printHelpOn(err);
    return 1;
  }

  // Validate --count before touching the output target: previously the
  // writer was created first, truncating the destination even on bad args.
  Integer countValue = count.value(opts);
  if (countValue == null) {
    err.println("Need count (--count)");
    p.printHelpOn(err);
    return 1;
  }

  Schema schema = (schemafile != null) ? Util.parseSchemaFromFS(schemafile) : new Schema.Parser().parse(schemastr);

  // try-with-resources guarantees the writer (and underlying stream) is
  // closed/flushed even if appending a datum throws.
  try (DataFileWriter<Object> writer = new DataFileWriter<>(new GenericDatumWriter<>())) {
    writer.setCodec(Util.codecFactory(opts, codec, level));
    writer.create(schema, Util.fileOrStdout(args.get(0), out));
    RandomData rd = seed == null ? new RandomData(schema, countValue) : new RandomData(schema, countValue, seed);
    for (Object datum : rd)
      writer.append(datum);
  }
  return 0;
}
@Test
void stdOut() throws Exception {
  TestUtil.resetRandomSeed();
  run(Arrays.asList(
      "-",
      "--count", COUNT,
      "--schema-file", SCHEMA_FILE.toString(),
      "--seed", Long.toString(SEED)));

  // Replay the generator with the same seed and compare record-by-record
  // against what was written to stdout.
  byte[] written = out.toByteArray();
  DataFileStream<Object> stream =
      new DataFileStream<>(new ByteArrayInputStream(written), new GenericDatumReader<>());
  Iterator<Object> actual = stream.iterator();
  for (Object expected : new RandomData(schemaParser.parse(SCHEMA_FILE), Integer.parseInt(COUNT), SEED)) {
    assertEquals(expected, actual.next());
  }
  stream.close();
}
/**
 * Builds a NormalKey from a spec string of the form {@code ALGORITHM:BASE64_KEY}.
 * Only AES_128 is supported; the decoded key must be exactly 16 bytes.
 *
 * @throws IllegalArgumentException on malformed spec, unsupported algorithm,
 *         invalid Base64, or wrong key length
 */
public static NormalKey createFromSpec(String spec) {
    if (spec == null || !spec.contains(":")) {
        throw new IllegalArgumentException("Invalid spec format");
    }
    // Split on the first ':' only; the key material may itself contain ':'.
    int sep = spec.indexOf(':');
    String algorithmName = spec.substring(0, sep);
    String base64Key = spec.substring(sep + 1);

    EncryptionAlgorithmPB algorithm;
    if ("AES_128".equalsIgnoreCase(algorithmName)) {
        algorithm = EncryptionAlgorithmPB.AES_128;
    } else {
        throw new IllegalArgumentException("Unsupported algorithm: " + algorithmName);
    }

    byte[] plainKey;
    try {
        plainKey = Base64.getDecoder().decode(base64Key);
    } catch (IllegalArgumentException e) {
        throw new IllegalArgumentException("Invalid Base64 key", e);
    }
    if (plainKey.length != 16) {
        throw new IllegalArgumentException("Invalid key length " + plainKey.length * 8);
    }
    return new NormalKey(algorithm, plainKey, null);
}
@Test
public void testCreateFromSpec_InvalidKeyLength() {
    // 10 bytes (80 bits) must be rejected: AES_128 requires exactly 16 bytes.
    String shortKey = Base64.getEncoder().encodeToString(new byte[10]);
    assertThrows(IllegalArgumentException.class,
            () -> NormalKey.createFromSpec("AES_128:" + shortKey));
}
/**
 * Sets a bean property via its reflected setter.
 *
 * <p>If the setter is not accessible, accessibility is enabled for the call
 * and restored afterwards.
 *
 * @param bean  target object
 * @param name  property name (used to look up the setter)
 * @param clazz declared parameter type of the setter
 * @param value value to assign (may be null for reference types)
 * @throws Exception if no setter exists or invocation fails
 */
public static <T> void setProperty(Object bean, String name, Class<T> clazz, T value) throws Exception {
    Method setter = ReflectUtils.getPropertySetterMethod(bean.getClass(), name, clazz);
    if (setter.isAccessible()) {
        setter.invoke(bean, value);
        return;
    }
    try {
        setter.setAccessible(true);
        setter.invoke(bean, value);
    } finally {
        // Restore the original accessibility state.
        setter.setAccessible(false);
    }
}
/**
 * Covers string set/clear, primitive int with literal and boxed values,
 * and failure on an unknown property name.
 *
 * <p>Replaces the deprecated {@code new Integer(7000)} constructor with
 * {@link Integer#valueOf(int)}.
 */
@Test
public void testSetProperty() throws Exception {
    TestBean config = new TestBean();

    // String property: set and clear.
    BeanUtils.setProperty(config, "alias", String.class, "1111aaaa");
    Assert.assertEquals(config.getAlias(), "1111aaaa");
    BeanUtils.setProperty(config, "alias", String.class, null);
    Assert.assertNull(config.getAlias());

    // Primitive int property accepts both a literal and a boxed value.
    BeanUtils.setProperty(config, "heartbeat", int.class, 3000);
    Assert.assertEquals(config.getHeartbeat(), 3000);
    BeanUtils.setProperty(config, "heartbeat", int.class, Integer.valueOf(7000));
    Assert.assertEquals(config.getHeartbeat(), 7000);

    // Setting an unknown property must fail.
    boolean failed = false;
    try {
        BeanUtils.setProperty(config, "xxx", String.class, "1111aaaa");
    } catch (Exception e) {
        failed = true;
    }
    Assert.assertTrue(failed);
}
/**
 * Fetches the members of the given group via the group-members endpoint.
 *
 * @param gitlabUrl base URL of the GitLab instance
 * @param token     access token used for authentication
 * @param groupId   identifier of the group to query
 * @return the users returned by the endpoint
 */
public Set<GsonUser> getDirectGroupMembers(String gitlabUrl, String token, String groupId) {
  String endpoint = format(GITLAB_GROUPS_MEMBERS_ENDPOINT, groupId);
  return getMembers(gitlabUrl, token, endpoint);
}
@Test
public void getDirectGroupMembers_whenCallIsInError_rethrows() {
  // The client should propagate the underlying HTTP client failure untouched.
  String token = "token-toto";
  GitlabToken gitlabToken = new GitlabToken(token);
  IllegalStateException failure = new IllegalStateException("exception");
  when(gitlabPaginatedHttpClient.get(eq(gitlabUrl), eq(gitlabToken), eq("/groups/42/members"), any()))
    .thenThrow(failure);

  assertThatIllegalStateException()
    .isThrownBy(() -> underTest.getDirectGroupMembers(gitlabUrl, token, "42"))
    .withMessage("exception");
}
/**
 * Appends a property row to this panel.
 *
 * @param key   property key
 * @param label display label
 * @param value display value
 * @return this panel, for call chaining
 */
public PropertyPanel addProp(String key, String label, String value) {
    Prop row = new Prop(key, label, value);
    properties.add(row);
    return this;
}
@Test
public void objectValue() {
    basic();
    // Object values go through toString(); the optional regex argument strips
    // the matching characters from the rendered value.
    pp.addProp(KEY_A, KEY_A, new FooClass("a"));
    pp.addProp(KEY_B, KEY_B, new FooClass("bxyyzy"), "[xz]");
    validateProp(KEY_A, ">a<");
    validateProp(KEY_B, ">byyy<");
}
/**
 * Parses a serialized group key of the form {@code dataId+group} or
 * {@code dataId+group+tenant}, where literal '+' and '%' characters inside a
 * segment are escaped as "%2B" and "%25" respectively.
 *
 * <p>Fix: a truncated '%' escape at the end of the key (e.g. "%" or "a%2")
 * previously threw {@code StringIndexOutOfBoundsException} from
 * {@code charAt}; it now throws {@code IllegalArgumentException} like every
 * other malformed input.
 *
 * @param groupKey the encoded key; the group segment must be non-empty
 * @return a 3-element array {dataId, group, tenant}; dataId and tenant may be null
 * @throws IllegalArgumentException if the key has more than two '+' separators,
 *         a truncated or unknown '%' escape, or an empty group
 */
public static String[] parseKey(String groupKey) {
    StringBuilder sb = new StringBuilder();
    String dataId = null;
    String group = null;
    String tenant = null;

    for (int i = 0; i < groupKey.length(); ++i) {
        char c = groupKey.charAt(i);
        if ('+' == c) {
            if (null == dataId) {
                dataId = sb.toString();
                sb.setLength(0);
            } else if (null == group) {
                group = sb.toString();
                sb.setLength(0);
            } else {
                throw new IllegalArgumentException("invalid groupkey:" + groupKey);
            }
        } else if ('%' == c) {
            // An escape needs two more characters; reject a truncated one
            // instead of letting charAt throw StringIndexOutOfBoundsException.
            if (i + 2 >= groupKey.length()) {
                throw new IllegalArgumentException("invalid groupkey:" + groupKey);
            }
            char next = groupKey.charAt(++i);
            char nextnext = groupKey.charAt(++i);
            if ('2' == next && 'B' == nextnext) {
                sb.append('+');
            } else if ('2' == next && '5' == nextnext) {
                sb.append('%');
            } else {
                throw new IllegalArgumentException("invalid groupkey:" + groupKey);
            }
        } else {
            sb.append(c);
        }
    }

    // The trailing segment is the group when only one '+' was seen,
    // otherwise it is the tenant.
    if (isBlankSegment(group)) {
        group = sb.toString();
    } else {
        tenant = sb.toString();
    }

    if (group.length() == 0) {
        throw new IllegalArgumentException("invalid groupkey:" + groupKey);
    }
    return new String[] {dataId, group, tenant};
}

/** Equivalent of StringUtils.isBlank: true for null, empty, or whitespace-only. */
private static boolean isBlankSegment(String s) {
    if (s == null || s.isEmpty()) {
        return true;
    }
    for (int i = 0; i < s.length(); i++) {
        if (!Character.isWhitespace(s.charAt(i))) {
            return false;
        }
    }
    return true;
}
@Test void testParseKeyForPlusIllegalArgumentException() { assertThrows(IllegalArgumentException.class, () -> { GroupKey2.parseKey("+"); // Method is not expected to return due to exception thrown }); // Method is not expected to return due to exception thrown }
/**
 * Renders the expression as a string with literal values sanitized by
 * {@code StringSanitizer}.
 *
 * @param expr the expression to render
 * @return the sanitized string form
 */
public static String toSanitizedString(Expression expr) {
  StringSanitizer sanitizer = new StringSanitizer();
  return ExpressionVisitors.visit(expr, sanitizer);
}
@Test
public void zeroAndNegativeNumberHandling() {
  String sanitized =
      ExpressionUtil.toSanitizedString(
          Expressions.in(
              "test",
              0,
              -1,
              -100,
              Integer.MIN_VALUE,
              Integer.MAX_VALUE,
              -1234567891234.4d,
              Float.MAX_VALUE,
              Double.MAX_VALUE));

  assertThat(sanitized)
      .isEqualTo(
          "test IN ((1-digit-int), (1-digit-int), (3-digit-int), (10-digit-int), (10-digit-int), (13-digit-float), (39-digit-float), (309-digit-float))");
}