focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
@Override
public void onText(Keyboard.Key key, CharSequence text) {
    // Always forward the text event to the wrapped listener.
    mParentListener.listener().onText(key, text);
    // In one-shot mode a single event also dismisses the keyboard.
    if (mInOneShot) {
        mKeyboardDismissAction.run();
    }
}
@Test
public void testOnText() {
    final AnyKeyboard.AnyKey mockKey = Mockito.mock(AnyKeyboard.AnyKey.class);
    final String typedText = "text";

    mUnderTest.onText(mockKey, typedText);

    // The parent listener must receive the exact same instances, and no
    // dismiss action may run (not in one-shot mode here).
    final InOrder order = Mockito.inOrder(mMockParentListener, mMockKeyboardDismissAction);
    order.verify(mMockParentListener).onText(Mockito.same(mockKey), Mockito.same(typedText));
    order.verifyNoMoreInteractions();
}
@Override public void updateUserPassword(Long id, UserProfileUpdatePasswordReqVO reqVO) { // 校验旧密码密码 validateOldPassword(id, reqVO.getOldPassword()); // 执行更新 AdminUserDO updateObj = new AdminUserDO().setId(id); updateObj.setPassword(encodePassword(reqVO.getNewPassword())); // 加密密码 userMapper.updateById(updateObj); }
@Test public void testUpdateUserPassword_success() { // mock 数据 AdminUserDO dbUser = randomAdminUserDO(o -> o.setPassword("encode:tudou")); userMapper.insert(dbUser); // 准备参数 Long userId = dbUser.getId(); UserProfileUpdatePasswordReqVO reqVO = randomPojo(UserProfileUpdatePasswordReqVO.class, o -> { o.setOldPassword("tudou"); o.setNewPassword("yuanma"); }); // mock 方法 when(passwordEncoder.encode(anyString())).then( (Answer<String>) invocationOnMock -> "encode:" + invocationOnMock.getArgument(0)); when(passwordEncoder.matches(eq(reqVO.getOldPassword()), eq(dbUser.getPassword()))).thenReturn(true); // 调用 userService.updateUserPassword(userId, reqVO); // 断言 AdminUserDO user = userMapper.selectById(userId); assertEquals("encode:yuanma", user.getPassword()); }
@Override
public DistroData getDatumSnapshot(String targetServer) {
    // Refuse to contact a member that is unknown or unhealthy.
    Member member = memberManager.find(targetServer);
    if (checkTargetServerStatusUnhealthy(member)) {
        throw new DistroException(
                String.format("[DISTRO] Cancel get snapshot caused by target server %s unhealthy", targetServer));
    }
    DistroDataRequest snapshotRequest = new DistroDataRequest();
    snapshotRequest.setDataOperation(DataOperation.SNAPSHOT);
    try {
        Response response = clusterRpcClientProxy
                .sendRequest(member, snapshotRequest, DistroConfig.getInstance().getLoadDataTimeoutMillis());
        if (!checkResponse(response)) {
            throw new DistroException(
                    String.format("[DISTRO-FAILED] Get snapshot request to %s failed, code: %d, message: %s",
                            targetServer, response.getErrorCode(), response.getMessage()));
        }
        return ((DistroDataResponse) response).getDistroData();
    } catch (NacosException e) {
        // Preserve the transport failure as the cause.
        throw new DistroException("[DISTRO-FAILED] Get distro snapshot failed! ", e);
    }
}
@Test
void testGetDatumSnapshotForMemberNonExist() {
    // A member that cannot be found must make the snapshot request fail fast.
    assertThrows(DistroException.class, () -> transportAgent.getDatumSnapshot(member.getAddress()));
}
@Override public Path move(final Path file, final Path renamed, final TransferStatus status, final Delete.Callback callback, final ConnectionCallback connectionCallback) throws BackgroundException { try { final String target = new DefaultUrlProvider(session.getHost()).toUrl(renamed).find(DescriptiveUrl.Type.provider).getUrl(); if(session.getFeature(Lock.class) != null && status.getLockId() != null) { // Indicate that the client has knowledge of that state token session.getClient().move(new DAVPathEncoder().encode(file), file.isDirectory() ? String.format("%s/", target) : target, status.isExists(), Collections.singletonMap(HttpHeaders.IF, String.format("(<%s>)", status.getLockId()))); } else { session.getClient().move(new DAVPathEncoder().encode(file), file.isDirectory() ? String.format("%s/", target) : target, status.isExists()); } // Copy original file attributes return renamed.withAttributes(file.attributes()); } catch(SardineException e) { throw new DAVExceptionMappingService().map("Cannot rename {0}", e, file); } catch(IOException e) { throw new HttpExceptionMappingService().map(e, file); } }
@Test public void testMoveWithLock() throws Exception { final Path test = new DAVTouchFeature(session).touch(new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus()); String lock = null; try { lock = new DAVLockFeature(session).lock(test); } catch(InteroperabilityException e) { // Not supported } assertEquals(TransferStatus.UNKNOWN_LENGTH, test.attributes().getSize()); final Path target = new DAVMoveFeature(session).move(test, new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus().withLockId(lock), new Delete.DisabledCallback(), new DisabledConnectionCallback()); assertFalse(new DAVFindFeature(session).find(test)); assertTrue(new DAVFindFeature(session).find(target)); assertEquals(test.attributes(), target.attributes()); new DAVDeleteFeature(session).delete(Collections.<Path>singletonList(target), new DisabledLoginCallback(), new Delete.DisabledCallback()); }
@Override public Consumer createConsumer(Processor aProcessor) throws Exception { // validate that all of the endpoint is configured properly if (getMonitorType() != null) { if (!isPlatformServer()) { throw new IllegalArgumentException(ERR_PLATFORM_SERVER); } if (ObjectHelper.isEmpty(getObservedAttribute())) { throw new IllegalArgumentException(ERR_OBSERVED_ATTRIBUTE); } if (getMonitorType().equals("string")) { if (ObjectHelper.isEmpty(getStringToCompare())) { throw new IllegalArgumentException(ERR_STRING_TO_COMPARE); } if (!isNotifyDiffer() && !isNotifyMatch()) { throw new IllegalArgumentException(ERR_STRING_NOTIFY); } } else if (getMonitorType().equals("gauge")) { if (!isNotifyHigh() && !isNotifyLow()) { throw new IllegalArgumentException(ERR_GAUGE_NOTIFY); } if (getThresholdHigh() == null) { throw new IllegalArgumentException(ERR_THRESHOLD_HIGH); } if (getThresholdLow() == null) { throw new IllegalArgumentException(ERR_THRESHOLD_LOW); } } JMXMonitorConsumer answer = new JMXMonitorConsumer(this, aProcessor); configureConsumer(answer); return answer; } else { // shouldn't need any other validation. JMXConsumer answer = new JMXConsumer(this, aProcessor); configureConsumer(answer); return answer; } }
@Test
public void noObservedAttribute() throws Exception {
    // A string monitor endpoint without an observedAttribute must be rejected.
    JMXEndpoint endpoint = context.getEndpoint(
            "jmx:platform?objectDomain=FooDomain&objectName=theObjectName&monitorType=string",
            JMXEndpoint.class);
    try {
        endpoint.createConsumer(null);
        fail("expected exception");
    } catch (IllegalArgumentException e) {
        assertEquals(JMXEndpoint.ERR_OBSERVED_ATTRIBUTE, e.getMessage());
    }
}
/**
 * Returns whether two node values can be considered to have the same type.
 * A {@code null} on either side counts as "same type" (type unknown), and
 * non-null values match when one runtime class is assignable from the other.
 */
protected boolean nodesReturnsSameType(Object leftObject, Object rightObject) {
    if (leftObject == null || rightObject == null) {
        // Covers both-null and one-null; the original equals() fast path for
        // equal non-null values is subsumed below because equal objects share a class.
        return true;
    }
    Class<?> left = leftObject.getClass();
    Class<?> right = rightObject.getClass();
    // isAssignableFrom() is reflexive, so an explicit class-equality check is redundant.
    return left.isAssignableFrom(right) || right.isAssignableFrom(left);
}
@Test
void nodesReturnsSameType_True() {
    // null on either (or both) sides and same-class values are all "same type".
    assertThat(rangeFunction.nodesReturnsSameType(null, null))
            .withFailMessage("null - null").isTrue();
    assertThat(rangeFunction.nodesReturnsSameType("Hello", "world"))
            .withFailMessage("\"Hello\" - \"world\"").isTrue();
    assertThat(rangeFunction.nodesReturnsSameType(null, "world"))
            .withFailMessage("null - \"world\"").isTrue();
    assertThat(rangeFunction.nodesReturnsSameType(1, null))
            .withFailMessage("1 - null").isTrue();
}
@Override public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException { try { final BlobRequestOptions options = new BlobRequestOptions(); if(containerService.isContainer(folder)) { // Container name must be lower case. final CloudBlobContainer container = session.getClient().getContainerReference(containerService.getContainer(folder).getName()); container.create(options, context); return folder; } else { final EnumSet<Path.Type> type = EnumSet.copyOf(folder.getType()); type.add(Path.Type.placeholder); return new AzureTouchFeature(session, context).withWriter(writer).touch(folder.withType(type), status.withChecksum(writer.checksum(folder, status).compute(new NullInputStream(0L), status))); } } catch(URISyntaxException e) { throw new NotfoundException(e.getMessage(), e); } catch(StorageException e) { throw new AzureExceptionMappingService().map("Cannot create folder {0}", e, folder); } }
@Test
public void testCreateContainer() throws Exception {
    final AzureDirectoryFeature feature = new AzureDirectoryFeature(session, null);
    // Container names must be lower case.
    final Path container = feature.mkdir(
            new Path(new AlphanumericRandomStringService().random().toLowerCase(), EnumSet.of(Path.Type.directory)),
            new TransferStatus());
    assertTrue(new AzureFindFeature(session, null).find(container));
    // Creating the same container again must conflict.
    assertThrows(ConflictException.class, () -> feature.mkdir(container, new TransferStatus()));
    new AzureTouchFeature(session, null).touch(
            new Path(container, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)),
            new TransferStatus());
    new AzureDeleteFeature(session, null).delete(Collections.singletonList(container),
            new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse(new AzureFindFeature(session, null).find(container));
}
@Override
public Path mkdir(final Path folder, final TransferStatus status) throws BackgroundException {
    try {
        // Create on the local filesystem; failures (e.g. existing folder) are
        // translated to the backend exception hierarchy by the mapping service.
        Files.createDirectory(session.toPath(folder));
        return folder;
    }
    catch(IOException e) {
        throw new LocalExceptionMappingService().map("Cannot create folder {0}", e, folder);
    }
}
@Test
public void testCreateFolder() throws Exception {
    final LocalSession session = new LocalSession(
            new Host(new LocalProtocol(), new LocalProtocol().getDefaultHostname()));
    session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(),
            new DisabledLoginCallback(), new DisabledCancelCallback());
    session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
    final Path folder = new Path(new LocalHomeFinderFeature().find(),
            UUID.randomUUID().toString(), EnumSet.of(Path.Type.directory));
    new LocalDirectoryFeature(session).mkdir(folder, new TransferStatus());
    assertTrue(Files.exists(session.toPath(folder)));
    // Creating the same folder twice must surface as a conflict.
    assertThrows(ConflictException.class,
            () -> new LocalDirectoryFeature(session).mkdir(folder, new TransferStatus()));
    new LocalDeleteFeature(session).delete(Collections.singletonList(folder),
            new DisabledLoginCallback(), new Delete.DisabledCallback());
    assertFalse(Files.exists(session.toPath(folder)));
}
@Subscribe public void onChatMessage(ChatMessage chatMessage) { if (chatMessage.getType() != ChatMessageType.TRADE && chatMessage.getType() != ChatMessageType.GAMEMESSAGE && chatMessage.getType() != ChatMessageType.SPAM && chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION) { return; } String message = chatMessage.getMessage(); Matcher matcher = KILLCOUNT_PATTERN.matcher(message); if (matcher.find()) { final String boss = matcher.group("boss"); final int kc = Integer.parseInt(matcher.group("kc")); final String pre = matcher.group("pre"); final String post = matcher.group("post"); if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post)) { unsetKc(boss); return; } String renamedBoss = KILLCOUNT_RENAMES .getOrDefault(boss, boss) // The config service doesn't support keys with colons in them .replace(":", ""); if (boss != renamedBoss) { // Unset old TOB kc unsetKc(boss); unsetPb(boss); unsetKc(boss.replace(":", ".")); unsetPb(boss.replace(":", ".")); // Unset old story mode unsetKc("Theatre of Blood Story Mode"); unsetPb("Theatre of Blood Story Mode"); } setKc(renamedBoss, kc); // We either already have the pb, or need to remember the boss for the upcoming pb if (lastPb > -1) { log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb); if (renamedBoss.contains("Theatre of Blood")) { // TOB team size isn't sent in the kill message, but can be computed from varbits int tobTeamSize = tobTeamSize(); lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players"); } else if (renamedBoss.contains("Tombs of Amascut")) { // TOA team size isn't sent in the kill message, but can be computed from varbits int toaTeamSize = toaTeamSize(); lastTeamSize = toaTeamSize == 1 ? 
"Solo" : (toaTeamSize + " players"); } final double pb = getPb(renamedBoss); // If a raid with a team size, only update the pb if it is lower than the existing pb // so that the pb is the overall lowest of any team size if (lastTeamSize == null || pb == 0 || lastPb < pb) { log.debug("Setting overall pb (old: {})", pb); setPb(renamedBoss, lastPb); } if (lastTeamSize != null) { log.debug("Setting team size pb: {}", lastTeamSize); setPb(renamedBoss + " " + lastTeamSize, lastPb); } lastPb = -1; lastTeamSize = null; } else { lastBossKill = renamedBoss; lastBossTime = client.getTickCount(); } return; } matcher = DUEL_ARENA_WINS_PATTERN.matcher(message); if (matcher.find()) { final int oldWins = getKc("Duel Arena Wins"); final int wins = matcher.group(2).equals("one") ? 1 : Integer.parseInt(matcher.group(2).replace(",", "")); final String result = matcher.group(1); int winningStreak = getKc("Duel Arena Win Streak"); int losingStreak = getKc("Duel Arena Lose Streak"); if (result.equals("won") && wins > oldWins) { losingStreak = 0; winningStreak += 1; } else if (result.equals("were defeated")) { losingStreak += 1; winningStreak = 0; } else { log.warn("unrecognized duel streak chat message: {}", message); } setKc("Duel Arena Wins", wins); setKc("Duel Arena Win Streak", winningStreak); setKc("Duel Arena Lose Streak", losingStreak); } matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message); if (matcher.find()) { int losses = matcher.group(1).equals("one") ? 
1 : Integer.parseInt(matcher.group(1).replace(",", "")); setKc("Duel Arena Losses", losses); } matcher = KILL_DURATION_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = NEW_PB_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = RAIDS_PB_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = RAIDS_DURATION_PATTERN.matcher(message); if (matcher.find()) { matchPb(matcher); } matcher = HS_PB_PATTERN.matcher(message); if (matcher.find()) { int floor = Integer.parseInt(matcher.group("floor")); String floortime = matcher.group("floortime"); String floorpb = matcher.group("floorpb"); String otime = matcher.group("otime"); String opb = matcher.group("opb"); String pb = MoreObjects.firstNonNull(floorpb, floortime); setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb)); if (otime != null) { pb = MoreObjects.firstNonNull(opb, otime); setPb("Hallowed Sepulchre", timeStringToSeconds(pb)); } } matcher = HS_KC_FLOOR_PATTERN.matcher(message); if (matcher.find()) { int floor = Integer.parseInt(matcher.group(1)); int kc = Integer.parseInt(matcher.group(2).replaceAll(",", "")); setKc("Hallowed Sepulchre Floor " + floor, kc); } matcher = HS_KC_GHC_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1).replaceAll(",", "")); setKc("Hallowed Sepulchre", kc); } matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1).replaceAll(",", "")); setKc("Hunter Rumours", kc); } if (lastBossKill != null && lastBossTime != client.getTickCount()) { lastBossKill = null; lastBossTime = -1; } matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message); if (matcher.find()) { String item = matcher.group(1); int petId = findPet(item); if (petId != -1) { final List<Integer> petList = new ArrayList<>(getPetList()); if (!petList.contains(petId)) { log.debug("New pet added: {}/{}", item, petId); petList.add(petId); 
setPetList(petList); } } } matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message); if (matcher.find()) { int kc = Integer.parseInt(matcher.group(1)); setKc("Guardians of the Rift", kc); } }
@Test
public void testNightmarePb() {
    // First the kill-count message, then the pb message arriving afterwards.
    ChatMessage kcMessage = new ChatMessage(null, GAMEMESSAGE, "",
            "Your Nightmare kill count is: <col=ff0000>1130</col>", null, 0);
    chatCommandsPlugin.onChatMessage(kcMessage);

    ChatMessage pbMessage = new ChatMessage(null, GAMEMESSAGE, "",
            "Team size: <col=ff0000>5 players</col> Fight duration: <col=ff0000>3:28</col> (new personal best)",
            null, 0);
    chatCommandsPlugin.onChatMessage(pbMessage);

    verify(configManager).setRSProfileConfiguration("killcount", "nightmare", 1130);
    verify(configManager).setRSProfileConfiguration("personalbest", "nightmare", 3 * 60 + 28.0);
}
@SuppressWarnings("unchecked")
@Override
public <S extends StateStore> S getStateStore(final String name) {
    // Look up the global store and hand back a read-write facade over it.
    return (S) getReadWriteStore(stateManager.getGlobalStore(name));
}
@Test
public void shouldNotAllowInitForSessionStore() {
    when(stateManager.getGlobalStore(GLOBAL_SESSION_STORE_NAME)).thenReturn(mock(SessionStore.class));
    final StateStore sessionStore = globalContext.getStateStore(GLOBAL_SESSION_STORE_NAME);
    // The read-only wrapper must reject lifecycle calls such as init().
    try {
        sessionStore.init((StateStoreContext) null, null);
        fail("Should have thrown UnsupportedOperationException.");
    } catch (final UnsupportedOperationException expected) { }
}
@Override
public boolean isEnable() {
    // Exposes the cached enabled flag; set elsewhere in this class.
    return mEnable;
}
@Test
public void isEnable() {
    SAHelper.initSensors(mApplication);
    // After install() against a live context manager the protocol reports enabled.
    SAEncryptProtocolImpl protocol = new SAEncryptProtocolImpl();
    protocol.install(SensorsDataAPI.sharedInstance(mApplication).getSAContextManager());
    Assert.assertTrue(protocol.isEnable());
}
@Override
public boolean remove(Object o) {
    // RangeSet is immutable; fail fast as permitted by the Collection contract.
    throw new UnsupportedOperationException("RangeSet is immutable");
}
@Test(expected = UnsupportedOperationException.class)
public void remove() throws Exception {
    // Any removal attempt on the immutable set must be rejected.
    new RangeSet(4).remove(1);
}
/**
 * Looks up the binary protocol value handler registered for the given column type.
 *
 * @param binaryColumnType column type to resolve
 * @return the registered handler
 * @throws IllegalArgumentException if no handler is registered for the type
 */
public static PostgreSQLBinaryProtocolValue getBinaryProtocolValue(final BinaryColumnType binaryColumnType) {
    Preconditions.checkArgument(BINARY_PROTOCOL_VALUES.containsKey(binaryColumnType),
            "Cannot find PostgreSQL type '%s' in column type when process binary protocol value", binaryColumnType);
    return BINARY_PROTOCOL_VALUES.get(binaryColumnType);
}
@Test
void assertGetTimeBinaryProtocolValue() {
    // TIMESTAMP columns must resolve to the time binary protocol handler.
    assertThat(PostgreSQLBinaryProtocolValueFactory.getBinaryProtocolValue(PostgreSQLColumnType.TIMESTAMP),
            instanceOf(PostgreSQLTimeBinaryProtocolValue.class));
}
/**
 * Creates a match criterion for the IP ECN (explicit congestion notification) field.
 *
 * @param ipEcn ECN value to match
 * @return an {@code IPEcnCriterion} wrapping the value
 */
public static Criterion matchIPEcn(byte ipEcn) {
    return new IPEcnCriterion(ipEcn);
}
@Test
public void testMatchIPEcnMethod() {
    // The factory must produce an IP_ECN criterion carrying the given value.
    Criterion criterion = Criteria.matchIPEcn(ipEcn1);
    IPEcnCriterion ecnCriterion = checkAndConvert(criterion, Criterion.Type.IP_ECN, IPEcnCriterion.class);
    assertThat(ecnCriterion.ipEcn(), is(equalTo(ipEcn1)));
}
@Override
public void report(final List<DataPoint> dataPoints) {
    // Delegate each point to the single-point overload, in list order.
    for (final DataPoint dataPoint : dataPoints) {
        report(dataPoint);
    }
}
@Test public void shouldReturnNullForStalePoint() { // When: reporter.report(ImmutableList.of( new DataPoint( A_TIME, "baz", 123, ImmutableMap.of("foo", "bar") ) )); // Then: verify(metrics).addMetric(same(metricName), metricCaptor.capture()); assertThat( metricCaptor.getValue().value( null, A_TIME.plus(STALE_THRESHOLD.multipliedBy(2)).toEpochMilli()), nullValue() ); }
@Override
public void onLeaderInformationChange(String componentId, LeaderInformation leaderInformation) {
    synchronized (lock) {
        // Pair the incoming change with whatever information is currently confirmed.
        final LeaderInformation confirmed =
                confirmedLeaderInformation.forComponentIdOrEmpty(componentId);
        notifyLeaderInformationChangeInternal(componentId, leaderInformation, confirmed);
    }
}
@Test
void testAllLeaderInformationChangeIsIgnoredAfterLeaderElectionBeingClosed() throws Exception {
    // Changes reported after close must be ignored by the listener.
    testLeadershipChangeEventHandlingBeingIgnoredAfterLeaderElectionClose(
            (listener, ignoredComponentIds, externalStorage) ->
                    listener.onLeaderInformationChange(externalStorage));
}
public void run() {
    validate();
    // Abort silently when validation recorded any errors.
    if (super.getNotification().hasErrors()) {
        return;
    }
    LOGGER.info("Register worker in backend system");
}
@Test void runWithUnderageDOB() { RegisterWorkerDto workerDto = createValidWorkerDto(); workerDto.setDateOfBirth(LocalDate.now().minusYears(17)); // Under 18 workerDto.setupWorkerDto("name", "occupation", LocalDate.now().minusYears(17)); RegisterWorker registerWorker = new RegisterWorker(workerDto); // Run the registration process registerWorker.run(); // Verify that the notification contains the underage DOB error assertTrue(registerWorker.getNotification().hasErrors()); assertTrue(registerWorker.getNotification().getErrors().contains(RegisterWorkerDto.DOB_TOO_SOON)); assertEquals(registerWorker.getNotification().getErrors().size(), 1); }
// Convenience overload: delegates to the named variant with an empty name.
@Override
public <VR> KTable<K, VR> aggregate(final Initializer<VR> initializer,
                                    final Aggregator<? super K, ? super V, VR> aggregator,
                                    final Materialized<K, VR, KeyValueStore<Bytes, byte[]>> materialized) {
    return aggregate(initializer, aggregator, NamedInternal.empty(), materialized);
}
@Test
public void shouldAggregateWithDefaultSerdes() {
    final MockApiProcessorSupplier<String, String, Void, Void> processorSupplier =
            new MockApiProcessorSupplier<>();
    groupedStream
            .aggregate(MockInitializer.STRING_INIT, MockAggregator.TOSTRING_ADDER)
            .toStream()
            .process(processorSupplier);

    try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) {
        processData(driver);
        // Each key accumulates its values onto the "0" seed with the last timestamp seen.
        final var lastPerKey = processorSupplier.theCapturedProcessor().lastValueAndTimestampPerKey();
        assertThat(lastPerKey.get("1"), equalTo(ValueAndTimestamp.make("0+A+C+D", 10L)));
        assertThat(lastPerKey.get("2"), equalTo(ValueAndTimestamp.make("0+B", 1L)));
        assertThat(lastPerKey.get("3"), equalTo(ValueAndTimestamp.make("0+E+F", 9L)));
    }
}
@Override
public Serializable read(final MySQLBinlogColumnDef columnDef, final MySQLPacketPayload payload) {
    // YEAR is stored as a one-byte offset from 1900; zero means the special "zero year".
    final int yearOffset = payload.readInt1();
    if (0 == yearOffset) {
        return MySQLTimeValueUtils.YEAR_OF_ZERO;
    }
    return Integer.toString(yearOffset + 1900);
}
@Test
void assertRead() {
    // Raw byte 1 decodes to the year 1901 (1900 + offset).
    when(payload.readInt1()).thenReturn(1);
    assertThat(new MySQLYearBinlogProtocolValue().read(columnDef, payload), is("1901"));
}
@Override
public Map<String, PluginMetadataSummary> test(ViewDTO view) {
    // Delegate to the search-level check; a dangling search reference is a bug.
    return searchDbService.get(view.searchId())
            .map(searchRequiresParameterSupport::test)
            .orElseThrow(() -> new IllegalStateException(
                    "Search " + view.searchId() + " for view " + view + " is missing."));
}
@Test
public void returnsParameterCapabilityIfViewDoesHaveParameters() {
    // A search with a declared parameter must surface the "parameters" capability.
    final Search search = Search.builder().parameters(ImmutableSet.of(
            ValueParameter.builder()
                    .name("foo")
                    .dataType("any")
                    .build()
    )).build();

    when(searchDbService.get("searchId")).thenReturn(Optional.of(search));

    final Map<String, PluginMetadataSummary> result = this.requiresParameterSupport.test(view);

    // FIX: use the diamond operator instead of a raw AbstractMap.SimpleEntry,
    // which produced an unchecked-conversion warning.
    assertThat(result).containsExactly(
            new AbstractMap.SimpleEntry<>("parameters", new EnterpriseMetadataSummary())
    );
}
public static long getContentLength(HttpMessage message) { String value = message.headers().get(HttpHeaderNames.CONTENT_LENGTH); if (value != null) { return Long.parseLong(value); } // We know the content length if it's a Web Socket message even if // Content-Length header is missing. long webSocketContentLength = getWebSocketContentLength(message); if (webSocketContentLength >= 0) { return webSocketContentLength; } // Otherwise we don't. throw new NumberFormatException("header not found: " + HttpHeaderNames.CONTENT_LENGTH); }
@Test public void testGetContentLengthThrowsNumberFormatException() { final HttpMessage message = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); message.headers().set(HttpHeaderNames.CONTENT_LENGTH, "bar"); try { HttpUtil.getContentLength(message); fail(); } catch (final NumberFormatException e) { // a number format exception is expected here } }
@Override
public long nextDelayDuration(int reconsumeTimes) {
    // Clamp negative attempt counts to zero, skip the first two table slots,
    // and saturate at the last configured delay.
    final int attempts = Math.max(reconsumeTimes, 0);
    final int index = Math.min(attempts + 2, next.length - 1);
    return next[index];
}
@Test
public void testNextDelayDurationOutOfRange() {
    CustomizedRetryPolicy policy = new CustomizedRetryPolicy();
    // Negative attempts clamp to the first effective delay (10 seconds).
    assertThat(policy.nextDelayDuration(-1)).isEqualTo(TimeUnit.SECONDS.toMillis(10));
    // Very large attempts saturate at the last delay (2 hours).
    assertThat(policy.nextDelayDuration(100)).isEqualTo(TimeUnit.HOURS.toMillis(2));
}
/**
 * Decodes a raw GELF payload into a {@code Message}: parses the JSON, validates it,
 * resolves the timestamp, maps the well-known GELF fields, and copies any additional
 * fields (with the leading underscore stripped) onto the message.
 *
 * @throws IllegalStateException if the payload is not parseable JSON
 * @throws IllegalArgumentException if the parsed JSON is not a valid GELF message
 */
@Nullable
@Override
public Message decode(@Nonnull final RawMessage rawMessage) {
    // Decompress (if needed) and extract the JSON text of the GELF payload.
    final GELFMessage gelfMessage = new GELFMessage(rawMessage.getPayload(), rawMessage.getRemoteAddress());
    final String json = gelfMessage.getJSON(decompressSizeLimit, charset);

    final JsonNode node;

    try {
        node = objectMapper.readTree(json);
        if (node == null) {
            throw new IOException("null result");
        }
    } catch (final Exception e) {
        log.error("Could not parse JSON, first 400 characters: " + StringUtils.abbreviate(json, 403), e);
        throw new IllegalStateException("JSON is null/could not be parsed (invalid JSON)", e);
    }

    try {
        validateGELFMessage(node, rawMessage.getId(), rawMessage.getRemoteAddress());
    } catch (IllegalArgumentException e) {
        log.trace("Invalid GELF message <{}>", node);
        throw e;
    }

    // Timestamp.
    final double messageTimestamp = timestampValue(node);
    final DateTime timestamp;
    if (messageTimestamp <= 0) {
        // No usable client timestamp; fall back to the receive time.
        timestamp = rawMessage.getTimestamp();
    } else {
        // we treat this as a unix timestamp
        timestamp = Tools.dateTimeFromDouble(messageTimestamp);
    }

    final Message message = messageFactory.createMessage(
            stringValue(node, "short_message"),
            stringValue(node, "host"),
            timestamp
    );

    message.addField(Message.FIELD_FULL_MESSAGE, stringValue(node, "full_message"));

    final String file = stringValue(node, "file");

    if (file != null && !file.isEmpty()) {
        message.addField("file", file);
    }

    final long line = longValue(node, "line");

    if (line > -1) {
        message.addField("line", line);
    }

    // Level is set by server if not specified by client.
    final int level = intValue(node, "level");

    if (level > -1) {
        message.addField("level", level);
    }

    // Facility is set by server if not specified by client.
    final String facility = stringValue(node, "facility");

    if (facility != null && !facility.isEmpty()) {
        message.addField("facility", facility);
    }

    // Add additional data if there is some.
    final Iterator<Map.Entry<String, JsonNode>> fields = node.fields();

    while (fields.hasNext()) {
        final Map.Entry<String, JsonNode> entry = fields.next();

        String key = entry.getKey();

        // Do not index useless GELF "version" field.
        if ("version".equals(key)) {
            continue;
        }

        // Don't include GELF syntax underscore in message field key.
        if (key.startsWith("_") && key.length() > 1) {
            key = key.substring(1);
        }

        // We already set short_message and host as message and source. Do not add as fields again.
        if ("short_message".equals(key) || "host".equals(key)) {
            continue;
        }

        // Skip standard or already set fields.
        if (message.getField(key) != null || Message.RESERVED_FIELDS.contains(key) && !Message.RESERVED_SETTABLE_FIELDS.contains(key)) {
            continue;
        }

        // Convert JSON containers to Strings, and pick a suitable number representation.
        final JsonNode value = entry.getValue();

        final Object fieldValue;
        if (value.isContainerNode()) {
            fieldValue = value.toString();
        } else if (value.isFloatingPointNumber()) {
            fieldValue = value.asDouble();
        } else if (value.isIntegralNumber()) {
            fieldValue = value.asLong();
        } else if (value.isNull()) {
            log.debug("Field [{}] is NULL. Skipping.", key);
            continue;
        } else if (value.isTextual()) {
            fieldValue = value.asText();
        } else {
            log.debug("Field [{}] has unknown value type. Skipping.", key);
            continue;
        }

        message.addField(key, fieldValue);
    }

    return message;
}
@Test
public void decodeSucceedsWithoutHost() throws Exception {
    // A GELF message missing the "host" field must still decode successfully.
    final String json = "{"
            + "\"version\": \"1.1\","
            + "\"short_message\": \"A short message that helps you identify what is going on\""
            + "}";
    final RawMessage rawMessage = new RawMessage(json.getBytes(StandardCharsets.UTF_8));
    assertThat(codec.decode(rawMessage)).isNotNull();
}
@Override
public PageData<WidgetsBundle> findSystemWidgetsBundles(WidgetsBundleFilter widgetsBundleFilter, PageLink pageLink) {
    // Same paging/text-search arguments either way; only the repository query differs.
    final String textSearch = pageLink.getTextSearch();
    final var pageable = DaoUtil.toPageable(pageLink);
    return DaoUtil.toPageData(widgetsBundleFilter.isFullSearch()
            ? widgetsBundleRepository.findSystemWidgetsBundlesFullSearch(NULL_UUID, textSearch, pageable)
            : widgetsBundleRepository.findSystemWidgetsBundles(NULL_UUID, textSearch, pageable));
}
@Test
public void testFindSystemWidgetsBundlesFullSearch() {
    // Fixture: 30 system bundles, sorted by title for stable indexing below.
    createSystemWidgetBundles(30, "WB_");
    widgetsBundles = widgetsBundleDao.find(TenantId.SYS_TENANT_ID).stream().sorted(Comparator.comparing(WidgetsBundle::getTitle)).collect(Collectors.toList());
    assertEquals(30, widgetsBundles.size());

    // Two widget types with distinct names/descriptions/tags to search against.
    var widgetType1 = createAndSaveWidgetType(TenantId.SYS_TENANT_ID,1, "Test widget type 1", "This is the widget type 1", new String[]{"tag1", "Tag2", "TEST_TAG"});
    var widgetType2 = createAndSaveWidgetType(TenantId.SYS_TENANT_ID,2, "Test widget type 2", "This is the widget type 2", new String[]{"tag3", "Tag5", "TEST_Tag2"});

    // Attach type 1 to bundle #10, type 2 to bundle #15, and both to bundle #28.
    var widgetsBundle1 = widgetsBundles.get(10);
    widgetTypeDao.saveWidgetsBundleWidget(new WidgetsBundleWidget(widgetsBundle1.getId(), widgetType1.getId(), 0));
    var widgetsBundle2 = widgetsBundles.get(15);
    widgetTypeDao.saveWidgetsBundleWidget(new WidgetsBundleWidget(widgetsBundle2.getId(), widgetType2.getId(), 0));
    var widgetsBundle3 = widgetsBundles.get(28);
    widgetTypeDao.saveWidgetsBundleWidget(new WidgetsBundleWidget(widgetsBundle3.getId(), widgetType1.getId(), 0));
    widgetTypeDao.saveWidgetsBundleWidget(new WidgetsBundleWidget(widgetsBundle3.getId(), widgetType2.getId(), 1));

    // Full search by widget type name matches the bundles containing type 1.
    PageLink pageLink = new PageLink(10, 0, "widget type 1", new SortOrder("title"));
    PageData<WidgetsBundle> widgetsBundles1 = widgetsBundleDao.findSystemWidgetsBundles(WidgetsBundleFilter.fullSearchFromTenantId(TenantId.SYS_TENANT_ID), pageLink);
    assertEquals(2, widgetsBundles1.getData().size());
    assertEquals(widgetsBundle1, widgetsBundles1.getData().get(0));
    assertEquals(widgetsBundle3, widgetsBundles1.getData().get(1));

    // Full search by the second type's name matches its bundles.
    pageLink = new PageLink(10, 0, "Test widget type 2", new SortOrder("title"));
    PageData<WidgetsBundle> widgetsBundles2 = widgetsBundleDao.findSystemWidgetsBundles(WidgetsBundleFilter.fullSearchFromTenantId(TenantId.SYS_TENANT_ID), pageLink);
    assertEquals(2, widgetsBundles2.getData().size());
    assertEquals(widgetsBundle2, widgetsBundles2.getData().get(0));
    assertEquals(widgetsBundle3, widgetsBundles2.getData().get(1));

    // Full search also matches on tags ("TAG1"), even among unrelated words.
    pageLink = new PageLink(10, 0, "ppp Fd v TAG1 tt", new SortOrder("title"));
    PageData<WidgetsBundle> widgetsBundles3 = widgetsBundleDao.findSystemWidgetsBundles(WidgetsBundleFilter.fullSearchFromTenantId(TenantId.SYS_TENANT_ID), pageLink);
    assertEquals(2, widgetsBundles3.getData().size());
    assertEquals(widgetsBundle1, widgetsBundles3.getData().get(0));
    assertEquals(widgetsBundle3, widgetsBundles3.getData().get(1));
}
/**
 * Creates a ParDoFn for a combine step. When {@code cloudUserFn} is null the step has
 * no user combine fn (e.g. a raw grouping step); otherwise the serialized combine fn
 * is deserialized and wired up with its side inputs and step context.
 */
public static <K, InputT, AccumT> ParDoFn create(
    PipelineOptions options,
    KvCoder<K, ?> inputElementCoder,
    @Nullable CloudObject cloudUserFn,
    @Nullable List<SideInputInfo> sideInputInfos,
    List<Receiver> receivers,
    DataflowExecutionContext<?> executionContext,
    DataflowOperationContext operationContext)
    throws Exception {
  AppliedCombineFn<K, InputT, AccumT, ?> combineFn;
  SideInputReader sideInputReader;
  StepContext stepContext;
  if (cloudUserFn == null) {
    // No user fn: empty side inputs and no step context are needed.
    combineFn = null;
    sideInputReader = NullSideInputReader.empty();
    stepContext = null;
  } else {
    // Deserialize the user combine fn shipped in the cloud object.
    Object deserializedFn =
        SerializableUtils.deserializeFromByteArray(
            getBytes(cloudUserFn, PropertyNames.SERIALIZED_FN), "serialized combine fn");
    // Cast is unchecked because the serialized payload carries no type information.
    @SuppressWarnings("unchecked")
    AppliedCombineFn<K, InputT, AccumT, ?> combineFnUnchecked =
        ((AppliedCombineFn<K, InputT, AccumT, ?>) deserializedFn);
    combineFn = combineFnUnchecked;

    sideInputReader =
        executionContext.getSideInputReader(
            sideInputInfos, combineFn.getSideInputViews(), operationContext);
    stepContext = executionContext.getStepContext(operationContext);
  }
  return create(
      options, inputElementCoder, combineFn, sideInputReader, receivers.get(0), stepContext);
}
// End-to-end check of the partial group-by-key ParDoFn: values are buffered per key
// and emitted as grouped iterables at finishBundle; counters reflect the number and
// mean byte size of the emitted elements.
@Test
public void testPartialGroupByKey() throws Exception {
  Coder keyCoder = StringUtf8Coder.of();
  Coder valueCoder = BigEndianIntegerCoder.of();
  TestOutputReceiver receiver =
      new TestOutputReceiver(
          new ElementByteSizeObservableCoder(
              WindowedValue.getValueOnlyCoder(
                  KvCoder.of(keyCoder, IterableCoder.of(valueCoder)))),
          counterSet,
          NameContextsForTests.nameContextForTest());
  ParDoFn pgbkParDoFn =
      new SimplePartialGroupByKeyParDoFn(
          GroupingTables.buffering(
              new WindowingCoderGroupingKeyCreator(keyCoder),
              PairInfo.create(),
              new CoderSizeEstimator(WindowedValue.getValueOnlyCoder(keyCoder)),
              new CoderSizeEstimator(valueCoder)),
          receiver);
  pgbkParDoFn.startBundle(receiver);
  pgbkParDoFn.processElement(WindowedValue.valueInGlobalWindow(KV.of("hi", 4)));
  pgbkParDoFn.processElement(WindowedValue.valueInGlobalWindow(KV.of("there", 5)));
  pgbkParDoFn.processElement(WindowedValue.valueInGlobalWindow(KV.of("hi", 6)));
  pgbkParDoFn.processElement(WindowedValue.valueInGlobalWindow(KV.of("joe", 7)));
  pgbkParDoFn.processElement(WindowedValue.valueInGlobalWindow(KV.of("there", 8)));
  pgbkParDoFn.processElement(WindowedValue.valueInGlobalWindow(KV.of("hi", 9)));
  pgbkParDoFn.finishBundle();
  // Grouping is per key; output order is unspecified.
  assertThat(
      receiver.outputElems,
      IsIterableContainingInAnyOrder.<Object>containsInAnyOrder(
          WindowedValue.valueInGlobalWindow(KV.of("hi", Arrays.asList(4, 6, 9))),
          WindowedValue.valueInGlobalWindow(KV.of("there", Arrays.asList(5, 8))),
          WindowedValue.valueInGlobalWindow(KV.of("joe", Arrays.asList(7)))));
  // Exact counter values depend on size of encoded data. If encoding
  // changes, then these expected counters should change to match.
  CounterUpdateExtractor<?> updateExtractor = Mockito.mock(CounterUpdateExtractor.class);
  counterSet.extractUpdates(false, updateExtractor);
  verify(updateExtractor).longSum(getObjectCounterName("test_receiver_out"), false, 3L);
  verify(updateExtractor)
      .longMean(
          getMeanByteCounterName("test_receiver_out"),
          false,
          LongCounterMean.ZERO.addValue(49L, 3));
  verifyNoMoreInteractions(updateExtractor);
}
@SuppressFBWarnings(value = "RV_RETURN_VALUE_OF_PUTIFABSENT_IGNORED") public int encode(CacheScope scopeInfo) { if (!mScopeToId.containsKey(scopeInfo)) { synchronized (this) { if (!mScopeToId.containsKey(scopeInfo)) { // NOTE: If update mScopeToId ahead of updating mIdToScope, // we may read a null scope info in decode. int id = mNextId; Preconditions.checkArgument(id < mMaxNumScopes, "too many scopes in shadow cache"); mNextId++; mScopeToId.putIfAbsent(scopeInfo, id); // if we use mScopeToID.put() here, // we will get the findBug's error: the hashmap may not be atomic. } } } return mScopeToId.get(scopeInfo) & mScopeMask; }
@Test public void testBasic() { int id = mScopeEncoder.encode(SCOPE1); assertEquals(0, id); }
@Override public ApiResult<CoordinatorKey, ProducerIdAndEpoch> handleSingleResponse( Node broker, CoordinatorKey key, AbstractResponse abstractResponse ) { InitProducerIdResponse response = (InitProducerIdResponse) abstractResponse; Errors error = Errors.forCode(response.data().errorCode()); if (error != Errors.NONE) { return handleError(key, error); } Map<CoordinatorKey, ProducerIdAndEpoch> completed = Collections.singletonMap(key, new ProducerIdAndEpoch( response.data().producerId(), response.data().producerEpoch() )); return new ApiResult<>(completed, Collections.emptyMap(), Collections.emptyList()); }
@Test public void testHandleSuccessfulResponse() { String transactionalId = "foo"; CoordinatorKey key = CoordinatorKey.byTransactionalId(transactionalId); FenceProducersHandler handler = new FenceProducersHandler(options, logContext, requestTimeoutMs); short epoch = 57; long producerId = 7; InitProducerIdResponse response = new InitProducerIdResponse(new InitProducerIdResponseData() .setProducerEpoch(epoch) .setProducerId(producerId)); ApiResult<CoordinatorKey, ProducerIdAndEpoch> result = handler.handleSingleResponse( node, key, response); assertEquals(emptyList(), result.unmappedKeys); assertEquals(emptyMap(), result.failedKeys); assertEquals(singleton(key), result.completedKeys.keySet()); ProducerIdAndEpoch expected = new ProducerIdAndEpoch(producerId, epoch); assertEquals(expected, result.completedKeys.get(key)); }
String replaceServerNodeUri(String existingTableName) { if (_serverNodeUri == null) { return existingTableName; } int index = existingTableName.indexOf(SERVER_NODE_URI_PREFIX_TABLENAME_SEPARATOR); if (index == -1 || index == 0 || index == existingTableName.length() - 1) { throw new RuntimeException("Unexpected name format for name: " + existingTableName); } return _serverNodeUri + SERVER_NODE_URI_PREFIX_TABLENAME_SEPARATOR + existingTableName.substring(index + 1); }
@Test public void testReplaceServerNodeUriWithoutServerNodeUri() { String name = "https://SomeOldHostName:100/SomeOtherService|SomeOtherPrefix-1000"; SymbolTableNameHandler handler = new SymbolTableNameHandler("Prefix", null); String replacedName = handler.replaceServerNodeUri(name); Assert.assertEquals(replacedName, name); }
@Override public AppendFiles appendManifest(ManifestFile manifest) { Preconditions.checkArgument( !manifest.hasExistingFiles(), "Cannot append manifest with existing files"); Preconditions.checkArgument( !manifest.hasDeletedFiles(), "Cannot append manifest with deleted files"); Preconditions.checkArgument( manifest.snapshotId() == null || manifest.snapshotId() == -1, "Snapshot id must be assigned during commit"); Preconditions.checkArgument( manifest.sequenceNumber() == -1, "Sequence must be assigned during commit"); add(manifest); return this; }
@TestTemplate public void testInvalidAppendManifest() throws IOException { assertThat(listManifestFiles()).isEmpty(); TableMetadata base = readMetadata(); assertThat(base.currentSnapshot()).isNull(); ManifestFile manifestWithExistingFiles = writeManifest("manifest-file-1.avro", manifestEntry(Status.EXISTING, null, FILE_A)); assertThatThrownBy( () -> commit(table, table.newAppend().appendManifest(manifestWithExistingFiles), branch)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot append manifest with existing files"); assertThat(readMetadata().lastSequenceNumber()).isEqualTo(0); ManifestFile manifestWithDeletedFiles = writeManifest("manifest-file-2.avro", manifestEntry(Status.DELETED, null, FILE_A)); assertThatThrownBy( () -> commit(table, table.newAppend().appendManifest(manifestWithDeletedFiles), branch)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot append manifest with deleted files"); assertThat(readMetadata().lastSequenceNumber()).isEqualTo(0); }
private static ConfigResource topicConfigResource(String tn) { return new ConfigResource(ConfigResource.Type.TOPIC, tn); }
@Test public void shouldHandleInterruptedExceptionFromDescribeConfigs(KafkaCluster cluster) throws ExecutionException, InterruptedException { var topicName = "my-topic"; kafkaAdminClient[0] = Admin.create(Map.of(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.getBootstrapServers())); var kafkaAdminClientSpy = Mockito.spy(kafkaAdminClient[0]); var result = Mockito.mock(DescribeConfigsResult.class); Mockito.doReturn(interruptedFuture()).when(result).all(); Mockito.doReturn(Map.of(topicConfigResource(), interruptedFuture())).when(result).values(); Mockito.doReturn(result).when(kafkaAdminClientSpy).describeConfigs(Mockito.argThat(a -> a.stream().anyMatch(x -> x.type() == ConfigResource.Type.TOPIC))); KafkaTopic kafkaTopic = createKafkaTopic(topicName); assertOnUpdateThrowsInterruptedException(kafkaAdminClientSpy, kafkaTopic); }
public Optional<Column> findColumn(final ColumnName columnName) { return findColumnMatching(withName(columnName)); }
@Test public void shouldGetKeyColumns() { assertThat(SOME_SCHEMA.findColumn(K0), is(Optional.of( Column.of(K0, BIGINT, Namespace.KEY, 0) ))); }
@SuppressWarnings("unchecked") static FluentIterable<ZipEntry> entries(final ZipFile file) { checkNotNull(file); return new FluentIterable<ZipEntry>() { @Override public Iterator<ZipEntry> iterator() { return (Iterator<ZipEntry>) Iterators.forEnumeration(file.entries()); } }; }
@Test public void testEntries() throws Exception { File zipDir = new File(tmpDir, "zip"); File subDir1 = new File(zipDir, "subDir1"); File subDir2 = new File(subDir1, "subdir2"); assertTrue(subDir2.mkdirs()); createFileWithContents(subDir2, "myTextFile.txt", "Simple Text"); ZipFiles.zipDirectory(tmpDir, zipFile); try (ZipFile zip = new ZipFile(zipFile)) { Enumeration<? extends ZipEntry> entries = zip.entries(); for (ZipEntry entry : ZipFiles.entries(zip)) { assertTrue(entries.hasMoreElements()); // ZipEntry doesn't override equals assertEquals(entry.getName(), entries.nextElement().getName()); } assertFalse(entries.hasMoreElements()); } }
public static DataSource createDataSource(final ModeConfiguration modeConfig) throws SQLException { return createDataSource(DefaultDatabase.LOGIC_NAME, modeConfig); }
@Test void assertCreateDataSourceWithDatabaseNameAndDefaultModeConfigurationForMultipleDataSources() throws SQLException { assertDataSource(ShardingSphereDataSourceFactory.createDataSource("test_db", null), "test_db"); }
@Override public boolean isSupport(MetricsEvent event) { return event instanceof RequestEvent; }
@Test void testListener() { AggregateMetricsCollector metricsCollector = new AggregateMetricsCollector(applicationModel); RequestEvent event = RequestEvent.toRequestEvent( applicationModel, null, null, null, invocation, MetricsSupport.getSide(invocation), MethodMetric.isServiceLevel(applicationModel)); RequestEvent beforeEvent = RequestEvent.toRequestErrorEvent( applicationModel, null, null, invocation, MetricsSupport.getSide(invocation), RpcException.FORBIDDEN_EXCEPTION, MethodMetric.isServiceLevel(applicationModel)); Assertions.assertTrue(metricsCollector.isSupport(event)); Assertions.assertTrue(metricsCollector.isSupport(beforeEvent)); }
public boolean accept(DefaultIssue issue, Component component) { if (component.getType() != FILE || (exclusionPatterns.isEmpty() && inclusionPatterns.isEmpty())) { return true; } if (isExclude(issue, component)) { return false; } return isInclude(issue, component); }
@Test public void accept_everything_when_no_filter_properties() { IssueFilter underTest = newIssueFilter(new MapSettings()); assertThat(underTest.accept(ISSUE_1, COMPONENT_1)).isTrue(); assertThat(underTest.accept(ISSUE_2, COMPONENT_2)).isTrue(); assertThat(underTest.accept(ISSUE_3, COMPONENT_3)).isTrue(); }
@Override public void deleteArticle(Long id) { // 校验存在 validateArticleExists(id); // 删除 articleMapper.deleteById(id); }
@Test public void testDeleteArticle_success() { // mock 数据 ArticleDO dbArticle = randomPojo(ArticleDO.class); articleMapper.insert(dbArticle);// @Sql: 先插入出一条存在的数据 // 准备参数 Long id = dbArticle.getId(); // 调用 articleService.deleteArticle(id); // 校验数据不存在了 assertNull(articleMapper.selectById(id)); }
@PostMapping(value = "/raft") @Secured(action = ActionTypes.WRITE, resource = "nacos/admin", signType = SignType.CONSOLE) public RestResult<String> raftOps(@RequestBody Map<String, String> commands) { return protocolManager.getCpProtocol().execute(commands); }
@Test void testRaftOps() { Mockito.when(protocolManager.getCpProtocol()).thenAnswer(invocationOnMock -> { CPProtocol cpProtocol = Mockito.mock(CPProtocol.class); Mockito.when(cpProtocol.execute(Mockito.anyMap())).thenReturn(RestResultUtils.success("res")); return cpProtocol; }); RestResult<String> result = coreOpsV2Controller.raftOps(new HashMap<>()); assertEquals("res", result.getData()); }
@VisibleForTesting void updateRetryDelayTimeToTimeline(StepRuntimeSummary runtimeSummary) { StepInstance.Status status = runtimeSummary.getRuntimeState().getStatus(); if (status == StepInstance.Status.USER_FAILED || status == StepInstance.Status.PLATFORM_FAILED || status == StepInstance.Status.TIMEOUT_FAILED) { int nextRetryDelayInSecs = runtimeSummary .getStepRetry() .getNextRetryDelay(runtimeSummary.getRuntimeState().getStatus()); String humanReadableRetryTime = DurationHelper.humanReadableFormat(Duration.ofSeconds(nextRetryDelayInSecs)); runtimeSummary.addTimeline( TimelineLogEvent.info("Retrying task in [%s]", humanReadableRetryTime)); } }
@Test public void testUpdateRetryDelayTimeToTimeline() { StepRuntimeState runtimeState = new StepRuntimeState(); runtimeState.setStatus(StepInstance.Status.USER_FAILED); Timeline timeline = new Timeline(new ArrayList<>()); StepInstance.StepRetry stepRetry = new StepInstance.StepRetry(); stepRetry.setRetryable(true); RetryPolicy.FixedBackoff fixedBackoff = RetryPolicy.FixedBackoff.builder() .errorRetryBackoffInSecs(100L) .platformRetryBackoffInSecs(200L) .build(); stepRetry.setBackoff(fixedBackoff); StepRuntimeSummary runtimeSummary = StepRuntimeSummary.builder() .timeline(timeline) .runtimeState(runtimeState) .stepRetry(stepRetry) .build(); maestroTask.updateRetryDelayTimeToTimeline(runtimeSummary); List<TimelineEvent> timelineEvents = timeline.getTimelineEvents(); assertThat(timelineEvents) .hasSize(1) .usingRecursiveFieldByFieldElementComparatorIgnoringFields("timestamp") .contains(TimelineLogEvent.info("Retrying task in [1m 40s]")); RetryPolicy.ExponentialBackoff exponentialBackoff = RetryPolicy.ExponentialBackoff.builder() .errorRetryExponent(2) .errorRetryLimitInSecs(600L) .errorRetryBackoffInSecs(100L) .platformRetryBackoffInSecs(200L) .build(); stepRetry.setBackoff(exponentialBackoff); stepRetry.setErrorRetries(6); timelineEvents.clear(); maestroTask.updateRetryDelayTimeToTimeline(runtimeSummary); assertThat(timelineEvents) .hasSize(1) .usingRecursiveFieldByFieldElementComparatorIgnoringFields("timestamp") .contains(TimelineLogEvent.info("Retrying task in [10m]")); timelineEvents.clear(); runtimeState.setStatus(StepInstance.Status.PAUSED); maestroTask.updateRetryDelayTimeToTimeline(runtimeSummary); assertThat(timelineEvents).isEmpty(); }
@Override public Optional<JavaClass> tryResolve(String typeName) { String typeFile = typeName.replace(".", "/") + ".class"; Optional<URI> uri = tryGetUriOf(typeFile); return uri.isPresent() ? classUriImporter.tryImport(uri.get()) : Optional.empty(); }
@Test public void is_resilient_if_URI_cant_be_located() { Optional<JavaClass> result = resolver.tryResolve("sooo.Wrong"); assertThat(result).isEmpty(); verifyNoMoreInteractions(uriImporter); }
public OpenAPI read(Class<?> cls) { return read(cls, resolveApplicationPath(), null, false, null, null, new LinkedHashSet<String>(), new ArrayList<Parameter>(), new HashSet<Class<?>>()); }
@Test(description = "Single Example") public void testSingleExample() { Reader reader = new Reader(new OpenAPI()); OpenAPI openAPI = reader.read(SingleExampleResource.class); String yaml = "openapi: 3.0.1\n" + "paths:\n" + " /test1:\n" + " post:\n" + " operationId: test1\n" + " requestBody:\n" + " content:\n" + " application/json:\n" + " schema:\n" + " $ref: '#/components/schemas/User'\n" + " example:\n" + " foo: foo\n" + " bar: bar\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" + " '*/*': {}\n" + " /test2:\n" + " post:\n" + " operationId: test2\n" + " requestBody:\n" + " content:\n" + " application/json:\n" + " schema:\n" + " $ref: '#/components/schemas/User'\n" + " example:\n" + " foo: foo\n" + " bar: bar\n" + " responses:\n" + " default:\n" + " description: default response\n" + " content:\n" + " '*/*': {}\n" + "components:\n" + " schemas:\n" + " User:\n" + " type: object\n" + " properties:\n" + " id:\n" + " type: integer\n" + " format: int64\n" + " username:\n" + " type: string\n" + " firstName:\n" + " type: string\n" + " lastName:\n" + " type: string\n" + " email:\n" + " type: string\n" + " password:\n" + " type: string\n" + " phone:\n" + " type: string\n" + " userStatus:\n" + " type: integer\n" + " description: User Status\n" + " format: int32\n" + " xml:\n" + " name: User\n"; SerializationMatchers.assertEqualsToYaml(openAPI, yaml); }
@Nonnull public static <K, V> BatchSource<Entry<K, V>> map(@Nonnull String mapName) { return batchFromProcessor("mapSource(" + mapName + ')', readMapP(mapName)); }
@Test public void mapWithFilterAndProjectionFn_byName() { // Given List<Integer> input = sequence(itemCount); putToBatchSrcMap(input); // When BatchSource<Integer> source = Sources.map( srcName, truePredicate(), Entry<String, Integer>::getValue); // Then p.readFrom(source).writeTo(sink); execute(); assertEquals(toBag(input), sinkToBag()); }
public void sendMessage(final Account account, final Device device, final Envelope message, final boolean online) { final String channel; if (device.getGcmId() != null) { channel = "gcm"; } else if (device.getApnId() != null) { channel = "apn"; } else if (device.getFetchesMessages()) { channel = "websocket"; } else { channel = "none"; } final boolean clientPresent; if (online) { clientPresent = clientPresenceManager.isPresent(account.getUuid(), device.getId()); if (clientPresent) { messagesManager.insert(account.getUuid(), device.getId(), message.toBuilder().setEphemeral(true).build()); } } else { messagesManager.insert(account.getUuid(), device.getId(), message); // We check for client presence after inserting the message to take a conservative view of notifications. If the // client wasn't present at the time of insertion but is now, they'll retrieve the message. If they were present // but disconnected before the message was delivered, we should send a notification. clientPresent = clientPresenceManager.isPresent(account.getUuid(), device.getId()); if (!clientPresent) { try { pushNotificationManager.sendNewMessageNotification(account, device.getId(), message.getUrgent()); } catch (final NotPushRegisteredException ignored) { } } } Metrics.counter(SEND_COUNTER_NAME, CHANNEL_TAG_NAME, channel, EPHEMERAL_TAG_NAME, String.valueOf(online), CLIENT_ONLINE_TAG_NAME, String.valueOf(clientPresent), URGENT_TAG_NAME, String.valueOf(message.getUrgent()), STORY_TAG_NAME, String.valueOf(message.getStory()), SEALED_SENDER_TAG_NAME, String.valueOf(!message.hasSourceUuid())) .increment(); }
@Test void testSendOnlineMessageClientPresent() throws Exception { when(clientPresenceManager.isPresent(ACCOUNT_UUID, DEVICE_ID)).thenReturn(true); when(device.getGcmId()).thenReturn("gcm-id"); messageSender.sendMessage(account, device, message, true); ArgumentCaptor<MessageProtos.Envelope> envelopeArgumentCaptor = ArgumentCaptor.forClass( MessageProtos.Envelope.class); verify(messagesManager).insert(any(), anyByte(), envelopeArgumentCaptor.capture()); assertTrue(envelopeArgumentCaptor.getValue().getEphemeral()); verifyNoInteractions(pushNotificationManager); }
@SuppressWarnings("unchecked") @Override public synchronized ProxyInfo<T> getProxy() { if (currentUsedHandler != null) { return currentUsedHandler; } Map<String, ProxyInfo<T>> targetProxyInfos = new HashMap<>(); StringBuilder combinedInfo = new StringBuilder("["); for (int i = 0; i < proxies.size(); i++) { ProxyInfo<T> pInfo = super.getProxy(); incrementProxyIndex(); targetProxyInfos.put(pInfo.proxyInfo, pInfo); combinedInfo.append(pInfo.proxyInfo).append(','); } combinedInfo.append(']'); T wrappedProxy = (T) Proxy.newProxyInstance( RequestHedgingInvocationHandler.class.getClassLoader(), new Class<?>[]{xface}, new RequestHedgingInvocationHandler(targetProxyInfos)); currentUsedHandler = new ProxyInfo<T>(wrappedProxy, combinedInfo.toString()); return currentUsedHandler; }
@Test public void testHedgingWhenFileNotFoundException() throws Exception { ClientProtocol active = Mockito.mock(ClientProtocol.class); Mockito .when(active.getBlockLocations(anyString(), anyLong(), anyLong())) .thenThrow(new RemoteException("java.io.FileNotFoundException", "File does not exist!")); ClientProtocol standby = Mockito.mock(ClientProtocol.class); Mockito .when(standby.getBlockLocations(anyString(), anyLong(), anyLong())) .thenThrow( new RemoteException("org.apache.hadoop.ipc.StandbyException", "Standby NameNode")); RequestHedgingProxyProvider<ClientProtocol> provider = new RequestHedgingProxyProvider<>(conf, nnUri, ClientProtocol.class, createFactory(active, standby)); try { provider.getProxy().proxy.getBlockLocations("/tmp/test.file", 0L, 20L); Assert.fail("Should fail since the active namenode throws" + " FileNotFoundException!"); } catch (MultiException me) { for (Exception ex : me.getExceptions().values()) { Exception rEx = ((RemoteException) ex).unwrapRemoteException(); if (rEx instanceof StandbyException) { continue; } Assert.assertTrue(rEx instanceof FileNotFoundException); } } Mockito.verify(active).getBlockLocations(anyString(), anyLong(), anyLong()); Mockito.verify(standby).getBlockLocations(anyString(), anyLong(), anyLong()); }
@Override public boolean next() throws SQLException { return proxyBackendHandler.next(); }
@Test void assertNext() throws SQLException, NoSuchFieldException, IllegalAccessException { MySQLComQueryPacketExecutor actual = new MySQLComQueryPacketExecutor(packet, connectionSession); MemberAccessor accessor = Plugins.getMemberAccessor(); accessor.set(MySQLComQueryPacketExecutor.class.getDeclaredField("proxyBackendHandler"), actual, proxyBackendHandler); when(proxyBackendHandler.next()).thenReturn(true, false); assertTrue(actual.next()); assertFalse(actual.next()); }
@Override public CompletionStage<BeamFnApi.InstructionResponse> handle( BeamFnApi.InstructionRequest request) { LOG.debug("Sending InstructionRequest {}", request); CompletableFuture<BeamFnApi.InstructionResponse> resultFuture = new CompletableFuture<>(); outstandingRequests.put(request.getInstructionId(), resultFuture); requestReceiver.onNext(request); return resultFuture; }
@Test public void testRequestSent() { String id = "instructionId"; client.handle(BeamFnApi.InstructionRequest.newBuilder().setInstructionId(id).build()); verify(mockObserver).onNext(any(BeamFnApi.InstructionRequest.class)); }
/**
 * Filters the already-queried cloud-object rows down to at most {@code sourceLimit}
 * cumulative bytes past the previous checkpoint, and derives the next checkpoint
 * (commit + last selected object key) for the batch.
 *
 * @return the updated checkpoint, plus the selected rows when any remain
 */
public static Pair<CloudObjectIncrCheckpoint, Option<Dataset<Row>>> filterAndGenerateCheckpointBasedOnSourceLimit(
    Dataset<Row> sourceData, long sourceLimit, QueryInfo queryInfo, CloudObjectIncrCheckpoint cloudObjectIncrCheckpoint) {
  if (sourceData.isEmpty()) {
    // There is no file matching the prefix.
    CloudObjectIncrCheckpoint updatedCheckpoint =
        queryInfo.getEndInstant().equals(cloudObjectIncrCheckpoint.getCommit())
            ? cloudObjectIncrCheckpoint
            : new CloudObjectIncrCheckpoint(queryInfo.getEndInstant(), null);
    return Pair.of(updatedCheckpoint, Option.empty());
  }
  // Let's persist the dataset to avoid triggering the dag repeatedly
  sourceData.persist(StorageLevel.MEMORY_AND_DISK());
  // Set ordering in query to enable batching
  Dataset<Row> orderedDf = QueryRunner.applyOrdering(sourceData, queryInfo.getOrderByColumns());
  Option<String> lastCheckpoint = Option.of(cloudObjectIncrCheckpoint.getCommit());
  Option<String> lastCheckpointKey = Option.ofNullable(cloudObjectIncrCheckpoint.getKey());
  Option<String> concatenatedKey = lastCheckpoint.flatMap(checkpoint -> lastCheckpointKey.map(key -> checkpoint + key));
  // Filter until last checkpoint key
  if (concatenatedKey.isPresent()) {
    orderedDf = orderedDf.withColumn("commit_key",
        functions.concat(functions.col(queryInfo.getOrderColumn()), functions.col(queryInfo.getKeyColumn())));
    // Apply incremental filter
    orderedDf = orderedDf.filter(functions.col("commit_key").gt(concatenatedKey.get())).drop("commit_key");
    // If there are no more files where commit_key is greater than lastCheckpointCommit#lastCheckpointKey
    if (orderedDf.isEmpty()) {
      LOG.info("Empty ordered source, returning endpoint:" + queryInfo.getEndInstant());
      sourceData.unpersist();
      // queryInfo.getEndInstant() represents source table's last completed instant
      // If current checkpoint is c1#abc and queryInfo.getEndInstant() is c1, return c1#abc.
      // If current checkpoint is c1#abc and queryInfo.getEndInstant() is c2, return c2.
      CloudObjectIncrCheckpoint updatedCheckpoint =
          queryInfo.getEndInstant().equals(cloudObjectIncrCheckpoint.getCommit())
              ? cloudObjectIncrCheckpoint
              : new CloudObjectIncrCheckpoint(queryInfo.getEndInstant(), null);
      return Pair.of(updatedCheckpoint, Option.empty());
    }
  }
  // Limit based on sourceLimit
  WindowSpec windowSpec = Window.orderBy(col(queryInfo.getOrderColumn()), col(queryInfo.getKeyColumn()));
  // Add the 'cumulativeSize' column with running sum of 'limitColumn'
  Dataset<Row> aggregatedData = aggregateWithCumulativeSum(orderedDf, queryInfo, windowSpec);
  Dataset<Row> collectedRows = aggregatedData.filter(col(CUMULATIVE_COLUMN_NAME).leq(sourceLimit));
  Row row = null;
  if (collectedRows.isEmpty()) {
    // If the first element itself exceeds limits then return first element
    LOG.info("First object exceeding source limit: " + sourceLimit + " bytes");
    row = aggregatedData.select(queryInfo.getOrderColumn(), queryInfo.getKeyColumn(), CUMULATIVE_COLUMN_NAME).first();
    collectedRows = aggregatedData.limit(1);
  } else {
    // Get the last row and form composite key
    row = collectedRows.select(queryInfo.getOrderColumn(), queryInfo.getKeyColumn(), CUMULATIVE_COLUMN_NAME).orderBy(
        col(queryInfo.getOrderColumn()).desc(), col(queryInfo.getKeyColumn()).desc()).first();
  }
  LOG.info("Processed batch size: " + row.get(row.fieldIndex(CUMULATIVE_COLUMN_NAME)) + " bytes");
  sourceData.unpersist();
  return Pair.of(new CloudObjectIncrCheckpoint(row.getString(0), row.getString(1)), Option.of(collectedRows));
}
// The checkpoint must advance to the key of the last object that fits within the
// byte budget; cumulativeSize tracks the running total across commit boundaries.
@Test
void testMultipleObjectExceedingSourceLimit() {
  List<Triple<String, Long, String>> filePathSizeAndCommitTime = new ArrayList<>();
  // Add file paths and sizes to the list
  filePathSizeAndCommitTime.add(Triple.of("path/to/file1.json", 100L, "commit1"));
  filePathSizeAndCommitTime.add(Triple.of("path/to/file3.json", 200L, "commit1"));
  filePathSizeAndCommitTime.add(Triple.of("path/to/file2.json", 150L, "commit1"));
  filePathSizeAndCommitTime.add(Triple.of("path/to/file4.json", 50L, "commit2"));
  filePathSizeAndCommitTime.add(Triple.of("path/to/file5.json", 150L, "commit2"));
  filePathSizeAndCommitTime.add(Triple.of("path/to/file7.json", 100L, "commit3"));
  filePathSizeAndCommitTime.add(Triple.of("path/to/file7.json", 250L, "commit3"));
  Dataset<Row> inputDs = generateDataset(filePathSizeAndCommitTime);
  QueryInfo queryInfo = new QueryInfo(
      QUERY_TYPE_INCREMENTAL_OPT_VAL(), "commit1", "commit1", "commit2",
      "_hoodie_commit_time", "s3.object.key", "s3.object.size");
  // 350-byte budget: only file1 (100) and file2 (cumulative 250) fit.
  Pair<CloudObjectIncrCheckpoint, Option<Dataset<Row>>> result =
      IncrSourceHelper.filterAndGenerateCheckpointBasedOnSourceLimit(
          inputDs, 350L, queryInfo, new CloudObjectIncrCheckpoint("commit1", null));
  Row row = result.getRight().get().select("cumulativeSize").collectAsList().get((int) result.getRight().get().count() - 1);
  assertEquals("commit1#path/to/file2.json", result.getKey().toString());
  List<Row> rows = result.getRight().get().collectAsList();
  assertEquals(2, rows.size());
  assertEquals("[[commit1,[[bucket-1],[path/to/file1.json,100]],100], [commit1,[[bucket-1],[path/to/file2.json,150]],250]]", rows.toString());
  assertEquals(250L, row.get(0));
  // 550-byte budget: extends through file3 and into commit2's file4 (cumulative 500).
  result = IncrSourceHelper.filterAndGenerateCheckpointBasedOnSourceLimit(
      inputDs, 550L, queryInfo, new CloudObjectIncrCheckpoint("commit1", null));
  row = result.getRight().get().select("cumulativeSize").collectAsList().get((int) result.getRight().get().count() - 1);
  assertEquals("commit2#path/to/file4.json", result.getKey().toString());
  rows = result.getRight().get().collectAsList();
  assertEquals(4, rows.size());
  assertEquals("[[commit1,[[bucket-1],[path/to/file1.json,100]],100], [commit1,[[bucket-1],[path/to/file2.json,150]],250],"
      + " [commit1,[[bucket-1],[path/to/file3.json,200]],450], [commit2,[[bucket-1],[path/to/file4.json,50]],500]]", rows.toString());
  assertEquals(500L, row.get(0));
}
public String getShardIterator( final String streamName, final String shardId, final ShardIteratorType shardIteratorType, final String startingSequenceNumber, final Instant timestamp) throws TransientKinesisException { final Date date = timestamp != null ? timestamp.toDate() : null; return wrapExceptions( () -> kinesis .getShardIterator( new GetShardIteratorRequest() .withStreamName(streamName) .withShardId(shardId) .withShardIteratorType(shardIteratorType) .withStartingSequenceNumber(startingSequenceNumber) .withTimestamp(date)) .getShardIterator()); }
@Test public void shouldReturnIteratorStartingWithTimestamp() throws Exception { Instant timestamp = Instant.now(); when(kinesis.getShardIterator( new GetShardIteratorRequest() .withStreamName(STREAM) .withShardId(SHARD_1) .withShardIteratorType(ShardIteratorType.AT_SEQUENCE_NUMBER) .withTimestamp(timestamp.toDate()))) .thenReturn(new GetShardIteratorResult().withShardIterator(SHARD_ITERATOR)); String stream = underTest.getShardIterator( STREAM, SHARD_1, ShardIteratorType.AT_SEQUENCE_NUMBER, null, timestamp); assertThat(stream).isEqualTo(SHARD_ITERATOR); }
@Override public void process() { try { if (containers.length > 1) { throw new RuntimeException("This processor can only handle single containers"); } ContainerUnloader container = containers[0]; // Get config Configuration config = createConfiguration(container); //Workspace ProjectController pc = Lookup.getDefault().lookup(ProjectController.class); if (workspace == null) { workspace = pc.openNewWorkspace(config); } else if(!configurationMatchesExisting(config, workspace)) { // The configuration check failed, stop processing return; } processMeta(container, workspace); if (container.getSource() != null && !container.getSource().isEmpty()) { pc.setSource(workspace, container.getSource()); // Remove extensions pc.renameWorkspace(workspace, container.getSource().replaceAll("(?<!^)[.].*", "")); } Progress.start(progressTicket, calculateWorkUnits()); process(container, workspace); Progress.finish(progressTicket); } finally { clean(); } }
@Test public void testProcess() { ImportContainerImpl importContainer = new ImportContainerImpl(); NodeDraft nodeDraft = new NodeDraftImpl(importContainer, "1", 1); importContainer.addNode(nodeDraft); Workspace workspace = new WorkspaceImpl(null, 1); DefaultProcessor defaultProcessor = new DefaultProcessor(); defaultProcessor.setContainers(new ImportContainerImpl[] {importContainer}); defaultProcessor.setWorkspace(workspace); defaultProcessor.process(); GraphModel graphModel = workspace.getLookup().lookup(GraphModel.class); Node node = graphModel.getGraph().getNode("1"); Assert.assertNotNull(node); }
@Override public Map<String, String> getMetadata(final Path file) throws BackgroundException { try { if(containerService.isContainer(file)) { final ContainerMetadata meta = session.getClient().getContainerMetaData(regionService.lookup(file), containerService.getContainer(file).getName()); return meta.getMetaData(); } else { return new SwiftAttributesFinderFeature(session).find(file).getMetadata(); } } catch(GenericException e) { throw new SwiftExceptionMappingService().map("Failure to read attributes of {0}", e, file); } catch(IOException e) { throw new DefaultIOExceptionMappingService().map("Failure to read attributes of {0}", e, file); } }
@Test public void testGetContainerMetadata() throws Exception { final Path container = new Path("test.cyberduck.ch", EnumSet.of(Path.Type.directory, Path.Type.volume)); container.attributes().setRegion("IAD"); final Map<String, String> metadata = new SwiftMetadataFeature(session).getMetadata(container); assertFalse(metadata.isEmpty()); }
public static void onSuccess(final ServerMemberManager manager, final Member member) { final NodeState old = member.getState(); manager.getMemberAddressInfos().add(member.getAddress()); member.setState(NodeState.UP); member.setFailAccessCnt(0); if (!Objects.equals(old, member.getState())) { manager.notifyMemberChange(member); } }
@Test void testMemberOnSuccessWhenMemberUpdatedExtendInfo() { final Member remote = buildMember(); final Member reportResult = buildMember(); reportResult.setExtendVal(MemberMetaDataConstants.VERSION, "test"); MemberUtil.onSuccess(memberManager, remote, reportResult); assertEquals("test", remote.getExtendVal(MemberMetaDataConstants.VERSION)); assertTrue(mockMemberAddressInfos.contains(remote.getAddress())); verify(memberManager).notifyMemberChange(remote); }
@Override
public void exportData(JsonWriter writer) throws IOException {
    // Exporting the legacy 1.1 format is deliberately unsupported in this version.
    throw new UnsupportedOperationException("Can not export 1.1 format from this version.");
}
@Test(expected = UnsupportedOperationException.class)
public void testExportDisabled() throws IOException {
    // Any attempt to export the 1.1 format must be rejected outright.
    dataService.exportData(new JsonWriter(new StringWriter()));
}
@Override
public ExchangeHandler getExchangeHandler() {
    // The channel's attached handler is expected to be an ExchangeHandler;
    // the cast fails fast otherwise.
    return (ExchangeHandler) channel.getChannelHandler();
}
@Test
void getExchangeHandlerTest() {
    // The header channel under test has no handler installed, so null is expected.
    Assertions.assertNull(header.getExchangeHandler());
}
public static MySQLBinaryProtocolValue getBinaryProtocolValue(final BinaryColumnType binaryColumnType) {
    // Fail fast with a descriptive message for column types without a registered codec.
    boolean registered = BINARY_PROTOCOL_VALUES.containsKey(binaryColumnType);
    Preconditions.checkArgument(registered,
            "Cannot find MySQL type '%s' in column type when process binary protocol value", binaryColumnType);
    return BINARY_PROTOCOL_VALUES.get(binaryColumnType);
}
@Test
void assertGetBinaryProtocolValueWithMySQLTypeTimestamp() {
    // TIMESTAMP columns are transported with the date/time binary codec.
    assertThat(MySQLBinaryProtocolValueFactory.getBinaryProtocolValue(MySQLBinaryColumnType.TIMESTAMP),
            instanceOf(MySQLDateBinaryProtocolValue.class));
}
/**
 * Convenience overload that builds a {@link CompletionServiceHelper} from the given
 * executor and connection manager before delegating to the main implementation.
 */
public synchronized Map<String, Object> getSubtaskProgress(String taskName, @Nullable String subtaskNames,
        Executor executor, HttpClientConnectionManager connMgr, Map<String, String> workerEndpoints,
        Map<String, String> requestHeaders, int timeoutMs) throws Exception {
    return getSubtaskProgress(taskName, subtaskNames,
            new CompletionServiceHelper(executor, connMgr, HashBiMap.create(0)),
            workerEndpoints, requestHeaders, timeoutMs);
}
@Test
public void testGetSubtaskProgressNoWorker() throws Exception {
    // Mock the Helix task driver and HTTP helper so no real cluster is needed.
    TaskDriver taskDriver = mock(TaskDriver.class);
    JobConfig jobConfig = mock(JobConfig.class);
    when(taskDriver.getJobConfig(anyString())).thenReturn(jobConfig);
    CompletionServiceHelper httpHelper = mock(CompletionServiceHelper.class);
    CompletionServiceHelper.CompletionServiceResponse httpResp = new CompletionServiceHelper.CompletionServiceResponse();
    when(httpHelper.doMultiGetRequest(any(), any(), anyBoolean(), any(), anyInt())).thenReturn(httpResp);
    PinotHelixTaskResourceManager mgr = new PinotHelixTaskResourceManager(mock(PinotHelixResourceManager.class), taskDriver);
    // No worker to run subtasks.
    Map<String, String> workerEndpoints = new HashMap<>();
    String taskName = "Task_SegmentGenerationAndPushTask_someone";
    String[] subtaskNames = new String[3];
    for (int i = 0; i < 3; i++) {
        subtaskNames[i] = taskName + "_" + i;
    }
    Map<String, TaskConfig> taskConfigMap = new HashMap<>();
    for (String subtaskName : subtaskNames) {
        taskConfigMap.put(subtaskName, mock(TaskConfig.class));
    }
    when(jobConfig.getTaskConfigMap()).thenReturn(taskConfigMap);
    Map<String, Object> progress = mgr.getSubtaskProgress(taskName, StringUtils.join(subtaskNames, ','), httpHelper,
            workerEndpoints, Collections.emptyMap(), 1000);
    // Every configured subtask must be reported as having no worker.
    for (String subtaskName : subtaskNames) {
        assertEquals(progress.get(subtaskName), "No worker has run this subtask");
    }
}
/**
 * FEEL number(from, grouping separator, decimal separator) built-in: parses a string
 * into a BigDecimal after stripping the grouping separator and normalizing the
 * decimal separator to '.'. Invalid separators or an unparseable value yield an error.
 */
public FEELFnResult<BigDecimal> invoke(@ParameterName("from") String from, @ParameterName("grouping separator") String group, @ParameterName("decimal separator") String decimal) {
    if ( from == null ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null"));
    }
    // Grouping separator may only be space, dot or comma.
    if ( group != null && !group.equals( " " ) && !group.equals( "." ) && !group.equals( "," ) ) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "group", "not a valid one, can only be one of: dot ('.'), comma (','), space (' ') "));
    }
    if ( decimal != null ) {
        // Decimal separator may only be dot or comma, and must differ from the grouping separator.
        if (!decimal.equals( "." ) && !decimal.equals( "," )) {
            return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "decimal", "not a valid one, can only be one of: dot ('.'), comma (',') "));
        } else if (group != null && decimal.equals( group )) {
            return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "decimal", "cannot be the same as parameter 'group' "));
        }
    }
    // Strip grouping characters first, then rewrite the decimal separator as '.'.
    // The separators are regex-escaped with a leading backslash before replaceAll.
    if ( group != null ) {
        from = from.replaceAll( "\\" + group, "" );
    }
    if ( decimal != null ) {
        from = from.replaceAll( "\\" + decimal, "." );
    }
    BigDecimal result = NumberEvalHelper.getBigDecimalOrNull(from );
    if( from != null && result == null ) {
        // conversion failed
        return FEELFnResult.ofError( new InvalidParametersEvent(Severity.ERROR, "unable to calculate final number result" ) );
    } else {
        return FEELFnResult.ofResult( result );
    }
}
@Test
void invokeNumberWithDecimalCharComma() {
    // "9,876" with comma as the decimal separator parses as 9.876.
    FunctionTestUtil.assertResult(numberFunction.invoke("9,876", null, ","), BigDecimal.valueOf(9.876));
}
@Override
public boolean contains(String clientId) {
    // A client is known if any of the delegated managers tracks it;
    // checks short-circuit in the same order as before.
    if (connectionBasedClientManager.contains(clientId)) {
        return true;
    }
    if (ephemeralIpPortClientManager.contains(clientId)) {
        return true;
    }
    return persistentIpPortClientManager.contains(clientId);
}
@Test
void testContainsConnectionIdFailed() {
    // An IPv6 connection id not registered with any delegated manager is not contained.
    assertFalse(delegate.contains(connectionIdForV6));
}
public InternalComponentPropertyDto setKey(String key) {
    // Reject null/empty keys and keys longer than the column allows.
    checkArgument(key != null && !key.isEmpty(), "key can't be null nor empty");
    boolean withinLimit = key.length() <= MAX_KEY_LENGTH;
    checkArgument(withinLimit,
            "key length (%s) is longer than the maximum authorized (%s). '%s' was provided",
            key.length(), MAX_KEY_LENGTH, key);
    this.key = key;
    return this;
}
@Test
void setKey_throws_IAE_if_key_is_too_long() {
    // 513 characters exceeds the 512-character limit and must be rejected.
    String veryLongKey = StringUtils.repeat("a", 513);
    assertThatThrownBy(() -> new InternalComponentPropertyDto().setKey(veryLongKey))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage(String.format("key length (513) is longer than the maximum authorized (512). '%s' was provided", veryLongKey));
}
@Override
public void complement() {
    // Complementing is not supported by this set implementation.
    throw new UnsupportedOperationException();
}
@Test(expected = UnsupportedOperationException.class)
public void test_complement() {
    // complement() must always throw on this implementation.
    set.complement();
}
static String getPassword(Configuration conf, String alias) {
    // Resolve the password for the given alias via the credential provider API;
    // any lookup failure is treated as "no password configured" (null).
    try {
        char[] chars = conf.getPassword(alias);
        return chars == null ? null : new String(chars);
    } catch (IOException ioe) {
        return null;
    }
}
@Test
void testGetPassword() throws Exception {
    Configuration conf = provisionCredentialsForSSL();
    // use WebAppUtils as would be used by loadSslConfiguration
    assertEquals("keypass", WebAppUtils.getPassword(conf, WebAppUtils.WEB_APP_KEY_PASSWORD_KEY));
    assertEquals("storepass", WebAppUtils.getPassword(conf, WebAppUtils.WEB_APP_KEYSTORE_PASSWORD_KEY));
    assertEquals("trustpass", WebAppUtils.getPassword(conf, WebAppUtils.WEB_APP_TRUSTSTORE_PASSWORD_KEY));
    // let's make sure that a password that doesn't exist returns null
    assertNull(WebAppUtils.getPassword(conf, "invalid-alias"));
}
@Override
public Executor getExecutor(URL url) {
    String name = url.getParameter(THREAD_NAME_KEY, (String) url.getAttribute(THREAD_NAME_KEY, DEFAULT_THREAD_NAME));
    int cores = url.getParameter(CORE_THREADS_KEY, DEFAULT_CORE_THREADS);
    int threads = url.getParameter(THREADS_KEY, DEFAULT_THREADS);
    int queues = url.getParameter(QUEUES_KEY, DEFAULT_QUEUES);
    // queues == 0 -> direct hand-off; queues < 0 -> memory-safe unbounded queue;
    // otherwise a bounded queue with the requested capacity.
    final BlockingQueue<Runnable> workQueue;
    if (queues == 0) {
        workQueue = new SynchronousQueue<>();
    } else if (queues < 0) {
        workQueue = new MemorySafeLinkedBlockingQueue<>();
    } else {
        workQueue = new LinkedBlockingQueue<>(queues);
    }
    // Long.MAX_VALUE keep-alive keeps idle worker threads alive indefinitely.
    return new ThreadPoolExecutor(
            cores,
            threads,
            Long.MAX_VALUE,
            TimeUnit.MILLISECONDS,
            workQueue,
            new NamedInternalThreadFactory(name, true),
            new AbortPolicyWithReport(name, url));
}
@Test
void getExecutor1() throws Exception {
    // queues=0 must produce a SynchronousQueue-backed pool with the configured
    // core/max sizes and the abort-with-report rejection handler.
    URL url = URL.valueOf("dubbo://10.20.130.230:20880/context/path?" + THREAD_NAME_KEY + "=demo&" + CORE_THREADS_KEY
            + "=1&" + THREADS_KEY + "=2&" + QUEUES_KEY + "=0");
    ThreadPool threadPool = new LimitedThreadPool();
    ThreadPoolExecutor executor = (ThreadPoolExecutor) threadPool.getExecutor(url);
    assertThat(executor.getCorePoolSize(), is(1));
    assertThat(executor.getMaximumPoolSize(), is(2));
    assertThat(executor.getQueue(), Matchers.<BlockingQueue<Runnable>>instanceOf(SynchronousQueue.class));
    assertThat(
            executor.getRejectedExecutionHandler(),
            Matchers.<RejectedExecutionHandler>instanceOf(AbortPolicyWithReport.class));
    // Tasks must run on named internal threads; the latch proves the task executed.
    final CountDownLatch latch = new CountDownLatch(1);
    executor.execute(new Runnable() {
        @Override
        public void run() {
            Thread thread = Thread.currentThread();
            assertThat(thread, instanceOf(InternalThread.class));
            assertThat(thread.getName(), startsWith("demo"));
            latch.countDown();
        }
    });
    latch.await();
    assertThat(latch.getCount(), is(0L));
}
@BuildStep
AdditionalBeanBuildItem produce(Capabilities capabilities, JobRunrBuildTimeConfiguration jobRunrBuildTimeConfiguration) {
    // Collect the always-on beans plus the capability-dependent JSON mapper
    // and storage provider(s).
    Set<Class<?>> beans = new HashSet<>();
    beans.add(JobRunrProducer.class);
    beans.add(JobRunrStarter.class);
    beans.add(jsonMapper(capabilities));
    beans.addAll(storageProvider(capabilities, jobRunrBuildTimeConfiguration));
    return AdditionalBeanBuildItem.builder()
            .setUnremovable()
            .addBeanClasses(beans.toArray(new Class[0]))
            .build();
}
@Test
void jobRunrProducerUsesDocumentDBStorageProviderIfMongoDBClientCapabilityIsPresent() {
    // With the MongoDB client capability present and database type "documentdb",
    // the DocumentDB producer must be registered instead of the MongoDB one.
    lenient().when(capabilities.isPresent(Capability.MONGODB_CLIENT)).thenReturn(true);
    when(databaseConfiguration.type()).thenReturn(Optional.of("documentdb"));
    final AdditionalBeanBuildItem additionalBeanBuildItem = jobRunrExtensionProcessor.produce(capabilities, jobRunrBuildTimeConfiguration);
    assertThat(additionalBeanBuildItem.getBeanClasses())
            .contains(JobRunrDocumentDBStorageProviderProducer.class.getName())
            .doesNotContain(JobRunrMongoDBStorageProviderProducer.class.getName());
}
/**
 * Materializes this payload into a typed config instance for the given config id.
 */
public <ConfigType extends ConfigInstance> ConfigType toInstance(Class<ConfigType> clazz, String configId) {
    return ConfigInstanceUtil.getNewInstance(clazz, configId, this);
}
@Test
public void test_simple_struct() {
    // Build a Slime payload with one "simple" struct and verify every field
    // round-trips into the generated StructtypesConfig instance.
    Slime slime = new Slime();
    addStructFields(slime.setObject().setObject("simple"), "foobar", "MALE", new String[] { "foo@bar", "bar@foo" });
    StructtypesConfig config = new ConfigPayload(slime).toInstance(StructtypesConfig.class, "");
    assertThat(config.simple().name(), is("foobar"));
    assertThat(config.simple().gender(), is(StructtypesConfig.Simple.Gender.Enum.MALE));
    assertThat(config.simple().emails(0), is("foo@bar"));
    assertThat(config.simple().emails(1), is("bar@foo"));
}
@Override
public void process() {
    JMeterContext context = getThreadContext();
    Sampler sam = context.getCurrentSampler();
    SampleResult res = context.getPreviousResult();
    HTTPSamplerBase sampler;
    HTTPSampleResult result;
    // Only applicable when the previous sampler was an HTTP request with an HTTP result.
    if (!(sam instanceof HTTPSamplerBase) || !(res instanceof HTTPSampleResult)) {
        log.info("Can't apply HTML Link Parser when the previous" + " sampler run is not an HTTP Request.");
        return;
    } else {
        sampler = (HTTPSamplerBase) sam;
        result = (HTTPSampleResult) res;
    }
    List<HTTPSamplerBase> potentialLinks = new ArrayList<>();
    String responseText = result.getResponseDataAsString();
    // Skip any leading text before the first tag; -1 means no '<', parse from the start.
    int index = responseText.indexOf('<'); // $NON-NLS-1$
    if (index == -1) {
        index = 0;
    }
    if (log.isDebugEnabled()) {
        log.debug("Check for matches against: "+sampler.toString());
    }
    Document html = (Document) HtmlParsingUtils.getDOM(responseText.substring(index));
    // Collect candidate links from anchors, forms and framesets that match the sampler.
    addAnchorUrls(html, result, sampler, potentialLinks);
    addFormUrls(html, result, sampler, potentialLinks);
    addFramesetUrls(html, result, sampler, potentialLinks);
    if (!potentialLinks.isEmpty()) {
        // Pick one matching link at random and rewrite the sampler to target it.
        HTTPSamplerBase url = potentialLinks.get(ThreadLocalRandom.current().nextInt(potentialLinks.size()));
        if (log.isDebugEnabled()) {
            log.debug("Selected: "+url.toString());
        }
        sampler.setDomain(url.getDomain());
        sampler.setPath(url.getPath());
        if (url.getMethod().equals(HTTPConstants.POST)) {
            // POST: keep the sampler's argument list but update values from the chosen link.
            for (JMeterProperty jMeterProperty : sampler.getArguments()) {
                Argument arg = (Argument) jMeterProperty.getObjectValue();
                modifyArgument(arg, url.getArguments());
            }
        } else {
            sampler.setArguments(url.getArguments());
        }
        sampler.setProtocol(url.getProtocol());
    } else {
        log.debug("No matches found");
    }
}
@Test
public void testFailSimpleParse2() throws Exception {
    // The config pattern only matches login.html, so the parsed anchor to
    // /home/index.html must NOT be applied to the sampler.
    HTTPSamplerBase config = makeUrlConfig(".*login\\.html");
    HTTPSamplerBase context = makeContext("http://www.apache.org/subdir/previous.html");
    String responseText = "<html><head><title>Test page</title></head><body>"
            + "<a href=\"/home/index.html?param1=value1\">"
            + "Goto index page</a></body></html>";
    HTTPSampleResult result = new HTTPSampleResult();
    result.setResponseData(responseText, null);
    result.setSampleLabel(context.toString());
    result.setURL(context.getUrl());
    jmctx.setCurrentSampler(context);
    jmctx.setPreviousResult(result);
    parser.process();
    String newUrl = config.getUrl().toString();
    Assertions.assertNotEquals("http://www.apache.org/home/index.html?param1=value1", newUrl);
    // NOTE(review): this compares config.getUrl() with itself and can never fail —
    // presumably it was meant to compare against the context URL; confirm intent.
    assertEquals(config.getUrl().toString(), newUrl);
}
Provider resolveProviderAndEnforceNonnullJdbcUrl(Props props) {
    String url = props.value(JDBC_URL.getKey());
    Integer embeddedDatabasePort = props.valueAsInt(JDBC_EMBEDDED_PORT.getKey());
    // An explicit embedded port forces H2 and overwrites any configured URL
    // (a warning is logged if a URL was also set).
    if (embeddedDatabasePort != null) {
        String correctUrl = buildH2JdbcUrl(embeddedDatabasePort);
        warnIfUrlIsSet(embeddedDatabasePort, url, correctUrl);
        props.set(JDBC_URL.getKey(), correctUrl);
        return Provider.H2;
    }
    // No URL at all: default to embedded H2 on the default port.
    if (isEmpty(url)) {
        props.set(JDBC_URL.getKey(), buildH2JdbcUrl(JDBC_EMBEDDED_PORT_DEFAULT_VALUE));
        props.set(JDBC_EMBEDDED_PORT.getKey(), String.valueOf(JDBC_EMBEDDED_PORT_DEFAULT_VALUE));
        return Provider.H2;
    }
    // Otherwise the provider is derived from the "jdbc:<provider>:" prefix.
    Pattern pattern = Pattern.compile("jdbc:(\\w+):.+");
    Matcher matcher = pattern.matcher(url);
    if (!matcher.find()) {
        throw new MessageException(format("Bad format of JDBC URL: %s", url));
    }
    String key = matcher.group(1);
    try {
        return Provider.valueOf(StringUtils.upperCase(key));
    } catch (IllegalArgumentException e) {
        throw new MessageException(format("Unsupported JDBC driver provider: %s", key));
    }
}
@Test
public void fail_with_MessageException_when_url_does_not_have_jdbc_prefix() {
    // A URL without the "jdbc:" prefix fails the format regex and must raise
    // a MessageException with the bad-format message.
    Props props = newProps(JDBC_URL.getKey(), "oracle:thin:@localhost/XE");
    assertThatThrownBy(() -> underTest.resolveProviderAndEnforceNonnullJdbcUrl(props))
            .isInstanceOf(MessageException.class)
            .hasMessage("Bad format of JDBC URL: oracle:thin:@localhost/XE");
}
// Number of listStatus threads; 0 indicates the option was not set on the
// command line (so configuration properties may override it).
public int getNumListstatusThreads() {
    return numListstatusThreads;
}
@Test
public void testSetNumListtatusThreads() {
    final DistCpOptions.Builder builder = new DistCpOptions.Builder(
            new Path("hdfs://localhost:8020/source/first"),
            new Path("hdfs://localhost:8020/target/"));
    // If command line argument isn't set, we expect .getNumListstatusThreads
    // option to be zero (so that we know when to override conf properties).
    Assert.assertEquals(0, builder.build().getNumListstatusThreads());
    builder.withNumListstatusThreads(12);
    Assert.assertEquals(12, builder.build().getNumListstatusThreads());
    builder.withNumListstatusThreads(0);
    Assert.assertEquals(0, builder.build().getNumListstatusThreads());
    // Ignore large number of threads.
    builder.withNumListstatusThreads(MAX_NUM_LISTSTATUS_THREADS * 2);
    Assert.assertEquals(MAX_NUM_LISTSTATUS_THREADS, builder.build().getNumListstatusThreads());
}
// Per-endpoint readiness futures tracked by this instance.
public Map<Endpoint, CompletableFuture<Void>> futures() {
    return futures;
}
@Test
public void testAddMultipleReadinessFutures() {
    // Endpoints become ready only once ALL registered readiness futures complete.
    CompletableFuture<Void> foo = new CompletableFuture<>();
    CompletableFuture<Void> bar = new CompletableFuture<>();
    EndpointReadyFutures readyFutures = new EndpointReadyFutures.Builder().
            addReadinessFuture("foo", foo).
            addReadinessFuture("bar", bar).
            build(Optional.empty(), INFO);
    assertEquals(new HashSet<>(Arrays.asList(EXTERNAL, INTERNAL)), readyFutures.futures().keySet());
    assertIncomplete(readyFutures, EXTERNAL, INTERNAL);
    // Completing only one of the two futures must not flip readiness.
    foo.complete(null);
    assertIncomplete(readyFutures, EXTERNAL, INTERNAL);
    bar.complete(null);
    assertComplete(readyFutures, EXTERNAL, INTERNAL);
}
public String getPluginStatusReportRequestBody(List<Map<String, String>> clusterProfiles) {
    // Wrap the cluster profile property maps under a single JSON key.
    JsonObject body = new JsonObject();
    body.add("all_cluster_profiles_properties", mapToJsonArray(clusterProfiles));
    return FORCED_EXPOSE_GSON.toJson(body);
}
@Test
public void shouldJSONizePluginStatusRequestBody() {
    // Two references to the same profile map must serialize as two JSON objects
    // under the "all_cluster_profiles_properties" array.
    Map<String, String> clusterProfile1 = new HashMap<>();
    clusterProfile1.put("key1", "value1");
    clusterProfile1.put("key2", "value2");
    List<Map<String, String>> clusterProfileConfigurations = new ArrayList<>();
    clusterProfileConfigurations.add(clusterProfile1);
    clusterProfileConfigurations.add(clusterProfile1);
    String json = new ElasticAgentExtensionConverterV5().getPluginStatusReportRequestBody(clusterProfileConfigurations);
    assertThatJson(json).isEqualTo("{" +
            "  \"all_cluster_profiles_properties\":[" +
            "    {" +
            "      \"key1\":\"value1\"," +
            "      \"key2\":\"value2\"" +
            "    }," +
            "    {" +
            "      \"key1\":\"value1\"," +
            "      \"key2\":\"value2\"" +
            "    }" +
            "  ]" +
            "}");
}
protected static HttpUrl buildUrl(@Nullable String serverUrl, String relativeUrl) {
    // Only absolute http(s) base URLs are accepted; a trailing slash on the base
    // is dropped before the relative part is appended.
    if (serverUrl == null) {
        throw new IllegalArgumentException("url must start with http:// or https://");
    }
    String lowerCased = serverUrl.toLowerCase(ENGLISH);
    if (!lowerCased.startsWith("http://") && !lowerCased.startsWith("https://")) {
        throw new IllegalArgumentException("url must start with http:// or https://");
    }
    return HttpUrl.parse(removeEnd(serverUrl, "/") + relativeUrl);
}
@Test
public void invalid_url() {
    // Non-http(s) schemes like file:// must be rejected.
    assertThatThrownBy(() -> BitbucketServerRestClient.buildUrl("file://wrong-url", ""))
            .isInstanceOf(IllegalArgumentException.class)
            .hasMessage("url must start with http:// or https://");
}
// Appends a value to the bag; appending after asyncClose() is a programming error.
public void append(T t) {
    checkState(
            !isClosed,
            "Bag user state is no longer usable because it is closed for %s",
            request.getStateKey());
    newValues.add(t);
}
@Test
public void testAppend() throws Exception {
    FakeBeamFnStateClient fakeClient =
            new FakeBeamFnStateClient(StringUtf8Coder.of(), ImmutableMap.of(key("A"), asList("A1")));
    BagUserState<String> userState =
            new BagUserState<>(
                    Caches.noop(), fakeClient, "instructionId", key("A"), StringUtf8Coder.of());
    userState.append("A2");
    // An iterable obtained before a later append must not observe that append.
    Iterable<String> stateBeforeA3 = userState.get();
    assertArrayEquals(new String[] {"A1", "A2"}, Iterables.toArray(stateBeforeA3, String.class));
    userState.append("A3");
    assertArrayEquals(new String[] {"A1", "A2"}, Iterables.toArray(stateBeforeA3, String.class));
    assertArrayEquals(
            new String[] {"A1", "A2", "A3"}, Iterables.toArray(userState.get(), String.class));
    // Closing flushes the appended values to the state backend and seals the state.
    userState.asyncClose();
    assertEquals(encode("A1", "A2", "A3"), fakeClient.getData().get(key("A")));
    assertThrows(IllegalStateException.class, () -> userState.append("A4"));
}
// Sets the warmup value and returns this builder for chaining.
public B warmup(Integer warmup) {
    this.warmup = warmup;
    return getThis();
}
@Test
void warmup() {
    // The warmup value set on the builder must survive into the built service config.
    ServiceBuilder builder = new ServiceBuilder();
    builder.warmup(100);
    Assertions.assertEquals(100, builder.build().getWarmup());
}
/**
 * Convenience overload that delegates with {@code skipIncompatibleTypes} (the trailing
 * boolean) set to {@code false}.
 */
public static void getSemanticPropsSingleFromString(
        SingleInputSemanticProperties result,
        String[] forwarded,
        String[] nonForwarded,
        String[] readSet,
        TypeInformation<?> inType,
        TypeInformation<?> outType) {
    getSemanticPropsSingleFromString(
            result, forwarded, nonForwarded, readSet, inType, outType, false);
}
@Test
void testForwardedNestedTuples() {
    // Flat source fields forwarded into nested target positions.
    String[] forwardedFields = {"f0->f0.f0; f1->f0.f1; f2->f0.f2"};
    SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, threeIntTupleType, nestedTupleType);
    assertThat(sp.getForwardingTargetFields(0, 0)).contains(0);
    assertThat(sp.getForwardingTargetFields(0, 1)).contains(1);
    assertThat(sp.getForwardingTargetFields(0, 2)).contains(2);

    // Nested source fields (f-style names) into a deeply nested target.
    forwardedFields[0] = "f0.f0->f1.f0.f2; f0.f1->f2; f2->f1.f2; f1->f0";
    sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, nestedTupleType, deepNestedTupleType);
    assertThat(sp.getForwardingTargetFields(0, 0)).contains(3);
    assertThat(sp.getForwardingTargetFields(0, 1)).contains(6);
    assertThat(sp.getForwardingTargetFields(0, 4)).contains(5);
    assertThat(sp.getForwardingTargetFields(0, 3)).contains(0);

    // Same mapping expressed with numeric field names.
    forwardedFields[0] = "0.0->1.0.2; 0.1->2; 2->1.2; 1->0";
    sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, nestedTupleType, deepNestedTupleType);
    assertThat(sp.getForwardingTargetFields(0, 0)).contains(3);
    assertThat(sp.getForwardingTargetFields(0, 1)).contains(6);
    assertThat(sp.getForwardingTargetFields(0, 4)).contains(5);
    assertThat(sp.getForwardingTargetFields(0, 3)).contains(0);

    // Wildcard forwarding of a nested tuple's fields (f-style).
    forwardedFields[0] = "f1.f0.*->f0.*; f0->f2";
    sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, deepNestedTupleType, nestedTupleType);
    assertThat(sp.getForwardingTargetFields(0, 1)).contains(0);
    assertThat(sp.getForwardingTargetFields(0, 2)).contains(1);
    assertThat(sp.getForwardingTargetFields(0, 3)).contains(2);
    assertThat(sp.getForwardingTargetFields(0, 0)).contains(4);

    // Wildcard forwarding with numeric field names.
    forwardedFields[0] = "1.0.*->0.*; 0->2";
    sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, deepNestedTupleType, nestedTupleType);
    assertThat(sp.getForwardingTargetFields(0, 1)).contains(0);
    assertThat(sp.getForwardingTargetFields(0, 2)).contains(1);
    assertThat(sp.getForwardingTargetFields(0, 3)).contains(2);
    assertThat(sp.getForwardingTargetFields(0, 0)).contains(4);

    // Forwarding a whole nested tuple expands to all of its fields (f-style).
    forwardedFields[0] = "f1.f0->f0; f0->f2";
    sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, deepNestedTupleType, nestedTupleType);
    assertThat(sp.getForwardingTargetFields(0, 1)).contains(0);
    assertThat(sp.getForwardingTargetFields(0, 2)).contains(1);
    assertThat(sp.getForwardingTargetFields(0, 3)).contains(2);
    assertThat(sp.getForwardingTargetFields(0, 0)).contains(4);

    // Same, numeric field names.
    forwardedFields[0] = "1.0->0; 0->2";
    sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, deepNestedTupleType, nestedTupleType);
    assertThat(sp.getForwardingTargetFields(0, 1)).contains(0);
    assertThat(sp.getForwardingTargetFields(0, 2)).contains(1);
    assertThat(sp.getForwardingTargetFields(0, 3)).contains(2);
    assertThat(sp.getForwardingTargetFields(0, 0)).contains(4);

    // Self-forwarding of selected nested fields; unlisted fields are not forwarded.
    forwardedFields[0] = "f1.f0.f1; f1.f1; f2";
    sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, deepNestedTupleType, deepNestedTupleType);
    assertThat(sp.getForwardingTargetFields(0, 2)).contains(2);
    assertThat(sp.getForwardingTargetFields(0, 4)).contains(4);
    assertThat(sp.getForwardingTargetFields(0, 6)).contains(6);
    assertThat(sp.getForwardingTargetFields(0, 0)).isEmpty();
    assertThat(sp.getForwardingTargetFields(0, 1)).isEmpty();
    assertThat(sp.getForwardingTargetFields(0, 3)).isEmpty();
    assertThat(sp.getForwardingTargetFields(0, 5)).isEmpty();

    // Self-forwarding with a wildcard on a nested tuple.
    forwardedFields[0] = "f1.f0.*; f1.f2";
    sp = new SingleInputSemanticProperties();
    SemanticPropUtil.getSemanticPropsSingleFromString(
            sp, forwardedFields, null, null, deepNestedTupleType, deepNestedTupleType);
    assertThat(sp.getForwardingTargetFields(0, 1)).contains(1);
    assertThat(sp.getForwardingTargetFields(0, 2)).contains(2);
    assertThat(sp.getForwardingTargetFields(0, 3)).contains(3);
    assertThat(sp.getForwardingTargetFields(0, 5)).contains(5);
    assertThat(sp.getForwardingTargetFields(0, 0)).isEmpty();
    assertThat(sp.getForwardingTargetFields(0, 4)).isEmpty();
    assertThat(sp.getForwardingTargetFields(0, 6)).isEmpty();
}
@Override
public List<ConfigInfoStateWrapper> findDeletedConfig(final Timestamp startTime, long startId, int pageSize) {
    // Build a dialect-specific paged query via the mapper manager, then execute it.
    try {
        HistoryConfigInfoMapper mapper = mapperManager.findMapper(
                dataSourceService.getDataSourceType(), TableConstant.HIS_CONFIG_INFO);
        MapperContext context = new MapperContext();
        context.putWhereParameter(FieldConstant.START_TIME, startTime);
        context.putWhereParameter(FieldConstant.PAGE_SIZE, pageSize);
        context.putWhereParameter(FieldConstant.LAST_MAX_ID, startId);
        MapperResult mapperResult = mapper.findDeletedConfig(context);
        return jt.query(mapperResult.getSql(), mapperResult.getParamList().toArray(),
                CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER);
    } catch (DataAccessException e) {
        // Log the database failure prominently, then propagate to the caller.
        LogUtil.FATAL_LOG.error("[db-error] " + e, e);
        throw e;
    }
}
@Test
void testFindDeletedConfig() {
    //mock query list return
    ConfigInfoStateWrapper mockObj1 = new ConfigInfoStateWrapper();
    mockObj1.setDataId("data_id1");
    mockObj1.setGroup("group_id1");
    mockObj1.setTenant("tenant_id1");
    mockObj1.setMd5("md51");
    mockObj1.setLastModified(System.currentTimeMillis());
    List<ConfigInfoStateWrapper> list = new ArrayList<>();
    list.add(mockObj1);
    ConfigInfoStateWrapper mockObj2 = new ConfigInfoStateWrapper();
    mockObj2.setDataId("data_id2");
    mockObj2.setGroup("group_id2");
    mockObj2.setTenant("tenant_id2");
    mockObj2.setMd5("md52");
    list.add(mockObj2);
    int pageSize = 1233;
    long startId = 23456;
    Timestamp timestamp = new Timestamp(System.currentTimeMillis());
    Mockito.when(
            jdbcTemplate.query(anyString(), eq(new Object[] {timestamp, startId, pageSize}),
                    eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER)))
            .thenReturn(list);
    //execute
    List<ConfigInfoStateWrapper> deletedConfig = externalHistoryConfigInfoPersistService.findDeletedConfig(timestamp, startId, pageSize);
    //expect verify
    assertEquals("data_id1", deletedConfig.get(0).getDataId());
    assertEquals("group_id1", deletedConfig.get(0).getGroup());
    assertEquals("tenant_id1", deletedConfig.get(0).getTenant());
    assertEquals(mockObj1.getLastModified(), deletedConfig.get(0).getLastModified());
    assertEquals("data_id2", deletedConfig.get(1).getDataId());
    assertEquals("group_id2", deletedConfig.get(1).getGroup());
    assertEquals("tenant_id2", deletedConfig.get(1).getTenant());
    assertEquals(mockObj2.getLastModified(), deletedConfig.get(1).getLastModified());
    //mock exception: a connection failure must propagate out of the service.
    Mockito.when(
            jdbcTemplate.query(anyString(), eq(new Object[] {timestamp, startId, pageSize}),
                    eq(CONFIG_INFO_STATE_WRAPPER_ROW_MAPPER)))
            .thenThrow(new CannotGetJdbcConnectionException("conn error"));
    try {
        externalHistoryConfigInfoPersistService.findDeletedConfig(timestamp, startId, pageSize);
        // NOTE(review): assertTrue(false) is an unusual way to fail; fail() would be clearer.
        assertTrue(false);
    } catch (Exception e) {
        assertEquals("conn error", e.getMessage());
    }
}
@Override public double dot(SGDVector other) { if (other.size() != elements.length) { throw new IllegalArgumentException("Can't dot two vectors of different dimension, this = " + elements.length + ", other = " + other.size()); } double score = 0.0; if (other instanceof DenseVector) { for (int i = 0; i < elements.length; i++) { score += get(i) * other.get(i); } } else { // else must be sparse for (VectorTuple tuple : other) { score += get(tuple.index) * tuple.value; } } return score; }
@Test
public void overlappingDot() {
    // The dot product is symmetric; both orders must give the same value, -15.
    DenseVector a = generateVectorA();
    DenseVector b = generateVectorB();
    assertEquals(a.dot(b), b.dot(a), 1e-10);
    assertEquals(-15.0, a.dot(b), 1e-10);
}
@Override
public Num calculate(BarSeries series, Position position) {
    // Open positions contribute nothing.
    if (!position.isClosed()) {
        return series.zero();
    }
    // Profit basis depends on whether trading costs are excluded;
    // only negative results (losses) are reported, profits map to zero.
    Num result = excludeCosts ? position.getGrossProfit() : position.getProfit();
    return result.isNegative() ? result : series.zero();
}
@Test
public void calculateProfitWithShortPositions() {
    // Two short positions: sell@95/buy@100 loses 5, sell@70/buy@100 loses 30,
    // so the total loss is -35.
    MockBarSeries series = new MockBarSeries(numFunction, 95, 100, 70, 80, 85, 100);
    TradingRecord tradingRecord = new BaseTradingRecord(Trade.sellAt(0, series), Trade.buyAt(1, series),
            Trade.sellAt(2, series), Trade.buyAt(5, series));
    AnalysisCriterion loss = getCriterion(true);
    assertNumEquals(-35, loss.calculate(series, tradingRecord));
}
@Override
public boolean isLatest(Topology topology) {
    // The given topology is latest when its default graph equals the default
    // graph of the topology that is current right now.
    Topology currentTopology = currentTopology();
    return defaultTopology(topology).getGraph()
            .equals(defaultTopology(currentTopology).getGraph());
}
@Test(expected = NullPointerException.class)
public void testIsLatestByNullTopology() {
    manager.registerTenantId(TenantId.tenantId(tenantIdValue1));
    VirtualNetwork virtualNetwork = manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1));
    TopologyService topologyService = manager.get(virtualNetwork.id(), TopologyService.class);
    // test the isLatest() method with a null topology.
    topologyService.isLatest(null);
}
/**
 * Maps a SeaTunnel {@link Column} definition back to an IRIS column type definition.
 * Lengths, precision and scale are clamped to IRIS limits with a warning when the
 * requested values are out of range; unsupported SQL types raise a connector error.
 */
@Override
public BasicTypeDefine reconvert(Column column) {
    BasicTypeDefine.BasicTypeDefineBuilder builder =
            BasicTypeDefine.builder()
                    .name(column.getName())
                    .precision(column.getColumnLength())
                    .length(column.getColumnLength())
                    .nullable(column.isNullable())
                    .comment(column.getComment())
                    .scale(column.getScale())
                    .defaultValue(column.getDefaultValue());
    switch (column.getDataType().getSqlType()) {
        case NULL:
            builder.columnType(IRIS_NULL);
            builder.dataType(IRIS_NULL);
            break;
        case STRING:
            // Unknown/invalid length -> widest VARCHAR; too-long -> LONG VARCHAR.
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(String.format("%s(%s)", IRIS_VARCHAR, MAX_VARCHAR_LENGTH));
                builder.dataType(IRIS_VARCHAR);
            } else if (column.getColumnLength() < MAX_VARCHAR_LENGTH) {
                builder.columnType(
                        String.format("%s(%s)", IRIS_VARCHAR, column.getColumnLength()));
                builder.dataType(IRIS_VARCHAR);
            } else {
                builder.columnType(IRIS_LONG_VARCHAR);
                builder.dataType(IRIS_LONG_VARCHAR);
            }
            break;
        case BOOLEAN:
            builder.columnType(IRIS_BIT);
            builder.dataType(IRIS_BIT);
            break;
        case TINYINT:
            builder.columnType(IRIS_TINYINT);
            builder.dataType(IRIS_TINYINT);
            break;
        case SMALLINT:
            builder.columnType(IRIS_SMALLINT);
            builder.dataType(IRIS_SMALLINT);
            break;
        case INT:
            builder.columnType(IRIS_INTEGER);
            builder.dataType(IRIS_INTEGER);
            break;
        case BIGINT:
            builder.columnType(IRIS_BIGINT);
            builder.dataType(IRIS_BIGINT);
            break;
        case FLOAT:
            builder.columnType(IRIS_FLOAT);
            builder.dataType(IRIS_FLOAT);
            break;
        case DOUBLE:
            builder.columnType(IRIS_DOUBLE);
            builder.dataType(IRIS_DOUBLE);
            break;
        case DECIMAL:
            // Clamp precision/scale into IRIS's supported ranges, logging every adjustment.
            DecimalType decimalType = (DecimalType) column.getDataType();
            long precision = decimalType.getPrecision();
            int scale = decimalType.getScale();
            if (scale < 0) {
                scale = 0;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is scale less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (scale > MAX_SCALE) {
                scale = MAX_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum scale of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_SCALE,
                        precision,
                        scale);
            }
            if (precision < scale) {
                precision = scale;
            }
            if (precision <= 0) {
                precision = DEFAULT_PRECISION;
                scale = DEFAULT_SCALE;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which is precision less than 0, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        precision,
                        scale);
            } else if (precision > MAX_PRECISION) {
                scale = MAX_SCALE;
                precision = MAX_PRECISION;
                log.warn(
                        "The decimal column {} type decimal({},{}) is out of range, "
                                + "which exceeds the maximum precision of {}, "
                                + "it will be converted to decimal({},{})",
                        column.getName(),
                        decimalType.getPrecision(),
                        decimalType.getScale(),
                        MAX_PRECISION,
                        precision,
                        scale);
            }
            builder.columnType(String.format("%s(%s,%s)", IRIS_DECIMAL, precision, scale));
            builder.dataType(IRIS_DECIMAL);
            builder.precision(precision);
            builder.scale(scale);
            break;
        case BYTES:
            // Unknown/invalid length or too-long -> LONG BINARY; otherwise sized BINARY.
            if (column.getColumnLength() == null || column.getColumnLength() <= 0) {
                builder.columnType(IRIS_LONG_BINARY);
                builder.dataType(IRIS_LONG_BINARY);
            } else if (column.getColumnLength() < MAX_BINARY_LENGTH) {
                builder.dataType(IRIS_BINARY);
                builder.columnType(
                        String.format("%s(%s)", IRIS_BINARY, column.getColumnLength()));
            } else {
                builder.columnType(IRIS_LONG_BINARY);
                builder.dataType(IRIS_LONG_BINARY);
            }
            break;
        case DATE:
            builder.columnType(IRIS_DATE);
            builder.dataType(IRIS_DATE);
            break;
        case TIME:
            builder.dataType(IRIS_TIME);
            // Clamp the time scale to the IRIS maximum, logging any adjustment.
            if (Objects.nonNull(column.getScale()) && column.getScale() > 0) {
                Integer timeScale = column.getScale();
                if (timeScale > MAX_TIME_SCALE) {
                    timeScale = MAX_TIME_SCALE;
                    log.warn(
                            "The time column {} type time({}) is out of range, "
                                    + "which exceeds the maximum scale of {}, "
                                    + "it will be converted to time({})",
                            column.getName(),
                            column.getScale(),
                            MAX_TIME_SCALE,
                            timeScale);
                }
                builder.columnType(String.format("%s(%s)", IRIS_TIME, timeScale));
                builder.scale(timeScale);
            } else {
                builder.columnType(IRIS_TIME);
            }
            break;
        case TIMESTAMP:
            builder.columnType(IRIS_TIMESTAMP2);
            builder.dataType(IRIS_TIMESTAMP2);
            break;
        default:
            throw CommonError.convertToConnectorTypeError(
                    DatabaseIdentifier.IRIS,
                    column.getDataType().getSqlType().name(),
                    column.getName());
    }
    return builder.build();
}
// Verifies that a SHORT column is reconverted to the IRIS SMALLINT type
// for both the rendered column type and the data type.
@Test
public void testReconvertShort() {
    Column column =
            PhysicalColumn.builder().name("test").dataType(BasicType.SHORT_TYPE).build();
    BasicTypeDefine typeDefine = IrisTypeConverter.INSTANCE.reconvert(column);
    Assertions.assertEquals(column.getName(), typeDefine.getName());
    Assertions.assertEquals(IrisTypeConverter.IRIS_SMALLINT, typeDefine.getColumnType());
    Assertions.assertEquals(IrisTypeConverter.IRIS_SMALLINT, typeDefine.getDataType());
}
/**
 * Returns an aggregate operation that computes the arithmetic mean of the
 * {@code long} values extracted from the input items by {@code getLongValueFn}.
 * <p>
 * Overflow of the item count or of the running sum fails fast with
 * {@link ArithmeticException} instead of silently wrapping. When nothing was
 * accumulated, the finish step divides 0.0 by 0 and therefore yields
 * {@code NaN}.
 *
 * @param getLongValueFn extracts the long value to average from each item;
 *                       must be serializable
 * @param <T> the type of the input items
 */
@Nonnull
public static <T> AggregateOperation1<T, LongLongAccumulator, Double> averagingLong(
        @Nonnull ToLongFunctionEx<? super T> getLongValueFn
) {
    checkSerializable(getLongValueFn, "getLongValueFn");
    // count == accumulator.value1
    // sum == accumulator.value2
    return AggregateOperation
            .withCreate(LongLongAccumulator::new)
            .andAccumulate((LongLongAccumulator a, T i) -> {
                // a bit faster check than in addExact, specialized for increment
                if (a.get1() == Long.MAX_VALUE) {
                    throw new ArithmeticException("Counter overflow");
                }
                a.set1(a.get1() + 1);
                a.set2(Math.addExact(a.get2(), getLongValueFn.applyAsLong(i)));
            })
            .andCombine((a1, a2) -> {
                a1.set1(Math.addExact(a1.get1(), a2.get1()));
                a1.set2(Math.addExact(a1.get2(), a2.get2()));
            })
            .andDeduct((a1, a2) -> {
                a1.set1(Math.subtractExact(a1.get1(), a2.get1()));
                a1.set2(Math.subtractExact(a1.get2(), a2.get2()));
            })
            // finish does not mutate the accumulator, so the result can be exported mid-window
            .andExportFinish(a -> (double) a.get2() / a.get1());
}
// With no accumulated items the average is 0/0, which must finish as NaN.
@Test
public void when_averagingLong_noInput_then_NaN() {
    // Given
    AggregateOperation1<Long, LongLongAccumulator, Double> aggrOp = averagingLong(Long::longValue);
    LongLongAccumulator acc = aggrOp.createFn().get();

    // When
    double result = aggrOp.finishFn().apply(acc);

    // Then
    assertEquals(Double.NaN, result, 0.0);
}
/**
 * Positioned read that serves data from the local page cache when caching is
 * enabled for the current thread; otherwise, or when the cache read fails and
 * fallback is enabled, the read is delegated to the external file stream.
 *
 * @param pos position in the file to read from
 * @param b   destination buffer
 * @param off offset in {@code b} to start writing at
 * @param len maximum number of bytes to read
 * @return the number of bytes actually read
 * @throws IOException if the cache read fails and fallback is disabled,
 *                     or the external read fails
 */
@Override
public int positionedRead(long pos, byte[] b, int off, int len) throws IOException {
    if (!CachePerThreadContext.get().getCacheEnabled()) {
        // Caching is disabled for this thread: account the request against the
        // external-read metrics and bypass the cache entirely.
        MetricsSystem.meter(MetricKey.CLIENT_CACHE_BYTES_REQUESTED_EXTERNAL.getName())
            .mark(len);
        MetricsSystem.counter(MetricKey.CLIENT_CACHE_EXTERNAL_REQUESTS.getName()).inc();
        len = getExternalFileInStream().positionedRead(pos, b, off, len);
        MultiDimensionalMetricsSystem.EXTERNAL_DATA_READ.inc(len);
        return len;
    }
    try {
        return readInternal(new ByteArrayTargetBuffer(b, off), off, len,
            ReadType.READ_INTO_BYTE_ARRAY, pos, true);
    } catch (IOException | RuntimeException e) {
        LOG.warn("Failed to read from Alluxio's page cache.", e);
        if (mFallbackEnabled) {
            // Cache read failed: count the fallback and retry through the external stream.
            MetricsSystem.counter(MetricKey.CLIENT_CACHE_POSITION_READ_FALLBACK.getName()).inc();
            len = getExternalFileInStream().positionedRead(pos, b, off, len);
            MultiDimensionalMetricsSystem.EXTERNAL_DATA_READ.inc(len);
            return len;
        }
        // No fallback configured: surface the cache failure to the caller.
        throw e;
    }
}
// Verifies positioned-read fallback: with fallback disabled a bad cache read surfaces
// the underlying exception; with fallback enabled the read is served by the external
// stream and the fallback counter is incremented.
@Test
public void testPositionReadFallBack() throws Exception {
    int pages = 10;
    int fileSize = mPageSize * pages;
    byte[] testData = BufferUtils.getIncreasingByteArray(fileSize);
    ByteArrayCacheManager manager = new ByteArrayCacheManager();
    sConf.set(PropertyKey.USER_CLIENT_CACHE_FALLBACK_ENABLED, false);
    //by default local cache fallback is not enabled, the read should fail for any error
    LocalCacheFileInStream streamWithOutFallback = setupWithSingleFile(testData, manager);
    try {
        streamWithOutFallback.positionedRead(0, new byte[10], 100, 100);
        fail("Expect position read fail here.");
    } catch (ArrayIndexOutOfBoundsException e) {
        //expected exception
    }
    sConf.set(PropertyKey.USER_CLIENT_CACHE_FALLBACK_ENABLED, true);
    LocalCacheFileInStream streamWithFallback = setupWithSingleFile(testData, manager);
    Assert.assertEquals(100,
        streamWithFallback.positionedRead(0, new byte[10], 100, 100));
    Assert.assertEquals(1,
        MetricsSystem.counter(MetricKey.CLIENT_CACHE_POSITION_READ_FALLBACK.getName()).getCount());
}
@VisibleForTesting public void validateSmsTemplateCodeDuplicate(Long id, String code) { SmsTemplateDO template = smsTemplateMapper.selectByCode(code); if (template == null) { return; } // 如果 id 为空,说明不用比较是否为相同 id 的字典类型 if (id == null) { throw exception(SMS_TEMPLATE_CODE_DUPLICATE, code); } if (!template.getId().equals(id)) { throw exception(SMS_TEMPLATE_CODE_DUPLICATE, code); } }
// Creating (id == null) with an already-used code must raise SMS_TEMPLATE_CODE_DUPLICATE.
@Test
public void testValidateSmsTemplateCodeDuplicate_valueDuplicateForCreate() {
    // Prepare parameters
    String code = randomString();
    // Mock data
    smsTemplateMapper.insert(randomSmsTemplateDO(o -> o.setCode(code)));
    // Call and verify the expected exception
    assertServiceException(() -> smsTemplateService.validateSmsTemplateCodeDuplicate(null, code),
            SMS_TEMPLATE_CODE_DUPLICATE, code);
}
/**
 * Destroys this buffer: atomically snapshots and clears all buffered pages,
 * zeroes the byte count, marks the buffer as finished and destroyed, and
 * detaches any pending read. Page dereferencing and completion of the pending
 * read are performed outside the monitor so listener callbacks never run
 * while the lock is held.
 */
public void destroy() {
    List<SerializedPageReference> removedPages;
    PendingRead pendingRead;
    synchronized (this) {
        // Snapshot the pages under the lock, then clear state atomically.
        removedPages = ImmutableList.copyOf(pages);
        pages.clear();

        bufferedBytes.getAndSet(0);

        noMorePages = true;
        destroyed.set(true);

        // Detach the pending read so it can be completed after releasing the lock.
        pendingRead = this.pendingRead;
        this.pendingRead = null;
    }

    // Release page references outside the lock to avoid running listeners while synchronized.
    dereferencePages(removedPages, onPagesReleased);

    if (pendingRead != null) {
        pendingRead.completeResultFutureWithEmpty();
    }
}
// destroy() drops all buffered pages; later reads complete with finished empty results.
@Test
public void testDestroy() {
    ClientBuffer buffer = new ClientBuffer(TASK_INSTANCE_ID, BUFFER_ID, NOOP_RELEASE_LISTENER);

    // add 5 pages the buffer
    for (int i = 0; i < 5; i++) {
        addPage(buffer, createPage(i));
    }
    buffer.setNoMorePages();

    // read a page
    assertBufferResultEquals(TYPES, getBufferResult(buffer, 0, sizeOfPages(1), NO_WAIT), bufferResult(0, createPage(0)));

    // destroy without acknowledgement
    buffer.destroy();
    assertBufferDestroyed(buffer, 0);

    // follow token from previous read, which should return a finished result
    assertBufferResultEquals(TYPES, getBufferResult(buffer, 1, sizeOfPages(1), NO_WAIT), emptyResults(TASK_INSTANCE_ID, 0, true));
}
/**
 * Creates a {@code <=} comparison expression for two primitive operands of the
 * same type (int, long, float or double).
 *
 * @throws IllegalArgumentException if the operand type is not supported
 */
static BytecodeExpression lessThanOrEqual(BytecodeExpression left, BytecodeExpression right)
{
    checkArgumentTypes(left, right);

    Class<?> primitiveType = left.getType().getPrimitiveType();
    OpCode cmp;
    OpCode jumpWhenGreater;
    if (primitiveType == int.class) {
        // ints compare and branch in a single instruction; no separate comparison op
        cmp = null;
        jumpWhenGreater = IF_ICMPGT;
    }
    else if (primitiveType == long.class) {
        cmp = LCMP;
        jumpWhenGreater = IFGT;
    }
    else if (primitiveType == float.class) {
        cmp = FCMPG;
        jumpWhenGreater = IFGT;
    }
    else if (primitiveType == double.class) {
        cmp = DCMPG;
        jumpWhenGreater = IFGT;
    }
    else {
        throw new IllegalArgumentException("Less than or equal does not support " + primitiveType);
    }
    return new ComparisonBytecodeExpression("<=", cmp, jumpWhenGreater, left, right);
}
// Exercises <= generation for all supported primitive types, including NaN operands
// for float and double, against the equivalent Java expressions.
@Test
public void testLessThanOrEqual()
        throws Exception
{
    assertBytecodeExpression(lessThanOrEqual(constantInt(3), constantInt(7)), 3 <= 7, "(3 <= 7)");
    assertBytecodeExpression(lessThanOrEqual(constantInt(7), constantInt(3)), 7 <= 3, "(7 <= 3)");
    assertBytecodeExpression(lessThanOrEqual(constantInt(7), constantInt(7)), 7 <= 7, "(7 <= 7)");

    assertBytecodeExpression(lessThanOrEqual(constantLong(3L), constantLong(7L)), 3L <= 7L, "(3L <= 7L)");
    assertBytecodeExpression(lessThanOrEqual(constantLong(7L), constantLong(3L)), 7L <= 3L, "(7L <= 3L)");
    assertBytecodeExpression(lessThanOrEqual(constantLong(7L), constantLong(7L)), 7L <= 7L, "(7L <= 7L)");

    assertBytecodeExpression(lessThanOrEqual(constantFloat(3.3f), constantFloat(7.7f)), 3.3f <= 7.7f, "(3.3f <= 7.7f)");
    assertBytecodeExpression(lessThanOrEqual(constantFloat(7.7f), constantFloat(3.3f)), 7.7f <= 3.3f, "(7.7f <= 3.3f)");
    assertBytecodeExpression(lessThanOrEqual(constantFloat(7.7f), constantFloat(7.7f)), 7.7f <= 7.7f, "(7.7f <= 7.7f)");
    assertBytecodeExpression(lessThanOrEqual(constantFloat(Float.NaN), constantFloat(7.7f)), Float.NaN <= 7.7f, "(NaNf <= 7.7f)");
    assertBytecodeExpression(lessThanOrEqual(constantFloat(7.7f), constantFloat(Float.NaN)), 7.7f <= Float.NaN, "(7.7f <= NaNf)");

    assertBytecodeExpression(lessThanOrEqual(constantDouble(3.3), constantDouble(7.7)), 3.3 <= 7.7, "(3.3 <= 7.7)");
    assertBytecodeExpression(lessThanOrEqual(constantDouble(7.7), constantDouble(3.3)), 7.7 <= 3.3, "(7.7 <= 3.3)");
    assertBytecodeExpression(lessThanOrEqual(constantDouble(7.7), constantDouble(7.7)), 7.7 <= 7.7, "(7.7 <= 7.7)");
    assertBytecodeExpression(lessThanOrEqual(constantDouble(Double.NaN), constantDouble(7.7)), Double.NaN <= 7.7, "(NaN <= 7.7)");
    assertBytecodeExpression(lessThanOrEqual(constantDouble(7.7), constantDouble(Double.NaN)), 7.7 <= Double.NaN, "(7.7 <= NaN)");
}
/**
 * Swaps the YAML encrypt column configuration into its object form. The cipher
 * column is mandatory; assisted-query and like-query columns are copied over
 * only when present in the YAML.
 */
@Override
public EncryptColumnRuleConfiguration swapToObject(final YamlEncryptColumnRuleConfiguration yamlConfig) {
    EncryptColumnRuleConfiguration result = new EncryptColumnRuleConfiguration(
            yamlConfig.getName(), encryptColumnItemSwapper.swapToObject(yamlConfig.getCipher()));
    if (yamlConfig.getAssistedQuery() != null) {
        result.setAssistedQuery(encryptColumnItemSwapper.swapToObject(yamlConfig.getAssistedQuery()));
    }
    if (yamlConfig.getLikeQuery() != null) {
        result.setLikeQuery(encryptColumnItemSwapper.swapToObject(yamlConfig.getLikeQuery()));
    }
    return result;
}
// Full YAML -> object swap: name, cipher, optional assisted/like query columns and encryptor.
@Test
void assertSwapToObject() {
    YamlEncryptColumnRuleConfigurationSwapper swapper = new YamlEncryptColumnRuleConfigurationSwapper();
    EncryptColumnRuleConfiguration actual = swapper.swapToObject(buildYamlEncryptColumnRuleConfiguration());
    assertThat(actual.getName(), is("logicColumn"));
    assertThat(actual.getCipher().getName(), is("cipherColumn"));
    assertTrue(actual.getAssistedQuery().isPresent());
    assertThat(actual.getAssistedQuery().get().getName(), is("assistedQueryColumn"));
    assertTrue(actual.getLikeQuery().isPresent());
    assertThat(actual.getLikeQuery().get().getName(), is("likeQueryColumn"));
    assertThat(actual.getCipher().getEncryptorName(), is("encryptorName"));
}
/**
 * Parses forwarded / non-forwarded / read-field annotation strings into
 * {@code result} for a single-input function. Convenience overload that
 * delegates to the full variant with the trailing flag set to {@code false}
 * (presumably "skip incompatible types" — confirm against the full overload,
 * whose parameter name is not visible here).
 *
 * @param result       semantic properties to populate
 * @param forwarded    forwarded-field annotation strings, may be null
 * @param nonForwarded non-forwarded-field annotation strings, may be null
 * @param readSet      read-field annotation strings, may be null
 * @param inType       input type of the function
 * @param outType      output type of the function
 */
public static void getSemanticPropsSingleFromString(
        SingleInputSemanticProperties result,
        String[] forwarded,
        String[] nonForwarded,
        String[] readSet,
        TypeInformation<?> inType,
        TypeInformation<?> outType) {
    getSemanticPropsSingleFromString(
            result, forwarded, nonForwarded, readSet, inType, outType, false);
}
// A semicolon-separated field list in a non-forwarded annotation is invalid and must throw.
@Test
void testNonForwardedInvalidTypes3() {
    String[] nonForwardedFields = {"f1; f2"};
    SingleInputSemanticProperties sp = new SingleInputSemanticProperties();
    assertThatThrownBy(
                    () ->
                            SemanticPropUtil.getSemanticPropsSingleFromString(
                                    sp,
                                    null,
                                    nonForwardedFields,
                                    null,
                                    threeIntTupleType,
                                    fiveIntTupleType))
            .isInstanceOf(InvalidSemanticAnnotationException.class);
}
/**
 * Parses a raw configurator config into a list of override URLs.
 * <p>
 * First tries the legacy JSON-array form (e.g.
 * {@code [ "override://xxx", "override://xxx" ]}); if that yields nothing,
 * the config is parsed as a structured {@code ConfiguratorConfig} and each
 * config item is expanded into URLs. Application scope uses
 * {@code appItemToUrls}; any other scope value falls back to service scope.
 *
 * @param rawConfig raw configuration text
 * @return the parsed override URLs, never null
 */
public static List<URL> parseConfigurators(String rawConfig) {
    // compatible url JsonArray, such as [ "override://xxx", "override://xxx" ]
    List<URL> compatibleUrls = parseJsonArray(rawConfig);
    if (CollectionUtils.isNotEmpty(compatibleUrls)) {
        return compatibleUrls;
    }

    List<URL> urls = new ArrayList<>();
    ConfiguratorConfig configuratorConfig = parseObject(rawConfig);

    String scope = configuratorConfig.getScope();
    List<ConfigItem> items = configuratorConfig.getConfigs();

    if (ConfiguratorConfig.SCOPE_APPLICATION.equals(scope)) {
        items.forEach(item -> urls.addAll(appItemToUrls(item, configuratorConfig)));
    } else {
        // service scope by default.
        items.forEach(item -> urls.addAll(serviceItemToUrls(item, configuratorConfig)));
    }
    return urls;
}
// Parses ConsumerSpecificProviders.yml and verifies address, interface and override parameters.
@Test
void parseConsumerSpecificProvidersTest() throws IOException {
    try (InputStream yamlStream = this.getClass().getResourceAsStream("/ConsumerSpecificProviders.yml")) {
        List<URL> urls = ConfigParser.parseConfigurators(streamToString(yamlStream));
        Assertions.assertNotNull(urls);
        Assertions.assertEquals(1, urls.size());
        URL url = urls.get(0);
        Assertions.assertEquals("127.0.0.1", url.getAddress());
        Assertions.assertEquals("*", url.getServiceInterface());
        Assertions.assertEquals(6666, url.getParameter(TIMEOUT_KEY, 0));
        Assertions.assertEquals("random", url.getParameter(LOADBALANCE_KEY));
        Assertions.assertEquals("127.0.0.1:20880", url.getParameter(OVERRIDE_PROVIDERS_KEY));
        Assertions.assertEquals("demo-consumer", url.getApplication());
    }
}
/**
 * Finds all records whose field {@code fieldName} matches {@code fieldValue},
 * scanning every type state in the read engine.
 *
 * @return a map from type name to the bit set of matching record ordinals
 */
public Map<String, BitSet> findMatchingRecords(String fieldName, String fieldValue) {
    Map<String, BitSet> matchesByType = new HashMap<>();
    for (HollowTypeReadState typeState : readEngine.getTypeStates()) {
        // Each type state contributes its own bit set of matching ordinals.
        augmentMatchingRecords(typeState, fieldName, fieldValue, matchesByType);
    }
    return matchesByType;
}
// A query restricted to "TypeA" must match only records of that type.
@Test
public void matchesOnlyRecordsOfSpecifiedType() {
    HollowFieldMatchQuery query = new HollowFieldMatchQuery(stateEngine);

    Map<String, BitSet> matches = query.findMatchingRecords("TypeA", "id", "2");

    Assert.assertEquals(1, matches.size());
    Assert.assertEquals(1, matches.get("TypeA").cardinality());
    Assert.assertTrue(matches.get("TypeA").get(1));
}
/**
 * Creates a gzip-compressed tar archive of {@code source} (file or folder) at
 * {@code dest}. If archiving fails, the partially written destination file is
 * deleted before the exception is rethrown.
 *
 * @param source existing file or folder to archive
 * @param dest   destination archive path; must not already exist
 * @throws IllegalArgumentException if {@code source} does not exist or
 *                                  {@code dest} already exists
 * @throws IOException if archiving fails
 */
public static void tar(@NotNull File source, @NotNull File dest) throws IOException {
    if (!source.exists()) {
        throw new IllegalArgumentException("No source file or folder exists: " + source.getAbsolutePath());
    }
    if (dest.exists()) {
        throw new IllegalArgumentException("Destination refers to existing file or folder: " + dest.getAbsolutePath());
    }

    try (TarArchiveOutputStream tarOut = new TarArchiveOutputStream(new GZIPOutputStream(
            new BufferedOutputStream(Files.newOutputStream(dest.toPath())), 0x1000))) {
        doTar("", source, tarOut);
    } catch (IOException e) {
        IOUtil.deleteFile(dest); // operation failed, let's remove the destination archive
        throw e;
    }
}
// tar() must reject an existing destination with IllegalArgumentException.
// NOTE(review): the source file passed here likely does not exist either, and the
// source-existence check also throws IllegalArgumentException (and runs first), so
// this test may pass for the wrong reason — confirm the source fixture exists.
@Test
public void testDestExists() throws Exception {
    dest.createNewFile();
    try {
        CompressBackupUtil.tar(new File(randName + ".txt"), dest);
    } catch (IllegalArgumentException e) {
        return;
    }
    Assert.fail("Destination file/folder already exists. Should have thrown an exception.");
}
/**
 * Reports whether this aspect extension can handle the given return type,
 * i.e. Reactor's {@code Flux} or {@code Mono} (or a subtype of either).
 */
@Override
public boolean canHandleReturnType(Class returnType) {
    boolean isFlux = Flux.class.isAssignableFrom(returnType);
    boolean isMono = Mono.class.isAssignableFrom(returnType);
    return isFlux || isMono;
}
// The aspect must report it can handle both Mono and Flux return types.
@Test
public void testCheckTypes() {
    assertThat(reactorRateLimiterAspectExt.canHandleReturnType(Mono.class)).isTrue();
    assertThat(reactorRateLimiterAspectExt.canHandleReturnType(Flux.class)).isTrue();
}
/**
 * Builds the output stream, translating builder options into S3A create-file
 * options. Keys prefixed with {@code fs.s3a.create.header.} (mandatory or
 * optional) are extracted as object headers; the remaining mandatory keys are
 * validated against the known create-file keys. Append is rejected, and
 * either CREATE or OVERWRITE must be requested.
 *
 * @return the created output stream
 * @throws UnsupportedOperationException if the APPEND flag is set
 * @throws PathIOException if neither CREATE nor OVERWRITE is specified
 * @throws IOException on failure from the create callback
 */
@Override
public FSDataOutputStream build() throws IOException {
    Path path = getPath();
    final Configuration options = getOptions();
    final Map<String, String> headers = new HashMap<>();
    final Set<String> mandatoryKeys = getMandatoryKeys();
    final Set<String> keysToValidate = new HashSet<>();

    // pick up all headers from the mandatory list and strip them before
    // validating the keys
    String headerPrefix = FS_S3A_CREATE_HEADER + ".";
    final int prefixLen = headerPrefix.length();
    mandatoryKeys.stream().forEach(key -> {
        if (key.startsWith(headerPrefix) && key.length() > prefixLen) {
            headers.put(key.substring(prefixLen), options.get(key));
        } else {
            // not a header key: still subject to mandatory-key validation
            keysToValidate.add(key);
        }
    });

    rejectUnknownMandatoryKeys(keysToValidate, CREATE_FILE_KEYS, "for " + path);

    // and add any optional headers
    getOptionalKeys().stream()
        .filter(key -> key.startsWith(headerPrefix) && key.length() > prefixLen)
        .forEach(key -> headers.put(key.substring(prefixLen), options.get(key)));

    EnumSet<CreateFlag> flags = getFlags();
    if (flags.contains(CreateFlag.APPEND)) {
        throw new UnsupportedOperationException("Append is not supported");
    }
    if (!flags.contains(CreateFlag.CREATE)
        && !flags.contains(CreateFlag.OVERWRITE)) {
        throw new PathIOException(path.toString(),
            "Must specify either create or overwrite");
    }

    // performance mode is opt-in and defaults to false
    final boolean performance =
        options.getBoolean(Constants.FS_S3A_CREATE_PERFORMANCE, false);
    return callbacks.createFileFromBuilder(
        path,
        getProgress(),
        new CreateFileOptions(flags, isRecursive(), performance, headers));
}
// Default create builder: neither the overwrite nor the performance flag is set.
@Test
public void testSimpleBuild() throws Throwable {
    Assertions.assertThat(build(mkBuilder().create()))
        .matches(p -> !p.isOverwrite())
        .matches(p -> !p.isPerformance());
}
/**
 * Converts an OpenSSL signature algorithm name (e.g.
 * {@code sha512WithRSAEncryption}) to the corresponding Java standard name
 * (e.g. {@code SHA512withRSA}).
 *
 * @param opensslName the OpenSSL name, may be null
 * @return the Java name, or {@code null} when the input is null or does not
 *         match any known OpenSSL naming form
 */
static String toJavaName(String opensslName) {
    if (opensslName == null) {
        return null;
    }
    Matcher matcher = PATTERN.matcher(opensslName);
    if (matcher.matches()) {
        String group1 = matcher.group(1);
        if (group1 != null) {
            // digest captured in group 1, key algorithm in group 2
            return group1.toUpperCase(Locale.ROOT) + "with" + matcher.group(2).toUpperCase(Locale.ROOT);
        }
        // Alternate naming forms: groups 3/4 and 5/6 capture key algorithm and
        // digest in the opposite order — presumably the "<alg>-<digest>" style;
        // confirm against the PATTERN definition (not visible here).
        if (matcher.group(3) != null) {
            return matcher.group(4).toUpperCase(Locale.ROOT) + "with" + matcher.group(3).toUpperCase(Locale.ROOT);
        }

        if (matcher.group(5) != null) {
            return matcher.group(6).toUpperCase(Locale.ROOT) + "with" + matcher.group(5).toUpperCase(Locale.ROOT);
        }
    }
    return null;
}
// OpenSSL name sha512WithRSAEncryption must map to the Java name SHA512withRSA.
@Test
public void testWithEncryption() {
    assertEquals("SHA512withRSA", SignatureAlgorithmConverter.toJavaName("sha512WithRSAEncryption"));
}
/**
 * Returns the driver's supported client info properties as a fixed in-memory
 * result set (NAME, MAX_LEN, DEFAULT_VALUE, DESCRIPTION), built by executing a
 * VALUES query: ApplicationName, ClientInfo, ClientTags and TraceToken.
 */
@Override
public ResultSet getClientInfoProperties()
        throws SQLException
{
    return select(format("SELECT * FROM (VALUES%n" +
                    "   ('ApplicationName', %s, 'presto-jdbc', 'Sets the source of the session'),%n" +
                    "   ('ClientInfo', %s, NULL, 'Sets the client info of the session'), %n" +
                    "   ('ClientTags', %s, NULL, 'Comma-delimited string of tags for the session'), %n" +
                    "   ('TraceToken', %s, NULL, 'Sets the trace token of the session') %n" +
                    ") AS t (NAME, MAX_LEN, DEFAULT_VALUE, DESCRIPTION)",
            MAX_LENGTH,
            MAX_LENGTH,
            MAX_LENGTH,
            MAX_LENGTH));
}
// Verifies the four fixed client-info property rows (and their column metadata) in declared order.
@Test
public void testGetClientInfoProperties()
        throws Exception
{
    try (ResultSet resultSet = connection.getMetaData().getClientInfoProperties()) {
        ResultSetMetaData metadata = resultSet.getMetaData();
        assertEquals(metadata.getColumnCount(), 4);
        assertEquals(metadata.getColumnName(1), "NAME");
        assertEquals(metadata.getColumnName(2), "MAX_LEN");
        assertEquals(metadata.getColumnName(3), "DEFAULT_VALUE");
        assertEquals(metadata.getColumnName(4), "DESCRIPTION");

        assertTrue(resultSet.next());
        assertEquals(resultSet.getString(1), "ApplicationName");
        assertEquals(resultSet.getInt(2), MAX_LENGTH);
        assertEquals(resultSet.getString(3), "presto-jdbc");
        assertEquals(resultSet.getString(4), "Sets the source of the session");

        assertTrue(resultSet.next());
        assertEquals(resultSet.getString(1), "ClientInfo");
        assertEquals(resultSet.getInt(2), MAX_LENGTH);
        assertNull(resultSet.getString(3));
        assertEquals(resultSet.getString(4), "Sets the client info of the session");

        assertTrue(resultSet.next());
        assertEquals(resultSet.getString(1), "ClientTags");
        assertEquals(resultSet.getInt(2), MAX_LENGTH);
        assertNull(resultSet.getString(3));
        assertEquals(resultSet.getString(4), "Comma-delimited string of tags for the session");

        assertTrue(resultSet.next());
        assertEquals(resultSet.getString(1), "TraceToken");
        assertEquals(resultSet.getInt(2), MAX_LENGTH);
        assertNull(resultSet.getString(3));
        assertEquals(resultSet.getString(4), "Sets the trace token of the session");

        assertFalse(resultSet.next());
    }
}
/**
 * Renders the given portable pipeline as a GraphViz DOT string using a fresh
 * renderer instance.
 */
static String toDotString(RunnerApi.Pipeline pipeline) {
    return new PortablePipelineDotRenderer().toDot(pipeline);
}
// An empty pipeline renders as an empty left-to-right digraph.
@Test
public void testEmptyPipeline() {
    assertEquals(
        "digraph {"
            + "    rankdir=LR"
            + "}",
        PortablePipelineDotRenderer.toDotString(PipelineTranslation.toProto(p))
            .replaceAll(System.lineSeparator(), ""));
}
/**
 * Decodes a maintenance domain from its JSON representation. {@code mdName}
 * is mandatory; {@code mdNameType} defaults to CHARACTERSTRING when absent;
 * {@code mdLevel} and {@code mdNumericId} are optional.
 *
 * @return the decoded maintenance domain, or {@code null} if {@code json}
 *         is null or not an object
 * @throws IllegalArgumentException if the name/type pair cannot be parsed
 */
@Override
public MaintenanceDomain decode(ObjectNode json, CodecContext context) {
    if (json == null || !json.isObject()) {
        return null;
    }

    JsonNode mdNode = json.get(MD);

    String mdName = nullIsIllegal(mdNode.get(MD_NAME), "mdName is required").asText();
    String mdNameType = MdId.MdNameType.CHARACTERSTRING.name();
    if (mdNode.get(MD_NAME_TYPE) != null) {
        mdNameType = mdNode.get(MD_NAME_TYPE).asText();
    }

    try {
        MdId mdId = MdMaNameUtil.parseMdName(mdNameType, mdName);
        MaintenanceDomain.MdBuilder builder = DefaultMaintenanceDomain.builder(mdId);

        JsonNode mdLevelNode = mdNode.get(MD_LEVEL);
        if (mdLevelNode != null) {
            MdLevel mdLevel = MdLevel.valueOf(mdLevelNode.asText());
            builder = builder.mdLevel(mdLevel);
        }

        JsonNode mdNumericIdNode = mdNode.get(MD_NUMERIC_ID);
        if (mdNumericIdNode != null) {
            short mdNumericId = (short) mdNumericIdNode.asInt();
            builder = builder.mdNumericId(mdNumericId);
        }

        return builder.build();
    } catch (CfmConfigException e) {
        // surface config problems as an unchecked decode failure, keeping the cause
        throw new IllegalArgumentException(e);
    }
}
// Decodes an MD with the MACANDUINT name type into the expected MdId.
@Test
public void testDecodeMd3() throws IOException {
    String mdString = "{\"md\": {    \"mdName\": \"aa:bb:cc:dd:ee:ff:181\"," +
            "\"mdNameType\": \"MACANDUINT\"}}";
    InputStream input = new ByteArrayInputStream(
            mdString.getBytes(StandardCharsets.UTF_8));
    JsonNode cfg = mapper.readTree(input);
    MaintenanceDomain mdDecode1 = context
            .codec(MaintenanceDomain.class).decode((ObjectNode) cfg, context);
    assertEquals(MDID3_MACUINT, mdDecode1.mdId());
}
/** Returns the function that converts a Beam {@code Row} back into a {@code T}. */
public SerializableFunction<Row, T> getFromRowFunction() {
    return fromRowFunction;
}
// Null repeated fields round-trip from Row to proto DynamicMessage.
@Test
public void testNullRepeatedRowToProto() {
    ProtoDynamicMessageSchema schemaProvider = schemaFromDescriptor(RepeatPrimitive.getDescriptor());
    SerializableFunction<Row, DynamicMessage> fromRow = schemaProvider.getFromRowFunction();
    assertEquals(NULL_REPEATED_PROTO.toString(), fromRow.apply(NULL_REPEATED_ROW).toString());
}
/**
 * Handles a ConsumerGroupHeartbeat request. Fails fast with
 * COORDINATOR_NOT_AVAILABLE while the service is not active; otherwise
 * schedules a write operation on the coordinator shard owning the group's
 * partition and maps any failure into an error response.
 *
 * @param context the request context
 * @param request the heartbeat request
 * @return a future with the heartbeat response (never exceptionally completed;
 *         failures are folded into the response error code/message)
 */
@Override
public CompletableFuture<ConsumerGroupHeartbeatResponseData> consumerGroupHeartbeat(
    RequestContext context,
    ConsumerGroupHeartbeatRequestData request
) {
    if (!isActive.get()) {
        return CompletableFuture.completedFuture(new ConsumerGroupHeartbeatResponseData()
            .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code())
        );
    }

    return runtime.scheduleWriteOperation(
        "consumer-group-heartbeat",
        topicPartitionFor(request.groupId()),
        Duration.ofMillis(config.offsetCommitTimeoutMs()),
        coordinator -> coordinator.consumerGroupHeartbeat(context, request)
    ).exceptionally(exception -> handleOperationException(
        "consumer-group-heartbeat",
        request,
        exception,
        (error, message) -> new ConsumerGroupHeartbeatResponseData()
            .setErrorCode(error.code())
            .setErrorMessage(message)
    ));
}
// A heartbeat sent before the service is started must return COORDINATOR_NOT_AVAILABLE.
@Test
public void testConsumerGroupHeartbeatWhenNotStarted() throws ExecutionException, InterruptedException {
    CoordinatorRuntime<GroupCoordinatorShard, CoordinatorRecord> runtime = mockRuntime();
    GroupCoordinatorService service = new GroupCoordinatorService(
        new LogContext(),
        createConfig(),
        runtime,
        new GroupCoordinatorMetrics(),
        createConfigManager()
    );

    ConsumerGroupHeartbeatRequestData request = new ConsumerGroupHeartbeatRequestData()
        .setGroupId("foo");

    CompletableFuture<ConsumerGroupHeartbeatResponseData> future = service.consumerGroupHeartbeat(
        requestContext(ApiKeys.CONSUMER_GROUP_HEARTBEAT),
        request
    );

    assertEquals(
        new ConsumerGroupHeartbeatResponseData()
            .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()),
        future.get()
    );
}
/**
 * Extracts the table name from the request parameters. Path parameters take
 * precedence; query parameters are only consulted when the path yields nothing.
 *
 * @return the table name, or {@code null} if neither parameter map contains one
 */
@VisibleForTesting
static String extractTableName(MultivaluedMap<String, String> pathParameters,
    MultivaluedMap<String, String> queryParameters) {
    String fromPath = extractTableName(pathParameters);
    return fromPath != null ? fromPath : extractTableName(queryParameters);
}
// With no path or query parameters, no table name can be extracted.
@Test
public void testExtractTableNameWithEmptyParams() {
    MultivaluedMap<String, String> pathParams = new MultivaluedHashMap<>();
    MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<>();
    assertNull(AuthenticationFilter.extractTableName(pathParams, queryParams));
}
/** Iterates over the contributor tree, delegating to the root contributor. */
@Override
public Iterator<ConfigDataEnvironmentContributor> iterator() {
    return this.root.iterator();
}
// withReplacement swaps the grandchild in the returned tree while leaving the original intact.
@Test
void withReplacementReplacesChild() {
    ConfigDataEnvironmentContributor root = createBoundContributor("root");
    ConfigDataEnvironmentContributor child = createBoundContributor("child");
    ConfigDataEnvironmentContributor grandchild = createBoundContributor("grandchild");
    child = child.withChildren(ImportPhase.BEFORE_PROFILE_ACTIVATION, Collections.singletonList(grandchild));
    root = root.withChildren(ImportPhase.BEFORE_PROFILE_ACTIVATION, Collections.singletonList(child));
    ConfigDataEnvironmentContributor updated = createBoundContributor("updated");
    ConfigDataEnvironmentContributor withReplacement = root.withReplacement(grandchild, updated);
    assertThat(asLocationsList(root.iterator())).containsExactly("grandchild", "child", "root");
    assertThat(asLocationsList(withReplacement.iterator())).containsExactly("updated", "child", "root");
}
/**
 * Checks whether the given engine meta represents a repository job or
 * transformation. Returns {@code false} for null input or when no repository
 * element type is available.
 */
public static boolean isJobOrTransformation( EngineMetaInterface engineMetaInterface ) {
    if ( engineMetaInterface == null || engineMetaInterface.getRepositoryElementType() == null ) {
        return false;
    }
    RepositoryObjectType type = engineMetaInterface.getRepositoryElementType();
    return RepositoryObjectType.JOB.equals( type ) || RepositoryObjectType.TRANSFORMATION.equals( type );
}
// A JobMeta instance must be recognized as a job.
@Test
public void isJobOrTransformation_withJob() {
    JobMeta jobInstance = new JobMeta();
    assertTrue( EngineMetaUtils.isJobOrTransformation( jobInstance ) );
}
/**
 * Computes the step ids belonging to the runtime DAG of the given workflow
 * instance. For fresh runs, runs without a run config, or any restart policy
 * other than RESTART_FROM_SPECIFIC, this is simply every step id in the
 * runtime DAG. Otherwise the DAG is prepared for traversal and only the step
 * ids gathered into the child map are returned — presumably those reachable
 * given the restart config; confirm against prepareDagForTraversal, whose
 * implementation is not visible here.
 *
 * @param instance     the workflow instance
 * @param knownStepIds step ids already known to the caller (used by the
 *                     traversal preparation)
 * @return the step ids in the effective runtime DAG
 */
public static Set<String> computeStepIdsInRuntimeDag(
    @NotNull WorkflowInstance instance, @NotNull Set<String> knownStepIds) {
    if (instance.isFreshRun()
        || instance.getRunConfig() == null
        || instance.getRunConfig().getPolicy() != RunPolicy.RESTART_FROM_SPECIFIC) {
        return instance.getRuntimeDag().keySet();
    }
    Map<String, Set<String>> childMap = new HashMap<>();
    prepareDagForTraversal(
        instance.getRuntimeDag(),
        knownStepIds,
        instance.getRunConfig().getRestartConfig(),
        new HashMap<>(),
        childMap);
    return childMap.keySet();
}
// RESTART_FROM_BEGINNING is not RESTART_FROM_SPECIFIC, so the full runtime DAG is returned.
@Test
public void testComputeStepIdsInRuntimeDagForInstanceRestart() {
    WorkflowInstance instance = new WorkflowInstance();
    instance.setRunConfig(new RunConfig());
    instance.getRunConfig().setPolicy(RunPolicy.RESTART_FROM_BEGINNING);
    instance.setRuntimeDag(runtimeDag2);
    Set<String> actual =
        DagHelper.computeStepIdsInRuntimeDag(instance, Collections.singleton("job_2"));
    Assert.assertEquals("[job_1, job_3, job_2, job_5, job_4]", actual.toString());
}
/**
 * Validates that {@code value} is neither null nor blank, failing fast with
 * the parameter name so callers can pinpoint the offending argument.
 *
 * @param value the value to validate
 * @param name  parameter name used in the error message
 * @return {@code value}, unchanged, when valid
 * @throws IllegalArgumentException if {@code value} is null or blank
 */
public static String checkNotNullEmpty(String value, String name)
    throws IllegalArgumentException {
    if (!isBlank(value)) {
        return value;
    }
    throw new IllegalArgumentException(name + " is null or empty");
}
// Null input must throw IllegalArgumentException.
@Test
public void testCheckNotNullEmptyInputNullThrowsException() {
    thrown.expect(IllegalArgumentException.class);
    EagleEyeCoreUtils.checkNotNullEmpty(null, "bar");
    // Method is not expected to return due to exception thrown
}
/**
 * Builds a terms aggregation over the field of the top-aggregation's filter
 * scope, using the order configured on this instance and the minimum doc
 * count {@code TERM_AGGREGATION_MIN_DOC_COUNT}. The bucket count is capped
 * only when {@code numberOfTerms} is non-null, and the optional
 * sub-aggregation is attached last.
 *
 * @param name           name of the aggregation
 * @param topAggregation supplies the field to aggregate on
 * @param numberOfTerms  maximum number of buckets, or null for the ES default
 */
public TermsAggregationBuilder buildTermsAggregation(String name,
    TopAggregationDefinition<?> topAggregation, @Nullable Integer numberOfTerms) {
    TermsAggregationBuilder termsAggregation = AggregationBuilders.terms(name)
        .field(topAggregation.getFilterScope().getFieldName())
        .order(order)
        .minDocCount(TERM_AGGREGATION_MIN_DOC_COUNT);
    if (numberOfTerms != null) {
        termsAggregation.size(numberOfTerms);
    }
    if (subAggregation != null) {
        termsAggregation = termsAggregation.subAggregation(subAggregation);
    }
    return termsAggregation;
}
// The custom order supplied in the constructor must be compounded with the built-in tie breaker.
@Test
public void buildTermsAggregation_adds_custom_order_from_constructor() {
    String aggName = randomAlphabetic(10);
    SimpleFieldTopAggregationDefinition topAggregation = new SimpleFieldTopAggregationDefinition("bar", false);

    TermsAggregationBuilder agg = underTestWithCustomsSubAggAndOrder.buildTermsAggregation(aggName, topAggregation, null);

    assertThat(agg.getName()).isEqualTo(aggName);
    assertThat(agg.field()).isEqualTo(topAggregation.getFilterScope().getFieldName());
    assertThat(agg.order()).isEqualTo(BucketOrder.compound(customOrder, ES_BUILTIN_TIE_BREAKER));
}