focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Encrypts {@code message} with the KMS key identified by the given key ring and key id.
 *
 * @param keyRingId id of the key ring containing the key
 * @param keyId     id of the crypto key to encrypt with
 * @param message   plaintext to encrypt (encoded as UTF-8 before the KMS call)
 * @return the ciphertext, Base64-encoded
 */
public synchronized String encrypt(String keyRingId, String keyId, String message) {
    final CryptoKeyName resourceName = CryptoKeyName.of(projectId, region, keyRingId, keyId);
    LOG.info("Encrypting given message using key {}.", resourceName.toString());
    // The client is closed after a single call; a fresh one is created per invocation.
    try (KeyManagementServiceClient kmsClient = clientFactory.getKMSClient()) {
        final EncryptResponse encryptResponse = kmsClient.encrypt(resourceName, ByteString.copyFromUtf8(message));
        LOG.info("Successfully encrypted message.");
        final byte[] base64Bytes = Base64.getEncoder().encode(encryptResponse.getCiphertext().toByteArray());
        return new String(base64Bytes, StandardCharsets.UTF_8);
    }
}
/** Verifies that a client-connection failure from the factory propagates out of encrypt(). */
@Test
public void testEncryptShouldThrowErrorWhenClientFailsToConnect() {
    // Arrange: the factory fails before any KMS call can be made.
    when(kmsClientFactory.getKMSClient()).thenThrow(KMSResourceManagerException.class);
    // Act + Assert: the failure surfaces unchanged to the caller.
    assertThrows(
        KMSResourceManagerException.class,
        () -> testManager.encrypt(KEYRING_ID, KEY_ID, "test message"));
}
// Reconstructs a Host bookmark from its serialized dictionary representation.
// Returns null when the "Protocol" key is missing or no protocol is registered for the identifier.
// Legacy keys ("Private Key File", "Workdir", "Download Folder", "Maximum Connections") are read
// first and then overridden by their "* Dictionary" replacements when both are present, because
// the dictionary variants are applied later.
// NOTE(review): "Custom" is read as a raw Map — presumably Map<String, String>; confirm against Host#setCustom.
public Host deserialize(final T serialized) {
    final Deserializer<T> dict = factory.create(serialized);
    Object protocolObj = dict.stringForKey("Protocol");
    if(protocolObj == null) {
        log.warn(String.format("Missing protocol key in %s", dict));
        return null;
    }
    final Protocol protocol;
    final String identifier = protocolObj.toString();
    final Object providerObj = dict.stringForKey("Provider");
    if(null == providerObj) {
        protocol = protocols.forName(identifier);
    }
    else {
        // A provider narrows the protocol lookup to a specific connection profile.
        protocol = protocols.forName(identifier, providerObj.toString());
    }
    if(null != protocol) {
        final Host bookmark = new Host(protocol);
        final Object hostnameObj = dict.stringForKey("Hostname");
        if(hostnameObj != null) {
            bookmark.setHostname(hostnameObj.toString());
        }
        final Object uuidObj = dict.stringForKey("UUID");
        if(uuidObj != null) {
            bookmark.setUuid(uuidObj.toString());
        }
        final Object usernameObj = dict.stringForKey("Username");
        if(usernameObj != null) {
            bookmark.getCredentials().setUsername(usernameObj.toString());
        }
        final Object cdnCredentialsObj = dict.stringForKey("CDN Credentials");
        if(cdnCredentialsObj != null) {
            bookmark.getCdnCredentials().setUsername(cdnCredentialsObj.toString());
        }
        // Legacy
        final String keyObjDeprecated = dict.stringForKey("Private Key File");
        if(keyObjDeprecated != null) {
            bookmark.getCredentials().setIdentity(LocalFactory.get(keyObjDeprecated));
        }
        final T keyObj = dict.objectForKey("Private Key File Dictionary");
        if(keyObj != null) {
            bookmark.getCredentials().setIdentity(new LocalDictionary<>(factory).deserialize(keyObj));
        }
        final Object certObj = dict.stringForKey("Client Certificate");
        if(certObj != null) {
            bookmark.getCredentials().setCertificate(certObj.toString());
        }
        final Object portObj = dict.stringForKey("Port");
        if(portObj != null) {
            bookmark.setPort(Integer.parseInt(portObj.toString()));
        }
        final Object pathObj = dict.stringForKey("Path");
        if(pathObj != null) {
            bookmark.setDefaultPath(pathObj.toString());
        }
        // Legacy
        final Object workdirObjDeprecated = dict.stringForKey("Workdir");
        if(workdirObjDeprecated != null) {
            bookmark.setWorkdir(new Path(workdirObjDeprecated.toString(), EnumSet.of(Path.Type.directory)));
        }
        final T workdirObj = dict.objectForKey("Workdir Dictionary");
        if(workdirObj != null) {
            bookmark.setWorkdir(new PathDictionary<>(factory).deserialize(workdirObj));
        }
        final Object nicknameObj = dict.stringForKey("Nickname");
        if(nicknameObj != null) {
            bookmark.setNickname(nicknameObj.toString());
        }
        final Object encodingObj = dict.stringForKey("Encoding");
        if(encodingObj != null) {
            bookmark.setEncoding(encodingObj.toString());
        }
        final Object connectModeObj = dict.stringForKey("FTP Connect Mode");
        if(connectModeObj != null) {
            bookmark.setFTPConnectMode(FTPConnectMode.valueOf(connectModeObj.toString()));
        }
        final Object transferObj = dict.stringForKey("Transfer Connection");
        if(transferObj != null) {
            final Host.TransferType transfer = Host.TransferType.valueOf(transferObj.toString());
            // Only restore transfer types still enabled in preferences.
            if(PreferencesFactory.get().getList("queue.transfer.type.enabled").contains(transfer.name())) {
                bookmark.setTransfer(transfer);
            }
        }
        else {
            // Legacy
            Object connObj = dict.stringForKey("Maximum Connections");
            if(connObj != null) {
                if(1 == Integer.parseInt(connObj.toString())) {
                    bookmark.setTransfer(Host.TransferType.browser);
                }
            }
        }
        // Legacy
        final Object downloadObjDeprecated = dict.stringForKey("Download Folder");
        if(downloadObjDeprecated != null) {
            bookmark.setDownloadFolder(LocalFactory.get(downloadObjDeprecated.toString()));
        }
        final T downloadObj = dict.objectForKey("Download Folder Dictionary");
        if(downloadObj != null) {
            bookmark.setDownloadFolder(new LocalDictionary<>(factory).deserialize(downloadObj));
        }
        final T uploadObj = dict.objectForKey("Upload Folder Dictionary");
        if(uploadObj != null) {
            bookmark.setUploadFolder(new LocalDictionary<>(factory).deserialize(uploadObj));
        }
        final Object timezoneObj = dict.stringForKey("Timezone");
        if(timezoneObj != null) {
            bookmark.setTimezone(TimeZone.getTimeZone(timezoneObj.toString()));
        }
        final Object commentObj = dict.stringForKey("Comment");
        if(commentObj != null) {
            bookmark.setComment(commentObj.toString());
        }
        final Object urlObj = dict.stringForKey("Web URL");
        if(urlObj != null) {
            bookmark.setWebURL(urlObj.toString());
        }
        final Object accessObj = dict.stringForKey("Access Timestamp");
        if(accessObj != null) {
            bookmark.setTimestamp(new Date(Long.parseLong(accessObj.toString())));
        }
        final Object volumeObj = dict.stringForKey("Volume");
        if(volumeObj != null) {
            bookmark.setVolume(LocalFactory.get(volumeObj.toString()));
        }
        final Object readonlyObj = dict.stringForKey("Readonly");
        if(readonlyObj != null) {
            bookmark.setReadonly(Boolean.valueOf(readonlyObj.toString()));
        }
        final Map customObj = dict.mapForKey("Custom");
        if(customObj != null) {
            bookmark.setCustom(customObj);
        }
        final Object labelObj = dict.stringForKey("Labels");
        if(labelObj != null) {
            bookmark.setLabels(new HashSet<>(dict.listForKey("Labels")));
        }
        return bookmark;
    }
    else {
        log.warn(String.format("No protocol registered for identifier %s", protocolObj));
        return null;
    }
}
/** Round-trips a bookmark through serialize/deserialize and checks the workdir region survives. */
@Test
public void testDictionaryWorkdirRegion() {
    final Host bookmark = new Host(new TestProtocol(), "h", 66);
    bookmark.setLabels(new HashSet<>(Arrays.asList("a", "b")));
    // Working directory carries a region attribute that must survive the round trip.
    final Path workdir = new Path("/container", EnumSet.of(Path.Type.directory));
    workdir.attributes().setRegion("r");
    bookmark.setWorkdir(workdir);
    final Host deserialized = new HostDictionary<>(new DeserializerFactory<>())
        .deserialize(bookmark.serialize(SerializerFactory.get()));
    assertEquals(bookmark, deserialized);
    assertEquals("r", deserialized.getWorkdir().attributes().getRegion());
}
/**
 * Formats the given date using the shared {@code TIME_FORMATTER}; the exact output layout is
 * defined where the formatter is declared.
 */
// NOTE(review): if TIME_FORMATTER is a SimpleDateFormat this is not thread-safe — confirm its type.
public static String formatTime(Date date) { return TIME_FORMATTER.format(date); }
/** Pins the formatter output for two fixed instants; the layout itself is owned by SmppUtils. */
@Test
public void formatTime() {
    assertEquals("-300101000000000+", SmppUtils.formatTime(new Date(0L)));
    assertEquals("-300101024640000+", SmppUtils.formatTime(new Date(10000000L)));
}
/**
 * Registers the given schema under {@code subject}, resolving Protobuf references first.
 *
 * @return the schema id assigned by the registry
 * @throws KsqlSchemaAuthorizationException when the registry denies the operation
 * @throws KsqlException on any other registration failure
 */
public static int registerSchema(
    final SchemaRegistryClient srClient,
    final ParsedSchema parsedSchema,
    final String topic,
    final String subject,
    final boolean isKey
) throws KsqlSchemaAuthorizationException, KsqlException {
  try {
    if (!(parsedSchema instanceof ProtobufSchema)) {
      return srClient.register(subject, parsedSchema);
    }
    // Protobuf schemas may reference other schemas; resolve and register dependencies first.
    final ProtobufSchema withDependencies = AbstractKafkaProtobufSerializer.resolveDependencies(
        srClient,
        true,
        false,
        true,
        null,
        new DefaultReferenceSubjectNameStrategy(),
        topic,
        isKey,
        (ProtobufSchema) parsedSchema
    );
    return srClient.register(subject, withDependencies);
  } catch (IOException | RestClientException e) {
    // Map auth failures to the dedicated exception so callers can report the denied ACL operation.
    if (SchemaRegistryUtil.isAuthErrorCode(e)) {
      final AclOperation deniedOperation = SchemaRegistryUtil.getDeniedOperation(e.getMessage());
      if (deniedOperation != AclOperation.UNKNOWN) {
        throw new KsqlSchemaAuthorizationException(deniedOperation, subject);
      }
    }
    throw new KsqlException("Could not register schema for topic: " + e.getMessage(), e);
  }
}
@Test public void shouldThrowKsqlSchemaAuthorizationException() throws Exception { // Given: when(schemaRegistryClient.register(anyString(), any(ParsedSchema.class))) .thenThrow(new RestClientException("User is denied operation Write on Subject", 403, 40301)); // When: final Exception e = assertThrows( KsqlSchemaAuthorizationException.class, () -> SchemaRegistryUtil.registerSchema(schemaRegistryClient, AVRO_SCHEMA, "topic", "subject", false) ); // Then: assertThat(e.getMessage(), equalTo( "Authorization denied to Write on Schema Registry subject: [subject]")); }
/**
 * Parses one raw listing line into an FTPFile, or returns null when the line does not match
 * this parser's pattern. Field extraction is delegated to the group-based overload.
 */
@Override
public FTPFile parseFTPEntry(String entry) {
    if(!matches(entry)) {
        return null;
    }
    final String type = group(1);
    final String size = group(18);
    // Date and time live in two adjacent capture groups.
    final String timestamp = group(19) + " " + group(20);
    final String name = group(21);
    final String trailer = group(22);
    return parseFTPEntry(type, null, null, size, timestamp, name, trailer);
}
// Exercises the parser against three representative listings — two directories and a file whose
// name contains a space — asserting name, type, timestamp fields and the full 3x3 permission matrix.
@Test public void testParse() { FTPFile parsed; parsed = parser.parseFTPEntry( "drwxr-xr-x folder 0 Oct 18 13:02 Akrilik" ); assertNotNull(parsed); assertEquals("Akrilik", parsed.getName()); assertEquals(FTPFile.DIRECTORY_TYPE, parsed.getType()); assertEquals(Calendar.OCTOBER, parsed.getTimestamp().get(Calendar.MONTH)); assertEquals(18, parsed.getTimestamp().get(Calendar.DAY_OF_MONTH)); assertTrue(parsed.hasPermission(FTPFile.USER_ACCESS, FTPFile.READ_PERMISSION)); assertTrue(parsed.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.READ_PERMISSION)); assertTrue(parsed.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.READ_PERMISSION)); assertTrue(parsed.hasPermission(FTPFile.USER_ACCESS, FTPFile.WRITE_PERMISSION)); assertFalse(parsed.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.WRITE_PERMISSION)); assertFalse(parsed.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.WRITE_PERMISSION)); assertTrue(parsed.hasPermission(FTPFile.USER_ACCESS, FTPFile.EXECUTE_PERMISSION)); assertTrue(parsed.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.EXECUTE_PERMISSION)); assertTrue(parsed.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.EXECUTE_PERMISSION)); parsed = parser.parseFTPEntry( "drwxrwxrwx folder 0 Oct 11 14:53 Uploads" ); assertNotNull(parsed); assertEquals("Uploads", parsed.getName()); assertEquals(FTPFile.DIRECTORY_TYPE, parsed.getType()); assertEquals(Calendar.OCTOBER, parsed.getTimestamp().get(Calendar.MONTH)); assertEquals(11, parsed.getTimestamp().get(Calendar.DAY_OF_MONTH)); assertTrue(parsed.hasPermission(FTPFile.USER_ACCESS, FTPFile.READ_PERMISSION)); assertTrue(parsed.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.READ_PERMISSION)); assertTrue(parsed.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.READ_PERMISSION)); assertTrue(parsed.hasPermission(FTPFile.USER_ACCESS, FTPFile.WRITE_PERMISSION)); assertTrue(parsed.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.WRITE_PERMISSION)); assertTrue(parsed.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.WRITE_PERMISSION)); 
assertTrue(parsed.hasPermission(FTPFile.USER_ACCESS, FTPFile.EXECUTE_PERMISSION)); assertTrue(parsed.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.EXECUTE_PERMISSION)); assertTrue(parsed.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.EXECUTE_PERMISSION)); parsed = parser.parseFTPEntry( "-rw-r--r-- 0 589878 589878 Oct 15 13:03 WebDAV SS.bmp" ); assertNotNull(parsed); assertEquals("WebDAV SS.bmp", parsed.getName()); assertEquals(FTPFile.FILE_TYPE, parsed.getType()); assertEquals(Calendar.OCTOBER, parsed.getTimestamp().get(Calendar.MONTH)); assertEquals(15, parsed.getTimestamp().get(Calendar.DAY_OF_MONTH)); assertTrue(parsed.hasPermission(FTPFile.USER_ACCESS, FTPFile.READ_PERMISSION)); assertTrue(parsed.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.READ_PERMISSION)); assertTrue(parsed.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.READ_PERMISSION)); assertTrue(parsed.hasPermission(FTPFile.USER_ACCESS, FTPFile.WRITE_PERMISSION)); assertFalse(parsed.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.WRITE_PERMISSION)); assertFalse(parsed.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.WRITE_PERMISSION)); assertFalse(parsed.hasPermission(FTPFile.USER_ACCESS, FTPFile.EXECUTE_PERMISSION)); assertFalse(parsed.hasPermission(FTPFile.GROUP_ACCESS, FTPFile.EXECUTE_PERMISSION)); assertFalse(parsed.hasPermission(FTPFile.WORLD_ACCESS, FTPFile.EXECUTE_PERMISSION)); }
// Pure pass-through to the wrapped ResultSetMetaData (column index is 1-based, per JDBC).
@Override public String getColumnTypeName(final int columnIndex) throws SQLException { return resultSetMetaData.getColumnTypeName(columnIndex); }
// Column 1 of the fixture metadata is expected to report the type name "INT".
@Test void assertGetColumnTypeName() throws SQLException { assertThat(queryResultMetaData.getColumnTypeName(1), is("INT")); }
// NOTE(review): despite the name, this returns a URL; parsing/validation is entirely delegated
// to validateHostPortString — see that method for accepted formats.
public static URL getCorrectHostnamePort(String hostPort) { return validateHostPortString(hostPort); }
/** An https URL with explicit port and path must parse into an equivalent java.net.URL. */
@Test
void testCorrectHostnamePortWithHttpsScheme() throws Exception {
    final URL expected = new URL("https", "foo.com", 8080, "/some/other/path/index.html");
    assertThat(NetUtils.getCorrectHostnamePort("https://foo.com:8080/some/other/path/index.html"))
        .isEqualTo(expected);
}
// Validates that the predicate is compatible with the schema by visiting it with a
// SchemaCompatibilityValidator; null arguments are rejected up front, and incompatibilities
// surface as exceptions thrown from the visitor.
public static void validate(FilterPredicate predicate, MessageType schema) { Objects.requireNonNull(predicate, "predicate cannot be null"); Objects.requireNonNull(schema, "schema cannot be null"); predicate.accept(new SchemaCompatibilityValidator(schema)); }
/** Predicates on repeated columns are unsupported and must fail with a descriptive message. */
@Test
public void testRepeatedNotSupportedForPrimitivePredicates() {
    try {
        validate(eq(lotsOfLongs, 10L), schema);
        fail("this should throw");
    }
    catch (IllegalArgumentException expected) {
        assertEquals(
            "FilterPredicates do not currently support repeated columns. Column lotsOfLongs is repeated.",
            expected.getMessage());
    }
}
// Convenience wrapper: issues an HTTP GET through the shared call() path.
public Response get(URL url, Request request) throws IOException { return call(HttpMethods.GET, url, request); }
// Verifies the request timeout (5982 ms) is applied to both the connect and read phases.
// The response body is ignored; try-with-resources only guarantees it gets closed.
@Test public void testHttpTimeout() throws IOException { FailoverHttpClient httpClient = newHttpClient(false, false); try (Response ignored = httpClient.get(fakeUrl.toURL(), fakeRequest(5982))) { // intentionally empty } Mockito.verify(mockHttpRequest).setConnectTimeout(5982); Mockito.verify(mockHttpRequest).setReadTimeout(5982); }
// Runs one fetch cycle: builds fetch requests and wires the success/failure handlers.
// NOTE(review): currentTimeMs is unused here — presumably consumed inside pollInternal's
// collaborators; confirm before removing.
@Override public PollResult poll(long currentTimeMs) { return pollInternal( prepareFetchRequests(), this::handleFetchSuccess, this::handleFetchFailure ); }
// Verifies that when one of two partitions is paused AFTER its fetch response arrived, only the
// unpaused partition's records are returned, while the paused partition's completed fetch is
// retained (neither delivered nor dropped) until the partition is resumed.
@Test
public void testFetchOnCompletedFetchesForSomePausedPartitions() {
    buildFetcher();
    Map<TopicPartition, List<ConsumerRecord<byte[], byte[]>>> fetchedRecords;
    assignFromUser(mkSet(tp0, tp1));
    // seek to tp0 and tp1 in two polls to generate 2 complete requests and responses
    // #1 seek, request, poll, response
    subscriptions.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp0)));
    assertEquals(1, sendFetches());
    client.prepareResponse(fullFetchResponse(tidp0, records, Errors.NONE, 100L, 0));
    networkClientDelegate.poll(time.timer(0));
    // #2 seek, request, poll, response
    subscriptions.seekUnvalidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1)));
    assertEquals(1, sendFetches());
    client.prepareResponse(fullFetchResponse(tidp1, nextRecords, Errors.NONE, 100L, 0));
    subscriptions.pause(tp0);
    networkClientDelegate.poll(time.timer(0));
    fetchedRecords = fetchRecords();
    assertEquals(1, fetchedRecords.size(), "Should return completed fetch for unpaused partitions");
    assertTrue(fetcher.hasCompletedFetches(), "Should still contain completed fetches");
    assertNotNull(fetchedRecords.get(tp1));
    assertNull(fetchedRecords.get(tp0));
    assertEmptyFetch("Should not return records or advance position for remaining paused partition");
    assertTrue(fetcher.hasCompletedFetches(), "Should still contain completed fetches");
}
// Routes boss kill-count, duel-arena, personal-best, Hallowed Sepulchre, hunter-rumour,
// collection-log and Guardians of the Rift chat messages to the appropriate kc/pb setters.
// PB messages can arrive before or after the kill-count line; lastPb/lastBossKill bridge the gap.
@Subscribe
public void onChatMessage(ChatMessage chatMessage)
{
    if (chatMessage.getType() != ChatMessageType.TRADE
        && chatMessage.getType() != ChatMessageType.GAMEMESSAGE
        && chatMessage.getType() != ChatMessageType.SPAM
        && chatMessage.getType() != ChatMessageType.FRIENDSCHATNOTIFICATION)
    {
        return;
    }
    String message = chatMessage.getMessage();
    Matcher matcher = KILLCOUNT_PATTERN.matcher(message);
    if (matcher.find())
    {
        final String boss = matcher.group("boss");
        final int kc = Integer.parseInt(matcher.group("kc"));
        final String pre = matcher.group("pre");
        final String post = matcher.group("post");
        if (Strings.isNullOrEmpty(pre) && Strings.isNullOrEmpty(post))
        {
            unsetKc(boss);
            return;
        }
        String renamedBoss = KILLCOUNT_RENAMES
            .getOrDefault(boss, boss)
            // The config service doesn't support keys with colons in them
            .replace(":", "");
        // NOTE(review): reference comparison on Strings — it only works because getOrDefault and
        // String.replace return the identical instance when nothing changed; prefer
        // !boss.equals(renamedBoss) to avoid depending on that implementation detail.
        if (boss != renamedBoss)
        {
            // Unset old TOB kc
            unsetKc(boss);
            unsetPb(boss);
            unsetKc(boss.replace(":", "."));
            unsetPb(boss.replace(":", "."));
            // Unset old story mode
            unsetKc("Theatre of Blood Story Mode");
            unsetPb("Theatre of Blood Story Mode");
        }
        setKc(renamedBoss, kc);
        // We either already have the pb, or need to remember the boss for the upcoming pb
        if (lastPb > -1)
        {
            log.debug("Got out-of-order personal best for {}: {}", renamedBoss, lastPb);
            if (renamedBoss.contains("Theatre of Blood"))
            {
                // TOB team size isn't sent in the kill message, but can be computed from varbits
                int tobTeamSize = tobTeamSize();
                lastTeamSize = tobTeamSize == 1 ? "Solo" : (tobTeamSize + " players");
            }
            else if (renamedBoss.contains("Tombs of Amascut"))
            {
                // TOA team size isn't sent in the kill message, but can be computed from varbits
                int toaTeamSize = toaTeamSize();
                lastTeamSize = toaTeamSize == 1 ? "Solo" : (toaTeamSize + " players");
            }
            final double pb = getPb(renamedBoss);
            // If a raid with a team size, only update the pb if it is lower than the existing pb
            // so that the pb is the overall lowest of any team size
            if (lastTeamSize == null || pb == 0 || lastPb < pb)
            {
                log.debug("Setting overall pb (old: {})", pb);
                setPb(renamedBoss, lastPb);
            }
            if (lastTeamSize != null)
            {
                log.debug("Setting team size pb: {}", lastTeamSize);
                setPb(renamedBoss + " " + lastTeamSize, lastPb);
            }
            lastPb = -1;
            lastTeamSize = null;
        }
        else
        {
            lastBossKill = renamedBoss;
            lastBossTime = client.getTickCount();
        }
        return;
    }
    matcher = DUEL_ARENA_WINS_PATTERN.matcher(message);
    if (matcher.find())
    {
        final int oldWins = getKc("Duel Arena Wins");
        final int wins = matcher.group(2).equals("one") ? 1 : Integer.parseInt(matcher.group(2).replace(",", ""));
        final String result = matcher.group(1);
        int winningStreak = getKc("Duel Arena Win Streak");
        int losingStreak = getKc("Duel Arena Lose Streak");
        if (result.equals("won") && wins > oldWins)
        {
            losingStreak = 0;
            winningStreak += 1;
        }
        else if (result.equals("were defeated"))
        {
            losingStreak += 1;
            winningStreak = 0;
        }
        else
        {
            log.warn("unrecognized duel streak chat message: {}", message);
        }
        setKc("Duel Arena Wins", wins);
        setKc("Duel Arena Win Streak", winningStreak);
        setKc("Duel Arena Lose Streak", losingStreak);
    }
    matcher = DUEL_ARENA_LOSSES_PATTERN.matcher(message);
    if (matcher.find())
    {
        int losses = matcher.group(1).equals("one") ? 1 : Integer.parseInt(matcher.group(1).replace(",", ""));
        setKc("Duel Arena Losses", losses);
    }
    matcher = KILL_DURATION_PATTERN.matcher(message);
    if (matcher.find())
    {
        matchPb(matcher);
    }
    matcher = NEW_PB_PATTERN.matcher(message);
    if (matcher.find())
    {
        matchPb(matcher);
    }
    matcher = RAIDS_PB_PATTERN.matcher(message);
    if (matcher.find())
    {
        matchPb(matcher);
    }
    matcher = RAIDS_DURATION_PATTERN.matcher(message);
    if (matcher.find())
    {
        matchPb(matcher);
    }
    matcher = HS_PB_PATTERN.matcher(message);
    if (matcher.find())
    {
        int floor = Integer.parseInt(matcher.group("floor"));
        String floortime = matcher.group("floortime");
        String floorpb = matcher.group("floorpb");
        String otime = matcher.group("otime");
        String opb = matcher.group("opb");
        // Prefer the explicit pb group; fall back to the run time when no pb was broadcast.
        String pb = MoreObjects.firstNonNull(floorpb, floortime);
        setPb("Hallowed Sepulchre Floor " + floor, timeStringToSeconds(pb));
        if (otime != null)
        {
            pb = MoreObjects.firstNonNull(opb, otime);
            setPb("Hallowed Sepulchre", timeStringToSeconds(pb));
        }
    }
    matcher = HS_KC_FLOOR_PATTERN.matcher(message);
    if (matcher.find())
    {
        int floor = Integer.parseInt(matcher.group(1));
        int kc = Integer.parseInt(matcher.group(2).replaceAll(",", ""));
        setKc("Hallowed Sepulchre Floor " + floor, kc);
    }
    matcher = HS_KC_GHC_PATTERN.matcher(message);
    if (matcher.find())
    {
        int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
        setKc("Hallowed Sepulchre", kc);
    }
    matcher = HUNTER_RUMOUR_KC_PATTERN.matcher(message);
    if (matcher.find())
    {
        int kc = Integer.parseInt(matcher.group(1).replaceAll(",", ""));
        setKc("Hunter Rumours", kc);
    }
    // A pending boss-kill marker only survives within the tick it was recorded on.
    if (lastBossKill != null && lastBossTime != client.getTickCount())
    {
        lastBossKill = null;
        lastBossTime = -1;
    }
    matcher = COLLECTION_LOG_ITEM_PATTERN.matcher(message);
    if (matcher.find())
    {
        String item = matcher.group(1);
        int petId = findPet(item);
        if (petId != -1)
        {
            final List<Integer> petList = new ArrayList<>(getPetList());
            if (!petList.contains(petId))
            {
                log.debug("New pet added: {}/{}", item, petId);
                petList.add(petId);
                setPetList(petList);
            }
        }
    }
    matcher = GUARDIANS_OF_THE_RIFT_PATTERN.matcher(message);
    if (matcher.find())
    {
        int kc = Integer.parseInt(matcher.group(1));
        setKc("Guardians of the Rift", kc);
    }
}
// Simulates a Chambers of Xeric completion where the new-personal-best broadcast arrives BEFORE
// the kill-count line, then verifies the kc plus both the overall and team-size-qualified pbs
// (24+ players, 37:04.20 = 2224.2 s) are persisted to the profile configuration.
@Test public void testCoXKill() { ChatMessage chatMessage = new ChatMessage(null, FRIENDSCHATNOTIFICATION, "", "<col=ef20ff>Congratulations - your raid is complete!</col><br>Team size: <col=ff0000>24+ players</col> Duration:</col> <col=ff0000>37:04.20</col> (new personal best)</col>>", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); chatMessage = new ChatMessage(null, GAMEMESSAGE, "", "Your completed Chambers of Xeric count is: <col=ff0000>51</col>.", null, 0); chatCommandsPlugin.onChatMessage(chatMessage); verify(configManager).setRSProfileConfiguration("killcount", "chambers of xeric", 51); verify(configManager).setRSProfileConfiguration("personalbest", "chambers of xeric", 37 * 60 + 4.2); verify(configManager).setRSProfileConfiguration("personalbest", "chambers of xeric 24+ players", 37 * 60 + 4.2); }
/**
 * Entry point for feature execution. When a custom processor is configured, it takes over
 * completely (no hooks, no afterFeature). Otherwise scenarios run only if the before-feature
 * hook permits, and afterFeature always runs for the hook-based path.
 */
@Override
public void run() {
    if (processor != null) {
        processor.execute();
        return;
    }
    if (beforeHook()) {
        scenarios.forEachRemaining(this::processScenario);
    }
    else {
        logger.info("before-feature hook returned [false], aborting: {}", this);
    }
    afterFeature();
}
// Runs the callonce-global.feature scenario; the assertions live inside the feature file itself.
@Test void testCallOnceGlobal() { run("callonce-global.feature"); }
/**
 * Handles server push notifications: for a NotifySubscriberRequest, forwards the contained
 * ServiceInfo to the holder and acknowledges; any other request type yields null (not handled).
 */
@Override
public Response requestReply(Request request, Connection connection) {
    if (!(request instanceof NotifySubscriberRequest)) {
        return null;
    }
    final NotifySubscriberRequest notify = (NotifySubscriberRequest) request;
    serviceInfoHolder.processServiceInfo(notify.getServiceInfo());
    return new NotifySubscriberResponse();
}
@Test void testRequestReply() { //given ServiceInfoHolder holder = mock(ServiceInfoHolder.class); NamingPushRequestHandler handler = new NamingPushRequestHandler(holder); ServiceInfo info = new ServiceInfo("name", "cluster1"); Request req = NotifySubscriberRequest.buildNotifySubscriberRequest(info); //when Response response = handler.requestReply(req, new TestConnection(new RpcClient.ServerInfo())); //then assertTrue(response instanceof NotifySubscriberResponse); verify(holder, times(1)).processServiceInfo(info); }
// Long-polling endpoint: validates and normalizes the client's notifications, registers a
// DeferredResult under every watched key, then checks for releases that may already be newer.
// Ordering is deliberate — register BEFORE checking — so a release arriving in between is not
// lost (see the numbered comments below).
@GetMapping
public DeferredResult<ResponseEntity<List<ApolloConfigNotification>>> pollNotification(
    @RequestParam(value = "appId") String appId,
    @RequestParam(value = "cluster") String cluster,
    @RequestParam(value = "notifications") String notificationsAsString,
    @RequestParam(value = "dataCenter", required = false) String dataCenter,
    @RequestParam(value = "ip", required = false) String clientIp) {
  List<ApolloConfigNotification> notifications = null;
  try {
    notifications = gson.fromJson(notificationsAsString, notificationsTypeReference);
  } catch (Throwable ex) {
    Tracer.logError(ex);
  }
  if (CollectionUtils.isEmpty(notifications)) {
    throw BadRequestException.invalidNotificationsFormat(notificationsAsString);
  }
  Map<String, ApolloConfigNotification> filteredNotifications = filterNotifications(appId, notifications);
  if (CollectionUtils.isEmpty(filteredNotifications)) {
    throw BadRequestException.invalidNotificationsFormat(notificationsAsString);
  }
  DeferredResultWrapper deferredResultWrapper = new DeferredResultWrapper(bizConfig.longPollingTimeoutInMilli());
  Set<String> namespaces = Sets.newHashSetWithExpectedSize(filteredNotifications.size());
  Map<String, Long> clientSideNotifications = Maps.newHashMapWithExpectedSize(filteredNotifications.size());
  for (Map.Entry<String, ApolloConfigNotification> notificationEntry : filteredNotifications.entrySet()) {
    String normalizedNamespace = notificationEntry.getKey();
    ApolloConfigNotification notification = notificationEntry.getValue();
    namespaces.add(normalizedNamespace);
    clientSideNotifications.put(normalizedNamespace, notification.getNotificationId());
    // Remember original->normalized namespace mappings so the response can echo the client's names.
    if (!Objects.equals(notification.getNamespaceName(), normalizedNamespace)) {
      deferredResultWrapper.recordNamespaceNameNormalizedResult(notification.getNamespaceName(), normalizedNamespace);
    }
  }
  Multimap<String, String> watchedKeysMap = watchKeysUtil.assembleAllWatchKeys(appId, cluster, namespaces, dataCenter);
  Set<String> watchedKeys = Sets.newHashSet(watchedKeysMap.values());
  /**
   * 1、set deferredResult before the check, for avoid more waiting
   * If the check before setting deferredResult,it may receive a notification the next time
   * when method handleMessage is executed between check and set deferredResult.
   */
  deferredResultWrapper.onTimeout(() -> logWatchedKeys(watchedKeys, "Apollo.LongPoll.TimeOutKeys"));
  deferredResultWrapper.onCompletion(() -> {
    //unregister all keys
    for (String key : watchedKeys) {
      deferredResults.remove(key, deferredResultWrapper);
    }
    logWatchedKeys(watchedKeys, "Apollo.LongPoll.CompletedKeys");
  });
  //register all keys
  for (String key : watchedKeys) {
    this.deferredResults.put(key, deferredResultWrapper);
  }
  logWatchedKeys(watchedKeys, "Apollo.LongPoll.RegisteredKeys");
  logger.debug("Listening {} from appId: {}, cluster: {}, namespace: {}, datacenter: {}",
      watchedKeys, appId, cluster, namespaces, dataCenter);
  /**
   * 2、check new release
   */
  List<ReleaseMessage> latestReleaseMessages =
      releaseMessageService.findLatestReleaseMessagesGroupByMessages(watchedKeys);
  /**
   * Manually close the entity manager.
   * Since for async request, Spring won't do so until the request is finished,
   * which is unacceptable since we are doing long polling - means the db connection would be hold
   * for a very long time
   */
  entityManagerUtil.closeEntityManager();
  List<ApolloConfigNotification> newNotifications =
      getApolloConfigNotifications(namespaces, clientSideNotifications, watchedKeysMap, latestReleaseMessages);
  if (!CollectionUtils.isEmpty(newNotifications)) {
    deferredResultWrapper.setResult(newNotifications);
  }
  return deferredResultWrapper.getResult();
}
// Polls with the namespace given in "<default>.properties" form and asserts it is normalized to
// the bare default namespace, with every assembled watch key registered in deferredResults.
@Test public void testPollNotificationWithDefaultNamespaceAsFile() throws Exception { String namespace = String.format("%s.%s", defaultNamespace, "properties"); when(namespaceUtil.filterNamespaceName(namespace)).thenReturn(defaultNamespace); String someWatchKey = "someKey"; String anotherWatchKey = "anotherKey"; Multimap<String, String> watchKeysMap = assembleMultiMap(defaultNamespace, Lists.newArrayList(someWatchKey, anotherWatchKey)); String notificationAsString = transformApolloConfigNotificationsToString(namespace, someNotificationId); when(watchKeysUtil .assembleAllWatchKeys(someAppId, someCluster, Sets.newHashSet(defaultNamespace), someDataCenter)).thenReturn( watchKeysMap); DeferredResult<ResponseEntity<List<ApolloConfigNotification>>> deferredResult = controller .pollNotification(someAppId, someCluster, notificationAsString, someDataCenter, someClientIp); assertEquals(watchKeysMap.size(), deferredResults.size()); assertWatchKeys(watchKeysMap, deferredResult); }
// A subject exists iff the registry returns a latest schema for it.
public static boolean subjectExists( final SchemaRegistryClient srClient, final String subject ) { return getLatestSchema(srClient, subject).isPresent(); }
@Test public void shouldReturnTrueOnIsSubjectExists() throws Exception { // Given: when(schemaRegistryClient.getLatestSchemaMetadata("foo-value")).thenReturn(schemaMetadata); // When: final boolean subjectExists = SchemaRegistryUtil.subjectExists(schemaRegistryClient, "foo-value"); // Then: assertTrue("Expected subject to exist", subjectExists); }
/**
 * Sets the global portable version used for serialization; rejects negative values.
 *
 * @return this builder, for chaining
 * @throws IllegalArgumentException when {@code portableVersion} is negative
 */
@Override
public SerializationServiceBuilder setPortableVersion(int portableVersion) {
    if (portableVersion >= 0) {
        this.portableVersion = portableVersion;
        return this;
    }
    throw new IllegalArgumentException("Portable Version cannot be negative!");
}
// Negative portable versions must be rejected by the builder.
@Test(expected = IllegalArgumentException.class) public void test_exceptionThrown_whenPortableVersionNegative() { getSerializationServiceBuilder().setPortableVersion(-1); }
/**
 * Applies {@code op}, retrying per the configured retry policy; non-retriable IOExceptions are
 * rethrown immediately by handleRetriablException, otherwise the last failure is thrown once
 * the policy gives up.
 *
 * @param op          the object-store operation to attempt
 * @param description lazy description used only for debug logging
 * @return the operation's result on the first successful attempt
 * @throws IOException the last retriable failure once retries are exhausted
 */
// NOTE(review): if the policy allows zero attempts, this throws a null IOException (NPE) — confirm
// getRetryPolicy() always permits at least one attempt.
@VisibleForTesting
protected <T> T retryOnException(ObjectStoreOperation<T> op, Supplier<String> description)
    throws IOException {
    final RetryPolicy retryPolicy = getRetryPolicy();
    IOException lastFailure = null;
    while (retryPolicy.attempt()) {
        try {
            return op.apply();
        } catch (IOException e) {
            LOG.debug("Attempt {} to {} failed with exception : {}",
                retryPolicy.getAttemptCount(), description.get(), e.toString());
            // Rethrows immediately when the failure is not retriable.
            handleRetriablException(e);
            lastFailure = e;
        }
    }
    throw lastFailure;
}
// Covers retryOnException: a SocketException is retried (op applied at least twice) while a
// FileNotFoundException propagates after a single attempt.
// NOTE(review): the second try block has no fail() after retryOnException, so the test would pass
// silently if no exception were thrown — consider adding fail("expected FileNotFoundException").
@Test
public void testRetryOnException() {
    //if ufs throws SocketException, it will retry.
    ObjectUnderFileSystem.ObjectStoreOperation<Object> mockOp = Mockito.mock(
        ObjectUnderFileSystem.ObjectStoreOperation.class);
    try {
        Mockito.when(mockOp.apply()).thenThrow(new SocketException()).thenReturn(null);
        mObjectUFS.retryOnException(mockOp, () -> "test");
        // retry policy will retry, so op will apply twice.
        Mockito.verify(mockOp, Mockito.atLeast(2)).apply();
    } catch (IOException e) {
        fail();
    }
    //if ufs throws other Exception, it will throw Exception.
    try {
        Mockito.reset(mockOp);
        Mockito.when(mockOp.apply()).thenThrow(new FileNotFoundException()).thenReturn(null);
        mObjectUFS.retryOnException(mockOp, () -> "test");
    } catch (IOException e) {
        assertTrue(e instanceof FileNotFoundException);
    }
    try {
        // op only apply once and then throw exception.
        Mockito.verify(mockOp, Mockito.atLeast(1)).apply();
    } catch (IOException e) {
        fail();
    }
}
// Global maximum over a PCollection using the elements' natural ordering.
public static <T extends Comparable<? super T>> Combine.Globally<T, T> globally() { return Combine.globally(Max.<T>naturalOrder()); }
// The transform's display data must expose the comparer class passed to Max.globally.
@Test public void testDisplayData() { Top.Natural<Integer> comparer = new Top.Natural<>(); Combine.Globally<Integer, Integer> max = Max.globally(comparer); assertThat(DisplayData.from(max), hasDisplayItem("comparer", comparer.getClass())); }
public static Object construct(Object something) throws Exception { if (something instanceof String) { return Class.forName((String)something).getConstructor().newInstance(); } else if (something instanceof Map) { // keys are the class name, values are the parameters. for (Map.Entry<String, Object> entry : ((Map<String, Object>) something).entrySet()) { if (entry.getValue() instanceof Map) { return constructByNamedParams(Class.forName(entry.getKey()), (Map)entry.getValue()); } else if (entry.getValue() instanceof List) { return constructByParameterizedConstructor(Class.forName(entry.getKey()), (List)entry.getValue()); } } } return null; }
/** Positional String parameters must be passed through to the matching constructor in order. */
@Test
public void classWithListedParams_constructed_setsParams() throws Exception {
    // Two positional String arguments for a class that has no default constructor.
    final List<Map<String, Object>> constructorArgs = new ArrayList<>();
    constructorArgs.add(Collections.singletonMap("java.lang.String", "Hello"));
    constructorArgs.add(Collections.singletonMap("java.lang.String", "There"));

    final Map<String, List<Map<String, Object>>> constructMap = new HashMap<>();
    constructMap.put("com.networknt.service.ClassWithoutDefaultConstructor", constructorArgs);

    final ClassWithoutDefaultConstructor result =
        (ClassWithoutDefaultConstructor) ServiceUtil.construct(constructMap);
    Assert.assertEquals("Hello", result.getFirst());
    Assert.assertEquals("There", result.getSecond());
}
/**
 * Re-points this stream at the given backing array, positioning the cursor at {@code offset}.
 * A null array is treated as an empty stream (size 0).
 */
@Override
public void init(byte[] data, int offset) {
    this.data = data;
    if (data == null) {
        this.size = 0;
    } else {
        this.size = data.length;
    }
    this.pos = offset;
}
// init(null, 0) must reset the stream: no backing array, zero size, cursor at position 0.
@Test public void testInit_null() { in.init(null, 0); assertNull(in.data); assertEquals(0, in.size); assertEquals(0, in.pos); }
/**
 * Populates the request with a URL-encoded form entity built from {@code body}.
 * A null/empty body or a request type that cannot carry an entity is silently ignored.
 *
 * @param charset charset used when encoding the form entity
 */
public static void initRequestFromEntity(HttpRequestBase requestBase, Map<String, String> body, String charset)
        throws Exception {
    if (body == null || body.isEmpty()) {
        return;
    }
    if (!(requestBase instanceof HttpEntityEnclosingRequest)) {
        // Entity-less request types (e.g. plain GET) cannot carry a form body.
        return;
    }
    final List<NameValuePair> formParams = new ArrayList<>(body.size());
    for (Map.Entry<String, String> entry : body.entrySet()) {
        formParams.add(new BasicNameValuePair(entry.getKey(), entry.getValue()));
    }
    ((HttpEntityEnclosingRequest) requestBase).setEntity(new UrlEncodedFormEntity(formParams, charset));
}
/**
 * Verifies that a single-field body map is encoded as "k=v" into the request entity.
 */
@Test
void testInitRequestFromEntity1() throws Exception {
    BaseHttpMethod.HttpGetWithEntity httpRequest = new BaseHttpMethod.HttpGetWithEntity("");
    HttpUtils.initRequestFromEntity(httpRequest, Collections.singletonMap("k", "v"), "UTF-8");
    HttpEntity entity = httpRequest.getEntity();
    // Drain the stream fully instead of relying on available() plus a single read(),
    // neither of which is guaranteed to deliver the whole content in one call.
    java.io.ByteArrayOutputStream content = new java.io.ByteArrayOutputStream();
    try (InputStream contentStream = entity.getContent()) {
        byte[] chunk = new byte[512];
        int n;
        while ((n = contentStream.read(chunk)) != -1) {
            content.write(chunk, 0, n);
        }
    }
    assertEquals("k=v", new String(content.toByteArray(), StandardCharsets.UTF_8));
}
/**
 * Processes one incoming row by buffering it into the fastload data file. On the first row
 * the data file is opened and the column sort order (stream field index per target table
 * column) is computed. When the input is exhausted, the data file is closed, the fastload
 * process is executed, and its exit value is checked.
 *
 * @param smi step meta, expected to be a {@link TeraFastMeta}
 * @param sdi step data
 * @return true while rows are being consumed, false once the input is exhausted
 * @throws KettleException if the data file cannot be opened
 */
@Override
public boolean processRow( final StepMetaInterface smi, final StepDataInterface sdi ) throws KettleException {
  this.meta = (TeraFastMeta) smi;
  Object[] row = getRow();
  if ( row == null ) {
    /* In case we have no data, we need to ensure that the printstream was ever initialized. It will if there is
     * data. So we check for a null printstream, then we close the dataFile and execute only if it existed. */
    if ( this.dataFilePrintStream != null ) {
      this.dataFilePrintStream.close();
      IOUtils.closeQuietly( this.dataFile );
      this.execute();
    }
    setOutputDone();
    try {
      logBasic( BaseMessages.getString( PKG, "TeraFast.Log.WatingForFastload" ) );
      if ( this.process != null ) {
        // Block until the external fastload process terminates; non-zero exit means failure.
        final int exitVal = this.process.waitFor();
        if ( exitVal != 0 ) {
          setErrors( DEFAULT_ERROR_CODE );
        }
        logBasic( BaseMessages.getString( PKG, "TeraFast.Log.ExitValueFastloadPath", "" + exitVal ) );
      }
    } catch ( Exception e ) {
      logError( BaseMessages.getString( PKG, "TeraFast.Log.ErrorInStep" ), e );
      this.setDefaultError();
      stopAll();
    }
    return false;
  }
  if ( this.first ) {
    this.first = false;
    try {
      // Lazily open the data file on the first row only.
      final File tempDataFile = new File( resolveFileName( this.meta.getDataFile().getValue() ) );
      this.dataFile = FileUtils.openOutputStream( tempDataFile );
      this.dataFilePrintStream = new PrintStream( dataFile );
    } catch ( IOException e ) {
      throw new KettleException( "Cannot open data file [path=" + this.dataFile + "]", e );
    }
    // determine column sort order according to field mapping
    // thus the columns in the generated datafile are always in the same order and have the same size as in the
    // targetTable
    this.tableRowMeta = this.meta.getRequiredFields( this.getTransMeta() );
    RowMetaInterface streamRowMeta = this.getTransMeta().getPrevStepFields( this.getStepMeta() );
    this.columnSortOrder = new ArrayList<>( this.tableRowMeta.size() );
    for ( int i = 0; i < this.tableRowMeta.size(); i++ ) {
      ValueMetaInterface column = this.tableRowMeta.getValueMeta( i );
      // Columns absent from the field mapping are skipped (no index recorded).
      int tableIndex = this.meta.getTableFieldList().getValue().indexOf( column.getName() );
      if ( tableIndex >= 0 ) {
        String streamField = this.meta.getStreamFieldList().getValue().get( tableIndex );
        this.columnSortOrder.add( streamRowMeta.indexOfValue( streamField ) );
      }
    }
  }
  writeToDataFile( getInputRowMeta(), row );
  return true;
}
/**
 * Verifies that processRow() returns false when the input is exhausted and the data file
 * print stream was never initialized (i.e. no rows were ever received).
 */
@Test
public void testNullDataFilePrintStream() throws KettleException {
    TeraFast teraFastDataFilePrintStreamIsNull = mock( TeraFast.class );
    doReturn( null ).when( teraFastDataFilePrintStreamIsNull ).getRow();
    TeraFastMeta meta = mock( TeraFastMeta.class );
    GenericStepData data = mock( GenericStepData.class );
    // Without this, the mocked processRow() would simply return Mockito's default value
    // (false) and the assertion would pass without exercising any real logic.
    doCallRealMethod().when( teraFastDataFilePrintStreamIsNull ).processRow( meta, data );
    assertFalse( teraFastDataFilePrintStreamIsNull.processRow( meta, data ) );
}
/**
 * Uploads a file using the direct-S3 multipart upload channel. If the target container is
 * encrypted, the local content is wrapped in a Triple-Crypt encrypting stream and each part
 * is first buffered to a temporary file before submission. Parts are uploaded concurrently
 * on a dedicated thread pool; afterwards the upload is completed server-side (attaching the
 * encrypted file key when present) and the final node state is awaited by polling.
 *
 * @param file     remote target file
 * @param local    local source file
 * @param throttle bandwidth throttle applied to part uploads
 * @param listener stream progress listener
 * @param status   transfer status; carries length, offset, timestamps and optional file key
 * @param callback connection prompt callback
 * @return the resulting node after the upload completed
 * @throws BackgroundException on API, crypto or I/O failure
 */
@Override
public Node upload(final Path file, final Local local, final BandwidthThrottle throttle, final StreamListener listener,
                   final TransferStatus status, final ConnectionCallback callback) throws BackgroundException {
    final ThreadPool pool = ThreadPoolFactory.get("multipart", concurrency);
    try {
        final InputStream in;
        // Wrap the source stream in the encryptor when the container is encrypted.
        if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(file))) {
            in = new SDSTripleCryptEncryptorFeature(session, nodeid).encrypt(file, local.getInputStream(), status);
        }
        else {
            in = local.getInputStream();
        }
        final CreateFileUploadRequest createFileUploadRequest = new CreateFileUploadRequest()
            .directS3Upload(true)
            .timestampModification(status.getModified() != null ? new DateTime(status.getModified()) : null)
            .timestampCreation(status.getCreated() != null ? new DateTime(status.getCreated()) : null)
            .size(TransferStatus.UNKNOWN_LENGTH == status.getLength() ? null : status.getLength())
            .parentId(Long.parseLong(nodeid.getVersionId(file.getParent())))
            .name(file.getName());
        final CreateFileUploadResponse createFileUploadResponse = new NodesApi(session.getClient())
            .createFileUploadChannel(createFileUploadRequest, StringUtils.EMPTY);
        if(log.isDebugEnabled()) {
            log.debug(String.format("upload started for %s with response %s", file, createFileUploadResponse));
        }
        // Part number -> per-part status (carries the ETag needed to complete the upload).
        final Map<Integer, TransferStatus> etags = new HashMap<>();
        final List<PresignedUrl> presignedUrls = this.retrievePresignedUrls(createFileUploadResponse, status);
        final List<Future<TransferStatus>> parts = new ArrayList<>();
        try {
            final String random = new UUIDRandomStringService().random();
            // Full size of file
            final long size = status.getLength() + status.getOffset();
            long offset = 0;
            long remaining = status.getLength();
            // remaining >= 0 (not > 0) so that a zero-length file still produces one part.
            for(int partNumber = 1; remaining >= 0; partNumber++) {
                final long length = Math.min(Math.max((size / (MAXIMUM_UPLOAD_PARTS - 1)), partsize), remaining);
                final PresignedUrl presignedUrl = presignedUrls.get(partNumber - 1);
                if(new SDSTripleCryptEncryptorFeature(session, nodeid).isEncrypted(containerService.getContainer(file))) {
                    // Encrypted parts are staged to a temporary file first, since the
                    // encrypting stream can only be consumed sequentially.
                    final Local temporary = temp.create(String.format("%s-%d", random, partNumber));
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Encrypted contents for part %d to %s", partNumber, temporary));
                    }
                    final FileBuffer buffer = new FileBuffer(temporary);
                    new StreamCopier(status, StreamProgress.noop).withAutoclose(false).withLimit(length)
                        .transfer(in, new BufferOutputStream(buffer));
                    parts.add(this.submit(pool, file, temporary, buffer, throttle, listener, status,
                        presignedUrl.getUrl(), presignedUrl.getPartNumber(), 0L, length, callback));
                }
                else {
                    parts.add(this.submit(pool, file, local, Buffer.noop, throttle, listener, status,
                        presignedUrl.getUrl(), presignedUrl.getPartNumber(), offset, length, callback));
                }
                remaining -= length;
                offset += length;
                if(0L == remaining) {
                    break;
                }
            }
        }
        finally {
            in.close();
        }
        // Wait for all part uploads and collect their ETags by part number.
        Interruptibles.awaitAll(parts)
            .forEach(part -> etags.put(part.getPart(), part));
        final CompleteS3FileUploadRequest completeS3FileUploadRequest = new CompleteS3FileUploadRequest()
            .keepShareLinks(new HostPreferences(session.getHost()).getBoolean("sds.upload.sharelinks.keep"))
            .resolutionStrategy(CompleteS3FileUploadRequest.ResolutionStrategyEnum.OVERWRITE);
        if(status.getFilekey() != null) {
            // Attach the file key, re-encrypted for the session user's public key.
            final ObjectReader reader = session.getClient().getJSON().getContext(null).readerFor(FileKey.class);
            final FileKey fileKey = reader.readValue(status.getFilekey().array());
            final EncryptedFileKey encryptFileKey = Crypto.encryptFileKey(
                TripleCryptConverter.toCryptoPlainFileKey(fileKey),
                TripleCryptConverter.toCryptoUserPublicKey(session.keyPair().getPublicKeyContainer())
            );
            completeS3FileUploadRequest.setFileKey(TripleCryptConverter.toSwaggerFileKey(encryptFileKey));
        }
        etags.forEach((key, value) -> completeS3FileUploadRequest.addPartsItem(
            new S3FileUploadPart().partEtag(value.getChecksum().hash).partNumber(key)));
        if(log.isDebugEnabled()) {
            log.debug(String.format("Complete file upload with %s for %s", completeS3FileUploadRequest, file));
        }
        new NodesApi(session.getClient()).completeS3FileUpload(completeS3FileUploadRequest, createFileUploadResponse.getUploadId(), StringUtils.EMPTY);
        // Polling
        return new SDSUploadService(session, nodeid).await(file, status, createFileUploadResponse.getUploadId()).getNode();
    }
    catch(CryptoSystemException | InvalidFileKeyException | InvalidKeyPairException | UnknownVersionException e) {
        throw new TripleCryptExceptionMappingService().map("Upload {0} failed", e, file);
    }
    catch(ApiException e) {
        throw new SDSExceptionMappingService(nodeid).map("Upload {0} failed", e, file);
    }
    catch(IOException e) {
        throw new DefaultIOExceptionMappingService().map("Upload {0} failed", e, file);
    }
    finally {
        temp.shutdown();
        // Cancel future tasks
        pool.shutdown(false);
    }
}
/**
 * Uploads a 10 MiB file through the direct-S3 upload feature with Triple-Crypt client-side
 * encryption, then reads it back through the decrypting read feature and verifies the
 * round-tripped bytes match. Live integration test against a real room.
 */
@Test
public void testTripleCryptUploadExactMultipartSize() throws Exception {
    final SDSNodeIdProvider nodeid = new SDSNodeIdProvider(session);
    final SDSDirectS3UploadFeature feature = new SDSDirectS3UploadFeature(session, nodeid, new SDSDirectS3WriteFeature(session, nodeid));
    final Path room = new SDSDirectoryFeature(session, nodeid).createRoom(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), true);
    final Path test = new Path(room, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final Local local = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
    // 10 MiB of random content staged in a local temporary file.
    final byte[] random = RandomUtils.nextBytes(10 * 1024 * 1024);
    final OutputStream out = local.getOutputStream(false);
    IOUtils.write(random, out);
    out.close();
    final TransferStatus status = new TransferStatus();
    status.setFilekey(SDSTripleCryptEncryptorFeature.generateFileKey());
    status.setLength(random.length);
    // Bulk pre-processing sets up encryption for the transfer item.
    final SDSEncryptionBulkFeature bulk = new SDSEncryptionBulkFeature(session, nodeid);
    bulk.pre(Transfer.Type.upload, Collections.singletonMap(new TransferItem(test, local), status), new DisabledConnectionCallback());
    final Node node = feature.upload(test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
        new DisabledStreamListener(), status, new DisabledLoginCallback());
    assertTrue(status.isComplete());
    assertNotSame(PathAttributes.EMPTY, status.getResponse());
    assertTrue(new SDSFindFeature(session, nodeid).find(test));
    final PathAttributes attributes = new SDSAttributesFinderFeature(session, nodeid).find(test);
    assertEquals(random.length, attributes.getSize());
    assertEquals(new SDSAttributesAdapter(session).toAttributes(node), attributes);
    // Read back through the decrypting read feature; vault passphrase supplied by the callback.
    final byte[] compare = new byte[random.length];
    final InputStream stream = new TripleCryptReadFeature(session, nodeid, new SDSReadFeature(session, nodeid)).read(test, new TransferStatus(), new DisabledConnectionCallback() {
        @Override
        public Credentials prompt(final Host bookmark, final String title, final String reason, final LoginOptions options) {
            return new VaultCredentials("eth[oh8uv4Eesij");
        }
    });
    IOUtils.readFully(stream, compare);
    stream.close();
    assertArrayEquals(random, compare);
    // Clean up the remote file and the local staging file.
    new SDSDeleteFeature(session, nodeid).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
    local.delete();
}
/**
 * Resolves a resource location to a {@link URL}. Locations without a URL scheme and
 * {@code classpath:} locations are looked up on the class path; anything else is parsed
 * as a regular URL.
 *
 * @param location the resource location; must not be null
 * @return the resolved URL
 * @throws MalformedURLException if the location is invalid or the classpath path is empty
 * @throws FileNotFoundException if no resource exists at the given location
 */
public static URL urlForResource(String location) throws MalformedURLException, FileNotFoundException {
    if (location == null) {
        throw new NullPointerException("location is required");
    }
    final URL url;
    if (!location.matches(SCHEME_PATTERN)) {
        // No scheme at all: treat the whole location as a classpath resource.
        url = Loader.getResourceBySelfClassLoader(location);
    } else if (location.startsWith(CLASSPATH_SCHEME)) {
        // Explicit classpath: scheme; strip the prefix and any leading slash.
        String path = location.substring(CLASSPATH_SCHEME.length());
        if (path.startsWith("/")) {
            path = path.substring(1);
        }
        if (path.length() == 0) {
            throw new MalformedURLException("path is required");
        }
        url = Loader.getResourceBySelfClassLoader(path);
    } else {
        url = new URL(location);
    }
    if (url == null) {
        throw new FileNotFoundException(location);
    }
    return url;
}
@Test
public void testExplicitClasspathUrl() throws Exception {
    // An explicit "classpath:" scheme must resolve against the class path.
    final String location = LocationUtil.CLASSPATH_SCHEME + TEST_CLASSPATH_RESOURCE;
    validateResource(LocationUtil.urlForResource(location));
}
/**
 * Dissects an archive control request event into a human-readable form. Writes the common
 * log header, then wraps the event-specific SBE decoder over the message body and delegates
 * to the matching append* method. Unknown event codes are annotated as such.
 *
 * @param eventCode the archive event being dissected
 * @param buffer    buffer containing the encoded event
 * @param offset    offset of the event within the buffer
 * @param builder   destination for the readable representation
 */
@SuppressWarnings("MethodLength")
static void dissectControlRequest(
    final ArchiveEventCode eventCode,
    final MutableDirectBuffer buffer,
    final int offset,
    final StringBuilder builder)
{
    int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder);
    // The SBE message header follows the log header; the body starts after it.
    HEADER_DECODER.wrap(buffer, offset + encodedLength);
    encodedLength += MessageHeaderDecoder.ENCODED_LENGTH;
    // One case per request type: wrap the matching decoder, then append its fields.
    switch (eventCode)
    {
        case CMD_IN_CONNECT:
            CONNECT_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendConnect(builder);
            break;

        case CMD_IN_CLOSE_SESSION:
            CLOSE_SESSION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendCloseSession(builder);
            break;

        case CMD_IN_START_RECORDING:
            START_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartRecording(builder);
            break;

        case CMD_IN_STOP_RECORDING:
            STOP_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecording(builder);
            break;

        case CMD_IN_REPLAY:
            REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplay(builder);
            break;

        case CMD_IN_STOP_REPLAY:
            STOP_REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopReplay(builder);
            break;

        case CMD_IN_LIST_RECORDINGS:
            LIST_RECORDINGS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordings(builder);
            break;

        case CMD_IN_LIST_RECORDINGS_FOR_URI:
            LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordingsForUri(builder);
            break;

        case CMD_IN_LIST_RECORDING:
            LIST_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecording(builder);
            break;

        case CMD_IN_EXTEND_RECORDING:
            EXTEND_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendExtendRecording(builder);
            break;

        case CMD_IN_RECORDING_POSITION:
            RECORDING_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendRecordingPosition(builder);
            break;

        case CMD_IN_TRUNCATE_RECORDING:
            TRUNCATE_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendTruncateRecording(builder);
            break;

        case CMD_IN_STOP_RECORDING_SUBSCRIPTION:
            STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecordingSubscription(builder);
            break;

        case CMD_IN_STOP_POSITION:
            STOP_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopPosition(builder);
            break;

        case CMD_IN_FIND_LAST_MATCHING_RECORD:
            FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendFindLastMatchingRecord(builder);
            break;

        case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS:
            LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendListRecordingSubscriptions(builder);
            break;

        case CMD_IN_START_BOUNDED_REPLAY:
            BOUNDED_REPLAY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartBoundedReplay(builder);
            break;

        case CMD_IN_STOP_ALL_REPLAYS:
            STOP_ALL_REPLAYS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopAllReplays(builder);
            break;

        case CMD_IN_REPLICATE:
            REPLICATE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplicate(builder);
            break;

        case CMD_IN_STOP_REPLICATION:
            STOP_REPLICATION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopReplication(builder);
            break;

        case CMD_IN_START_POSITION:
            START_POSITION_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartPosition(builder);
            break;

        case CMD_IN_DETACH_SEGMENTS:
            DETACH_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendDetachSegments(builder);
            break;

        case CMD_IN_DELETE_DETACHED_SEGMENTS:
            DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendDeleteDetachedSegments(builder);
            break;

        case CMD_IN_PURGE_SEGMENTS:
            PURGE_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendPurgeSegments(builder);
            break;

        case CMD_IN_ATTACH_SEGMENTS:
            ATTACH_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendAttachSegments(builder);
            break;

        case CMD_IN_MIGRATE_SEGMENTS:
            MIGRATE_SEGMENTS_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendMigrateSegments(builder);
            break;

        case CMD_IN_AUTH_CONNECT:
            AUTH_CONNECT_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendAuthConnect(builder);
            break;

        case CMD_IN_KEEP_ALIVE:
            KEEP_ALIVE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendKeepAlive(builder);
            break;

        case CMD_IN_TAGGED_REPLICATE:
            TAGGED_REPLICATE_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendTaggedReplicate(builder);
            break;

        case CMD_IN_START_RECORDING2:
            START_RECORDING_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStartRecording2(builder);
            break;

        case CMD_IN_EXTEND_RECORDING2:
            EXTEND_RECORDING_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendExtendRecording2(builder);
            break;

        case CMD_IN_STOP_RECORDING_BY_IDENTITY:
            STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendStopRecordingByIdentity(builder);
            break;

        case CMD_IN_PURGE_RECORDING:
            PURGE_RECORDING_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendPurgeRecording(builder);
            break;

        case CMD_IN_REPLICATE2:
            REPLICATE_REQUEST2_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplicate2(builder);
            break;

        case CMD_IN_REQUEST_REPLAY_TOKEN:
            REPLAY_TOKEN_REQUEST_DECODER.wrap(
                buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version());
            appendReplayToken(builder);
            break;

        default:
            builder.append(": unknown command");
    }
}
@Test
void controlRequestListRecordings() {
    internalEncodeLogHeader(buffer, 0, 32, 32, () -> 100_000_000L);
    // Encode a LIST_RECORDINGS request directly after the log header.
    new ListRecordingsRequestEncoder()
        .wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder)
        .controlSessionId(9)
        .correlationId(78)
        .fromRecordingId(45)
        .recordCount(10);

    dissectControlRequest(CMD_IN_LIST_RECORDINGS, buffer, 0, builder);

    final String expected = "[0.100000000] " + CONTEXT + ": " + CMD_IN_LIST_RECORDINGS.name() + " [32/32]:" +
        " controlSessionId=9" +
        " correlationId=78" +
        " fromRecordingId=45" +
        " recordCount=10";
    assertEquals(expected, builder.toString());
}
/**
 * Asynchronously looks up the value mapped to {@code key}, delegating directly to the
 * underlying map.
 */
@Override
public CompletionStage<V> getAsync(K key) {
    final CompletionStage<V> pending = map.getAsync(key);
    return pending;
}
@Test
public void testGetAsync() throws Exception {
    map.put(42, "foobar");
    // The adapter must surface the stored value through its asynchronous API.
    Future<String> pending = adapter.getAsync(42).toCompletableFuture();
    assertEquals("foobar", pending.get());
}
/**
 * Creates a redistribution transform that makes no keying or ordering guarantees.
 *
 * @param <T> element type
 * @return a new arbitrary redistribution transform
 */
public static <T> RedistributeArbitrarily<T> arbitrarily() {
    final RedistributeArbitrarily<T> transform = new RedistributeArbitrarily<>(null, false);
    return transform;
}
/**
 * Verifies that an arbitrary redistribute preserves per-element timestamps, including the
 * boundary timestamps (min timestamp and the global window's max timestamp).
 */
@Test
@Category(ValidatesRunner.class)
public void testRedistributePreservesTimestamps() {
    // Reify each element's original timestamp into the value before redistribution.
    PCollection<KV<String, TimestampedValue<String>>> input =
        pipeline
            .apply(
                Create.timestamped(
                        TimestampedValue.of("foo", BoundedWindow.TIMESTAMP_MIN_VALUE),
                        TimestampedValue.of("foo", new Instant(0)),
                        TimestampedValue.of("bar", new Instant(33)),
                        TimestampedValue.of("bar", GlobalWindow.INSTANCE.maxTimestamp()))
                    .withCoder(StringUtf8Coder.of()))
            .apply(
                WithKeys.<String, String>of(input12 -> input12)
                    .withKeyType(TypeDescriptors.strings()))
            .apply("ReifyOriginalTimestamps", Reify.timestampsInValue());

    // The outer TimestampedValue is the reified timestamp post-reshuffle. The inner
    // TimestampedValue is the pre-reshuffle timestamp.
    PCollection<TimestampedValue<TimestampedValue<String>>> output =
        input
            .apply(Redistribute.arbitrarily())
            .apply("ReifyRedistributedTimestamps", Reify.timestampsInValue())
            .apply(Values.create());

    // Compare the pre- and post-redistribution timestamps element by element.
    PAssert.that(output)
        .satisfies(
            input1 -> {
                for (TimestampedValue<TimestampedValue<String>> elem : input1) {
                    Instant originalTimestamp = elem.getValue().getTimestamp();
                    Instant afterRedistributeTimestamp = elem.getTimestamp();
                    assertThat(
                        "Redistribute must preserve element timestamps",
                        afterRedistributeTimestamp,
                        equalTo(originalTimestamp));
                }
                return null;
            });

    pipeline.run();
}
/**
 * Fetches topic statistics from the broker at the given address.
 *
 * @param addr          broker address
 * @param topic         topic to query
 * @param timeoutMillis RPC timeout in milliseconds
 * @return the decoded statistics table
 * @throws MQBrokerException if the broker replies with a non-success code
 */
public TopicStatsTable getTopicStatsInfo(final String addr, final String topic,
    final long timeoutMillis) throws InterruptedException, RemotingTimeoutException,
    RemotingSendRequestException, RemotingConnectException, MQBrokerException {
    final GetTopicStatsInfoRequestHeader requestHeader = new GetTopicStatsInfoRequestHeader();
    requestHeader.setTopic(topic);
    final RemotingCommand request =
        RemotingCommand.createRequestCommand(RequestCode.GET_TOPIC_STATS_INFO, requestHeader);
    final String brokerAddr = MixAll.brokerVIPChannel(this.clientConfig.isVipChannelEnabled(), addr);
    final RemotingCommand response = this.remotingClient.invokeSync(brokerAddr, request, timeoutMillis);
    if (response.getCode() == ResponseCode.SUCCESS) {
        return TopicStatsTable.decode(response.getBody(), TopicStatsTable.class);
    }
    // Any non-success code is surfaced as a broker exception carrying the original remark.
    throw new MQBrokerException(response.getCode(), response.getRemark(), addr);
}
@Test
public void assertGetTopicStatsInfo() throws RemotingException, InterruptedException, MQBrokerException {
    mockInvokeSync();
    // Prepare a stats table with a single queue entry as the broker response body.
    TopicStatsTable statsTable = new TopicStatsTable();
    statsTable.getOffsetTable().put(new MessageQueue(), new TopicOffset());
    setResponseBody(statsTable);

    TopicStatsTable actual = mqClientAPI.getTopicStatsInfo(defaultBrokerAddr, defaultTopic, defaultTimeout);

    assertNotNull(actual);
    assertEquals(1, actual.getOffsetTable().size());
}
/**
 * Sets the id of the build this object belongs to.
 *
 * @param buildId the owning build's id
 */
public void setBuildId(long buildId) {
    this.buildId = buildId;
}
@Test
public void shouldIgnoreIdAndBuildIdAsPartOfEqualAndHashCodeCheck() {
    // Two plans differing only in id and buildId must still compare equal.
    final ArtifactPlan first = new ArtifactPlan(ArtifactPlanType.file, "src", "dest");
    first.setId(100);
    first.setBuildId(1000);

    final ArtifactPlan second = new ArtifactPlan(ArtifactPlanType.file, "src", "dest");
    second.setId(200);
    second.setBuildId(2000);

    assertThat(first).isEqualTo(second);
}
/**
 * Builds a divide upstream for {@code host:port} with the default weight and warmup time.
 * The upstream status is true only when both a non-blank host and a port are supplied.
 *
 * @param protocol upstream protocol, e.g. {@code http}
 * @param host     upstream host
 * @param port     upstream port, may be null
 * @return the assembled upstream
 */
public static DivideUpstream buildDivideUpstream(final String protocol, final String host, final Integer port) {
    final boolean reachable = Objects.nonNull(port) && StringUtils.isNotBlank(host);
    return DivideUpstream.builder()
            .upstreamHost(LOCALHOST)
            .protocol(protocol)
            .upstreamUrl(buildUrl(host, port))
            .weight(50)
            .warmup(Constants.WARMUP_TIME)
            .timestamp(System.currentTimeMillis())
            .status(reachable)
            .build();
}
@Test
public void buildDivideUpstream() {
    DivideUpstream upstream = CommonUpstreamUtils.buildDivideUpstream("http", HOST, PORT);

    Assert.assertNotNull(upstream);
    // URL is derived from host and port; protocol is passed through unchanged.
    Assert.assertEquals(HOST + ":" + PORT, upstream.getUpstreamUrl());
    Assert.assertEquals("http", upstream.getProtocol());
}
/**
 * Stores the modification timestamp from {@code status} as the object's Custom-Time
 * metadata via a patch request. Volumes (buckets) are skipped. Custom-Time may never
 * decrease; when the patch is rejected with an interoperability error the object is
 * rewritten (copied onto itself), which effectively resets Custom-Time.
 *
 * @param file   target object
 * @param status carries the desired modification time; updated with the resulting attributes
 * @throws BackgroundException on an unrecoverable API failure
 */
@Override
public void setTimestamp(final Path file, final TransferStatus status) throws BackgroundException {
    if(file.isVolume()) {
        log.warn(String.format("Skip setting timestamp for %s", file));
        return;
    }
    try {
        if(null != status.getModified()) {
            // The Custom-Time metadata is a user-specified date and time represented in the RFC 3339
            // format YYYY-MM-DD'T'HH:MM:SS.SS'Z' or YYYY-MM-DD'T'HH:MM:SS'Z' when milliseconds are zero.
            final Storage.Objects.Patch request = session.getClient().objects().patch(containerService.getContainer(file).getName(), containerService.getKey(file),
                new StorageObject().setCustomTime(new DateTime(status.getModified())));
            // Requester-pays buckets need the user project set on the request.
            if(containerService.getContainer(file).attributes().getCustom().containsKey(GoogleStorageAttributesFinderFeature.KEY_REQUESTER_PAYS)) {
                request.setUserProject(session.getHost().getCredentials().getUsername());
            }
            final StorageObject latest = request.execute();
            status.setResponse(new GoogleStorageAttributesFinderFeature(session).toAttributes(latest));
        }
    }
    catch(IOException e) {
        final BackgroundException failure = new GoogleStorageExceptionMappingService().map("Failure to write attributes of {0}", e, file);
        if(file.isDirectory()) {
            if(failure instanceof NotfoundException) {
                // No placeholder file may exist but we just have a common prefix
                return;
            }
        }
        if(failure instanceof InteroperabilityException) {
            if(log.isWarnEnabled()) {
                log.warn(String.format("Retry rewriting file %s with failure %s writing custom time", file, failure));
            }
            // You cannot remove Custom-Time once it's been set on an object. Additionally, the value for Custom-Time cannot
            // decrease. That is, you cannot set Custom-Time to be an earlier date/time than the existing Custom-Time.
            // You can, however, effectively remove or reset the Custom-Time by rewriting the object.
            status.setResponse(new GoogleStorageCopyFeature(session).copy(file, file, status, new DisabledConnectionCallback(), new DisabledStreamListener()).attributes());
            return;
        }
        throw failure;
    }
}
/**
 * Exercises the timestamp feature end to end: creation with an initial custom time,
 * an attempted decrease (which triggers a rewrite of the object), a regular patch to a
 * later time, and preservation of the timestamp across a move. Live integration test.
 */
@Test
public void testFindTimesteamp() throws Exception {
    final Path bucket = new Path("cyberduck-test-eu", EnumSet.of(Path.Type.directory, Path.Type.volume));
    // Create a file with an initial custom time and verify it is reported back.
    final Path test = new GoogleStorageTouchFeature(session).touch(new Path(bucket,
        new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus().withModified(1530305150672L));
    assertEquals(1530305150672L, new GoogleStorageAttributesFinderFeature(session).find(test).getModificationDate());
    // Overwrite the object with new content and a later modification time.
    final TransferStatus status = new TransferStatus();
    final byte[] content = RandomUtils.nextBytes(1033);
    status.setLength(content.length);
    status.setModified(1530305150673L);
    final HttpResponseOutputStream<StorageObject> out = new GoogleStorageWriteFeature(session).write(test, status, new DisabledConnectionCallback());
    new StreamCopier(new TransferStatus(), new TransferStatus()).transfer(new ByteArrayInputStream(content), out);
    out.close();
    final PathAttributes response = status.getResponse();
    assertNotNull(response.getETag());
    assertNotNull(response.getVersionId());
    test.withAttributes(response);
    assertEquals(1530305150673L, response.getModificationDate());
    final GoogleStorageTimestampFeature feature = new GoogleStorageTimestampFeature(session);
    // Rewrite object with timestamp earlier than already set
    final TransferStatus rewriteStatus = new TransferStatus().withModified(1530305150672L);
    feature.setTimestamp(test, rewriteStatus);
    final PathAttributes attrAfterRewrite = new GoogleStorageAttributesFinderFeature(session).find(test);
    assertEquals(rewriteStatus.getResponse(), attrAfterRewrite);
    assertEquals(1530305150672L, attrAfterRewrite.getModificationDate());
    // A rewrite produces a new object generation, so ETag and version change.
    assertNotEquals(response.getETag(), attrAfterRewrite.getETag());
    assertNotEquals(response.getVersionId(), attrAfterRewrite.getVersionId());
    // Moving the timestamp forward uses a plain patch.
    final TransferStatus patchStatus = new TransferStatus().withModified(1630305150672L);
    feature.setTimestamp(test, patchStatus);
    assertEquals(1630305150672L, new GoogleStorageAttributesFinderFeature(session).find(test).getModificationDate());
    final PathAttributes attrAfterPatch = new GoogleStorageAttributesFinderFeature(session).find(test);
    assertEquals(patchStatus.getResponse(), attrAfterPatch);
    final String eTagAfterPatch = attrAfterPatch.getETag();
    assertNotEquals(attrAfterRewrite.getETag(), eTagAfterPatch);
    // The custom time must survive a server-side move.
    final Path moved = new GoogleStorageMoveFeature(session).move(test,
        new Path(bucket, new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)), new TransferStatus(),
        new Delete.DisabledCallback(), new DisabledConnectionCallback());
    assertEquals(1630305150672L, moved.attributes().getModificationDate());
    assertEquals(1630305150672L, new GoogleStorageAttributesFinderFeature(session).find(moved).getModificationDate());
    assertNotEquals(eTagAfterPatch, new GoogleStorageAttributesFinderFeature(session).find(moved).getETag());
    new GoogleStorageDeleteFeature(session).delete(Collections.singletonList(moved), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Launches the worker process for this container's assigned port. Computes heap/off-heap
 * memory and the enforced memory limit from the assigned resources, builds the worker
 * environment (merging the topology environment and extending LD_LIBRARY_PATH with the
 * java library path), optionally reserves resources with the isolation manager, then hands
 * the assembled command to the resource isolation manager for execution.
 *
 * @throws IOException if command construction or filesystem access fails
 */
@Override
public void launch() throws IOException {
    type.assertFull();
    // NUMA zone (if any) this port is bound to; null means no NUMA binding.
    String numaId = SupervisorUtils.getNumaIdForPort(port, conf);
    if (numaId == null) {
        LOG.info("Launching worker with assignment {} for this supervisor {} on port {} with id {}", assignment,
                 supervisorId, port, workerId);
    } else {
        LOG.info("Launching worker with assignment {} for this supervisor {} on port {} with id {} bound to numa zone {}",
                 assignment, supervisorId, port, workerId, numaId);
    }
    exitedEarly = false;

    final WorkerResources resources = assignment.get_resources();
    final int memOnHeap = getMemOnHeap(resources);
    final int memOffHeap = getMemOffHeap(resources);
    memoryLimitMb = calculateMemoryLimit(resources, memOnHeap);
    final String stormRoot = ConfigUtils.supervisorStormDistRoot(conf, topologyId);
    String jlp = javaLibraryPath(stormRoot, conf);

    // Start from the topology-configured environment, if any.
    Map<String, String> topEnvironment = new HashMap<String, String>();
    @SuppressWarnings("unchecked")
    Map<String, String> environment = (Map<String, String>) topoConf.get(Config.TOPOLOGY_ENVIRONMENT);
    if (environment != null) {
        topEnvironment.putAll(environment);
    }

    // Append any user-provided LD_LIBRARY_PATH to the computed java library path.
    String ldLibraryPath = topEnvironment.get("LD_LIBRARY_PATH");
    if (ldLibraryPath != null) {
        jlp = jlp + System.getProperty("path.separator") + ldLibraryPath;
    }
    topEnvironment.put("LD_LIBRARY_PATH", jlp);

    if (resourceIsolationManager.isResourceManaged()) {
        final int cpu = (int) Math.ceil(resources.get_cpu());
        //Save the memory limit so we can enforce it less strictly
        resourceIsolationManager.reserveResourcesForWorker(workerId, (int) memoryLimitMb, cpu, numaId);
    }

    List<String> commandList = mkLaunchCommand(memOnHeap, memOffHeap, stormRoot, jlp, numaId);

    LOG.info("Launching worker with command: {}. ", ServerUtils.shellCmd(commandList));

    String workerDir = ConfigUtils.workerRoot(conf, workerId);
    String logPrefix = "Worker Process " + workerId;
    ProcessExitCallback processExitCallback = new ProcessExitCallback(logPrefix);
    resourceIsolationManager.launchWorkerProcess(getWorkerUser(), topologyId, topoConf, port, workerId,
        commandList, topEnvironment, logPrefix, processExitCallback, new File(workerDir));
}
@Test
public void testLaunchStorm0version() throws Exception {
    // Verifies that a topology serialized with a pre-1.0 Storm version is launched
    // with the legacy "backtype.storm" entry-point class names in the worker command.
    final String topoId = "test_topology_storm_0.x";
    final int supervisorPort = 6628;
    final int port = 8080;
    final String stormHome = ContainerTest.asAbsPath("tmp", "storm-home");
    final String stormLogDir = ContainerTest.asFile(".", "target").getCanonicalPath();
    final String workerId = "worker-id";
    final String stormLocal = ContainerTest.asAbsPath("tmp", "storm-local");
    final String distRoot = ContainerTest.asAbsPath(stormLocal, "supervisor", "stormdist", topoId);
    final File stormcode = new File(distRoot, "stormcode.ser");
    final File stormjar = new File(distRoot, "stormjar.jar");
    final String log4jdir = ContainerTest.asAbsPath(stormHome, "conf");
    final String workerConf = ContainerTest.asAbsPath(log4jdir, "worker.xml");
    final String workerRoot = ContainerTest.asAbsPath(stormLocal, "workers", workerId);
    final String workerTmpDir = ContainerTest.asAbsPath(workerRoot, "tmp");

    // Minimal empty topology carrying only the 0.x version marker.
    final StormTopology st = new StormTopology();
    st.set_spouts(new HashMap<>());
    st.set_bolts(new HashMap<>());
    st.set_state_spouts(new HashMap<>());
    // minimum 0.x version of supporting STORM-2448 would be 0.10.3
    st.set_storm_version("0.10.3");
    byte[] serializedState = Utils.gzip(Utils.thriftSerialize(st));

    final Map<String, Object> superConf = new HashMap<>();
    superConf.put(Config.STORM_LOCAL_DIR, stormLocal);
    superConf.put(Config.STORM_WORKERS_ARTIFACTS_DIR, stormLocal);
    superConf.put(DaemonConfig.STORM_LOG4J2_CONF_DIR, log4jdir);
    superConf.put(Config.WORKER_CHILDOPTS, " -Dtesting=true");

    LocalAssignment la = new LocalAssignment();
    la.set_topology_id(topoId);

    // Pretend the topology files already exist locally and return our serialized topology.
    AdvancedFSOps ops = mock(AdvancedFSOps.class);
    when(ops.doRequiredTopoFilesExist(superConf, topoId)).thenReturn(true);
    when(ops.slurp(stormcode)).thenReturn(serializedState);

    LocalState ls = mock(LocalState.class);
    MockResourceIsolationManager iso = new MockResourceIsolationManager();

    checkpoint(() -> {
            MockBasicContainer mc = new MockBasicContainer(ContainerType.LAUNCH, superConf,
                "SUPERVISOR", supervisorPort, port, la, iso, ls, workerId, new StormMetricsRegistry(),
                new HashMap<>(), ops, "profile");

            mc.launch();

            // Exactly one worker process launch must have been recorded.
            assertEquals(1, iso.workerCmds.size());
            CommandRun cmd = iso.workerCmds.get(0);
            iso.workerCmds.clear();
            // Expected command: the LogWriter wrapper JVM followed by the worker JVM,
            // both using the legacy backtype.storm class names for a 0.x topology.
            assertListEquals(Arrays.asList(
                "java",
                "-cp",
                "FRAMEWORK_CP:" + stormjar.getAbsolutePath(),
                "-Dlogging.sensitivity=S3",
                "-Dlogfile.name=worker.log",
                "-Dstorm.home=" + stormHome,
                "-Dworkers.artifacts=" + stormLocal,
                "-Dstorm.id=" + topoId,
                "-Dworker.id=" + workerId,
                "-Dworker.port=" + port,
                "-Dstorm.log.dir=" + stormLogDir,
                "-DLog4jContextSelector=org.apache.logging.log4j.core.selector.BasicContextSelector",
                "-Dstorm.local.dir=" + stormLocal,
                "-Dworker.memory_limit_mb=768",
                "-Dlog4j.configurationFile=" + workerConf,
                "backtype.storm.LogWriter",
                "java",
                "-server",
                "-Dlogging.sensitivity=S3",
                "-Dlogfile.name=worker.log",
                "-Dstorm.home=" + stormHome,
                "-Dworkers.artifacts=" + stormLocal,
                "-Dstorm.id=" + topoId,
                "-Dworker.id=" + workerId,
                "-Dworker.port=" + port,
                "-Dstorm.log.dir=" + stormLogDir,
                "-DLog4jContextSelector=org.apache.logging.log4j.core.selector.BasicContextSelector",
                "-Dstorm.local.dir=" + stormLocal,
                "-Dworker.memory_limit_mb=768",
                "-Dlog4j.configurationFile=" + workerConf,
                "-Dtesting=true",
                "-Djava.library.path=JLP",
                "-Dstorm.conf.file=",
                "-Dstorm.options=",
                "-Djava.io.tmpdir=" + workerTmpDir,
                "-cp",
                "FRAMEWORK_CP:" + stormjar.getAbsolutePath(),
                "backtype.storm.daemon.worker",
                topoId,
                "SUPERVISOR",
                String.valueOf(port),
                workerId
            ), cmd.cmd);
            assertEquals(new File(workerRoot), cmd.pwd);
        }, ConfigUtils.STORM_HOME, stormHome,
        "storm.log.dir", stormLogDir);
}
/**
 * Builds a {@link ConfigurationProperty} for the given key from either a plain
 * value or an already-encrypted value. Supplying both, or supplying an encrypted
 * value for an unsecured property, records validation errors on the result.
 */
public ConfigurationProperty create(String key, String value, String encryptedValue, Boolean isSecure) {
    ConfigurationProperty property = new ConfigurationProperty();
    property.setConfigurationKey(new ConfigurationKey(key));

    boolean hasPlainValue = isNotBlank(value);
    boolean hasEncryptedValue = isNotBlank(encryptedValue);

    // Both forms supplied: record errors on both fields, keep both values for display.
    if (hasPlainValue && hasEncryptedValue) {
        property.addError("configurationValue", "You may only specify `value` or `encrypted_value`, not both!");
        property.addError("encryptedValue", "You may only specify `value` or `encrypted_value`, not both!");
        property.setConfigurationValue(new ConfigurationValue(value));
        property.setEncryptedValue(new EncryptedConfigurationValue(encryptedValue));
        return property;
    }

    if (isSecure) {
        // Secure properties always end up stored encrypted; plain values get encrypted here.
        if (hasEncryptedValue) {
            property.setEncryptedValue(new EncryptedConfigurationValue(encryptedValue));
        }
        if (hasPlainValue) {
            property.setEncryptedValue(new EncryptedConfigurationValue(encrypt(value)));
        }
    } else {
        if (hasEncryptedValue) {
            property.addError("encryptedValue", "encrypted_value cannot be specified to a unsecured property.");
            property.setEncryptedValue(new EncryptedConfigurationValue(encryptedValue));
        }
        // Deliberately null-checked (not blank-checked) so empty strings survive as values.
        if (value != null) {
            property.setConfigurationValue(new ConfigurationValue(value));
        }
    }

    // NOTE(review): this re-wrap of the already-stored encrypted value appears redundant
    // (same string wrapped again) — preserved as-is; confirm whether it can be removed.
    if (isNotBlank(property.getEncryptedValue())) {
        property.setEncryptedValue(new EncryptedConfigurationValue(property.getEncryptedValue()));
    }
    return property;
}
@Test
public void shouldCreateWithErrorsInPresenceOfEncryptedTextInputForUnSecuredProperty() {
    Property key = new Property("key");
    key.with(Property.SECURE, false);

    // Supplying an encrypted value for an unsecured property must record an error
    // while still keeping the supplied value around.
    ConfigurationProperty created = new ConfigurationPropertyBuilder().create("key", null, "enc_value", false);

    String expectedError = "encrypted_value cannot be specified to a unsecured property.";
    assertThat(created.errors().get("encryptedValue").get(0), is(expectedError));
    assertThat(created.getEncryptedValue(), is("enc_value"));
}
/**
 * Returns the minimum number of bytes needed to hold an unscaled decimal of the
 * given precision, via the precomputed lookup table.
 *
 * @throws IllegalArgumentException if precision is outside [0, MAX_PRECISION]
 */
public static int minBytesForPrecision(int precision) {
    Preconditions.checkArgument(precision >= 0 && precision <= MAX_PRECISION);
    return minBytesForPrecision[precision];
}
@Test
public void minBytesForPrecision() {
    // For each precision, the returned byte count must be the tight fit:
    // one byte fewer would be too small, and the count itself is sufficient.
    for (int p = 0; p <= DecimalUtils.MAX_PRECISION - 2; p++) {
        int byteCount = DecimalUtils.minBytesForPrecision(p);
        double oneByteFewerCapacity = Math.pow(2, Math.max(0, (byteCount - 1) * 8 - 1));
        double capacity = Math.pow(2, byteCount * 8 - 1);
        double required = Math.pow(10, p);
        Assert.assertTrue(byteCount >= 1);
        Assert.assertTrue(oneByteFewerCapacity <= required);
        Assert.assertTrue(capacity >= required);
    }
}
@Override
public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context)
        throws IOException, SAXException, TikaException {
    // Parses a SAS7BDAT dataset: emits file-level metadata, then the data as an XHTML table.
    metadata.set(Metadata.CONTENT_TYPE, TYPE_SAS7BDAT.toString());

    XHTMLContentHandler xhtml = new XHTMLContentHandler(handler, metadata);
    xhtml.startDocument();

    SasFileReader sas = new SasFileReaderImpl(stream);
    SasFileProperties props = sas.getSasFileProperties();

    // Record the interesting parts of the file's metadata
    metadata.set(TikaCoreProperties.TITLE, props.getName());
    metadata.set(TikaCoreProperties.CREATED, props.getDateCreated());
    metadata.set(TikaCoreProperties.MODIFIED, props.getDateModified());
    metadata.set(PagedText.N_PAGES, (int) props.getPageCount());
    metadata.set(Database.COLUMN_COUNT, (int) props.getColumnsCount());
    metadata.set(Database.ROW_COUNT, (int) props.getRowCount());

    // TODO Can we find more general properties for these / move
    //  these to more general places?
    metadata.set(HttpHeaders.CONTENT_ENCODING, props.getEncoding());
    metadata.set(OfficeOpenXMLExtended.APPLICATION, props.getServerType());
    metadata.set(OfficeOpenXMLExtended.APP_VERSION, props.getSasRelease());
    metadata.set(MachineMetadata.ARCHITECTURE_BITS, props.isU64() ? "64" : "32");
    // Endianness code 1 maps to little-endian per the parser library's convention.
    metadata.set(MachineMetadata.ENDIAN, props.getEndianness() == 1 ?
        MachineMetadata.Endian.LITTLE.getName() : MachineMetadata.Endian.BIG.getName());

    // The following SAS Metadata fields are currently ignored:
    //  compressionMethod
    //  sessionEncoding
    //  fileType
    //  osName - 
    //  osType - 
    //  mixPageRowCount
    //  headerLength
    //  pageLength
    //  rowLength

    // Process the column metadata
    // TODO Find keys to record the format and the type
    for (Column c : sas.getColumns()) {
        // Prefer the human-readable label; fall back to the raw column name.
        String name = c.getLabel();
        if (name == null || name.isEmpty()) {
            name = c.getName();
        }
        metadata.add(Database.COLUMN_NAME, name);
    }

    // Output file contents as a table
    xhtml.element("h1", props.getName());
    xhtml.startElement("table");
    xhtml.newline();

    // Do the column headings
    xhtml.startElement("tr");
    for (Column c : sas.getColumns()) {
        String label = c.getLabel();
        if (label == null || label.isEmpty()) {
            label = c.getName();
        }
        // The raw name goes in the title attribute; the label is the visible heading.
        xhtml.startElement("th", "title", c.getName());
        xhtml.characters(label);
        xhtml.endElement("th");
    }
    xhtml.endElement("tr");
    xhtml.newline();

    //TODO: initialize this on the first row and then apply
    Map<Integer, Format> formatMap = new HashMap<>();

    // Process each row in turn
    Object[] row = null;
    while ((row = sas.readNext()) != null) {
        xhtml.startElement("tr");
        for (String val : DataWriterUtil.getRowValues(sas.getColumns(), row, formatMap)) {
            // Use explicit start/end, rather than element, to
            // ensure that empty cells still get output
            xhtml.startElement("td");
            xhtml.characters(val);
            xhtml.endElement("td");
        }
        xhtml.endElement("tr");
        xhtml.newline();
    }

    // Finish
    xhtml.endElement("table");
    xhtml.endDocument();
}
@Test
public void testSimpleFile() throws Exception {
    // Parse the sample SAS7BDAT fixture and verify both extracted metadata and body text.
    ContentHandler handler = new BodyContentHandler();
    Metadata metadata = new Metadata();
    try (InputStream stream = getResourceAsStream("/test-documents/testSAS.sas7bdat")) {
        parser.parse(stream, handler, metadata, new ParseContext());
    }
    assertEquals("application/x-sas-data", metadata.get(Metadata.CONTENT_TYPE));
    // File-level metadata from the SAS header.
    assertEquals("TESTING", metadata.get(TikaCoreProperties.TITLE));
    // Mon Jan 30 07:31:47 GMT 2017
    assertEquals("2017-01-30T07:31:47Z", metadata.get(TikaCoreProperties.CREATED));
    assertEquals("2017-01-30T07:31:47Z", metadata.get(TikaCoreProperties.MODIFIED));
    assertEquals("1", metadata.get(PagedText.N_PAGES));
    assertEquals("2", metadata.get(Database.COLUMN_COUNT));
    assertEquals("11", metadata.get(Database.ROW_COUNT));
    assertEquals("windows-1252", metadata.get(HttpHeaders.CONTENT_ENCODING));
    assertEquals("W32_7PRO", metadata.get(OfficeOpenXMLExtended.APPLICATION));
    assertEquals("9.0301M2", metadata.get(OfficeOpenXMLExtended.APP_VERSION));
    assertEquals("32", metadata.get(MachineMetadata.ARCHITECTURE_BITS));
    assertEquals("Little", metadata.get(MachineMetadata.ENDIAN));
    assertEquals(Arrays.asList("recnum", "label"),
            Arrays.asList(metadata.getValues(Database.COLUMN_NAME)));
    // Body text: title plus a few representative cell values from the table.
    String content = handler.toString();
    assertContains("TESTING", content);
    assertContains("\t3\t", content);
    assertContains("\t10\t", content);
    assertContains("\tThis is row", content);
    assertContains(" of ", content);
}
/**
 * Builds CORS matching logic from the configured allowed origins.
 * A bare "*" entry allows every origin; entries containing "*" become wildcard
 * patterns; all other entries are matched verbatim. Blank entries are ignored.
 */
static CorsLogic forAllowedOrigins(Collection<String> allowedOrigins) {
    Set<String> allowedOriginsVerbatim = new HashSet<>();
    List<Pattern> allowedOriginPatterns = new ArrayList<>();
    for (String allowedOrigin : allowedOrigins) {
        // Skip blank entries. (The previous "length() > 0" check after this guard
        // was dead code: isBlank() already covers the empty string.)
        if (allowedOrigin.isBlank()) continue;
        if ("*".equals(allowedOrigin)) {
            // A bare wildcard allows everything; remaining entries are irrelevant.
            return new CorsLogic(true, Set.of(), List.of());
        }
        if (allowedOrigin.contains("*")) {
            // NOTE(review): only '.' is escaped before expanding '*'; other regex
            // metacharacters in an origin would be interpreted as regex — confirm intended.
            allowedOriginPatterns.add(Pattern.compile(allowedOrigin.replace(".", "\\.").replace("*", ".*")));
        } else {
            allowedOriginsVerbatim.add(allowedOrigin);
        }
    }
    return new CorsLogic(false, allowedOriginsVerbatim, allowedOriginPatterns);
}
@Test
void wildcard_matches_everything() {
    // A single "*" entry must allow any scheme, host and port.
    CorsLogic logic = CorsLogic.forAllowedOrigins(List.of("*"));
    String[] sampleOrigins = {"http://any.origin", "https://any.origin", "http://any.origin:8080"};
    assertMatches(logic, true, sampleOrigins);
}
/**
 * Validates a batch of instances, de-duplicating first so each distinct
 * instance is checked only once.
 *
 * @throws NacosException if any instance is not ephemeral or otherwise illegal
 */
public static void batchCheckInstanceIsLegal(List<Instance> instances) throws NacosException {
    for (Instance each : new HashSet<>(instances)) {
        checkInstanceIsEphemeral(each);
        checkInstanceIsLegal(each);
    }
}
@Test
void testBatchCheckInstanceIsLegal() throws NacosException {
    // check invalid clusterName
    Instance instance = new Instance();
    instance.setClusterName("cluster1,cluster2");
    List<Instance> instanceList = new ArrayList<>();
    instanceList.add(instance);
    try {
        NamingUtils.batchCheckInstanceIsLegal(instanceList);
        // Force a failure if no exception was thrown (fail() equivalent).
        assertTrue(false);
    } catch (Exception e) {
        assertTrue(e instanceof NacosException);
        assertEquals(
                "Instance 'clusterName' should be characters with only 0-9a-zA-Z-. (current: cluster1,cluster2)",
                e.getMessage());
    }
    instanceList.remove(instance);

    // TODO valid clusterName
    instance.setClusterName("cluster1");
    instanceList.add(instance);
    // Valid cluster name: must not throw.
    NamingUtils.batchCheckInstanceIsLegal(instanceList);
    assertTrue(true);
    instanceList.remove(instance);

    // check heartBeatTimeout, heartBeatInterval, ipDeleteTimeout
    // Interval (2) exceeds timeout (1) and delete timeout (1): must be rejected.
    Map<String, String> meta = new HashMap<>();
    meta.put(PreservedMetadataKeys.HEART_BEAT_TIMEOUT, "1");
    meta.put(PreservedMetadataKeys.HEART_BEAT_INTERVAL, "2");
    meta.put(PreservedMetadataKeys.IP_DELETE_TIMEOUT, "1");
    instance.setMetadata(meta);
    instanceList.add(instance);
    try {
        NamingUtils.batchCheckInstanceIsLegal(instanceList);
        // Force a failure if no exception was thrown.
        assertTrue(false);
    } catch (Exception e) {
        assertTrue(e instanceof NacosException);
        assertEquals("Instance 'heart beat interval' must less than 'heart beat timeout' and 'ip delete timeout'.",
                e.getMessage());
    }
    instanceList.remove(instance);

    // Interval (2) below both timeouts (3): must pass.
    meta.put(PreservedMetadataKeys.HEART_BEAT_TIMEOUT, "3");
    meta.put(PreservedMetadataKeys.HEART_BEAT_INTERVAL, "2");
    meta.put(PreservedMetadataKeys.IP_DELETE_TIMEOUT, "3");
    instance.setMetadata(meta);
    instanceList.add(instance);
    NamingUtils.batchCheckInstanceIsLegal(instanceList);
    assertTrue(true);
}
/**
 * Provider-side auth filter: when the service URL enables authentication,
 * validates the invocation before delegating; an authentication failure is
 * returned as a failed result instead of being propagated.
 */
@Override
public Result invoke(Invoker<?> invoker, Invocation invocation) throws RpcException {
    URL url = invoker.getUrl();
    if (url.getParameter(Constants.SERVICE_AUTH, false)) {
        String authenticatorName = url.getParameter(Constants.AUTHENTICATOR, Constants.DEFAULT_AUTHENTICATOR);
        Authenticator authenticator = applicationModel
                .getExtensionLoader(Authenticator.class)
                .getExtension(authenticatorName);
        try {
            authenticator.authenticate(invocation, url);
        } catch (Exception e) {
            // Surface the auth failure to the caller as a failed async result.
            return AsyncRpcResult.newDefaultAsyncResult(e, invocation);
        }
    }
    return invoker.invoke(invocation);
}
@Test
void testAuthFailedWhenNoAccessKeyPair() {
    URL url = URL.valueOf("dubbo://10.10.10.10:2181")
            .addParameter(CommonConstants.APPLICATION_KEY, "test-provider")
            .addParameter(Constants.SERVICE_AUTH, true);
    Invoker invoker = mock(Invoker.class);
    when(invoker.getUrl()).thenReturn(url);

    Invocation invocation = mock(RpcInvocation.class);
    when(invocation.getObjectAttachment(Constants.AK_KEY)).thenReturn("ak");
    when(invocation.getObjectAttachment(Constants.REQUEST_SIGNATURE_KEY)).thenReturn("dubbo");
    when(invocation.getObjectAttachment(Constants.REQUEST_TIMESTAMP_KEY)).thenReturn(System.currentTimeMillis());
    when(invocation.getObjectAttachment(CommonConstants.CONSUMER)).thenReturn("test-consumer");

    ProviderAuthFilter filter = new ProviderAuthFilter(ApplicationModel.defaultModel());
    Result result = filter.invoke(invoker, invocation);

    // With no access key pair configured on the provider, authentication must fail.
    assertTrue(result.hasException());
    assertTrue(result.getException() instanceof RpcAuthenticationException);
}
@Override
protected Class<?> loadClass(final String name, final boolean resolve) throws ClassNotFoundException {
    // Resolution order: already-loaded -> component-first -> owner-first -> component-only.
    synchronized (getClassLoadingLock(name)) {
        try {
            final Class<?> loadedClass = findLoadedClass(name);
            if (loadedClass != null) {
                return resolveIfNeeded(resolve, loadedClass);
            }

            if (isComponentFirstClass(name)) {
                return loadClassFromComponentFirst(name, resolve);
            }
            if (isOwnerFirstClass(name)) {
                return loadClassFromOwnerFirst(name, resolve);
            }

            // making this behavior configurable (component-only/component-first/owner-first)
            // would allow this class to subsume the FlinkUserCodeClassLoader (with an added
            // exception handler)
            return loadClassFromComponentOnly(name, resolve);
        } catch (ClassNotFoundException e) {
            // If we know the package of this class, enrich the error with a hint
            // naming the module that provides that package.
            Optional<String> foundAssociatedModule =
                    knownPackagePrefixesModuleAssociation.entrySet().stream()
                            .filter(entry -> name.startsWith(entry.getKey()))
                            .map(Map.Entry::getValue)
                            .findFirst();
            if (foundAssociatedModule.isPresent()) {
                throw new ClassNotFoundException(
                        String.format(
                                "Class '%s' not found. Perhaps you forgot to add the module '%s' to the classpath?",
                                name, foundAssociatedModule.get()),
                        e);
            }
            throw e;
        }
    }
}
@Test
void testComponentFirstClassNotFoundFallsBackToOwner() throws Exception {
    // The component claims the class as component-first but cannot find it;
    // loading must fall back to the owner class loader.
    TestUrlClassLoader ownerLoader =
            new TestUrlClassLoader(NON_EXISTENT_CLASS_NAME, CLASS_RETURNED_BY_OWNER);
    ComponentClassLoader componentClassLoader =
            new ComponentClassLoader(
                    new URL[0],
                    ownerLoader,
                    new String[0],
                    new String[] {NON_EXISTENT_CLASS_NAME},
                    Collections.emptyMap());

    assertThat(componentClassLoader.loadClass(NON_EXISTENT_CLASS_NAME))
            .isSameAs(CLASS_RETURNED_BY_OWNER);
}
/**
 * Intersects two connected ranges, tolerating endpoint type mismatches: on a
 * {@link ClassCastException} both ranges are widened to a common numeric type
 * and the intersection is retried.
 */
public static Range<Comparable<?>> safeIntersection(final Range<Comparable<?>> range, final Range<Comparable<?>> connectedRange) {
    try {
        return range.intersection(connectedRange);
    } catch (final ClassCastException ex) {
        Class<?> targetType = getRangeTargetNumericType(range, connectedRange);
        if (null == targetType) {
            // No common numeric type: the mismatch is genuine, rethrow.
            throw ex;
        }
        return createTargetNumericTypeRange(range, targetType)
                .intersection(createTargetNumericTypeRange(connectedRange, targetType));
    }
}
@Test
void assertSafeIntersectionForInteger() {
    // Overlapping closed integer ranges intersect to the overlap [1500, 2000].
    Range<Comparable<?>> first = Range.closed(10, 2000);
    Range<Comparable<?>> second = Range.closed(1500, 4000);

    Range<Comparable<?>> intersection = SafeNumberOperationUtils.safeIntersection(first, second);

    assertThat(intersection.lowerBoundType(), is(BoundType.CLOSED));
    assertThat(intersection.lowerEndpoint(), is(1500));
    assertThat(intersection.upperBoundType(), is(BoundType.CLOSED));
    assertThat(intersection.upperEndpoint(), is(2000));
}
/**
 * Checks that an imported job carries a description of at least {@code minLength}
 * characters; returns an approval or an error feedback accordingly. Disabled
 * rules and non-job subjects yield no feedback.
 */
@Override
public List<ImportValidationFeedback> verifyRule( Object subject ) {
  List<ImportValidationFeedback> feedback = new ArrayList<>();
  if ( !isEnabled() || !( subject instanceof JobMeta ) ) {
    return feedback;
  }
  String description = ( (JobMeta) subject ).getDescription();
  boolean longEnough = description != null && description.length() >= minLength;
  if ( longEnough ) {
    feedback.add( new ImportValidationFeedback( this, ImportValidationResultType.APPROVAL, "A description is present" ) );
  } else {
    feedback.add( new ImportValidationFeedback( this, ImportValidationResultType.ERROR, "A description is not present or too short" ) );
  }
  return feedback;
}
@Test
public void testVerifyRule_LongDescription_EnabledRule() {
  JobMeta jobMeta = new JobMeta();
  jobMeta.setDescription( "A very long description that has more characters than the minimum required to be a valid one!" );

  // Rule requires >= 10 chars and is enabled: must approve the job.
  List<ImportValidationFeedback> feedbackList = getImportRule( 10, true ).verifyRule( jobMeta );

  assertNotNull( feedbackList );
  assertFalse( feedbackList.isEmpty() );
  ImportValidationFeedback firstFeedback = feedbackList.get( 0 );
  assertNotNull( firstFeedback );
  assertEquals( ImportValidationResultType.APPROVAL, firstFeedback.getResultType() );
  assertTrue( firstFeedback.isApproval() );
}
/**
 * Converts {@code fromValue} expressed in {@code fromUnit} to the equivalent
 * amount in {@code toUnit}, throwing if the result would overflow a long.
 *
 * @throws IllegalArgumentException if either unit is null, unknown, or the
 *         conversion overflows
 */
public static long convert(String fromUnit, String toUnit, long fromValue) {
    if (toUnit == null || fromUnit == null) {
        throw new IllegalArgumentException("One or more arguments are null");
    }

    if (fromUnit.equals(toUnit)) {
        return fromValue;
    }
    Converter fc = getConverter(fromUnit);
    Converter tc = getConverter(toUnit);
    // Overall conversion ratio applied to fromValue is numerator / denominator.
    long numerator = fc.numerator * tc.denominator;
    long denominator = fc.denominator * tc.numerator;
    // Largest value safely multiplied by numerator without exceeding Long.MAX_VALUE.
    // NOTE(review): the overflow checks assume fromValue >= 0 — confirm callers
    // never pass negative quantities.
    long numeratorMultiplierLimit = Long.MAX_VALUE / numerator;
    if (numerator < denominator) {
        // Shrinking conversion: multiply first (with overflow check), then divide.
        if (numeratorMultiplierLimit < fromValue) {
            String overflowMsg =
                "Converting " + fromValue + " from '" + fromUnit + "' to '" + toUnit
                    + "' will result in an overflow of Long";
            throw new IllegalArgumentException(overflowMsg);
        }
        return (fromValue * numerator) / denominator;
    }
    // Growing conversion: multiply-then-divide while the product still fits.
    if (numeratorMultiplierLimit > fromValue) {
        return (numerator * fromValue) / denominator;
    }
    // Otherwise divide first. NOTE(review): integer division truncates here if
    // denominator does not divide numerator evenly — presumably unit ratios are
    // powers of 1000 so this is exact; confirm for all registered units.
    long tmp = numerator / denominator;
    if ((Long.MAX_VALUE / tmp) < fromValue) {
        String overflowMsg =
            "Converting " + fromValue + " from '" + fromUnit + "' to '" + toUnit
                + "' will result in an overflow of Long";
        throw new IllegalArgumentException(overflowMsg);
    }
    return fromValue * tmp;
}
@Test void testOverflow() { long test = 5 * 1000L * 1000L * 1000L * 1000L * 1000L; try { UnitsConversionUtil.convert("P", "p", test); fail("this operation should result in an overflow"); } catch (IllegalArgumentException ie) { // do nothing } try { UnitsConversionUtil.convert("m", "p", Long.MAX_VALUE - 1); fail("this operation should result in an overflow"); } catch (IllegalArgumentException ie) { // do nothing } }
/**
 * Commits the output of every table written by this job, running per-table
 * commits in parallel, then cleans up the per-job temporary locations.
 */
@Override
public void commitJob(JobContext originalContext) throws IOException {
    JobContext jobContext = TezUtil.enrichContextWithVertexId(originalContext);
    JobConf jobConf = jobContext.getJobConf();

    long startTime = System.currentTimeMillis();
    LOG.info("Committing job {} has started", jobContext.getJobID());

    Collection<String> outputs = HiveIcebergStorageHandler.outputTables(jobContext.getJobConf());
    // Populated concurrently by the per-table commit tasks below, hence the concurrent queue.
    Collection<String> jobLocations = new ConcurrentLinkedQueue<>();

    ExecutorService fileExecutor = fileExecutor(jobConf);
    ExecutorService tableExecutor = tableExecutor(jobConf, outputs.size());
    try {
        // Commits the changes for the output tables in parallel
        Tasks.foreach(outputs)
            .throwFailureWhenFinished()
            .stopOnFailure()
            .executeWith(tableExecutor)
            .run(
                output -> {
                    Table table = HiveIcebergStorageHandler.table(jobConf, output);
                    if (table != null) {
                        String catalogName = HiveIcebergStorageHandler.catalogName(jobConf, output);
                        jobLocations.add(
                            generateJobLocation(table.location(), jobConf, jobContext.getJobID()));
                        commitTable(
                            table.io(), fileExecutor, jobContext, output, table.location(), catalogName);
                    } else {
                        // No serialized table in the config: nothing to commit for this output.
                        LOG.info(
                            "CommitJob found no serialized table in config for table: {}. Skipping job commit.",
                            output);
                    }
                });
    } finally {
        fileExecutor.shutdown();
        if (tableExecutor != null) {
            tableExecutor.shutdown();
        }
    }
    LOG.info(
        "Commit took {} ms for job {}",
        System.currentTimeMillis() - startTime,
        jobContext.getJobID());

    cleanup(jobContext, jobLocations);
}
@Test
public void testSuccessfulMultipleTasksUnpartitionedWrite() throws IOException {
    HiveIcebergOutputCommitter committer = new HiveIcebergOutputCommitter();
    Table table = table(temp.toFile().getPath(), false);
    JobConf conf = jobConf(table, 2);

    // Write with two tasks, commit the job, then verify files and data landed.
    List<Record> written = writeRecords(table.name(), 2, 0, true, false, conf);
    committer.commitJob(new JobContextImpl(conf, JOB_ID));

    HiveIcebergTestUtils.validateFiles(table, conf, JOB_ID, 2);
    HiveIcebergTestUtils.validateData(table, written, 0);
}
/**
 * Scans the vertices eligible for slow-task detection and returns, per
 * execution vertex, the attempts whose progress falls behind the vertex's
 * computed baseline. Terminal vertices are skipped.
 */
@VisibleForTesting
Map<ExecutionVertexID, Collection<ExecutionAttemptID>> findSlowTasks(
        final ExecutionGraph executionGraph) {
    final long now = System.currentTimeMillis();
    final Map<ExecutionVertexID, Collection<ExecutionAttemptID>> result = new HashMap<>();

    for (ExecutionJobVertex jobVertex : getJobVerticesToCheck(executionGraph)) {
        final ExecutionTimeWithInputBytes baseline = getBaseline(jobVertex, now);

        for (ExecutionVertex taskVertex : jobVertex.getTaskVertices()) {
            if (taskVertex.getExecutionState().isTerminal()) {
                continue;
            }
            final List<ExecutionAttemptID> slowExecutions =
                    findExecutionsExceedingBaseline(
                            taskVertex.getCurrentExecutions(), baseline, now);
            if (!slowExecutions.isEmpty()) {
                result.put(taskVertex.getID(), slowExecutions);
            }
        }
    }

    return result;
}
@Test
void testUnbalancedInput() throws Exception {
    // Tasks with heavily unbalanced input sizes must not be flagged as slow merely
    // for running longer on more data.
    final int parallelism = 3;
    final JobVertex jobVertex1 = createNoOpVertex(parallelism);
    final JobVertex jobVertex2 = createNoOpVertex(parallelism);
    jobVertex2.connectNewDataSetAsInput(
            jobVertex1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.PIPELINED);
    final ExecutionGraph executionGraph = createExecutionGraph(jobVertex1, jobVertex2);

    // Detector configured with (0.3, 1, 0) — see createSlowTaskDetector for parameter meaning.
    final ExecutionTimeBasedSlowTaskDetector slowTaskDetector = createSlowTaskDetector(0.3, 1, 0);

    // Give vertex 2's three subtasks very different input volumes.
    final ExecutionVertex ev21 =
            executionGraph.getJobVertex(jobVertex2.getID()).getTaskVertices()[0];
    ev21.setInputBytes(1024);
    final ExecutionVertex ev22 =
            executionGraph.getJobVertex(jobVertex2.getID()).getTaskVertices()[1];
    ev22.setInputBytes(1_024_000);
    final ExecutionVertex ev23 =
            executionGraph.getJobVertex(jobVertex2.getID()).getTaskVertices()[2];
    ev23.setInputBytes(4_096_000);

    // Let wall-clock time accumulate before finishing the smallest-input task,
    // so the remaining tasks have measurable execution time.
    Thread.sleep(1000);
    ev21.getCurrentExecutionAttempt().markFinished();

    final Map<ExecutionVertexID, Collection<ExecutionAttemptID>> slowTasks =
            slowTaskDetector.findSlowTasks(executionGraph);

    // no task will be detected as slow task
    assertThat(slowTasks).isEmpty();
}
/**
 * Notifies on Grand Exchange game messages: completed offers use their own
 * notification setting, all other GE messages use the general one.
 */
@Subscribe
public void onChatMessage(ChatMessage event)
{
	if (event.getType() != ChatMessageType.GAMEMESSAGE)
	{
		return;
	}

	String message = Text.removeTags(event.getMessage());
	if (!message.startsWith("Grand Exchange:"))
	{
		return;
	}

	if (message.startsWith("Grand Exchange: Finished"))
	{
		notifier.notify(config.notifyOnOfferComplete(), message);
	}
	else
	{
		notifier.notify(config.enableNotifications(), message);
	}
}
@Test
public void testNotifyPartial()
{
	when(grandExchangeConfig.enableNotifications()).thenReturn(Notification.ON);

	// A partially-filled offer message goes through the general notification setting.
	ChatMessage partialBuyMessage = new ChatMessage();
	partialBuyMessage.setType(ChatMessageType.GAMEMESSAGE);
	partialBuyMessage.setMessage("<col=006060>Grand Exchange: Bought 200 / 80,000 x Acorn.</col>");
	grandExchangePlugin.onChatMessage(partialBuyMessage);

	verify(notifier).notify(any(Notification.class), anyString());
}
/**
 * @return the number of failed checkForDecommissioningNodes retrievals
 *         recorded by this metrics instance so far
 */
public int getCheckForDecommissioningNodesFailedRetrieved() {
    return numCheckForDecommissioningNodesFailedRetrieved.value();
}
@Test
public void testCheckForDecommissioningNodesFailedRetrieved() {
    // One simulated failure must increment the failure counter by exactly one.
    long failedBefore = metrics.getCheckForDecommissioningNodesFailedRetrieved();
    badSubCluster.getCheckForDecommissioningNodesFailed();
    long failedAfter = metrics.getCheckForDecommissioningNodesFailedRetrieved();
    Assert.assertEquals(failedBefore + 1, failedAfter);
}
/**
 * Emits the Java source expression that recreates this mask: a direct
 * constructor call if the normalized form is still a plain long mask,
 * otherwise the normalized mask's own instancing statement.
 */
@Override
public String getInstancingStatement() {
    BitMask normalized = normalize();
    if (normalized instanceof LongBitMask) {
        return "new " + LongBitMask.class.getCanonicalName() + "(" + mask + "L)";
    }
    // Normalization produced a specialized mask (empty/all-set variants); delegate.
    return normalized.getInstancingStatement();
}
@Test
public void testGetInstancingStatement() {
    // Special mask values normalize to dedicated singleton types; everything else
    // stays a plain LongBitMask constructor call.
    String pkg = "org.drools.util.bitmask.";
    assertThat(new LongBitMask(0L).getInstancingStatement()).isEqualTo(pkg + "EmptyBitMask.get()");
    assertThat(new LongBitMask(1L).getInstancingStatement()).isEqualTo(pkg + "EmptyButLastBitMask.get()");
    assertThat(new LongBitMask(Long.MAX_VALUE).getInstancingStatement()).isEqualTo(pkg + "AllSetButLastBitMask.get()");
    assertThat(new LongBitMask(-1L).getInstancingStatement()).isEqualTo(pkg + "AllSetBitMask.get()");
    assertThat(new LongBitMask(2L).getInstancingStatement()).isEqualTo("new " + pkg + "LongBitMask(2L)");
}
/**
 * Single-argument convenience overload; delegates to the two-argument
 * {@code find} passing {@code null} as the second argument (presumably a
 * default/fallback value — confirm against the two-argument overload).
 */
public T find(JsonParser jp) throws IOException {
    return find(jp, null);
}
@Test
public void testLookup() {
    EnumLookup<TestEnum> lookup = new EnumLookup<>(TestEnum.class, v -> v.name.toCharArray());
    // Concatenated enum names; (offset, length) pairs select individual words.
    char[] data = "zeroonetwothreefour".toCharArray();
    Assert.assertSame(TestEnum.VAL_THREE, lookup.find(data, 10, 5));
    Assert.assertSame(TestEnum.VAL_TWO, lookup.find(data, 7, 3));
    Assert.assertSame(TestEnum.VAL_ONE, lookup.find(data, 4, 3));
}
/**
 * Splits a command line into tokens using shell-like rules: whitespace
 * separates tokens; single and double quotes group; backslash escapes the
 * next character except inside single quotes. A null input yields no tokens.
 */
public static List<String> shellSplit(CharSequence string) {
    List<String> tokens = new ArrayList<>();
    if (string == null) {
        return tokens;
    }

    StringBuilder token = new StringBuilder();
    boolean escaped = false;
    boolean inQuotes = false;
    char quote = ' ';

    for (int i = 0; i < string.length(); i++) {
        char c = string.charAt(i);
        if (escaped) {
            token.append(c);
            escaped = false;
        } else if (c == '\\' && !(inQuotes && quote == '\'')) {
            // Backslash escapes the next character, except inside single quotes.
            escaped = true;
        } else if (inQuotes && c == quote) {
            inQuotes = false;
        } else if (!inQuotes && (c == '\'' || c == '"')) {
            inQuotes = true;
            quote = c;
        } else if (!inQuotes && Character.isWhitespace(c)) {
            // Token boundary; empty runs of whitespace produce no token.
            if (token.length() > 0) {
                tokens.add(token.toString());
                token.setLength(0);
            }
        } else {
            token.append(c);
        }
    }

    if (token.length() > 0) {
        tokens.add(token.toString());
    }
    return tokens;
}
@Test
public void blankYieldsEmptyArgs() {
    // An empty command line must split into zero tokens.
    assertTrue(StringUtils.shellSplit("").isEmpty());
}
/**
 * Loosely compares two objects for equivalence, tolerating type differences
 * (e.g. String vs Number, scalar vs Collection, map vs bean) by dispatching
 * to the type-specific compare overloads; falls back to comparing bean
 * property maps.
 */
public static boolean compare(Object source, Object target) {
    if (source == target) {
        return true;
    }
    if (source == null || target == null) {
        return false;
    }
    if (source.equals(target)) {
        return true;
    }
    if (source instanceof Boolean) {
        return compare(((Boolean) source), target);
    }
    // For each supported type: try source first, then the symmetric target case.
    if (source instanceof Number) {
        return compare(((Number) source), target);
    }
    if (target instanceof Number) {
        return compare(((Number) target), source);
    }
    if (source instanceof Date) {
        return compare(((Date) source), target);
    }
    if (target instanceof Date) {
        return compare(((Date) target), source);
    }
    if (source instanceof String) {
        return compare(((String) source), target);
    }
    if (target instanceof String) {
        return compare(((String) target), source);
    }
    if (source instanceof Collection) {
        return compare(((Collection) source), target);
    }
    if (target instanceof Collection) {
        return compare(((Collection) target), source);
    }
    if (source.getClass().isEnum() || source instanceof Enum) {
        return compare(((Enum) source), target);
    }
    // FIX: previously tested "source instanceof Enum" here, which is always false
    // at this point (handled above). "target instanceof Enum" is required so enum
    // constants with class bodies (for which getClass().isEnum() is false, since
    // their class is an anonymous subclass) are still dispatched to the enum compare.
    if (target.getClass().isEnum() || target instanceof Enum) {
        return compare(((Enum) target), source);
    }
    if (source.getClass().isArray()) {
        return compare(((Object[]) source), target);
    }
    if (target.getClass().isArray()) {
        return compare(((Object[]) target), source);
    }
    // Last resort: compare the objects' copied bean properties.
    return compare(FastBeanCopier.copy(source, HashMap.class), FastBeanCopier.copy(target, HashMap.class));
}
@Test
public void mapTest() {
    Date date = new Date();
    // Maps compare value-by-value with loose typing: "123" matches 123.
    Assert.assertTrue(CompareUtils.compare(Collections.singletonMap("test", "123"), Collections.singletonMap("test", 123)));
    // A missing key makes the maps unequal.
    Assert.assertFalse(CompareUtils.compare(Collections.singletonMap("test", "123"), Collections.emptyMap()));
    // A map also compares against a bean's copied properties.
    Assert.assertTrue(CompareUtils.compare(Collections.singletonMap("test", "123"), new TestBean("123")));
    // Date values compare against their formatted string representation.
    Assert.assertTrue(CompareUtils.compare(Collections.singletonMap("test", date), new TestBean(DateFormatter.toString(date, "yyyy-MM-dd"))));
}
@Override
public GoViewDataRespVO getDataBySQL(String sql) {
    // 1. Execute the query
    SqlRowSet sqlRowSet = jdbcTemplate.queryForRowSet(sql);
    // 2. Build the response
    GoViewDataRespVO respVO = new GoViewDataRespVO();
    // 2.1 Parse the result-set metadata: column names become the dimensions
    SqlRowSetMetaData metaData = sqlRowSet.getMetaData();
    String[] columnNames = metaData.getColumnNames();
    respVO.setDimensions(Arrays.asList(columnNames));
    // 2.2 Parse the data rows
    respVO.setSource(new LinkedList<>()); // Row count is unknown up front; LinkedList uses a bit more memory but avoids grow-and-copy resizing
    while (sqlRowSet.next()) {
        Map<String, Object> data = Maps.newHashMapWithExpectedSize(columnNames.length);
        for (String columnName : columnNames) {
            data.put(columnName, sqlRowSet.getObject(columnName));
        }
        respVO.getSource().add(data);
    }
    return respVO;
}
@Test
public void testGetDataBySQL() {
    // Prepare arguments
    String sql = "SELECT id, name FROM system_users";
    // Mock the query
    SqlRowSet sqlRowSet = mock(SqlRowSet.class);
    when(jdbcTemplate.queryForRowSet(eq(sql))).thenReturn(sqlRowSet);
    // Mock the result-set metadata (two columns)
    SqlRowSetMetaData metaData = mock(SqlRowSetMetaData.class);
    when(sqlRowSet.getMetaData()).thenReturn(metaData);
    when(metaData.getColumnNames()).thenReturn(new String[]{"id", "name"});
    // Mock two data rows, then end of result set
    when(sqlRowSet.next()).thenReturn(true).thenReturn(true).thenReturn(false);
    when(sqlRowSet.getObject("id")).thenReturn(1L).thenReturn(2L);
    when(sqlRowSet.getObject("name")).thenReturn("芋道源码").thenReturn("芋道");

    // Invoke
    GoViewDataRespVO dataBySQL = goViewDataService.getDataBySQL(sql);
    // Assert: dimensions match the column names and both rows are returned intact
    assertEquals(Arrays.asList("id", "name"), dataBySQL.getDimensions());
    assertEquals(2, dataBySQL.getDimensions().size());
    assertEquals(2, dataBySQL.getSource().get(0).size());
    assertEquals(1L, dataBySQL.getSource().get(0).get("id"));
    assertEquals("芋道源码", dataBySQL.getSource().get(0).get("name"));
    assertEquals(2, dataBySQL.getSource().get(1).size());
    assertEquals(2L, dataBySQL.getSource().get(1).get("id"));
    assertEquals("芋道", dataBySQL.getSource().get(1).get("name"));
}
/**
 * Reports whether an object with the same name (case-insensitive) already
 * exists in the given list.
 */
protected static boolean isDuplicate( List<? extends SharedObjectInterface> objects, SharedObjectInterface object ) {
  final String candidateName = object.getName();
  return objects.stream().anyMatch( existing -> existing.getName().equalsIgnoreCase( candidateName ) );
}
@Test
public void isDuplicate_NotSame() {
    // Two entirely different names must not be reported as duplicates.
    assertFalse( isDuplicate( singletonList( mockObject( "qwerty" ) ), mockObject( "asdfg" ) ) );
}
/**
 * Returns a shallow copy of this tuple with the two fields swapped.
 *
 * @return a new {@code Tuple2} whose first field is this tuple's second and vice versa
 */
public Tuple2<T1, T0> swap() {
    // Diamond operator: the type arguments are inferred from the declared return type.
    return new Tuple2<>(f1, f0);
}
@Test
void testSwapValues() {
    // swap() must exchange the fields without mutating the original tuple.
    Tuple2<String, Integer> toSwap = new Tuple2<>("Test case", 25);
    Tuple2<Integer, String> swapped = toSwap.swap();
    assertThat(toSwap.f1).isEqualTo(swapped.f0);
    assertThat(toSwap.f0).isEqualTo(swapped.f1);
}
/**
 * Fetches all branches of the given GitLab project via the REST API,
 * authenticating with a personal access token (PAT).
 */
public List<GitLabBranch> getBranches(String gitlabUrl, String pat, Long gitlabProjectId) {
    String url = format("%s/projects/%s/repository/branches", gitlabUrl, gitlabProjectId);
    LOG.debug("get branches : [{}]", url);
    // The PAT travels in the Private-Token header, per GitLab API convention.
    Request request = new Request.Builder()
        .addHeader(PRIVATE_TOKEN, pat)
        .get()
        .url(url)
        .build();
    // try-with-resources ensures the OkHttp response body is always released.
    try (Response response = client.newCall(request).execute()) {
        // Fails fast on non-2xx status before the body is consumed.
        checkResponseIsSuccessful(response);
        String body = response.body().string();
        LOG.trace("loading branches payload result : [{}]", body);
        return Arrays.asList(new GsonBuilder().create().fromJson(body, GitLabBranch[].class));
    } catch (JsonSyntaxException e) {
        // NOTE(review): the original JsonSyntaxException cause is dropped here,
        // which loses the parse position from the stack trace.
        throw new IllegalArgumentException("Could not parse GitLab answer to retrieve project branches. Got a non-json payload as result.");
    } catch (IOException e) {
        logException(url, e);
        throw new IllegalStateException(e.getMessage(), e);
    }
}
@Test
public void get_branches() {
    // Enqueue a canned two-branch JSON payload on the mock GitLab server.
    MockResponse response = new MockResponse()
        .setResponseCode(200)
        .setBody("[{\n" +
            "  \"name\": \"main\",\n" +
            "  \"default\": true\n" +
            "},{\n" +
            "  \"name\": \"other\",\n" +
            "  \"default\": false\n" +
            "}]");
    server.enqueue(response);
    // Both branches must be deserialized with name and default flag intact.
    assertThat(underTest.getBranches(gitlabUrl, "pat", 12345L))
        .extracting(GitLabBranch::getName, GitLabBranch::isDefault)
        .containsExactly(
            tuple("main", true),
            tuple("other", false)
        );
}
/**
 * Validates that a row-ID partition component is present whenever any of the
 * requested columns is the synthetic row-ID column.
 *
 * @throws IllegalArgumentException if row IDs are requested but the partition component is absent
 */
public static void checkRowIDPartitionComponent(List<HiveColumnHandle> columns, Optional<byte[]> rowIdPartitionComponent)
{
    // Method reference replaces the pass-through lambda (same semantics, clearer intent).
    boolean supplyRowIDs = columns.stream().anyMatch(HiveColumnHandle::isRowIdColumnHandle);
    if (supplyRowIDs) {
        checkArgument(rowIdPartitionComponent.isPresent(), "rowIDPartitionComponent required when supplying row IDs");
    }
}
@Test(expectedExceptions = IllegalArgumentException.class)
public void testCheckRowIDPartitionComponent_Missing() {
    // A row-ID column without a partition component must be rejected.
    HiveColumnHandle handle = HiveColumnHandle.rowIdColumnHandle();
    List<HiveColumnHandle> columns = ImmutableList.of(handle);
    checkRowIDPartitionComponent(columns, Optional.empty());
}
static MapKeyLoader.Role assignRole(boolean isPartitionOwner, boolean isMapNamePartition, boolean isMapNamePartitionFirstReplica) { if (isMapNamePartition) { if (isPartitionOwner) { // map-name partition owner is the SENDER return MapKeyLoader.Role.SENDER; } else { if (isMapNamePartitionFirstReplica) { // first replica of the map-name partition is the SENDER_BACKUP return MapKeyLoader.Role.SENDER_BACKUP; } else { // other replicas of the map-name partition do not have a role return MapKeyLoader.Role.NONE; } } } else { // ordinary partition owners are RECEIVERs, otherwise no role return isPartitionOwner ? MapKeyLoader.Role.RECEIVER : MapKeyLoader.Role.NONE; } }
@Test
public void assignRole_RECEIVER_insignificantFlagFalse() {
    // For a non-map-name partition the first-replica flag is irrelevant;
    // the partition owner must still be a RECEIVER.
    boolean isPartitionOwner = true;
    boolean isMapNamePartition = false;
    boolean insignificant = false;
    Role role = MapKeyLoaderUtil.assignRole(isPartitionOwner, isMapNamePartition, insignificant);
    assertEquals(RECEIVER, role);
}
/**
 * Resolves a resource by consulting each source (application / plugin /
 * dependencies) in the order dictated by the class-loading strategy for the name.
 * Returns the first hit, or {@code null} when no source has it.
 */
@Override
public URL getResource(String name) {
    ClassLoadingStrategy loadingStrategy = getClassLoadingStrategy(name);
    log.trace("Received request to load resource '{}'", name);
    for (ClassLoadingStrategy.Source classLoadingSource : loadingStrategy.getSources()) {
        URL url = null;
        switch (classLoadingSource) {
            case APPLICATION:
                // Parent (application) class loader.
                url = super.getResource(name);
                break;
            case PLUGIN:
                // This plugin's own classpath.
                url = findResource(name);
                break;
            case DEPENDENCIES:
                // Classpaths of the plugin's dependencies.
                url = findResourceFromDependencies(name);
                break;
        }
        if (url != null) {
            log.trace("Found resource '{}' in {} classpath", name, classLoadingSource);
            return url;
        } else {
            log.trace("Couldn't find resource '{}' in {}", name, classLoadingSource);
        }
    }
    return null;
}
@Test
void parentLastGetResourceNonExisting() {
    // A resource absent from every source must yield null (no exception).
    assertNull(parentLastPluginClassLoader.getResource("META-INF/non-existing-file"));
}
/**
 * Builds the compensating DELETE statement that undoes an INSERT,
 * using the after-image rows captured in the undo log.
 */
@Override
protected String buildUndoSQL() {
    TableRecords afterImage = sqlUndoLog.getAfterImage();
    List<Row> afterImageRows = afterImage.getRows();
    if (CollectionUtils.isEmpty(afterImageRows)) {
        // An INSERT undo log without after-image rows indicates log corruption.
        throw new ShouldNeverHappenException("Invalid UNDO LOG");
    }
    return generateDeleteSql(afterImageRows, afterImage);
}
@Test
public void buildUndoSQL() {
    // The undo SQL for an insert must be a DELETE keyed on the id column.
    String sql = executor.buildUndoSQL().toLowerCase();
    Assertions.assertNotNull(sql);
    Assertions.assertTrue(sql.contains("delete"));
    Assertions.assertTrue(sql.contains("id"));
}
/**
 * Convenience overload: computes the next execution time of the trigger
 * starting from the trigger's own stored next-time.
 */
public Optional<DateTime> nextTime(JobTriggerDto trigger) {
    return nextTime(trigger, trigger.nextTime());
}
@Test
public void cronNextTimeAfter() {
    // Hourly cron ("0 0 * ? * * *") in UTC: the next fire after any top-of-hour
    // instant must be exactly one hour later, still in UTC.
    final JobTriggerDto trigger = JobTriggerDto.builderWithClock(clock)
        .jobDefinitionId("abc-123")
        .jobDefinitionType("event-processor-execution-v1")
        .schedule(CronJobSchedule.builder()
            .cronExpression("0 0 * ? * * *")
            .timezone("UTC")
            .build())
        .build();
    DateTime date = DateTime.parse("2024-01-01T0:00:00.000Z");
    DateTime nextTime = strategies.nextTime(trigger, date).orElse(null);
    assertThat(nextTime)
        .isNotNull()
        .satisfies(dateTime -> {
            assertThat(dateTime.getZone()).isEqualTo(DateTimeZone.forID("UTC"));
            assertThat(dateTime.toString(DATE_FORMAT)).isEqualTo("01/01/2024 01:00:00");
        });
    // Second sample in February to rule out a date-dependent result.
    date = DateTime.parse("2024-02-01T0:00:00.000Z");
    nextTime = strategies.nextTime(trigger, date).orElse(null);
    assertThat(nextTime)
        .isNotNull()
        .satisfies(dateTime -> {
            assertThat(dateTime.getZone()).isEqualTo(DateTimeZone.forID("UTC"));
            assertThat(dateTime.toString(DATE_FORMAT)).isEqualTo("01/02/2024 01:00:00");
        });
}
/**
 * Returns the last path segment of this resource, or {@code null} for the root.
 */
@Override
public String getFilename() {
    if (isDirectory()) {
        // NOTE(review): relies on how org.apache.hadoop.fs.Path normalizes a
        // trailing-slash directory URI — the expectation is that "/dirA/abc/"
        // yields "abc" via getParent(); confirm against Path's normalization rules.
        Path parentPath = new Path(uri).getParent();
        return parentPath == null ? null : parentPath.getName();
    }
    return new Path(uri).getName();
}
@Test
public void testGetFilename() {
    // Root has no filename; files and directories both yield the last segment.
    assertNull(toResourceIdentifier("").getFilename());
    assertEquals("abc", toResourceIdentifier("/dirA/abc").getFilename());
    assertEquals("abc", toResourceIdentifier("/dirA/abc/").getFilename());
    assertEquals("xyz.txt", toResourceIdentifier("/dirA/abc/xyz.txt").getFilename());
}
/**
 * Attempts to claim progress up to the given offset, accumulating byte counts.
 * Returns {@code false} when the underlying range tracker rejects the offset.
 */
@Override
public boolean tryClaim(OffsetByteProgress position) {
    // Delegate the claim decision to the offset range tracker
    // (it also enforces monotonically increasing offsets).
    if (!rangeTracker.tryClaim(position.lastOffset().value())) {
        return false;
    }
    // Record progress only after the claim succeeded.
    lastClaimed = position.lastOffset().value();
    bytes += position.batchBytes();
    return true;
}
@Test
public void cannotClaimBackwards() {
    // After claiming offset 1000, re-claiming the same or an earlier offset
    // must throw (monotonicity is enforced by the underlying tracker).
    assertTrue(tracker.tryClaim(OffsetByteProgress.of(Offset.of(1_000), MIN_BYTES)));
    assertThrows(
        IllegalArgumentException.class,
        () -> tracker.tryClaim(OffsetByteProgress.of(Offset.of(1_000), MIN_BYTES)));
    assertThrows(
        IllegalArgumentException.class,
        () -> tracker.tryClaim(OffsetByteProgress.of(Offset.of(999), MIN_BYTES)));
}
/**
 * Intercepts SAML validation callbacks: when the relay state marks a validation
 * request, renders the redirection page template instead of continuing the chain.
 * The SAML response is HTML-escaped and a CSP nonce is applied to guard against XSS.
 */
@Override
public void doFilter(HttpRequest request, HttpResponse response, FilterChain chain) throws IOException {
    String relayState = request.getParameter(RELAY_STATE_PARAMETER);
    if (isSamlValidation(relayState)) {
        URI redirectionEndpointUrl = URI.create(server.getContextPath() + "/")
            .resolve(SAML_VALIDATION_CONTROLLER_CONTEXT + "/")
            .resolve(SAML_VALIDATION_KEY);
        // Escape the raw SAML response so it cannot break out of the HTML attribute.
        String samlResponse = StringEscapeUtils.escapeHtml3(request.getParameter(SAML_RESPONSE_PARAMETER));
        String csrfToken = getCsrfTokenFromRelayState(relayState);
        // Adds CSP headers and returns the per-response script nonce.
        String nonce = SamlValidationCspHeaders.addCspHeadersWithNonceToResponse(response);
        // replaceEachRepeatedly: placeholders may appear multiple times in the template.
        String template = StringUtils.replaceEachRepeatedly(redirectionPageTemplate,
            new String[]{"%NONCE%", "%WEB_CONTEXT%", "%VALIDATION_URL%", "%SAML_RESPONSE%", "%CSRF_TOKEN%"},
            new String[]{nonce, server.getContextPath(), redirectionEndpointUrl.toString(), samlResponse, csrfToken});
        response.setContentType("text/html");
        response.getWriter().print(template);
        // Validation requests are terminal: do not continue the filter chain.
        return;
    }
    chain.doFilter(request, response);
}
@Test
public void do_filter_validation_relay_state_with_malicious_csrfToken() throws IOException {
    HttpRequest servletRequest = mock(HttpRequest.class);
    HttpResponse servletResponse = mock(HttpResponse.class);
    FilterChain filterChain = mock(FilterChain.class);
    // Benign SAML response payload that must survive into the rendered page.
    String validSample = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890";
    when(servletRequest.getParameter(matches("SAMLResponse"))).thenReturn(validSample);
    // CSRF token attempting an HTML-injection break-out.
    String maliciousToken = "test\"</input><script>*Malicious Token*</script><input value=\"";
    when(servletRequest.getParameter(matches("RelayState"))).thenReturn("validation-query/" + maliciousToken);
    PrintWriter pw = mock(PrintWriter.class);
    when(servletResponse.getWriter()).thenReturn(pw);
    underTest.doFilter(servletRequest, servletResponse, filterChain);
    // Capture what the filter printed and check CSP headers were set.
    ArgumentCaptor<String> htmlProduced = ArgumentCaptor.forClass(String.class);
    verify(pw).print(htmlProduced.capture());
    CSP_HEADERS.forEach(h -> verify(servletResponse).setHeader(eq(h), anyString()));
    // The valid payload is preserved; the injected script must not appear verbatim.
    assertThat(htmlProduced.getValue()).contains(validSample);
    assertThat(htmlProduced.getValue()).doesNotContain("<script>/*Malicious Token*/</script>");
}
/**
 * Creates a JAXB {@link Marshaller} for the given class, applying this factory's
 * configured properties, optional validation event handler, and schema.
 *
 * @throws JAXBException if the context or marshaller cannot be created
 */
public Marshaller createMarshaller(Class<?> clazz) throws JAXBException {
    Marshaller marshaller = getContext(clazz).createMarshaller();
    setMarshallerProperties(marshaller);
    // Event handler is optional; schema may be null (JAXB treats null as "no validation").
    if (marshallerEventHandler != null) {
        marshaller.setEventHandler(marshallerEventHandler);
    }
    marshaller.setSchema(marshallerSchema);
    return marshaller;
}
@Test
void buildsMarshallerWithJAXBEncodingProperty() throws Exception {
    // The encoding configured on the builder must be propagated to created marshallers.
    JAXBContextFactory factory = new JAXBContextFactory.Builder().withMarshallerJAXBEncoding("UTF-16").build();
    Marshaller marshaller = factory.createMarshaller(Object.class);
    assertThat(marshaller.getProperty(Marshaller.JAXB_ENCODING)).isEqualTo("UTF-16");
}
/**
 * Parses raw segment records into {@link ProfiledTraceSegments}, then merges
 * cross-thread child segments into their parent segments (within the same process),
 * keeping only trees that contain at least one profiled segment.
 * Cross-process references are kept as separate roots and stripped at the end.
 */
protected List<ProfiledTraceSegments> buildProfiledSegmentsList(List<SegmentRecord> segmentRecords, List<String> profiledSegmentIdList) {
    // Decode each record's protobuf payload; drop undecodable or span-less segments,
    // then index the built ProfiledTraceSegments by segment id.
    final Map<String, ProfiledTraceSegments> segments = segmentRecords.stream().map(s -> {
        try {
            return Tuple.of(s, SegmentObject.parseFrom(s.getDataBinary()));
        } catch (InvalidProtocolBufferException e) {
            log.warn("parsing segment data error", e);
            return null;
        }
    }).filter(java.util.Objects::nonNull).filter(s -> CollectionUtils.isNotEmpty(s._2.getSpansList())).collect(Collectors.toMap(
        tuple -> tuple._1.getSegmentId(),
        tuple -> {
            final IDManager.ServiceInstanceID.InstanceIDDefinition serviceInstance =
                IDManager.ServiceInstanceID.analysisId(tuple._1.getServiceInstanceId());
            final ProfiledTraceSegments seg = new ProfiledTraceSegments();
            // Segment is "profiled" when its id appears in the profiled-id list.
            final boolean profiled = profiledSegmentIdList.contains(tuple._1.getSegmentId());
            seg.setTraceId(tuple._1.getTraceId());
            seg.setInstanceId(tuple._1.getServiceInstanceId());
            seg.setInstanceName(serviceInstance.getName());
            seg.getEndpointNames().add(IDManager.EndpointID.analysisId(tuple._1.getEndpointId()).getEndpointName());
            seg.setDuration(tuple._1.getLatency());
            seg.setStart(String.valueOf(tuple._1.getStartTime()));
            seg.getSpans().addAll(buildProfiledSpanList(tuple._2, profiled));
            seg.setContainsProfiled(profiled);
            return seg;
        }
    ));
    // Trying to find each segment's parent and merge child into parent.
    final ArrayList<ProfiledTraceSegments> results = new ArrayList<>();
    final Iterator<Map.Entry<String, ProfiledTraceSegments>> entryIterator = segments.entrySet().iterator();
    // Spans already folded into a parent; skipped on later iterations to avoid re-merging.
    final Set<ProfiledSpan> mergedSpans = new HashSet<>();
    while (entryIterator.hasNext()) {
        // Keep segment as a root if it has no usable ref.
        final Map.Entry<String, ProfiledTraceSegments> current = entryIterator.next();
        boolean spanBeenAdded = false;
        for (ProfiledSpan span : current.getValue().getSpans()) {
            if (mergedSpans.contains(span)) {
                continue;
            }
            if (CollectionUtils.isEmpty(span.getRefs())) {
                continue;
            }
            // Keep segment as a root if the ref crosses processes
            // (analysis only merges within the same process).
            final Ref ref = span.getRefs().get(0);
            if (RefType.CROSS_PROCESS.equals(ref.getType())) {
                results.add(current.getValue());
                spanBeenAdded = true;
                break;
            }
            // Find the parent segment if it exists and merge into it.
            final ProfiledTraceSegments parentSegments = segments.get(ref.getParentSegmentId());
            if (parentSegments != null) {
                // Remember the merged spans so they are not processed again.
                mergedSpans.addAll(current.getValue().getSpans());
                // Fold the current segment into its parent.
                parentSegments.merge(current.getValue());
                // Replace the map value with the combined parent.
                current.setValue(parentSegments);
                spanBeenAdded = true;
                break;
            }
        }
        if (!spanBeenAdded) {
            results.add(current.getValue());
        }
    }
    // Only trees containing a profiled segment survive; cross-process refs are removed.
    return results.stream().filter(ProfiledTraceSegments::isContainsProfiled).peek(this::removeAllCrossProcessRef).collect(Collectors.toList());
}
@Test
public void testBuildProfiledSegmentsList() {
    // Case 1: all segments in the same process, chained via cross-thread refs —
    // they must all merge into a single tree rooted at "2A".
    validate(Arrays.asList(
        buildRecord("1B", "2A", RefType.CrossThread),
        buildRecord("2A", "", null),
        buildRecord("3C", "1B", RefType.CrossThread)
    ), Arrays.asList(
        Arrays.asList("2A", "1B", "3C")
    ));
    // Case 2: cross-process refs and a missing parent ("Z") must each stay
    // separate roots; only same-process cross-thread children merge.
    validate(Arrays.asList(
        buildRecord("A", "", null),
        buildRecord("B", "A", RefType.CrossThread),
        buildRecord("C", "B", RefType.CrossProcess),
        buildRecord("D", "Z", RefType.CrossThread)
    ), Arrays.asList(
        Arrays.asList("A", "B"),
        Arrays.asList("C"),
        Arrays.asList("D")
    ));
}
/**
 * Parses a service URI string into a {@link ServiceURI}.
 * Multi-host IPv6 URIs (hosts separated by ',' or ';', each in brackets) get
 * special handling because {@link java.net.URI} cannot parse them directly:
 * the first host is parsed as a normal URI and the remaining hosts are
 * validated and appended.
 */
public static ServiceURI create(String uriStr) {
    requireNonNull(uriStr, "service uri string is null");
    if (uriStr.contains("[") && uriStr.contains("]")) {
        // Deal with IPv6 addresses.
        Splitter splitter = Splitter.on(CharMatcher.anyOf(",;"));
        List<String> hosts = splitter.splitToList(uriStr);
        if (hosts.size() > 1) {
            // Multiple IPv6 hosts: only the last entry may carry the path;
            // move it onto the first host so the URI parser sees it.
            String firstHost = hosts.get(0);
            String lastHost = hosts.get(hosts.size() - 1);
            boolean hasPath = lastHost.contains("/");
            String path = hasPath ? lastHost.substring(lastHost.indexOf("/")) : "";
            firstHost += path;
            URI uri = URI.create(firstHost);
            ServiceURI serviceURI = create(uri);
            // Rebuild the full host list: parsed first host + remaining raw hosts,
            // each validated against the detected service name/infos.
            List<String> multiHosts = new ArrayList<>();
            multiHosts.add(serviceURI.getServiceHosts()[0]);
            multiHosts.addAll(hosts.subList(1, hosts.size()));
            multiHosts = multiHosts
                .stream()
                .map(host -> validateHostName(serviceURI.getServiceName(), serviceURI.getServiceInfos(), host))
                .collect(Collectors.toList());
            return new ServiceURI(
                serviceURI.getServiceName(),
                serviceURI.getServiceInfos(),
                serviceURI.getServiceUser(),
                multiHosts.toArray(new String[multiHosts.size()]),
                serviceURI.getServicePath(),
                serviceURI.getUri());
        }
    }
    // Single-host case: must be a valid java.net.URI.
    URI uri = URI.create(uriStr);
    return create(uri);
}
@Test(expectedExceptions = NullPointerException.class)
public void testNullServiceUriInstance() {
    // A null URI must be rejected eagerly with an NPE.
    ServiceURI.create((URI) null);
}
/**
 * Parses and returns the next {@link JsonElement} from the underlying reader.
 *
 * @throws NoSuchElementException if no further element is available
 * @throws JsonParseException if parsing fails due to excessive nesting or memory exhaustion
 */
@Override
public JsonElement next() throws JsonParseException {
    if (!hasNext()) {
        throw new NoSuchElementException();
    }
    try {
        return Streams.parse(parser);
    } catch (StackOverflowError | OutOfMemoryError e) {
        // Multi-catch: both failure modes receive the identical wrapping,
        // removing the duplicated catch bodies of the original.
        throw new JsonParseException("Failed parsing JSON source to Json", e);
    }
}
@Test
public void testParseTwoStrings() {
    // Successive next() calls must yield the stream's elements in order.
    String actualOne = parser.next().getAsString();
    assertThat(actualOne).isEqualTo("one");
    String actualTwo = parser.next().getAsString();
    assertThat(actualTwo).isEqualTo("two");
}
/**
 * Replaces the stored value and TTL, refreshing the update timestamp.
 * Returns the value that was stored before the call.
 */
public V setValueInternal(V value, long ttlMillis) {
    assert value != null;
    final V previous = this.value;
    this.value = value;
    this.ttlMillis = ttlMillis;
    this.updateTime = Clock.currentTimeMillis();
    return previous;
}
@Test
public void testSetValueInternal() {
    // Internal update must replace the value without counting as a hit.
    assertEquals(0, replicatedRecord.getHits());
    assertEquals("value", replicatedRecord.getValueInternal());
    replicatedRecord.setValueInternal("newValue", 0);
    assertEquals(0, replicatedRecord.getHits());
    assertEquals("newValue", replicatedRecord.getValueInternal());
}
/**
 * Looks up a managed NETCONF device by its id; returns {@code null} when unknown.
 */
@Override
public NetconfDevice getNetconfDevice(DeviceId deviceInfo) {
    return netconfDeviceMap.get(deviceInfo);
}
@Test
public void testGetNetconfDeviceWithIPPort() {
    // Devices registered under (IP, port) must be retrievable with the same pair.
    NetconfDevice fetchedDevice1 = ctrl.getNetconfDevice(IpAddress.valueOf(DEVICE_1_IP), DEVICE_1_PORT);
    assertEquals("Incorrect device fetched", fetchedDevice1.getDeviceInfo().ip(), device1.getDeviceInfo().ip());
    NetconfDevice fetchedDevice2 = ctrl.getNetconfDevice(IpAddress.valueOf(DEVICE_2_IP), DEVICE_2_PORT);
    assertEquals("Incorrect device fetched", fetchedDevice2.getDeviceInfo().ip(), device2.getDeviceInfo().ip());
}
@VisibleForTesting void validateUsernameUnique(Long id, String username) { if (StrUtil.isBlank(username)) { return; } AdminUserDO user = userMapper.selectByUsername(username); if (user == null) { return; } // 如果 id 为空,说明不用比较是否为相同 id 的用户 if (id == null) { throw exception(USER_USERNAME_EXISTS); } if (!user.getId().equals(id)) { throw exception(USER_USERNAME_EXISTS); } }
@Test
public void testValidateUsernameUnique_usernameExistsForUpdate() {
    // Prepare arguments.
    Long id = randomLongId();
    String username = randomString();
    // Mock data: another user already holds this username.
    userMapper.insert(randomAdminUserDO(o -> o.setUsername(username)));
    // Invoke and expect the duplicate-username service exception.
    assertServiceException(() -> userService.validateUsernameUnique(id, username), USER_USERNAME_EXISTS);
}
/**
 * Resolves a class name to a {@link Class}, handling primitives, the three array
 * notations ("Foo[]", "[LFoo;", "[[I"), and nested classes written with '.'
 * instead of '$'. Falls back to the default class loader when none is given.
 *
 * @throws ClassNotFoundException if the class cannot be located
 * @throws LinkageError if the class file is malformed or fails linkage
 */
public static Class<?> forName(String name, ClassLoader classLoader) throws ClassNotFoundException, LinkageError {
    Objects.requireNonNull(name, "Name must not be null");
    // Fast paths: primitive names and the common-class cache.
    Class<?> clazz = resolvePrimitiveClassName(name);
    if (clazz == null) {
        clazz = COMMON_CLASS_CACHE.get(name);
    }
    if (clazz != null) {
        return clazz;
    }
    // "java.lang.String[]" style arrays.
    if (name.endsWith(ARRAY_SUFFIX)) {
        String elementClassName = name.substring(0, name.length() - ARRAY_SUFFIX.length());
        Class<?> elementClass = forName(elementClassName, classLoader);
        // Array.newInstance(..., 0) is the standard trick to obtain an array class.
        return Array.newInstance(elementClass, 0).getClass();
    }
    // "[Ljava.lang.String;" style arrays.
    if (name.startsWith(NON_PRIMITIVE_ARRAY_PREFIX) && name.endsWith(SEMICOLON_SEPARATOR)) {
        String elementName = name.substring(NON_PRIMITIVE_ARRAY_PREFIX.length(), name.length() - 1);
        Class<?> elementClass = forName(elementName, classLoader);
        return Array.newInstance(elementClass, 0).getClass();
    }
    // "[[I" or "[[Ljava.lang.String;" style arrays (recursive: strips one '[').
    if (name.startsWith(INTERNAL_ARRAY_PREFIX)) {
        String elementName = name.substring(INTERNAL_ARRAY_PREFIX.length());
        Class<?> elementClass = forName(elementName, classLoader);
        return Array.newInstance(elementClass, 0).getClass();
    }
    ClassLoader clToUse = classLoader;
    if (clToUse == null) {
        clToUse = getDefaultClassLoader();
    }
    try {
        // 'false': do not run static initializers at resolution time.
        return Class.forName(name, false, clToUse);
    } catch (ClassNotFoundException ex) {
        // Retry interpreting the last '.' as a nested-class separator ('$').
        int lastDotIndex = name.lastIndexOf(PACKAGE_SEPARATOR);
        if (lastDotIndex != -1) {
            String nestedClassName =
                name.substring(0, lastDotIndex) + NESTED_CLASS_SEPARATOR + name.substring(lastDotIndex + 1);
            try {
                return Class.forName(nestedClassName, false, clToUse);
            } catch (ClassNotFoundException e) {
                // Swallow - let original exception get through.
            }
        }
        throw ex;
    }
}
@Test
void testForNameNonExist() throws ClassNotFoundException {
    // An unknown class name must surface as ClassNotFoundException.
    assertThrows(ClassNotFoundException.class, () -> {
        ClassUtils.forName("com.alibaba.nacos.common.NonExistClass", null);
    });
}
/**
 * Delegates an INFO-level message to the wrapped SLF4J logger.
 */
@Override
public void info(String msg) {
    logger.info(msg);
}
@Test
public void testInfoWithException() {
    // The wrapper must forward both the message and the throwable unchanged.
    Logger mockLogger = mock(Logger.class);
    when(mockLogger.getName()).thenReturn("foo");
    InternalLogger logger = new Slf4JLogger(mockLogger);
    logger.info("a", e);
    verify(mockLogger).getName();
    verify(mockLogger).info("a", e);
}
/**
 * Sets the field's value after validation, routing through the export-value
 * (Opt) mapping when one is defined, and persists the change.
 *
 * @throws IOException if applying the change to the document fails
 */
@Override
public void setValue(String value) throws IOException {
    checkValue(value);
    // If there are export values/an Opt entry there is a different
    // approach to setting the value.
    if (!getExportValues().isEmpty()) {
        updateByOption(value);
    } else {
        updateByValue(value);
    }
    applyChange();
}
@Test
void setCheckboxGroupInvalidValue() throws IOException {
    PDCheckBox checkbox = (PDCheckBox) acrobatAcroForm.getField("CheckboxGroup");
    // Set a value which doesn't match the radio button list:
    // validation must reject it before anything is applied.
    assertThrows(IllegalArgumentException.class, () -> checkbox.setValue("InvalidValue"));
}
/**
 * Asserts equality with the expected object; on failure, delegates to the
 * element-wise containment checks when both sides are the same collection kind,
 * so the failure message names the differing elements.
 */
@Override
public void isEqualTo(@Nullable Object expected) {
    @SuppressWarnings("UndefinedEquals") // method contract requires testing iterables for equality
    boolean equal = Objects.equal(actual, expected);
    if (equal) {
        return;
    }
    // Fail but with a more descriptive message:
    if (actual instanceof List && expected instanceof List) {
        // Lists: order matters, so require in-order containment.
        containsExactlyElementsIn((List<?>) expected).inOrder();
    } else if ((actual instanceof Set && expected instanceof Set)
        || (actual instanceof Multiset && expected instanceof Multiset)) {
        // Sets/multisets: order-insensitive containment.
        containsExactlyElementsIn((Collection<?>) expected);
    } else {
        /*
         * TODO(b/18430105): Consider a special message if comparing incompatible collection types
         * (similar to what MultimapSubject has).
         */
        super.isEqualTo(expected);
    }
}
@Test
public void isEqualToNotConsistentWithEquals() {
    // Comparator-based sets whose equals() disagrees with element equality.
    TreeSet<String> actual = new TreeSet<>(CASE_INSENSITIVE_ORDER);
    TreeSet<String> expected = new TreeSet<>(CASE_INSENSITIVE_ORDER);
    actual.add("one");
    expected.add("ONE");
    /*
     * Our contract doesn't guarantee that the following test will pass. It *currently* does,
     * though, and if we change that behavior, we want this test to let us know.
     */
    assertThat(actual).isEqualTo(expected);
}
/**
 * Submits a reservation to the reservation system. Idempotent on resubmission
 * with an identical definition; rejects reuse of a reservation id with a
 * different definition. On success the reservation is registered with its queue
 * and the scheduler is refreshed.
 *
 * @throws YarnException on validation, ACL, duplicate-id, or planning failures
 * @throws IOException on RPC-level failures
 */
@Override
public ReservationSubmissionResponse submitReservation(
    ReservationSubmissionRequest request) throws YarnException, IOException {
    // Check if reservation system is enabled.
    checkReservationSystem();
    ReservationSubmissionResponse response =
        recordFactory.newRecordInstance(ReservationSubmissionResponse.class);
    ReservationId reservationId = request.getReservationId();
    // Validate the input.
    Plan plan =
        rValidator.validateReservationSubmissionRequest(reservationSystem, request, reservationId);
    ReservationAllocation allocation = plan.getReservationById(reservationId);
    if (allocation != null) {
        // An allocation already exists for this id: accept only exact resubmissions.
        boolean isNewDefinition = !allocation.getReservationDefinition().equals(
            request.getReservationDefinition());
        if (isNewDefinition) {
            String message = "Reservation allocation already exists with the " +
                "reservation id " + reservationId.toString() + ", but a different" +
                " reservation definition was provided. Please try again with a " +
                "new reservation id, or consider updating the reservation instead.";
            throw RPCUtil.getRemoteException(message);
        } else {
            // Identical resubmission: idempotent success.
            return response;
        }
    }
    // Check ACLs.
    String queueName = request.getQueue();
    String user =
        checkReservationACLs(queueName, AuditConstants.SUBMIT_RESERVATION_REQUEST, null);
    try {
        // Try to place the reservation using the agent.
        boolean result =
            plan.getReservationAgent().createReservation(reservationId, user, plan,
                request.getReservationDefinition());
        if (result) {
            // Add the reservation id to valid ones maintained by the reservation system.
            reservationSystem.setQueueForReservation(reservationId, queueName);
            // Create the reservation synchronously if required.
            refreshScheduler(queueName, request.getReservationDefinition(),
                reservationId.toString());
            // Return the reservation id.
        }
    } catch (PlanningException e) {
        RMAuditLogger.logFailure(user, AuditConstants.SUBMIT_RESERVATION_REQUEST,
            e.getMessage(), "ClientRMService",
            "Unable to create the reservation: " + reservationId);
        throw RPCUtil.getRemoteException(e);
    }
    RMAuditLogger.logSuccess(user, AuditConstants.SUBMIT_RESERVATION_REQUEST,
        "ClientRMService: " + reservationId);
    return response;
}
@Test
public void testCreateReservation() {
    resourceManager = setupResourceManager();
    ClientRMService clientService = resourceManager.getClientRMService();
    Clock clock = new UTCClock();
    long arrival = clock.getTime();
    long duration = 60000;
    long deadline = (long) (arrival + 1.05 * duration);
    ReservationSubmissionRequest sRequest =
        submitReservationTestHelper(clientService, arrival, deadline, duration);
    // Submit the reservation again with the same request and make sure it
    // passes (idempotent resubmission).
    try {
        clientService.submitReservation(sRequest);
    } catch (Exception e) {
        Assert.fail(e.getMessage());
    }
    // Submit the reservation with the same reservation id but different
    // reservation definition, and ensure YarnException is thrown.
    arrival = clock.getTime();
    ReservationDefinition rDef = sRequest.getReservationDefinition();
    rDef.setArrival(arrival + duration);
    sRequest.setReservationDefinition(rDef);
    try {
        clientService.submitReservation(sRequest);
        Assert.fail("Reservation submission should fail if a duplicate " +
            "reservation id is used, but the reservation definition has been " +
            "updated.");
    } catch (Exception e) {
        Assert.assertTrue(e instanceof YarnException);
    }
}
/**
 * Computes the watermark-throttling frame size for a physical plan:
 * the GCD of all window-aggregation intervals found in the tree, capped by the
 * maximum interval allowed for joins. Falls back to the join cap when there is
 * no window aggregation at all.
 */
public static long calculate(PhysicalRel rel, ExpressionEvalContext evalContext) {
    GcdCalculatorVisitor visitor = new GcdCalculatorVisitor(evalContext);
    visitor.go(rel);
    if (visitor.gcd == 0) {
        // There's no window aggregation in the rel; return the join value,
        // which is already capped at a reasonable maximum.
        return visitor.maximumIntervalForJoins;
    }
    // There is a window aggregation: cap the GCD with the join maximum.
    return Math.min(visitor.gcd, visitor.maximumIntervalForJoins);
}
@Test
public void when_onlySlidingWindowInTree_then_returnWindowSize() {
    HazelcastTable streamTable = streamGeneratorTable("_stream", 10);
    List<QueryDataType> parameterTypes = Collections.singletonList(INT);
    // HOP window of size 6 sliding by 3 — the slide (3) is the expected frame size.
    final String sql = "SELECT MAX(v) FROM " +
        "TABLE(HOP(" +
        "  (SELECT * FROM TABLE(IMPOSE_ORDER((SELECT * FROM TABLE(GENERATE_STREAM(10))), DESCRIPTOR(v), 1)))" +
        "  , DESCRIPTOR(v) , 6, 3)) " +
        "GROUP BY window_start, v";
    PhysicalRel optimizedPhysicalRel = optimizePhysical(sql, parameterTypes, streamTable).getPhysical();
    // Sanity-check the optimized plan shape before asserting the frame size.
    assertPlan(optimizedPhysicalRel, plan(
        planRow(0, CalcPhysicalRel.class),
        planRow(1, SlidingWindowAggregatePhysicalRel.class),
        planRow(2, CalcPhysicalRel.class),
        planRow(3, FullScanPhysicalRel.class)
    ));
    assertThat(WatermarkThrottlingFrameSizeCalculator.calculate(optimizedPhysicalRel, MOCK_EEC))
        .isEqualTo(3L);
}
/**
 * Handles an SMS delivery-status callback: resolves the channel's client,
 * parses the raw callback payload, and records each result on its SMS log entry.
 *
 * @throws Throwable any parsing failure raised by the channel client
 */
@Override
public void receiveSmsStatus(String channelCode, String text) throws Throwable {
    // Obtain the SmsClient for the channel; a missing client is a hard error.
    SmsClient smsClient = smsChannelService.getSmsClient(channelCode);
    Assert.notNull(smsClient, "短信客户端({}) 不存在", channelCode);
    // Parse the callback payload into individual receive results.
    List<SmsReceiveRespDTO> receiveResults = smsClient.parseSmsReceiveStatus(text);
    if (CollUtil.isEmpty(receiveResults)) {
        return;
    }
    // Update the SMS logs one by one; volumes are small, so a simple loop suffices.
    receiveResults.forEach(result -> smsLogService.updateSmsReceiveResult(result.getLogId(),
        result.getSuccess(), result.getReceiveTime(), result.getErrorCode(), result.getErrorMsg()));
}
@Test public void testReceiveSmsStatus() throws Throwable { // 准备参数 String channelCode = randomString(); String text = randomString(); // mock SmsClientFactory 的方法 SmsClient smsClient = spy(SmsClient.class); when(smsChannelService.getSmsClient(eq(channelCode))).thenReturn(smsClient); // mock SmsClient 的方法 List<SmsReceiveRespDTO> receiveResults = randomPojoList(SmsReceiveRespDTO.class); // 调用 smsSendService.receiveSmsStatus(channelCode, text); // 断言 receiveResults.forEach(result -> smsLogService.updateSmsReceiveResult(eq(result.getLogId()), eq(result.getSuccess()), eq(result.getReceiveTime()), eq(result.getErrorCode()), eq(result.getErrorCode()))); }
/**
 * Static factory: wraps the given map as a DMN context.
 * NOTE(review): the map is wrapped, not copied — later mutations of {@code ctx}
 * are visible through the context; confirm callers expect that.
 */
public static MapBackedDMNContext of(Map<String, Object> ctx) {
    return new MapBackedDMNContext(ctx);
}
@Test
void contextWithEntriesAndMetadata() {
    // Clone/alter and scope push/pop must both honor entries plus metadata.
    MapBackedDMNContext ctx1 = MapBackedDMNContext.of(new HashMap<>(DEFAULT_ENTRIES), new HashMap<>(DEFAULT_METADATA));
    testCloneAndAlter(ctx1, DEFAULT_ENTRIES, DEFAULT_METADATA);
    MapBackedDMNContext ctx2 = MapBackedDMNContext.of(new HashMap<>(DEFAULT_ENTRIES), new HashMap<>(DEFAULT_METADATA));
    testPushAndPopScope(ctx2, DEFAULT_ENTRIES, DEFAULT_METADATA);
}
/**
 * Tokenizes and parses a single URI path element, requiring that the parse
 * consume every token; leftover tokens indicate a malformed element.
 *
 * @throws PathSegment.PathSegmentSyntaxException when tokens remain after parsing
 */
public static Object parse(String element) throws PathSegment.PathSegmentSyntaxException {
    final Queue<Token> tokenQueue = tokenizeElement(element);
    final Object parsed = parseElement(tokenQueue);
    if (!tokenQueue.isEmpty()) {
        throw new PathSegment.PathSegmentSyntaxException(
            "tokens left over after parsing; first excess token: " + tokenQueue.peek().toErrorString());
    }
    return parsed;
}
@Test(dataProvider = "encoded")
public void testEncodedDecoding(String encodedString, Object expectedObj) throws PathSegment.PathSegmentSyntaxException {
    // Each encoded sample must decode to its expected object representation.
    Object actualObj = URIElementParser.parse(encodedString);
    Assert.assertEquals(actualObj, expectedObj);
}
/**
 * Closes this shared admin, waiting at most the default duration for shutdown.
 */
@Override
public void close() {
    close(DEFAULT_CLOSE_DURATION);
}
@Test
public void shouldCloseWithoutBeingUsed() {
    SharedTopicAdmin sharedAdmin = new SharedTopicAdmin(EMPTY_CONFIG, factory);
    // When closed before being used...
    sharedAdmin.close();
    // ...then it should not have created (nor closed) any underlying admin.
    verifyTopicAdminCreatesAndCloses(0);
}
/**
 * Converts a duration (seconds) into its human-readable form by finding the
 * first rule whose threshold covers it; anything beyond falls into "over two years".
 * NOTE(review): correctness depends on RULES iterating in ascending threshold
 * order (e.g. a sorted map) — confirm. Also each hit does a keySet iteration
 * plus a get(); iterating entrySet would avoid the second lookup.
 */
public ConvertedTime getConvertedTime(long duration) {
    Set<Seconds> keys = RULES.keySet();
    for (Seconds seconds : keys) {
        if (duration <= seconds.getSeconds()) {
            return RULES.get(seconds).getConvertedTime(duration);
        }
    }
    return new TimeConverter.OverTwoYears().getConvertedTime(duration);
}
@Test
public void testShouldReportLessThanOneMinutesFor0To29Seconds() {
    // 29 s is the upper edge of the "less than a minute ago" bucket.
    assertEquals(TimeConverter.LESS_THAN_A_MINUTE_AGO, timeConverter.getConvertedTime(29));
}
/**
 * Joins the stream's elements, delegating to the iterator-based overload.
 */
public String join(final Stream<?> parts) {
    return join(parts.iterator());
}
@Test
public void shouldHandleThreeItems() {
    // Three items: commas between the first pair, the conjunction before the last.
    assertThat(joiner.join(ImmutableList.of(1, 2, 3)), is("1, 2 or 3"));
}
/**
 * Renames keys reactively. Same-slot renames use the native RENAME; cross-slot
 * renames (unsupported by Redis Cluster) are emulated via DUMP + RESTORE with
 * the source TTL preserved, deleting the source key on success.
 */
@Override
public Flux<BooleanResponse<RenameCommand>> rename(Publisher<RenameCommand> commands) {
    return execute(commands, command -> {
        Assert.notNull(command.getKey(), "Key must not be null!");
        Assert.notNull(command.getNewKey(), "New name must not be null!");
        byte[] keyBuf = toByteArray(command.getKey());
        byte[] newKeyBuf = toByteArray(command.getNewKey());
        // Same hash slot: the plain RENAME command works, use the parent implementation.
        if (executorService.getConnectionManager().calcSlot(keyBuf) == executorService.getConnectionManager().calcSlot(newKeyBuf)) {
            return super.rename(commands);
        }
        // Cross-slot: DUMP the value, pair it with the source TTL (0 = no expiry),
        // then RESTORE under the new key.
        return read(keyBuf, ByteArrayCodec.INSTANCE, RedisCommands.DUMP, keyBuf)
            .filter(Objects::nonNull)
            .zipWith(
                Mono.defer(() -> pTtl(command.getKey())
                    .filter(Objects::nonNull)
                    .map(ttl -> Math.max(0, ttl))
                    .switchIfEmpty(Mono.just(0L))
                )
            )
            .flatMap(valueAndTtl -> {
                return write(newKeyBuf, StringCodec.INSTANCE, RedisCommands.RESTORE, newKeyBuf, valueAndTtl.getT2(), valueAndTtl.getT1());
            })
            .thenReturn(new BooleanResponse<>(command, true))
            // Remove the original key only after the copy succeeded.
            .doOnSuccess((ignored) -> del(command.getKey()));
    });
}
@Test
public void testRename_keyNotExist() {
    Integer originalSlot = getSlotForKey(originalKey);
    newKey = getNewKeyForSlot(new String(originalKey.array()), getTargetSlot(originalSlot));
    if (sameSlot) {
        // This is a quirk of the implementation - since same-slot renames use the non-cluster version,
        // the result is a Redis error. This behavior matches other spring-data-redis implementations.
        assertThatThrownBy(() -> connection.keyCommands().rename(originalKey, newKey).block())
            .isInstanceOf(RedisSystemException.class);
    } else {
        // Cross-slot path reports success, but no value exists under the new key.
        Boolean response = connection.keyCommands().rename(originalKey, newKey).block();
        assertThat(response).isTrue();
        final ByteBuffer newKeyValue = connection.stringCommands().get(newKey).block();
        assertThat(newKeyValue).isEqualTo(null);
    }
}
/**
 * Sends the request with no associated network service (delegates to the
 * two-argument overload with a null service).
 */
@Override
public HttpResponse send(HttpRequest httpRequest) throws IOException {
    return send(httpRequest, null);
}
@Test
public void send_whenHostnameAndIpInRequest_useHostnameAsProxy() throws IOException {
    InetAddress loopbackAddress = InetAddress.getLoopbackAddress();
    String host = "host.com";
    // Dispatcher only answers requests addressed to the expected hostname.
    mockWebServer.setDispatcher(new HostnameTestDispatcher(host));
    mockWebServer.start(loopbackAddress, 0);
    int port = mockWebServer.url("/").port();
    NetworkService networkService =
        NetworkService.newBuilder()
            .setNetworkEndpoint(
                NetworkEndpointUtils.forIpHostnameAndPort(
                    loopbackAddress.getHostAddress(), host, port))
            .build();
    // The request to host.com should be sent through mockWebServer's IP.
    HttpResponse response =
        httpClient.send(
            get(String.format("http://host.com:%d/test/get", port)).withEmptyHeaders().build(),
            networkService);
    assertThat(response.status()).isEqualTo(HttpStatus.OK);
}
/**
 * Returns the connector's externally-publishable connect URI as a string.
 *
 * @throws Exception if the publishable URI cannot be determined
 */
public String getPublishableConnectString(TransportConnector connector) throws Exception {
    return getPublishableConnectURI(connector).toString();
}
@Test
public void testDefaultReturnsHost() throws Exception {
    // By default the publishable connect string contains the configured host name.
    assertTrue("contains bob", underTest.getPublishableConnectString(dummyTransportConnector).contains("bob"));
}
/**
 * Reads the current gauge value from the underlying source.
 */
@Override
public double read() {
    return gaugeSource.read();
}
@Test
public void whenDoubleProbe() {
    // A registered double probe must be readable through a DoubleGauge of the same name.
    metricsRegistry.registerStaticProbe(this, "foo", MANDATORY, (DoubleProbeFunction) o -> 10);
    DoubleGauge gauge = metricsRegistry.newDoubleGauge("foo");
    double actual = gauge.read();
    assertEquals(10, actual, 0.1);
}
/**
 * Returns the Pulsar consumers this source reads from. Implementations decide the
 * cardinality — e.g. a single shared consumer versus one consumer per configured topic.
 */
public abstract List<Consumer<T>> getInputConsumers();
// Verifies getInputConsumers() cardinality for both config flavours:
// a single-consumer config yields exactly one consumer, while a multi-consumer
// config yields one consumer per configured topic/schema entry.
// Fix: the single-consumer branch declared an unused local cast ("cfg"); removed.
@Test(dataProvider = "sourceImpls")
public void testInputConsumersGetter(PulsarSourceConfig pulsarSourceConfig) throws Exception {
    // open() is what actually creates the consumers, so it must run before the getter.
    PulsarSource<GenericRecord> pulsarSource = getPulsarSource(pulsarSourceConfig);
    pulsarSource.open(new HashMap<>(), null);
    if (pulsarSourceConfig instanceof SingleConsumerPulsarSourceConfig) {
        Assert.assertEquals(pulsarSource.getInputConsumers().size(), 1);
        return;
    }
    if (pulsarSourceConfig instanceof MultiConsumerPulsarSourceConfig) {
        MultiConsumerPulsarSourceConfig cfg = (MultiConsumerPulsarSourceConfig) pulsarSourceConfig;
        Assert.assertEquals(pulsarSource.getInputConsumers().size(), cfg.getTopicSchema().size());
        return;
    }
    fail("Unknown config type");
}
T getFunction(final List<SqlArgument> arguments) { // first try to get the candidates without any implicit casting Optional<T> candidate = findMatchingCandidate(arguments, false); if (candidate.isPresent()) { return candidate.get(); } else if (!supportsImplicitCasts) { throw createNoMatchingFunctionException(arguments); } // if none were found (candidate isn't present) try again with implicit casting candidate = findMatchingCandidate(arguments, true); if (candidate.isPresent()) { return candidate.get(); } throw createNoMatchingFunctionException(arguments); }
// Given two overloads differing only in their struct parameter type, the index
// must resolve the overload whose struct schema matches the supplied argument.
@Test public void shouldChooseCorrectStruct() { // Given: givenFunctions( function(OTHER, -1, STRUCT2), function(EXPECTED, -1, STRUCT1) ); // When: final KsqlScalarFunction fun = udfIndex.getFunction(ImmutableList.of(SqlArgument.of(STRUCT1_ARG))); // Then: assertThat(fun.name(), equalTo(EXPECTED)); }
/**
 * Checks whether the container {@code obj} contains {@code element}.
 * Supported containers: String (substring test on element.toString()),
 * Collection, Map (matches against VALUES, not keys), Iterator, Enumeration,
 * and arrays (including primitive arrays, via reflection).
 *
 * Fix: dropped the redundant {@code == true} on the isArray() check.
 *
 * @param obj     the container; null yields false
 * @param element the element to look for
 * @return true if found, false otherwise (also false for unsupported types)
 */
public static boolean contains(Object obj, Object element) {
    if (obj == null) {
        return false;
    }
    if (obj instanceof String) {
        // A null element can never be a substring of a non-null String.
        if (element == null) {
            return false;
        }
        return ((String) obj).contains(element.toString());
    }
    if (obj instanceof Collection) {
        return ((Collection<?>) obj).contains(element);
    }
    if (obj instanceof Map) {
        // NOTE: searches the map's values, not its keys.
        return ((Map<?, ?>) obj).containsValue(element);
    }
    if (obj instanceof Iterator) {
        // WARNING: this consumes the iterator up to the first match.
        final Iterator<?> iter = (Iterator<?>) obj;
        while (iter.hasNext()) {
            final Object o = iter.next();
            if (equal(o, element)) {
                return true;
            }
        }
        return false;
    }
    if (obj instanceof Enumeration) {
        // WARNING: this consumes the enumeration up to the first match.
        final Enumeration<?> enumeration = (Enumeration<?>) obj;
        while (enumeration.hasMoreElements()) {
            final Object o = enumeration.nextElement();
            if (equal(o, element)) {
                return true;
            }
        }
        return false;
    }
    if (obj.getClass().isArray()) {
        // java.lang.reflect.Array handles both Object[] and primitive arrays.
        final int len = Array.getLength(obj);
        for (int i = 0; i < len; i++) {
            final Object o = Array.get(obj, i);
            if (equal(o, element)) {
                return true;
            }
        }
    }
    return false;
}
// A primitive int[] is walked element-by-element via reflection, so a boxed
// query value that equals one of its members must be reported as contained.
@Test
public void containsTest() {
    final int[] values = {1, 2, 3, 4, 5};
    assertTrue(ObjectUtil.contains(values, 1));
}
// Validates that a user-supplied JsonMapper serializes jobs the way JobRunr requires:
// a test job is serialized once, then the payload is checked for correct time-field
// handling, field-based (not getter-based) mapping, polymorphism support, and a clean
// round-trip back to a Job. Any failure is wrapped in an IllegalArgumentException
// with the original exception preserved as the cause.
public static JsonMapper validateJsonMapper(JsonMapper jsonMapper) { try { final String serializedJob = jsonMapper.serialize(getJobForTesting()); testTimeFields(serializedJob); testUseFieldsNotMethods(serializedJob); testUsePolymorphism(serializedJob); testCanConvertBackToJob(jsonMapper, serializedJob); return jsonMapper; } catch (Exception e) { throw new IllegalArgumentException("The JsonMapper you provided cannot be used as it deserializes jobs in an incorrect way.", e); } }
// A JSON-B mapper configured incorrectly must be rejected by validateJsonMapper
// with the documented IllegalArgumentException and message.
@Test void testInvalidJsonbJsonMapper() { assertThatThrownBy(() -> validateJsonMapper(new InvalidJsonbJsonMapper(new JsonbConfig())) ) .isInstanceOf(IllegalArgumentException.class) .hasMessage("The JsonMapper you provided cannot be used as it deserializes jobs in an incorrect way."); }
// Cancels any pending delayed recalculation tasks (without interrupting ones already
// running), clears the task map, and triggers an immediate partition recalculation
// based on the current server list. synchronized so it cannot interleave with task
// scheduling elsewhere in this class.
synchronized void recalculatePartitions() { delayedTasks.values().forEach(future -> future.cancel(false)); delayedTasks.clear(); partitionService.recalculatePartitions(serviceInfoProvider.getServiceInfo(), getOtherServers()); }
// When a node stops and restarts within the recalculation delay window, the delayed
// recalculation task scheduled on stop must be cancelled by the restart, so no extra
// recalculatePartitions call ever happens. NOTE(review): the fixed Thread.sleep wait
// makes this timing-sensitive and potentially flaky on slow CI hosts — consider an
// awaitility-style poll instead.
@Test public void restartNodeInTimeTest() throws Exception { startNode(childData); verify(partitionService, times(1)).recalculatePartitions(eq(currentInfo), eq(List.of(childInfo))); reset(partitionService); stopNode(childData); assertEquals(1, zkDiscoveryService.delayedTasks.size()); verify(partitionService, never()).recalculatePartitions(any(), any()); startNode(childData); verify(partitionService, never()).recalculatePartitions(any(), any()); Thread.sleep(RECALCULATE_DELAY * 2); verify(partitionService, never()).recalculatePartitions(any(), any()); assertTrue(zkDiscoveryService.delayedTasks.isEmpty()); }
// Pages through a tenant's audit logs filtered by action type and the page link's
// text search / time window, converting the repository page into the DAO PageData form.
@Override public PageData<AuditLog> findAuditLogsByTenantId(UUID tenantId, List<ActionType> actionTypes, TimePageLink pageLink) { return DaoUtil.toPageData( auditLogRepository.findByTenantId( tenantId, pageLink.getTextSearch(), pageLink.getStartTime(), pageLink.getEndTime(), actionTypes, DaoUtil.toPageable(pageLink))); }
// Fetches one page (size 40) of ADDED audit logs for the tenant and delegates
// content verification to the shared helper, expecting 30 matching entries.
@Test
public void testFindAuditLogsByTenantId() {
    final TimePageLink pageLink = new TimePageLink(40);
    final List<AuditLog> auditLogs = auditLogDao
            .findAuditLogsByTenantId(tenantId, List.of(ActionType.ADDED), pageLink)
            .getData();
    checkFoundedAuditLogsList(auditLogs, 30);
}
/**
 * Returns a naming-server address, preferring the cached pick from a previous call.
 * On a cache miss it snapshots the health map, keeps only servers whose failure
 * count is below the health-check threshold, and caches one chosen at random.
 *
 * @throws NamingRegistryException when no server is currently healthy.
 */
public String getNamingAddr() {
    // Fast path: reuse the previously selected address.
    if (namingServerAddressCache != null) {
        return namingServerAddressCache;
    }
    // Work on a snapshot so concurrent health-check updates don't interfere.
    final List<String> healthyServers = new ArrayList<>();
    new HashMap<>(AVAILABLE_NAMINGSERVER_MAP).forEach((address, failures) -> {
        if (failures.get() < HEALTH_CHECK_THRESHOLD) {
            healthyServers.add(address);
        }
    });
    if (healthyServers.isEmpty()) {
        throw new NamingRegistryException("no available namingserver address!");
    }
    // Pick one healthy server at random and remember it for subsequent calls.
    namingServerAddressCache =
            healthyServers.get(ThreadLocalRandom.current().nextInt(healthyServers.size()));
    return namingServerAddressCache;
}
// Resolves an address from the singleton registry service; presumably the test
// fixture registers exactly one healthy server at 127.0.0.1:8080 — verify against
// the class-level setup if this expectation changes.
@Test public void getNamingAddrTest() { NamingserverRegistryServiceImpl namingserverRegistryService = NamingserverRegistryServiceImpl.getInstance(); String addr = namingserverRegistryService.getNamingAddr(); assertEquals(addr, "127.0.0.1:8080"); }
// Serializes an RPC payload with Fury. Rejects null up front, pins the current thread's
// context ClassLoader onto the Fury instance for the duration of the call (cleared in
// the finally block), and prefers a registered CustomSerializer for the object's type
// when one exists; otherwise serializes into a fresh heap MemoryBuffer and wraps the
// written bytes. Any failure is rethrown as a serialize error with the cause preserved.
@Override public AbstractByteBuf encode(final Object object, final Map<String, String> context) throws SofaRpcException { if (object == null) { throw buildSerializeError("Unsupported null message!"); } ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader(); try { fury.setClassLoader(contextClassLoader); CustomSerializer customSerializer = getObjCustomSerializer(object); if (customSerializer != null) { return customSerializer.encodeObject(object, context); } else { MemoryBuffer writeBuffer = MemoryBuffer.newHeapBuffer(32); writeBuffer.writerIndex(0); fury.serialize(writeBuffer, object); return new ByteArrayWrapperByteBuf(writeBuffer.getBytes(0, writeBuffer.writerIndex())); } } catch (Exception e) { throw buildSerializeError(e.getMessage(), e); } finally { fury.clearClassLoader(contextClassLoader); } }
// Exercises the Fury class checker across its three modes. In the default STRICT mode
// only whitelisted classes serialize, and any reachable blacklisted member fails.
// The test then flips the "sofa.rpc.codec.serialize.checkMode" system property to WARN
// (blacklist-only: unknown classes pass, blacklisted ones still fail) and to DISABLE
// (everything passes), constructing a fresh FurySerializer after each change because
// the mode is read at construction time. The property is removed after each phase so
// later tests see the default. NOTE(review): empty catch blocks intentionally swallow
// the expected failures; only the preceding Assert.fail() enforces that one was thrown.
@Test public void testChecker() throws Exception { // default fury checkMode is STRICT WhiteClassHasBlackClass whiteClassNullBlackClass = new WhiteClassHasBlackClass(); NoneClassHasBlackClass noneClassNullBlackClass = new NoneClassHasBlackClass(); BlackListClass blackListClass = new BlackListClass(); WhiteClassHasBlackClass whiteClassHasBlackClass = new WhiteClassHasBlackClass(); whiteClassHasBlackClass.setBlackListClass(blackListClass); NoneClassHasBlackClass noneClassHasBlackClass = new NoneClassHasBlackClass(); noneClassHasBlackClass.setBlackListClass(blackListClass); try { serializer.encode(noneClassNullBlackClass, null); Assert.fail(); } catch (Exception e) { } try { serializer.encode(noneClassHasBlackClass, null); Assert.fail(); } catch (Exception e) { } try { serializer.encode(blackListClass, null); Assert.fail(); } catch (Exception e) { } serializer.encode(whiteClassNullBlackClass, null); try { serializer.encode(whiteClassHasBlackClass, null); Assert.fail(); } catch (Exception e) { } // test change fury checkMode to blacklist System.getProperties().put("sofa.rpc.codec.serialize.checkMode", "WARN"); FurySerializer furySerializer = new FurySerializer(); furySerializer.encode(noneClassNullBlackClass, null); try { furySerializer.encode(noneClassHasBlackClass, null); Assert.fail(); } catch (Exception e) { } try { //Not registered this class furySerializer.encode(blackListClass, null); Assert.fail(); } catch (Exception e) { } furySerializer.encode(whiteClassNullBlackClass, null); try { furySerializer.encode(whiteClassHasBlackClass, null); Assert.fail(); } catch (Exception e) { } System.getProperties().remove("sofa.rpc.codec.serialize.checkMode"); // test change fury checkMode to none System.getProperties().put("sofa.rpc.codec.serialize.checkMode", "DISABLE"); FurySerializer noneFurySerializer = new FurySerializer(); noneFurySerializer.encode(noneClassNullBlackClass, null); noneFurySerializer.encode(noneClassHasBlackClass, null); 
noneFurySerializer.encode(blackListClass, null); noneFurySerializer.encode(whiteClassNullBlackClass, null); noneFurySerializer.encode(whiteClassHasBlackClass, null); System.getProperties().remove("sofa.rpc.codec.serialize.checkMode"); }
// Builds the final store by layering optional decorators around the supplier's inner
// store: logging first (innermost), then caching, then the metering wrapper on the
// outside — so metrics observe every access regardless of cache/log configuration.
@Override public KeyValueStore<K, V> build() { return new MeteredKeyValueStore<>( maybeWrapCaching(maybeWrapLogging(storeSupplier.get())), storeSupplier.metricsScope(), time, keySerde, valueSerde); }
// With logging disabled the built store's immediate wrapped layer must be the inner
// store itself — i.e. no ChangeLogging decorator was inserted between them.
@Test public void shouldNotHaveChangeLoggingStoreWhenDisabled() { setUp(); final KeyValueStore<String, String> store = builder.withLoggingDisabled().build(); final StateStore next = ((WrappedStateStore) store).wrapped(); assertThat(next, CoreMatchers.equalTo(inner)); }
// Loads an admin user by primary key; returns null when no row matches (MyBatis-Plus
// selectById semantics).
@Override public AdminUserDO getUser(Long id) { return userMapper.selectById(id); }
// Inserts a random user row directly via the mapper, then verifies the service's
// getUser returns a POJO field-equal to what was persisted.
@Test public void testGetUser() { // mock 数据 AdminUserDO dbUser = randomAdminUserDO(); userMapper.insert(dbUser); // 准备参数 Long userId = dbUser.getId(); // 调用 AdminUserDO user = userService.getUser(userId); // 断言 assertPojoEquals(dbUser, user); }
// Records a sample into the current pane of the time-sliding window; concurrency
// safety presumably rests on the pane's value implementation — confirm before
// relying on lock-free multi-threaded adds.
public void add(double value) { this.slidingWindow.currentPane().getValue().add(value); }
@Test @RepeatedTest(100) void testMulti() { ExecutorService executorService = Executors.newFixedThreadPool(200); TimeWindowQuantile quantile = new TimeWindowQuantile(100, 10, 120); int index = 0; while (index < 100) { for (int i = 0; i < 100; i++) { int finalI = i; Assertions.assertDoesNotThrow(() -> quantile.add(finalI)); executorService.execute(() -> quantile.add(finalI)); } index++; // try { // Thread.sleep(1); // } catch (InterruptedException e) { // e.printStackTrace(); // } } executorService.shutdown(); }
// Runs INFO (all sections) against the given cluster node and repackages the
// returned key/value pairs as java.util.Properties.
@Override
public Properties info(RedisClusterNode node) {
    final Properties properties = new Properties();
    Map<String, String> rawInfo = execute(node, RedisCommands.INFO_ALL);
    rawInfo.forEach(properties::setProperty);
    return properties;
}
// Sanity check: INFO on a master node should return a reasonably populated
// property set (>10 entries), proving the command ran and was parsed.
@Test public void testInfo() { RedisClusterNode master = getFirstMaster(); Properties info = connection.info(master); assertThat(info.size()).isGreaterThan(10); }
// Reads an image file, rotates it by the given degree, and writes the result to
// outFile via the Image-based overload. The decoded image is flushed in the finally
// block to release native resources even when rotation or writing fails; flush
// presumably tolerates a null image if read() throws — confirm against the helper.
// @throws IORuntimeException on read/write failure.
public static void rotate(File imageFile, int degree, File outFile) throws IORuntimeException { BufferedImage image = null; try { image = read(imageFile); rotate(image, degree, outFile); } finally { flush(image); } }
// Manual/visual check only: rotates a local image 180° and writes the result for
// eyeballing. @Disabled because it depends on machine-specific e:/pic paths and
// asserts nothing.
@Test @Disabled public void rotateTest() throws IOException { Image image = ImgUtil.rotate(ImageIO.read(FileUtil.file("e:/pic/366466.jpg")), 180); ImgUtil.write(image, FileUtil.file("e:/pic/result.png")); }
// Exposes the endpoint's SQL Server Debezium configuration (simple accessor).
@Override public SqlServerConnectorEmbeddedDebeziumConfiguration getConfiguration() { return configuration; }
// Creates a Debezium SQL Server endpoint from a URI plus an explicit parameter map and
// verifies the resulting configuration. Note the params map deliberately overlaps with
// the URI query (offsetStorageFileName, schemaHistoryInternalFileFilename): the
// assertions show the explicit map values win over the URI's, which is the contract
// this test pins down. The component is closed via try-with-resources.
@Test void testIfConnectorEndpointCreatedWithConfig() throws Exception { final Map<String, Object> params = new HashMap<>(); params.put("offsetStorageFileName", "/offset_test_file"); params.put("databaseHostname", "localhost"); params.put("databaseUser", "dbz"); params.put("databasePassword", "pwd"); params.put("topicPrefix", "test"); params.put("databaseServerId", 1234); params.put("schemaHistoryInternalFileFilename", "/db_history_file_test"); final String remaining = "test_name"; final String uri = "debezium?name=test_name&offsetStorageFileName=/test&" + "databaseHostname=localhost&databaseServerId=1234&databaseUser=dbz&databasePassword=pwd&" + "topicPrefix=test&schemaHistoryInternalFileFilename=/test"; try (final DebeziumComponent debeziumComponent = new DebeziumSqlserverComponent(new DefaultCamelContext())) { debeziumComponent.start(); final DebeziumEndpoint debeziumEndpoint = debeziumComponent.createEndpoint(uri, remaining, params); assertNotNull(debeziumEndpoint); // test for config final SqlServerConnectorEmbeddedDebeziumConfiguration configuration = (SqlServerConnectorEmbeddedDebeziumConfiguration) debeziumEndpoint.getConfiguration(); assertEquals("test_name", configuration.getName()); assertEquals("/offset_test_file", configuration.getOffsetStorageFileName()); assertEquals("localhost", configuration.getDatabaseHostname()); assertEquals("dbz", configuration.getDatabaseUser()); assertEquals("pwd", configuration.getDatabasePassword()); assertEquals("test", configuration.getTopicPrefix()); assertEquals("/db_history_file_test", configuration.getSchemaHistoryInternalFileFilename()); } }
// Publishes a CREATED event for the given entry: no old value exists, the new value
// is the freshly inserted one, and quiet=false so registered listeners are notified.
public void publishCreated(Cache<K, V> cache, K key, V value) { publish(cache, EventType.CREATED, key, /* hasOldValue */ false, /* oldValue */ null, /* newValue */ value, /* quiet */ false); }
// With a same-thread executor, publishing a CREATED event must invoke every
// registered created-listener (4 registrations), leave the expected pending
// futures, and drain all per-listener dispatch queues by the time publish returns.
@Test public void publishCreated() { var dispatcher = new EventDispatcher<Integer, Integer>(Runnable::run); registerAll(dispatcher); dispatcher.publishCreated(cache, 1, 2); verify(createdListener, times(4)).onCreated(any()); assertThat(dispatcher.pending.get()).hasSize(2); assertThat(dispatcher.dispatchQueues.values().stream() .flatMap(queue -> queue.entrySet().stream())).isEmpty(); }