focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
Set<String> getAllDatabases(@Nullable String filter) throws TException { if (filter == null || filter.isEmpty()) { return new HashSet<>(client.get_all_databases()); } return client.get_all_databases() .stream() .filter(n -> n.matches(filter)) .collect(Collectors.toSet()); }
@Test public void getAllDatabases() throws Exception { Set<String> databases = client.getAllDatabases(null); MatcherAssert.assertThat(databases, Matchers.hasItem("default")); MatcherAssert.assertThat(databases, Matchers.hasItem(TEST_DATABASE.toLowerCase())); assertThat(client.getAllDatabases(TEST_DATABASE.toLowerCase()), Matchers.contains(TEST_DATABASE.toLowerCase())); }
/**
 * Returns all stored words that begin with the given prefix.
 *
 * @param key prefix to search for; null or empty yields an empty list
 * @return matching words, or an empty list when nothing matches
 */
public List<String> prefixSearch(String key) {
    List<String> value = new ArrayList<String>();
    if (StringUtil.isEmpty(key)) {
        return value;
    }
    char k = key.charAt(0);
    int index;
    // Map the first character onto a child-array slot; uppercase and lowercase
    // letters use distinct offsets (the *_STAR constants).
    // NOTE(review): a non-letter first character is not guarded against here and
    // could yield an out-of-range index — confirm callers only pass letters.
    if (Character.isUpperCase(k)) {
        index = k - UPPERCASE_STAR;
    } else {
        index = k - LOWERCASE_STAR;
    }
    if (root.children != null && root.children[index] != null) {
        // Descend into the matching child, carrying the consumed first char
        // as the accumulated prefix.
        return query(root.children[index], value, key.substring(1), String.valueOf(k));
    }
    return value;
}
/**
 * An empty prefix must yield an empty result list.
 */
@Test
public void prefixSearch8() throws Exception {
    TrieTree trieTree = new TrieTree();
    List<String> ab = trieTree.prefixSearch("");
    // Join the results so a failure shows exactly what came back;
    // StringBuilder avoids quadratic String concatenation in the loop.
    StringBuilder result = new StringBuilder();
    for (String s : ab) {
        result.append(s).append(",");
    }
    // assertEquals reports the unexpected content on failure, unlike
    // the original assertTrue(result.equals("")).
    Assert.assertEquals("", result.toString());
}
/**
 * Reads the short at the current offset and advances the offset by two bytes.
 *
 * @return the value read from the underlying data buffer
 * @throws EOFException if fewer than two bytes remain
 */
@Override
public short readShort() throws EOFException {
    if (availableLong() < 2) {
        throw new EOFException();
    }
    final short value = _dataBuffer.getShort(_currentOffset);
    _currentOffset += 2;
    return value;
}
@Test
void testReadShort() throws EOFException {
    // Reading must return the same short the raw buffer holds at position 0...
    short read = _dataBufferPinotInputStream.readShort();
    assertEquals(read, _byteBuffer.getShort(0));
    // ...and advance the stream offset by exactly Short.BYTES.
    assertEquals(_dataBufferPinotInputStream.getCurrentOffset(), Short.BYTES);
}
/**
 * Reads the SSL configuration for the given mode from the configured classpath
 * resource (ssl-client / ssl-server), falling back to the supplied configuration
 * when the classpath resource cannot be found.
 *
 * @param conf input configuration, used for resource-name keys and as fallback
 * @param mode CLIENT or SERVER, selecting which resource name to read
 * @return the loaded SSL configuration, or {@code conf} itself on fallback
 */
public static Configuration readSSLConfiguration(Configuration conf, Mode mode) {
    Configuration sslConf = new Configuration(false);
    // Carry over the client-cert requirement from the input configuration.
    sslConf.setBoolean(SSL_REQUIRE_CLIENT_CERT_KEY, conf.getBoolean(
        SSL_REQUIRE_CLIENT_CERT_KEY, SSL_REQUIRE_CLIENT_CERT_DEFAULT));
    String sslConfResource;
    if (mode == Mode.CLIENT) {
        sslConfResource = conf.get(SSL_CLIENT_CONF_KEY, SSL_CLIENT_CONF_DEFAULT);
    } else {
        sslConfResource = conf.get(SSL_SERVER_CONF_KEY, SSL_SERVER_CONF_DEFAULT);
    }
    sslConf.addResource(sslConfResource);
    // Only fallback to input config if classpath SSL config does not load for
    // backward compatibility.
    if (sslConf.getResource(sslConfResource) == null) {
        LOG.debug("{} can't be loaded form classpath, fallback using SSL"
            + " config from input configuration.", sslConfResource);
        // Note: the require-client-cert setting made above is discarded on this
        // path; the caller gets the original configuration object unchanged.
        sslConf = conf;
    }
    return sslConf;
}
@Test
public void testSslConfClassPathFirst() throws Exception {
    // Generate a valid ssl-client.xml into classpath.
    // This will be the preferred approach.
    Configuration conf = createConfiguration(false, true);
    // Injecting fake ssl config into input conf.
    conf.addResource(FAKE_SSL_CONFIG);
    // Classpath SSL config will be preferred if both input conf and
    // the classpath SSL config exist for backward compatibility.
    Configuration sslConfLoaded = SSLFactory.readSSLConfiguration(conf, CLIENT);
    String clientTsLoc = sslConfLoaded.get(getClientTrustStoreKeyName());
    // The fake trust-store location must NOT have won, and the returned
    // configuration must be the freshly loaded one, not the input object.
    assertNotEquals("trustStoreLocation", clientTsLoc);
    assertNotEquals(conf, sslConfLoaded);
}
/**
 * No-op: this implementation holds nothing that needs releasing.
 */
@Override
public void close() {
}
/**
 * Verifies that an error raised by the local node's publisher propagates
 * through the push-routing handle to the caller.
 * NOTE(review): the name says "Locale" but the scenario is a *local* node
 * error — likely a typo for "LocalNodeException"; confirm before renaming.
 */
@Test
public void shouldFail_LocaleNodeException() throws ExecutionException, InterruptedException {
    // Given: routing over both the local and the remote node.
    final AtomicReference<Set<KsqlNode>> nodes = new AtomicReference<>(
        ImmutableSet.of(ksqlNodeLocal, ksqlNodeRemote));
    final PushRouting routing = new PushRouting(sqr -> nodes.get(), 50, true);
    // When: remote rows flow normally while the local publisher errors out.
    final PushConnectionsHandle handle = handlePushRouting(routing);
    context.runOnContext(v -> {
        remotePublisher.accept(REMOTE_ROW1);
        remotePublisher.accept(REMOTE_ROW2);
        localPublisher.error(new RuntimeException("Random local error"));
    });
    final Throwable throwable = waitOnException(handle);
    handle.close();
    // Then: the local error message surfaces on the handle's failure.
    assertThat(throwable.getMessage(), containsString("Random local error"));
}
/**
 * Parses a channel URI of the form
 * {@code [aeron-spy:]aeron:media[?key=value[|key=value]*]} using a
 * single-pass character state machine.
 *
 * @param cs the URI text to parse
 * @return the parsed {@link ChannelUri}
 * @throws IllegalArgumentException if the scheme or media section is malformed
 * @throws IllegalStateException    if a parameter key/value is malformed
 */
@SuppressWarnings("MethodLength")
public static ChannelUri parse(final CharSequence cs) {
    int position = 0;
    final String prefix;
    // Optional spy prefix ahead of the mandatory "aeron:" scheme.
    if (startsWith(cs, 0, SPY_PREFIX)) {
        prefix = SPY_QUALIFIER;
        position = SPY_PREFIX.length();
    } else {
        prefix = "";
    }
    if (!startsWith(cs, position, AERON_PREFIX)) {
        throw new IllegalArgumentException("Aeron URIs must start with 'aeron:', found: " + cs);
    } else {
        position += AERON_PREFIX.length();
    }
    final StringBuilder builder = new StringBuilder();
    final Object2ObjectHashMap<String, String> params = new Object2ObjectHashMap<>();
    String media = null;
    String key = null;
    // MEDIA until '?', then alternating PARAMS_KEY / PARAMS_VALUE tokens.
    State state = State.MEDIA;
    for (int i = position, length = cs.length(); i < length; i++) {
        final char c = cs.charAt(i);
        switch (state) {
            case MEDIA:
                switch (c) {
                    case '?':
                        media = builder.toString();
                        builder.setLength(0);
                        state = State.PARAMS_KEY;
                        break;
                    case ':':
                    case '|':
                    case '=':
                        // Separator characters are illegal inside the media name.
                        throw new IllegalArgumentException(
                            "encountered '" + c + "' within media definition at index " + i + " in " + cs);
                    default:
                        builder.append(c);
                }
                break;
            case PARAMS_KEY:
                if (c == '=') {
                    if (0 == builder.length()) {
                        throw new IllegalStateException("empty key not allowed at index " + i + " in " + cs);
                    }
                    key = builder.toString();
                    builder.setLength(0);
                    state = State.PARAMS_VALUE;
                } else {
                    if (c == '|') {
                        // A key must be terminated by '=', never by '|'.
                        throw new IllegalStateException("invalid end of key at index " + i + " in " + cs);
                    }
                    builder.append(c);
                }
                break;
            case PARAMS_VALUE:
                if (c == '|') {
                    // End of one key=value pair; start collecting the next key.
                    params.put(key, builder.toString());
                    builder.setLength(0);
                    state = State.PARAMS_KEY;
                } else {
                    builder.append(c);
                }
                break;
            default:
                throw new IllegalStateException("unexpected state=" + state + " in " + cs);
        }
    }
    // Flush whatever the final state left in the builder.
    switch (state) {
        case MEDIA:
            media = builder.toString();
            validateMedia(media);
            break;
        case PARAMS_VALUE:
            params.put(key, builder.toString());
            break;
        default:
            // Ending in PARAMS_KEY means a key without a value.
            throw new IllegalStateException("no more input found, state=" + state + " in " + cs);
    }
    return new ChannelUri(prefix, media, params);
}
@Test
void equalsReturnsTrueWhenTheSameInstance() {
    final ChannelUri channelUri = ChannelUri.parse(
        "aeron:udp?endpoint=224.10.9.8|port=4567|interface=192.168.0.3|ttl=16");
    // equals must be reflexive: an instance is always equal to itself.
    assertEquals(channelUri, channelUri);
}
/**
 * Creates a trigger that fires once the watermark passes the end of the window.
 *
 * @return a new {@code FromEndOfWindow} trigger
 */
public static FromEndOfWindow pastEndOfWindow() {
    return new FromEndOfWindow();
}
@Test
public void testLateFiringsToString() {
    // The string form should name both the watermark trigger and the
    // configured late-firing sub-trigger.
    Trigger trigger = AfterWatermark.pastEndOfWindow().withLateFirings(StubTrigger.named("t1"));
    assertEquals("AfterWatermark.pastEndOfWindow().withLateFirings(t1)", trigger.toString());
}
/**
 * Returns the state directory for the given task, creating it (and its
 * parent) on demand when persistent stores are enabled.
 *
 * @param taskId the task whose directory is requested
 * @return the task directory (it may not exist when stores are not persistent)
 * @throws ProcessorStateException if a needed directory cannot be created, or
 *         the path is already occupied by a regular file
 */
public File getOrCreateDirectoryForTask(final TaskId taskId) {
    final File taskParentDir = getTaskDirectoryParentName(taskId);
    final File taskDir = new File(taskParentDir, StateManagerUtil.toTaskDirString(taskId));
    if (hasPersistentStores) {
        if (!taskDir.exists()) {
            synchronized (taskDirCreationLock) {
                // to avoid a race condition, we need to check again if the directory does not exist:
                // otherwise, two threads might pass the outer `if` (and enter the `then` block),
                // one blocks on `synchronized` while the other creates the directory,
                // and the blocking one fails when trying to create it after it's unblocked
                if (!taskParentDir.exists() && !taskParentDir.mkdir()) {
                    throw new ProcessorStateException(
                        String.format("Parent [%s] of task directory [%s] doesn't exist and couldn't be created",
                            taskParentDir.getPath(), taskDir.getPath()));
                }
                if (!taskDir.exists() && !taskDir.mkdir()) {
                    throw new ProcessorStateException(
                        String.format("task directory [%s] doesn't exist and couldn't be created",
                            taskDir.getPath()));
                }
            }
        } else if (!taskDir.isDirectory()) {
            // Path exists but is a plain file; refuse rather than clobber it.
            throw new ProcessorStateException(
                String.format("state directory [%s] can't be created as there is an existing file with the same name",
                    taskDir.getPath()));
        }
    }
    return taskDir;
}
@Test public void shouldThrowProcessorStateExceptionIfTestDirOccupied() throws IOException { final TaskId taskId = new TaskId(0, 0); // Replace taskDir to a regular file final File taskDir = new File(appDir, toTaskDirString(taskId)); Utils.delete(taskDir); Files.createFile(taskDir.toPath()); // Error: ProcessorStateException should be thrown. assertThrows(ProcessorStateException.class, () -> directory.getOrCreateDirectoryForTask(taskId)); }
/**
 * Resolves the dependency graph required to install the given entity.
 * Only V1 entities are supported.
 *
 * @throws IllegalArgumentException for any other entity version
 */
@Override
public Graph<Entity> resolveForInstallation(Entity entity,
                                            Map<String, ValueReference> parameters,
                                            Map<EntityDescriptor, Entity> entities) {
    // Guard clause: reject anything that is not a V1 entity up front.
    if (!(entity instanceof EntityV1)) {
        throw new IllegalArgumentException("Unsupported entity version: " + entity.getClass());
    }
    return resolveForInstallationV1((EntityV1) entity, parameters, entities);
}
@Test
@MongoDBFixtures("InputFacadeTest.json")
public void resolveEntity() {
    // Build a raw-UDP input entity with a representative configuration map.
    final Map<String, Object> configuration = new HashMap<>();
    configuration.put("override_source", null);
    configuration.put("recv_buffer_size", 262144);
    configuration.put("bind_address", "127.0.0.1");
    configuration.put("port", 5555);
    configuration.put("number_worker_threads", 8);
    final Entity entity = EntityV1.builder()
        .id(ModelId.of("5acc84f84b900a4ff290d9a7"))
        .type(ModelTypes.INPUT_V1)
        .data(objectMapper.convertValue(InputEntity.create(
            ValueReference.of("Local Raw UDP"),
            ReferenceMapUtils.toReferenceMap(configuration),
            Collections.emptyMap(),
            ValueReference.of("org.graylog2.inputs.raw.udp.RawUDPInput"),
            ValueReference.of(false),
            Collections.emptyList()), JsonNode.class))
        .build();
    // An input has no dependencies, so the graph holds just the entity itself.
    final Graph<Entity> graph = facade.resolveForInstallation(entity, Collections.emptyMap(), Collections.emptyMap());
    assertThat(graph.nodes()).containsOnly(entity);
}
/**
 * Samples the offset between {@code System.currentTimeMillis()} and
 * {@code System.nanoTime()}, retrying until the two nanoTime reads that
 * bracket the millis read are close enough to trust, else falling back to
 * the narrowest window observed.
 */
public void sample() {
    // Loop attempts to find a measurement that is accurate to a given threshold
    long bestInitialCurrentNanoTime = 0, bestInitialNanoTime = 0;
    long bestNanoTimeWindow = Long.MAX_VALUE;
    final int maxMeasurementRetries = this.maxMeasurementRetries;
    final long measurementThresholdNs = this.measurementThresholdNs;
    for (int i = 0; i < maxMeasurementRetries; i++) {
        final long firstNanoTime = System.nanoTime();
        final long initialCurrentTimeMillis = System.currentTimeMillis();
        final long secondNanoTime = System.nanoTime();
        // Width of the window in which the millis value was captured.
        final long nanoTimeWindow = secondNanoTime - firstNanoTime;
        if (nanoTimeWindow < measurementThresholdNs) {
            // Precise enough: use the midpoint of the two nanoTime reads.
            // NOTE(review): (a + b) >> 1 can overflow for extreme nanoTime
            // values — presumably safe for realistic clock origins; confirm.
            timeFields = new TimeFields(
                MILLISECONDS.toNanos(initialCurrentTimeMillis),
                (firstNanoTime + secondNanoTime) >> 1,
                true);
            return;
        } else if (nanoTimeWindow < bestNanoTimeWindow) {
            // Remember the narrowest window seen so far as a fallback.
            bestInitialCurrentNanoTime = MILLISECONDS.toNanos(initialCurrentTimeMillis);
            bestInitialNanoTime = (firstNanoTime + secondNanoTime) >> 1;
            bestNanoTimeWindow = nanoTimeWindow;
        }
    }
    // If we never get a time below the threshold, pick the narrowest window we've seen so far.
    timeFields = new TimeFields(
        bestInitialCurrentNanoTime,
        bestInitialNanoTime,
        false);
}
@Test
void shouldResampleSaneEpochTimestamp() {
    final OffsetEpochNanoClock clock = new OffsetEpochNanoClock();
    // Re-sampling after construction must still yield a sane epoch time.
    clock.sample();
    assertSaneEpochTimestamp(clock);
}
/**
 * Returns a description of this topology, delegating to the internal builder.
 * Synchronized, matching the class's locking discipline for builder access.
 */
public synchronized TopologyDescription describe() {
    return internalTopologyBuilder.describe();
}
@Test
public void shouldDescribeEmptyTopology() {
    // A topology with no nodes must still yield the expected (empty) description.
    assertThat(topology.describe(), equalTo(expectedDescription));
}
/**
 * Writes a ConsensusModule state snapshot message to the publication,
 * retrying (idling between attempts) while the claim is rejected.
 */
void snapshotConsensusModuleState(
    final long nextSessionId,
    final long nextServiceSessionId,
    final long logServiceSessionId,
    final int pendingMessageCapacity) {
    final int length = MessageHeaderEncoder.ENCODED_LENGTH + ConsensusModuleEncoder.BLOCK_LENGTH;
    idleStrategy.reset();
    while (true) {
        // tryClaim returns a positive position on success; non-positive
        // results (e.g. back pressure) are handled by checkResultAndIdle.
        final long result = publication.tryClaim(length, bufferClaim);
        if (result > 0) {
            consensusModuleEncoder
                .wrapAndApplyHeader(bufferClaim.buffer(), bufferClaim.offset(), messageHeaderEncoder)
                .nextSessionId(nextSessionId)
                .nextServiceSessionId(nextServiceSessionId)
                .logServiceSessionId(logServiceSessionId)
                .pendingMessageCapacity(pendingMessageCapacity);
            bufferClaim.commit();
            break;
        }
        checkResultAndIdle(result);
    }
}
@Test
void snapshotConsensusModuleState() {
    final int offset = 32;
    final int length = MessageHeaderEncoder.ENCODED_LENGTH + ConsensusModuleEncoder.BLOCK_LENGTH;
    final long nextSessionId = 42;
    final long nextServiceSessionId = -4_000_000_001L;
    final long logServiceSessionId = -4_000_000_000L;
    final int pendingMessageCapacity = 4096;
    // Fail twice (back pressure, admin action) before tryClaim succeeds,
    // to exercise the retry-with-idle loop.
    when(publication.tryClaim(eq(length), any()))
        .thenReturn(BACK_PRESSURED, ADMIN_ACTION)
        .thenAnswer(mockTryClaim(offset));
    snapshotTaker.snapshotConsensusModuleState(
        nextSessionId, nextServiceSessionId, logServiceSessionId, pendingMessageCapacity);
    // Expected order: reset, then claim/idle alternation until the third
    // claim succeeds (no idle after success).
    final InOrder inOrder = inOrder(idleStrategy, publication);
    inOrder.verify(idleStrategy).reset();
    inOrder.verify(publication).tryClaim(anyInt(), any());
    inOrder.verify(idleStrategy).idle();
    inOrder.verify(publication).tryClaim(anyInt(), any());
    inOrder.verify(idleStrategy).idle();
    inOrder.verify(publication).tryClaim(anyInt(), any());
    inOrder.verifyNoMoreInteractions();
    // Decode what was written and confirm every field round-trips.
    consensusModuleDecoder.wrapAndApplyHeader(buffer, offset + HEADER_LENGTH, messageHeaderDecoder);
    assertEquals(nextSessionId, consensusModuleDecoder.nextSessionId());
    assertEquals(nextServiceSessionId, consensusModuleDecoder.nextServiceSessionId());
    assertEquals(logServiceSessionId, consensusModuleDecoder.logServiceSessionId());
    assertEquals(pendingMessageCapacity, consensusModuleDecoder.pendingMessageCapacity());
}
/**
 * Joins the array elements with the given conjunction and no per-element
 * prefix/suffix; delegates to the four-argument overload.
 *
 * @param array       elements to join
 * @param conjunction separator placed between elements
 * @return the joined string
 */
public static <T> String join(T[] array, CharSequence conjunction) {
    return join(array, conjunction, null, null);
}
/**
 * Covers both join overloads: with per-element wrapping and plain joining.
 */
@Test
public void joinTest() {
    String[] array = {"aa", "bb", "cc", "dd"};
    // With prefix/suffix, every element is wrapped before joining.
    String join = ArrayUtil.join(array, ",", "[", "]");
    assertEquals("[aa],[bb],[cc],[dd]", join);
    // The two-argument overload (Object-typed input) joins with the separator only.
    Object boxed = new String[]{"aa", "bb", "cc", "dd"};
    String join2 = ArrayUtil.join(boxed, ",");
    assertEquals("aa,bb,cc,dd", join2);
}
/**
 * Returns the NSH metadata type value carried by this instance.
 */
public byte nshMdType() {
    return nshMdType;
}
/**
 * Construction must retain the metadata type passed to the constructor.
 */
@Test
public void testConstruction() {
    final NiciraNshMdType niciraNshMdType = new NiciraNshMdType(mdType1);
    assertThat(niciraNshMdType, is(notNullValue()));
    assertThat(niciraNshMdType.nshMdType(), is(mdType1));
}
/**
 * Requests the auth-config view template from the given authorization plugin.
 *
 * @param pluginId id of the plugin to query
 * @return the plugin-provided view template
 */
String getAuthConfigView(String pluginId) {
    return pluginRequestHelper.submitRequest(pluginId, REQUEST_GET_AUTH_CONFIG_VIEW, new DefaultPluginInteractionCallback<>() {
        @Override
        public String onSuccess(String responseBody, Map<String, String> responseHeaders, String resolvedExtensionVersion) {
            // Response parsing depends on the negotiated extension version.
            return getMessageConverter(resolvedExtensionVersion).getPluginConfigurationViewFromResponseBody(responseBody);
        }
    });
}
@Test
void shouldTalkToPlugin_To_GetAuthConfigView() {
    String responseBody = "{ \"template\": \"<div>This is view snippet</div>\" }";
    when(pluginManager.submitTo(eq(PLUGIN_ID), eq(AUTHORIZATION_EXTENSION), requestArgumentCaptor.capture())).thenReturn(new DefaultGoPluginApiResponse(SUCCESS_RESPONSE_CODE, responseBody));
    String pluginConfigurationView = authorizationExtension.getAuthConfigView(PLUGIN_ID);
    // The extension must have sent the expected request (v2.0, no body)...
    assertRequest(requestArgumentCaptor.getValue(), AUTHORIZATION_EXTENSION, "2.0", REQUEST_GET_AUTH_CONFIG_VIEW, null);
    // ...and unwrapped the "template" field from the JSON response.
    assertThat(pluginConfigurationView).isEqualTo("<div>This is view snippet</div>");
}
/**
 * Reads attributes of a file or directory via WebDAV PROPFIND, falling back
 * to a HEAD request when the server rejects PROPFIND.
 *
 * @param file     path to inspect; the root returns {@code PathAttributes.EMPTY}
 * @param listener progress listener for the listing
 * @return the resolved attributes
 * @throws NotfoundException   when no matching resource exists or the resource
 *                             type contradicts the requested path type
 * @throws BackgroundException for mapped protocol or I/O failures
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    try {
        try {
            for(final DavResource resource : this.list(file)) {
                // Reject a result whose resource type contradicts the requested path type.
                if(resource.isDirectory()) {
                    if(!file.getType().contains(Path.Type.directory)) {
                        throw new NotfoundException(String.format("File %s has set MIME type %s",
                            file.getAbsolute(), DavResource.HTTPD_UNIX_DIRECTORY_CONTENT_TYPE));
                    }
                }
                else {
                    if(!file.getType().contains(Path.Type.file)) {
                        throw new NotfoundException(String.format("File %s has set MIME type %s",
                            file.getAbsolute(), resource.getContentType()));
                    }
                }
                // First matching resource wins.
                return this.toAttributes(resource);
            }
            throw new NotfoundException(file.getAbsolute());
        }
        catch(SardineException e) {
            try {
                throw new DAVExceptionMappingService().map("Failure to read attributes of {0}", e, file);
            }
            catch(InteroperabilityException | ConflictException i) {
                // PROPFIND Method not allowed
                if(log.isWarnEnabled()) {
                    log.warn(String.format("Failure with PROPFIND request for %s. %s", file, i.getMessage()));
                }
                // Fall back to a HEAD request for basic attributes.
                final PathAttributes attr = this.head(file);
                if(PathAttributes.EMPTY == attr) {
                    // HEAD yielded nothing either; surface the original failure.
                    throw i;
                }
                return attr;
            }
        }
    }
    catch(SardineException e) {
        throw new DAVExceptionMappingService().map("Failure to read attributes of {0}", e, file);
    }
    catch(IOException e) {
        throw new HttpExceptionMappingService().map(e, file);
    }
}
@Test(expected = NotfoundException.class)
public void testFindNotFound() throws Exception {
    // A random file name that should not exist on the server.
    final Path test = new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    final DAVAttributesFinderFeature f = new DAVAttributesFinderFeature(session);
    try {
        f.find(test);
    }
    catch(NotfoundException e) {
        // Verify the failure detail before re-throwing for the expected-exception check.
        assertTrue(StringUtils.startsWith(e.getDetail(), "Unexpected response"));
        throw e;
    }
}
@Transactional @Cacheable(CACHE_DATABASE_SEARCH) @CacheEvict(value = CACHE_AVERAGE_REVIEW_RATING, allEntries = true) public SearchHits<ExtensionSearch> search(ISearchService.Options options) { // grab all extensions var matchingExtensions = repositories.findAllActiveExtensions(); // no extensions in the database if (matchingExtensions.isEmpty()) { return new SearchHitsImpl<>(0,TotalHitsRelation.OFF, 0f, null, null, Collections.emptyList(), null, null); } // exlude namespaces if(options.namespacesToExclude != null) { for(var namespaceToExclude : options.namespacesToExclude) { matchingExtensions = matchingExtensions.filter(extension -> !extension.getNamespace().getName().equals(namespaceToExclude)); } } // filter target platform if(TargetPlatform.isValid(options.targetPlatform)) { matchingExtensions = matchingExtensions.filter(extension -> extension.getVersions().stream().anyMatch(ev -> ev.getTargetPlatform().equals(options.targetPlatform))); } // filter category if (options.category != null) { matchingExtensions = matchingExtensions.filter(extension -> { var latest = repositories.findLatestVersion(extension, null, false, true); return latest.getCategories().stream().anyMatch(category -> category.equalsIgnoreCase(options.category)); }); } // filter text if (options.queryString != null) { matchingExtensions = matchingExtensions.filter(extension -> { var latest = repositories.findLatestVersion(extension, null, false, true); return extension.getName().toLowerCase().contains(options.queryString.toLowerCase()) || extension.getNamespace().getName().contains(options.queryString.toLowerCase()) || (latest.getDescription() != null && latest.getDescription() .toLowerCase().contains(options.queryString.toLowerCase())) || (latest.getDisplayName() != null && latest.getDisplayName() .toLowerCase().contains(options.queryString.toLowerCase())); }); } // need to perform the sortBy () // 'relevance' | 'timestamp' | 'rating' | 'downloadCount'; Stream<ExtensionSearch> searchEntries; 
if("relevance".equals(options.sortBy) || "rating".equals(options.sortBy)) { var searchStats = new SearchStats(repositories); searchEntries = matchingExtensions.stream().map(extension -> relevanceService.toSearchEntry(extension, searchStats)); } else { searchEntries = matchingExtensions.stream().map(extension -> { var latest = repositories.findLatestVersion(extension, null, false, true); var targetPlatforms = repositories.findExtensionTargetPlatforms(extension); return extension.toSearch(latest, targetPlatforms); }); } var comparators = new HashMap<>(Map.of( "relevance", new RelevanceComparator(), "timestamp", new TimestampComparator(), "rating", new RatingComparator(), "downloadCount", new DownloadedCountComparator() )); var comparator = comparators.get(options.sortBy); if(comparator != null) { searchEntries = searchEntries.sorted(comparator); } var sortedExtensions = searchEntries.collect(Collectors.toList()); // need to do sortOrder // 'asc' | 'desc'; if ("desc".equals(options.sortOrder)) { // reverse the order Collections.reverse(sortedExtensions); } // Paging var totalHits = sortedExtensions.size(); var endIndex = Math.min(sortedExtensions.size(), options.requestedOffset + options.requestedSize); var startIndex = Math.min(endIndex, options.requestedOffset); sortedExtensions = sortedExtensions.subList(startIndex, endIndex); List<SearchHit<ExtensionSearch>> searchHits; if (sortedExtensions.isEmpty()) { searchHits = Collections.emptyList(); } else { // client is interested only in the extension IDs searchHits = sortedExtensions.stream().map(extensionSearch -> new SearchHit<>(null, null, null, 0.0f, null, null, null, null, null, null, extensionSearch)).collect(Collectors.toList()); } return new SearchHitsImpl<>(totalHits, TotalHitsRelation.OFF, 0f, null, null, searchHits, null, null); }
@Test
public void testQueryStringDescription() {
    // Four extensions; only ext3's description contains the query text.
    var ext1 = mockExtension("yaml", 3.0, 100, 0, "redhat", List.of("Snippets", "Programming Languages"));
    var ext2 = mockExtension("java", 4.0, 100, 0, "redhat", List.of("Snippets", "Programming Languages"));
    ext2.getVersions().get(0).setDescription("another desc");
    var ext3 = mockExtension("openshift", 4.0, 100, 0, "redhat", List.of("Snippets", "Other"));
    ext3.getVersions().get(0).setDescription("my custom desc");
    var ext4 = mockExtension("foo", 4.0, 100, 0, "bar", List.of("Other"));
    Mockito.when(repositories.findAllActiveExtensions()).thenReturn(Streamable.of(List.of(ext1, ext2, ext3, ext4)));
    var searchOptions = new ISearchService.Options("my custom desc", null, TargetPlatform.NAME_UNIVERSAL, 50, 0, null, null, false);
    var result = search.search(searchOptions);
    // custom description
    assertThat(result.getTotalHits()).isEqualTo(1);
    // Check it found the correct extension
    var hits = result.getSearchHits();
    assertThat(getIdFromExtensionHits(hits, 0)).isEqualTo(getIdFromExtensionName("openshift"));
}
/**
 * Looks up the publish info the given client has registered for a service.
 *
 * @param clientId id of the publishing client
 * @param service  service whose publish info is requested
 * @return the publish info, or empty when the client is unknown or has
 *         nothing registered for the service
 */
private Optional<InstancePublishInfo> getInstanceInfo(String clientId, Service service) {
    return Optional.ofNullable(clientManager.getClient(clientId))
            .map(client -> client.getInstancePublishInfo(service));
}
@Test
void testGetInstanceInfo() throws NoSuchMethodException, InvocationTargetException, IllegalAccessException {
    // getInstanceInfo is private, so invoke it reflectively.
    Class<ServiceStorage> serviceStorageClass = ServiceStorage.class;
    Method getInstanceInfo = serviceStorageClass.getDeclaredMethod("getInstanceInfo", String.class, Service.class);
    getInstanceInfo.setAccessible(true);
    Optional<InstancePublishInfo> optionalInstancePublishInfo = (Optional<InstancePublishInfo>) getInstanceInfo.invoke(serviceStorage, NACOS, SERVICE);
    // No client is registered for this id, so the lookup must be empty.
    assertFalse(optionalInstancePublishInfo.isPresent());
}
/**
 * Updates an existing reservation after validating the request and checking
 * ACLs, delegating the plan change to the reservation agent. Success and
 * failure are both recorded in the RM audit log.
 *
 * @param request the update request (reservation id plus new definition)
 * @return an empty update response on success
 * @throws YarnException if validation, ACL check, or the update fails
 * @throws IOException   on RPC failure
 */
@Override
public ReservationUpdateResponse updateReservation(
    ReservationUpdateRequest request) throws YarnException, IOException {
    // Check if reservation system is enabled
    checkReservationSystem();
    ReservationUpdateResponse response =
        recordFactory.newRecordInstance(ReservationUpdateResponse.class);
    // Validate the input
    Plan plan = rValidator.validateReservationUpdateRequest(reservationSystem, request);
    ReservationId reservationId = request.getReservationId();
    String queueName = reservationSystem.getQueueForReservation(reservationId);
    // Check ACLs
    String user = checkReservationACLs(queueName,
        AuditConstants.UPDATE_RESERVATION_REQUEST, reservationId);
    // Try to update the reservation using default agent
    try {
        boolean result = plan.getReservationAgent().updateReservation(reservationId,
            user, plan, request.getReservationDefinition());
        if (!result) {
            // Agent declined the update; audit and surface as a remote exception.
            String errMsg = "Unable to update reservation: " + reservationId;
            RMAuditLogger.logFailure(user, AuditConstants.UPDATE_RESERVATION_REQUEST,
                errMsg, "ClientRMService", errMsg);
            throw RPCUtil.getRemoteException(errMsg);
        }
    } catch (PlanningException e) {
        RMAuditLogger.logFailure(user, AuditConstants.UPDATE_RESERVATION_REQUEST,
            e.getMessage(), "ClientRMService",
            "Unable to update the reservation: " + reservationId);
        throw RPCUtil.getRemoteException(e);
    }
    RMAuditLogger.logSuccess(user, AuditConstants.UPDATE_RESERVATION_REQUEST,
        "ClientRMService: " + reservationId);
    return response;
}
@Test
public void testUpdateReservation() {
    resourceManager = setupResourceManager();
    ClientRMService clientService = resourceManager.getClientRMService();
    Clock clock = new UTCClock();
    long arrival = clock.getTime();
    long duration = 60000;
    long deadline = (long) (arrival + 1.05 * duration);
    // First submit a reservation we can then update.
    ReservationSubmissionRequest sRequest =
        submitReservationTestHelper(clientService, arrival, deadline, duration);
    ReservationDefinition rDef = sRequest.getReservationDefinition();
    ReservationRequest rr = rDef.getReservationRequests().getReservationResources().get(0);
    ReservationId reservationID = sRequest.getReservationId();
    // Change capacity and timing for the update request.
    rr.setNumContainers(5);
    arrival = clock.getTime();
    duration = 30000;
    deadline = (long) (arrival + 1.05 * duration);
    rr.setDuration(duration);
    rDef.setArrival(arrival);
    rDef.setDeadline(deadline);
    ReservationUpdateRequest uRequest =
        ReservationUpdateRequest.newInstance(rDef, reservationID);
    ReservationUpdateResponse uResponse = null;
    try {
        uResponse = clientService.updateReservation(uRequest);
    } catch (Exception e) {
        Assert.fail(e.getMessage());
    }
    // A non-null response means the update was accepted.
    Assert.assertNotNull(uResponse);
    System.out.println("Update reservation response: " + uResponse);
}
/**
 * Attempts to take a mail of at least the given priority without blocking:
 * first from the thread-local batch, then (only when new mail was signalled)
 * from the shared queue under the lock.
 *
 * @param priority minimum priority of the mail to take
 * @return the taken mail, or empty when none is available
 */
@Override
public Optional<Mail> tryTake(int priority) {
    checkIsMailboxThread();
    checkTakeStateConditions();
    // Fast path: the already-drained local batch needs no locking.
    Mail head = takeOrNull(batch, priority);
    if (head != null) {
        return Optional.of(head);
    }
    // Cheap pre-check that avoids taking the lock when the shared queue is
    // known to be empty.
    if (!hasNewMail) {
        return Optional.empty();
    }
    final ReentrantLock lock = this.lock;
    lock.lock();
    try {
        final Mail value = takeOrNull(queue, priority);
        if (value == null) {
            return Optional.empty();
        }
        // Refresh the new-mail flag now that the queue may have been drained.
        hasNewMail = !queue.isEmpty();
        return Optional.ofNullable(value);
    } finally {
        lock.unlock();
    }
}
/**
 * Drives the shared put/take scenario with a taker that busy-spins on the
 * non-blocking tryTake until a mail becomes available.
 */
@Test
void testConcurrentPutTakeNonBlockingAndWait() throws Exception {
    testPutTake(mailbox -> {
        Optional<Mail> mail = mailbox.tryTake(DEFAULT_PRIORITY);
        while (!mail.isPresent()) {
            mail = mailbox.tryTake(DEFAULT_PRIORITY);
        }
        return mail.get();
    });
}
/**
 * Visits a call operator, delegating to the shared shuttle helper which
 * returns the (possibly rewritten) operator.
 */
@Override
public ScalarOperator visitCall(CallOperator call, Void context) {
    return shuttleIfUpdate(call);
}
/**
 * Visiting a call operator with either shuttle must return an operator
 * equal to the input when nothing needs rewriting.
 */
@Test
void testCallOperator() {
    CallOperator operator = new CallOperator("count", INT, Lists.newArrayList());
    assertEquals(operator, shuttle.visitCall(operator, null));
    assertEquals(operator, shuttle2.visitCall(operator, null));
}
/**
 * Returns the server's cluster id wrapped in a 200 OK response.
 */
public EndpointResponse getServerClusterId() {
    return EndpointResponse.ok(serverMetadata.getClusterId());
}
@Test
public void shouldReturnServerClusterId() {
    // When:
    final EndpointResponse response = serverMetadataResource.getServerClusterId();
    // Then: 200 OK with a ServerClusterId entity built from the known ids.
    assertThat(response.getStatus(), equalTo(200));
    assertThat(response.getEntity(), instanceOf(ServerClusterId.class));
    final ServerClusterId serverClusterId = (ServerClusterId) response.getEntity();
    assertThat(
        serverClusterId,
        equalTo(ServerClusterId.of(KAFKA_CLUSTER_ID, KSQL_SERVICE_ID))
    );
}
/**
 * Private constructor; instances are obtained via the static
 * {@code getReader} factory.
 *
 * @param dataBuffer buffer holding the fixed-bit encoded values
 */
private FixedBitIntReader(PinotDataBuffer dataBuffer) {
    _dataBuffer = dataBuffer;
}
@Test
public void testFixedBitIntReader() throws Exception {
    int[] values = new int[NUM_VALUES];
    // Exercise every supported bit width.
    for (int numBits = 1; numBits <= 31; numBits++) {
        File indexFile = new File(INDEX_DIR, "bit-" + numBits);
        // Write NUM_VALUES random dictionary ids that fit in numBits bits.
        try (
            FixedBitSVForwardIndexWriter indexWriter = new FixedBitSVForwardIndexWriter(indexFile, NUM_VALUES, numBits)) {
            int maxValue = numBits < 31 ? 1 << numBits : Integer.MAX_VALUE;
            for (int i = 0; i < NUM_VALUES; i++) {
                int value = RANDOM.nextInt(maxValue);
                values[i] = value;
                indexWriter.putDictId(value);
            }
        }
        try (PinotDataBuffer dataBuffer = PinotDataBuffer.mapReadOnlyBigEndianFile(indexFile)) {
            FixedBitIntReader intReader = FixedBitIntReader.getReader(dataBuffer, numBits);
            // Checked single-value reads must round-trip every value.
            for (int i = 0; i < NUM_VALUES; i++) {
                assertEquals(intReader.read(i), values[i]);
            }
            // Stops two values early — presumably readUnchecked may read past
            // the requested index; confirm against the reader contract.
            for (int i = 0; i < NUM_VALUES - 2; i++) {
                assertEquals(intReader.readUnchecked(i), values[i]);
            }
            // Bulk 32-value reads must agree with the reference array.
            int[] out = new int[64];
            intReader.read32(0, out, 0);
            intReader.read32(32, out, 32);
            for (int i = 0; i < 64; i++) {
                assertEquals(out[i], values[i]);
            }
        }
    }
}
@Override public DataflowPipelineJob run(Pipeline pipeline) { // Multi-language pipelines and pipelines that include upgrades should automatically be upgraded // to Runner v2. if (DataflowRunner.isMultiLanguagePipeline(pipeline) || includesTransformUpgrades(pipeline)) { List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList()); if (!experiments.contains("use_runner_v2")) { LOG.info( "Automatically enabling Dataflow Runner v2 since the pipeline used cross-language" + " transforms or pipeline needed a transform upgrade."); options.setExperiments( ImmutableList.<String>builder().addAll(experiments).add("use_runner_v2").build()); } } if (useUnifiedWorker(options)) { if (hasExperiment(options, "disable_runner_v2") || hasExperiment(options, "disable_runner_v2_until_2023") || hasExperiment(options, "disable_prime_runner_v2")) { throw new IllegalArgumentException( "Runner V2 both disabled and enabled: at least one of ['beam_fn_api', 'use_unified_worker', 'use_runner_v2', 'use_portable_job_submission'] is set and also one of ['disable_runner_v2', 'disable_runner_v2_until_2023', 'disable_prime_runner_v2'] is set."); } List<String> experiments = new ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true if (!experiments.contains("use_runner_v2")) { experiments.add("use_runner_v2"); } if (!experiments.contains("use_unified_worker")) { experiments.add("use_unified_worker"); } if (!experiments.contains("beam_fn_api")) { experiments.add("beam_fn_api"); } if (!experiments.contains("use_portable_job_submission")) { experiments.add("use_portable_job_submission"); } options.setExperiments(ImmutableList.copyOf(experiments)); } logWarningIfPCollectionViewHasNonDeterministicKeyCoder(pipeline); logWarningIfBigqueryDLQUnused(pipeline); if (shouldActAsStreaming(pipeline)) { options.setStreaming(true); if (useUnifiedWorker(options)) { options.setEnableStreamingEngine(true); List<String> experiments = new 
ArrayList<>(options.getExperiments()); // non-null if useUnifiedWorker is true if (!experiments.contains("enable_streaming_engine")) { experiments.add("enable_streaming_engine"); } if (!experiments.contains("enable_windmill_service")) { experiments.add("enable_windmill_service"); } } } if (!ExperimentalOptions.hasExperiment(options, "disable_projection_pushdown")) { ProjectionPushdownOptimizer.optimize(pipeline); } LOG.info( "Executing pipeline on the Dataflow Service, which will have billing implications " + "related to Google Compute Engine usage and other Google Cloud Services."); DataflowPipelineOptions dataflowOptions = options.as(DataflowPipelineOptions.class); String workerHarnessContainerImageURL = DataflowRunner.getContainerImageForJob(dataflowOptions); // This incorrectly puns the worker harness container image (which implements v1beta3 API) // with the SDK harness image (which implements Fn API). // // The same Environment is used in different and contradictory ways, depending on whether // it is a v1 or v2 job submission. RunnerApi.Environment defaultEnvironmentForDataflow = Environments.createDockerEnvironment(workerHarnessContainerImageURL); // The SdkComponents for portable an non-portable job submission must be kept distinct. Both // need the default environment. SdkComponents portableComponents = SdkComponents.create(); portableComponents.registerEnvironment( defaultEnvironmentForDataflow .toBuilder() .addAllDependencies(getDefaultArtifacts()) .addAllCapabilities(Environments.getJavaCapabilities()) .build()); RunnerApi.Pipeline portablePipelineProto = PipelineTranslation.toProto(pipeline, portableComponents, false); // Note that `stageArtifacts` has to be called before `resolveArtifact` because // `resolveArtifact` updates local paths to staged paths in pipeline proto. 
portablePipelineProto = resolveAnyOfEnvironments(portablePipelineProto); List<DataflowPackage> packages = stageArtifacts(portablePipelineProto); portablePipelineProto = resolveArtifacts(portablePipelineProto); portablePipelineProto = applySdkEnvironmentOverrides(portablePipelineProto, options); if (LOG.isDebugEnabled()) { LOG.debug( "Portable pipeline proto:\n{}", TextFormat.printer().printToString(portablePipelineProto)); } // Stage the portable pipeline proto, retrieving the staged pipeline path, then update // the options on the new job // TODO: add an explicit `pipeline` parameter to the submission instead of pipeline options LOG.info("Staging portable pipeline proto to {}", options.getStagingLocation()); byte[] serializedProtoPipeline = portablePipelineProto.toByteArray(); DataflowPackage stagedPipeline = options.getStager().stageToFile(serializedProtoPipeline, PIPELINE_FILE_NAME); dataflowOptions.setPipelineUrl(stagedPipeline.getLocation()); if (useUnifiedWorker(options)) { LOG.info("Skipping v1 transform replacements since job will run on v2."); } else { // Now rewrite things to be as needed for v1 (mutates the pipeline) // This way the job submitted is valid for v1 and v2, simultaneously replaceV1Transforms(pipeline); } // Capture the SdkComponents for look up during step translations SdkComponents dataflowV1Components = SdkComponents.create(); dataflowV1Components.registerEnvironment( defaultEnvironmentForDataflow .toBuilder() .addAllDependencies(getDefaultArtifacts()) .addAllCapabilities(Environments.getJavaCapabilities()) .build()); // No need to perform transform upgrading for the Runner v1 proto. RunnerApi.Pipeline dataflowV1PipelineProto = PipelineTranslation.toProto(pipeline, dataflowV1Components, true, false); if (LOG.isDebugEnabled()) { LOG.debug( "Dataflow v1 pipeline proto:\n{}", TextFormat.printer().printToString(dataflowV1PipelineProto)); } // Set a unique client_request_id in the CreateJob request. 
// This is used to ensure idempotence of job creation across retried // attempts to create a job. Specifically, if the service returns a job with // a different client_request_id, it means the returned one is a different // job previously created with the same job name, and that the job creation // has been effectively rejected. The SDK should return // Error::Already_Exists to user in that case. int randomNum = new Random().nextInt(9000) + 1000; String requestId = DateTimeFormat.forPattern("YYYYMMddHHmmssmmm") .withZone(DateTimeZone.UTC) .print(DateTimeUtils.currentTimeMillis()) + "_" + randomNum; JobSpecification jobSpecification = translator.translate( pipeline, dataflowV1PipelineProto, dataflowV1Components, this, packages); if (!isNullOrEmpty(dataflowOptions.getDataflowWorkerJar()) && !useUnifiedWorker(options)) { List<String> experiments = firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()); if (!experiments.contains("use_staged_dataflow_worker_jar")) { dataflowOptions.setExperiments( ImmutableList.<String>builder() .addAll(experiments) .add("use_staged_dataflow_worker_jar") .build()); } } Job newJob = jobSpecification.getJob(); try { newJob .getEnvironment() .setSdkPipelineOptions( MAPPER.readValue(MAPPER_WITH_MODULES.writeValueAsBytes(options), Map.class)); } catch (IOException e) { throw new IllegalArgumentException( "PipelineOptions specified failed to serialize to JSON.", e); } newJob.setClientRequestId(requestId); DataflowRunnerInfo dataflowRunnerInfo = DataflowRunnerInfo.getDataflowRunnerInfo(); String version = dataflowRunnerInfo.getVersion(); checkState( !"${pom.version}".equals(version), "Unable to submit a job to the Dataflow service with unset version ${pom.version}"); LOG.info("Dataflow SDK version: {}", version); newJob.getEnvironment().setUserAgent((Map) dataflowRunnerInfo.getProperties()); // The Dataflow Service may write to the temporary directory directly, so // must be verified. 
if (!isNullOrEmpty(options.getGcpTempLocation())) { newJob .getEnvironment() .setTempStoragePrefix( dataflowOptions.getPathValidator().verifyPath(options.getGcpTempLocation())); } newJob.getEnvironment().setDataset(options.getTempDatasetId()); if (options.getWorkerRegion() != null) { newJob.getEnvironment().setWorkerRegion(options.getWorkerRegion()); } if (options.getWorkerZone() != null) { newJob.getEnvironment().setWorkerZone(options.getWorkerZone()); } if (options.getFlexRSGoal() == DataflowPipelineOptions.FlexResourceSchedulingGoal.COST_OPTIMIZED) { newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_COST_OPTIMIZED"); } else if (options.getFlexRSGoal() == DataflowPipelineOptions.FlexResourceSchedulingGoal.SPEED_OPTIMIZED) { newJob.getEnvironment().setFlexResourceSchedulingGoal("FLEXRS_SPEED_OPTIMIZED"); } // Represent the minCpuPlatform pipeline option as an experiment, if not already present. if (!isNullOrEmpty(dataflowOptions.getMinCpuPlatform())) { List<String> experiments = firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()); List<String> minCpuFlags = experiments.stream() .filter(p -> p.startsWith("min_cpu_platform")) .collect(Collectors.toList()); if (minCpuFlags.isEmpty()) { dataflowOptions.setExperiments( ImmutableList.<String>builder() .addAll(experiments) .add("min_cpu_platform=" + dataflowOptions.getMinCpuPlatform()) .build()); } else { LOG.warn( "Flag min_cpu_platform is defined in both top level PipelineOption, " + "as well as under experiments. Proceed using {}.", minCpuFlags.get(0)); } } newJob .getEnvironment() .setExperiments( ImmutableList.copyOf( firstNonNull(dataflowOptions.getExperiments(), Collections.emptyList()))); // Set the Docker container image that executes Dataflow worker harness, residing in Google // Container Registry. Translator is guaranteed to create a worker pool prior to this point. // For runner_v1, only worker_harness_container is set. 
// For runner_v2, both worker_harness_container and sdk_harness_container are set to the same // value. String containerImage = getContainerImageForJob(options); for (WorkerPool workerPool : newJob.getEnvironment().getWorkerPools()) { workerPool.setWorkerHarnessContainerImage(containerImage); } configureSdkHarnessContainerImages(options, portablePipelineProto, newJob); newJob.getEnvironment().setVersion(getEnvironmentVersion(options)); if (hooks != null) { hooks.modifyEnvironmentBeforeSubmission(newJob.getEnvironment()); } // enable upload_graph when the graph is too large byte[] jobGraphBytes = DataflowPipelineTranslator.jobToString(newJob).getBytes(UTF_8); int jobGraphByteSize = jobGraphBytes.length; if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES && !hasExperiment(options, "upload_graph") && !useUnifiedWorker(options)) { List<String> experiments = firstNonNull(options.getExperiments(), Collections.emptyList()); options.setExperiments( ImmutableList.<String>builder().addAll(experiments).add("upload_graph").build()); LOG.info( "The job graph size ({} in bytes) is larger than {}. Automatically add " + "the upload_graph option to experiments.", jobGraphByteSize, CREATE_JOB_REQUEST_LIMIT_BYTES); } if (hasExperiment(options, "upload_graph") && useUnifiedWorker(options)) { ArrayList<String> experiments = new ArrayList<>(options.getExperiments()); while (experiments.remove("upload_graph")) {} options.setExperiments(experiments); LOG.warn( "The upload_graph experiment was specified, but it does not apply " + "to runner v2 jobs. Option has been automatically removed."); } // Upload the job to GCS and remove the graph object from the API call. The graph // will be downloaded from GCS by the service. 
if (hasExperiment(options, "upload_graph")) { DataflowPackage stagedGraph = options.getStager().stageToFile(jobGraphBytes, DATAFLOW_GRAPH_FILE_NAME); newJob.getSteps().clear(); newJob.setStepsLocation(stagedGraph.getLocation()); } if (!isNullOrEmpty(options.getDataflowJobFile()) || !isNullOrEmpty(options.getTemplateLocation())) { boolean isTemplate = !isNullOrEmpty(options.getTemplateLocation()); if (isTemplate) { checkArgument( isNullOrEmpty(options.getDataflowJobFile()), "--dataflowJobFile and --templateLocation are mutually exclusive."); } String fileLocation = firstNonNull(options.getTemplateLocation(), options.getDataflowJobFile()); checkArgument( fileLocation.startsWith("/") || fileLocation.startsWith("gs://"), "Location must be local or on Cloud Storage, got %s.", fileLocation); ResourceId fileResource = FileSystems.matchNewResource(fileLocation, false /* isDirectory */); String workSpecJson = DataflowPipelineTranslator.jobToString(newJob); try (PrintWriter printWriter = new PrintWriter( new BufferedWriter( new OutputStreamWriter( Channels.newOutputStream(FileSystems.create(fileResource, MimeTypes.TEXT)), UTF_8)))) { printWriter.print(workSpecJson); LOG.info("Printed job specification to {}", fileLocation); } catch (IOException ex) { String error = String.format("Cannot create output file at %s", fileLocation); if (isTemplate) { throw new RuntimeException(error, ex); } else { LOG.warn(error, ex); } } if (isTemplate) { LOG.info("Template successfully created."); return new DataflowTemplateJob(); } } String jobIdToUpdate = null; if (options.isUpdate()) { jobIdToUpdate = getJobIdFromName(options.getJobName()); newJob.setTransformNameMapping(options.getTransformNameMapping()); newJob.setReplaceJobId(jobIdToUpdate); } if (options.getCreateFromSnapshot() != null && !options.getCreateFromSnapshot().isEmpty()) { newJob.setTransformNameMapping(options.getTransformNameMapping()); newJob.setCreatedFromSnapshotId(options.getCreateFromSnapshot()); } Job jobResult; try { 
jobResult = dataflowClient.createJob(newJob); } catch (GoogleJsonResponseException e) { String errorMessages = "Unexpected errors"; if (e.getDetails() != null) { if (jobGraphByteSize >= CREATE_JOB_REQUEST_LIMIT_BYTES) { errorMessages = "The size of the serialized JSON representation of the pipeline " + "exceeds the allowable limit. " + "For more information, please see the documentation on job submission:\n" + "https://cloud.google.com/dataflow/docs/guides/deploying-a-pipeline#jobs"; } else { errorMessages = e.getDetails().getMessage(); } } throw new RuntimeException("Failed to create a workflow job: " + errorMessages, e); } catch (IOException e) { throw new RuntimeException("Failed to create a workflow job", e); } // Use a raw client for post-launch monitoring, as status calls may fail // regularly and need not be retried automatically. DataflowPipelineJob dataflowPipelineJob = new DataflowPipelineJob( DataflowClient.create(options), jobResult.getId(), options, jobSpecification != null ? jobSpecification.getStepNames() : Collections.emptyMap(), portablePipelineProto); // If the service returned client request id, the SDK needs to compare it // with the original id generated in the request, if they are not the same // (i.e., the returned job is not created by this request), throw // DataflowJobAlreadyExistsException or DataflowJobAlreadyUpdatedException // depending on whether this is a reload or not. if (jobResult.getClientRequestId() != null && !jobResult.getClientRequestId().isEmpty() && !jobResult.getClientRequestId().equals(requestId)) { // If updating a job. 
if (options.isUpdate()) { throw new DataflowJobAlreadyUpdatedException( dataflowPipelineJob, String.format( "The job named %s with id: %s has already been updated into job id: %s " + "and cannot be updated again.", newJob.getName(), jobIdToUpdate, jobResult.getId())); } else { throw new DataflowJobAlreadyExistsException( dataflowPipelineJob, String.format( "There is already an active job named %s with id: %s. If you want to submit a" + " second job, try again by setting a different name using --jobName.", newJob.getName(), jobResult.getId())); } } LOG.info( "To access the Dataflow monitoring console, please navigate to {}", MonitoringUtil.getJobMonitoringPageURL( options.getProject(), options.getRegion(), jobResult.getId())); LOG.info("Submitted job: {}", jobResult.getId()); LOG.info( "To cancel the job using the 'gcloud' tool, run:\n> {}", MonitoringUtil.getGcloudCancelCommand(options, jobResult.getId())); return dataflowPipelineJob; }
// Verifies that a streaming pipeline containing no PAssert succeeds when the
// (mocked) Dataflow job reports DONE; no real service calls are made.
@Test
public void testRunStreamingJobNotUsingPAssertThatSucceeds() throws Exception {
    options.setStreaming(true);
    Pipeline p = TestPipeline.create(options);
    p.apply(Create.of(1, 2, 3));
    // Mock a job that is already finished successfully.
    DataflowPipelineJob mockJob = Mockito.mock(DataflowPipelineJob.class);
    when(mockJob.getState()).thenReturn(State.DONE);
    when(mockJob.waitUntilFinish(any(Duration.class), any(JobMessagesHandler.class)))
        .thenReturn(State.DONE);
    when(mockJob.getProjectId()).thenReturn("test-project");
    when(mockJob.getJobId()).thenReturn("test-job");
    DataflowRunner mockRunner = Mockito.mock(DataflowRunner.class);
    when(mockRunner.run(any(Pipeline.class))).thenReturn(mockJob);
    // No PAssert metrics expected for this scenario; return an empty metrics map.
    when(mockClient.getJobMetrics(anyString()))
        .thenReturn(generateMockStreamingMetricResponse(ImmutableMap.of()));
    TestDataflowRunner runner = TestDataflowRunner.fromOptionsAndClient(options, mockClient);
    runner.run(p, mockRunner);
}
// Propagates the "processing backlog" flag: records it on the checkpoint
// coordinator (when one exists), caches it locally, and broadcasts an
// IsProcessingBacklogEvent to every subtask of the source operator.
@Override
public void setIsProcessingBacklog(boolean isProcessingBacklog) {
    CheckpointCoordinator checkpointCoordinator =
            getCoordinatorContext().getCheckpointCoordinator();
    OperatorID operatorID = getCoordinatorContext().getOperatorId();
    if (checkpointCoordinator != null) {
        checkpointCoordinator.setIsProcessingBacklog(operatorID, isProcessingBacklog);
    }
    backlog = TernaryBoolean.fromBoolean(isProcessingBacklog);
    // Event dispatch must run on the coordinator thread; tasks that are not yet
    // ready are skipped by sendEventToSourceOperatorIfTaskReady.
    callInCoordinatorThread(
            () -> {
                final IsProcessingBacklogEvent isProcessingBacklogEvent =
                        new IsProcessingBacklogEvent(isProcessingBacklog);
                for (int i = 0; i < getCoordinatorContext().currentParallelism(); i++) {
                    sendEventToSourceOperatorIfTaskReady(i, isProcessingBacklogEvent);
                }
                return null;
            },
            "Failed to send BacklogEvent to reader.");
}
// Verifies that setIsProcessingBacklog broadcasts the current backlog flag to
// all subtasks, including readers registered around the flag changes.
@Test
void testSetIsProcessingBacklog() throws Exception {
    sourceReady();
    registerReader(0, 0);
    context.setIsProcessingBacklog(true);
    // The most recent event for every subtask must be "backlog on".
    for (int i = 0; i < context.currentParallelism(); ++i) {
        final List<OperatorEvent> events = receivingTasks.getSentEventsForSubtask(i);
        assertThat(events.get(events.size() - 1)).isEqualTo(new IsProcessingBacklogEvent(true));
    }
    registerReader(1, 0);
    context.setIsProcessingBacklog(false);
    registerReader(2, 0);
    // After clearing, the latest event for every subtask must be "backlog off".
    for (int i = 0; i < context.currentParallelism(); ++i) {
        final List<OperatorEvent> events = receivingTasks.getSentEventsForSubtask(i);
        assertThat(events.get(events.size() - 1))
            .isEqualTo(new IsProcessingBacklogEvent(false));
    }
}
/**
 * Validates the GitHub integration configuration.
 *
 * <p>Resolves the configured GitHub App id first; when no valid id is present
 * the check fails immediately with {@code INVALID_APP_ID_STATUS}. Otherwise a
 * {@link GithubAppConfiguration} is assembled from the settings and handed to
 * the detailed {@code checkConfig(GithubAppConfiguration)} overload.
 */
public ConfigCheckResult checkConfig() {
  return getAppId()
    .map(appId -> checkConfig(
      new GithubAppConfiguration(appId, gitHubSettings.privateKey(), gitHubSettings.apiURLOrDefault())))
    .orElseGet(() -> failedApplicationStatus(INVALID_APP_ID_STATUS));
}
// When fetching the GitHub App throws an IllegalArgumentException, the config
// check must report a failed application status for both provisioning modes
// and must list no installations.
@Test
public void checkConfig_whenIllegalArgumentExceptionWhileFetchingTheApp_shouldReturnFailedAppCheck() {
    mockGithubConfiguration();
    ArgumentCaptor<GithubAppConfiguration> appConfigurationCaptor = ArgumentCaptor.forClass(GithubAppConfiguration.class);
    IllegalArgumentException illegalArgumentException = mock(IllegalArgumentException.class);
    when(illegalArgumentException.getMessage()).thenReturn("Exception while fetching the App.");
    // Any app fetch fails with the mocked exception.
    when(githubClient.getApp(appConfigurationCaptor.capture())).thenThrow(illegalArgumentException);
    ConfigCheckResult checkResult = configValidator.checkConfig();
    assertThat(checkResult.application().autoProvisioning()).isEqualTo(ConfigStatus.failed(APP_FETCHING_FAILED));
    assertThat(checkResult.application().jit()).isEqualTo(ConfigStatus.failed(APP_FETCHING_FAILED));
    assertThat(checkResult.installations()).isEmpty();
}
// Sets the superclass of this generated class. No validation is performed here;
// a null argument clears the superclass.
public void setSuperClass(FullyQualifiedJavaType superClass) {
    this.superClass = superClass;
}
// A new InnerClass starts without a superclass; after setSuperClass(String)
// the stored type must report the given fully qualified name.
@Test
void testSetSuperClass() {
    InnerClass clazz = new InnerClass("com.foo.UserClass");
    assertFalse(clazz.getSuperClass().isPresent());
    clazz.setSuperClass("com.hoge.SuperClass");
    assertNotNull(clazz.getSuperClass());
    assertEquals("com.hoge.SuperClass", clazz.getSuperClass().get().getFullyQualifiedName());
}
// Uploads a local file to iRODS via Jargon's DataTransferOperations. On a
// completed transfer the server-side checksum is compared against a locally
// recomputed one (skipped when client-side encryption is enabled).
@Override
public Checksum upload(final Path file, final Local local, final BandwidthThrottle throttle,
                       final StreamListener listener, final TransferStatus status,
                       final ConnectionCallback callback) throws BackgroundException {
    try {
        final IRODSFileSystemAO fs = session.getClient();
        final IRODSFile f = fs.getIRODSFileFactory().instanceIRODSFile(file.getAbsolute());
        final TransferControlBlock block = DefaultTransferControlBlock.instance(StringUtils.EMPTY,
            new HostPreferences(session.getHost()).getInteger("connection.retry"));
        final TransferOptions options = new DefaultTransferOptionsConfigurer().configure(new TransferOptions());
        // Decide on parallel transfer: fall back to the global queue preference
        // when the host does not declare its own transfer type.
        if(Host.TransferType.unknown.equals(session.getHost().getTransferType())) {
            options.setUseParallelTransfer(Host.TransferType.valueOf(PreferencesFactory.get().getProperty("queue.transfer.type")).equals(Host.TransferType.concurrent));
        }
        else {
            options.setUseParallelTransfer(session.getHost().getTransferType().equals(Host.TransferType.concurrent));
        }
        block.setTransferOptions(options);
        final DataTransferOperations transfer = fs.getIRODSAccessObjectFactory().getDataTransferOperations(fs.getIRODSAccount());
        transfer.putOperation(new File(local.getAbsolute()), f, new DefaultTransferStatusCallbackListener(
            status, listener, block
        ), block);
        if(status.isComplete()) {
            final DataObjectChecksumUtilitiesAO checksum = fs
                .getIRODSAccessObjectFactory()
                .getDataObjectChecksumUtilitiesAO(fs.getIRODSAccount());
            final ChecksumValue value = checksum.computeChecksumOnDataObject(f);
            final Checksum fingerprint = Checksum.parse(value.getChecksumStringValue());
            if(null == fingerprint) {
                log.warn(String.format("Unsupported checksum algorithm %s", value.getChecksumEncoding()));
            }
            else {
                if(file.getType().contains(Path.Type.encrypted)) {
                    log.warn(String.format("Skip checksum verification for %s with client side encryption enabled", file));
                }
                else {
                    // Recompute locally with the same algorithm the server used and
                    // fail the upload on mismatch.
                    final Checksum expected = ChecksumComputeFactory.get(fingerprint.algorithm).compute(local.getInputStream(), new TransferStatus(status));
                    if(!expected.equals(fingerprint)) {
                        throw new ChecksumException(MessageFormat.format(LocaleFactory.localizedString("Upload {0} failed", "Error"), file.getName()),
                            MessageFormat.format("Mismatch between {0} hash {1} of uploaded data and ETag {2} returned by the server",
                                fingerprint.algorithm.toString(), expected, fingerprint.hash));
                    }
                }
            }
            // NOTE(review): may be null when the server checksum algorithm is
            // unsupported — callers appear to tolerate a null fingerprint; confirm.
            return fingerprint;
        }
        // Transfer did not complete; no checksum available.
        return null;
    }
    catch(JargonException e) {
        throw new IRODSExceptionMappingService().map(e);
    }
}
// Integration round-trip against a live iRODS endpoint: upload a random 32770
// byte file, verify byte count, checksum and read-back content, then delete.
@Test
public void testWrite() throws Exception {
    final ProtocolFactory factory = new ProtocolFactory(new HashSet<>(Collections.singleton(new IRODSProtocol())));
    final Profile profile = new ProfilePlistReader(factory).read(
        this.getClass().getResourceAsStream("/iRODS (iPlant Collaborative).cyberduckprofile"));
    final Host host = new Host(profile, profile.getDefaultHostname(), new Credentials(
        PROPERTIES.get("irods.key"), PROPERTIES.get("irods.secret")
    ));
    final IRODSSession session = new IRODSSession(host);
    session.open(new DisabledProxyFinder(), new DisabledHostKeyCallback(), new DisabledLoginCallback(), new DisabledCancelCallback());
    session.login(new DisabledLoginCallback(), new DisabledCancelCallback());
    // Write random content to a temporary local file.
    final Local local = new Local(System.getProperty("java.io.tmpdir"), UUID.randomUUID().toString());
    final int length = 32770;
    final byte[] content = RandomUtils.nextBytes(length);
    final OutputStream out = local.getOutputStream(false);
    IOUtils.write(content, out);
    out.close();
    final Checksum checksum;
    final Path test = new Path(new IRODSHomeFinderService(session).find(), UUID.randomUUID().toString(), EnumSet.of(Path.Type.file));
    final TransferStatus status = new TransferStatus().withLength(content.length);
    final TransferStatus copy = new TransferStatus(status);
    final BytecountStreamListener count = new BytecountStreamListener();
    checksum = new IRODSUploadFeature(session).upload(
        test, local, new BandwidthThrottle(BandwidthThrottle.UNLIMITED),
        count, status, new DisabledConnectionCallback());
    assertTrue(status.isComplete());
    assertEquals(content.length, count.getSent());
    // The checksum returned by the upload must equal a locally computed MD5.
    assertEquals(checksum, new MD5ChecksumCompute().compute(new FileInputStream(local.getAbsolute()), copy));
    // Read the object back and compare byte-for-byte.
    final byte[] buffer = new byte[content.length];
    final InputStream in = new IRODSReadFeature(session).read(test, new TransferStatus().withLength(content.length), new DisabledConnectionCallback());
    IOUtils.readFully(in, buffer);
    in.close();
    assertArrayEquals(content, buffer);
    new IRODSDeleteFeature(session).delete(Collections.singletonList(test), new DisabledLoginCallback(), new Delete.DisabledCallback());
    session.close();
}
/**
 * Initializes the active rules, exactly once.
 *
 * @param activeRules the rules to index by rule key; must not be null
 * @throws IllegalStateException if the rules were already initialized
 * @throws IllegalArgumentException if the same rule key appears more than once
 */
public void set(Collection<ActiveRule> activeRules) {
  requireNonNull(activeRules, "Active rules cannot be null");
  checkState(activeRulesByKey == null, "Active rules have already been initialized");
  Map<RuleKey, ActiveRule> byKey = new HashMap<>();
  for (ActiveRule rule : activeRules) {
    // put() returns the previous mapping, which signals a duplicate key.
    if (byKey.put(rule.getRuleKey(), rule) != null) {
      throw new IllegalArgumentException("Active rule must not be declared multiple times: " + rule.getRuleKey());
    }
  }
  activeRulesByKey = ImmutableMap.copyOf(byKey);
}
// Calling set() a second time must fail with an IllegalStateException carrying
// the "already been initialized" message.
@Test
public void can_not_set_twice() {
    assertThatThrownBy(() -> {
        underTest.set(asList(new ActiveRule(RULE_KEY, Severity.BLOCKER, Collections.emptyMap(), SOME_DATE, PLUGIN_KEY, QP_KEY)));
        underTest.set(Collections.emptyList());
    })
        .isInstanceOf(IllegalStateException.class)
        .hasMessage("Active rules have already been initialized");
}
// Deserializes the given byte range as an ASN.1 object into an instance of
// the @Asn1Entity-annotated type.
public <T> T read(byte[] data, int offset, int length, Class<T> type) {
    final Asn1Entity entity = type.getAnnotation(Asn1Entity.class);
    if (entity == null) {
        throw new Asn1Exception("Class should have Asn1Entity annotation");
    }
    // A tagNo of 0 means the entity is not wrapped in an explicit TLV tag.
    final boolean tlv = entity.tagNo() != 0;
    try (final Asn1ObjectInputStream in = new Asn1ObjectInputStream(data, offset, length, tlv)) {
        // When a tag is expected, it must match the annotation's declared tag.
        if (tlv && in.tagNo != entity.tagNo()) {
            throw new Asn1Exception("Tag %x does not match, expected %x", in.tagNo, entity.tagNo());
        }
        return readValue(in, entity.converter(), type);
    }
}
// Reading a SEQUENCE containing INTEGER 31 must leave 31 in the object's
// 'check' field — presumably populated by a post-construct hook (see test name).
@Test
public void shouldCallPostConstructAfterConstruction() {
    // 0x30 = SEQUENCE (length 3), 0x02 = INTEGER (length 1), value 31.
    final byte[] data = new byte[] { 0x30, 3, 0x02, 1, 31 };
    assertEquals(31, mapper.read(data, ConstructedObj.class).check);
}
@Override public OrganizedImports organizeImports(List<Import> imports) { // Group into static and non-static. Each group is a set sorted by type. Map<Boolean, ImmutableSortedSet<Import>> partionedByStatic = imports.stream() .collect( Collectors.partitioningBy( Import::isStatic, toImmutableSortedSet(Comparator.comparing(Import::getType)))); return new OrganizedImports() // Add groups, in the appropriate order. .addGroups(partionedByStatic, order.groupOrder()); }
// With STATIC_LAST, non-static imports come first (sorted by type), followed
// by a blank line and then all static imports (also sorted by type).
@Test
public void staticLastOrdering() {
    BasicImportOrganizer organizer = new BasicImportOrganizer(StaticOrder.STATIC_LAST);
    ImportOrganizer.OrganizedImports organized = organizer.organizeImports(IMPORTS);
    assertThat(organized.asImportBlock())
        .isEqualTo(
            "import android.foo;\n"
                + "import com.android.blah;\n"
                + "import java.ping;\n"
                + "import javax.pong;\n"
                + "import net.wilma;\n"
                + "import unknown.barney;\n"
                + "import unknown.fred;\n"
                + "\n"
                + "import static android.foo.bar;\n"
                + "import static com.android.blah.blah;\n"
                + "import static java.ping.pong;\n"
                + "import static javax.pong.ping;\n"
                + "import static net.wilma.flintstone;\n"
                + "import static unknown.fred.flintstone;\n");
}
public static void main(final String[] args) { // The captain can only operate rowing boats but with adapter he is able to // use fishing boats as well var captain = new Captain(new FishingBoatAdapter()); captain.row(); }
// Smoke test: running the demo application end-to-end must not throw.
@Test
void shouldExecuteApplicationWithoutException() {
    assertDoesNotThrow(() -> App.main(new String[]{}));
}
/**
 * Sets the byte order used by this input.
 *
 * @param byteOrder the byte order to use; must not be null
 * @throws IllegalArgumentException if {@code byteOrder} is null
 */
public void byteOrder(final ByteOrder byteOrder) {
    if (byteOrder == null) {
        throw new IllegalArgumentException("byteOrder cannot be null");
    }
    this.byteOrder = byteOrder;
}
// Reads two-byte chars from a sub-window (offset 30, length 14) of a buffer
// written via DataOutput.writeChars and checks them in order, including
// non-Latin (CJK) characters.
@Test
void shouldReadChar() throws Throwable {
    final UnsafeBuffer buffer = toUnsafeBuffer((out) -> out.writeChars("zażółć gęślą jaźń北查爾斯頓"));
    final DirectBufferDataInput dataInput = new DirectBufferDataInput(buffer, 30, 14);
    dataInput.byteOrder(byteOrder());
    assertEquals('ź', dataInput.readChar());
    assertEquals('ń', dataInput.readChar());
    assertEquals('北', dataInput.readChar());
    assertEquals('查', dataInput.readChar());
}
// This dialect always supports schemas.
@Override
public boolean isSchemaAvailable() {
    return true;
}
// The dialect metadata under test must report schema support.
@Test
void assertIsSchemaAvailable() {
    assertTrue(dialectDatabaseMetaData.isSchemaAvailable());
}
// Returns the index of the first occurrence of {@code str}, delegating to the
// two-argument overload with a starting position of 0.
public int indexOf(final String str) {
    return indexOf(str, 0);
}
// Exercises indexOf over codepoint-counted strings containing supplementary
// characters (an emoji surrogate pair), a combining mark, and the UCSTR
// fixture, checking positions both from the start and from index 13.
@Test
public void testIndexOf() {
    final UnicodeHelper lh = new UnicodeHelper("a", Method.CODEPOINTS);
    assertEquals(-1, lh.indexOf("b"));
    // Two identical halves, so every needle has a first and a second match.
    final UnicodeHelper lh2 = new UnicodeHelper(
        "a" + new String(Character.toChars(0x1f600)) + "a" + UCSTR + "A" + "k\u035fh" + "z"
            + "a" + new String(Character.toChars(0x1f600)) + "a" + UCSTR + "A" + "k\u035fh" + "z",
        Method.CODEPOINTS);
    assertEquals(1, lh2.indexOf(new String(Character.toChars(0x1f600))));
    assertEquals(14, lh2.indexOf(new String(Character.toChars(0x1f600)), 13));
    assertEquals(3, lh2.indexOf(UCSTR));
    assertEquals(16, lh2.indexOf(UCSTR, 13));
    assertEquals(10, lh2.indexOf("\u035f"));
    assertEquals(23, lh2.indexOf("\u035f", 13));
}
// Returns the native Java class backing this value type, lazily resolving and
// caching the delegate value-meta on first use.
@Override
public Class<?> getNativeDataTypeClass() throws KettleValueException {
    if ( nativeType == null ) {
        try {
            nativeType = ValueMetaFactory.createValueMeta( getType() );
        } catch ( KettlePluginException e ) {
            // Surface plugin lookup failures as value exceptions, preserving the cause.
            throw new KettleValueException( e );
        }
    }
    return nativeType.getNativeDataTypeClass();
}
// For every registered value-meta type, ValueMeta and the factory-created
// ValueMetaInterface must agree on the native data type class.
@Test
public void testGetNativeDataTypeClass() throws KettleException {
    PluginRegistry.addPluginType( ValueMetaPluginType.getInstance() );
    PluginRegistry.init();
    String[] valueMetaNames = ValueMetaFactory.getValueMetaNames();
    for ( int i = 0; i < valueMetaNames.length; i++ ) {
        int vmId = ValueMetaFactory.getIdForValueMeta( valueMetaNames[i] );
        ValueMeta vm = new ValueMeta( "", vmId );
        ValueMetaInterface vmi = ValueMetaFactory.createValueMeta( vmId );
        assertTrue( vm.getNativeDataTypeClass().equals( vmi.getNativeDataTypeClass() ) );
    }
}
/**
 * Reports whether the given folder has no contents.
 *
 * @param folder the directory to inspect; may be null
 * @return true when {@code folder} is null, cannot be listed (listFiles()
 *         returned null), or contains no entries
 */
public static boolean isFolderEmpty(File folder) {
    if (folder == null) {
        return true;
    }
    File[] contents = folder.listFiles();
    return contents == null || contents.length == 0;
}
// A directory with no entries must be reported as empty.
@Test
void folderIsEmptyWhenItHasNoContents() {
    assertThat(FileUtil.isFolderEmpty(folder)).isTrue();
}
// Converts a list of PMML LinearNorm models to their KiePMML counterparts,
// preserving order.
static List<KiePMMLLinearNorm> getKiePMMLLinearNorms(List<LinearNorm> linearNorms) {
    return linearNorms.stream().map(KiePMMLLinearNormInstanceFactory::getKiePMMLLinearNorm).collect(Collectors.toList());
}
// Converts three random LinearNorms and verifies each result against its
// source element, position by position.
@Test
void getKiePMMLLinearNorms() {
    List<LinearNorm> toConvert = IntStream.range(0, 3).mapToObj(i -> getRandomLinearNorm()).collect(Collectors.toList());
    List<KiePMMLLinearNorm> retrieved = KiePMMLLinearNormInstanceFactory.getKiePMMLLinearNorms(toConvert);
    IntStream.range(0, 3).forEach(i -> commonVerifyKiePMMLLinearNorm(retrieved.get(i), toConvert.get(i)));
}
public void mirrorKeys() { /* how to mirror? width = 55 [0..15] [20..35] [40..55] phase 1: multiple by -1 [0] [-20] [-40] phase 2: add keyboard width [55] [35] [15] phase 3: subtracting the key's width [40] [20] [0] cool? */ final int keyboardWidth = getMinWidth(); for (Key k : getKeys()) { var newX = -1 * k.x; // phase 1 newX += keyboardWidth; // phase 2 newX -= k.width; // phase 3 k.x = newX; } }
// Mirrors a two-row popup keyboard whose second row ("as") is shorter than the
// first ("qwert") and checks every key's X position is reflected while row
// (Y) positions and key order are unchanged.
@Test
public void testKeyboardPopupSupportsMirrorMultipleRowsNotFullNotBalanced() throws Exception {
    String popupCharacters = "qwertas";
    // as
    // qwert
    AnyPopupKeyboard keyboard =
        new AnyPopupKeyboard(
            new DefaultAddOn(getApplicationContext(), getApplicationContext()),
            getApplicationContext(),
            popupCharacters,
            SIMPLE_KeyboardDimens,
            "POP_KEYBOARD");
    int vGap = (int) SIMPLE_KeyboardDimens.getRowVerticalGap();
    int keyHeight = (int) SIMPLE_KeyboardDimens.getNormalKeyHeight();
    int hGap = (int) SIMPLE_KeyboardDimens.getKeyHorizontalGap();
    final int keyWidth =
        (int)
            (SIMPLE_KeyboardDimens.getKeyboardMaxWidth()
                - SIMPLE_KeyboardDimens.getKeyHorizontalGap() * popupCharacters.length())
            / 10;
    Assert.assertEquals(7, keyboard.getKeys().size());
    // Pre-mirror layout: bottom row q..t, top row a,s starting at x=0.
    assertKeyValues(keyboard, 'q', vGap + keyHeight + vGap, 0);
    assertKeyValues(keyboard, 'w', vGap + keyHeight + vGap, keyWidth);
    assertKeyValues(keyboard, 'e', vGap + keyHeight + vGap, hGap + 2 * keyWidth);
    assertKeyValues(keyboard, 'r', vGap + keyHeight + vGap, 2 * hGap + 3 * keyWidth);
    assertKeyValues(keyboard, 't', vGap + keyHeight + vGap, 3 * hGap + 4 * keyWidth);
    assertKeyValues(keyboard, 'a', vGap, 0);
    assertKeyValues(keyboard, 's', vGap, keyWidth);
    keyboard.mirrorKeys();
    // same order, mirrored X position
    Assert.assertEquals(7, keyboard.getKeys().size());
    assertKeyValues(keyboard, 'q', vGap + keyHeight + vGap, 5 * hGap + 4 * keyWidth);
    assertKeyValues(keyboard, 'w', vGap + keyHeight + vGap, 4 * hGap + 3 * keyWidth);
    assertKeyValues(keyboard, 'e', vGap + keyHeight + vGap, 3 * hGap + 2 * keyWidth);
    assertKeyValues(keyboard, 'r', vGap + keyHeight + vGap, 2 * hGap + keyWidth);
    assertKeyValues(keyboard, 't', vGap + keyHeight + vGap, hGap);
    assertKeyValues(keyboard, 'a', vGap, 5 * hGap + 4 * keyWidth);
    assertKeyValues(keyboard, 's', vGap, 4 * hGap + 3 * keyWidth);
}
// Creates a TimestampedValue stamped with the minimum representable timestamp
// (BoundedWindow.TIMESTAMP_MIN_VALUE).
public static <V> TimestampedValue<V> atMinimumTimestamp(V value) {
    return of(value, BoundedWindow.TIMESTAMP_MIN_VALUE);
}
// atMinimumTimestamp must stamp the value with BoundedWindow.TIMESTAMP_MIN_VALUE.
@Test
public void testAtMinimumTimestamp() {
    TimestampedValue<String> tsv = TimestampedValue.atMinimumTimestamp("foobar");
    assertEquals(BoundedWindow.TIMESTAMP_MIN_VALUE, tsv.getTimestamp());
}
// Opens the MongoDB change-stream source: builds a client, scopes the watch to
// the configured database/collection (everything when unset), and subscribes
// a handler that forwards each change document to consume().
@Override
public void open(Map<String, Object> config, SourceContext sourceContext) throws Exception {
    log.info("Open MongoDB Source");
    mongoSourceConfig = MongoSourceConfig.load(config, sourceContext);
    mongoSourceConfig.validate();
    // Prefer an injected client provider when one is configured.
    if (clientProvider != null) {
        mongoClient = clientProvider.get();
    } else {
        mongoClient = MongoClients.create(mongoSourceConfig.getMongoUri());
    }
    String mongoDatabase = mongoSourceConfig.getDatabase();
    if (StringUtils.isEmpty(mongoDatabase)) {
        // Watch all databases
        log.info("Watch all databases");
        stream = mongoClient.watch();
    } else {
        final MongoDatabase db = mongoClient.getDatabase(mongoDatabase);
        String mongoCollection = mongoSourceConfig.getCollection();
        if (StringUtils.isEmpty(mongoCollection)) {
            // Watch all collections in a database
            log.info("Watch db: {}", db.getName());
            stream = db.watch();
        } else {
            // Watch a collection
            final MongoCollection<Document> collection = db.getCollection(mongoCollection);
            log.info("Watch collection: {}.{}", db.getName(), mongoCollection);
            stream = collection.watch();
        }
    }
    stream.batchSize(mongoSourceConfig.getBatchSize())
        .fullDocument(FullDocument.UPDATE_LOOKUP);
    if (mongoSourceConfig.getSyncType() == SyncType.FULL_SYNC) {
        // sync currently existing messages
        // startAtOperationTime is the starting point for the change stream
        // setting startAtOperationTime to 0 means the start point is the earliest
        // see https://www.mongodb.com/docs/v4.2/reference/method/db.collection.watch/ for more information
        stream.startAtOperationTime(new BsonTimestamp(0L));
    }
    stream.subscribe(new Subscriber<ChangeStreamDocument<Document>>() {
        private ObjectMapper mapper = new ObjectMapper();
        private Subscription subscription;

        @Override
        public void onSubscribe(Subscription subscription) {
            // Request an effectively unbounded number of change events up front.
            this.subscription = subscription;
            this.subscription.request(Integer.MAX_VALUE);
        }

        @Override
        public void onNext(ChangeStreamDocument<Document> doc) {
            try {
                log.info("New change doc: {}", doc);
                BsonDocument documentKey = doc.getDocumentKey();
                if (documentKey == null) {
                    // Without a key the record cannot be identified; drop it.
                    log.warn("The document key is null");
                    return;
                }
                // Build a record with the essential information
                final Map<String, Object> recordValue = new HashMap<>();
                recordValue.put("fullDocument", doc.getFullDocument());
                recordValue.put("ns", doc.getNamespace());
                recordValue.put("operation", doc.getOperationType());
                consume(new DocRecord(
                    Optional.of(documentKey.toJson()),
                    mapper.writeValueAsString(recordValue).getBytes(StandardCharsets.UTF_8)));
            } catch (JsonProcessingException e) {
                // Best-effort: log and continue with the next change event.
                log.error("Processing doc from mongo", e);
            }
        }

        @Override
        public void onError(Throwable error) {
            log.error("Subscriber error", error);
        }

        @Override
        public void onComplete() {
            log.info("Subscriber complete");
        }
    });
}
// Smoke test: opening the source with the prepared config map must not throw.
@Test
public void testOpen() throws Exception {
    source.open(map, mockSourceContext);
}
/**
 * Creates a committer for the given output path by resolving the committer
 * factory from the task's configuration and delegating creation to it.
 *
 * @param outputPath destination of the job output (a null path is tolerated by
 *                   some factories — see the null-path test)
 * @param context task attempt context supplying the configuration
 * @return the committer produced by the resolved factory
 * @throws IOException if factory lookup or committer creation fails
 */
public static PathOutputCommitter createCommitter(Path outputPath,
    TaskAttemptContext context) throws IOException {
  return getCommitterFactory(outputPath, context.getConfiguration())
      .createOutputCommitter(outputPath, context);
}
// The named committer factory must tolerate a null output path; the created
// committer then reports a null output path as well.
@Test
public void testNamedCommitterFactoryNullPath() throws Throwable {
    Configuration conf = new Configuration();
    // set up for the schema factory
    conf.set(COMMITTER_FACTORY_CLASS, NAMED_COMMITTER_FACTORY);
    conf.set(NAMED_COMMITTER_CLASS, SimpleCommitter.class.getName());
    SimpleCommitter sc = createCommitter(
        NamedCommitterFactory.class,
        SimpleCommitter.class, null, conf);
    assertNull(sc.getOutputPath());
}
/**
 * Decides whether freshly resolved AmazonInfo should replace the current one.
 *
 * <p>Updates are skipped when the new metadata is empty, identical to the old
 * info, or missing the instanceId/localIpv4 keys. When an update is accepted,
 * the added and removed metadata keys are logged.
 */
static boolean shouldUpdate(AmazonInfo newInfo, AmazonInfo oldInfo) {
    if (newInfo.getMetadata().isEmpty()) {
        logger.warn("Newly resolved AmazonInfo is empty, skipping an update cycle");
        return false;
    }
    if (newInfo.equals(oldInfo)) {
        // Nothing changed; no update needed.
        return false;
    }
    if (isBlank(newInfo.get(AmazonInfo.MetaDataKey.instanceId))) {
        logger.warn("instanceId is blank, skipping an update cycle");
        return false;
    }
    if (isBlank(newInfo.get(AmazonInfo.MetaDataKey.localIpv4))) {
        logger.warn("localIpv4 is blank, skipping an update cycle");
        return false;
    }
    // Log the key-level diff: keys only in the new info, and keys only in the old.
    Set<String> addedKeys = new HashSet<>(newInfo.getMetadata().keySet());
    addedKeys.removeAll(oldInfo.getMetadata().keySet());
    Set<String> removedKeys = new HashSet<>(oldInfo.getMetadata().keySet());
    removedKeys.removeAll(newInfo.getMetadata().keySet());
    for (String key : addedKeys) {
        logger.info("Adding new metadata {}={}", key, newInfo.getMetadata().get(key));
    }
    for (String key : removedKeys) {
        logger.info("Removing old metadata {}={}", key, oldInfo.getMetadata().get(key));
    }
    return true;
}
/** A freshly constructed (empty) AmazonInfo must not replace existing instance metadata. */
@Test
public void testAmazonInfoNoUpdateIfEmpty() {
    AmazonInfo oldInfo = (AmazonInfo) instanceInfo.getDataCenterInfo();
    AmazonInfo newInfo = new AmazonInfo();
    assertThat(RefreshableAmazonInfoProvider.shouldUpdate(newInfo, oldInfo), is(false));
}
/**
 * Processes a menu entry: entries explicitly flagged as not allowed are skipped;
 * otherwise a submenu or a plain action item is added depending on the entry's
 * children.
 */
@Override
public void visit(Entry entry) {
    final Object allowed = entry.getAttribute("allowed");
    if (Boolean.FALSE.equals(allowed)) {
        // Explicitly disallowed entries are ignored entirely.
        return;
    }
    if (containsSubmenu(entry)) {
        addSubmenu(entry);
    } else {
        addActionItem(entry);
    }
}
/** Visiting a submenu entry must attach a JMenu under the parent menu's popup, without an action. */
@Test
public void createsSubmenuWithoutAction() {
    Entry parentMenuEntry = new Entry();
    final JMenu parentMenu = new JMenu();
    new EntryAccessor().setComponent(parentMenuEntry, parentMenu);
    parentMenuEntry.addChild(menuEntry);
    menuEntry.addChild(actionEntry);
    menuActionGroupBuilder.visit(menuEntry);
    JMenu item = (JMenu) new EntryAccessor().getComponent(menuEntry);
    assertThat(item.getParent(), CoreMatchers.equalTo(parentMenu.getPopupMenu()));
}
/**
 * Deserializes a delimited (CSV-style) record into a list of column values.
 *
 * <p>Only the first CSV record in the payload is used. Each field is parsed by
 * the positionally matching {@code parsers} entry; empty or null fields become
 * {@code null}. Column count must match the parser count.
 *
 * <p>Note: the broad catch below intentionally re-wraps even the
 * SerializationExceptions thrown earlier in this method into a new
 * SerializationException with message "Error deserializing delimited" — tests
 * rely on the original message being available via the cause.
 *
 * @param topic topic the bytes were consumed from (used in error reporting)
 * @param bytes raw UTF-8 encoded CSV payload, may be {@code null}
 * @return parsed column values, or {@code null} for a null payload
 */
@Override
public List<?> deserialize(final String topic, final byte[] bytes) {
    if (bytes == null) {
        return null;
    }
    try {
        final String recordCsvString = new String(bytes, StandardCharsets.UTF_8);
        final List<CSVRecord> csvRecords = CSVParser.parse(recordCsvString, csvFormat)
            .getRecords();
        if (csvRecords.isEmpty()) {
            throw new SerializationException("No fields in record");
        }
        final CSVRecord csvRecord = csvRecords.get(0);
        if (csvRecord == null || csvRecord.size() == 0) {
            throw new SerializationException("No fields in record.");
        }
        SerdeUtils.throwOnColumnCountMismatch(parsers.size(), csvRecord.size(), false, topic);
        final List<Object> values = new ArrayList<>(parsers.size());
        final Iterator<Parser> pIt = parsers.iterator();
        for (int i = 0; i < csvRecord.size(); i++) {
            final String value = csvRecord.get(i);
            final Parser parser = pIt.next();
            // Empty string is treated the same as a missing value.
            final Object parsed = value == null || value.isEmpty()
                ? null
                : parser.parse(value);
            values.add(parsed);
        }
        return values;
    } catch (final Exception e) {
        throw new SerializationException("Error deserializing delimited", e);
    }
}
/** A BYTES column fed non-Base64 text must fail with a descriptive cause message. */
@Test
public void shouldThrowOnNonBase64Bytes() {
    // Given:
    KsqlDelimitedDeserializer deserializer = createDeserializer(persistenceSchema(
        column(
            "bytes",
            SqlTypes.BYTES
        )
    ));
    final byte[] bytes = "a".getBytes(StandardCharsets.UTF_8);
    // When:
    final Exception e = assertThrows(
        SerializationException.class,
        () -> deserializer.deserialize("", bytes)
    );
    // Then:
    assertThat(e.getCause().getMessage(),
        containsString("Value is not a valid Base64 encoded string: a"));
}
/**
 * Recursively creates the directory denoted by {@code file} (and any missing
 * parents) over the given SFTP channel.
 *
 * <p>The client's working directory is restored after each {@code mkdir}.
 *
 * <p>Fixes vs. previous version: the {@link SftpException} cause is now
 * preserved when wrapping into {@link IOException}, and the always-true
 * {@code succeeded} flag (dead logic) has been removed.
 *
 * @param client connected SFTP channel
 * @param file path to create; relative paths are resolved against the
 *             channel's current working directory
 * @param permission requested permission (passed through for parents)
 * @return {@code true} if the directory exists or was created
 * @throws IOException if the remote operation fails, or if {@code file}
 *                     already exists as a regular file
 */
private boolean mkdirs(ChannelSftp client, Path file, FsPermission permission)
    throws IOException {
  boolean created = true;
  Path workDir;
  try {
    workDir = new Path(client.pwd());
  } catch (SftpException e) {
    throw new IOException(e);
  }
  Path absolute = makeAbsolute(workDir, file);
  String pathName = absolute.getName();
  if (!exists(client, absolute)) {
    Path parent = absolute.getParent();
    // Recursively ensure the parent chain exists first.
    created = (parent == null || mkdirs(client, parent, FsPermission.getDefault()));
    if (created) {
      // NOTE(review): if parent is null here this NPEs (as in the original);
      // in practice the filesystem root always exists so this branch is not hit.
      String parentDir = parent.toUri().getPath();
      try {
        final String previousCwd = client.pwd();
        client.cd(parentDir);
        client.mkdir(pathName);
        client.cd(previousCwd);
      } catch (SftpException e) {
        // Preserve the SFTP failure as the cause for diagnosability.
        throw new IOException(
            String.format(E_MAKE_DIR_FORPATH, pathName, parentDir), e);
      }
    }
  } else if (isFile(client, absolute)) {
    throw new IOException(String.format(E_DIR_CREATE_FROMFILE, absolute));
  }
  return created;
}
/** mkdirs must create the full directory chain, visible via the local FS, using one pooled connection. */
@Test
public void testMkDirs() throws IOException {
    Path path = new Path(localDir.toUri().getPath(),
        new Path(name.getMethodName(), "subdirectory"));
    sftpFs.mkdirs(path);
    assertTrue(localFs.exists(path));
    assertTrue(localFs.getFileStatus(path).isDirectory());
    // The recursive creation should not have leaked extra connections.
    assertThat(
        ((SFTPFileSystem) sftpFs).getConnectionPool().getLiveConnCount())
        .isEqualTo(1);
}
/**
 * Initiates an upgrade of the named service by loading the new spec from a
 * local JSON file and PUTting it to the service REST endpoint.
 *
 * <p>All failures are logged and converted into {@code EXIT_EXCEPTION_THROWN}
 * rather than propagated.
 *
 * @param appName name of the running service
 * @param fileName local path of the JSON service spec
 * @param autoFinalize whether the upgrade should finalize automatically
 * @return the exit code derived from the REST response, or
 *         {@code EXIT_EXCEPTION_THROWN} on any failure
 */
@Override
public int initiateUpgrade(String appName, String fileName, boolean autoFinalize)
    throws IOException, YarnException {
  int result;
  try {
    Service service = loadAppJsonFromLocalFS(fileName, appName, null, null);
    // Choose the target state based on the auto-finalize flag.
    service.setState(autoFinalize
        ? ServiceState.UPGRADING_AUTO_FINALIZE
        : ServiceState.UPGRADING);
    String buffer = jsonSerDeser.toJson(service);
    ClientResponse response = getApiClient(getServicePath(appName))
        .put(ClientResponse.class, buffer);
    result = processResponse(response);
  } catch (Exception e) {
    LOG.error("Failed to upgrade application: ", e);
    result = EXIT_EXCEPTION_THROWN;
  }
  return result;
}
/** initiateUpgrade with a valid local spec must succeed against the mock API service. */
@Test
void testInitiateServiceUpgrade() {
    String appName = "example-app";
    String upgradeFileName = "target/test-classes/example-app.json";
    try {
        int result = asc.initiateUpgrade(appName, upgradeFileName, false);
        assertEquals(EXIT_SUCCESS, result);
    } catch (IOException | YarnException e) {
        fail();
    }
}
/**
 * Returns the 1-based day-of-year of the given date.
 *
 * @param date the date to inspect
 * @return day of year, e.g. 1 for January 1st
 */
public static int dayOfYear(Date date) {
    final DateTime dateTime = DateTime.of(date);
    return dateTime.dayOfYear();
}
/** dayOfYear of Jan 1st must be 1; 2020 is a leap year with 366 days. */
@Test
public void dayOfYearTest() {
    final int dayOfYear = DateUtil.dayOfYear(DateUtil.parse("2020-01-01"));
    assertEquals(1, dayOfYear);
    final int lengthOfYear = DateUtil.lengthOfYear(2020);
    assertEquals(366, lengthOfYear);
}
/**
 * Executes the given task synchronously on the calling thread and returns an
 * already-completed future holding either its result or the thrown exception.
 *
 * @param task the callable to run immediately
 * @return a completed future with the task's outcome
 */
@Override
@Nonnull
public <T> Future<T> submit(@Nonnull Callable<T> task) {
    throwRejectedExecutionExceptionIfShutdown();
    T value = null;
    Exception failure = null;
    try {
        value = task.call();
    } catch (Exception e) {
        failure = e;
    }
    // Exactly one of (value, failure) is meaningful, mirroring the task outcome.
    return new CompletedFuture<>(value, failure);
}
/** Submitting after shutdown must be rejected with a RejectedExecutionException. */
@Test
void testRejectedSubmitWithResult() {
    testRejectedExecutionException(testInstance -> testInstance.submit(() -> {}, null));
}
/** Returns the current value of the {@code isActive} flag. */
@Override
public boolean isActive() {
    return isActive;
}
/**
 * Simulates a client-side producer-creation timeout: the first producer (on a
 * topic whose ledger open is delayed) is closed by the client before the topic
 * opens; subsequent producers on an open topic must succeed, and no late
 * response must arrive for the cancelled producer.
 */
@Test(timeOut = 30000, skipFailedInvocations = true)
public void testCreateProducerBookieTimeout() throws Exception {
    resetChannel();
    setChannelConnected();
    // Delay the topic creation in a deterministic way
    CompletableFuture<Runnable> openFailedTopic = new CompletableFuture<>();
    doAnswer(invocationOnMock -> {
        openFailedTopic.complete(
            () -> ((OpenLedgerCallback) invocationOnMock.getArguments()[2]).openLedgerComplete(ledgerMock, null));
        return null;
    }).when(pulsarTestContext.getManagedLedgerFactory())
        .asyncOpen(matches(".*fail.*"), any(ManagedLedgerConfig.class), any(OpenLedgerCallback.class),
            any(Supplier.class), any());
    // In a create producer timeout from client side we expect to see this sequence of commands :
    // 1. create a failure producer which will timeout creation after 100msec
    // 2. close producer
    // 3. Recreate producer (triggered by reconnection logic)
    // 4. Wait till the timeout of 1, and create producer again.
    // These operations need to be serialized, to allow the last create producer to finally succeed
    // (There can be more create/close pairs in the sequence, depending on the client timeout
    String producerName = "my-producer";
    ByteBuf createProducer1 = Commands.newProducer(failTopicName, 1 /* producer id */, 1 /* request id */,
        producerName, Collections.emptyMap(), false);
    channel.writeInbound(createProducer1);
    ByteBuf closeProducer = Commands.newCloseProducer(1 /* producer id */, 2 /* request id */);
    channel.writeInbound(closeProducer);
    ByteBuf createProducer2 = Commands.newProducer(successTopicName, 1 /* producer id */, 3 /* request id */,
        producerName, Collections.emptyMap(), false);
    channel.writeInbound(createProducer2);
    // Now the topic gets opened.. It will make 2nd producer creation successful
    openFailedTopic.get().run();
    // Close succeeds
    Object response = getResponse();
    assertEquals(response.getClass(), CommandSuccess.class);
    assertEquals(((CommandSuccess) response).getRequestId(), 2);
    // 2nd producer success as topic is opened
    response = getResponse();
    assertEquals(response.getClass(), CommandProducerSuccess.class);
    assertEquals(((CommandProducerSuccess) response).getRequestId(), 3);
    // Wait till the failtopic timeout interval
    Thread.sleep(500);
    ByteBuf createProducer3 = Commands.newProducer(successTopicName, 1 /* producer id */, 4 /* request id */,
        producerName, Collections.emptyMap(), false);
    channel.writeInbound(createProducer3);
    // 3rd producer succeeds because 2nd is already connected
    response = getResponse();
    assertEquals(response.getClass(), CommandProducerSuccess.class);
    assertEquals(((CommandProducerSuccess) response).getRequestId(), 4);
    Thread.sleep(500);
    // We should not receive response for 1st producer, since it was cancelled by the close
    assertTrue(channel.outboundMessages().isEmpty());
    assertTrue(channel.isActive());
    channel.finish();
}
/**
 * Collects the tablet commit info of every compaction task in this job into a
 * single flat list.
 *
 * @return concatenation of each task's {@code buildTabletCommitInfo()} result
 */
public List<TabletCommitInfo> buildTabletCommitInfo() {
    return tasks.stream()
            .flatMap(task -> task.buildTabletCommitInfo().stream())
            .collect(Collectors.toList());
}
/** Building commit info for a freshly constructed (taskless) compaction job must not throw. */
@Test
public void testBuildTabletCommitInfo() {
    Database db = new Database();
    Table table = new Table(Table.TableType.CLOUD_NATIVE);
    PhysicalPartition partition = new PhysicalPartitionImpl(0, "", 1, 2, null);
    CompactionJob job = new CompactionJob(db, table, partition, 10010, false);
    assertDoesNotThrow(() -> {
        job.buildTabletCommitInfo();
    });
}
private void collectTomcatInformations(List<JavaInformations> javaInformationsList) throws IOException { int tomcatBusyThreads = 0; long bytesReceived = 0; long bytesSent = 0; boolean tomcatUsed = false; for (final JavaInformations javaInformations : javaInformationsList) { for (final TomcatInformations tomcatInformations : javaInformations .getTomcatInformationsList()) { tomcatBusyThreads = add(tomcatInformations.getCurrentThreadsBusy(), tomcatBusyThreads); bytesReceived = add(tomcatInformations.getBytesReceived(), bytesReceived); bytesSent = add(tomcatInformations.getBytesSent(), bytesSent); tomcatUsed = true; } } if (tomcatUsed) { // collecte des informations de Tomcat collectTomcatValues(tomcatBusyThreads, bytesReceived, bytesSent); } }
/** Registers fake Tomcat MBeans and verifies two collection passes run without error. */
@Test
public void testCollectTomcatInformations() throws JMException {
    final MBeanServer mBeanServer = MBeans.getPlatformMBeanServer();
    final List<ObjectName> mBeans = new ArrayList<>();
    try {
        mBeans.add(mBeanServer
            .registerMBean(new ThreadPool(),
                new ObjectName("Catalina:type=ThreadPool,name=jk-8009"))
            .getObjectName());
        mBeans.add(
            mBeanServer
                .registerMBean(new GlobalRequestProcessor(),
                    new ObjectName(
                        "Catalina:type=GlobalRequestProcessor,name=jk-8009"))
                .getObjectName());
        TomcatInformations.initMBeans();
        final Collector collector = new Collector(TEST, List.of(new Counter("http", null)));
        // first time to initialize against NOT_A_NUMBER
        collector.collectWithoutErrors(
            Collections.singletonList(new JavaInformations(null, true)));
        // second time to add value
        collector.collectWithoutErrors(
            Collections.singletonList(new JavaInformations(null, true)));
    } finally {
        // Always unregister the fake MBeans so other tests see a clean server.
        for (final ObjectName registeredMBean : mBeans) {
            mBeanServer.unregisterMBean(registeredMBean);
        }
        TomcatInformations.initMBeans();
    }
}
/**
 * Sends this request synchronously via the underlying web3j service and
 * returns the response deserialized as {@code responseType}.
 *
 * @return the deserialized response
 * @throws IOException if the transport fails
 */
public T send() throws IOException {
    return web3jService.send(this, responseType);
}
/** eth_getLogs with a single topic must serialize to the exact JSON-RPC payload. */
@Test
public void testEthGetLogs() throws Exception {
    web3j.ethGetLogs(
            new EthFilter()
                .addSingleTopic(
                    "0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b"))
        .send();
    verifyResult(
        "{\"jsonrpc\":\"2.0\",\"method\":\"eth_getLogs\","
            + "\"params\":[{\"topics\":["
            + "\"0x000000000000000000000000a94f5374fce5edbc8e2a8697c15331677e6ebf0b\"]}],"
            + "\"id\":1}");
}
/**
 * Rewrites a SELECT statement into the equivalent COUNT query for pagination,
 * replacing the select list with {@code count(1)}.
 *
 * <p>For PostgreSQL and SQL Server a trailing {@code ORDER BY} clause is
 * stripped first, since it is meaningless for a count and may reference
 * select-list aliases.
 *
 * <p>NOTE(review): the lookbehind regex is a best-effort text rewrite — it
 * matches up to the last "from", so deeply nested sub-selects may be rewritten
 * incorrectly (same limitation as before).
 *
 * @param sourceSql original SELECT statement
 * @param dbType database type key (e.g. "mysql", "oracle", "postgresql")
 * @return the COUNT form of the query
 * @throws NotSupportYetException if {@code dbType} is not supported
 */
public static String countSql(String sourceSql, String dbType) {
    switch (dbType) {
        case "mysql":
        case "h2":
        case "oceanbase":
        case "oracle":
        case "dm":
            return replaceSelectListWithCount(sourceSql);
        case "postgresql":
        case "sqlserver":
            int lastIndexOfOrderBy = sourceSql.toLowerCase().lastIndexOf("order by");
            if (lastIndexOfOrderBy != -1) {
                // Drop the trailing ORDER BY before rewriting the select list.
                return replaceSelectListWithCount(sourceSql.substring(0, lastIndexOfOrderBy));
            }
            return replaceSelectListWithCount(sourceSql);
        default:
            throw new NotSupportYetException("PageUtil not support this dbType:" + dbType);
    }
}

/** Replaces everything between SELECT and FROM with {@code count(1)} (case-insensitive). */
private static String replaceSelectListWithCount(String sql) {
    return sql.replaceAll("(?i)(?<=select)(.*)(?=from)", " count(1) ");
}
/** countSql must produce "select count(1) ..." for all supported dialects and reject unknown ones. */
@Test
void testCountSql() {
    String sourceSql = "select * from test where a = 1";
    String targetSql = "select count(1) from test where a = 1";
    assertEquals(PageUtil.countSql(sourceSql, "mysql"), targetSql);
    assertEquals(PageUtil.countSql(sourceSql, "h2"), targetSql);
    assertEquals(PageUtil.countSql(sourceSql, "postgresql"), targetSql);
    assertEquals(PageUtil.countSql(sourceSql, "oceanbase"), targetSql);
    assertEquals(PageUtil.countSql(sourceSql, "dm"), targetSql);
    assertEquals(PageUtil.countSql(sourceSql, "oracle"), targetSql);
    assertEquals(PageUtil.countSql(sourceSql, "sqlserver"), targetSql);
    assertThrows(NotSupportYetException.class, () -> PageUtil.countSql(sourceSql, "xxx"));
}
/**
 * Parses a CGM Status characteristic value.
 *
 * <p>Expected layout (little endian):
 * <ul>
 *   <li>bytes 0-1: time offset (UINT16)</li>
 *   <li>byte 2: warning status</li>
 *   <li>byte 3: calibration/temperature status</li>
 *   <li>byte 4: sensor status</li>
 *   <li>bytes 5-6 (optional): E2E CRC, CRC-16/MCRF4XX over bytes 0-4</li>
 * </ul>
 *
 * <p>Packets of any other size are reported via {@code onInvalidDataReceived};
 * a CRC mismatch is reported via
 * {@code onContinuousGlucoseMonitorStatusReceivedWithCrcError}.
 */
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    super.onDataReceived(device, data);
    // Valid packets are exactly 5 bytes (no CRC) or 7 bytes (with CRC).
    if (data.size() != 5 && data.size() != 7) {
        onInvalidDataReceived(device, data);
        return;
    }
    final int timeOffset = data.getIntValue(Data.FORMAT_UINT16_LE, 0);
    final int warningStatus = data.getIntValue(Data.FORMAT_UINT8, 2);
    final int calibrationTempStatus = data.getIntValue(Data.FORMAT_UINT8, 3);
    final int sensorStatus = data.getIntValue(Data.FORMAT_UINT8, 4);
    final boolean crcPresent = data.size() == 7;
    if (crcPresent) {
        // Verify the trailing CRC against the first 5 bytes of the packet.
        final int actualCrc = CRC16.MCRF4XX(data.getValue(), 0, 5);
        final int expectedCrc = data.getIntValue(Data.FORMAT_UINT16_LE, 5);
        if (actualCrc != expectedCrc) {
            onContinuousGlucoseMonitorStatusReceivedWithCrcError(device, data);
            return;
        }
    }
    final CGMStatus status = new CGMStatus(warningStatus, calibrationTempStatus, sensorStatus);
    onContinuousGlucoseMonitorStatusChanged(device, status, timeOffset, crcPresent);
}
/** A 5-byte (CRC-less) status packet must decode every flag and the time offset correctly. */
@Test
public void onContinuousGlucoseMonitorStatusChanged_noCrc() {
    final DataReceivedCallback callback = new CGMStatusDataCallback() {
        @Override
        public void onContinuousGlucoseMonitorStatusChanged(@NonNull final BluetoothDevice device,
                                                            @NonNull final CGMStatus status,
                                                            final int timeOffset,
                                                            final boolean secured) {
            assertNotNull("Status present", status);
            assertTrue(status.sessionStopped);
            assertFalse(status.deviceBatteryLow);
            assertFalse(status.sensorTypeIncorrectForDevice);
            assertFalse(status.sensorMalfunction);
            assertFalse(status.deviceSpecificAlert);
            assertFalse(status.generalDeviceFault);
            assertTrue(status.timeSyncRequired);
            assertFalse(status.calibrationNotAllowed);
            assertFalse(status.calibrationRecommended);
            assertFalse(status.calibrationRequired);
            assertFalse(status.sensorTemperatureTooHigh);
            assertFalse(status.sensorTemperatureTooLow);
            assertTrue(status.sensorResultLowerThenPatientLowLevel);
            assertFalse(status.sensorResultHigherThenPatientHighLevel);
            assertFalse(status.sensorResultLowerThenHypoLevel);
            assertFalse(status.sensorResultHigherThenHyperLevel);
            assertFalse(status.sensorRateOfDecreaseExceeded);
            assertFalse(status.sensorRateOfIncreaseExceeded);
            assertFalse(status.sensorResultLowerThenDeviceCanProcess);
            assertFalse(status.sensorResultHigherThenDeviceCanProcess);
            assertEquals("Time offset", 6, timeOffset);
            assertFalse(secured);
        }

        @Override
        public void onContinuousGlucoseMonitorStatusReceivedWithCrcError(@NonNull final BluetoothDevice device,
                                                                         @NonNull final Data data) {
            // Reaching this callback means the parser mis-detected a CRC error.
            assertEquals("Correct data reported as CRC error", 1, 2);
        }

        @Override
        public void onInvalidDataReceived(@NonNull final BluetoothDevice device,
                                          @NonNull final Data data) {
            // Reaching this callback means a valid 5-byte packet was rejected.
            assertEquals("Correct data reported as invalid", 1, 2);
        }
    };
    final MutableData data = new MutableData(new byte[5]);
    data.setValue(6, Data.FORMAT_UINT16_LE, 0);
    data.setValue(0x010101, Data.FORMAT_UINT24_LE, 2);
    callback.onDataReceived(null, data);
}
/** Returns the {@code zone} held by this object. */
public Zone zone() {
    return zone;
}
/** Zone parsing must round-trip via toString, honor equals/hashCode, and reject malformed input. */
@Test
void testZone() {
    Zone zone = Zone.from("dev.us-west-1");
    zone = Zone.from(zone.toString());
    assertEquals(Environment.dev, zone.environment());
    assertEquals("us-west-1", zone.region());
    Zone sameZone = Zone.from("dev.us-west-1");
    assertEquals(sameZone.hashCode(), zone.hashCode());
    assertEquals(sameZone, zone);
    try {
        Zone.from("invalid");
        fail("Expected exception");
    } catch (IllegalArgumentException e) {
        assertEquals("A zone string must be on the form [environment].[region], but was 'invalid'",
            e.getMessage());
    }
    try {
        Zone.from("invalid.us-west-1");
        fail("Expected exception");
    } catch (IllegalArgumentException e) {
        assertEquals("Invalid zone 'invalid.us-west-1': No environment named 'invalid'",
            e.getMessage());
    }
}
/**
 * Adds the given data file to this pending append.
 *
 * @param file the data file to append
 * @return this, for fluent chaining
 */
@Override
public MergeAppend appendFile(DataFile file) {
    add(file);
    return this;
}
/** With a summary limit of 1, a single-partition append must include per-partition summary entries. */
@TestTemplate
public void testIncludedPartitionSummaries() {
    table.updateProperties().set(TableProperties.WRITE_PARTITION_SUMMARY_LIMIT, "1").commit();
    table.newFastAppend().appendFile(FILE_A).commit();
    Set<String> partitionSummaryKeys =
        table.currentSnapshot().summary().keySet().stream()
            .filter(key -> key.startsWith(SnapshotSummary.CHANGED_PARTITION_PREFIX))
            .collect(Collectors.toSet());
    assertThat(partitionSummaryKeys).hasSize(1);
    assertThat(table.currentSnapshot().summary())
        .containsEntry(SnapshotSummary.PARTITION_SUMMARY_PROP, "true")
        .containsEntry(SnapshotSummary.CHANGED_PARTITION_COUNT_PROP, "1")
        .containsEntry(
            SnapshotSummary.CHANGED_PARTITION_PREFIX + "data_bucket=0",
            "added-data-files=1,added-records=1,added-files-size=10");
}
/**
 * Returns all database names, served from {@code databaseNamesCache}.
 * The cache is keyed by the empty string since the listing takes no argument.
 */
public List<String> getAllDatabaseNames() {
    return get(databaseNamesCache, "");
}
/** Both the shared cache and a query-level cache must return the underlying metastore's databases. */
@Test
public void testGetAllDatabaseNames() {
    CachingHiveMetastore cachingHiveMetastore = new CachingHiveMetastore(
        metastore, executor, expireAfterWriteSec, refreshAfterWriteSec, 1000, false);
    List<String> databaseNames = cachingHiveMetastore.getAllDatabaseNames();
    Assert.assertEquals(Lists.newArrayList("db1", "db2"), databaseNames);
    CachingHiveMetastore queryLevelCache = CachingHiveMetastore.createQueryLevelInstance(cachingHiveMetastore, 100);
    Assert.assertEquals(Lists.newArrayList("db1", "db2"), queryLevelCache.getAllDatabaseNames());
}
/**
 * Returns a deserializer that decodes arbitrary protobuf payloads without a
 * schema: the bytes are parsed as an {@link UnknownFieldSet} and rendered as
 * its textual representation.
 *
 * <p>Unparseable payloads fail with a {@link ValidationException} carrying the
 * parse error message.
 *
 * <p>NOTE(review): the original exception is dropped here (only its message is
 * kept) — if ValidationException has a (String, Throwable) constructor,
 * preserving the cause would aid debugging; confirm against its API.
 */
@Override
public Serde.Deserializer deserializer(String topic, Serde.Target type) {
    return new Serde.Deserializer() {
        @SneakyThrows
        @Override
        public DeserializeResult deserialize(RecordHeaders headers, byte[] data) {
            try {
                UnknownFieldSet unknownFields = UnknownFieldSet.parseFrom(data);
                return new DeserializeResult(unknownFields.toString(), DeserializeResult.Type.STRING, Map.of());
            } catch (Exception e) {
                throw new ValidationException(e.getMessage());
            }
        }
    };
}
/** A null payload must surface protobuf's parse failure as a ValidationException. */
@Test
void deserializeNullMessage() {
    var deserializer = serde.deserializer(DUMMY_TOPIC, Serde.Target.VALUE);
    assertThatThrownBy(() -> deserializer.deserialize(null, null))
        .isInstanceOf(ValidationException.class)
        .hasMessageContaining("Cannot read the array length");
}
/**
 * Computes the Redis Cluster hash slot for the given key by issuing the
 * KEYSLOT command and waiting for the result.
 *
 * @param key the key to hash
 * @return the cluster slot number
 */
@Override
public Integer clusterGetSlotForKey(byte[] key) {
    final RFuture<Integer> future =
            executorService.readAsync((String) null, StringCodec.INSTANCE, RedisCommands.KEYSLOT, key);
    return syncFuture(future);
}
/** KEYSLOT for an arbitrary key must resolve to a non-null slot in cluster mode. */
@Test
public void testClusterGetSlotForKey() {
    testInCluster(connection -> {
        Integer slot = connection.clusterGetSlotForKey("123".getBytes());
        assertThat(slot).isNotNull();
    });
}
/**
 * Decides whether the request should be retried based on the response.
 *
 * <p>Returns {@code false} when the response does not match the retry
 * predicate or when {@code maxRetries} has been exceeded (notifying
 * {@code retryFailedConsumer} in the latter case). Otherwise records the
 * computed delay and notifies {@code retryConsumer}.
 */
@Override
public boolean retryRequest(HttpResponse response, int executionCount, HttpContext ctx) {
    log.fine(() -> String.format("retryRequest(responseCode='%s', executionCount='%d', ctx='%s'",
            response.getStatusLine().getStatusCode(), executionCount, ctx));
    HttpClientContext clientCtx = HttpClientContext.adapt(ctx);
    boolean retryable = predicate.test(response, clientCtx);
    if (!retryable) {
        log.fine(() -> String.format("Not retrying for '%s'", ctx));
        return false;
    }
    boolean retriesExhausted = executionCount > maxRetries;
    if (retriesExhausted) {
        log.fine(() -> String.format("Max retries exceeded for '%s'", ctx));
        retryFailedConsumer.onRetryFailed(response, executionCount, clientCtx);
        return false;
    }
    Duration delay = delaySupplier.getDelay(executionCount);
    log.fine(() -> String.format("Retrying after %s for '%s'", delay, ctx));
    // Expose the chosen delay to the executing client before notifying listeners.
    retryInterval.set(delay.toMillis());
    retryConsumer.onRetry(response, delay, executionCount, clientCtx);
    return true;
}
/** A 200 response is not in the retryable status list, so no retry must be attempted. */
@Test
void does_not_retry_for_non_listed_exception() {
    DelayedResponseLevelRetryHandler handler = DelayedResponseLevelRetryHandler.Builder
        .withFixedDelay(Duration.ofSeconds(2), 2)
        .retryForStatusCodes(List.of(HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_BAD_GATEWAY))
        .build();
    HttpResponse response = createResponse(HttpStatus.SC_OK);
    HttpClientContext ctx = new HttpClientContext();
    assertFalse(handler.retryRequest(response, 1, ctx));
}
/**
 * Activates work for its sharded key and reports how it was handled.
 *
 * <p>Outcomes:
 * <ul>
 *   <li>{@code EXECUTE} — the key had no queued work; this item starts now.</li>
 *   <li>{@code DUPLICATE} — an item with the same id is already queued.</li>
 *   <li>{@code STALE} — a queued item with the same cache token has a newer
 *       (or equal) work token.</li>
 *   <li>{@code QUEUED} — enqueued behind existing work for the key; stale
 *       non-active items with the same cache token are evicted first.</li>
 * </ul>
 *
 * <p>Active-work budget is incremented for every item that is kept
 * (EXECUTE/QUEUED) and decremented for each evicted stale item.
 */
synchronized ActivateWorkResult activateWorkForKey(ExecutableWork executableWork) {
    ShardedKey shardedKey = executableWork.work().getShardedKey();
    Deque<ExecutableWork> workQueue = activeWork.getOrDefault(shardedKey, new ArrayDeque<>());
    // This key does not have any work queued up on it. Create one, insert Work, and mark the work
    // to be executed.
    if (!activeWork.containsKey(shardedKey) || workQueue.isEmpty()) {
        workQueue.addLast(executableWork);
        activeWork.put(shardedKey, workQueue);
        incrementActiveWorkBudget(executableWork.work());
        return ActivateWorkResult.EXECUTE;
    }
    // Check to see if we have this work token queued.
    Iterator<ExecutableWork> workIterator = workQueue.iterator();
    while (workIterator.hasNext()) {
        ExecutableWork queuedWork = workIterator.next();
        if (queuedWork.id().equals(executableWork.id())) {
            return ActivateWorkResult.DUPLICATE;
        }
        if (queuedWork.id().cacheToken() == executableWork.id().cacheToken()) {
            if (executableWork.id().workToken() > queuedWork.id().workToken()) {
                // Check to see if the queuedWork is active. We only want to remove it if it is NOT
                // currently active.
                if (!queuedWork.equals(workQueue.peek())) {
                    workIterator.remove();
                    decrementActiveWorkBudget(queuedWork.work());
                }
                // Continue here to possibly remove more non-active stale work that is queued.
            } else {
                return ActivateWorkResult.STALE;
            }
        }
    }
    // Queue the work for later processing.
    workQueue.addLast(executableWork);
    incrementActiveWorkBudget(executableWork.work());
    return ActivateWorkResult.QUEUED;
}
/** A second work item for an already-active sharded key (new work token) must be QUEUED. */
@Test
public void testActivateWorkForKey_QUEUED() {
    ShardedKey shardedKey = shardedKey("someKey", 1L);
    // ActivateWork with the same shardedKey, but different workTokens.
    activeWorkState.activateWorkForKey(createWork(createWorkItem(1L, 1L, shardedKey)));
    ActivateWorkResult activateWorkResult =
        activeWorkState.activateWorkForKey(createWork(createWorkItem(2L, 1L, shardedKey)));
    assertEquals(ActivateWorkResult.QUEUED, activateWorkResult);
}
/**
 * Appends {@code value} to the pending additions for {@code key}.
 *
 * <p>Keys are bucketed by their coder's structural value so logically equal
 * keys share one pending list; the first concrete key seen is retained for
 * later persistence.
 *
 * <p>Uses {@code computeIfAbsent} instead of the previous
 * putIfAbsent-then-get pair: one map lookup instead of two, and the KV/list
 * is only allocated when the bucket is actually missing. Behavior is
 * unchanged.
 *
 * @throws IllegalStateException if this state has already been closed
 */
public void put(K key, V value) {
    checkState(
        !isClosed,
        "Multimap user state is no longer usable because it is closed for %s",
        keysStateRequest.getStateKey());
    Object keyStructuralValue = mapKeyCoder.structuralValue(key);
    pendingAdds
        .computeIfAbsent(keyStructuralValue, unused -> KV.of(key, new ArrayList<>()))
        .getValue()
        .add(value);
}
/**
 * put() must not mutate previously fetched iterables, must be visible on
 * subsequent get(), and must be rejected after close.
 */
@Test
public void testPut() throws Exception {
    FakeBeamFnStateClient fakeClient =
        new FakeBeamFnStateClient(
            ImmutableMap.of(
                createMultimapKeyStateKey(),
                KV.of(ByteArrayCoder.of(), singletonList(A1)),
                createMultimapValueStateKey(A1),
                KV.of(StringUtf8Coder.of(), asList("V1", "V2"))));
    MultimapUserState<byte[], String> userState =
        new MultimapUserState<>(
            Caches.noop(),
            fakeClient,
            "instructionId",
            createMultimapKeyStateKey(),
            ByteArrayCoder.of(),
            StringUtf8Coder.of());
    Iterable<String> initValues = userState.get(A1);
    userState.put(A1, "V3");
    // The iterable obtained before the put must still reflect the old contents.
    assertArrayEquals(new String[] {"V1", "V2"}, Iterables.toArray(initValues, String.class));
    assertArrayEquals(
        new String[] {"V1", "V2", "V3"}, Iterables.toArray(userState.get(A1), String.class));
    userState.asyncClose();
    // After close, further puts must fail fast.
    assertThrows(IllegalStateException.class, () -> userState.put(A1, "V2"));
}
/**
 * Starts the wrapped load balancer, then kicks off warm-up on the executor.
 *
 * <p>Flow: the delegate's {@code start} must succeed first (its failure is
 * propagated to {@code callback}); on success the start time is recorded and
 * {@code prepareWarmUp} runs asynchronously. The preparation callback then
 * proceeds to {@code continueWarmUp} in every case — on success, on timeout
 * (after notifying {@code callback} so startup is not blocked), and on any
 * other error (logged, warm-up continues best-effort).
 */
@Override
public void start(Callback<None> callback) {
    LOG.info("{} enabled", _printName);
    Callback<None> prepareWarmUpCallback = new Callback<None>() {
        @Override
        public void onError(Throwable e) {
            if (e instanceof TimeoutException) {
                // Timeout is non-fatal: unblock the caller and keep warming in background.
                LOG.info("{} hit timeout: {}ms. The WarmUp will continue in background",
                    _printName, _warmUpTimeoutMillis);
                callback.onSuccess(None.none());
            } else {
                LOG.error("{} failed to fetch dual read mode, continuing warmup.", _printName, e);
            }
            continueWarmUp(callback);
        }

        @Override
        public void onSuccess(None result) {
            continueWarmUp(callback);
        }
    };
    _loadBalancer.start(new Callback<None>() {
        @Override
        public void onError(Throwable e) {
            callback.onError(e);
        }

        @Override
        public void onSuccess(None result) {
            _allStartTime = _timeSupplier.get();
            _executorService.submit(() -> prepareWarmUp(prepareWarmUpCallback));
        }
    });
}
/**
 * Warm-up must never exceed DEFAULT_CONCURRENT_REQUESTS in-flight requests
 * while still eventually issuing one request per service.
 */
@Test(timeOut = 10000, retryAnalyzer = ThreeRetries.class)
public void testThrottling() throws InterruptedException {
    int NRequests = 100;
    createNServicesIniFiles(NRequests);
    TestLoadBalancer balancer = new TestLoadBalancer(50);
    AtomicInteger requestCount = balancer.getRequestCount();
    LoadBalancer warmUpLoadBalancer = new WarmUpLoadBalancer(balancer, balancer,
        Executors.newSingleThreadScheduledExecutor(), _tmpdir.getAbsolutePath(), MY_SERVICES_FS,
        _FSBasedDownstreamServicesFetcher,
        WarmUpLoadBalancer.DEFAULT_SEND_REQUESTS_TIMEOUT_SECONDS,
        WarmUpLoadBalancer.DEFAULT_CONCURRENT_REQUESTS);
    FutureCallback<None> callback = new FutureCallback<>();
    warmUpLoadBalancer.start(callback);
    boolean triggeredAtLeastOnce = false;
    // Poll concurrency while the warm-up is running.
    while (!callback.isDone()) {
        triggeredAtLeastOnce = true;
        int currentConcurrentRequests =
            balancer.getRequestCount().get() - balancer.getCompletedRequestCount().get();
        if (currentConcurrentRequests > WarmUpLoadBalancer.DEFAULT_CONCURRENT_REQUESTS) {
            Assert.fail("The concurrent requests (" + currentConcurrentRequests
                + ") are greater than the allowed (" + WarmUpLoadBalancer.DEFAULT_CONCURRENT_REQUESTS + ")");
        }
        Thread.sleep(50);
    }
    Assert.assertTrue(triggeredAtLeastOnce);
    Assert.assertEquals(NRequests, requestCount.get());
}
/**
 * Returns a {@code Version} for the given components, reusing the shared
 * {@code UNKNOWN} instance when both components are {@code UNKNOWN_VERSION}.
 *
 * @param major major version component
 * @param minor minor version component
 * @return the corresponding version instance
 */
public static Version of(int major, int minor) {
    boolean unknown = (major == UNKNOWN_VERSION) && (minor == UNKNOWN_VERSION);
    return unknown ? UNKNOWN : new Version(major, minor);
}
/** A major component larger than a byte must trip an assertion (requires -ea). */
@Test(expected = AssertionError.class)
@RequireAssertEnabled
public void construct_withOverflowingMajor() {
    Version.of(Byte.MAX_VALUE + 1, 1);
}
/** Removes the given word from the currently loaded dictionary. */
private void deleteWord(String word) {
    mCurrentDictionary.deleteWord(word);
}
/** Clicking a row's delete button must remove the word and shrink the list by one. */
@Test
public void testDeleteWord() {
    // adding a few words to the dictionary
    UserDictionary userDictionary = new UserDictionary(getApplicationContext(), "en");
    userDictionary.loadDictionary();
    userDictionary.addWord("hello", 1);
    userDictionary.addWord("you", 2);
    userDictionary.close();
    UserDictionaryEditorFragment fragment = startEditorFragment();
    TestRxSchedulers.drainAllTasks();
    RecyclerView wordsRecyclerView = fragment.getView().findViewById(R.id.words_recycler_view);
    // http://stackoverflow.com/questions/27052866/android-robolectric-click-recyclerview-item
    wordsRecyclerView.measure(0, 0);
    wordsRecyclerView.layout(0, 0, 100, 10000);
    Assert.assertEquals(
        3 /*two words, and one AddNew*/, wordsRecyclerView.getAdapter().getItemCount());
    View helloRowView = wordsRecyclerView.findViewHolderForAdapterPosition(0).itemView;
    Assert.assertNotNull(helloRowView);
    View deleteButtonView = helloRowView.findViewById(R.id.delete_user_word);
    Assert.assertNotNull(deleteButtonView);
    TextView helloTextView = helloRowView.findViewById(R.id.word_view);
    Assert.assertNotNull(helloTextView);
    Assert.assertEquals("hello", helloTextView.getText().toString());
    // deleting word
    Shadows.shadowOf(deleteButtonView).getOnClickListener().onClick(deleteButtonView);
    TestRxSchedulers.drainAllTasks();
    Assert.assertEquals(2, wordsRecyclerView.getAdapter().getItemCount());
}
/**
 * Extracts the job name from the build locator (its 5th '/'-separated
 * segment) when the agent is building; otherwise returns {@code null}.
 *
 * @return the job name, or {@code null} if not building or the locator is
 *         too short
 */
public String getJobName() {
    if (!isBuilding()) {
        return null;
    }
    try {
        return buildLocator.split("/")[4];
    } catch (ArrayIndexOutOfBoundsException e) {
        // Locator does not contain a job segment.
        return null;
    }
}
/** A locator with too few segments must yield a null job name rather than throw. */
@Test
public void shouldReturnNullTheJobName() {
    AgentBuildingInfo agentBuildingInfo = new AgentBuildingInfo("buildInfo", "foo");
    assertNull(agentBuildingInfo.getJobName());
}
/**
 * Delegates message-track lookup for the given message to the underlying
 * admin implementation.
 */
@Override
public List<MessageTrack> messageTrackDetail(
    MessageExt msg) throws RemotingException, MQClientException, InterruptedException,
    MQBrokerException {
    return this.defaultMQAdminExtImpl.messageTrackDetail(msg);
}
/** Track detail must be produced for both default and broadcasting consumer connections. */
@Test
public void testMessageTrackDetail() throws InterruptedException, RemotingException, MQClientException, MQBrokerException {
    MessageExt messageExt = new MessageExt();
    messageExt.setMsgId("msgId");
    messageExt.setTopic("unit-test");
    List<MessageTrack> messageTrackList = defaultMQAdminExt.messageTrackDetail(messageExt);
    assertThat(messageTrackList.size()).isEqualTo(2);
    // Switch the mocked connection to a broadcasting consumer and re-check.
    ConsumerConnection connection = new ConsumerConnection();
    connection.setMessageModel(MessageModel.BROADCASTING);
    connection.setConsumeType(ConsumeType.CONSUME_PASSIVELY);
    HashSet<Connection> connections = new HashSet<>();
    connections.add(new Connection());
    connection.setConnectionSet(connections);
    when(mQClientAPIImpl.getConsumerConnectionList(anyString(), anyString(), anyLong())).thenReturn(connection);
    ConsumeStats consumeStats = new ConsumeStats();
    when(mQClientAPIImpl.getConsumeStats(anyString(), anyString(), isNull(), anyLong())).thenReturn(consumeStats);
    List<MessageTrack> broadcastMessageTracks = defaultMQAdminExt.messageTrackDetail(messageExt);
    assertThat(broadcastMessageTracks.size()).isEqualTo(2);
    assertThat(broadcastMessageTracks.get(0).getTrackType()).isEqualTo(TrackType.CONSUME_BROADCASTING);
}
/**
 * Counts sorted-set members within the given lexicographical range via
 * ZLEXCOUNT. Unbounded ends map to Redis' "-" (min) and "+" (max) markers.
 *
 * @param key the sorted set key
 * @param range lexicographical range to count within
 * @return number of members in the range
 */
@Override
public Long zLexCount(byte[] key, org.springframework.data.domain.Range range) {
    String lower = value(range.getLowerBound(), "-");
    String upper = value(range.getUpperBound(), "+");
    return read(key, StringCodec.INSTANCE, ZLEXCOUNT, key, lower, upper);
}
/** lexCount over a closed range ["1","2"] of three members must count exactly two. */
@Test
public void testZLexCount() {
    StringRedisTemplate redisTemplate = new StringRedisTemplate();
    redisTemplate.setConnectionFactory(new RedissonConnectionFactory(redisson));
    redisTemplate.afterPropertiesSet();
    redisTemplate.boundZSetOps("test").add("1", 10);
    redisTemplate.boundZSetOps("test").add("2", 20);
    redisTemplate.boundZSetOps("test").add("3", 30);
    Long size = redisTemplate.boundZSetOps("test").lexCount(Range.closed("1", "2"));
    assertThat(size).isEqualTo(2);
}
/**
 * Pauses or resumes the named event timer.
 *
 * <p>No-op when the timer does not exist or is already in the requested
 * state. All exceptions (including an invalid event name) are logged and
 * swallowed, matching the SDK's fail-safe convention.
 *
 * @param eventName name of the timed event
 * @param startTime timestamp to record for the state change
 * @param isPause {@code true} to pause, {@code false} to resume
 */
public void updateTimerState(final String eventName, long startTime, final boolean isPause) {
    try {
        SADataHelper.assertEventName(eventName);
        synchronized (mTrackTimer) {
            EventTimer timer = mTrackTimer.get(eventName);
            // Only toggle when the timer exists and its state actually differs.
            if (timer == null || timer.isPaused() == isPause) {
                return;
            }
            timer.setTimerState(isPause, startTime);
        }
    } catch (Exception e) {
        SALog.printStackTrace(e);
    }
}
/** Updating an existing timer's state must leave the timer registered. */
@Test
public void updateTimerState() {
    mInstance.addEventTimer("EventTimer", new EventTimer(TimeUnit.SECONDS, 10000L));
    mInstance.updateTimerState("EventTimer", 10000L, false);
    Assert.assertNotNull(mInstance.getEventTimer("EventTimer"));
}
/**
 * Returns all virtual links leading <em>to</em> the given connect point,
 * i.e. links whose destination equals the point.
 *
 * <p>Fix: the previous implementation filtered on {@code link.src()}, which
 * selects egress links; per the LinkService contract, ingress links are those
 * terminating at the connect point, so the filter now matches
 * {@code link.dst()}.
 *
 * @param connectPoint the connect point; must not be null
 * @return set of ingress links for the point
 * @throws NullPointerException if {@code connectPoint} is null
 */
@Override
public Set<Link> getIngressLinks(ConnectPoint connectPoint) {
    checkNotNull(connectPoint, CONNECT_POINT_NULL);
    return manager.getVirtualLinks(this.networkId())
            .stream()
            .filter(link -> (connectPoint.equals(link.dst())))
            .collect(Collectors.toSet());
}
/** getIngressLinks must reject a null connect point with a NullPointerException. */
@Test(expected = NullPointerException.class)
public void testGetIngressLinksByNullId() {
    manager.registerTenantId(TenantId.tenantId(tenantIdValue1));
    VirtualNetwork virtualNetwork =
        manager.createVirtualNetwork(TenantId.tenantId(tenantIdValue1));
    LinkService linkService = manager.get(virtualNetwork.id(), LinkService.class);
    // test the getIngressLinks() method with a null connect point.
    linkService.getIngressLinks(null);
}
/**
 * Reports whether the (optional) service name corresponds to a known web
 * service, compared case-insensitively against the known-service table.
 *
 * @param serviceName possibly-absent service name
 * @return {@code true} iff present and listed as a known web service
 */
public static boolean isWebService(Optional<String> serviceName) {
    return serviceName
        .map(Ascii::toLowerCase)
        .map(IS_PLAIN_HTTP_BY_KNOWN_WEB_SERVICE_NAME::containsKey)
        .orElse(false);
}
/** "http-alt" is a known web service name and must be recognized as such. */
@Test
public void isWebService_whenHttpAltService_returnsTrue() {
    assertThat(
        NetworkServiceUtils.isWebService(
            NetworkService.newBuilder().setServiceName("http-alt").build()))
        .isTrue();
}
/**
 * Returns the HyperLogLog cardinality estimate.
 *
 * <p>Computes the raw estimate {@code alpha * m^2 / E} (here written as
 * {@code (1/E) * alpha * m * m}), then applies small/large-range correction.
 * {@code computeE()} presumably returns the harmonic-sum term
 * {@code sum(2^-register)} over all m registers — TODO confirm against its
 * implementation.
 */
@Override
public long estimate() {
    final double raw = (1 / computeE()) * alpha() * m * m;
    return applyRangeCorrection(raw);
}
/** Smoke test: estimating with precision 7 (128 registers) must not throw. */
@Test
public void testAlpha_withMemoryFootprintOf128() {
    DenseHyperLogLogEncoder encoder = new DenseHyperLogLogEncoder(7);
    encoder.estimate();
}
/**
 * Checks every expected decision value of the scenario against the DMN
 * execution result.
 *
 * <p>For each expected output, the matching decision result is looked up by
 * the fact identifier's name; a missing decision result is a hard error.
 * Each expected {@code FactMappingValue} is then evaluated against the
 * decision's value via the configured expression evaluator, and the outcome
 * recorded in {@code scenarioRunnerData}.
 *
 * @throws ScenarioException if the DMN run produced no decision result for
 *         an expected decision name
 * @throws IllegalStateException if an expected value references an unknown
 *         fact mapping (indicates an inconsistent scenario model)
 */
@Override
protected void verifyConditions(ScesimModelDescriptor scesimModelDescriptor,
                                ScenarioRunnerData scenarioRunnerData,
                                ExpressionEvaluatorFactory expressionEvaluatorFactory,
                                Map<String, Object> requestContext) {
    DMNResult dmnResult = (DMNResult) requestContext.get(DMNScenarioExecutableBuilder.DMN_RESULT);
    List<DMNMessage> dmnMessages = dmnResult.getMessages();
    for (ScenarioExpect output : scenarioRunnerData.getExpects()) {
        FactIdentifier factIdentifier = output.getFactIdentifier();
        String decisionName = factIdentifier.getName();
        DMNDecisionResult decisionResult = dmnResult.getDecisionResultByName(decisionName);
        if (decisionResult == null) {
            throw new ScenarioException("DMN execution has not generated a decision result with name "
                + decisionName);
        }
        for (FactMappingValue expectedResult : output.getExpectedResult()) {
            ExpressionIdentifier expressionIdentifier = expectedResult.getExpressionIdentifier();
            FactMapping factMapping = scesimModelDescriptor.getFactMapping(factIdentifier, expressionIdentifier)
                .orElseThrow(() -> new IllegalStateException("Wrong expression, this should not happen"));
            ExpressionEvaluator expressionEvaluator = expressionEvaluatorFactory.getOrCreate(expectedResult);
            ScenarioResult scenarioResult = fillResult(expectedResult,
                () -> getSingleFactValueResult(factMapping,
                    expectedResult,
                    decisionResult,
                    dmnMessages,
                    expressionEvaluator),
                expressionEvaluator);
            scenarioRunnerData.addResult(scenarioResult);
        }
    }
}
// When the decision result evaluates SUCCEEDED but carries a null value, the
// comparison path is still exercised and the recorded result must be false.
@Test public void verifyConditions_decisionResultContainsANull() { // test 2 - when decisionResult contains a null value skip the steps and just do the comparison (that should be false in this case) ScenarioRunnerData scenarioRunnerData = new ScenarioRunnerData(); scenarioRunnerData.addExpect(new ScenarioExpect(personFactIdentifier, List.of(firstNameExpectedValue))); when(dmnResultMock.getDecisionResultByName(anyString())).thenReturn(dmnDecisionResultMock); when(dmnDecisionResultMock.getEvaluationStatus()).thenReturn(DecisionEvaluationStatus.SUCCEEDED); runnerHelper.verifyConditions(simulation.getScesimModelDescriptor(), scenarioRunnerData, expressionEvaluatorFactory, requestContextMock); assertThat(scenarioRunnerData.getResults()).hasSize(1); assertThat(scenarioRunnerData.getResults().get(0).getResult()).isFalse(); }
// Validates that the given JsonMapper serializes jobs in a JobRunr-compatible way by
// round-tripping a sample job through a series of checks (time fields, field-based
// access, polymorphism, deserialization). Any failure is wrapped in an
// IllegalArgumentException whose cause carries the specific violation.
public static JsonMapper validateJsonMapper(JsonMapper jsonMapper) { try { final String serializedJob = jsonMapper.serialize(getJobForTesting()); testTimeFields(serializedJob); testUseFieldsNotMethods(serializedJob); testUsePolymorphism(serializedJob); testCanConvertBackToJob(jsonMapper, serializedJob); return jsonMapper; } catch (Exception e) { throw new IllegalArgumentException("The JsonMapper you provided cannot be used as it deserializes jobs in an incorrect way.", e); } }
// A Jackson mapper configured without @class-based polymorphism must be rejected,
// and the root cause must explain the missing polymorphism support.
@Test void testInvalidJacksonJsonMapperNoPolymorphism() { assertThatThrownBy(() -> validateJsonMapper(new InvalidJacksonJsonMapper(new ObjectMapper() .registerModule(new JavaTimeModule()) .setDateFormat(new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ")) .setVisibility(PropertyAccessor.ALL, JsonAutoDetect.Visibility.NONE) .setVisibility(PropertyAccessor.FIELD, JsonAutoDetect.Visibility.ANY) )) ) .isInstanceOf(IllegalArgumentException.class) .hasMessage("The JsonMapper you provided cannot be used as it deserializes jobs in an incorrect way.") .hasRootCauseMessage("Polymorphism is not supported as no @class annotation is present with fully qualified name of the different Job states."); }
public static boolean isMapEmpty(Map<?, ?> map) {
    // Null-safe emptiness check: a null reference is treated as an empty map.
    if (map == null) {
        return true;
    }
    return map.isEmpty();
}
@Test
void testIsMapEmpty() {
    // Both an actually empty map and a null reference count as empty.
    assertTrue(CollectionUtils.isMapEmpty(Collections.emptyMap()));
    assertTrue(CollectionUtils.isMapEmpty(null));
}
public OpenConfigConfigOfAssignmentHandler addAssignmentType(
        AssignmentTypeEnum assignmentType) {
    // Fluent setter: records the assignment type on the backing model object
    // and returns this handler so calls can be chained.
    modelObject.assignmentType(assignmentType);
    return this;
}
// After addAssignmentType(LOGICAL_CHANNEL), the handler's model object must equal
// a DefaultConfig with the same assignment type set manually.
@Test public void testAddAssignmentType() { // test Handler OpenConfigConfigOfAssignmentHandler config = new OpenConfigConfigOfAssignmentHandler(parent); // call addAssignmentType config.addAssignmentType(AssignmentTypeEnum.LOGICAL_CHANNEL); // expected ModelObject DefaultConfig modelObject = new DefaultConfig(); modelObject.assignmentType(AssignmentTypeEnum.LOGICAL_CHANNEL); assertEquals("[NG]addAssignmentType:ModelObject(AssignmentType added) is not an expected one.\n", modelObject, config.getModelObject()); }
@Override
public void createSecurityGroup(KubevirtSecurityGroup sg) {
    // Validate the input before delegating persistence to the store.
    checkNotNull(sg, ERR_NULL_SG);
    final String sgId = sg.id();
    checkArgument(!Strings.isNullOrEmpty(sgId), ERR_NULL_SG_ID);
    sgStore.createSecurityGroup(sg);
    log.info(String.format(MSG_SG, sgId, MSG_CREATED));
}
// Creating the same security group twice must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class) public void testCreateDuplicateSecurityGroup() { target.createSecurityGroup(sg1); target.createSecurityGroup(sg1); }
public long retrieveConsumerId(HttpServletRequest request) {
    // Reads the consumer id previously stored on the request (by the auth filter)
    // and parses it as a long. Throws IllegalStateException when the attribute is
    // missing or malformed — the same contract callers already rely on, but
    // implemented with explicit checks instead of catching Throwable.
    Object value = request.getAttribute(CONSUMER_ID);
    if (value == null) {
        throw new IllegalStateException("No consumer id!");
    }
    try {
        return Long.parseLong(value.toString());
    } catch (NumberFormatException ex) {
        // Narrow catch: only a non-numeric attribute value is expected here.
        throw new IllegalStateException("No consumer id!", ex);
    }
}
// A non-numeric consumer-id attribute must surface as IllegalStateException.
@Test(expected = IllegalStateException.class) public void testRetrieveConsumerIdWithConsumerIdInvalid() throws Exception { String someInvalidConsumerId = "abc"; when(request.getAttribute(ConsumerAuthUtil.CONSUMER_ID)).thenReturn(someInvalidConsumerId); consumerAuthUtil.retrieveConsumerId(request); }
@Override
public void open() throws Exception {
    super.open();
    // Name the worker pool after the operator's unique id so its threads are
    // attributable to this operator instance.
    String poolName = "iceberg-worker-pool-" + getRuntimeContext().getOperatorUniqueID();
    this.workerPool = ThreadPools.newWorkerPool(poolName, workerPoolSize);
}
// With MAX_CONTINUOUS_EMPTY_COMMITS=3, nine empty checkpoints must produce exactly
// one snapshot per three checkpoints (i / 3 after checkpoint i).
@TestTemplate public void testMaxContinuousEmptyCommits() throws Exception { table.updateProperties().set(MAX_CONTINUOUS_EMPTY_COMMITS, "3").commit(); JobID jobId = new JobID(); long checkpointId = 0; long timestamp = 0; try (OneInputStreamOperatorTestHarness<WriteResult, Void> harness = createStreamSink(jobId)) { harness.setup(); harness.open(); assertSnapshotSize(0); for (int i = 1; i <= 9; i++) { harness.snapshot(++checkpointId, ++timestamp); harness.notifyOfCompletedCheckpoint(checkpointId); assertSnapshotSize(i / 3); } } }
public boolean isRepeated() {
    // Accessor for the repeated-exposure flag.
    return this.repeated;
}
@Test
public void isRepeated() {
    // Construct a config with repeated = true and verify the getter still
    // reflects it after an unrelated setter call.
    SAExposureConfig config = new SAExposureConfig(1, 1, true);
    config.setAreaRate(2);
    assertTrue(config.isRepeated());
}
public static MappingRuleAction createUpdateDefaultAction(String queue) {
    // Factory for an action that rewrites the default-queue variable to `queue`.
    return new VariableUpdateAction(DEFAULT_QUEUE_VARIABLE, queue);
}
// Exercises VariableUpdateAction: updating mutable variables (with substitution of
// %sub inside the new value), updating empty/null variables, and verifying that an
// attempt to update an immutable variable fails with IllegalStateException.
@Test public void testVariableUpdateAction() { VariableContext variables = new VariableContext(); variables.put("%default", "root.default"); variables.put("%immutable", "immutable"); variables.put("%empty", ""); variables.put("%null", null); variables.put("%sub", "xxx"); variables.setImmutables("%immutable"); MappingRuleAction updateDefaultManual = new MappingRuleActions.VariableUpdateAction("%default", "root.%sub"); MappingRuleAction updateDefaultHelper = MappingRuleActions.createUpdateDefaultAction("root.%sub%sub"); MappingRuleAction updateImmutable = new MappingRuleActions.VariableUpdateAction("%immutable", "changed"); MappingRuleAction updateEmpty = new MappingRuleActions.VariableUpdateAction("%empty", "something"); MappingRuleAction updateNull = new MappingRuleActions.VariableUpdateAction("%null", "non-null"); MappingRuleResult result; result = updateDefaultManual.execute(variables); assertSkipResult(result); assertEquals("root.xxx", variables.get("%default")); result = updateDefaultHelper.execute(variables); assertSkipResult(result); assertEquals("root.xxxxxx", variables.get("%default")); result = updateEmpty.execute(variables); assertSkipResult(result); assertEquals("something", variables.get("%empty")); result = updateNull.execute(variables); assertSkipResult(result); assertEquals("non-null", variables.get("%null")); try { updateImmutable.execute(variables); fail("Should've failed with exception"); } catch (Exception e){ assertTrue(e instanceof IllegalStateException); } }
// Extracts the first generated SQL statement for a JPA Query by unwrapping it to
// Hibernate's AbstractProducedQuery and consulting the session factory's query
// plan cache (no shallow copy, no execution). Returns null when the plan yields
// no SQL strings.
// NOTE(review): relies on Hibernate-internal APIs (AbstractProducedQuery,
// QueryPlanCache) — tied to a specific Hibernate version.
public static String from(Query query) { AbstractProducedQuery abstractProducedQuery = query.unwrap(AbstractProducedQuery.class); String[] sqls = abstractProducedQuery .getProducer() .getFactory() .getQueryPlanCache() .getHQLQueryPlan(abstractProducedQuery.getQueryString(), false, Collections.emptyMap()) .getSqlStrings(); return sqls.length > 0 ? sqls[0] : null; }
// SQLExtractor.from must yield a non-null SQL string for a grouping JPQL query.
@Test public void testJPQL() { doInJPA(entityManager -> { Query jpql = entityManager .createQuery( "select " + " YEAR(p.createdOn) as year, " + " count(p) as postCount " + "from " + " Post p " + "group by " + " YEAR(p.createdOn)", Tuple.class); String sql = SQLExtractor.from(jpql); assertNotNull(sql); LOGGER.info( "The JPQL query: [\n{}\n]\ngenerates the following SQL query: [\n{}\n]", jpql.unwrap(org.hibernate.query.Query.class).getQueryString(), sql ); }); }
@Override public void readFully(byte[] b, int off, int len) throws EOFException { if (len < 0) { throw new IndexOutOfBoundsException("len is negative: " + len); } if (off < 0 || off + len > b.length) { throw new IndexOutOfBoundsException("off=" + off + ", len=" + len + ", b.length=" + b.length); } // the javadoc of DataInput.readFully(byte[], int, int) says that the method will block until the requested // number of bytes has been read, end of file is detected, or an exception is thrown. // So being formal, we should modify the buffer even if we know we are going to reach EOF. boolean eof = availableLong() < len; _dataBuffer.copyTo(_currentOffset, b, off, len); _currentOffset += len; if (eof) { throw new EOFException(); } }
// Reading the full buffer must reproduce the backing ByteBuffer byte-for-byte and
// leave the stream offset at BUFFER_SIZE.
@Test void testReadFully() throws IOException { byte[] buffer = new byte[BUFFER_SIZE]; _dataBufferPinotInputStream.readFully(buffer); for (int i = 0; i < BUFFER_SIZE; i++) { assertEquals(buffer[i], _byteBuffer.get(i)); } assertEquals(_dataBufferPinotInputStream.getCurrentOffset(), BUFFER_SIZE); }
// Looks up a HouseTable by primary key via the HTS REST client, mapping the response
// entity to the domain type. The call is wrapped in a retry template (retrying on
// unknown-state and IllegalState errors), HTTP errors are translated by
// handleHtsHttpError, and the reactive pipeline is blocked with a request timeout.
@Override public Optional<HouseTable> findById(HouseTablePrimaryKey houseTablePrimaryKey) { return getHtsRetryTemplate( Arrays.asList( HouseTableRepositoryStateUnkownException.class, IllegalStateException.class)) .execute( context -> apiInstance .getUserTable( houseTablePrimaryKey.getDatabaseId(), houseTablePrimaryKey.getTableId()) .map(EntityResponseBodyUserTable::getEntity) .map(houseTableMapper::toHouseTable) .switchIfEmpty(Mono.empty()) .onErrorResume(this::handleHtsHttpError) .blockOptional(Duration.ofSeconds(REQUEST_TIMEOUT_SECONDS))); }
// With a mocked 200 response from the HTS server, findById must return a HouseTable
// whose id, database, location and version match the enqueued entity.
@Test public void testRepoFindById() { EntityResponseBodyUserTable response = new EntityResponseBodyUserTable(); response.entity(houseTableMapper.toUserTable(HOUSE_TABLE)); mockHtsServer.enqueue( new MockResponse() .setResponseCode(200) .setBody((new Gson()).toJson(response)) .addHeader("Content-Type", "application/json")); HouseTable result = htsRepo .findById( HouseTablePrimaryKey.builder() .tableId(HOUSE_TABLE.getTableId()) .databaseId(HOUSE_TABLE.getDatabaseId()) .build()) .get(); Assertions.assertEquals(result.getTableId(), HOUSE_TABLE.getTableId()); Assertions.assertEquals(result.getDatabaseId(), HOUSE_TABLE.getDatabaseId()); Assertions.assertEquals(result.getTableLocation(), HOUSE_TABLE.getTableLocation()); Assertions.assertEquals(result.getTableVersion(), HOUSE_TABLE.getTableVersion()); }
@Override
public GroupVersion groupVersion() {
    // Derive the public API group/version from the Post extension type.
    Post post = new Post();
    return PublicApiUtils.groupVersion(post);
}
// The endpoint must report the content API group/version for Post.
@Test void groupVersion() { GroupVersion groupVersion = endpoint.groupVersion(); assertThat(groupVersion.toString()).isEqualTo("api.content.halo.run/v1alpha1"); }
// Loads a MetricsReporter implementation from catalog properties. Falls back to the
// shared LoggingMetricsReporter when no custom impl is configured; otherwise the
// class is instantiated reflectively via its no-arg constructor (missing constructor
// and wrong supertype are both surfaced as IllegalArgumentException) and initialized
// with the full property map before being returned.
public static MetricsReporter loadMetricsReporter(Map<String, String> properties) { String impl = properties.get(CatalogProperties.METRICS_REPORTER_IMPL); if (impl == null) { return LoggingMetricsReporter.instance(); } LOG.info("Loading custom MetricsReporter implementation: {}", impl); DynConstructors.Ctor<MetricsReporter> ctor; try { ctor = DynConstructors.builder(MetricsReporter.class) .loader(CatalogUtil.class.getClassLoader()) .impl(impl) .buildChecked(); } catch (NoSuchMethodException e) { throw new IllegalArgumentException( String.format("Cannot initialize MetricsReporter, missing no-arg constructor: %s", impl), e); } MetricsReporter reporter; try { reporter = ctor.newInstance(); } catch (ClassCastException e) { throw new IllegalArgumentException( String.format( "Cannot initialize MetricsReporter, %s does not implement MetricsReporter.", impl), e); } reporter.initialize(properties); return reporter; }
// A custom reporter class with a no-arg constructor must load and be of that type.
@Test public void loadCustomMetricsReporter_noArg() { Map<String, String> properties = Maps.newHashMap(); properties.put("key", "val"); properties.put( CatalogProperties.METRICS_REPORTER_IMPL, TestMetricsReporterDefault.class.getName()); MetricsReporter metricsReporter = CatalogUtil.loadMetricsReporter(properties); assertThat(metricsReporter).isInstanceOf(TestMetricsReporterDefault.class); }
// Searches apps by app id or name, in three stages: empty query returns all apps;
// a direct id/name match returns immediately; otherwise, when item-based search is
// enabled, falls through to searching config items (searchByItem), else returns an
// empty page.
@GetMapping("/apps/search/by-appid-or-name") public PageDTO<App> search(@RequestParam(value = "query", required = false) String query, Pageable pageable) { if (StringUtils.isEmpty(query)) { return appService.findAll(pageable); } //search app PageDTO<App> appPageDTO = appService.searchByAppIdOrAppName(query, pageable); if (appPageDTO.hasContent()) { return appPageDTO; } if (!portalConfig.supportSearchByItem()) { return new PageDTO<>(Lists.newLinkedList(), pageable, 0); } //search item return searchByItem(query, pageable); }
// When no app matches directly, the controller must fall back to item search across
// all active envs (DEV: 10, FAT: 15 namespaces -> 25 apps, total 30) and never call
// findAll for a non-empty query.
@Test public void testSearchItem() { String query = "timeout"; PageRequest request = PageRequest.of(0, 20); PageDTO<App> apps = new PageDTO<>(Lists.newLinkedList(), request, 0); PageDTO<NamespaceDTO> devNamespaces = genPageNamespace(10, request, 20); PageDTO<NamespaceDTO> fatNamespaces = genPageNamespace(15, request, 30); when(appService.searchByAppIdOrAppName(query, request)).thenReturn(apps); when(portalConfig.supportSearchByItem()).thenReturn(true); when(portalSettings.getActiveEnvs()).thenReturn(Lists.newArrayList(Env.DEV, Env.FAT)); when(namespaceService.findNamespacesByItem(Env.DEV, query, request)).thenReturn(devNamespaces); when(namespaceService.findNamespacesByItem(Env.FAT, query, request)).thenReturn(fatNamespaces); PageDTO<App> result = searchController.search(query, request); Assert.assertTrue(result.hasContent()); Assert.assertEquals(25, result.getContent().size()); Assert.assertEquals(30, result.getTotal()); verify(appService, times(0)).findAll(request); verify(appService, times(1)).searchByAppIdOrAppName(query, request); verify(namespaceService).findNamespacesByItem(Env.DEV, query, request); verify(namespaceService).findNamespacesByItem(Env.FAT, query, request); }
// Builds per-task configs for checkpoint mirroring. Throws RetriableException while
// the initial consumer-group load is still pending (framework retries); returns no
// tasks when replication is disabled, no groups are known, or checkpoint emission is
// turned off; otherwise partitions the known groups across min(maxTasks, #groups) tasks.
@Override public List<Map<String, String>> taskConfigs(int maxTasks) { if (knownConsumerGroups == null) { // If knownConsumerGroup is null, it means the initial loading has not finished. // An exception should be thrown to trigger the retry behavior in the framework. log.debug("Initial consumer loading has not yet completed"); throw new RetriableException("Timeout while loading consumer groups."); } // if the replication is disabled, known consumer group is empty, or checkpoint emission is // disabled by setting 'emit.checkpoints.enabled' to false, the interval of checkpoint emission // will be negative and no 'MirrorCheckpointTask' will be created if (!config.enabled() || knownConsumerGroups.isEmpty() || config.emitCheckpointsInterval().isNegative()) { return Collections.emptyList(); } int numTasks = Math.min(maxTasks, knownConsumerGroups.size()); List<List<String>> groupsPartitioned = ConnectorUtils.groupPartitions(new ArrayList<>(knownConsumerGroups), numTasks); return IntStream.range(0, numTasks) .mapToObj(i -> config.taskConfigForConsumerGroups(groupsPartitioned.get(i), i)) .collect(Collectors.toList()); }
@Test public void testNoConsumerGroup() { MirrorCheckpointConfig config = new MirrorCheckpointConfig(makeProps()); MirrorCheckpointConnector connector = new MirrorCheckpointConnector(new HashSet<>(), config); List<Map<String, String>> output = connector.taskConfigs(1); // expect no task will be created assertEquals(0, output.size(), "ConsumerGroup shouldn't exist"); }
// Recursively expands ${...} placeholders in `strVal`. The visitedPlaceholders set
// detects circular references (a placeholder seen twice on the current resolution
// path raises ERR_CONFIG); placeholder names and resolved values are themselves
// re-parsed so nested placeholders work. Unresolvable or failing lookups raise
// SeataRuntimeException. An unterminated "${" leaves the remainder untouched.
protected String parseStringValue(String strVal, Set<String> visitedPlaceholders) { StringBuffer buf = new StringBuffer(strVal); int startIndex = strVal.indexOf(DEFAULT_PLACEHOLDER_PREFIX); while (startIndex != -1) { int endIndex = findPlaceholderEndIndex(buf, startIndex); if (endIndex != -1) { String placeholder = buf.substring(startIndex + DEFAULT_PLACEHOLDER_PREFIX.length(), endIndex); if (!visitedPlaceholders.add(placeholder)) { throw new SeataRuntimeException(ErrorCode.ERR_CONFIG, "Duplicate placeholders exist '" + placeholder + "' in bundle."); } placeholder = parseStringValue(placeholder, visitedPlaceholders); try { String propVal = resolvePlaceholder(placeholder); if (propVal != null) { propVal = parseStringValue(propVal, visitedPlaceholders); buf.replace(startIndex, endIndex + DEFAULT_PLACEHOLDER_SUFFIX.length(), propVal); startIndex = buf.indexOf(DEFAULT_PLACEHOLDER_PREFIX, startIndex + propVal.length()); } else { throw new SeataRuntimeException(ErrorCode.ERR_CONFIG, "Could not resolve placeholder '" + placeholder + "'"); } } catch (Exception ex) { throw new SeataRuntimeException(ErrorCode.ERR_CONFIG, "Could not resolve placeholder '" + placeholder + "'"); } visitedPlaceholders.remove(placeholder); } else { startIndex = -1; } } return buf.toString(); }
@Test void parseStringValue() { ResourceBundleUtil resourceBundleUtil = ResourceBundleUtil.getInstance(); String strVal = "str val without placeholder"; String parseValue = resourceBundleUtil.parseStringValue(strVal, new HashSet<>()); Assertions.assertEquals(strVal, parseValue); strVal = "str val without placeholder ${"; parseValue = resourceBundleUtil.parseStringValue(strVal, new HashSet<>()); Assertions.assertEquals(strVal, parseValue); strVal = "str val without placeholder }"; parseValue = resourceBundleUtil.parseStringValue(strVal, new HashSet<>()); Assertions.assertEquals(strVal, parseValue); final String strValWithEmptyPlaceHolder = "str val with placeholder ${}"; Assertions.assertThrows(SeataRuntimeException.class, new Executable() { @Override public void execute() throws Throwable { resourceBundleUtil.parseStringValue(strValWithEmptyPlaceHolder, new HashSet<>()); } }, "Could not resolve placeholder 'str val with placeholder ${}'"); String strValWithPlaceHolder = "str val with placeholder ${ERR_CONFIG}"; Set<String> holderSet = new HashSet<>(); parseValue = resourceBundleUtil.parseStringValue(strValWithPlaceHolder, holderSet); Assertions.assertEquals("str val with placeholder config error, {0}", parseValue); Assertions.assertEquals(0, holderSet.size()); String multiSamePlaceHolder = "str val with placeholder ${ERR_CONFIG},${ERR_CONFIG}"; parseValue = resourceBundleUtil.parseStringValue(multiSamePlaceHolder, holderSet); Assertions.assertEquals("str val with placeholder config error, {0},config error, {0}", parseValue); final String strValWithEmptyPlaceHolderValue = "str val with placeholder ${ERR_NOT_EXIST}"; Assertions.assertDoesNotThrow(new Executable() { @Override public void execute() throws Throwable { Set<String> placeholderSet = new HashSet<>(); resourceBundleUtil.parseStringValue(strValWithEmptyPlaceHolderValue, placeholderSet); Assertions.assertEquals(0, placeholderSet.size()); } }); final String strValWithNestPlaceHolderValue = "str val with 
placeholder ${${${ERROR_LOOP}}}"; Set<String> placeholderSet = new HashSet<>(); parseValue = resourceBundleUtil.parseStringValue(strValWithNestPlaceHolderValue, new HashSet<>()); Assertions.assertEquals("str val with placeholder ERROR_LOOP", parseValue); Assertions.assertEquals(0, placeholderSet.size()); String strValWithNestPlaceHolder = "str val with placeholder ${${ERR_NEST2}}"; parseValue = resourceBundleUtil.parseStringValue(strValWithNestPlaceHolder, new HashSet<>()); Assertions.assertEquals("str val with placeholder ERR NEST TEST", parseValue); Assertions.assertEquals(0, placeholderSet.size()); }
// Resolves an ENS name to an Ethereum address. Blank input or a lone "." yields null.
// For valid ENS names: when the resolver supports ENSIP-10 wildcard resolution, the
// lookup goes through resolve(dnsEncodedName, encodedAddrCall) and any offchain
// (CCIP-read) indirection is followed up to LOOKUP_LIMIT; otherwise the plain
// addr(namehash) call is used. A result that is not a valid address raises
// EnsResolutionException; strings that are not ENS names are returned unchanged
// (assumed to already be addresses). All failures are wrapped in EnsResolutionException.
public String resolve(String ensName) { if (Strings.isBlank(ensName) || (ensName.trim().length() == 1 && ensName.contains("."))) { return null; } try { if (isValidEnsName(ensName, addressLength)) { OffchainResolverContract resolver = obtainOffchainResolver(ensName); boolean supportWildcard = resolver.supportsInterface(EnsUtils.ENSIP_10_INTERFACE_ID).send(); byte[] nameHash = NameHash.nameHashAsBytes(ensName); String resolvedName; if (supportWildcard) { String dnsEncoded = NameHash.dnsEncode(ensName); String addrFunction = resolver.addr(nameHash).encodeFunctionCall(); String lookupDataHex = resolver.resolve( Numeric.hexStringToByteArray(dnsEncoded), Numeric.hexStringToByteArray(addrFunction)) .send(); resolvedName = resolveOffchain(lookupDataHex, resolver, LOOKUP_LIMIT); } else { try { resolvedName = resolver.addr(nameHash).send(); } catch (Exception e) { throw new RuntimeException("Unable to execute Ethereum request: ", e); } } if (!WalletUtils.isValidAddress(resolvedName)) { throw new EnsResolutionException( "Unable to resolve address for name: " + ensName); } else { return resolvedName; } } else { return ensName; } } catch (Exception e) { throw new EnsResolutionException(e); } }
@Test
public void testResolveEnsNameEmptyOrDot() throws Exception {
    // Blank input and bare-dot names must resolve to null rather than fail.
    String[] degenerateNames = {" ", "", ".", " . "};
    for (String name : degenerateNames) {
        assertNull(ensResolver.resolve(name));
    }
}
public static InstrumentedExecutorService newFixedThreadPool(int nThreads, MetricRegistry registry, String name) {
    // Wrap a standard fixed-size pool so task metrics are reported under `name`.
    ExecutorService delegate = Executors.newFixedThreadPool(nThreads);
    return new InstrumentedExecutorService(delegate, registry, name);
}
// Exercises the 4-arg newFixedThreadPool overload (with a ThreadFactory): verifies
// the submit metric is recorded under the given name and, via reflection on the
// private delegate, that pool sizes and the thread factory are wired through.
@Test public void testNewFixedThreadPoolWithThreadFactoryAndName() throws Exception { final ExecutorService executorService = InstrumentedExecutors.newFixedThreadPool(2, defaultThreadFactory, registry, "xs"); executorService.submit(new NoopRunnable()); assertThat(registry.meter("xs.submitted").getCount()).isEqualTo(1L); final Field delegateField = InstrumentedExecutorService.class.getDeclaredField("delegate"); delegateField.setAccessible(true); final ThreadPoolExecutor delegate = (ThreadPoolExecutor) delegateField.get(executorService); assertThat(delegate.getCorePoolSize()).isEqualTo(2); assertThat(delegate.getMaximumPoolSize()).isEqualTo(2); assertThat(delegate.getThreadFactory()).isSameAs(defaultThreadFactory); executorService.shutdown(); }
public static Criterion matchTcpFlags(int flags) {
    // Build a criterion that matches the given TCP flags bitmask.
    final TcpFlagsCriterion criterion = new TcpFlagsCriterion(flags);
    return criterion;
}
// The factory must produce a TCP_FLAGS criterion carrying the exact flags value.
@Test public void testMatchTcpFlagsMethod() { Criterion matchTcpFlag = Criteria.matchTcpFlags(tcpFlags1); TcpFlagsCriterion tcpFlagsCriterion = checkAndConvert(matchTcpFlag, Criterion.Type.TCP_FLAGS, TcpFlagsCriterion.class); assertThat(tcpFlagsCriterion.flags(), is(equalTo(tcpFlags1))); }
// Facade fetch: unwraps the inner store's timestamped value, propagating null
// when no value exists for the key at the given time.
@Override public V fetch(final K key, final long time) { return getValueOrNull(inner.fetch(key, time)); }
// The facade's Instant-based single-key fetch must strip timestamps, yielding plain
// (timestamp, value) pairs from the mocked timestamped iterator.
@Test public void shouldReturnPlainKeyValuePairsOnSingleKeyFetchInstantParameters() { when(mockedWindowTimestampIterator.next()) .thenReturn(KeyValue.pair(21L, ValueAndTimestamp.make("value1", 22L))) .thenReturn(KeyValue.pair(42L, ValueAndTimestamp.make("value2", 23L))); when(mockedWindowTimestampStore.fetch("key1", Instant.ofEpochMilli(21L), Instant.ofEpochMilli(42L))) .thenReturn(mockedWindowTimestampIterator); final WindowStoreIterator<String> iterator = readOnlyWindowStoreFacade.fetch("key1", Instant.ofEpochMilli(21L), Instant.ofEpochMilli(42L)); assertThat(iterator.next(), is(KeyValue.pair(21L, "value1"))); assertThat(iterator.next(), is(KeyValue.pair(42L, "value2"))); }
private void send(ByteBuffer buffer) throws IOException {
    // Route through the SSL channel when TLS is active, otherwise write straight
    // to the underlying socket; in either case record that data was sent.
    if (sslChannel == null) {
        realNetSend(buffer);
    } else {
        sslChannel.write(buffer);
    }
    isSend = true;
}
// Uses a JMockit Expectations block to fake channel.write (consuming the whole
// buffer and reporting its length), then verifies sendOnePacket handles both a
// small packet and one exceeding the MySQL max-packet boundary without error.
@Test public void testSend() throws IOException { // mock new Expectations() { { channel.write((ByteBuffer) any); minTimes = 0; result = new Delegate() { int fakeWrite(ByteBuffer buffer) { int writeLen = 0; writeLen += buffer.remaining(); buffer.position(buffer.limit()); return writeLen; } }; } }; MysqlChannel channel1 = new MysqlChannel(channel); ByteBuffer buf = ByteBuffer.allocate(1000); channel1.sendOnePacket(buf); buf = ByteBuffer.allocate(0xffffff0); channel1.sendOnePacket(buf); }
// Re-keys the table only when required: returns `this` when no repartition is forced,
// the key format is unchanged, and the key expression already matches. Multi-column
// keys, genuine repartitions of a TABLE source, and windowed sources are all rejected
// (tables only support key-format changes that are semantically key-preserving, e.g.
// for joins). Otherwise builds a tableSelectKey step with a sanitized internal key
// format and wraps it in a new SchemaKTable.
@Override public SchemaKTable<K> selectKey( final FormatInfo valueFormat, final List<Expression> keyExpression, final Optional<KeyFormat> forceInternalKeyFormat, final Stacker contextStacker, final boolean forceRepartition ) { final boolean repartitionNeeded = repartitionNeeded(keyExpression); final boolean keyFormatChange = forceInternalKeyFormat.isPresent() && !forceInternalKeyFormat.get().equals(keyFormat); if (!forceRepartition && !keyFormatChange && !repartitionNeeded) { return this; } if (schema.key().size() > 1) { // let's throw a better error message in the case of multi-column tables throw new UnsupportedOperationException("Cannot repartition a TABLE source. If this is " + "a join, joins on tables with multiple columns is not yet supported."); } // Table repartitioning is only supported for internal use in enabling joins // where we know that the key will be semantically equivalent, but may be serialized // differently (thus ensuring all keys are routed to the same partitions) if (repartitionNeeded) { throw new UnsupportedOperationException("Cannot repartition a TABLE source. " + "If this is a join, make sure that the criteria uses the TABLE's key column " + Iterables.getOnlyElement(schema.key()).name().text() + " instead of " + keyExpression); } if (keyFormat.isWindowed()) { final String errorMsg = "Implicit repartitioning of windowed sources is not supported. " + "See https://github.com/confluentinc/ksql/issues/4385."; final String additionalMsg = forceRepartition ? " As a result, ksqlDB does not support joins on windowed sources with " + "Schema-Registry-enabled key formats (AVRO, JSON_SR, PROTOBUF) at this time. " + "Please repartition your sources to use a different key format before performing " + "the join." 
: ""; throw new KsqlException(errorMsg + additionalMsg); } final KeyFormat newKeyFormat = SerdeFeaturesFactory.sanitizeKeyFormat( forceInternalKeyFormat.orElse(keyFormat), toSqlTypes(keyExpression), false // logical schema changes are not supported ); final ExecutionStep<KTableHolder<K>> step = ExecutionStepFactory.tableSelectKey( contextStacker, sourceTableStep, InternalFormats.of(newKeyFormat, valueFormat), keyExpression ); return new SchemaKTable<>( step, resolveSchema(step), newKeyFormat, ksqlConfig, functionRegistry ); }
// Forcing a repartition on a non-key column of a TABLE must be rejected with
// UnsupportedOperationException.
@Test public void shouldFailSelectKeyForceRepartitionOnNonKeyColumn() { // Given: final String selectQuery = "SELECT col0, col2, col3 FROM test2 WHERE col0 > 100 EMIT CHANGES;"; final PlanNode logicalPlan = buildLogicalPlan(selectQuery); initialSchemaKTable = buildSchemaKTableFromPlan(logicalPlan); // When: final UnsupportedOperationException e = assertThrows( UnsupportedOperationException.class, () -> initialSchemaKTable.selectKey( valueFormat.getFormatInfo(), ImmutableList.of(new UnqualifiedColumnReferenceExp(ColumnName.of("COL1"))), Optional.empty(), childContextStacker, true )); // Then: assertThat(e.getMessage(), containsString("Cannot repartition a TABLE source.")); }
// Streams all scored entries of the sorted set via SCAN, with no pattern filter
// and a batch size of 10 elements per round trip.
public Flowable<ScoredEntry<V>> entryIterator() { return entryScanIteratorReactive(null, 10); }
// Iterating the scored sorted set must yield all entries in score order.
@Test public void testEntryIterator() { RScoredSortedSetRx<String> set = redisson.getScoredSortedSet("simple"); sync(set.add(1.1, "v1")); sync(set.add(1.2, "v2")); sync(set.add(1.3, "v3")); Iterator<ScoredEntry<String>> iter = toIterator(set.entryIterator()); assertThat(iter).toIterable().containsExactly(new ScoredEntry<>(1.1, "v1"), new ScoredEntry<>(1.2, "v2"), new ScoredEntry<>(1.3, "v3")); }
@Override
public ContinuousEnumerationResult planSplits(IcebergEnumeratorPosition lastPosition) {
    // Refresh first so planning always sees the latest table metadata; a null
    // position means this is the initial discovery, otherwise plan incrementally.
    table.refresh();
    return lastPosition == null
        ? discoverInitialSplits()
        : discoverIncrementalSplits(lastPosition);
}
// Starting from a snapshot timestamp on an empty table must fail with a clear
// "Cannot find a snapshot after" IllegalArgumentException.
@Test public void testIncrementalFromSnapshotTimestampWithEmptyTable() { ScanContext scanContextWithInvalidSnapshotId = ScanContext.builder() .startingStrategy(StreamingStartingStrategy.INCREMENTAL_FROM_SNAPSHOT_TIMESTAMP) .startSnapshotTimestamp(1L) .build(); ContinuousSplitPlannerImpl splitPlanner = new ContinuousSplitPlannerImpl( TABLE_RESOURCE.tableLoader().clone(), scanContextWithInvalidSnapshotId, null); assertThatThrownBy(() -> splitPlanner.planSplits(null)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot find a snapshot after: 1"); }
// Returns the stored exception, reconstructing it reflectively from the recorded
// type/message (and optional cause type/message) when only metadata is available.
// The reconstructed cause gets an empty stack trace; the four constructor shapes
// (message+cause / cause / message / no-arg) are tried according to which pieces
// of metadata are present. Reflection failures surface as IllegalStateException.
public Exception getException() { if (exception != null) return exception; try { final Class<? extends Exception> exceptionClass = ReflectionUtils.toClass(getExceptionType()); if (getExceptionCauseType() != null) { final Class<? extends Exception> exceptionCauseClass = ReflectionUtils.toClass(getExceptionCauseType()); final Exception exceptionCause = getExceptionCauseMessage() != null ? ReflectionUtils.newInstanceCE(exceptionCauseClass, getExceptionCauseMessage()) : ReflectionUtils.newInstanceCE(exceptionCauseClass); exceptionCause.setStackTrace(new StackTraceElement[]{}); return getExceptionMessage() != null ? ReflectionUtils.newInstanceCE(exceptionClass, getExceptionMessage(), exceptionCause) : ReflectionUtils.newInstanceCE(exceptionClass, exceptionCause); } else { return getExceptionMessage() != null ? ReflectionUtils.newInstanceCE(exceptionClass, getExceptionMessage()) : ReflectionUtils.newInstanceCE(exceptionClass); } } catch (ReflectiveOperationException e) { throw new IllegalStateException("Could not reconstruct exception for class " + getExceptionType() + " and message " + getExceptionMessage(), e); } }
// A failure whose exception carries no message must be reconstructed with a null message.
@Test void getExceptionWithoutMessage() { final FailedState failedState = new FailedState("JobRunr message", new CustomException()); assertThat(failedState.getException()) .isInstanceOf(CustomException.class) .hasMessage(null); }
public static String fix(final String raw) {
    // Null/blank input passes through untouched; otherwise expand the known
    // DRL macros (e.g. insert -> drools.insert) via a MacroProcessor.
    if (raw == null || raw.trim().isEmpty()) {
        return raw;
    }
    final MacroProcessor processor = new MacroProcessor();
    processor.setMacros(macros);
    return processor.parse(raw);
}
// A bare insert(...) call must be rewritten to drools.insert(...) while the rest
// of the code (including surrounding statements) is preserved.
@Test public void testAssert2() { final String raw = "some code; insert( new String(\"foo\") );\n More();"; final String result = "some code; drools.insert( new String(\"foo\") );\n More();"; assertEqualsIgnoreWhitespace( result, KnowledgeHelperFixerTest.fixer.fix( raw ) ); }