focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Sets the fixed-width field lengths used when parsing/writing records.
 *
 * @param fieldLengths width of each field, in order
 * @return {@code this} for fluent configuration chaining
 *
 * NOTE(review): stores the caller's array reference directly (no defensive
 * copy), so later mutation of the array affects this format — confirm intended.
 */
public UniVocityFixedDataFormat setFieldLengths(int[] fieldLengths) {
    this.fieldLengths = fieldLengths;
    return this;
}
// Verifies that setNumberOfRecordsToRead is stored on the data format and is
// propagated into the uniVocity parser settings it creates.
@Test
public void shouldConfigureNumberOfRecordsToRead() {
    UniVocityFixedDataFormat dataFormat = new UniVocityFixedDataFormat()
            .setFieldLengths(new int[] { 1, 2, 3 })
            .setNumberOfRecordsToRead(42);
    assertEquals(Integer.valueOf(42), dataFormat.getNumberOfRecordsToRead());
    assertEquals(42, dataFormat.createAndConfigureParserSettings().getNumberOfRecordsToRead());
}
/**
 * Extracts the element id from a DMN element reference href.
 * <p>
 * A local reference ("#id") yields the bare id; otherwise the root
 * Definitions' namespace prefix ("namespace#") is stripped from the href.
 * Note: when the href carries a namespace different from the root element's,
 * the replace is a no-op and the full href is returned unchanged.
 */
public static String getId(DMNElementReference er) {
    String href = er.getHref();
    if (href.startsWith("#")) {
        // Local reference within the same document.
        return href.substring(1);
    } else {
        Definitions rootElement = getRootElement(er);
        String toRemove = String.format("%s#", rootElement.getNamespace());
        return href.replace(toRemove, "");
    }
}
// getId should strip the root element's "namespace#" prefix, but return the
// href unchanged when it uses a different namespace.
@Test
void getId() {
    String localPart = "reference";
    DMNElementReference elementReference = new TDMNElementReference();
    elementReference.setHref(String.format("%s#%s", nameSpace, localPart));
    elementReference.setParent(parent);
    String retrieved = DMNCompilerImpl.getId(elementReference);
    assertThat(retrieved).isNotNull().isEqualTo(localPart);
    // Foreign namespace: nothing is stripped, the full href comes back.
    String expected = String.format("%s#%s", "http://a-different-namespace", localPart);
    elementReference.setHref(expected);
    retrieved = DMNCompilerImpl.getId(elementReference);
    assertThat(retrieved).isNotNull().isEqualTo(expected);
}
/**
 * Starts a task builder over the given items.
 *
 * @param items the items to process; must not be null
 * @return a new Builder wrapping the items
 * @throws NullPointerException if {@code items} is null
 */
public static <I> Builder<I> foreach(Iterable<I> items) {
    return new Builder<>(requireNonNull(items, "items"));
}
// Exercises the suppress-exceptions + stop-on-failure + revert/abort path:
// after the failing task, committed items must be reverted, uncommitted items
// aborted, and exactly one failure callback fired.
@Test
public void testRevertAllSuppressed() throws Throwable {
    CounterTask failLast = new CounterTask("task", ITEM_COUNT, Item::commit);
    assertFailed(builder()
        .suppressExceptions()
        .stopOnFailure()
        .revertWith(reverter)
        .abortWith(aborter)
        .onFailure(failures), failLast);
    failLast.assertInvoked("success", ITEM_COUNT);
    // Every item is accounted for: the failing one, aborted, or reverted.
    int abCount = aborter.getCount();
    int revCount = reverter.getCount();
    assertEquals(ITEM_COUNT, 1 + abCount + revCount);
    // identify which task failed from the set
    int failing = failures.getItem().id;
    // all committed were reverted
    items.stream()
        .filter(i -> i.id != failing)
        .filter(i -> i.committed)
        .forEach(Item::assertReverted);
    items.stream()
        .filter(i -> i.id != failing)
        .filter(i -> !i.committed)
        .forEach(Item::assertAborted);
    // all reverted items are committed
    items.stream().filter(i -> i.reverted)
        .forEach(Item::assertCommitted);
    // only one failure was triggered
    failures.assertInvoked("failure event", 1);
}
/**
 * Builds Kafka producer properties from the connector options, then layers
 * key (isKey=true) and value (isKey=false) serializer settings derived from
 * the configured formats and schemas on top of the base properties.
 */
static Properties resolveProducerProperties(Map<String, String> options, Object keySchema, Object valueSchema) {
    Properties properties = from(options);
    withSerdeProducerProperties(true, options, keySchema, properties);
    withSerdeProducerProperties(false, options, valueSchema, properties);
    return properties;
}
// Producer properties for the Avro format must register the Hazelcast Avro
// serializer and propagate the schema, independently for key and value.
@Test
public void test_producerProperties_avro() {
    // key
    assertThat(PropertiesResolver.resolveProducerProperties(Map.of(
        OPTION_KEY_FORMAT, AVRO_FORMAT
    ), DUMMY_SCHEMA, null)).containsExactlyInAnyOrderEntriesOf(Map.of(
        KEY_SERIALIZER, HazelcastKafkaAvroSerializer.class.getCanonicalName(),
        OPTION_KEY_AVRO_SCHEMA, DUMMY_SCHEMA
    ));

    // value
    assertThat(PropertiesResolver.resolveProducerProperties(Map.of(
        OPTION_KEY_FORMAT, UNKNOWN_FORMAT,
        OPTION_VALUE_FORMAT, AVRO_FORMAT
    ), null, DUMMY_SCHEMA)).containsExactlyInAnyOrderEntriesOf(Map.of(
        VALUE_SERIALIZER, HazelcastKafkaAvroSerializer.class.getCanonicalName(),
        OPTION_VALUE_AVRO_SCHEMA, DUMMY_SCHEMA
    ));
}
@ExecuteOn(TaskExecutors.IO) @Get(uri = "{namespace}/files/stats") @Operation(tags = {"Files"}, summary = "Get namespace file stats such as size, creation & modification dates and type") public FileAttributes stats( @Parameter(description = "The namespace id") @PathVariable String namespace, @Parameter(description = "The internal storage uri") @Nullable @QueryValue URI path ) throws IOException, URISyntaxException { forbiddenPathsGuard(path); // if stats is performed upon namespace root, and it doesn't exist yet, we create it if (path == null) { if(!storageInterface.exists(tenantService.resolveTenant(), NamespaceFile.of(namespace).uri())) { storageInterface.createDirectory(tenantService.resolveTenant(), NamespaceFile.of(namespace).uri()); } return storageInterface.getAttributes(tenantService.resolveTenant(), NamespaceFile.of(namespace).uri()); } return storageInterface.getAttributes(tenantService.resolveTenant(), NamespaceFile.of(namespace, path).uri()); }
// The stats endpoint should report file name and File type for an existing
// namespace file.
@Test
void stats() throws IOException {
    String hw = "Hello World";
    storageInterface.put(null, toNamespacedStorageUri(NAMESPACE, URI.create("/test.txt")),
        new ByteArrayInputStream(hw.getBytes()));

    FileAttributes res = client.toBlocking().retrieve(
        HttpRequest.GET("/api/v1/namespaces/" + NAMESPACE + "/files/stats?path=/test.txt"),
        TestFileAttributes.class);
    assertThat(res.getFileName(), is("test.txt"));
    assertThat(res.getType(), is(FileAttributes.FileType.File));
}
/**
 * Stops child services in reverse order of their start.
 *
 * @param numOfServicesStarted number of services that were started; only
 *        indexes below this value are considered for stopping
 * @param stopOnlyStartedServices if true only STARTED services are stopped;
 *        otherwise INITED services are stopped as well
 * @throws ServiceStateException wrapping the first stop failure, raised only
 *         after every service has been given the chance to stop
 */
private void stop(int numOfServicesStarted, boolean stopOnlyStartedServices) {
    // stop in reverse order of start
    Exception firstException = null;
    List<Service> services = getServices();
    for (int i = numOfServicesStarted - 1; i >= 0; i--) {
        Service service = services.get(i);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Stopping service #" + i + ": " + service);
        }
        STATE state = service.getServiceState();
        //depending on the stop policy
        if (state == STATE.STARTED || (!stopOnlyStartedServices && state == STATE.INITED)) {
            // Stop quietly so one failing child doesn't prevent stopping the
            // rest; remember only the first exception for the final rethrow.
            Exception ex = ServiceOperations.stopQuietly(LOG, service);
            if (ex != null && firstException == null) {
                firstException = ex;
            }
        }
    }
    //after stopping all services, rethrow the first exception raised
    if (firstException != null) {
        throw ServiceStateException.convert(firstException);
    }
}
// A child added while already STARTED must still be stopped with the parent.
@Test(timeout = 10000)
public void testAddStartedChildInStart() throws Throwable {
    CompositeService parent = new CompositeService("parent");
    BreakableService child = new BreakableService();
    child.init(new Configuration());
    child.start();
    parent.init(new Configuration());
    parent.start();
    AddSiblingService.addChildToService(parent, child);
    assertInState(STATE.STARTED, child);
    parent.stop();
    assertInState(STATE.STOPPED, child);
}
/**
 * Batch GET: looks up each requested photo id; found photos go into the
 * result map and missing ids are reported as per-key 404 errors.
 *
 * @param ids the photo ids to fetch
 * @return a BatchResult pairing found photos with per-id errors
 */
@Override
public BatchResult<Long, Photo> batchGet(Set<Long> ids) {
    Map<Long, Photo> result = new HashMap<>();
    Map<Long, RestLiServiceException> errors = new HashMap<>();
    for (Long key : ids) {
        // Look the photo up once (the original called get(key) twice:
        // once for the null check and again for the value).
        Photo photo = get(key);
        if (photo != null) {
            result.put(key, photo);
        } else {
            errors.put(key, new RestLiServiceException(HttpStatus.S_404_NOT_FOUND,
                "No photo with id=" + key + " has been found."));
        }
    }
    return new BatchResult<>(result, errors);
}
// Batch GET of two of three created photos must return exactly those photos
// with all nested EXIF/location fields intact.
@Test
public void testBatchGet() {
    final String[] titles = {"1","2","3"};
    final long[] ids = new long[titles.length];
    for (int i = 0; i < titles.length; i++)
        ids[i] = createPhoto(titles[i]);

    // validate all data are correct
    Set<Long> batchIds = new HashSet<>();
    batchIds.add(ids[1]);
    batchIds.add(ids[2]);
    Map<Long, Photo> batchPhotos = _res.batchGet(batchIds);
    Assert.assertEquals(batchPhotos.size(), 2);
    for (int i = 1; i < titles.length; i++) // go through {1,2}
    {
        final Photo p = batchPhotos.get(ids[i]);
        Assert.assertNotNull(p);
        Assert.assertEquals(p.getTitle(), titles[i]);
        Assert.assertEquals(p.getId().longValue(), ids[i]);
        Assert.assertTrue(p.hasExif());
        final EXIF e = p.getExif();
        Assert.assertTrue(e.hasLocation());
        final LatLong l = e.getLocation();
        Assert.assertEquals(l.getLatitude(), 7.0f);
        Assert.assertEquals(l.getLongitude(), 27.0f);
    }
}
/**
 * Snapshots the assignments made since the last checkpoint under the given
 * checkpoint id, then starts an empty bucket for subsequent assignments.
 */
public void onCheckpoint(long checkpointId) throws Exception {
    // Include the uncheckpointed assignments to the snapshot.
    assignmentsByCheckpointId.put(checkpointId, uncheckpointedAssignments);
    // Fresh map so post-checkpoint assignments accumulate separately.
    uncheckpointedAssignments = new HashMap<>();
}
// After onCheckpoint, pending assignments move under the checkpoint id and
// the uncheckpointed bucket is emptied.
@Test
void testOnCheckpoint() throws Exception {
    final long checkpointId = 123L;
    SplitAssignmentTracker<MockSourceSplit> tracker = new SplitAssignmentTracker<>();
    tracker.recordSplitAssignment(getSplitsAssignment(3, 0));

    // Serialize
    tracker.onCheckpoint(checkpointId);

    // Verify the uncheckpointed assignments.
    assertThat(tracker.uncheckpointedAssignments()).isEmpty();

    // verify assignments put into the checkpoints.
    Map<Long, Map<Integer, LinkedHashSet<MockSourceSplit>>> assignmentsByCheckpoints =
        tracker.assignmentsByCheckpointId();
    assertThat(assignmentsByCheckpoints.size()).isOne();

    Map<Integer, LinkedHashSet<MockSourceSplit>> assignmentForCheckpoint =
        assignmentsByCheckpoints.get(checkpointId);
    assertThat(assignmentForCheckpoint).isNotNull();

    verifyAssignment(Arrays.asList("0"), assignmentForCheckpoint.get(0));
    verifyAssignment(Arrays.asList("1", "2"), assignmentForCheckpoint.get(1));
    verifyAssignment(Arrays.asList("3", "4", "5"), assignmentForCheckpoint.get(2));
}
/**
 * Returns the given metric's measures over the component's direct children.
 * The Iterable is lazy: the stream is rebuilt on each iteration, and
 * children lacking a measure for the metric are skipped (nulls filtered).
 */
@Override
public Iterable<Measure> getChildrenMeasures(String metric) {
    // Fails fast when the metric is not declared as an input metric.
    validateInputMetric(metric);
    return () -> internalComponent.getChildren().stream()
        .map(new ComponentToMeasure(metricRepository.getByKey(metric)))
        .map(ToMeasureAPI.INSTANCE)
        .filter(Objects::nonNull)
        .iterator();
}
// Children measures of the project should expose both file-level NCLOC values.
@Test
public void get_children_measures() {
    measureRepository.addRawMeasure(FILE_1_REF, NCLOC_KEY, newMeasureBuilder().create(10));
    measureRepository.addRawMeasure(FILE_2_REF, NCLOC_KEY, newMeasureBuilder().create(12));

    MeasureComputerContextImpl underTest = newContext(PROJECT_REF, NCLOC_KEY, COMMENT_LINES_KEY);

    assertThat(underTest.getChildrenMeasures(NCLOC_KEY)).hasSize(2);
    assertThat(underTest.getChildrenMeasures(NCLOC_KEY)).extracting("intValue").containsOnly(10, 12);
}
/**
 * Looks up the fully-qualified class name of the named servlet in web.xml.
 * Thin wrapper over getClassName for the "servlet" element kind.
 */
public static String getServletClassName( Document webXml, String servletName ) {
    return getClassName( "servlet", webXml, servletName );
}
// The servlet class declared for "dwr-invoker" in the test web.xml should be
// resolved to its fully-qualified class name.
@Test
public void testGetServletClassName() throws Exception {
    // Setup fixture.
    final Document webXml = WebXmlUtils.asDocument(
        new File(Objects.requireNonNull(
            WebXmlUtilsTest.class.getResource("/org/jivesoftware/util/test-web.xml")).toURI()));
    final String servletName = "dwr-invoker";

    // Execute system under test.
    final String result = WebXmlUtils.getServletClassName( webXml, servletName );

    // Verify result.
    assertEquals( "uk.ltd.getahead.dwr.DWRServlet", result );
}
/**
 * Heuristic for whether a request failure may stem from an expired session
 * key: a FORBIDDEN rest error observed within one minute of the initial
 * request time. Any other throwable returns false.
 */
boolean isPossibleExpiredKeyException(long initialRequestTime, Throwable error) {
    if (error instanceof ConnectRestException) {
        ConnectRestException connectError = (ConnectRestException) error;
        // FORBIDDEN status AND still inside the one-minute TTL window.
        return connectError.statusCode() == Response.Status.FORBIDDEN.getStatusCode()
            && initialRequestTime + TimeUnit.MINUTES.toMillis(1) >= time.milliseconds();
    }
    return false;
}
// Only a FORBIDDEN ConnectRestException raised within the one-minute key TTL
// window counts as a possibly-expired-key failure.
@Test
public void testKeyExceptionDetection() {
    // Wrong exception type.
    assertFalse(herder.isPossibleExpiredKeyException(
        time.milliseconds(),
        new RuntimeException()
    ));
    // Rest exception, but not FORBIDDEN.
    assertFalse(herder.isPossibleExpiredKeyException(
        time.milliseconds(),
        new BadRequestException("")
    ));
    // FORBIDDEN but outside the one-minute window.
    assertFalse(herder.isPossibleExpiredKeyException(
        time.milliseconds() - TimeUnit.MINUTES.toMillis(2),
        new ConnectRestException(FORBIDDEN.getStatusCode(), "")
    ));
    // FORBIDDEN and recent: a possible expired key.
    assertTrue(herder.isPossibleExpiredKeyException(
        time.milliseconds(),
        new ConnectRestException(FORBIDDEN.getStatusCode(), "")
    ));
}
/**
 * Builds the full docker exec command line:
 * "&lt;base command&gt; &lt;container name&gt; &lt;command to run in container&gt;".
 */
@Override
public String getCommandWithArguments() {
    List<String> argList = new ArrayList<>();
    argList.add(super.getCommandWithArguments());
    argList.add(containerName);
    argList.addAll(commandInContainer);
    return StringUtils.join(argList, " ");
}
// Command line should be "<base exec command> <container> <command...>".
@Test
public void getCommandWithArguments() {
    dockerExecCommand.addExecCommand(Arrays.asList("ls", "-l"));
    assertEquals("exec container_name ls -l", dockerExecCommand.getCommandWithArguments());
}
/**
 * Queries configuration for the given app/cluster/namespace.
 * <p>
 * Flow: normalize the namespace, resolve the client ip, load the app's own
 * release (unless the appId is the no-appId placeholder), then fall back to
 * a public release when the namespace doesn't belong to the app. Responds
 * 404 when nothing is found, 304 when the merged release key matches the
 * client's, otherwise a full ApolloConfig.
 *
 * @param clientSideReleaseKey release key the client currently holds ("-1" if none)
 * @param messagesAsString serialized notification messages from the client, may be null
 */
@GetMapping(value = "/{appId}/{clusterName}/{namespace:.+}")
public ApolloConfig queryConfig(@PathVariable String appId, @PathVariable String clusterName,
                                @PathVariable String namespace,
                                @RequestParam(value = "dataCenter", required = false) String dataCenter,
                                @RequestParam(value = "releaseKey", defaultValue = "-1") String clientSideReleaseKey,
                                @RequestParam(value = "ip", required = false) String clientIp,
                                @RequestParam(value = "label", required = false) String clientLabel,
                                @RequestParam(value = "messages", required = false) String messagesAsString,
                                HttpServletRequest request, HttpServletResponse response) throws IOException {
    // Keep the namespace exactly as the client sent it for responses/telemetry.
    String originalNamespace = namespace;
    //strip out .properties suffix
    namespace = namespaceUtil.filterNamespaceName(namespace);
    //fix the character case issue, such as FX.apollo <-> fx.apollo
    namespace = namespaceUtil.normalizeNamespace(appId, namespace);

    if (Strings.isNullOrEmpty(clientIp)) {
        clientIp = WebUtils.tryToGetClientIp(request);
    }

    ApolloNotificationMessages clientMessages = transformMessages(messagesAsString);

    List<Release> releases = Lists.newLinkedList();

    String appClusterNameLoaded = clusterName;
    if (!ConfigConsts.NO_APPID_PLACEHOLDER.equalsIgnoreCase(appId)) {
        Release currentAppRelease = configService.loadConfig(appId, clientIp, clientLabel, appId,
            clusterName, namespace, dataCenter, clientMessages);

        if (currentAppRelease != null) {
            releases.add(currentAppRelease);
            //we have cluster search process, so the cluster name might be overridden
            appClusterNameLoaded = currentAppRelease.getClusterName();
        }
    }

    //if namespace does not belong to this appId, should check if there is a public configuration
    if (!namespaceBelongsToAppId(appId, namespace)) {
        Release publicRelease = this.findPublicConfig(appId, clientIp, clientLabel, clusterName,
            namespace, dataCenter, clientMessages);
        if (Objects.nonNull(publicRelease)) {
            releases.add(publicRelease);
        }
    }

    if (releases.isEmpty()) {
        response.sendError(HttpServletResponse.SC_NOT_FOUND,
            String.format("Could not load configurations with appId: %s, clusterName: %s, namespace: %s",
                appId, clusterName, originalNamespace));
        Tracer.logEvent("Apollo.Config.NotFound",
            assembleKey(appId, clusterName, originalNamespace, dataCenter));
        return null;
    }

    auditReleases(appId, clusterName, dataCenter, clientIp, releases);

    // Merge the release keys so the client can cheaply detect any change.
    String mergedReleaseKey = releases.stream().map(Release::getReleaseKey)
        .collect(Collectors.joining(ConfigConsts.CLUSTER_NAMESPACE_SEPARATOR));

    if (mergedReleaseKey.equals(clientSideReleaseKey)) {
        // Client side configuration is the same with server side, return 304
        response.setStatus(HttpServletResponse.SC_NOT_MODIFIED);
        Tracer.logEvent("Apollo.Config.NotModified",
            assembleKey(appId, appClusterNameLoaded, originalNamespace, dataCenter));
        return null;
    }

    ApolloConfig apolloConfig = new ApolloConfig(appId, appClusterNameLoaded, originalNamespace, mergedReleaseKey);
    apolloConfig.setConfigurations(mergeReleaseConfigurations(releases));

    Tracer.logEvent("Apollo.Config.Found",
        assembleKey(appId, appClusterNameLoaded, originalNamespace, dataCenter));
    return apolloConfig;
}
// Happy path: server holds a newer release key, so a full ApolloConfig is
// returned and the instance access is audited.
@Test
public void testQueryConfig() throws Exception {
    String someClientSideReleaseKey = "1";
    String someServerSideNewReleaseKey = "2";
    HttpServletResponse someResponse = mock(HttpServletResponse.class);
    when(configService.loadConfig(someAppId, someClientIp, someClientLabel, someAppId, someClusterName,
        defaultNamespaceName, someDataCenter, someNotificationMessages)).thenReturn(someRelease);
    when(someRelease.getReleaseKey()).thenReturn(someServerSideNewReleaseKey);
    when(someRelease.getNamespaceName()).thenReturn(defaultNamespaceName);

    ApolloConfig result = configController.queryConfig(someAppId, someClusterName, defaultNamespaceName,
        someDataCenter, someClientSideReleaseKey, someClientIp, someClientLabel, someMessagesAsString,
        someRequest, someResponse);

    verify(configService, times(1)).loadConfig(someAppId, someClientIp, someClientLabel, someAppId,
        someClusterName, defaultNamespaceName, someDataCenter, someNotificationMessages);
    assertEquals(someAppId, result.getAppId());
    assertEquals(someClusterName, result.getCluster());
    assertEquals(defaultNamespaceName, result.getNamespaceName());
    assertEquals(someServerSideNewReleaseKey, result.getReleaseKey());
    verify(instanceConfigAuditUtil, times(1)).audit(someAppId, someClusterName, someDataCenter,
        someClientIp, someAppId, someClusterName, defaultNamespaceName, someServerSideNewReleaseKey);
}
/**
 * Parses a media type string such as "type/subtype; param=value".
 * Rejects null/empty input (the test shows an EncodingException is raised
 * via CONTAINER.missingMediaType).
 */
@ProtoFactory
public static MediaType fromString(String tree) {
    if (tree == null || tree.isEmpty()) throw CONTAINER.missingMediaType();
    Matcher matcher = TREE_PATTERN.matcher(tree);
    // Single-type parse: lists/trees of media types are not accepted here.
    return parseSingleMediaType(tree, matcher, false);
}
// Parsing an empty string or a parameter-only string must fail: a media type
// requires a type/subtype part.
@Test
public void testParsingNoType() {
    Exceptions.expectException(EncodingException.class, () -> MediaType.fromString(""));
    Exceptions.expectException(EncodingException.class, () -> MediaType.fromString(";param=value"));
}
/**
 * Grants {@code role} to {@code username}, persisting the binding and
 * caching the role name in memory.
 *
 * @throws IllegalArgumentException if the user does not exist, or the role
 *         is the reserved global admin role (must not be created here)
 */
public void addRole(String role, String username) {
    if (userDetailsService.getUserFromDatabase(username) == null) {
        throw new IllegalArgumentException("user '" + username + "' not found!");
    }
    if (AuthConstants.GLOBAL_ADMIN_ROLE.equals(role)) {
        throw new IllegalArgumentException(
            "role '" + AuthConstants.GLOBAL_ADMIN_ROLE + "' is not permitted to create!");
    }
    rolePersistService.addRole(role, username);
    // Keep the in-memory role cache in sync with persistence.
    roleSet.add(role);
}
// Verifies addRole rejects a username that does not exist in the database.
@Test
void addRole() {
    String username = "nacos";
    User userFromDatabase = userDetailsService.getUserFromDatabase(username);
    assertNull(userFromDatabase);
    try {
        nacosRoleService.addRole("role-admin", "nacos");
        // Fix: the original test passed silently when no exception was
        // thrown; fail explicitly (AssertionError is not caught below).
        throw new AssertionError("addRole should have thrown for a non-existent user");
    } catch (Exception e) {
        assertTrue(e.getMessage().contains("user 'nacos' not found!"));
    }
}
/**
 * Maps an IOException from the SSH/SFTP layer to a typed BackgroundException.
 * Order matters: root-cause SSH errors are mapped first, then SFTP status
 * codes, then authentication/connection/buffer failures, and finally generic
 * SSH disconnect reasons; anything else is wrapped generically.
 */
@Override
public BackgroundException map(final IOException e) {
    // If the root cause is an SSH failure, map that cause directly.
    if(ExceptionUtils.getRootCause(e) != e && ExceptionUtils.getRootCause(e) instanceof SSHException) {
        return this.map((SSHException) ExceptionUtils.getRootCause(e));
    }
    final StringBuilder buffer = new StringBuilder();
    this.append(buffer, e.getMessage());
    if(ExceptionUtils.getRootCause(e) != e) {
        // Append the root cause message only when it adds new information.
        if(!StringUtils.equals(e.getMessage(), ExceptionUtils.getRootCause(e).getMessage())) {
            this.append(buffer, ExceptionUtils.getRootCause(e).getMessage());
        }
    }
    if(e instanceof SFTPException) {
        final SFTPException failure = (SFTPException) e;
        final Response.StatusCode code = failure.getStatusCode();
        switch(code) {
            case FILE_ALREADY_EXISTS:
                return new ConflictException(buffer.toString(), e);
            case NO_SUCH_FILE:
            case NO_SUCH_PATH:
            case INVALID_HANDLE:
                return new NotfoundException(buffer.toString(), e);
            case PERMISSION_DENIED:
            case WRITE_PROTECT:
            case CANNOT_DELETE:
                return new AccessDeniedException(buffer.toString(), e);
            case NO_CONNECTION:
            case CONNECITON_LOST: // [sic] constant name as declared by the SSH library
                return new ConnectionRefusedException(buffer.toString(), e);
            case NO_MEDIA:
                // No specific mapping; fall through to the handlers below.
                break;
            case NO_SPACE_ON_FILESYSTEM:
            case QUOTA_EXCEEDED:
                return new QuotaException(buffer.toString(), e);
            case LOCK_CONFLICT:
                return new LockedException(buffer.toString(), e);
            default:
                return new InteroperabilityException(buffer.toString(), e);
        }
    }
    if(e instanceof UserAuthException) {
        return new LoginFailureException(buffer.toString(), e);
    }
    if(e instanceof ConnectionException) {
        return new ConnectionRefusedException(buffer.toString(), e);
    }
    if(e instanceof Buffer.BufferException) {
        return new InteroperabilityException(buffer.toString(), e);
    }
    if(e instanceof SSHException) {
        final SSHException failure = (SSHException) e;
        final DisconnectReason reason = failure.getDisconnectReason();
        return this.map(e, buffer, reason);
    }
    return this.wrap(e, buffer);
}
// A TransportException wrapping a protocol-level SSHException should map to
// an InteroperabilityException via the root-cause path.
@Test
public void testWrapped() {
    assertEquals(InteroperabilityException.class,
        new SFTPExceptionMappingService().map(
            new TransportException(DisconnectReason.UNKNOWN,
                new SSHException(DisconnectReason.PROTOCOL_ERROR))).getClass());
}
/**
 * Opens a journal cursor over [fromKey, toKey] using the default (empty)
 * database name prefix; delegates to the prefixed overload.
 */
public static BDBJournalCursor getJournalCursor(BDBEnvironment env, long fromKey, long toKey)
        throws JournalException, JournalInconsistentException, InterruptedException {
    return getJournalCursor(env, "", fromKey, toKey);
}
// Requesting a journal range below the smallest existing database id must
// raise JournalException.
@Test(expected = JournalException.class)
public void testInvalidKeyRange(@Mocked BDBEnvironment environment) throws Exception {
    // db = [10, 12]
    // from 9,9
    new Expectations(environment) {
        {
            environment.getDatabaseNamesWithPrefix("");
            minTimes = 0;
            result = Arrays.asList(Long.valueOf(10), Long.valueOf(12));
        }
    };
    BDBJournalCursor.getJournalCursor(environment, 9, 9);
    // Unreachable when the expected exception is thrown.
    Assert.fail();
}
/**
 * Builds a range sharding condition from a BETWEEN ... AND ... predicate.
 * Literal bounds yield a closed range immediately; unresolved bounds that
 * are now()-style expressions are substituted with the current timestamp.
 * Returns empty when either bound remains unresolvable.
 */
@Override
public Optional<ShardingConditionValue> generate(final BetweenExpression predicate, final Column column,
                                                 final List<Object> params, final TimestampServiceRule timestampServiceRule) {
    ConditionValue betweenConditionValue = new ConditionValue(predicate.getBetweenExpr(), params);
    ConditionValue andConditionValue = new ConditionValue(predicate.getAndExpr(), params);
    Optional<Comparable<?>> betweenValue = betweenConditionValue.getValue();
    Optional<Comparable<?>> andValue = andConditionValue.getValue();
    // Record parameter marker positions for both bounds (when bound via ?).
    List<Integer> parameterMarkerIndexes = new ArrayList<>(2);
    betweenConditionValue.getParameterMarkerIndex().ifPresent(parameterMarkerIndexes::add);
    andConditionValue.getParameterMarkerIndex().ifPresent(parameterMarkerIndexes::add);
    if (betweenValue.isPresent() && andValue.isPresent()) {
        // safeClosed copes with bounds of different numeric types.
        return Optional.of(new RangeShardingConditionValue<>(column.getName(), column.getTableName(),
            SafeNumberOperationUtils.safeClosed(betweenValue.get(), andValue.get()), parameterMarkerIndexes));
    }
    // Single shared timestamp so both bounds agree when both are now().
    Timestamp timestamp = timestampServiceRule.getTimestamp();
    if (!betweenValue.isPresent() && ExpressionConditionUtils.isNowExpression(predicate.getBetweenExpr())) {
        betweenValue = Optional.of(timestamp);
    }
    if (!andValue.isPresent() && ExpressionConditionUtils.isNowExpression(predicate.getAndExpr())) {
        andValue = Optional.of(timestamp);
    }
    if (!betweenValue.isPresent() || !andValue.isPresent()) {
        return Optional.empty();
    }
    return Optional.of(new RangeShardingConditionValue<>(column.getName(), column.getTableName(),
        Range.closed(betweenValue.get(), andValue.get()), parameterMarkerIndexes));
}
// BETWEEN with mixed numeric types (int lower bound, long upper bound) should
// yield a safe closed range containing both values, with no parameter markers.
@SuppressWarnings("unchecked")
@Test
void assertGenerateConditionValueWithDifferentNumericType() {
    int between = 3;
    long and = 3147483647L;
    ExpressionSegment betweenSegment = new LiteralExpressionSegment(0, 0, between);
    ExpressionSegment andSegment = new LiteralExpressionSegment(0, 0, and);
    BetweenExpression value = new BetweenExpression(0, 0, null, betweenSegment, andSegment, false);
    Optional<ShardingConditionValue> shardingConditionValue =
        generator.generate(value, column, new LinkedList<>(), timestampServiceRule);
    assertTrue(shardingConditionValue.isPresent());
    RangeShardingConditionValue<Comparable<?>> rangeShardingConditionValue =
        (RangeShardingConditionValue<Comparable<?>>) shardingConditionValue.get();
    assertThat(rangeShardingConditionValue.getColumnName(), is(column.getName()));
    assertThat(rangeShardingConditionValue.getTableName(), is(column.getTableName()));
    assertTrue(SafeNumberOperationUtils.safeContains(rangeShardingConditionValue.getValueRange(), between));
    assertTrue(SafeNumberOperationUtils.safeContains(rangeShardingConditionValue.getValueRange(), and));
    assertTrue(rangeShardingConditionValue.getParameterMarkerIndexes().isEmpty());
}
/**
 * Registers every discovered health-check processor as a singleton bean and
 * collects its type into a copy of the given set.
 *
 * @param origin the already-known processor types
 * @return the known types plus the types of all registered processors
 * @throws RuntimeException if two processors declare the same type
 */
@Override
public Set<String> addProcessor(Set<String> origin) {
    Set<String> knownTypes = new HashSet<>(origin);
    for (HealthCheckProcessorV2 candidate : processors) {
        String type = candidate.getType();
        // Duplicate types are a configuration error: fail fast.
        if (knownTypes.contains(type)) {
            throw new RuntimeException(
                "More than one processor of the same type was found : [type=\"" + type + "\"]");
        }
        knownTypes.add(type);
        // Bean name is the lower-camel form of the implementation class name.
        registry.registerSingleton(lowerFirstChar(candidate.getClass().getSimpleName()), candidate);
    }
    return knownTypes;
}
// Registering processors must register each processor bean under its
// lower-camel simple class name.
@Test
void addProcessor() {
    Set<String> origin = new HashSet<>();
    origin.add("HTTP");
    healthCheckProcessorExtendV2.addProcessor(origin);
    verify(registry).registerSingleton(
        healthCheckProcessorExtendV2.lowerFirstChar(mysqlProcessor.getClass().getSimpleName()),
        mysqlProcessor);
}
/**
 * Joins the non-null elements of a collection into a single string.
 *
 * @param collection elements to join; null elements are skipped
 * @param separator inserted between consecutive appended elements; a null
 *        separator simply concatenates
 * @return the joined string, "" for an empty/all-null collection, or null
 *         when {@code collection} is null
 */
public static String join(Collection<?> collection, String separator) {
    if (collection == null) {
        return null;
    }
    StringBuilder stringBuilder = new StringBuilder();
    boolean first = true;
    for (Object element : collection) {
        if (element == null) {
            // Skip nulls entirely — they contribute neither text nor separator.
            continue;
        }
        // Fix: the original appended a separator after every non-last index,
        // producing a stray trailing separator when the last element was null
        // (e.g. ["a", null] -> "a;"). Separators now go only between
        // elements that are actually appended.
        if (!first && separator != null) {
            stringBuilder.append(separator);
        }
        stringBuilder.append(element);
        first = false;
    }
    return stringBuilder.toString();
}
// join: null collection -> null; empty or all-null -> ""; null separator
// simply concatenates the elements.
@Test
void testJoin() {
    ArrayList<Object> objects = new ArrayList<>();
    objects.add(null);

    assertNull(StringUtils.join(null, "a"));
    assertEquals(StringUtils.EMPTY, StringUtils.join(Arrays.asList(), "a"));
    assertEquals(StringUtils.EMPTY, StringUtils.join(objects, "a"));
    assertEquals("a;b;c", StringUtils.join(Arrays.asList("a", "b", "c"), ";"));
    assertEquals("abc", StringUtils.join(Arrays.asList("a", "b", "c"), null));
}
/**
 * Validates a manual partition assignment: non-empty replica list, every
 * broker registered, no duplicate brokers, and (when known) a replica count
 * matching the replication factor of previously validated partitions.
 *
 * @throws InvalidReplicaAssignmentException on any violation
 */
void validateManualPartitionAssignment(
    PartitionAssignment assignment,
    OptionalInt replicationFactor
) {
    if (assignment.replicas().isEmpty()) {
        throw new InvalidReplicaAssignmentException("The manual partition " +
            "assignment includes an empty replica list.");
    }
    // Sort a copy so duplicate broker ids become adjacent and can be
    // detected by comparing each id with its predecessor.
    List<Integer> sortedBrokerIds = new ArrayList<>(assignment.replicas());
    sortedBrokerIds.sort(Integer::compare);
    Integer prevBrokerId = null;
    for (Integer brokerId : sortedBrokerIds) {
        if (!clusterControl.brokerRegistrations().containsKey(brokerId)) {
            throw new InvalidReplicaAssignmentException("The manual partition " +
                "assignment includes broker " + brokerId + ", but no such broker is " +
                "registered.");
        }
        if (brokerId.equals(prevBrokerId)) {
            throw new InvalidReplicaAssignmentException("The manual partition " +
                "assignment includes the broker " + prevBrokerId + " more than " +
                "once.");
        }
        prevBrokerId = brokerId;
    }
    if (replicationFactor.isPresent() &&
            sortedBrokerIds.size() != replicationFactor.getAsInt()) {
        throw new InvalidReplicaAssignmentException("The manual partition " +
            "assignment includes a partition with " + sortedBrokerIds.size() +
            " replica(s), but this is not consistent with previous " +
            "partitions, which have " + replicationFactor.getAsInt() + " replica(s).");
    }
}
// Valid manual assignments: registered brokers, no duplicates, and replica
// counts consistent with the declared replication factor (or unconstrained).
@Test
public void testValidateGoodManualPartitionAssignments() {
    ReplicationControlTestContext ctx = new ReplicationControlTestContext.Builder().build();
    ctx.registerBrokers(1, 2, 3);
    ctx.replicationControl.validateManualPartitionAssignment(
        partitionAssignment(singletonList(1)), OptionalInt.of(1));
    ctx.replicationControl.validateManualPartitionAssignment(
        partitionAssignment(singletonList(1)), OptionalInt.empty());
    ctx.replicationControl.validateManualPartitionAssignment(
        partitionAssignment(asList(1, 2, 3)), OptionalInt.of(3));
    ctx.replicationControl.validateManualPartitionAssignment(
        partitionAssignment(asList(1, 2, 3)), OptionalInt.empty());
}
/**
 * Prints records from each source file to the console, optionally limited to
 * numRecords per file and projected down to the requested columns. A single
 * requested column prints the bare value rather than the whole record.
 *
 * @return 0 on success
 * @throws RuntimeException identifying the failing record and file on error
 */
@Override
public int run() throws IOException {
    Preconditions.checkArgument(sourceFiles != null && !sourceFiles.isEmpty(),
        "Missing file name");

    // Ensure all source files have the columns specified first
    Map<String, Schema> schemas = new HashMap<>();
    for (String source : sourceFiles) {
        Schema schema = getAvroSchema(source);
        schemas.put(source, Expressions.filterSchema(schema, columns));
    }

    for (String source : sourceFiles) {
        Schema projection = schemas.get(source);
        Iterable<Object> reader = openDataFile(source, projection);
        boolean threw = true;
        long count = 0;
        try {
            for (Object record : reader) {
                if (numRecords > 0 && count >= numRecords) {
                    break;
                }
                if (columns == null || columns.size() != 1) {
                    console.info(String.valueOf(record));
                } else {
                    // Single-column projection: print the bare value.
                    console.info(String.valueOf(select(projection, record, columns.get(0))));
                }
                count += 1;
            }
            threw = false;
        } catch (RuntimeException e) {
            throw new RuntimeException("Failed on record " + count + " in file " + source, e);
        } finally {
            // Close the reader; suppress close() errors when the body already threw.
            if (reader instanceof Closeable) {
                Closeables.close((Closeable) reader, threw);
            }
        }
    }

    return 0;
}
// Cat-ing the same file twice as two inputs should succeed (exit code 0).
@Test
public void testCatCommandWithMultipleInput() throws IOException {
    File file = parquetFile();
    CatCommand command = new CatCommand(createLogger(), 0);
    command.sourceFiles = Arrays.asList(file.getAbsolutePath(), file.getAbsolutePath());
    command.setConf(new Configuration());
    Assert.assertEquals(0, command.run());
}
/**
 * Dispatches the exchange to the Glance operation named in the message
 * headers (reserve/create/update/upload/get/getAll/delete).
 *
 * @throws IllegalArgumentException for an unknown operation
 */
@Override
public void process(Exchange exchange) throws Exception {
    String operation = getOperation(exchange);
    switch (operation) {
        case GlanceConstants.RESERVE:
            doReserve(exchange);
            break;
        case OpenstackConstants.CREATE:
            doCreate(exchange);
            break;
        case OpenstackConstants.UPDATE:
            doUpdate(exchange);
            break;
        case GlanceConstants.UPLOAD:
            doUpload(exchange);
            break;
        case OpenstackConstants.GET:
            doGet(exchange);
            break;
        case OpenstackConstants.GET_ALL:
            doGetAll(exchange);
            break;
        case OpenstackConstants.DELETE:
            doDelete(exchange);
            break;
        default:
            throw new IllegalArgumentException("Unsupported operation " + operation);
    }
}
// UPLOAD with only an ID header and no image-property headers must call
// upload with a null update-image argument.
@Test
public void uploadWithoutUpdatingTest() throws Exception {
    msg.setHeader(OpenstackConstants.OPERATION, GlanceConstants.UPLOAD);
    final String id = "id";
    msg.setHeader(OpenstackConstants.ID, id);
    final File file = File.createTempFile("image", ".iso");
    msg.setBody(file);

    producer.process(exchange);

    verify(imageService).upload(imageIdCaptor.capture(), payloadCaptor.capture(), imageCaptor.capture());
    assertEquals(file, payloadCaptor.getValue().getRaw());
    assertEquals(id, imageIdCaptor.getValue());
    assertNull(imageCaptor.getValue());

    final Image result = msg.getBody(Image.class);
    assertNotNull(result.getId());
    assertEqualsImages(dummyImage, result);
}
/**
 * Human-readable origin description: "&lt;material&gt; at revision &lt;revision&gt;".
 * Falls back to "NULL material" when no config repo or material is present.
 */
@Override
public String displayName() {
    String materialName = "NULL material";
    if (configRepo != null) {
        MaterialConfig repoMaterial = configRepo.getRepo();
        if (repoMaterial != null) {
            materialName = repoMaterial.getDisplayName();
        }
    }
    return String.format("%s at revision %s", materialName, revision);
}
// An empty RepoConfigOrigin has no material and no revision -> placeholders.
@Test
public void shouldShowDisplayNameWhenEmptyConfig() {
    RepoConfigOrigin repoConfigOrigin = new RepoConfigOrigin();
    assertThat(repoConfigOrigin.displayName(), is("NULL material at revision null"));
}
/**
 * Executes a read-only (eth_call-style) private contract call within the
 * configured privacy group and returns the raw result value.
 *
 * @throws ContractCallException (via assertCallNotReverted) if the call reverted
 */
@Override
public String sendCall(
        final String to, final String data, final DefaultBlockParameter defaultBlockParameter)
        throws IOException {
    final EthCall ethCall =
        besu.privCall(
                privacyGroupId.toString(),
                Transaction.createEthCallTransaction(getFromAddress(), to, data),
                defaultBlockParameter)
            .send();

    assertCallNotReverted(ethCall);
    return ethCall.getValue();
}
// A reverted priv call must surface the revert reason through a
// ContractCallException rather than returning a value.
@Test
public void sendPrivCallRevertedTest() throws IOException {
    when(response.isReverted()).thenReturn(true);
    when(response.getRevertReason()).thenReturn(OWNER_REVERT_MSG_STR);
    when(service.send(any(), any())).thenReturn(response);

    TransactionReceiptProcessor transactionReceiptProcessor =
        new PollingPrivateTransactionReceiptProcessor(
            besu, DEFAULT_POLLING_FREQUENCY, DEFAULT_POLLING_ATTEMPTS_PER_TX_HASH);

    PrivateTransactionManager besuTransactionManager =
        new PrivateTransactionManager(
            besu, credentials, transactionReceiptProcessor, ChainIdLong.NONE,
            PRIVATE_FROM, PRIVACY_GROUP_ID, RESTRICTED);

    ContractCallException thrown = assertThrows(
        ContractCallException.class,
        () -> besuTransactionManager.sendCall("", "", defaultBlockParameter));
    assertEquals(String.format(REVERT_ERR_STR, OWNER_REVERT_MSG_STR), thrown.getMessage());
}
/**
 * Deep-clones the given value, choosing the cheapest safe strategy:
 * strings are returned as-is (immutable); collections/maps whose sampled
 * element is not Serializable are cloned through JSON with a reconstructed
 * parametric type; JsonNode uses deepCopy; Serializable values use Java
 * serialization with a JSON fallback when the underlying structure turns out
 * not to be serializable.
 */
@Override
public <T> T clone(T object) {
    if (object instanceof String) {
        return object;
    } else if (object instanceof Collection) {
        Object firstElement = findFirstNonNullElement((Collection) object);
        if (firstElement != null && !(firstElement instanceof Serializable)) {
            // Rebuild the element type from the first element so Jackson can
            // round-trip the collection with correctly typed contents.
            JavaType type = TypeFactory.defaultInstance()
                .constructParametricType(object.getClass(), firstElement.getClass());
            return objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
        }
    } else if (object instanceof Map) {
        Map.Entry firstEntry = this.findFirstNonNullEntry((Map) object);
        if (firstEntry != null) {
            Object key = firstEntry.getKey();
            Object value = firstEntry.getValue();
            if (!(key instanceof Serializable) || !(value instanceof Serializable)) {
                JavaType type = TypeFactory.defaultInstance()
                    .constructParametricType(object.getClass(), key.getClass(), value.getClass());
                return (T) objectMapperWrapper.fromBytes(objectMapperWrapper.toBytes(object), type);
            }
        }
    } else if (object instanceof JsonNode) {
        return (T) ((JsonNode) object).deepCopy();
    }

    if (object instanceof Serializable) {
        try {
            return (T) SerializationHelper.clone((Serializable) object);
        } catch (SerializationException e) {
            //it is possible that object itself implements java.io.Serializable, but underlying structure does not
            //in this case we switch to the other JSON marshaling strategy which doesn't use the Java serialization
        }
    }

    return jsonClone(object);
}
// Cloning a map holding a null value alongside Serializable entries must
// produce an equal but distinct map instance.
@Test
public void should_clone_map_of_serializable_key_and_value_with_null() {
    Map<String, SerializableObject> original = new LinkedHashMap<>();
    original.put("null", null);
    original.put("key", new SerializableObject("value"));

    Object cloned = serializer.clone(original);

    assertEquals(original, cloned);
    assertNotSame(original, cloned);
}
/**
 * Sets the active farming contract and refreshes all derived state.
 *
 * @param contract the new contract produce, or {@code null} to clear it
 */
public void setContract(@Nullable Produce contract) {
    this.contract = contract;
    // Mirror the selection into persistent storage (see setStoredContract —
    // presumably config-backed; confirm against its implementation).
    setStoredContract(contract);
    // Recompute summary / crop-state for the newly selected contract.
    handleContractState();
}
// A CABBAGE contract whose guild-patch prediction reports the crop as DEAD
// still reports the overall summary as IN_PROGRESS, while exposing the dead
// crop state. NOTE(review): the method name mentions "EmptyPatch" but the
// stubbed prediction is DEAD — confirm the name matches the scenario.
@Test
public void cabbageContractCabbageDeadAndEmptyPatch() {
    final FarmingPatch patch = farmingGuildPatches.get(Varbits.FARMING_4773);
    assertNotNull(patch);
    // Stub the tracker to predict a dead cabbage crop on the guild patch.
    when(farmingTracker.predictPatch(patch))
            .thenReturn(new PatchPrediction(Produce.CABBAGE, CropState.DEAD, 0, 2, 3));
    farmingContractManager.setContract(Produce.CABBAGE);
    assertEquals(SummaryState.IN_PROGRESS, farmingContractManager.getSummary());
    assertEquals(CropState.DEAD, farmingContractManager.getContractCropState());
}
/**
 * Exports the output identified by the descriptor as a content-pack entity.
 *
 * @param entityDescriptor    descriptor whose id names the output to export
 * @param entityDescriptorIds id mapping used while building the entity
 * @return the exported entity, or empty if no such output exists
 */
@Override
public Optional<Entity> exportEntity(EntityDescriptor entityDescriptor, EntityDescriptorIds entityDescriptorIds) {
    try {
        // Look the output up by the descriptor's model id and convert it.
        final Output output = outputService.load(entityDescriptor.id().id());
        return Optional.of(exportNativeEntity(output, entityDescriptorIds));
    } catch (NotFoundException e) {
        // A missing output is an expected condition, not an error.
        LOG.debug("Couldn't find output {}", entityDescriptor, e);
        return Optional.empty();
    }
}
// Exports the STDOUT output from the MongoDB fixture and checks that the
// resulting content-pack entity carries the mapped id, the OUTPUT_V1 type,
// and the output's title/type/configuration.
@Test
@MongoDBFixtures("OutputFacadeTest.json")
public void collectEntity() {
    final EntityDescriptor descriptor = EntityDescriptor.create("5adf239e4b900a0fdb4e5197", ModelTypes.OUTPUT_V1);
    final EntityDescriptorIds entityDescriptorIds = EntityDescriptorIds.of(descriptor);
    final Optional<Entity> collectedEntity = facade.exportEntity(descriptor, entityDescriptorIds);
    assertThat(collectedEntity)
            .isPresent()
            .containsInstanceOf(EntityV1.class);
    final EntityV1 entity = (EntityV1) collectedEntity.orElseThrow(AssertionError::new);
    // The entity id must be the one allocated by the descriptor-id mapping,
    // not the raw database id.
    assertThat(entity.id()).isEqualTo(ModelId.of(entityDescriptorIds.get(descriptor).orElse(null)));
    assertThat(entity.type()).isEqualTo(ModelTypes.OUTPUT_V1);
    final OutputEntity outputEntity = objectMapper.convertValue(entity.data(), OutputEntity.class);
    assertThat(outputEntity.title()).isEqualTo(ValueReference.of("STDOUT"));
    assertThat(outputEntity.type()).isEqualTo(ValueReference.of("org.graylog2.outputs.LoggingOutput"));
    assertThat(outputEntity.configuration()).isNotEmpty();
}
/**
 * Merges a source table schema with schema parts derived from a CREATE TABLE
 * LIKE clause, honoring the per-feature merging strategies.
 *
 * @param mergingStrategies     strategy per feature (columns, watermarks, ...)
 * @param sourceSchema          schema of the referenced source table
 * @param derivedColumns        column declarations from the derived table
 * @param derivedWatermarkSpecs watermark declarations from the derived table
 * @param derivedPrimaryKey     primary-key constraint from the derived table
 * @return the merged schema
 */
public Schema mergeTables(
        Map<FeatureOption, MergingStrategy> mergingStrategies,
        Schema sourceSchema,
        List<SqlNode> derivedColumns,
        List<SqlWatermark> derivedWatermarkSpecs,
        SqlTableConstraint derivedPrimaryKey) {
    final FlinkTypeFactory typeFactory = (FlinkTypeFactory) validator.getTypeFactory();
    final SchemaBuilder builder =
            new SchemaBuilder(
                    mergingStrategies,
                    sourceSchema,
                    typeFactory,
                    dataTypeFactory,
                    validator,
                    escapeExpression);
    // Order matters: columns first so watermarks/PK can reference them.
    builder.appendDerivedColumns(mergingStrategies, derivedColumns);
    builder.appendDerivedWatermarks(mergingStrategies, derivedWatermarkSpecs);
    builder.appendDerivedPrimaryKey(derivedPrimaryKey);
    return builder.build();
}
// Merging derived columns into a source schema that already declares a
// watermark must keep the watermark intact and append the new columns after
// the source ones (computed columns get escaped, e.g. `one` + 3).
@Test
void mergeWatermarks() {
    Schema sourceSchema =
            Schema.newBuilder()
                    .column("one", DataTypes.INT())
                    .columnByExpression("two", "one +1")
                    .column("timestamp", DataTypes.TIMESTAMP())
                    .watermark("timestamp", "timestamp - INTERVAL '5' SECOND")
                    .build();
    List<SqlNode> derivedColumns =
            Arrays.asList(
                    regularColumn("three", DataTypes.INT()),
                    computedColumn("four", plus("one", "3")));
    Schema mergedSchema =
            util.mergeTables(
                    getDefaultMergingStrategies(),
                    sourceSchema,
                    derivedColumns,
                    Collections.emptyList(),
                    null);
    Schema expectedSchema =
            Schema.newBuilder()
                    .column("one", DataTypes.INT())
                    .columnByExpression("two", "one +1")
                    .column("timestamp", DataTypes.TIMESTAMP())
                    .watermark("timestamp", "timestamp - INTERVAL '5' SECOND")
                    .column("three", DataTypes.INT())
                    .columnByExpression("four", "`one` + 3")
                    .build();
    assertThat(mergedSchema).isEqualTo(expectedSchema);
}
/**
 * Fair-lock acquisition via a Lua script executed atomically on Redis.
 * <p>
 * Keys/args used by both scripts:
 * <ul>
 *   <li>KEYS[1] — the lock hash (holder thread-id → hold count)</li>
 *   <li>KEYS[2] — the FIFO list of waiting thread lock-names</li>
 *   <li>KEYS[3] — the zset mapping waiting thread → timeout score</li>
 *   <li>ARGV[1] — lease time ms, ARGV[2] — this thread's lock name</li>
 * </ul>
 * The two branches differ in ARGV[3]/ARGV[4] ordering (currentTime vs wait)
 * — each script indexes them accordingly; do not "unify" them.
 *
 * @param waitTime max wait; when &gt; 0 it overrides the default threadWaitTime
 * @param leaseTime lock lease duration
 * @param unit unit for waitTime/leaseTime
 * @param threadId acquiring thread id
 * @param command EVAL_NULL_BOOLEAN (tryLock-style) or EVAL_LONG (returns ttl)
 * @return future completing with the command's result
 */
@Override
<T> RFuture<T> tryLockInnerAsync(long waitTime, long leaseTime, TimeUnit unit, long threadId, RedisStrictCommand<T> command) {
    long wait = threadWaitTime;
    if (waitTime > 0) {
        wait = unit.toMillis(waitTime);
    }
    long currentTime = System.currentTimeMillis();
    if (command == RedisCommands.EVAL_NULL_BOOLEAN) {
        return commandExecutor.syncedEval(getRawName(), LongCodec.INSTANCE, command,
                // remove stale threads
                "while true do " +
                    "local firstThreadId2 = redis.call('lindex', KEYS[2], 0);" +
                    "if firstThreadId2 == false then " +
                        "break;" +
                    "end;" +
                    "local timeout = redis.call('zscore', KEYS[3], firstThreadId2);" +
                    "if timeout ~= false and tonumber(timeout) <= tonumber(ARGV[3]) then " +
                        // remove the item from the queue and timeout set
                        // NOTE we do not alter any other timeout
                        "redis.call('zrem', KEYS[3], firstThreadId2);" +
                        "redis.call('lpop', KEYS[2]);" +
                    "else " +
                        "break;" +
                    "end;" +
                "end;" +
                // lock is free and we are first in the queue (or queue empty):
                // take the lock and shrink everyone else's timeout by `wait`.
                "if (redis.call('exists', KEYS[1]) == 0) " +
                    "and ((redis.call('exists', KEYS[2]) == 0) " +
                        "or (redis.call('lindex', KEYS[2], 0) == ARGV[2])) then " +
                    "redis.call('lpop', KEYS[2]);" +
                    "redis.call('zrem', KEYS[3], ARGV[2]);" +
                    // decrease timeouts for all waiting in the queue
                    "local keys = redis.call('zrange', KEYS[3], 0, -1);" +
                    "for i = 1, #keys, 1 do " +
                        "redis.call('zincrby', KEYS[3], -tonumber(ARGV[4]), keys[i]);" +
                    "end;" +
                    "redis.call('hset', KEYS[1], ARGV[2], 1);" +
                    "redis.call('pexpire', KEYS[1], ARGV[1]);" +
                    "return nil;" +
                "end;" +
                // re-entrant acquire: bump the hold count and refresh the lease.
                "if (redis.call('hexists', KEYS[1], ARGV[2]) == 1) then " +
                    "redis.call('hincrby', KEYS[1], ARGV[2], 1);" +
                    "redis.call('pexpire', KEYS[1], ARGV[1]);" +
                    "return nil;" +
                "end;" +
                "return 1;",
                Arrays.asList(getRawName(), threadsQueueName, timeoutSetName),
                unit.toMillis(leaseTime), getLockName(threadId), currentTime, wait);
    }
    if (command == RedisCommands.EVAL_LONG) {
        return commandExecutor.syncedEval(getRawName(), LongCodec.INSTANCE, command,
                // remove stale threads
                "while true do " +
                    "local firstThreadId2 = redis.call('lindex', KEYS[2], 0);" +
                    "if firstThreadId2 == false then " +
                        "break;" +
                    "end;" +
                    "local timeout = redis.call('zscore', KEYS[3], firstThreadId2);" +
                    "if timeout ~= false and tonumber(timeout) <= tonumber(ARGV[4]) then " +
                        // remove the item from the queue and timeout set
                        // NOTE we do not alter any other timeout
                        "redis.call('zrem', KEYS[3], firstThreadId2);" +
                        "redis.call('lpop', KEYS[2]);" +
                    "else " +
                        "break;" +
                    "end;" +
                "end;" +
                // check if the lock can be acquired now
                "if (redis.call('exists', KEYS[1]) == 0) " +
                    "and ((redis.call('exists', KEYS[2]) == 0) " +
                        "or (redis.call('lindex', KEYS[2], 0) == ARGV[2])) then " +
                    // remove this thread from the queue and timeout set
                    "redis.call('lpop', KEYS[2]);" +
                    "redis.call('zrem', KEYS[3], ARGV[2]);" +
                    // decrease timeouts for all waiting in the queue
                    "local keys = redis.call('zrange', KEYS[3], 0, -1);" +
                    "for i = 1, #keys, 1 do " +
                        "redis.call('zincrby', KEYS[3], -tonumber(ARGV[3]), keys[i]);" +
                    "end;" +
                    // acquire the lock and set the TTL for the lease
                    "redis.call('hset', KEYS[1], ARGV[2], 1);" +
                    "redis.call('pexpire', KEYS[1], ARGV[1]);" +
                    "return nil;" +
                "end;" +
                // check if the lock is already held, and this is a re-entry
                "if redis.call('hexists', KEYS[1], ARGV[2]) == 1 then " +
                    "redis.call('hincrby', KEYS[1], ARGV[2],1);" +
                    "redis.call('pexpire', KEYS[1], ARGV[1]);" +
                    "return nil;" +
                "end;" +
                // the lock cannot be acquired
                // check if the thread is already in the queue
                "local timeout = redis.call('zscore', KEYS[3], ARGV[2]);" +
                "if timeout ~= false then " +
                    "local ttl = redis.call('pttl', KEYS[1]);" +
                    "return math.max(0, ttl); " +
                    // the real timeout is the timeout of the prior thread
                    // in the queue, but this is approximately correct, and
                    // avoids having to traverse the queue
                    // "return timeout - tonumber(ARGV[3]) - tonumber(ARGV[4]);" +
                "end;" +
                // add the thread to the queue at the end, and set its timeout in the timeout set to the timeout of
                // the prior thread in the queue (or the timeout of the lock if the queue is empty) plus the
                // threadWaitTime
                "local lastThreadId = redis.call('lindex', KEYS[2], -1);" +
                "local ttl;" +
                "if lastThreadId ~= false and lastThreadId ~= ARGV[2] then " +
                    "ttl = tonumber(redis.call('zscore', KEYS[3], lastThreadId)) - tonumber(ARGV[4]);" +
                "else " +
                    "ttl = redis.call('pttl', KEYS[1]);" +
                "end;" +
                "local timeout = ttl + tonumber(ARGV[3]) + tonumber(ARGV[4]);" +
                "if redis.call('zadd', KEYS[3], timeout, ARGV[2]) == 1 then " +
                    "redis.call('rpush', KEYS[2], ARGV[2]);" +
                "end;" +
                "return ttl;",
                Arrays.asList(getRawName(), threadsQueueName, timeoutSetName),
                unit.toMillis(leaseTime), getLockName(threadId), wait, currentTime);
    }
    // Only the two commands above are supported by the fair-lock scripts.
    throw new IllegalArgumentException();
}
@Test public void testAbandonedTimeoutDrift_Descrete() throws Exception { long leaseTime = 500; long threadWaitTime = 100; // we're testing interaction of various internal methods, so create a Redisson instance for protected access RedissonClient redisson = Redisson.create(createConfig()); RedissonFairLock lock = new RedissonFairLock( ((Redisson) redisson).getCommandExecutor(), "testAbandonedTimeoutDrift_Descrete", threadWaitTime); // clear out any prior state lock.delete(); long threadInit = 101; long threadFirstWaiter = 102; long threadSecondWaiter = 103; long threadThirdWaiter = 104; // take the lock successfully Long ttl = lock.tryLockInnerAsync(-1, leaseTime, TimeUnit.MILLISECONDS, threadInit, RedisCommands.EVAL_LONG).toCompletableFuture().join();; Assertions.assertNull(ttl); // fail to get the lock, but end up in the thread queue w/ ttl + 5s timeout Long firstTTL = lock.tryLockInnerAsync(-1, leaseTime, TimeUnit.MILLISECONDS, threadFirstWaiter, RedisCommands.EVAL_LONG).toCompletableFuture().join();; Assertions.assertNotNull(firstTTL); // fail to get the lock again, but end up in the thread queue w/ ttl + 10s timeout Long secondTTL = lock.tryLockInnerAsync(-1, leaseTime, TimeUnit.MILLISECONDS, threadSecondWaiter, RedisCommands.EVAL_LONG).toCompletableFuture().join();; Assertions.assertNotNull(secondTTL); Long thirdTTL = lock.tryLockInnerAsync(-1, leaseTime, TimeUnit.MILLISECONDS, threadThirdWaiter, RedisCommands.EVAL_LONG).toCompletableFuture().join();; Assertions.assertNotNull(thirdTTL); long diff = thirdTTL - firstTTL; Assertions.assertTrue(diff > 190 && diff < 210, "Expected 200 +/- 10 but was " + diff); Thread.sleep(thirdTTL + threadWaitTime); ttl = lock.tryLockInnerAsync(-1, leaseTime, TimeUnit.MILLISECONDS, threadThirdWaiter, RedisCommands.EVAL_LONG).toCompletableFuture().join();; Assertions.assertNull(ttl); }
/**
 * Synchronously initializes the semaphore's available permits by blocking on
 * {@code trySetPermitsAsync}.
 *
 * @param permits the number of permits to establish
 * @return {@code true} if the permits were set; {@code false} otherwise
 *         (observed to be the case when permits were already set — see the
 *         accompanying test)
 */
@Override
public boolean trySetPermits(int permits) {
    return get(trySetPermitsAsync(permits));
}
// trySetPermits succeeds only on first initialization; a second call with a
// different count is rejected and the original permit count is kept.
@Test
public void testTrySetPermits() {
    RPermitExpirableSemaphore s = redisson.getPermitExpirableSemaphore("test");
    assertThat(s.trySetPermits(10)).isTrue();
    assertThat(s.availablePermits()).isEqualTo(10);
    // Already initialized: the second attempt must be a no-op.
    assertThat(s.trySetPermits(15)).isFalse();
    assertThat(s.availablePermits()).isEqualTo(10);
}
/**
 * Aggregates per-partition boolean column statistics into a single result by
 * summing true/false/null counts across all partitions.
 *
 * @param colStatsWithSourceInfo per-partition stats for one column
 * @param partNames              partition names (unused here)
 * @param areAllPartsFound       whether stats exist for every partition
 *                               (boolean aggregation sums regardless)
 * @return the aggregated statistics object
 * @throws MetaException if the input list fails validation
 */
@Override
public ColumnStatisticsObj aggregate(List<ColStatsObjWithSourceInfo> colStatsWithSourceInfo,
    List<String> partNames, boolean areAllPartsFound) throws MetaException {
  checkStatisticsList(colStatsWithSourceInfo);
  ColumnStatisticsObj result = null;
  BooleanColumnStatsData combined = null;
  for (ColStatsObjWithSourceInfo sourceInfo : colStatsWithSourceInfo) {
    ColumnStatisticsObj partStats = sourceInfo.getColStatsObj();
    if (result == null) {
      // First partition seeds the result's column name/type/data kind.
      result = ColumnStatsAggregatorFactory.newColumnStaticsObj(partStats.getColName(),
          partStats.getColType(), partStats.getStatsData().getSetField());
    }
    BooleanColumnStatsData partData = partStats.getStatsData().getBooleanStats();
    if (combined == null) {
      // Copy so later accumulation never mutates the caller's data.
      combined = partData.deepCopy();
    } else {
      combined.setNumTrues(combined.getNumTrues() + partData.getNumTrues());
      combined.setNumFalses(combined.getNumFalses() + partData.getNumFalses());
      combined.setNumNulls(combined.getNumNulls() + partData.getNumNulls());
    }
  }
  ColumnStatisticsData columnStatisticsData = initColumnStatisticsData();
  columnStatisticsData.setBooleanStats(combined);
  result.setStatsData(columnStatisticsData);
  return result;
}
// With stats available for every partition, aggregation is a plain sum of
// the per-partition null/false/true counts: (1+2+3, 3+6+2, 13+18+18).
@Test
public void testAggregateMultiStatsWhenAllAvailable() throws MetaException {
    List<String> partitions = Arrays.asList("part1", "part2", "part3");
    ColumnStatisticsData data1 = new ColStatsBuilder<>(Boolean.class).numNulls(1).numFalses(3).numTrues(13).build();
    ColumnStatisticsData data2 = new ColStatsBuilder<>(Boolean.class).numNulls(2).numFalses(6).numTrues(18).build();
    ColumnStatisticsData data3 = new ColStatsBuilder<>(Boolean.class).numNulls(3).numFalses(2).numTrues(18).build();
    List<ColStatsObjWithSourceInfo> statsList = Arrays.asList(
            createStatsWithInfo(data1, TABLE, COL, partitions.get(0)),
            createStatsWithInfo(data2, TABLE, COL, partitions.get(1)),
            createStatsWithInfo(data3, TABLE, COL, partitions.get(2)));
    BooleanColumnStatsAggregator aggregator = new BooleanColumnStatsAggregator();
    ColumnStatisticsObj computedStatsObj = aggregator.aggregate(statsList, partitions, true);
    ColumnStatisticsData expectedStats = new ColStatsBuilder<>(Boolean.class).numNulls(6).numFalses(11).numTrues(49).build();
    Assert.assertEquals(expectedStats, computedStatsObj.getStatsData());
}
/**
 * Validates the inheritance graph rooted at each document type, raising on
 * cycles or other structural problems detected by {@code validateRoot}.
 *
 * @param documents every document type in the application
 */
public void validateDocumentGraph(List<SDDocumentType> documents) {
    documents.forEach(this::validateRoot);
}
// Builds doc1 <- doc2 <- doc3 and then makes doc1 inherit doc3, closing a
// cycle; validation must fail and name the cycle path in the message.
@Test
void inherit_cycle_is_forbidden() {
    Throwable exception = assertThrows(DocumentGraphValidator.DocumentGraphException.class, () -> {
        Schema schema1 = createSearchWithName("doc1");
        Schema schema2 = createSearchWithName("doc2", schema1);
        Schema schema3 = createSearchWithName("doc3", schema2);
        // Close the cycle: doc1 -> doc3 -> doc2 -> doc1.
        schema1.getDocument().inherit(schema3.getDocument());
        DocumentGraphValidator validator = new DocumentGraphValidator();
        validator.validateDocumentGraph(documentListOf(schema1, schema2, schema3));
    });
    assertTrue(exception.getMessage().contains("Document dependency cycle detected: doc1->doc3->doc2->doc1."));
}
/**
 * Resolves a property against the process environment. The key is tried
 * as-is first; if that fails and upper-casing changes the key, the
 * upper-cased form is tried as well (environment variable names are
 * conventionally upper-case).
 * <p>
 * Fix: upper-casing now uses {@link java.util.Locale#ROOT} so the lookup is
 * locale-independent — with the default locale, e.g. Turkish, {@code 'i'}
 * maps to a dotted capital I and the environment lookup silently fails.
 *
 * @param key the property key, possibly lower-case and dot-separated
 * @return the environment value, or {@code null} if no candidate key matches
 */
@Override
String getProperty(String key) {
    String checkedKey = checkPropertyName(key);
    if (checkedKey == null) {
        // Locale.ROOT guarantees stable ASCII case mapping regardless of the
        // JVM default locale.
        final String upperCaseKey = key.toUpperCase(java.util.Locale.ROOT);
        if (!upperCaseKey.equals(key)) {
            checkedKey = checkPropertyName(upperCaseKey);
        }
    }
    if (checkedKey == null) {
        return null;
    }
    return env.get(checkedKey);
}
// A lower-cased, dot-separated key should still resolve to the matching
// environment variable's value (presumably via upper-casing plus dot
// normalization inside checkPropertyName — confirm against that method).
@Test
void testGetEnvForLowerCaseKeyWithDot() {
    assertEquals("value2", systemEnvPropertySource.getProperty("test.case.2"));
}
/**
 * Decompresses an LZ4 block stream into a byte array.
 * <p>
 * On I/O failure the error is logged and whatever bytes were decoded so far
 * are returned (best-effort, matching the established contract).
 *
 * @param bytes the LZ4-compressed input; must not be {@code null}
 * @return the decompressed bytes (possibly partial on I/O error)
 * @throws NullPointerException if {@code bytes} is {@code null}
 */
public static byte[] decompress(byte[] bytes) {
    if (bytes == null) {
        throw new NullPointerException("bytes is null");
    }
    LZ4FastDecompressor decompressor = LZ4Factory.fastestInstance().fastDecompressor();
    ByteArrayOutputStream result = new ByteArrayOutputStream(ARRAY_SIZE);
    try (LZ4BlockInputStream decoded =
            new LZ4BlockInputStream(new ByteArrayInputStream(bytes), decompressor)) {
        byte[] chunk = new byte[ARRAY_SIZE];
        // Drain the stream chunk by chunk until EOF.
        for (int read = decoded.read(chunk); read != -1; read = decoded.read(chunk)) {
            result.write(chunk, 0, read);
        }
    } catch (IOException e) {
        // Best-effort: log and fall through with the bytes read so far.
        LOGGER.error("decompress bytes error", e);
    }
    return result.toByteArray();
}
// decompress(null) must fail fast with a NullPointerException rather than
// returning an empty array.
@Test
public void testDecompress() {
    Assertions.assertThrows(NullPointerException.class, () -> {
        Lz4Util.decompress(null);
    });
}
/**
 * Schedules a renewal task for the token at roughly 90% of its remaining
 * lifetime. If the token has already expired (or the expiration is unset,
 * i.e. non-positive remaining time), no timer is scheduled.
 *
 * @param token the delegation token wrapper to schedule renewal for
 * @throws IOException declared for overriders; not thrown by this body
 */
@VisibleForTesting
protected void setTimerForTokenRenewal(DelegationTokenToRenew token) throws IOException {
    // calculate timer time
    long expiresIn = token.expirationDate - System.currentTimeMillis();
    if (expiresIn <= 0) {
        // Nothing to do — renewal would be pointless for an expired token.
        LOG.info("Will not renew token " + token);
        return;
    }
    // Fire shortly before expiration: keep 10% of the remaining time as slack.
    long renewIn = token.expirationDate - expiresIn/10; // little bit before the expiration
    // need to create new task every time
    RenewalTimerTask tTask = new RenewalTimerTask(token);
    token.setTimerTask(tTask); // keep reference to the timer
    renewalTimer.schedule(token.timerTask, new Date(renewIn));
    LOG.info("Renew " + token + " in " + expiresIn + " ms, appId = " + token.referringAppIds);
}
// When the token's expiration is zero, negative, or already in the past,
// setTimerForTokenRenewal must not schedule a timer task.
@Test
public void testTokenRenewerInvalidReturn() throws Exception {
    DelegationTokenToRenew mockDttr = mock(DelegationTokenToRenew.class);
    mockDttr.expirationDate = 0;
    delegationTokenRenewer.setTimerForTokenRenewal(mockDttr);
    assertNull(mockDttr.timerTask);
    mockDttr.expirationDate = -1;
    delegationTokenRenewer.setTimerForTokenRenewal(mockDttr);
    assertNull(mockDttr.timerTask);
    // Expiration one millisecond in the past also counts as "not renewable".
    mockDttr.expirationDate = System.currentTimeMillis() - 1;
    delegationTokenRenewer.setTimerForTokenRenewal(mockDttr);
    assertNull(mockDttr.timerTask);
}
/**
 * Builds a predicate from an SQL-like where-clause expression.
 * The expression is parsed eagerly, so malformed input is presumably
 * reported here rather than at evaluation time (see createPredicate).
 *
 * @param sql the predicate text, e.g. {@code "active AND age > 4"}
 */
public SqlPredicate(String sql) {
    this.sql = sql;
    predicate = createPredicate(sql);
}
// Exhaustively checks the SqlPredicate parser's normalized toString() for a
// wide range of clauses: IN, LIKE/REGEX/ILIKE, boolean shortcuts, numeric
// comparisons, NOT, AND/OR nesting, and BETWEEN.
@Test
public void testSqlPredicate() {
    // IN and LIKE/REGEX basics
    assertEquals("name IN (name0,name2)", sql("name in ('name0', 'name2')"));
    assertEquals("(name LIKE 'joe' AND id=5)", sql("name like 'joe' AND id = 5"));
    assertEquals("(name REGEX '\\w*' AND id=5)", sql("name regex '\\w*' AND id = 5"));
    // a bare attribute is shorthand for attribute=true
    assertEquals("active=true", sql("active"));
    assertEquals("(active=true AND name=abc xyz 123)", sql("active AND name='abc xyz 123'"));
    assertEquals("(name LIKE 'abc-xyz+(123)' AND name=abc xyz 123)", sql("name like 'abc-xyz+(123)' AND name='abc xyz 123'"));
    assertEquals("(name REGEX '\\w{3}-\\w{3}+\\(\\d{3}\\)' AND name=abc xyz 123)", sql("name regex '\\w{3}-\\w{3}+\\(\\d{3}\\)' AND name='abc xyz 123'"));
    // whitespace-insensitive comparison operators
    assertEquals("(active=true AND age>4)", sql("active and age > 4"));
    assertEquals("(active=true AND age>4)", sql("active and age>4"));
    assertEquals("(active=false AND age<=4)", sql("active=false AND age<=4"));
    assertEquals("(active=false AND age<=4)", sql("active= false and age <= 4"));
    assertEquals("(active=false AND age>=4)", sql("active=false AND (age>=4)"));
    assertEquals("(active=false OR age>=4)", sql("active =false or (age>= 4)"));
    // LIKE / REGEX with and without NOT
    assertEquals("name LIKE 'J%'", sql("name like 'J%'"));
    assertEquals("name REGEX 'J.*'", sql("name regex 'J.*'"));
    assertEquals("NOT(name LIKE 'J%')", sql("name not like 'J%'"));
    assertEquals("NOT(name REGEX 'J.*')", sql("name not regex 'J.*'"));
    assertEquals("(active=false OR name LIKE 'J%')", sql("active =false or name like 'J%'"));
    assertEquals("(active=false OR name LIKE 'Java World')", sql("active =false or name like 'Java World'"));
    assertEquals("(active=false OR name LIKE 'Java W% Again')", sql("active =false or name like 'Java W% Again'"));
    assertEquals("(active=false OR name REGEX 'J.*')", sql("active =false or name regex 'J.*'"));
    assertEquals("(active=false OR name REGEX 'Java World')", sql("active =false or name regex 'Java World'"));
    assertEquals("(active=false OR name REGEX 'Java W.* Again')", sql("active =false or name regex 'Java W.* Again'"));
    // negative numbers and IN lists
    assertEquals("i<=-1", sql("i<= -1"));
    assertEquals("age IN (-1)", sql("age in (-1)"));
    assertEquals("age IN (10,15)", sql("age in (10, 15)"));
    assertEquals("NOT(age IN (10,15))", sql("age not in ( 10 , 15 )"));
    assertEquals("(active=true AND age BETWEEN 10 AND 15)", sql("active and age between 10 and 15"));
    assertEquals("(age IN (10,15) AND active=true)", sql("age IN (10, 15) and active"));
    assertEquals("(active=true OR age IN (10,15))", sql("active or (age in ( 10,15))"));
    assertEquals("(age>10 AND (active=true OR age IN (10,15)))", sql("age>10 AND (active or (age IN (10, 15 )))"));
    assertEquals("(age<=10 AND (active=true OR NOT(age IN (10,15))))", sql("age<=10 AND (active or (age not in (10 , 15)))"));
    // BETWEEN with NOT and nesting
    assertEquals("age BETWEEN 10 AND 15", sql("age between 10 and 15"));
    assertEquals("NOT(age BETWEEN 10 AND 15)", sql("age not between 10 and 15"));
    assertEquals("(active=true AND age BETWEEN 10 AND 15)", sql("active and age between 10 and 15"));
    assertEquals("(age BETWEEN 10 AND 15 AND active=true)", sql("age between 10 and 15 and active"));
    assertEquals("(active=true OR age BETWEEN 10 AND 15)", sql("active or (age between 10 and 15)"));
    assertEquals("(age>10 AND (active=true OR age BETWEEN 10 AND 15))", sql("age>10 AND (active or (age between 10 and 15))"));
    assertEquals("(age<=10 AND (active=true OR NOT(age BETWEEN 10 AND 15)))", sql("age<=10 AND (active or (age not between 10 and 15))"));
    assertEquals("name ILIKE 'J%'", sql("name ilike 'J%'"));
    // issue #594
    assertEquals("(name IN (name0,name2) AND age IN (2,5,8))", sql("name in('name0', 'name2') and age IN ( 2, 5 ,8)"));
}
/**
 * Creates and stores a new VPLS with the given name and encapsulation.
 *
 * @param vplsName          unique VPLS name; must not be null
 * @param encapsulationType encapsulation to apply; must not be null
 * @return the created VPLS data, or {@code null} if the name is taken
 */
@Override
public VplsData createVpls(String vplsName, EncapsulationType encapsulationType) {
    requireNonNull(vplsName);
    requireNonNull(encapsulationType);
    if (vplsStore.getVpls(vplsName) == null) {
        // Name is free: create, persist, and return the new VPLS.
        VplsData newVpls = VplsData.of(vplsName, encapsulationType);
        vplsStore.addVpls(newVpls);
        return newVpls;
    }
    // Duplicate name — signal the conflict to the caller.
    return null;
}
// Creating a VPLS returns the requested name/encapsulation, and the stored
// copy starts in the ADDING lifecycle state.
@Test
public void testCreateVpls() {
    VplsData vplsData = vplsManager.createVpls(VPLS1, NONE);
    assertEquals(VPLS1, vplsData.name());
    assertEquals(NONE, vplsData.encapsulationType());
    // Re-read from the store to verify the persisted state transition.
    vplsData = vplsStore.getVpls(VPLS1);
    assertEquals(vplsData.state(), ADDING);
}
/**
 * Convenience overload that merges a single {@code OperatorStats} into this
 * one by delegating to the list-based {@code add}.
 *
 * @param operatorStats the stats to merge with this instance
 * @return a new merged {@code OperatorStats}
 */
public OperatorStats add(OperatorStats operatorStats) {
    return add(ImmutableList.of(operatorStats));
}
// Adds EXPECTED to two copies of itself (3x total) and verifies that:
//  - identifiers (stage/operator ids, type) are taken from the receiver,
//  - additive counters and durations are tripled,
//  - peak memory reservations take the max, not the sum,
//  - info is dropped (null) and runtime metrics are merged 3-way.
@Test
public void testAdd() {
    OperatorStats actual = EXPECTED.add(ImmutableList.of(EXPECTED, EXPECTED));
    assertEquals(actual.getStageId(), 0);
    assertEquals(actual.getStageExecutionId(), 10);
    assertEquals(actual.getOperatorId(), 41);
    assertEquals(actual.getOperatorType(), "test");
    assertEquals(actual.getTotalDrivers(), 3 * 1);
    assertEquals(actual.getAddInputCalls(), 3 * 2);
    assertEquals(actual.getAddInputWall(), new Duration(3 * 3, NANOSECONDS));
    assertEquals(actual.getAddInputCpu(), new Duration(3 * 4, NANOSECONDS));
    assertEquals(actual.getAddInputAllocation(), new DataSize(3 * 123, BYTE));
    assertEquals(actual.getRawInputDataSize(), new DataSize(3 * 5, BYTE));
    assertEquals(actual.getInputDataSize(), new DataSize(3 * 6, BYTE));
    assertEquals(actual.getInputPositions(), 3 * 7);
    assertEquals(actual.getSumSquaredInputPositions(), 3 * 8.0);
    assertEquals(actual.getGetOutputCalls(), 3 * 9);
    assertEquals(actual.getGetOutputWall(), new Duration(3 * 10, NANOSECONDS));
    assertEquals(actual.getGetOutputCpu(), new Duration(3 * 11, NANOSECONDS));
    assertEquals(actual.getGetOutputAllocation(), new DataSize(3 * 234, BYTE));
    assertEquals(actual.getOutputDataSize(), new DataSize(3 * 12, BYTE));
    assertEquals(actual.getOutputPositions(), 3 * 13);
    assertEquals(actual.getPhysicalWrittenDataSize(), new DataSize(3 * 14, BYTE));
    assertEquals(actual.getAdditionalCpu(), new Duration(3 * 100, NANOSECONDS));
    assertEquals(actual.getBlockedWall(), new Duration(3 * 15, NANOSECONDS));
    assertEquals(actual.getFinishCalls(), 3 * 16);
    assertEquals(actual.getFinishWall(), new Duration(3 * 17, NANOSECONDS));
    assertEquals(actual.getFinishCpu(), new Duration(3 * 18, NANOSECONDS));
    assertEquals(actual.getFinishAllocation(), new DataSize(3 * 345, BYTE));
    // Saturating addition: the fixture's user memory sums past Long.MAX_VALUE.
    assertEquals(actual.getUserMemoryReservation().toBytes(), Long.MAX_VALUE);
    assertEquals(actual.getRevocableMemoryReservation(), new DataSize(3 * 20, BYTE));
    assertEquals(actual.getSystemMemoryReservation(), new DataSize(3 * 21, BYTE));
    // Peaks are max-combined, so they keep the single-instance values.
    assertEquals(actual.getPeakUserMemoryReservation(), new DataSize(22, BYTE));
    assertEquals(actual.getPeakSystemMemoryReservation(), new DataSize(23, BYTE));
    assertEquals(actual.getPeakTotalMemoryReservation(), new DataSize(24, BYTE));
    assertEquals(actual.getSpilledDataSize(), new DataSize(3 * 25, BYTE));
    assertNull(actual.getInfo());
    RuntimeMetric expectedMetric = RuntimeMetric.merge(TEST_RUNTIME_METRIC_1, TEST_RUNTIME_METRIC_1);
    expectedMetric.mergeWith(TEST_RUNTIME_METRIC_1);
    assertRuntimeMetricEquals(actual.getRuntimeStats().getMetric(TEST_METRIC_NAME), expectedMetric);
    assertEquals(actual.getDynamicFilterStats().getProducerNodeIds(), TEST_DYNAMIC_FILTER_STATS_1.getProducerNodeIds());
}
/**
 * Reads a file fully into a string using UTF-8.
 * <p>
 * NOTE(review): a "\n" is appended before every line, so the result begins
 * with a leading newline and all original line terminators are normalized to
 * "\n" — callers appear to depend on this exact shape, so it is preserved.
 * NOTE(review): read errors are printed to stderr and swallowed, returning
 * whatever was read so far (best-effort) — consider proper logging.
 *
 * @param path filesystem path of the file to read
 * @return the file content with each line prefixed by "\n"
 * @throws BusException if the path does not denote a regular file
 */
public static String readFile(String path) {
    StringBuilder builder = new StringBuilder();
    File file = new File(path);
    if (!file.isFile()) {
        throw new BusException(StrUtil.format("File path {} is not a file.", path));
    }
    try (InputStreamReader inputStreamReader = new InputStreamReader(Files.newInputStream(file.toPath()), StandardCharsets.UTF_8);
         BufferedReader bufferedReader = new BufferedReader(inputStreamReader)) {
        String content;
        while ((content = bufferedReader.readLine()) != null) {
            builder.append("\n");
            builder.append(content);
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
    return builder.toString();
}
// Smoke test: reading the root log file yields a non-null string.
// Disabled (@Ignore) — presumably because the log file only exists in a
// deployed environment; confirm before re-enabling.
@Ignore
@Test
public void testReadRootLog() {
    String result = DirUtil.readFile(DirConstant.getRootLog());
    Assertions.assertThat(result).isNotNull();
}
/**
 * Builds the web URL for a file by joining the host's default web URL with
 * the file's path relativized against the host's configured default path.
 *
 * @param file the remote file to produce a URL for
 * @return a bag containing one URL, typed/helped like the host's base URL
 */
@Override
public DescriptiveUrlBag toUrl(final Path file) {
    final DescriptiveUrlBag list = new DescriptiveUrlBag();
    final DescriptiveUrl base = new DefaultWebUrlProvider().toUrl(host);
    // Strip the document root (host default path) from the file path, URI-
    // encode the remainder, and append it to the base web URL.
    list.add(new DescriptiveUrl(URI.create(String.format("%s%s", base.getUrl(), URIEncoder.encode(
            PathNormalizer.normalize(PathRelativizer.relativize(PathNormalizer.normalize(host.getDefaultPath(), true), file.getAbsolute()))
    ))).normalize(), base.getType(), base.getHelp())
    );
    return list;
}
// The default provider yields the bare https root; the host-aware provider
// keeps the full document-root path in the generated URL.
@Test
public void testHttps() {
    final Host host = new Host(new TestProtocol(Scheme.https), "test.cyberduck.ch");
    assertEquals("https://test.cyberduck.ch/", new DefaultWebUrlProvider().toUrl(host).getUrl());
    assertEquals("https://test.cyberduck.ch/my/documentroot/f",
            new HostWebUrlProvider(host).toUrl(new Path("/my/documentroot/f", EnumSet.of(Path.Type.directory))).find(DescriptiveUrl.Type.http).getUrl());
}
/**
 * Iteratively (non-recursively) visits a schema graph in depth-first order,
 * invoking the visitor's pre/terminal/post callbacks. Already-visited
 * schemas (identity-compared, so recursive schemas terminate) are re-visited
 * as terminals. The traversal honors the visitor's action results:
 * CONTINUE, SKIP_SIBLINGS (pop pending sibling schemas), and TERMINATE;
 * SKIP_SUBTREE is not supported at post-visit time.
 *
 * @param start   the root schema to traverse
 * @param visitor the visitor receiving callbacks; its {@code get()} supplies
 *                the final result
 * @return the visitor's result
 */
public static <T> T visit(final Schema start, final SchemaVisitor<T> visitor) {
    // Set of visited schemas (identity semantics guard against cycles).
    IdentityHashMap<Schema, Schema> visited = new IdentityHashMap<>();
    // Stack that contains the schemas to process and afterVisitNonTerminal
    // functions.
    // Deque<Either<Schema, Supplier<SchemaVisitorAction>>>
    // Using either has a cost which we want to avoid...
    Deque<Object> dq = new ArrayDeque<>();
    dq.addLast(start);
    Object current;
    while ((current = dq.pollLast()) != null) {
        if (current instanceof Supplier) {
            // we are executing a non terminal post visit.
            SchemaVisitorAction action = ((Supplier<SchemaVisitorAction>) current).get();
            switch (action) {
                case CONTINUE:
                    break;
                case SKIP_SUBTREE:
                    // Meaningless after the subtree was already traversed.
                    throw new UnsupportedOperationException();
                case SKIP_SIBLINGS:
                    // Drop pending sibling schemas up to the next post-visit marker.
                    while (dq.getLast() instanceof Schema) {
                        dq.removeLast();
                    }
                    break;
                case TERMINATE:
                    return visitor.get();
                default:
                    throw new UnsupportedOperationException("Invalid action " + action);
            }
        } else {
            Schema schema = (Schema) current;
            boolean terminate;
            if (!visited.containsKey(schema)) {
                Schema.Type type = schema.getType();
                switch (type) {
                    case ARRAY:
                        terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getElementType()));
                        visited.put(schema, schema);
                        break;
                    case RECORD:
                        // Push field schemas in reverse so they pop in
                        // declaration order.
                        terminate = visitNonTerminal(visitor, schema, dq,
                                () -> schema.getFields().stream().map(Field::schema)
                                        .collect(Collectors.toCollection(ArrayDeque::new)).descendingIterator());
                        visited.put(schema, schema);
                        break;
                    case UNION:
                        terminate = visitNonTerminal(visitor, schema, dq, schema.getTypes());
                        visited.put(schema, schema);
                        break;
                    case MAP:
                        terminate = visitNonTerminal(visitor, schema, dq, Collections.singleton(schema.getValueType()));
                        visited.put(schema, schema);
                        break;
                    case NULL:
                    case BOOLEAN:
                    case BYTES:
                    case DOUBLE:
                    case ENUM:
                    case FIXED:
                    case FLOAT:
                    case INT:
                    case LONG:
                    case STRING:
                        // Primitive/leaf schema types.
                        terminate = visitTerminal(visitor, schema, dq);
                        break;
                    default:
                        throw new UnsupportedOperationException("Invalid type " + type);
                }
            } else {
                // Seen before: treat as a terminal to break recursion.
                terminate = visitTerminal(visitor, schema, dq);
            }
            if (terminate) {
                return visitor.get();
            }
        }
    }
    return visitor.get();
}
/**
 * A terminal visit that returns SKIP_SIBLINGS on a lone primitive schema
 * still records the visit and ends the traversal normally.
 * Fix: added {@code @Override} to the anonymous-class method so a signature
 * drift in {@code TestVisitor} becomes a compile error instead of a silently
 * unused method.
 */
@Test
void visit13() {
    String s12 = "{\"type\": \"int\"}";
    assertEquals("\"int\".", Schemas.visit(new Schema.Parser().parse(s12), new TestVisitor() {
        @Override
        public SchemaVisitorAction visitTerminal(Schema terminal) {
            sb.append(terminal).append('.');
            return SchemaVisitorAction.SKIP_SIBLINGS;
        }
    }));
}
/**
 * Reports whether the configured output format is XML.
 *
 * @return {@code true} when the format equals {@code "xml"}; {@code false}
 *         for any other value, including {@code null}
 */
public boolean isXML() {
    final String format = getFormat();
    return format != null && format.equals("xml");
}
// An endpoint URI that does not specify a format must default to XML output.
@Test
public void defaultsToXml() {
    JMXEndpoint ep = context.getEndpoint("jmx:platform?objectDomain=FooDomain&objectName=theObjectName", JMXEndpoint.class);
    assertTrue(ep.isXML());
}
/**
 * Loads all roles whose ids are in the given collection.
 *
 * @param ids role ids to look up
 * @return an immutable list of the matching roles
 */
public List<AuthzRoleDTO> findByIds(Collection<String> ids) {
    // Single query: match any document whose _id is in the id collection.
    final DBQuery.Query idQuery = DBQuery.in("_id", ids);
    return asImmutableList(db.find(idQuery));
}
// Looks up two fixture roles by id and checks both come back.
// NOTE(review): the result order differs from the input set order — it is
// presumably the database/natural order of the fixture; confirm the ordering
// guarantee before relying on the indexed assertions.
@Test
void findByIds() {
    final List<AuthzRoleDTO> roles = service.findByIds(ImmutableSet.of(
            "5d41bb973086a840541a3ed2", "564c6707c8306e079f718980"
    ));
    assertThat(roles).hasSize(2);
    assertThat(roles.get(0).id()).isEqualTo("564c6707c8306e079f718980");
    assertThat(roles.get(0).name()).isEqualTo("Reader");
    assertThat(roles.get(1).id()).isEqualTo("5d41bb973086a840541a3ed2");
    assertThat(roles.get(1).name()).isEqualTo("Alerts Manager");
}
/**
 * Runs one scheduling pass over all lifespan schedule groups:
 * fetches the next split batch per group when its pending set is empty,
 * computes node placements for pending splits, assigns placed splits to
 * tasks, advances group/overall state machines, and finally reports either a
 * non-blocked result or a blocked result with the dominant blocked reason.
 * Synchronized: group state and the overall {@code state} are mutated here.
 *
 * @return the outcome of this pass (finished flag, new tasks, split count,
 *         and — when blocked — a future plus blocked reason)
 */
@Override
public synchronized ScheduleResult schedule() {
    dropListenersFromWhenFinishedOrNewLifespansAdded();
    int overallSplitAssignmentCount = 0;
    ImmutableSet.Builder<RemoteTask> overallNewTasks = ImmutableSet.builder();
    List<ListenableFuture<?>> overallBlockedFutures = new ArrayList<>();
    boolean anyBlockedOnPlacements = false;
    boolean anyBlockedOnNextSplitBatch = false;
    boolean anyNotBlocked = false;
    for (Entry<Lifespan, ScheduleGroup> entry : scheduleGroups.entrySet()) {
        Lifespan lifespan = entry.getKey();
        ScheduleGroup scheduleGroup = entry.getValue();
        if (scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS || scheduleGroup.state == ScheduleGroupState.DONE) {
            // Terminal-ish states must not have an outstanding batch fetch.
            verify(scheduleGroup.nextSplitBatchFuture == null);
        }
        else if (scheduleGroup.pendingSplits.isEmpty()) {
            // try to get the next batch
            if (scheduleGroup.nextSplitBatchFuture == null) {
                scheduleGroup.nextSplitBatchFuture = splitSource.getNextBatch(scheduleGroup.partitionHandle, lifespan, splitBatchSize);
                long start = System.nanoTime();
                addSuccessCallback(scheduleGroup.nextSplitBatchFuture, () -> stage.recordGetSplitTime(start));
            }
            if (scheduleGroup.nextSplitBatchFuture.isDone()) {
                SplitBatch nextSplits = getFutureValue(scheduleGroup.nextSplitBatchFuture);
                scheduleGroup.nextSplitBatchFuture = null;
                scheduleGroup.pendingSplits = new HashSet<>(nextSplits.getSplits());
                if (nextSplits.isLastBatch()) {
                    if (scheduleGroup.state == ScheduleGroupState.INITIALIZED && scheduleGroup.pendingSplits.isEmpty()) {
                        // Add an empty split in case no splits have been produced for the source.
                        // For source operators, they never take input, but they may produce output.
                        // This is well handled by Presto execution engine.
                        // However, there are certain non-source operators that may produce output without any input,
                        // for example, 1) an AggregationOperator, 2) a HashAggregationOperator where one of the grouping sets is ().
                        // Scheduling an empty split kicks off necessary driver instantiation to make this work.
                        scheduleGroup.pendingSplits.add(new Split(
                                splitSource.getConnectorId(),
                                splitSource.getTransactionHandle(),
                                new EmptySplit(splitSource.getConnectorId()),
                                lifespan,
                                NON_CACHEABLE));
                    }
                    scheduleGroup.state = ScheduleGroupState.NO_MORE_SPLITS;
                }
            }
            else {
                // Batch not ready yet: record the future and move on.
                overallBlockedFutures.add(scheduleGroup.nextSplitBatchFuture);
                anyBlockedOnNextSplitBatch = true;
                continue;
            }
        }
        Multimap<InternalNode, Split> splitAssignment = ImmutableMultimap.of();
        if (!scheduleGroup.pendingSplits.isEmpty()) {
            if (!scheduleGroup.placementFuture.isDone()) {
                anyBlockedOnPlacements = true;
                continue;
            }
            if (scheduleGroup.state == ScheduleGroupState.INITIALIZED) {
                scheduleGroup.state = ScheduleGroupState.SPLITS_ADDED;
            }
            if (state == State.INITIALIZED) {
                state = State.SPLITS_ADDED;
            }
            // calculate placements for splits
            SplitPlacementResult splitPlacementResult = splitPlacementPolicy.computeAssignments(scheduleGroup.pendingSplits);
            splitAssignment = splitPlacementResult.getAssignments();
            // remove splits with successful placements
            splitAssignment.values().forEach(scheduleGroup.pendingSplits::remove); // AbstractSet.removeAll performs terribly here.
            overallSplitAssignmentCount += splitAssignment.size();
            // if not completed placed, mark scheduleGroup as blocked on placement
            if (!scheduleGroup.pendingSplits.isEmpty()) {
                scheduleGroup.placementFuture = splitPlacementResult.getBlocked();
                overallBlockedFutures.add(scheduleGroup.placementFuture);
                anyBlockedOnPlacements = true;
            }
        }
        // if no new splits will be assigned, update state and attach completion event
        Multimap<InternalNode, Lifespan> noMoreSplitsNotification = ImmutableMultimap.of();
        if (scheduleGroup.pendingSplits.isEmpty() && scheduleGroup.state == ScheduleGroupState.NO_MORE_SPLITS) {
            scheduleGroup.state = ScheduleGroupState.DONE;
            if (!lifespan.isTaskWide()) {
                // Grouped lifespan: notify the owning node its bucket is done.
                InternalNode node = ((BucketedSplitPlacementPolicy) splitPlacementPolicy).getNodeForBucket(lifespan.getId());
                noMoreSplitsNotification = ImmutableMultimap.of(node, lifespan);
            }
        }
        // assign the splits with successful placements
        overallNewTasks.addAll(assignSplits(splitAssignment, noMoreSplitsNotification));
        // Assert that "placement future is not done" implies "pendingSplits is not empty".
        // The other way around is not true. One obvious reason is (un)lucky timing, where the placement is unblocked between `computeAssignments` and this line.
        // However, there are other reasons that could lead to this.
        // Note that `computeAssignments` is quite broken:
        // 1. It always returns a completed future when there are no tasks, regardless of whether all nodes are blocked.
        // 2. The returned future will only be completed when a node with an assigned task becomes unblocked. Other nodes don't trigger future completion.
        // As a result, to avoid busy loops caused by 1, we check pendingSplits.isEmpty() instead of placementFuture.isDone() here.
        if (scheduleGroup.nextSplitBatchFuture == null && scheduleGroup.pendingSplits.isEmpty() && scheduleGroup.state != ScheduleGroupState.DONE) {
            anyNotBlocked = true;
        }
    }
    // * `splitSource.isFinished` invocation may fail after `splitSource.close` has been invoked.
    //   If state is NO_MORE_SPLITS/FINISHED, splitSource.isFinished has previously returned true, and splitSource is closed now.
    // * Even if `splitSource.isFinished()` return true, it is not necessarily safe to tear down the split source.
    //   * If anyBlockedOnNextSplitBatch is true, it means we have not checked out the recently completed nextSplitBatch futures,
    //     which may contain recently published splits. We must not ignore those.
    //   * If any scheduleGroup is still in DISCOVERING_SPLITS state, it means it hasn't realized that there will be no more splits.
    //     Next time it invokes getNextBatch, it will realize that. However, the invocation will fail we tear down splitSource now.
    //
    // Since grouped execution is going to support failure recovery, and scheduled splits might have to be rescheduled during retry,
    // we can no longer claim schedule is complete after all splits are scheduled.
    // Splits schedule can only be considered as finished when all lifespan executions are done
    // (by calling `notifyAllLifespansFinishedExecution`)
    if ((state == State.NO_MORE_SPLITS || state == State.FINISHED) || (!groupedExecution && lifespanAdded && scheduleGroups.isEmpty() && splitSource.isFinished())) {
        switch (state) {
            case INITIALIZED:
                // We have not scheduled a single split so far.
                // But this shouldn't be possible. See usage of EmptySplit in this method.
                throw new IllegalStateException("At least 1 split should have been scheduled for this plan node");
            case SPLITS_ADDED:
                state = State.NO_MORE_SPLITS;
                splitSource.close();
                // fall through
            case NO_MORE_SPLITS:
                state = State.FINISHED;
                whenFinishedOrNewLifespanAdded.set(null);
                // fall through
            case FINISHED:
                return ScheduleResult.nonBlocked(
                        true,
                        overallNewTasks.build(),
                        overallSplitAssignmentCount);
            default:
                throw new IllegalStateException("Unknown state");
        }
    }
    if (anyNotBlocked) {
        return ScheduleResult.nonBlocked(false, overallNewTasks.build(), overallSplitAssignmentCount);
    }
    if (anyBlockedOnPlacements) {
        // In a broadcast join, output buffers of the tasks in build source stage have to
        // hold onto all data produced before probe side task scheduling finishes,
        // even if the data is acknowledged by all known consumers. This is because
        // new consumers may be added until the probe side task scheduling finishes.
        //
        // As a result, the following line is necessary to prevent deadlock
        // due to neither build nor probe can make any progress.
        // The build side blocks due to a full output buffer.
        // In the meantime the probe side split cannot be consumed since
        // builder side hash table construction has not finished.
        //
        // TODO: When SourcePartitionedScheduler is used as a SourceScheduler, it shouldn't need to worry about
        //  task scheduling and creation -- these are done by the StageScheduler.
        overallNewTasks.addAll(finalizeTaskCreationIfNecessary());
    }
    ScheduleResult.BlockedReason blockedReason;
    if (anyBlockedOnNextSplitBatch) {
        blockedReason = anyBlockedOnPlacements ? MIXED_SPLIT_QUEUES_FULL_AND_WAITING_FOR_SOURCE : WAITING_FOR_SOURCE;
    }
    else {
        blockedReason = anyBlockedOnPlacements ? SPLIT_QUEUES_FULL : NO_ACTIVE_DRIVER_GROUP;
    }
    overallBlockedFutures.add(whenFinishedOrNewLifespanAdded);
    return ScheduleResult.blocked(
            false,
            overallNewTasks.build(),
            nonCancellationPropagating(whenAnyComplete(overallBlockedFutures)),
            blockedReason,
            overallSplitAssignmentCount);
}
/**
 * Verifies that scheduling blocks while the split source has produced no
 * splits: the result is unfinished, its blocked future is pending, and no
 * tasks exist yet. Publishing a split must complete the blocked future.
 */
@Test
public void testScheduleSlowSplitSource() {
    QueuedSplitSource queuedSplitSource = new QueuedSplitSource(TestingSplit::createRemoteSplit);
    SubPlan plan = createPlan();
    NodeTaskMap nodeTaskMap = new NodeTaskMap(finalizerService);
    SqlStageExecution stage = createSqlStageExecution(plan, nodeTaskMap);
    StageScheduler scheduler = getSourcePartitionedScheduler(queuedSplitSource, stage, nodeManager, nodeTaskMap, 1);

    // schedule with no splits - will block
    ScheduleResult scheduleResult = scheduler.schedule();
    assertFalse(scheduleResult.isFinished());
    assertFalse(scheduleResult.getBlocked().isDone());
    assertEquals(scheduleResult.getNewTasks().size(), 0);
    assertEquals(stage.getAllTasks().size(), 0);

    // a newly published split unblocks the scheduler
    queuedSplitSource.addSplits(1);
    assertTrue(scheduleResult.getBlocked().isDone());
}
/**
 * Returns a copy of this row with the value and schema replaced; the windowed
 * key, row time and validator are carried over unchanged.
 */
@Override
public WindowedRow withValue(final GenericRow newValue, final LogicalSchema newSchema) {
  return new WindowedRow(newSchema, key, newValue, rowTime, validator);
}
/**
 * Verifies that copying a row via withValue() re-runs key/value validation
 * on the copy (i.e. the new instance's construction invokes the validator).
 */
@SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_INFERRED")
@Test
public void shouldValidateOnCopy() {
    // Given: forget the validation call made while building the original row
    final WindowedRow row = new WindowedRow(SCHEMA, A_WINDOWED_KEY, A_VALUE, A_ROWTIME, validator);
    clearInvocations(validator);

    // When:
    row.withValue(A_VALUE, SCHEMA);

    // Then: the copy triggered validation again
    verify(validator).validate(SCHEMA, A_KEY, A_VALUE);
}
/**
 * Creates the watch service implementation described by this configuration.
 *
 * @param view        file system view the watch service observes
 * @param pathService path service used to resolve and compare watched paths
 * @return a new watch service instance
 */
abstract AbstractWatchService newWatchService(FileSystemView view, PathService pathService);
/**
 * The DEFAULT configuration must produce a polling watch service with a
 * 5-second polling interval.
 */
@Test
public void testDefaultConfig() {
    WatchService watchService = WatchServiceConfiguration.DEFAULT.newWatchService(fs.getDefaultView(), fs.getPathService());
    assertThat(watchService).isInstanceOf(PollingWatchService.class);

    PollingWatchService pollingWatchService = (PollingWatchService) watchService;
    assertThat(pollingWatchService.interval).isEqualTo(5);
    assertThat(pollingWatchService.timeUnit).isEqualTo(SECONDS);
}
/**
 * Creates a UIf template for an {@code if} statement.
 *
 * @param condition     template for the branch condition
 * @param thenStatement template for the "then" branch
 * @param elseStatement template for the "else" branch (nullability follows the
 *                      generated AutoValue constructor — TODO confirm whether
 *                      the property is @Nullable)
 */
public static UIf create(
    UExpression condition, UStatement thenStatement, UStatement elseStatement) {
  return new AutoValue_UIf(condition, thenStatement, elseStatement);
}
/**
 * A UIf template (condition plus single-statement then/else blocks) must
 * round-trip through Java serialization with equality preserved.
 */
@Test
public void serialization() {
    SerializableTester.reserializeAndAssert(
        UIf.create(
            UFreeIdent.create("cond"),
            UBlock.create(
                UExpressionStatement.create(
                    UAssign.create(UFreeIdent.create("x"), UFreeIdent.create("y")))),
            UBlock.create(
                UExpressionStatement.create(
                    UAssign.create(UFreeIdent.create("x"), UFreeIdent.create("z"))))));
}
/**
 * Splits a SQL script into individual statements, each terminated by a
 * semicolon. Single-quoted literals are copied verbatim (semicolons inside
 * them do not split), and both "--" line comments and slash-star block
 * comments are skipped and dropped from the output.
 *
 * @param sql the raw script text
 * @return the statements, each including its trailing semicolon
 * @throws MigrationException if a quote/comment is unterminated (via
 *         validateToken) or trailing text lacks a semicolon
 */
public static List<String> splitSql(final String sql) {
    final List<String> commands = new ArrayList<>();
    StringBuilder current = new StringBuilder();
    int index = 0;
    while (index < sql.length()) {
        if (sql.charAt(index) == SINGLE_QUOTE) {
            // copy the quoted literal through its closing quote; doubled
            // quotes ('') are handled naturally as two adjacent literals
            final int closingToken = sql.indexOf(SINGLE_QUOTE, index + 1);
            validateToken(String.valueOf(SINGLE_QUOTE), closingToken);
            current.append(sql, index, closingToken + 1);
            index = closingToken + 1;
        } else if (index < sql.length() - 1 && sql.startsWith(SHORT_COMMENT_OPENER, index)) {
            // skip a "--" comment, up to and including its closing newline
            index = sql.indexOf(SHORT_COMMENT_CLOSER, index + 1) + 1;
            validateToken(SHORT_COMMENT_CLOSER, index - 1);
        } else if (index < sql.length() - 1 && sql.startsWith(LONG_COMMENT_OPENER, index)) {
            // skip a block comment, up to and including its closing delimiter
            index = sql.indexOf(LONG_COMMENT_CLOSER, index + 1) + 2;
            validateToken(LONG_COMMENT_CLOSER, index - 2);
        } else if (sql.charAt(index) == SEMICOLON) {
            // statement boundary: emit the accumulated command
            current.append(';');
            commands.add(current.toString());
            current = new StringBuilder();
            index++;
        } else {
            current.append(sql.charAt(index));
            index++;
        }
    }
    if (!current.toString().trim().isEmpty()) {
        throw new MigrationException(String.format(
                "Unmatched command at end of file; missing semicolon: '%s'",
                current
        ));
    }
    return commands;
}
/**
 * Splitting must strip line and block comments while preserving quoted
 * semicolons, escaped quotes ('') and comment-like text inside literals.
 */
@Test
public void shouldSplitCommandsWithComments() {
    // When:
    List<String> commands = CommandParser.splitSql("-- Before\n"
        + "CREATE STREAM valid_purchases AS\n"
        + " SELECT *\n"
        + " FROM purchases\n"
        + " WHERE cost > 0.00 AND quantity > 0;\n"
        + "-- After\n"
        + "CREATE OR REPLACE STREAM valid_purchases AS\n"
        + " SELECT *\n"
        + " FROM purchases\n"
        + " WHERE quantity > 0;"
        + "/* Let's insert some values now! -- it's fun! */\n"
        + "INSERT INTO purchases VALUES ('c''ow', -90);\n"
        + "INSERT INTO purchases VALUES ('/*she*/ep', 80);\n"
        + "INSERT INTO purchases VALUES ('pol/*ar;;be--ar*/;', 200000);");

    // Then: comments are dropped, literals are intact
    assertThat(commands.size(), is(5));
    assertThat(commands.get(0), is("CREATE STREAM valid_purchases AS\n SELECT *\n FROM purchases\n WHERE cost > 0.00 AND quantity > 0;"));
    assertThat(commands.get(1), is("\nCREATE OR REPLACE STREAM valid_purchases AS\n SELECT *\n FROM purchases\n WHERE quantity > 0;"));
    assertThat(commands.get(2), is("\nINSERT INTO purchases VALUES ('c''ow', -90);"));
    assertThat(commands.get(3), is("\nINSERT INTO purchases VALUES ('/*she*/ep', 80);"));
    assertThat(commands.get(4), is("\nINSERT INTO purchases VALUES ('pol/*ar;;be--ar*/;', 200000);"));
}
/**
 * Takes the next mail with at least the given priority, blocking until one
 * becomes available. The thread-local batch is consulted first; only if it
 * has no matching mail does this lock and poll the shared queue, waiting on
 * the notEmpty condition in a loop.
 *
 * @param priority minimum priority of the mail to take
 * @throws InterruptedException  if interrupted while waiting
 * @throws IllegalStateException if the mailbox state forbids taking
 */
@Override
public @Nonnull Mail take(int priority) throws InterruptedException, IllegalStateException {
    checkIsMailboxThread();
    checkTakeStateConditions();
    Mail head = takeOrNull(batch, priority);
    if (head != null) {
        return head;
    }
    final ReentrantLock lock = this.lock;
    lock.lockInterruptibly();
    try {
        Mail headMail;
        while ((headMail = takeOrNull(queue, priority)) == null) {
            // bounded wait (instead of untimed await) to ease debugging of
            // missed-signal scenarios; the loop re-checks the queue each second
            notEmpty.await(1, TimeUnit.SECONDS);
        }
        hasNewMail = !queue.isEmpty();
        return headMail;
    } finally {
        lock.unlock();
    }
}
/** Concurrency stress test: take() must block until matching mail is put. */
@Test
void testConcurrentPutTakeBlocking() throws Exception {
    testPutTake(mailbox -> mailbox.take(DEFAULT_PRIORITY));
}
/**
 * Starts a new case (process instance) for the given process definition.
 *
 * @param processDefinition the definition of the process to instantiate
 * @param rawInputs         raw contract inputs, converted to the process
 *                          contract via BonitaAPIUtil before submission
 * @return the response describing the created case
 * @throws IllegalArgumentException if the definition or the inputs are null
 * @throws Exception                if preparing inputs or the remote call fails
 */
public CaseCreationResponse startCase(
        ProcessDefinitionResponse processDefinition,
        Map<String, Serializable> rawInputs) throws Exception {
    if (processDefinition == null) {
        throw new IllegalArgumentException("ProcessDefinition is null");
    }
    if (rawInputs == null) {
        throw new IllegalArgumentException("The contract input is null");
    }
    final Map<String, Serializable> contractInputs =
            BonitaAPIUtil.getInstance(bonitaApiConfig).prepareInputs(processDefinition, rawInputs);
    final WebTarget instantiationTarget = getBaseResource()
            .path("process/{processId}/instantiation")
            .resolveTemplate("processId", processDefinition.getId());
    return instantiationTarget
            .request()
            .accept(MediaType.APPLICATION_JSON)
            .post(entity(contractInputs, MediaType.APPLICATION_JSON), CaseCreationResponse.class);
}
/**
 * startCase must reject a null process definition with
 * IllegalArgumentException before attempting any remote call.
 */
@Test
public void testStartCaseEmptyProcessDefinitionId() {
    BonitaAPI bonitaApi = BonitaAPIBuilder
            .build(new BonitaAPIConfig("hostname", "port", "username", "password"));
    Map<String, Serializable> map = new HashMap<>();
    assertThrows(IllegalArgumentException.class, () -> bonitaApi.startCase(null, map));
}
/**
 * Resizes and reconciles the desired PVCs against the current cluster state.
 *
 * Per PVC: create/reconcile when missing or not Bound; leave alone while a
 * resize is already in progress; record the pod index when the PVC waits for
 * file-system resizing (that pod may need a restart); otherwise compare sizes
 * and either trigger a resize or do a plain reconcile.
 *
 * @param kafkaStatus status to which resize warnings/conditions may be added
 * @param pvcs        desired PVCs
 * @return future completing with the pod indexes that may need a restart to
 *         finish file-system resizing
 */
public Future<Collection<Integer>> resizeAndReconcilePvcs(KafkaStatus kafkaStatus, List<PersistentVolumeClaim> pvcs) {
    Set<Integer> podIdsToRestart = new HashSet<>();
    List<Future<Void>> futures = new ArrayList<>(pvcs.size());

    for (PersistentVolumeClaim desiredPvc : pvcs) {
        Future<Void> perPvcFuture = pvcOperator.getAsync(reconciliation.namespace(), desiredPvc.getMetadata().getName())
                .compose(currentPvc -> {
                    if (currentPvc == null || currentPvc.getStatus() == null || !"Bound".equals(currentPvc.getStatus().getPhase())) {
                        // This branch handles the following conditions:
                        // * The PVC doesn't exist yet, we should create it
                        // * The PVC is not Bound, we should reconcile it
                        return pvcOperator.reconcile(reconciliation, reconciliation.namespace(), desiredPvc.getMetadata().getName(), desiredPvc)
                                .map((Void) null);
                    } else if (currentPvc.getStatus().getConditions().stream().anyMatch(cond -> "Resizing".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH)))) {
                        // The PVC is Bound, but it is already resizing => Nothing to do, we should let it resize
                        LOGGER.debugCr(reconciliation, "The PVC {} is resizing, nothing to do", desiredPvc.getMetadata().getName());
                        return Future.succeededFuture();
                    } else if (currentPvc.getStatus().getConditions().stream().anyMatch(cond -> "FileSystemResizePending".equals(cond.getType()) && "true".equals(cond.getStatus().toLowerCase(Locale.ENGLISH)))) {
                        // The PVC is Bound and resized but waiting for FS resizing => We need to restart the pod which is using it
                        podIdsToRestart.add(getPodIndexFromPvcName(desiredPvc.getMetadata().getName()));
                        LOGGER.infoCr(reconciliation, "The PVC {} is waiting for file system resizing and the pod using it might need to be restarted.", desiredPvc.getMetadata().getName());
                        return Future.succeededFuture();
                    } else {
                        // The PVC is Bound and resizing is not in progress => We should check if the SC supports resizing and check if size changed
                        Long currentSize = StorageUtils.convertToMillibytes(currentPvc.getSpec().getResources().getRequests().get("storage"));
                        Long desiredSize = StorageUtils.convertToMillibytes(desiredPvc.getSpec().getResources().getRequests().get("storage"));

                        if (!currentSize.equals(desiredSize)) {
                            // The sizes are different => we should resize (shrinking will be handled in StorageDiff, so we do not need to check that)
                            return resizePvc(kafkaStatus, currentPvc, desiredPvc);
                        } else {
                            // size didn't change, just reconcile
                            return pvcOperator.reconcile(reconciliation, reconciliation.namespace(), desiredPvc.getMetadata().getName(), desiredPvc)
                                    .map((Void) null);
                        }
                    }
                });
        futures.add(perPvcFuture);
    }

    return Future.all(futures)
            .map(podIdsToRestart);
}
/**
 * When every PVC is Bound at the desired size (100Gi), all three PVCs are
 * plainly reconciled and no pods are reported for restart.
 */
@Test
public void testVolumesResized(VertxTestContext context) {
    List<PersistentVolumeClaim> pvcs = List.of(
            createPvc("data-pod-0"),
            createPvc("data-pod-1"),
            createPvc("data-pod-2")
    );

    ResourceOperatorSupplier supplier = ResourceUtils.supplierWithMocks(false);

    // Mock the PVC Operator: each existing PVC is returned Bound at 100Gi
    PvcOperator mockPvcOps = supplier.pvcOperations;
    when(mockPvcOps.getAsync(eq(NAMESPACE), ArgumentMatchers.startsWith("data-")))
            .thenAnswer(invocation -> {
                String pvcName = invocation.getArgument(1);
                PersistentVolumeClaim currentPvc = pvcs.stream().filter(pvc -> pvcName.equals(pvc.getMetadata().getName())).findFirst().orElse(null);
                if (currentPvc != null) {
                    PersistentVolumeClaim pvcWithStatus = new PersistentVolumeClaimBuilder(currentPvc)
                            .withNewStatus()
                            .withPhase("Bound")
                            .withCapacity(Map.of("storage", new Quantity("100Gi", null)))
                            .endStatus()
                            .build();
                    return Future.succeededFuture(pvcWithStatus);
                } else {
                    return Future.succeededFuture();
                }
            });
    ArgumentCaptor<PersistentVolumeClaim> pvcCaptor = ArgumentCaptor.forClass(PersistentVolumeClaim.class);
    when(mockPvcOps.reconcile(any(), anyString(), anyString(), pvcCaptor.capture())).thenReturn(Future.succeededFuture());

    // Mock the StorageClass Operator
    StorageClassOperator mockSco = supplier.storageClassOperations;
    when(mockSco.getAsync(eq(STORAGE_CLASS_NAME))).thenReturn(Future.succeededFuture(RESIZABLE_STORAGE_CLASS));

    // Reconcile the PVCs
    PvcReconciler reconciler = new PvcReconciler(
            new Reconciliation("test-trigger", Kafka.RESOURCE_KIND, NAMESPACE, CLUSTER_NAME),
            mockPvcOps,
            mockSco
    );

    Checkpoint async = context.checkpoint();
    reconciler.resizeAndReconcilePvcs(new KafkaStatus(), pvcs)
            .onComplete(res -> {
                assertThat(res.succeeded(), is(true));
                // no restarts needed, all three PVCs reconciled unchanged
                assertThat(res.result().size(), is(0));
                assertThat(pvcCaptor.getAllValues().size(), is(3));
                assertThat(pvcCaptor.getAllValues(), is(pvcs));
                async.flag();
            });
}
/**
 * Generates a series of integers from {@code start} to {@code end}
 * (inclusive), inferring the step direction: ascending when
 * {@code end >= start}, descending otherwise.
 *
 * Fix: direction is now derived from a direct comparison instead of the sign
 * of {@code end - start}, which overflows for ranges wider than
 * Integer.MAX_VALUE (e.g. start = MIN_VALUE, end = MAX_VALUE) and silently
 * flipped the step direction.
 *
 * @param start the beginning of the series
 * @param end   the end of the series (inclusive)
 * @return the generated series
 */
@Udf
public List<Integer> generateSeriesInt(
    @UdfParameter(description = "The beginning of the series") final int start,
    @UdfParameter(description = "Marks the end of the series (inclusive)") final int end
) {
    return generateSeriesInt(start, end, end >= start ? 1 : -1);
}
/**
 * An ascending range from 0 to 9 (inclusive) must contain exactly the ten
 * integers 0..9 in order.
 */
@Test
public void shouldComputePositiveIntRange() {
    final List<Integer> range = rangeUdf.generateSeriesInt(0, 9);
    assertThat(range, hasSize(10));
    int val = 0;
    for (final Integer i : range) {
        assertThat(val++, is(i));
    }
}
/**
 * Runs the given SQL via executeSql(), logging the statement first.
 * The interpreter context parameter is currently unused.
 */
@Override
public InterpreterResult interpret(String sql, InterpreterContext contextInterpreter) {
    logger.info("Run SQL command '{}'", sql);
    return executeSql(sql);
}
/**
 * A query carrying the "#standardSQL" dialect prefix and a WITH clause must
 * execute successfully.
 */
@Test
void testWithQueryPrefix() {
    InterpreterResult ret = bqInterpreter.interpret(
            "#standardSQL\n WITH t AS (select 1) SELECT * FROM t", context);
    assertEquals(InterpreterResult.Code.SUCCESS, ret.code());
}
/**
 * Loads all Grok patterns with the given IDs; IDs with no matching document
 * are simply absent from the result.
 *
 * Fix: the DBCursor is now closed via try-with-resources. Previously it was
 * left open after the results were copied, leaking the underlying database
 * cursor resources.
 *
 * @param patternIds the pattern IDs to load
 * @return the found patterns (possibly empty, never null)
 */
@Override
public Set<GrokPattern> bulkLoad(Collection<String> patternIds) {
    try (DBCursor<GrokPattern> dbCursor = dbCollection.find(DBQuery.in("_id", patternIds))) {
        return ImmutableSet.copyOf((Iterator<GrokPattern>) dbCursor);
    }
}
/**
 * bulkLoad must return an empty set (not null, not an error) when none of
 * the requested pattern IDs exist in the fixture data.
 */
@Test
@MongoDBFixtures("MongoDbGrokPatternServiceTest.json")
public void bulkLoadReturnsEmptySetIfGrokPatternsNotFound() {
    final List<String> idList = ImmutableList.of("56250da2d4000000deadbeef");
    final Set<GrokPattern> grokPatterns = service.bulkLoad(idList);
    assertThat(grokPatterns).isEmpty();
}
/**
 * Sets the liveness probe of the application under construction.
 *
 * @param livenessProbe the liveness probe value
 * @return this builder, for chaining
 */
public ApplicationBuilder livenessProbe(String livenessProbe) {
    this.livenessProbe = livenessProbe;
    return getThis();
}
/**
 * The liveness probe set on the builder must be carried into the built
 * application unchanged.
 */
@Test
void livenessProbe() {
    ApplicationBuilder applicationBuilder = new ApplicationBuilder();
    applicationBuilder.livenessProbe("TestProbe");
    String actualProbe = applicationBuilder.build().getLivenessProbe();
    Assertions.assertEquals("TestProbe", actualProbe);
}
/**
 * Returns the configured token validity in seconds, delegating straight to
 * the wrapped jwtTokenManager.
 */
@Override
public long getTokenValidityInSeconds() {
    return jwtTokenManager.getTokenValidityInSeconds();
}
/** The configured token validity must be a positive number of seconds. */
@Test
void testGetTokenValidityInSeconds() {
    assertTrue(cachedJwtTokenManager.getTokenValidityInSeconds() > 0);
}
/**
 * Parses a base58-check encoded (WIF) private key.
 *
 * @param network expected network, or null to auto-detect by matching the
 *                version byte against all registered networks
 * @param base58  the base58-check encoded key
 * @return the parsed key
 * @throws AddressFormatException if the encoding/checksum is invalid, or no
 *         network matches the version byte during auto-detection
 * @throws AddressFormatException.WrongNetwork if a network was specified and
 *         the version byte does not match it
 */
public static DumpedPrivateKey fromBase58(@Nullable Network network, String base58)
        throws AddressFormatException, AddressFormatException.WrongNetwork {
    byte[] versionAndDataBytes = Base58.decodeChecked(base58);
    // first byte encodes the network; the remainder is the raw key material
    int version = versionAndDataBytes[0] & 0xFF;
    byte[] bytes = Arrays.copyOfRange(versionAndDataBytes, 1, versionAndDataBytes.length);
    if (network == null) {
        for (NetworkParameters p : Networks.get())
            if (version == p.getDumpedPrivateKeyHeader())
                return new DumpedPrivateKey(p.network(), bytes);
        throw new AddressFormatException.InvalidPrefix("No network found for version " + version);
    } else {
        NetworkParameters params = NetworkParameters.of(network);
        if (version == params.getDumpedPrivateKeyHeader())
            return new DumpedPrivateKey(network, bytes);
        throw new AddressFormatException.WrongNetwork(version);
    }
}
/**
 * A key with an over-long payload (34 bytes) must be rejected with
 * InvalidDataLength even though the version byte and checksum are valid.
 */
@Test(expected = AddressFormatException.InvalidDataLength.class)
public void fromBase58_tooLong() {
    String base58 = Base58.encodeChecked(NetworkParameters.of(MAINNET).getDumpedPrivateKeyHeader(), new byte[34]);
    DumpedPrivateKey.fromBase58((Network) null, base58);
}
/** Returns all subnets, straight from the backing network store. */
@Override
public Set<Subnet> subnets() {
    return osNetworkStore.subnets();
}
/**
 * Subnet lookup by network id: one subnet for the known network, none for an
 * unknown id.
 */
@Test
public void testGetSubnetsByNetworkId() {
    createBasicNetworks();
    assertEquals("Subnet did not match", 1, target.subnets(NETWORK_ID).size());
    assertEquals("Subnet did not match", 0, target.subnets(UNKNOWN_ID).size());
}
/**
 * Parses one maven-shade-plugin log line ("Including g:a:type:version in the
 * shaded jar.") into a Dependency, or returns empty when it does not match.
 */
@VisibleForTesting
static Optional<Dependency> parseDependency(String line) {
    final Matcher matcher = SHADE_INCLUDE_MODULE_PATTERN.matcher(line);
    if (!matcher.find()) {
        return Optional.empty();
    }
    final Dependency dependency =
            Dependency.create(
                    matcher.group("groupId"),
                    matcher.group("artifactId"),
                    matcher.group("version"),
                    matcher.group("classifier"));
    return Optional.of(dependency);
}
/** Lines with a non-jar packaging type ("pom") must still be parsed. */
@Test
void testLineParsingWithNonJarType() {
    assertThat(
            ShadeParser.parseDependency(
                    "Including external:dependency1:pom:1.0 in the shaded jar."))
            .isPresent();
}
/**
 * Handles log lifecycle events for the non-aggregating log handler.
 *
 * APPLICATION_STARTED: records the app owner and signals that log handling is
 * initialized. CONTAINER_FINISHED: intentionally ignored (no per-container
 * work). APPLICATION_FINISHED: persists a log-deleter record to the state
 * store and schedules deletion — immediately when size-triggered deletion is
 * enabled and the app's logs already exceed the threshold, otherwise after
 * the configured retention delay.
 */
@SuppressWarnings("unchecked")
@Override
public void handle(LogHandlerEvent event) {
    switch (event.getType()) {
        case APPLICATION_STARTED:
            LogHandlerAppStartedEvent appStartedEvent = (LogHandlerAppStartedEvent) event;
            this.appOwners.put(appStartedEvent.getApplicationId(), appStartedEvent.getUser());
            this.dispatcher.getEventHandler().handle(
                    new ApplicationEvent(appStartedEvent.getApplicationId(),
                            ApplicationEventType.APPLICATION_LOG_HANDLING_INITED));
            break;
        case CONTAINER_FINISHED:
            // Ignore
            break;
        case APPLICATION_FINISHED:
            LogHandlerAppFinishedEvent appFinishedEvent = (LogHandlerAppFinishedEvent) event;
            ApplicationId appId = appFinishedEvent.getApplicationId();
            String user = appOwners.remove(appId);
            if (user == null) {
                // unknown app (e.g. finish event for an already-removed app)
                LOG.error("Unable to locate user for {}", appId);
                // send LOG_HANDLING_FAILED out
                NonAggregatingLogHandler.this.dispatcher.getEventHandler().handle(
                        new ApplicationEvent(appId, ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED));
                break;
            }
            LogDeleterRunnable logDeleter = new LogDeleterRunnable(user, appId);
            long deletionTimestamp = System.currentTimeMillis() + this.deleteDelaySeconds * 1000;
            // persist the deleter so deletion can be rescheduled after an NM restart
            LogDeleterProto deleterProto = LogDeleterProto.newBuilder()
                    .setUser(user)
                    .setDeletionTime(deletionTimestamp)
                    .build();
            try {
                stateStore.storeLogDeleter(appId, deleterProto);
            } catch (IOException e) {
                LOG.error("Unable to record log deleter state", e);
            }
            try {
                boolean logDeleterStarted = false;
                if (enableTriggerDeleteBySize) {
                    final long appLogSize = calculateSizeOfAppLogs(user, appId);
                    if (appLogSize >= deleteThreshold) {
                        // logs already exceed the size threshold: delete now
                        LOG.info("Log Deletion for application: {}, with no delay, size={}", appId, appLogSize);
                        sched.schedule(logDeleter, 0, TimeUnit.SECONDS);
                        logDeleterStarted = true;
                    }
                }
                if (!logDeleterStarted) {
                    LOG.info("Scheduling Log Deletion for application: {}, with delay of {} seconds", appId, this.deleteDelaySeconds);
                    sched.schedule(logDeleter, this.deleteDelaySeconds, TimeUnit.SECONDS);
                }
            } catch (RejectedExecutionException e) {
                // Handling this event in local thread before starting threads
                // or after calling sched.shutdownNow().
                logDeleter.run();
            }
            break;
        default:
    }
}
/**
 * Recovery scenario: after the handler restarts, a persisted log-deleter must
 * be rescheduled; once it runs, another restart must schedule nothing; and an
 * app-finished event for an already-removed app must produce a
 * LOG_HANDLING_FAILED event rather than a finish event.
 */
@Test
public void testRecovery() throws Exception {
    File[] localLogDirs = getLocalLogDirFiles(this.getClass().getName(), 2);
    String localLogDirsString = localLogDirs[0].getAbsolutePath() + ","
            + localLogDirs[1].getAbsolutePath();
    conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDirsString);
    conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false);
    conf.setLong(YarnConfiguration.NM_LOG_RETAIN_SECONDS,
            YarnConfiguration.DEFAULT_NM_LOG_RETAIN_SECONDS);
    dirsHandler.init(conf);
    appEventHandler.resetLogHandlingEvent();
    assertFalse(appEventHandler.receiveLogHandlingFinishEvent());
    NMStateStoreService stateStore = new NMMemoryStateStoreService();
    stateStore.init(conf);
    stateStore.start();
    NonAggregatingLogHandlerWithMockExecutor logHandler =
            new NonAggregatingLogHandlerWithMockExecutor(dispatcher, mockDelService,
                    dirsHandler, stateStore);
    logHandler.init(conf);
    logHandler.start();
    logHandler.handle(new LogHandlerAppStartedEvent(appId, user, null, null));
    logHandler.handle(new LogHandlerContainerFinishedEvent(container11,
            ContainerType.APPLICATION_MASTER, 0));
    logHandler.handle(new LogHandlerAppFinishedEvent(appId));

    // simulate a restart and verify deletion is rescheduled
    logHandler.close();
    logHandler = new NonAggregatingLogHandlerWithMockExecutor(dispatcher,
            mockDelService, dirsHandler, stateStore);
    logHandler.init(conf);
    logHandler.start();
    ArgumentCaptor<Runnable> schedArg = ArgumentCaptor.forClass(Runnable.class);
    verify(logHandler.mockSched).schedule(schedArg.capture(), anyLong(),
            eq(TimeUnit.MILLISECONDS));

    // execute the runnable and verify another restart has nothing scheduled
    schedArg.getValue().run();
    logHandler.close();
    logHandler = new NonAggregatingLogHandlerWithMockExecutor(dispatcher,
            mockDelService, dirsHandler, stateStore);
    logHandler.init(conf);
    logHandler.start();
    verify(logHandler.mockSched, never()).schedule(any(Runnable.class),
            anyLong(), any(TimeUnit.class));

    // wait events get drained.
    this.dispatcher.await();
    assertTrue(appEventHandler.receiveLogHandlingFinishEvent());

    appEventHandler.resetLogHandlingEvent();
    assertFalse(appEventHandler.receiveLogHandlingFailedEvent());
    // send an app finish event against a removed app
    logHandler.handle(new LogHandlerAppFinishedEvent(appId));
    this.dispatcher.await();
    // verify to receive a log failed event.
    assertTrue(appEventHandler.receiveLogHandlingFailedEvent());
    assertFalse(appEventHandler.receiveLogHandlingFinishEvent());
    logHandler.close();
}
/**
 * Cumulative distribution function P(X <= k).
 *
 * For lambda == 0 the distribution is a point mass at zero, so the CDF is a
 * step function at k = 0. Otherwise the discrete Poisson CDF is evaluated via
 * the regularized upper incomplete gamma function Q(floor(k + 1), lambda).
 */
@Override
public double cdf(double k) {
    if (lambda == 0) {
        // degenerate distribution: all mass at zero
        return k >= 0 ? 1.0 : 0.0;
    }
    if (k < 0) {
        return 0.0;
    }
    return Gamma.regularizedUpperIncompleteGamma(Math.floor(k + 1), lambda);
}
/**
 * Checks cdf() of Poisson(lambda = 3.5) against precomputed reference values,
 * including the out-of-range point k = -1 and the upper tail.
 */
@Test
public void testCdf() {
    System.out.println("cdf");
    PoissonDistribution instance = new PoissonDistribution(3.5);
    instance.rand();
    assertEquals(0, instance.cdf(-1), 1E-7);
    assertEquals(0.03019738, instance.cdf(0), 1E-7);
    assertEquals(0.1358882, instance.cdf(1), 1E-7);
    assertEquals(0.3208472, instance.cdf(2), 1E-7);
    assertEquals(0.5366327, instance.cdf(3), 1E-7);
    assertEquals(0.725445, instance.cdf(4), 1E-6);
    assertEquals(0.9989806, instance.cdf(10), 1E-6);
    assertEquals(0.999999, instance.cdf(15), 1E-6);
    assertEquals(1.000000, instance.cdf(20), 1E-6);
}
/**
 * Returns true only when the given tree is a literal whose value is a String
 * accepted by the configured matcher; any non-literal tree or non-String
 * literal does not match.
 */
@Override
public boolean matches(ExpressionTree expressionTree, VisitorState state) {
    if (!(expressionTree instanceof LiteralTree)) {
        return false;
    }
    Object literalValue = ((LiteralTree) expressionTree).getValue();
    return literalValue instanceof String && matcher.test((String) literalValue);
}
/**
 * StringLiteral must not match: a string literal with a different value, a
 * non-literal tree (identifier), or a literal of a non-string type (int).
 */
@Test
public void notMatches() {
    // TODO(b/67738557): consolidate helpers for creating fake trees
    LiteralTree tree = new LiteralTree() {
        @Override
        public Kind getKind() {
            throw new UnsupportedOperationException();
        }

        @Override
        public <R, D> R accept(TreeVisitor<R, D> visitor, D data) {
            throw new UnsupportedOperationException();
        }

        @Override
        public Object getValue() {
            return "a string literal";
        }
    };
    assertThat(new StringLiteral("different string").matches(tree, null)).isFalse();

    // a non-literal tree must never match
    IdentifierTree idTree = new IdentifierTree() {
        @Override
        public Name getName() {
            return null;
        }

        @Override
        public Kind getKind() {
            throw new UnsupportedOperationException();
        }

        @Override
        public <R, D> R accept(TreeVisitor<R, D> visitor, D data) {
            throw new UnsupportedOperationException();
        }
    };
    assertThat(new StringLiteral("test").matches(idTree, null)).isFalse();

    // TODO(b/67738557): consolidate helpers for creating fake trees
    LiteralTree intTree = new LiteralTree() {
        @Override
        public Object getValue() {
            return 5;
        }

        @Override
        public Kind getKind() {
            throw new UnsupportedOperationException();
        }

        @Override
        public <R, D> R accept(TreeVisitor<R, D> visitor, D data) {
            throw new UnsupportedOperationException();
        }
    };
    assertThat(new StringLiteral("test").matches(intTree, null)).isFalse();
}
/**
 * Proceeds with the intercepted join point and decorates its RxJava 2 return
 * value with the circuit breaker operator.
 *
 * @param proceedingJoinPoint the intercepted method invocation
 * @param circuitBreaker      circuit breaker protecting the method
 * @param methodName          method name, used for error reporting
 * @return the decorated reactive return value
 * @throws Throwable whatever the join point itself throws
 */
@SuppressWarnings("unchecked")
@Override
public Object handle(ProceedingJoinPoint proceedingJoinPoint, CircuitBreaker circuitBreaker, String methodName) throws Throwable {
    final Object returnValue = proceedingJoinPoint.proceed();
    final CircuitBreakerOperator circuitBreakerOperator = CircuitBreakerOperator.of(circuitBreaker);
    return executeRxJava2Aspect(circuitBreakerOperator, returnValue, methodName);
}
/**
 * The aspect must wrap both Single and Flowable return values and never
 * return null.
 */
@Test
public void testReactorTypes() throws Throwable {
    CircuitBreaker circuitBreaker = CircuitBreaker.ofDefaults("test");

    when(proceedingJoinPoint.proceed()).thenReturn(Single.just("Test"));
    assertThat(rxJava2CircuitBreakerAspectExt
            .handle(proceedingJoinPoint, circuitBreaker, "testMethod")).isNotNull();

    when(proceedingJoinPoint.proceed()).thenReturn(Flowable.just("Test"));
    assertThat(rxJava2CircuitBreakerAspectExt
            .handle(proceedingJoinPoint, circuitBreaker, "testMethod")).isNotNull();
}
/** Intentional no-op: this callback does not track activity pause events. */
@Override
public void onActivityPaused(Activity activity) {
}
/** Smoke test: the no-op pause callback must not throw. */
@Test
public void onActivityPaused() {
    mActivityLifecycle.onActivityPaused(mActivity);
}
/**
 * FEEL min(): returns the smallest element of the list.
 *
 * @param list the items to compare; must be non-null, non-empty and mutually
 *             comparable, otherwise an InvalidParametersEvent error result is
 *             returned
 */
public FEELFnResult<Object> invoke(@ParameterName("list") List list) {
    if (list == null || list.isEmpty()) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null or empty"));
    }
    try {
        return FEELFnResult.ofResult(Collections.min(list, new InterceptNotComparableComparator()));
    } catch (ClassCastException e) {
        // the comparator intercepts non-comparable items by throwing CCE
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "contains items that are not comparable"));
    }
}
/** min() of an empty argument array must report an InvalidParametersEvent. */
@Test
void invokeEmptyArray() {
    FunctionTestUtil.assertResultError(minFunction.invoke(new Object[]{}), InvalidParametersEvent.class);
}
/**
 * Rebuilds and (re)indexes the search entry for the given extension, guarded
 * by the write lock. No-op when search indexing is disabled. Retried on
 * transient data-access failures.
 *
 * Fix: the lock is now acquired *before* entering the try block. Previously
 * lock() was inside the try, so a failed acquisition would reach the finally
 * block and call unlock() on a lock that was never held, throwing
 * IllegalMonitorStateException and masking the original failure.
 *
 * @param extension the extension whose search entry should be refreshed
 */
@Retryable(DataAccessResourceFailureException.class)
public void updateSearchEntry(Extension extension) {
    if (!isEnabled()) {
        return;
    }
    rwLock.writeLock().lock();
    try {
        var stats = new SearchStats(repositories);
        var indexQuery = new IndexQueryBuilder()
                .withObject(relevanceService.toSearchEntry(extension, stats))
                .build();
        var indexOps = searchOperations.indexOps(ExtensionSearch.class);
        searchOperations.index(indexQuery, indexOps.getIndexCoordinates());
    } finally {
        rwLock.writeLock().unlock();
    }
}
/**
 * With equal ratings and timestamps, the extension with more reviews (100 vs
 * 2) must receive the higher relevance score.
 */
@Test
public void testRelevanceReviewCount() {
    var index = mockIndex(true);
    var ext1 = mockExtension("foo", "n1", "u1",4.0, 2, 0, LocalDateTime.parse("2020-01-01T00:00"), false, false);
    var ext2 = mockExtension("bar", "n2", "u2",4.0, 100, 0, LocalDateTime.parse("2020-01-01T00:00"), false, false);
    search.updateSearchEntry(ext1);
    search.updateSearchEntry(ext2);

    assertThat(index.entries).hasSize(2);
    assertThat(index.entries.get(0).relevance).isLessThan(index.entries.get(1).relevance);
}
/**
 * Extracts the PCollectionView created by a CreateView transform by
 * round-tripping the applied transform through its proto representation and
 * Java-deserializing the view from the spec payload.
 *
 * @param application the applied CreateView transform
 * @return the view embedded in the transform's payload
 * @throws IllegalArgumentException if the transform's URN is not the
 *         CreateView URN
 * @throws IOException if proto translation fails
 * @deprecated relies on Java serialization of the view inside the proto
 *             payload — NOTE(review): intended replacement not visible here;
 *             confirm against the surrounding translation utilities
 */
@Deprecated
public static <ElemT, ViewT> PCollectionView<ViewT> getView(
    AppliedPTransform<
            PCollection<ElemT>, PCollection<ElemT>,
            PTransform<PCollection<ElemT>, PCollection<ElemT>>>
        application)
    throws IOException {
  RunnerApi.PTransform transformProto =
      PTransformTranslation.toProto(
          application,
          Collections.emptyList(),
          SdkComponents.create(application.getPipeline().getOptions()));
  checkArgument(
      PTransformTranslation.CREATE_VIEW_TRANSFORM_URN.equals(transformProto.getSpec().getUrn()),
      "Illegal attempt to extract %s from transform %s with name \"%s\" and URN \"%s\"",
      PCollectionView.class.getSimpleName(),
      application.getTransform(),
      application.getFullName(),
      transformProto.getSpec().getUrn());
  return (PCollectionView<ViewT>)
      SerializableUtils.deserializeFromByteArray(
          transformProto.getSpec().getPayload().toByteArray(),
          PCollectionView.class.getSimpleName());
}
/**
 * The proto payload of a CreateView transform must contain a Java-serialized
 * copy of the view that deserializes back to an equal object.
 */
@Test
public void testEncodedProto() throws Exception {
    SdkComponents components = SdkComponents.create();
    components.registerEnvironment(Environments.createDockerEnvironment("java"));
    components.registerPCollection(testPCollection);

    AppliedPTransform<?, ?, ?> appliedPTransform =
            AppliedPTransform.of(
                    "foo",
                    PValues.expandInput(testPCollection),
                    PValues.expandOutput(createViewTransform.getView()),
                    createViewTransform,
                    ResourceHints.create(),
                    p);

    FunctionSpec payload = PTransformTranslation.toProto(appliedPTransform, components).getSpec();

    // Checks that the payload is what it should be
    PCollectionView<?> deserializedView =
            (PCollectionView<?>)
                    SerializableUtils.deserializeFromByteArray(
                            payload.getPayload().toByteArray(), PCollectionView.class.getSimpleName());

    assertThat(deserializedView, Matchers.equalTo(createViewTransform.getView()));
}
/**
 * Adds a request interceptor; alias for {@code addRequestInterceptor}.
 *
 * @param interceptor interceptor invoked before the request is sent
 * @return this request, for chaining
 */
public HttpRequest addInterceptor(HttpInterceptor<HttpRequest> interceptor) {
    return addRequestInterceptor(interceptor);
}
/**
 * Manual smoke test (disabled: performs real network I/O) exercising request
 * and response interceptors.
 */
@Test
@Disabled
public void addInterceptorTest() {
    HttpUtil.createGet("https://hutool.cn")
            .addInterceptor(Console::log)
            .addResponseInterceptor((res)-> Console.log(res.getStatus()))
            .execute();
}
/**
 * Returns the index of the start of the current block. Synchronized for
 * visibility of updates to the field — NOTE(review): writers are not visible
 * in this chunk; confirm they synchronize on the same monitor.
 */
public synchronized int getStartOfBlockIndex() {
    return startOfBlockIndex;
}
/**
 * The getter must reflect the initial sentinel (-1) and subsequent direct
 * field updates.
 */
@Test
public void testGetStartOfBlockIndex() {
    int expected = -1;
    assertEquals(expected, instance.getStartOfBlockIndex(), "Unexpected initial value");
    expected = 0;
    instance.startOfBlockIndex = expected;
    assertEquals(expected, instance.getStartOfBlockIndex());
    expected = 5;
    instance.startOfBlockIndex = expected;
    assertEquals(expected, instance.getStartOfBlockIndex());
}
/**
 * Creates a pipeline data source configuration of the requested type.
 *
 * @param type  configuration type constant
 * @param param serialized configuration parameter
 * @return the matching configuration instance
 * @throws UnsupportedSQLOperationException when the type is unknown
 */
public static PipelineDataSourceConfiguration newInstance(final String type, final String param) {
    if (StandardPipelineDataSourceConfiguration.TYPE.equals(type)) {
        return new StandardPipelineDataSourceConfiguration(param);
    }
    if (ShardingSpherePipelineDataSourceConfiguration.TYPE.equals(type)) {
        return new ShardingSpherePipelineDataSourceConfiguration(param);
    }
    throw new UnsupportedSQLOperationException(String.format("Unsupported data source type `%s`", type));
}
/**
 * The factory must produce a ShardingSpherePipelineDataSourceConfiguration
 * for the matching type constant.
 */
@Test
void assertNewInstanceForShardingSpherePipelineDataSourceConfiguration() {
    assertThat(PipelineDataSourceConfigurationFactory.newInstance(ShardingSpherePipelineDataSourceConfiguration.TYPE,
            "dataSources:\n" + " foo_ds:\n" + " url: jdbc:mock://127.0.0.1/foo_db"), instanceOf(ShardingSpherePipelineDataSourceConfiguration.class));
}
/**
 * Hash code consistent with equals(): integral numbers of equal value hash
 * identically via their long bits, other numbers via their double bits, and a
 * null value uses a fixed sentinel.
 */
@Override
public int hashCode() {
    if (value == null) {
        return 31;
    }
    // Using recommended hashing algorithm from Effective Java for longs and doubles
    if (isIntegral(this)) {
        long value = getAsNumber().longValue();
        return (int) (value ^ (value >>> 32));
    }
    if (value instanceof Number) {
        long value = Double.doubleToLongBits(getAsNumber().doubleValue());
        return (int) (value ^ (value >>> 32));
    }
    return value.hashCode();
}
/** A byte and an int with the same value must be equal and hash equally. */
@Test
public void testByteEqualsInteger() {
    JsonPrimitive p1 = new JsonPrimitive((byte) 10);
    JsonPrimitive p2 = new JsonPrimitive(10);
    assertThat(p1).isEqualTo(p2);
    assertThat(p1.hashCode()).isEqualTo(p2.hashCode());
}
/** Convenience overload delegating to {@code freeInstance(null)}. */
public static CredentialService freeInstance() {
    return freeInstance(null);
}
/**
 * getInstance() and the no-arg freeInstance() are expected to hand out equal
 * instances for the default (null) key.
 */
@Test
void testFreeInstance() {
    CredentialService credentialService1 = CredentialService.getInstance();
    CredentialService credentialService2 = CredentialService.freeInstance();
    assertEquals(credentialService1, credentialService2);
}
// Decodes a JSON array of enum names into MeasurementOption values.
// Returns null for a null input (mirrors the codec contract); valueOf throws IllegalArgumentException on unknown names.
@Override public List<MeasurementOption> decode(ArrayNode json, CodecContext context) { if (json == null) { return null; } List<MeasurementOption> moList = new ArrayList<>(); json.forEach(node -> moList.add(MeasurementOption.valueOf(node.asText()))); return moList; }
// Round-trips a JSON array of two measurement option names through the codec and checks order and values.
@Test public void testDecodeArrayNodeCodecContext() throws JsonProcessingException, IOException { String moStr = "{\"measurementsEnabled\": " + "[\"FRAME_DELAY_RANGE_BACKWARD_AVERAGE\", " + "\"INTER_FRAME_DELAY_VARIATION_FORWARD_AVERAGE\"]}"; InputStream input = new ByteArrayInputStream( moStr.getBytes(StandardCharsets.UTF_8)); JsonNode cfg = mapper.readTree(input); Iterable<MeasurementOption> moIter = context .codec(MeasurementOption.class) .decode((ArrayNode) cfg.get("measurementsEnabled"), context); Iterator<MeasurementOption> source = moIter.iterator(); List<MeasurementOption> moList = new ArrayList<>(); source.forEachRemaining(moList::add); assertEquals(MeasurementOption.FRAME_DELAY_RANGE_BACKWARD_AVERAGE.toString(), moList.get(0).name()); assertEquals(MeasurementOption.INTER_FRAME_DELAY_VARIATION_FORWARD_AVERAGE.toString(), moList.get(1).name()); }
// GET /{executionId}: looks up an execution within the resolved tenant.
// NOTE(review): returns null (HTTP 404 via framework mapping, presumably) when not found — confirm framework converts null to 404.
@ExecuteOn(TaskExecutors.IO) @Get(uri = "/{executionId}") @Operation(tags = {"Executions"}, summary = "Get an execution") public Execution get( @Parameter(description = "The execution id") @PathVariable String executionId ) { return executionRepository .findById(tenantService.resolveTenant(), executionId) .orElse(null); }
// Requesting an unknown execution id must surface as an HTTP 404.
@Test void getNotFound() { HttpClientResponseException e = assertThrows( HttpClientResponseException.class, () -> client.toBlocking().retrieve(GET("/api/v1/executions/exec_id_not_found")) ); assertThat(e.getStatus(), is(HttpStatus.NOT_FOUND)); }
// Static factory returning the shared singleton coder instance.
public static MetadataCoderV2 of() { return INSTANCE; }
// The coder must be Java-serializable (required for Beam pipeline shipping).
@Test public void testCoderSerializable() { CoderProperties.coderSerializable(MetadataCoderV2.of()); }
public static DMNModel findDMNModel(List<DMNModel> dmnModels, List<String> pathToFind, int step) { List<DMNModel> result = new ArrayList<>(); String pathToCompare = String.join(delimiter, pathToFind.subList(0, step)); for (DMNModel dmnModel : dmnModels) { String modelPath = new StringBuilder(dmnModel.getResource().getSourcePath()).reverse().toString(); if (modelPath.startsWith(pathToCompare)) { result.add(dmnModel); } } if (result.size() == 0) { throw new ImpossibleToFindDMNException("Retrieving the DMNModel has failed. Make sure the used DMN asset does not " + "produce any compilation errors and that the project does not " + "contain multiple DMN assets with the same name and namespace. " + "In addition, check if the reference to the DMN file is correct " + "in the Settings panel. " + "After addressing the issues, build the project again."); } else if (result.size() == 1) { return result.get(0); } else { return findDMNModel(dmnModels, pathToFind, step + 1); } }
// Finds the model by reversed path; an unmatched path must raise ImpossibleToFindDMNException with the full guidance message.
@Test public void findDMNModel() { List<String> pathToFind = List.of(new StringBuilder("to/find").reverse().toString().split("/")); List<DMNModel> models = List.of(createDMNModelMock("this/should/not/match"), createDMNModelMock("find"), createDMNModelMock("something/to/find")); DMNSimulationUtils.findDMNModel(models, pathToFind, 1); List<String> impossibleToFind = List.of(new StringBuilder("not/find").reverse().toString().split("/")); assertThatThrownBy(() -> DMNSimulationUtils.findDMNModel(models, impossibleToFind, 1)) .isInstanceOf(ImpossibleToFindDMNException.class) .hasMessage("Retrieving the DMNModel has failed. Make sure the used DMN asset does not " + "produce any compilation errors and that the project does not " + "contain multiple DMN assets with the same name and namespace. " + "In addition, check if the reference to the DMN file is correct " + "in the Settings panel. " + "After addressing the issues, build the project again."); }
// Static factory for a fresh Write transform builder.
public static <T> Write<T> write() { return new Write<>(); }
// JdbcIO.write() with only a table name (no prepared statement) must infer the statement from a schema-bearing type; verifies row count, then drops the table.
@Test public void testWriteWithoutPreparedStatementAndNonRowType() throws Exception { final int rowsToAdd = 10; String tableName = DatabaseTestHelper.getTestTableName("UT_WRITE_PS_NON_ROW"); DatabaseTestHelper.createTable(DATA_SOURCE, tableName); try { List<RowWithSchema> data = getRowsWithSchemaToWrite(rowsToAdd); pipeline .apply(Create.of(data)) .apply( JdbcIO.<RowWithSchema>write() .withDataSourceConfiguration(DATA_SOURCE_CONFIGURATION) .withBatchSize(10L) .withTable(tableName)); pipeline.run(); assertRowCount(DATA_SOURCE, tableName, rowsToAdd); } finally { DatabaseTestHelper.deleteTable(DATA_SOURCE, tableName); } }
// Public assertion entry point; delegates to the standard equality check implementation.
public void isEqualTo(@Nullable Object expected) { standardIsEqualTo(expected); }
// A null actual compared to a non-null expected must fail with "expected"/"but was" facts, rendering null as the string "null".
@Test public void isEqualToFailureWithNulls() { Object o = null; expectFailure.whenTesting().that(o).isEqualTo("a"); assertFailureKeys("expected", "but was"); assertFailureValue("expected", "a"); assertFailureValue("but was", "null"); }
/**
 * Buckets the constraint entries of each nested CompoundPredicate by its boolean
 * operator, then appends at most two aggregate entries (one AND, one OR) to
 * {@code toPopulate}. An unsupported operator aborts with IllegalStateException.
 */
static void populateKiePMMLFieldOperatorValueListWithCompoundPredicates(final List<KiePMMLFieldOperatorValue> toPopulate,
                                                                        final List<CompoundPredicate> compoundPredicates,
                                                                        final Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap) {
    final List<KiePMMLFieldOperatorValue> andEntries = new LinkedList<>();
    final List<KiePMMLFieldOperatorValue> orEntries = new LinkedList<>();
    for (CompoundPredicate nested : compoundPredicates) {
        switch (nested.getBooleanOperator()) {
            case OR:
                orEntries.addAll(getConstraintEntriesFromAndOrCompoundPredicate(nested, fieldTypeMap));
                break;
            case AND:
                andEntries.addAll(getConstraintEntriesFromAndOrCompoundPredicate(nested, fieldTypeMap));
                break;
            default:
                throw new IllegalStateException(String.format("Unmanaged CompoundPredicate.booleanOperator %s at populateKiePMMLFieldOperatorValueListWithCompoundPredicates", nested.getBooleanOperator()));
        }
    }
    // AND aggregate is added before OR, and each only when it has content.
    if (!andEntries.isEmpty()) {
        toPopulate.add(new KiePMMLFieldOperatorValue(null, BOOLEAN_OPERATOR.AND, Collections.emptyList(), andEntries));
    }
    if (!orEntries.isEmpty()) {
        toPopulate.add(new KiePMMLFieldOperatorValue(null, BOOLEAN_OPERATOR.OR, Collections.emptyList(), orEntries));
    }
}
// Population must produce exactly two aggregates (AND and OR); each nested operator value is matched
// back to its originating SimplePredicate and verified, and every nested predicate must be consumed.
@Test void populateKiePMMLFieldOperatorValueListWithCompoundPredicates() { final List<KiePMMLFieldOperatorValue> toPopulate = new ArrayList<>(); KiePMMLASTFactoryUtils.populateKiePMMLFieldOperatorValueListWithCompoundPredicates(toPopulate, compoundPredicates, fieldTypeMap); assertThat(toPopulate).isNotEmpty(); assertThat(toPopulate).hasSize(2); // one entry is for "AND" compounds and the other is for "OR" ones final Map<CompoundPredicate.BooleanOperator, List<CompoundPredicate>> partitionedCompoundPredicates = compoundPredicates.stream() .collect(Collectors.groupingBy(CompoundPredicate::getBooleanOperator)); partitionedCompoundPredicates.forEach((booleanOperator, compoundPredicates) -> { final KiePMMLFieldOperatorValue operatorValue = toPopulate.stream() .filter(kiePMMLFieldOperatorValue -> kiePMMLFieldOperatorValue.getOperator().equals(BOOLEAN_OPERATOR.byName(booleanOperator.value()))) .findFirst() .orElseThrow(() -> new RuntimeException("Failed toRetrieve KiePMMLFieldOperatorValue for " + "BooleanOperator " + booleanOperator)); final List<KiePMMLFieldOperatorValue> nestedKiePMMLFieldOperatorValues = operatorValue.getNestedKiePMMLFieldOperatorValues(); final List<Predicate> nestedPredicates = compoundPredicates.stream().flatMap(compoundPredicate -> compoundPredicate.getPredicates().stream()).collect(Collectors.toList()); assertThat(nestedKiePMMLFieldOperatorValues).hasSameSizeAs(nestedPredicates); nestedKiePMMLFieldOperatorValues.forEach(new Consumer<KiePMMLFieldOperatorValue>() { @Override public void accept(KiePMMLFieldOperatorValue kiePMMLFieldOperatorValue) { assertThat(kiePMMLFieldOperatorValue.getKiePMMLOperatorValues()).hasSize(1); final KiePMMLOperatorValue kiePMMLOperatorValue = kiePMMLFieldOperatorValue.getKiePMMLOperatorValues().get(0); SimplePredicate simplePredicate = nestedPredicates.stream() .map(predicate -> (SimplePredicate) predicate) .filter(predicate -> predicate.getField().equals(getOriginalPredicateName(kiePMMLFieldOperatorValue.getName()))) 
.findFirst() .orElseThrow(() -> new RuntimeException("Failed to find SimplePredicate for " + kiePMMLFieldOperatorValue.getName())); commonVerifyKiePMMLOperatorValue(kiePMMLOperatorValue, simplePredicate); nestedPredicates.remove(simplePredicate); } }); assertThat(nestedPredicates).isEmpty(); }); }
// Removes the model at index, notifying observers of the removal BEFORE mutating the list
// (observers can still read the pre-removal state).
@Override public EpoxyModel<?> remove(int index) { notifyRemoval(index, 1); return super.remove(index); }
// Removing a model that was never added must return false and must not notify the observer.
@Test public void testRemoveObjectNotAdded() { boolean removed = modelList.remove(new TestModel()); assertFalse(removed); verifyNoMoreInteractions(observer); }
// Map overload: adapts the map to a key-lookup function and delegates to the functional variant.
public RuntimeOptionsBuilder parse(Map<String, String> properties) { return parse(properties::get); }
// The WIP property set to "true" must turn on the wip flag in the built options.
@Test void should_parse_wip() { properties.put(Constants.WIP_PROPERTY_NAME, "true"); RuntimeOptions options = cucumberPropertiesParser.parse(properties).build(); assertThat(options.isWip(), equalTo(true)); }
/**
 * Runs the blame computation for every file requested by {@code input},
 * delegating each file to {@link #processFile} and reporting through {@code result}.
 */
@Override
public void blame(BlameInput input, BlameOutput result) {
    input.filesToBlame().forEach(inputFile -> processFile(inputFile, result));
}
// Writes a source file plus its sidecar ".scm" fixture (revision,author,date per line) and verifies the blame lines parsed from it.
@Test public void testBlame() throws IOException { File source = new File(baseDir, "src/foo.xoo"); FileUtils.write(source, "sample content"); File scm = new File(baseDir, "src/foo.xoo.scm"); FileUtils.write(scm, "123,julien,2014-12-12\n234,julien,2014-12-24"); DefaultInputFile inputFile = new TestInputFileBuilder("foo", "src/foo.xoo") .setLanguage(Xoo.KEY) .setModuleBaseDir(baseDir.toPath()) .build(); fs.add(inputFile); BlameOutput result = mock(BlameOutput.class); when(input.filesToBlame()).thenReturn(Arrays.asList(inputFile)); new XooBlameCommand().blame(input, result); verify(result).blameResult(inputFile, Arrays.asList( new BlameLine().revision("123").author("julien").date(DateUtils.parseDate("2014-12-12")), new BlameLine().revision("234").author("julien").date(DateUtils.parseDate("2014-12-24")))); }
// Deprecated convenience overload: qualifies this path against the file system's URI and working directory.
@Deprecated public Path makeQualified(FileSystem fs) { return makeQualified(fs.getUri(), fs.getWorkingDirectory()); }
// Qualification rules: a default URI contributes only its scheme/authority, and the default URI is ignored entirely when the working directory already carries a scheme.
@Test (timeout = 30000) public void testMakeQualified() throws URISyntaxException { URI defaultUri = new URI("hdfs://host1/dir1"); URI wd = new URI("hdfs://host2/dir2"); // The scheme from defaultUri is used but the path part is not assertEquals(new Path("hdfs://host1/dir/file"), new Path("file").makeQualified(defaultUri, new Path("/dir"))); // The defaultUri is only used if the path + wd has no scheme assertEquals(new Path("hdfs://host2/dir2/file"), new Path("file").makeQualified(defaultUri, new Path(wd))); }
// Parses the EF.COM structure and verifies that every mandatory RDA data group is listed in it;
// throws RdaException(RdaError.COM) when any required group is missing.
public COM verifyCom(byte[] data, Class<? extends COM> type) throws RdaException { final COM com = read(data, type); if (!com.getDataGroups().containsAll(com.getRdaDataGroups())) { throw new RdaException( RdaError.COM, String.format("Not all data groups are available: %s", com.getDataGroups()) ); } return com; }
// Corrupting the length byte of the EF.COM fixture must surface as a PARSE_FILE error caused by an Asn1Exception.
@Test public void shouldThrowErrorOnAsn1ExceptionCOM() throws Exception { final CardVerifier verifier = verifier(null, null); final byte[] com = readFixture("nik2014/efCom"); com[1]--; Exception exception = assertThrows(RdaException.class, () -> { verifier.verifyCom(com, TravelDocumentCOM.class); }); assertEquals(RdaError.PARSE_FILE, ((RdaException) exception).error); assertEquals("ASN1 parsing error: Read beyond bound 24 >= 23", exception.getMessage()); assertEquals(Asn1Exception.class, exception.getCause().getClass()); }
// Returns all codegen tables belonging to the given data source configuration (simple mapper delegation).
@Override public List<CodegenTableDO> getCodegenTableList(Long dataSourceConfigId) { return codegenTableMapper.selectListByDataSourceConfigId(dataSourceConfigId); }
// Inserts two tables with random (hence different) data source config ids and checks the query filters to exactly the matching one.
@Test public void testGetCodegenTableList() { // mock 数据 CodegenTableDO table01 = randomPojo(CodegenTableDO.class, o -> o.setScene(CodegenSceneEnum.ADMIN.getScene())); codegenTableMapper.insert(table01); CodegenTableDO table02 = randomPojo(CodegenTableDO.class, o -> o.setScene(CodegenSceneEnum.ADMIN.getScene())); codegenTableMapper.insert(table02); // 准备参数 Long dataSourceConfigId = table01.getDataSourceConfigId(); // 调用 List<CodegenTableDO> result = codegenService.getCodegenTableList(dataSourceConfigId); // 断言 assertEquals(1, result.size()); assertPojoEquals(table01, result.get(0)); }
// Builds a DelayableTimer that schedules the callable on the timer service after `delay`,
// but executes it through the mailbox executor so it runs on the task's main thread
// (and so exceptions propagate through the mailbox rather than being swallowed).
// The returned cancellable cancels the underlying scheduled future without interruption.
public static DelayableTimer createRegisterTimerCallback( MailboxExecutor mailboxExecutor, TimerService timerService) { return (callable, delay) -> { ScheduledFuture<?> scheduledFuture = timerService.registerTimer( timerService.getCurrentProcessingTime() + delay.toMillis(), timestamp -> mailboxExecutor.execute( () -> callable.call(), "Execute checkpoint barrier handler delayed action")); return () -> scheduledFuture.cancel(false); }; }
// An exception thrown inside a delayed timer task must escape via the mailbox step, not be silently dropped.
@Test void testDelayableTimerNotHiddenException() throws Exception { TaskMailbox mailbox = new TaskMailboxImpl(); MailboxProcessor mailboxProcessor = new MailboxProcessor(controller -> {}, mailbox, StreamTaskActionExecutor.IMMEDIATE); MailboxExecutor mailboxExecutor = new MailboxExecutorImpl( mailbox, 0, StreamTaskActionExecutor.IMMEDIATE, mailboxProcessor); TestProcessingTimeService timerService = new TestProcessingTimeService(); timerService.setCurrentTime(System.currentTimeMillis()); BarrierAlignmentUtil.DelayableTimer delayableTimer = BarrierAlignmentUtil.createRegisterTimerCallback(mailboxExecutor, timerService); Duration delay = Duration.ofMinutes(10); delayableTimer.registerTask( () -> { // simulate Exception in checkpoint sync phase throw new ExpectedTestException(); }, delay); timerService.advance(delay.toMillis()); assertThatThrownBy(mailboxProcessor::runMailboxStep) .as("BarrierAlignmentUtil.DelayableTimer should not hide exceptions") .isInstanceOf(ExpectedTestException.class); }
// Builds a generic record from column names and value expressions, in this order:
// 1) defaults to the schema's implicit columns when no names given; 2) arity check;
// 3) every column must exist (pseudo columns included) and not be insert-disallowed;
// 4) resolves expression values; 5) for tables, every primary key column must have a value;
// 6) ROWTIME falls back to the injected clock when absent.
public KsqlGenericRecord build( final List<ColumnName> columnNames, final List<Expression> expressions, final LogicalSchema schema, final DataSourceType dataSourceType ) { final List<ColumnName> columns = columnNames.isEmpty() ? implicitColumns(schema) : columnNames; if (columns.size() != expressions.size()) { throw new KsqlException( "Expected a value for each column." + " Expected Columns: " + columnNames + ". Got " + expressions); } final LogicalSchema schemaWithPseudoColumns = withPseudoColumns(schema); for (ColumnName col : columns) { if (!schemaWithPseudoColumns.findColumn(col).isPresent()) { throw new KsqlException("Column name " + col + " does not exist."); } if (SystemColumns.isDisallowedForInsertValues(col)) { throw new KsqlException("Inserting into column " + col + " is not allowed."); } } final Map<ColumnName, Object> values = resolveValues( columns, expressions, schemaWithPseudoColumns, functionRegistry, config ); if (dataSourceType == DataSourceType.KTABLE) { final String noValue = schemaWithPseudoColumns.key().stream() .map(Column::name) .filter(colName -> !values.containsKey(colName)) .map(ColumnName::text) .collect(Collectors.joining(", ")); if (!noValue.isEmpty()) { throw new KsqlException("Value for primary key column(s) " + noValue + " is required for tables"); } } final long ts = (long) values.getOrDefault(SystemColumns.ROWTIME_NAME, clock.getAsLong()); final GenericKey key = buildKey(schema, values); final GenericRow value = buildValue(schema, values); return KsqlGenericRecord.of(key, value, ts); }
// Inserting into a KTABLE without supplying the primary key column must fail with a "Value for primary key column" error.
@Test public void shouldThrowOnTableMissingKey() { // Given: final LogicalSchema schema = LogicalSchema.builder() .keyColumn(KEY, SqlTypes.STRING) .valueColumn(COL0, SqlTypes.STRING) .valueColumn(COL1, SqlTypes.STRING) .build(); final List<ColumnName> names = ImmutableList.of(COL0, COL1); final Expression exp = new StringLiteral("a"); // When: final KsqlException e = assertThrows(KsqlException.class, () -> recordFactory.build( names, ImmutableList.of(exp, exp), schema, DataSourceType.KTABLE )); // Then: assertThat(e.getMessage(), containsString("Value for primary key column")); }
// Computes the migration status by comparing the last applied migration number with the highest defined one.
public Status getStatus() { return getStatus(migrationHistory.getLastMigrationNumber(), migrationSteps.getMaxMigrationNumber()); }
// Equal DB and config migration numbers must report UP_TO_DATE.
@Test public void getStatus_returns_UP_TO_DATE_when_max_migration_number_in_table_is_equal_to_max_migration_number_in_configuration() { mockMaxMigrationNumberInDb(150L); mockMaxMigrationNumberInConfig(150L); assertThat(underTest.getStatus()).isEqualTo(UP_TO_DATE); }
// Prunes Iceberg partitions for the given predicate/version. Flow: empty version -> no partitions;
// otherwise trigger (or reuse) the cached plan-files result keyed by (db, table, snapshot, predicate),
// then derive one PartitionKey per distinct partition-value tuple from the file scan tasks.
// Tuples whose arity differs from the partition columns are skipped (partition evolution / non-identity columns);
// duplicates are de-duplicated via `scannedPartitions`.
@Override public List<PartitionKey> getPrunedPartitions(Table table, ScalarOperator predicate, long limit, TableVersionRange version) { IcebergTable icebergTable = (IcebergTable) table; String dbName = icebergTable.getRemoteDbName(); String tableName = icebergTable.getRemoteTableName(); if (version.end().isEmpty()) { return new ArrayList<>(); } PredicateSearchKey key = PredicateSearchKey.of(dbName, tableName, version.end().get(), predicate); triggerIcebergPlanFilesIfNeeded(key, icebergTable, predicate, limit); List<PartitionKey> partitionKeys = new ArrayList<>(); List<FileScanTask> icebergSplitTasks = splitTasks.get(key); if (icebergSplitTasks == null) { throw new StarRocksConnectorException("Missing iceberg split task for table:[{}.{}]. predicate:[{}]", dbName, tableName, predicate); } Set<List<String>> scannedPartitions = new HashSet<>(); PartitionSpec spec = icebergTable.getNativeTable().spec(); List<Column> partitionColumns = icebergTable.getPartitionColumnsIncludeTransformed(); boolean existPartitionTransformedEvolution = ((IcebergTable) table).hasPartitionTransformedEvolution(); for (FileScanTask fileScanTask : icebergSplitTasks) { org.apache.iceberg.PartitionData partitionData = (org.apache.iceberg.PartitionData) fileScanTask.file().partition(); List<String> values = PartitionUtil.getIcebergPartitionValues( spec, partitionData, existPartitionTransformedEvolution); if (values.size() != partitionColumns.size()) { // ban partition evolution and non-identify column. 
continue; } if (scannedPartitions.contains(values)) { continue; } else { scannedPartitions.add(values); } try { List<com.starrocks.catalog.Type> srTypes = new ArrayList<>(); for (PartitionField partitionField : spec.fields()) { if (partitionField.transform().isVoid()) { continue; } if (!partitionField.transform().isIdentity()) { Type sourceType = spec.schema().findType(partitionField.sourceId()); Type resultType = partitionField.transform().getResultType(sourceType); if (resultType == Types.DateType.get()) { resultType = Types.IntegerType.get(); } srTypes.add(fromIcebergType(resultType)); continue; } srTypes.add(icebergTable.getColumn(icebergTable.getPartitionSourceName(spec.schema(), partitionField)).getType()); } if (existPartitionTransformedEvolution) { srTypes = partitionColumns.stream() .map(Column::getType) .collect(Collectors.toList()); } partitionKeys.add(createPartitionKeyWithType(values, srTypes, table.getType())); } catch (Exception e) { LOG.error("create partition key failed.", e); throw new StarRocksConnectorException(e.getMessage()); } } return partitionKeys; }
// Two data files in the same partition must yield a single de-duplicated IcebergPartitionKey.
@Test public void testPartitionPruneWithDuplicated() { IcebergHiveCatalog icebergHiveCatalog = new IcebergHiveCatalog(CATALOG_NAME, new Configuration(), DEFAULT_CONFIG); List<Column> columns = Lists.newArrayList(new Column("id", INT), new Column("data", STRING)); IcebergMetadata metadata = new IcebergMetadata(CATALOG_NAME, HDFS_ENVIRONMENT, icebergHiveCatalog, Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor(), null); mockedNativeTableA.newFastAppend().appendFile(FILE_A).appendFile(FILE_A_1).commit(); mockedNativeTableA.refresh(); IcebergTable icebergTable = new IcebergTable(1, "srTableName", CATALOG_NAME, "resource_name", "db_name", "table_name", "", columns, mockedNativeTableA, Maps.newHashMap()); TableVersionRange versionRange = TableVersionRange.withEnd(Optional.of( mockedNativeTableA.currentSnapshot().snapshotId())); List<PartitionKey> partitionKeys = metadata.getPrunedPartitions(icebergTable, null, 1, versionRange); Assert.assertEquals(1, partitionKeys.size()); Assert.assertTrue(partitionKeys.get(0) instanceof IcebergPartitionKey); PartitionKey partitionKey = partitionKeys.get(0); Assert.assertEquals("types: [INT]; keys: [0]; ", partitionKey.toString()); }
// Test hook: looks up an available backend by id; returns null when the id is unknown or unavailable.
@VisibleForTesting ComputeNode getBackend(Long backendID) { return availableID2Backend.get(backendID); }
// getBackend must resolve through the available-backend map; shared helper does the per-id verification.
@Test public void testGetBackend() { DefaultWorkerProvider workerProvider = new DefaultWorkerProvider(id2Backend, id2ComputeNode, availableId2Backend, availableId2ComputeNode, true); testGetBackendHelper(workerProvider, availableId2Backend); }
// Static factory combining eviction checkers with AND/OR semantics.
// Rejects null arguments, an empty checker array, and unknown operators.
public static CompositeEvictionChecker newCompositeEvictionChecker(CompositionOperator compositionOperator, EvictionChecker... evictionCheckers) { Preconditions.isNotNull(compositionOperator, "composition"); Preconditions.isNotNull(evictionCheckers, "evictionCheckers"); if (evictionCheckers.length == 0) { throw new IllegalArgumentException("EvictionCheckers cannot be empty!"); } switch (compositionOperator) { case AND: return new CompositeEvictionCheckerWithAndComposition(evictionCheckers); case OR: return new CompositeEvictionCheckerWithOrComposition(evictionCheckers); default: throw new IllegalArgumentException("Invalid composition operator: " + compositionOperator); } }
// Passing no checkers (empty varargs) must be rejected with IllegalArgumentException.
@Test(expected = IllegalArgumentException.class) public void evictionCheckersCannotBeEmpty() { CompositeEvictionChecker.newCompositeEvictionChecker( CompositeEvictionChecker.CompositionOperator.AND); }
// DI constructor: trackers are queued ordered by their max end time so the earliest-expiring is polled first.
@Inject public PartialResultQueryManager() { this.queue = new PriorityBlockingQueue<>(1, comparing(PartialResultQueryTaskTracker::getMaxEndTime)); }
// Trackers with default maxEndTime=0 are instantly expired; after adding two, the background sweep must clear the queue within the sleep window.
@Test public void testPartialResultQueryManager() throws Exception { PartialResultQueryManager partialResultQueryManager = new PartialResultQueryManager(); assertEquals(0, partialResultQueryManager.getQueueSize()); PartialResultQueryTaskTracker tracker1 = new PartialResultQueryTaskTracker(partialResultQueryManager, 0.0, 2.0, warningCollector); PartialResultQueryTaskTracker tracker2 = new PartialResultQueryTaskTracker(partialResultQueryManager, 0.0, 2.0, warningCollector); // Assert that the trackers created above will have a default maxEndTime = 0. So current_time is always > tracker's maxEndTime.Meaning tracker is instantly ready for partial results. assertEquals(0, tracker1.getMaxEndTime()); assertEquals(0, tracker2.getMaxEndTime()); partialResultQueryManager.addQueryTaskTracker(tracker1); partialResultQueryManager.addQueryTaskTracker(tracker2); // Assert that the trackers are added to the queue assertEquals(2, partialResultQueryManager.getQueueSize()); // Sleep for 2s so that we give enough time to partialResultQueryManager to wake up and clear the trackers in queue Thread.sleep(2000); // Assert the trackers are cleared assertEquals(0, partialResultQueryManager.getQueueSize()); partialResultQueryManager.stop(); }
// Estimates a mutation's byte size: deletes are sized by their key set; otherwise sums per-value
// estimates (arrays handled specially, structs unsupported, primitives via the primitive estimator).
static long sizeOf(Mutation m) { if (m.getOperation() == Mutation.Op.DELETE) { return sizeOf(m.getKeySet()); } long result = 0; for (Value v : m.getValues()) { switch (v.getType().getCode()) { case ARRAY: result += estimateArrayValue(v); break; case STRUCT: throw new IllegalArgumentException("Structs are not supported in mutation."); default: result += estimatePrimitiveValue(v); } } return result; }
// A mutation carrying a STRUCT value must be rejected by the size estimator.
@Test(expected = IllegalArgumentException.class) public void unsupportedStructType() { Mutation struct = Mutation.newInsertOrUpdateBuilder("test") .set("one") .to(Struct.newBuilder().build()) .build(); MutationSizeEstimator.sizeOf(struct); }
/**
 * Orders string lists: shorter lists first; equal-length lists are compared
 * element-wise, case-insensitively, returning the first non-zero comparison.
 */
@Override
public int compare(final List<String> o1, final List<String> o2) {
    // Size decides first: the shorter list sorts before the longer one.
    if (o1.size() != o2.size()) {
        return o1.size() < o2.size() ? -1 : 1;
    }
    // Same length: first case-insensitive difference wins.
    for (int i = 0; i < o1.size(); i++) {
        final int elementComparison = o1.get(i).compareToIgnoreCase(o2.get(i));
        if (elementComparison != 0) {
            return elementComparison;
        }
    }
    return 0;
}
// Lists with identical elements (same order) must compare as equal (0).
@Test void testListsWithTheSameElementsAreEqual() { assertEquals(0, toTest.compare(List.of("mum"), List.of("mum"))); assertEquals(0, toTest.compare(List.of("mum", "dad"), List.of("mum", "dad"))); }
// Convenience overload: parses the natural-language date string relative to "now".
public Result parse(final String string) throws DateNotParsableException { return this.parse(string, new Date()); }
// An empty date string must be rejected with DateNotParsableException.
@Test public void testParseFailsOnEmptyDate() throws Exception { assertThrows(NaturalDateParser.DateNotParsableException.class, () -> { naturalDateParser.parse(""); }); }
// Integer overload of the degrees UDF: widens to Double (null-safe) and delegates to the Double variant.
@Udf(description = "Converts an INT value in radians to a value in degrees") public Double degrees( @UdfParameter( value = "value", description = "The value in radians to convert to degrees." ) final Integer value ) { return degrees(value == null ? null : value.doubleValue()); }
// Negative radians (double, int and long overloads) must convert to the expected negative degrees within tolerance.
@Test public void shouldHandleNegative() { assertThat(udf.degrees(-Math.PI), closeTo(-180.0, 0.000000000000001)); assertThat(udf.degrees(-2 * Math.PI), closeTo(-360.0, 0.000000000000001)); assertThat(udf.degrees(-1.2345), closeTo(-70.73163980890013, 0.000000000000001)); assertThat(udf.degrees(-2), closeTo(-114.59155902616465, 0.000000000000001)); assertThat(udf.degrees(-2L), closeTo(-114.59155902616465, 0.000000000000001)); }
// Verifies a decoded JWT against the given public key. Steps:
// 1) map the key algorithm name to an auth0 Algorithm (ClassCastException -> key/alg mismatch);
// 2) build a verifier requiring audience, iat/exp/nbf/sub presence (plus the role claim when it
//    is not the subject) with the configured clock leeway;
// 3) translate each verification failure into an AuthenticationException, incrementing the
//    matching failure metric for observability.
DecodedJWT verifyJWT(PublicKey publicKey, String publicKeyAlg, DecodedJWT jwt) throws AuthenticationException { if (publicKeyAlg == null) { incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ALGORITHM); throw new AuthenticationException("PublicKey algorithm cannot be null"); } Algorithm alg; try { switch (publicKeyAlg) { case ALG_RS256: alg = Algorithm.RSA256((RSAPublicKey) publicKey, null); break; case ALG_RS384: alg = Algorithm.RSA384((RSAPublicKey) publicKey, null); break; case ALG_RS512: alg = Algorithm.RSA512((RSAPublicKey) publicKey, null); break; case ALG_ES256: alg = Algorithm.ECDSA256((ECPublicKey) publicKey, null); break; case ALG_ES384: alg = Algorithm.ECDSA384((ECPublicKey) publicKey, null); break; case ALG_ES512: alg = Algorithm.ECDSA512((ECPublicKey) publicKey, null); break; default: incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ALGORITHM); throw new AuthenticationException("Unsupported algorithm: " + publicKeyAlg); } } catch (ClassCastException e) { incrementFailureMetric(AuthenticationExceptionCode.ALGORITHM_MISMATCH); throw new AuthenticationException("Expected PublicKey alg [" + publicKeyAlg + "] does match actual alg."); } // We verify issuer when retrieving the PublicKey, so it is not verified here. 
// The claim presence requirements are based on https://openid.net/specs/openid-connect-basic-1_0.html#IDToken Verification verifierBuilder = JWT.require(alg) .acceptLeeway(acceptedTimeLeewaySeconds) .withAnyOfAudience(allowedAudiences) .withClaimPresence(RegisteredClaims.ISSUED_AT) .withClaimPresence(RegisteredClaims.EXPIRES_AT) .withClaimPresence(RegisteredClaims.NOT_BEFORE) .withClaimPresence(RegisteredClaims.SUBJECT); if (isRoleClaimNotSubject) { verifierBuilder = verifierBuilder.withClaimPresence(roleClaim); } JWTVerifier verifier = verifierBuilder.build(); try { return verifier.verify(jwt); } catch (TokenExpiredException e) { incrementFailureMetric(AuthenticationExceptionCode.EXPIRED_JWT); throw new AuthenticationException("JWT expired: " + e.getMessage()); } catch (SignatureVerificationException e) { incrementFailureMetric(AuthenticationExceptionCode.ERROR_VERIFYING_JWT_SIGNATURE); throw new AuthenticationException("JWT signature verification exception: " + e.getMessage()); } catch (InvalidClaimException e) { incrementFailureMetric(AuthenticationExceptionCode.INVALID_JWT_CLAIM); throw new AuthenticationException("JWT contains invalid claim: " + e.getMessage()); } catch (AlgorithmMismatchException e) { incrementFailureMetric(AuthenticationExceptionCode.ALGORITHM_MISMATCH); throw new AuthenticationException("JWT algorithm does not match Public Key algorithm: " + e.getMessage()); } catch (JWTDecodeException e) { incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT); throw new AuthenticationException("Error while decoding JWT: " + e.getMessage()); } catch (JWTVerificationException | IllegalArgumentException e) { incrementFailureMetric(AuthenticationExceptionCode.ERROR_VERIFYING_JWT); throw new AuthenticationException("JWT verification failed: " + e.getMessage()); } }
// A token whose expiry is 60s in the past must fail verification with AuthenticationException.
@Test public void ensureExpiredTokenFails() { KeyPair keyPair = Keys.keyPairFor(SignatureAlgorithm.RS256); DefaultJwtBuilder defaultJwtBuilder = new DefaultJwtBuilder(); addValidMandatoryClaims(defaultJwtBuilder, basicProviderAudience); defaultJwtBuilder.setExpiration(Date.from(Instant.now().minusSeconds(60))); defaultJwtBuilder.signWith(keyPair.getPrivate()); DecodedJWT jwt = JWT.decode(defaultJwtBuilder.compact()); Assert.assertThrows(AuthenticationException.class, () -> basicProvider.verifyJWT(keyPair.getPublic(), SignatureAlgorithm.RS256.getValue(), jwt)); }
// Splits properties of the form "<prefix>.<key>" into per-prefix Properties maps,
// then drops any non-common prefix lacking the mandatory driver and url keys.
// Finally configures result limits and the SQL splitter.
@Override public void open() { super.open(); for (String propertyKey : properties.stringPropertyNames()) { LOGGER.debug("propertyKey: {}", propertyKey); String[] keyValue = propertyKey.split("\\.", 2); if (2 == keyValue.length) { LOGGER.debug("key: {}, value: {}", keyValue[0], keyValue[1]); Properties prefixProperties; if (basePropertiesMap.containsKey(keyValue[0])) { prefixProperties = basePropertiesMap.get(keyValue[0]); } else { prefixProperties = new Properties(); basePropertiesMap.put(keyValue[0].trim(), prefixProperties); } prefixProperties.put(keyValue[1].trim(), getProperty(propertyKey)); } } Set<String> removeKeySet = new HashSet<>(); for (String key : basePropertiesMap.keySet()) { if (!COMMON_KEY.equals(key)) { Properties properties = basePropertiesMap.get(key); if (!properties.containsKey(DRIVER_KEY) || !properties.containsKey(URL_KEY)) { LOGGER.error("{} will be ignored. {}.{} and {}.{} is mandatory.", key, DRIVER_KEY, key, key, URL_KEY); removeKeySet.add(key); } } } for (String key : removeKeySet) { basePropertiesMap.remove(key); } LOGGER.debug("JDBC PropertiesMap: {}", basePropertiesMap); setMaxLineResults(); setMaxRows(); //TODO(zjffdu) Set different sql splitter for different sql dialects. this.sqlSplitter = new SqlSplitter(); }
// A query against a missing table, run with a refresh interval, must yield an ERROR result whose message names the missing table.
@Test void testInvalidSelectWithRefresh() throws IOException, InterpreterException { Properties properties = new Properties(); properties.setProperty("common.max_count", "1000"); properties.setProperty("common.max_retry", "3"); properties.setProperty("default.driver", "org.h2.Driver"); properties.setProperty("default.url", getJdbcConnection()); properties.setProperty("default.user", ""); properties.setProperty("default.password", ""); JDBCInterpreter t = new JDBCInterpreter(properties); t.open(); context.getLocalProperties().put("refreshInterval", "1000"); String sqlQuery = "select * from invalid_table;"; InterpreterResult interpreterResult = t.interpret(sqlQuery, context); assertEquals(InterpreterResult.Code.ERROR, interpreterResult.code()); assertTrue(interpreterResult.message() .get(0).getData().contains("Table \"INVALID_TABLE\" not found;"), interpreterResult.toString()); }
/**
 * Turns off the given ability on the current node. Keys whose mode has no
 * registered ability table are silently ignored.
 */
public void disableCurrentNodeAbility(AbilityKey abilityKey) {
    Map<String, Boolean> modeAbilities = this.currentNodeAbilities.get(abilityKey.getMode());
    if (modeAbilities == null) {
        return;
    }
    doTurn(modeAbilities, abilityKey, false);
}
// Disabling an ability must notify the registered listener (asynchronously, hence the sleep) and re-throw any assertion captured in the listener.
@Test void testDisableCurrentNodeAbility() throws InterruptedException { isOn = false; abilityControlManager.disableCurrentNodeAbility(AbilityKey.SERVER_TEST_1); TimeUnit.MILLISECONDS.sleep(1100); assertTrue(notified); if (null != assertionError) { throw assertionError; } }