Dataset schema:
  focal_method — string, lengths 13 to 60.9k
  test_case — string, lengths 25 to 109k
/** Creates a {@link CacheContext} populated with all default settings. */
public static CacheContext defaults() {
    CacheContext context = new CacheContext();
    return context;
}
/** Verifies that a default context exposes the documented default values. */
@Test
public void defaults() {
    CacheContext context = CacheContext.defaults();
    assertEquals(CacheQuota.UNLIMITED, context.getCacheQuota());
    assertEquals(CacheScope.GLOBAL, context.getCacheScope());
    assertNull(context.getCacheIdentifier());
    assertNull(context.getHiveCacheContext());
}
/**
 * Imports service definitions referenced by a repository URL.
 * <p>
 * HTTP(S) URLs are downloaded first (optionally with a secret, and optionally with SSL
 * validation disabled); any other URL is treated as a local file path. The resulting local
 * file is handed to the file-based overload together with a reference resolver rooted at
 * the repository URL, so relative references inside the artifact can be fetched.
 *
 * @throws MockRepositoryImportException when an HTTP(S) URL cannot be downloaded
 */
public List<Service> importServiceDefinition(String repositoryUrl, Secret repositorySecret, boolean disableSSLValidation, boolean mainArtifact) throws MockRepositoryImportException { log.info("Importing service definitions from {}", repositoryUrl); File localFile = null; Map<String, List<String>> fileProperties = null; if (repositoryUrl.startsWith("http")) { try { HTTPDownloader.FileAndHeaders fileAndHeaders = HTTPDownloader .handleHTTPDownloadToFileAndHeaders(repositoryUrl, repositorySecret, disableSSLValidation); localFile = fileAndHeaders.getLocalFile(); fileProperties = fileAndHeaders.getResponseHeaders(); } catch (IOException ioe) { throw new MockRepositoryImportException(repositoryUrl + " cannot be downloaded", ioe); } } else { // Simply build localFile from repository url. localFile = new File(repositoryUrl); } RelativeReferenceURLBuilder referenceURLBuilder = RelativeReferenceURLBuilderFactory .getRelativeReferenceURLBuilder(fileProperties); String artifactName = referenceURLBuilder.getFileName(repositoryUrl, fileProperties); // Initialize a reference resolver to the folder of this repositoryUrl. ReferenceResolver referenceResolver = new ReferenceResolver(repositoryUrl, repositorySecret, disableSSLValidation, referenceURLBuilder); return importServiceDefinition(localFile, referenceResolver, new ArtifactInfo(artifactName, mainArtifact)); }
// Imports a main OpenAPI artifact plus two secondary artifacts (a Postman collection and
// an APIMetadata file), none of which may throw, then verifies the metadata labels,
// default delay, FALLBACK dispatcher and resource paths that were merged onto the
// resulting service.
@Test void testImportServiceDefinitionMainAndSecondariesWithAPIMetadata() { List<Service> services = null; try { File artifactFile = new File( "target/test-classes/io/github/microcks/service/weather-forecast-raw-openapi.yaml"); services = service.importServiceDefinition(artifactFile, null, new ArtifactInfo("weather-forecast-raw-openapi.yaml", true)); } catch (MockRepositoryImportException mrie) { mrie.printStackTrace(); fail("No MockRepositoryImportException should have be thrown"); } try { File artifactFile = new File("target/test-classes/io/github/microcks/service/weather-forecast-postman.json"); services = service.importServiceDefinition(artifactFile, null, new ArtifactInfo("weather-forecast-postman.json", false)); } catch (MockRepositoryImportException mrie) { mrie.printStackTrace(); fail("No MockRepositoryImportException should have be thrown"); } try { File artifactFile = new File("target/test-classes/io/github/microcks/service/weather-forecast-metadata.yaml"); services = service.importServiceDefinition(artifactFile, null, new ArtifactInfo("weather-forecast-metadata.yaml", false)); } catch (MockRepositoryImportException mrie) { mrie.printStackTrace(); fail("No MockRepositoryImportException should have be thrown"); } // Inspect Service own attributes. 
Service importedSvc = services.get(0); assertEquals("WeatherForecast API", importedSvc.getName()); assertEquals("1.1.0", importedSvc.getVersion()); assertEquals("weather-forecast-raw-openapi.yaml", importedSvc.getSourceArtifact()); assertNotNull(importedSvc.getMetadata()); assertEquals(3, importedSvc.getMetadata().getLabels().size()); assertEquals("weather", importedSvc.getMetadata().getLabels().get("domain")); assertEquals("GA", importedSvc.getMetadata().getLabels().get("status")); assertEquals("Team C", importedSvc.getMetadata().getLabels().get("team")); assertEquals(1, importedSvc.getOperations().size()); assertEquals(100, importedSvc.getOperations().get(0).getDefaultDelay().longValue()); assertEquals(DispatchStyles.FALLBACK, importedSvc.getOperations().get(0).getDispatcher()); assertNotNull(importedSvc.getOperations().get(0).getDispatcherRules()); assertEquals(5, importedSvc.getOperations().get(0).getResourcePaths().size()); }
/**
 * Tool entry point: reads an Avro IDL file (or stdin when the name is "-") and writes one
 * schema file per named type to the output directory. With {@code --useJavaCC} the legacy
 * JavaCC-based {@code Idl} parser is used; otherwise the newer {@code IdlReader}. Parser
 * warnings are printed to {@code err}.
 *
 * @return 0 on success, -1 on a usage error
 */
@Override public int run(InputStream in, PrintStream out, PrintStream err, List<String> args) throws Exception { boolean useJavaCC = "--useJavaCC".equals(getArg(args, 0, null)); if (args.isEmpty() || args.size() > (useJavaCC ? 3 : 2) || isRequestingHelp(args)) { err.println("Usage: idl2schemata [--useJavaCC] [idl [outdir]]"); err.println(); err.println("If an output directory is not specified, " + "outputs to current directory."); return -1; } String inputName = getArg(args, useJavaCC ? 1 : 0, "-"); File inputFile = "-".equals(inputName) ? null : new File(inputName); File outputDirectory = getOutputDirectory(getArg(args, useJavaCC ? 2 : 1, "")); if (useJavaCC) { try (Idl parser = new Idl(inputFile)) { final Protocol protocol = parser.CompilationUnit(); final List<String> warnings = parser.getWarningsAfterParsing(); for (String warning : warnings) { err.println("Warning: " + warning); } for (Schema schema : protocol.getTypes()) { print(schema, outputDirectory); } } } else { IdlReader parser = new IdlReader(); IdlFile idlFile = inputFile == null ? parser.parse(in) : parser.parse(inputFile.toPath()); for (String warning : idlFile.getWarnings()) { err.println("Warning: " + warning); } for (Schema schema : idlFile.getNamedSchemas().values()) { print(schema, outputDirectory); } } return 0; }
// Runs the tool with the JavaCC parser against a fixture IDL file, then verifies that four
// schema files were produced and that the expected "ignored documentation comment" warning
// was printed with its exact text (including the full license-header quote).
@Test public void testSplitIdlIntoSchemataUsingJavaCC() throws Exception { String idl = "src/test/idl/protocol.avdl"; String outdir = "target/test-split"; ByteArrayOutputStream buffer = new ByteArrayOutputStream(); List<String> arglist = Arrays.asList("--useJavaCC", idl, outdir); new IdlToSchemataTool().run(null, null, new PrintStream(buffer), arglist); String[] files = new File(outdir).list(); assertEquals(4, files.length); String warnings = readPrintStreamBuffer(buffer); assertEquals( "Warning: Found documentation comment at line 19, column 1. Ignoring previous one at line 1, column 1: " + "\"Licensed to the Apache Software Foundation (ASF) under one\n" + "or more contributor license agreements. See the NOTICE file\n" + "distributed with this work for additional information\n" + "regarding copyright ownership. The ASF licenses this file\n" + "to you under the Apache License, Version 2.0 (the\n" + "\"License\"); you may not use this file except in compliance\n" + "with the License. You may obtain a copy of the License at\n" + "\n https://www.apache.org/licenses/LICENSE-2.0\n\n" + "Unless required by applicable law or agreed to in writing, software\n" + "distributed under the License is distributed on an \"AS IS\" BASIS,\n" + "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" + "See the License for the specific language governing permissions and\n" + "limitations under the License.\"\nDid you mean to use a multiline comment ( /* ... */ ) instead?", warnings); }
/**
 * Splits a cron entry into its whitespace-separated fields.
 *
 * @param cron the raw cron entry
 * @return the fields, in order
 * @throws IllegalArgumentException when the entry does not contain exactly
 *         {@code NUMBER_TOKENS} fields
 */
static List<String> tokenize(String cron) throws IllegalArgumentException {
    List<String> fields = new ArrayList<String>();
    StringTokenizer tokenizer = new StringTokenizer(cron);
    while (tokenizer.hasMoreTokens()) {
        fields.add(tokenizer.nextToken());
    }
    if (fields.size() != NUMBER_TOKENS) {
        throw new IllegalArgumentException(
                "Not a valid cron entry - wrong number of tokens(" + fields.size() + "): " + cron);
    }
    return fields;
}
/**
 * Verifies cron tokenizing: valid 5-field entries are split in order, and entries with the
 * wrong field count are rejected with IllegalArgumentException.
 */
@Test
public void testTokenize() {
    String test = "*/5 * * * *";
    List<String> list = CronParser.tokenize(test);
    assertEquals(5, list.size());

    // Too many fields must be rejected.
    test = "*/5 * * * * *";
    try {
        CronParser.tokenize(test);
        fail("Should have thrown an exception");
    } catch (IllegalArgumentException e) {
        // expected
    }

    // Too few fields must be rejected.
    // Bug fixed: the original reused a VALID 5-field entry here and caught Throwable,
    // which swallowed the AssertionError thrown by fail() — the case silently passed
    // while asserting nothing. Catch only the expected exception type.
    test = "*/5 * * *";
    try {
        CronParser.tokenize(test);
        fail("Should have thrown an exception");
    } catch (IllegalArgumentException e) {
        // expected
    }

    // Fields come back in order. (expected value first per JUnit convention)
    test = "0 1 2 3 4";
    list = CronParser.tokenize(test);
    assertEquals(5, list.size());
    assertEquals("0", list.get(0));
    assertEquals("1", list.get(1));
    assertEquals("2", list.get(2));
    assertEquals("3", list.get(3));
    assertEquals("4", list.get(4));
}
/** Derives this meta's name from its file name; does nothing when no file name is set. */
@Override
public void nameFromFilename() {
  if ( Utils.isEmpty( filename ) ) {
    return;
  }
  setName( Const.createName( filename ) );
}
@Test
public void testNameFromFilename() {
  // Without a file name, the name stays unset.
  assertNull( meta.getName() );
  assertNull( meta.getFilename() );
  meta.nameFromFilename();
  assertNull( meta.getName() );

  // With a file name, the derived name is taken from the file's base name.
  meta.setFilename( "/path/to/my/file 2.ktr" );
  meta.nameFromFilename();
  assertEquals( "file 2", meta.getName() );
}
/** Builds an {@link Application} from the given builder. */
@Beta
public static Application fromBuilder(Builder builder) throws Exception {
    Application application = builder.build();
    return application;
}
// Builds an application containing a single "foo" MockDocproc component configured through
// a MockApplicationConfig (struct id/value pair) and verifies the component can be looked
// up by its id. The try-with-resources closes the application facade afterwards.
@Test void component_with_config() throws Exception { MockApplicationConfig config = new MockApplicationConfig(new MockApplicationConfig.Builder().mystruct(new MockApplicationConfig.Mystruct.Builder().id("foo").value("bar"))); try (ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(new Application.Builder().container("default", new Application.Builder.Container() .component("foo", MockDocproc.class, config))))) { Component c = app.getComponentById("foo"); assertNotNull(c); } }
/** Describes the giant's current health, fatigue and nourishment. */
@Override
public String toString() {
  return String.format("The giant looks %s, %s and %s.",
      health, fatigue, nourishment);
}
@Test
void testSetHealth() {
  final var model = new GiantModel(Health.HEALTHY, Fatigue.ALERT, Nourishment.SATURATED);
  assertEquals(Health.HEALTHY, model.getHealth());
  final var messageFormat = "The giant looks %s, alert and saturated.";
  // Every health value must be stored and reflected in toString().
  for (final var health : Health.values()) {
    model.setHealth(health);
    assertEquals(health, model.getHealth());
    assertEquals(String.format(messageFormat, health), model.toString());
  }
}
/**
 * Returns the sorted-set entries with their scores for the given index range, choosing
 * the RESP3 command variant when the service manager reports RESP3 mode.
 */
@Override
public Set<Tuple> zRangeWithScores(byte[] key, long start, long end) {
    boolean resp3 = executorService.getServiceManager().isResp3();
    if (resp3) {
        return read(key, ByteArrayCodec.INSTANCE, ZRANGE_ENTRY_V2, key, start, end, "WITHSCORES");
    }
    return read(key, ByteArrayCodec.INSTANCE, ZRANGE_ENTRY, key, start, end, "WITHSCORES");
}
@Test
public void testZRangeWithScores() {
    StringRedisTemplate template = new StringRedisTemplate();
    template.setConnectionFactory(new RedissonConnectionFactory(redisson));
    template.afterPropertiesSet();
    // Populate a sorted set with three scored members.
    template.boundZSetOps("test").add("1", 10);
    template.boundZSetOps("test").add("2", 20);
    template.boundZSetOps("test").add("3", 30);
    Set<ZSetOperations.TypedTuple<String>> tuples = template.boundZSetOps("test").rangeWithScores(0, 100);
    assertThat(tuples).hasSize(3);
    assertThat(tuples).containsExactlyInAnyOrder(
            ZSetOperations.TypedTuple.of("1", 10D),
            ZSetOperations.TypedTuple.of("2", 20D),
            ZSetOperations.TypedTuple.of("3", 30D));
}
/**
 * Returns the version field.
 *
 * @return the stored version value
 */
public int getVersion() { return version; }
@Test
public void testGetVersion() {
    // The header must report the version recorded in the test parameters.
    final int expected = TestParameters.VP_ITSP_VERSION;
    assertEquals(expected, chmItspHeader.getVersion());
}
/**
 * Pushes the changed job to the SSE client and closes the exchange once the job has
 * reached a terminal state (succeeded, failed or deleted).
 */
@Override
public void onChange(Job job) {
    sendObject(job);
    boolean terminal = job.hasState(SUCCEEDED) || job.hasState(FAILED) || job.hasState(DELETED);
    if (terminal) {
        close();
    }
}
@Test
void sseConnectionIsClosedIfJobStateIsFailed() throws IOException {
    // A FAILED job is terminal, so the exchange must deregister its storage listener.
    JobSseExchange exchange = new JobSseExchange(httpExchange, storageProvider, new JacksonJsonMapper());
    exchange.onChange(aFailedJob().build());
    verify(storageProvider).removeJobStorageOnChangeListener(exchange);
}
/**
 * Updates the issue's locations when they differ (ignoring hashes) from the given value,
 * marking the issue as changed.
 *
 * @return {@code true} when the issue was modified
 */
public boolean setLocations(DefaultIssue issue, @Nullable Object locations) {
    if (locationsEqualsIgnoreHashes(locations, issue.getLocations())) {
        return false;
    }
    issue.setLocations(locations);
    issue.setChanged(true);
    issue.setLocationsChanged(true);
    return true;
}
// Changing the primary location's text range (different end line) must update the stored
// locations and set the changed/locationsChanged flags, while recording no field diff
// (currentChange) and triggering no notifications.
@Test void change_locations_if_primary_text_rage_changed() { DbCommons.TextRange range = DbCommons.TextRange.newBuilder().setStartLine(1).build(); DbIssues.Locations locations = DbIssues.Locations.newBuilder() .setTextRange(range) .build(); DbIssues.Locations locations2 = locations.toBuilder().setTextRange(range.toBuilder().setEndLine(2).build()).build(); issue.setLocations(locations); boolean updated = underTest.setLocations(issue, locations2); assertThat(updated).isTrue(); assertThat((Object) issue.getLocations()).isEqualTo(locations2); assertThat(issue.locationsChanged()).isTrue(); assertThat(issue.currentChange()).isNull(); assertThat(issue.mustSendNotifications()).isFalse(); }
/**
 * Struts 2 interceptor entry point: records every action invocation in the Struts
 * monitoring counter, including CPU binding and system-error accounting.
 */
@Override
public String intercept(ActionInvocation invocation) throws Exception { // NOPMD
    // this method is called by Struts
    if (DISABLED || !STRUTS_COUNTER.isDisplayed()) {
        return invocation.invoke();
    }
    boolean systemError = false;
    try {
        // Requested action name.
        final String actionName = getRequestName(invocation);
        STRUTS_COUNTER.bindContextIncludingCpu(actionName);
        return invocation.invoke();
    } catch (final Error e) {
        // Error is caught so that system errors are recorded,
        // but not Exception, which is generally functional
        systemError = true;
        throw e;
    } finally {
        // record the request in the statistics
        STRUTS_COUNTER.addRequestForCurrentContext(systemError);
    }
}
// Exercises the interceptor end to end: with the counter hidden nothing is counted, once
// displayed each invocation increments the request count, and an invocation that throws
// UnknownError is still counted (as a system error).
// NOTE(review): assertSame is used on autoboxed ints; identity comparison of boxed
// Integers only holds for cached values (-128..127) — assertEquals would be safer.
@Test public void testStruts() throws Exception { final Counter strutsCounter = MonitoringProxy.getStrutsCounter(); strutsCounter.clear(); final StrutsInterceptor strutsInterceptor = new StrutsInterceptor(); final ActionInvocation invocation = createNiceMock(ActionInvocation.class); final MockActionProxy proxy = new MockActionProxy(); proxy.setInvocation(invocation); proxy.setActionName("test.action"); proxy.setMethod(null); proxy.setNamespace("testnamespace"); expect(invocation.getProxy()).andReturn(proxy).anyTimes(); replay(invocation); strutsCounter.setDisplayed(false); strutsInterceptor.intercept(invocation); final String requestsCount = "requestsCount"; assertSame(requestsCount, 0, strutsCounter.getRequestsCount()); strutsCounter.setDisplayed(true); strutsInterceptor.intercept(invocation); assertSame(requestsCount, 1, strutsCounter.getRequestsCount()); verify(invocation); final ActionInvocation invocation2 = createNiceMock(ActionInvocation.class); final MockActionProxy proxy2 = new MockActionProxy(); proxy2.setInvocation(invocation2); proxy2.setActionName("test2.action"); proxy2.setMethod("execute"); proxy2.setNamespace("testnamespace"); expect(invocation2.getProxy()).andReturn(proxy2).anyTimes(); replay(invocation2); strutsInterceptor.intercept(invocation2); assertSame(requestsCount, 2, strutsCounter.getRequestsCount()); verify(invocation2); final ActionInvocation invocation3 = createNiceMock(ActionInvocation.class); final MockActionProxy proxy3 = new MockActionProxy(); proxy3.setInvocation(invocation3); proxy3.setActionName("test3.action"); proxy3.setMethod("testmethod"); proxy3.setNamespace("testnamespace"); expect(invocation3.getProxy()).andReturn(proxy3).anyTimes(); expect(invocation3.invoke()).andThrow(new UnknownError("test d'erreur")).anyTimes(); replay(invocation3); try { strutsInterceptor.intercept(invocation3); } catch (final UnknownError e) { assertNotNull("ok", e); } assertSame(requestsCount, 3, strutsCounter.getRequestsCount()); 
verify(invocation3); }
/**
 * Compiles a multipoint-to-single-point intent into a single LinkCollectionIntent.
 * <p>
 * For each ingress, a path to the egress is computed and merged into a tree keyed by the
 * link's source device; when a path reaches a device already in the tree, only the merge
 * link is added so the existing tree is not disturbed. Bandwidth is then allocated on all
 * involved connect points. An IntentException is raised when no path exists at all, or —
 * unless the intent allows partial failure — when some paths are missing.
 */
@Override public List<Intent> compile(MultiPointToSinglePointIntent intent, List<Intent> installable) { Map<DeviceId, Link> links = new HashMap<>(); ConnectPoint egressPoint = intent.egressPoint(); final boolean allowMissingPaths = intentAllowsPartialFailure(intent); boolean hasPaths = false; boolean missingSomePaths = false; for (ConnectPoint ingressPoint : intent.ingressPoints()) { if (ingressPoint.deviceId().equals(egressPoint.deviceId())) { if (deviceService.isAvailable(ingressPoint.deviceId())) { hasPaths = true; } else { missingSomePaths = true; } continue; } Path path = getPath(intent, ingressPoint.deviceId(), egressPoint.deviceId()); if (path != null) { hasPaths = true; for (Link link : path.links()) { if (links.containsKey(link.dst().deviceId())) { // We've already reached the existing tree with the first // part of this path. Add the merging point with different // incoming port, but don't add the remainder of the path // in case it differs from the path we already have. links.put(link.src().deviceId(), link); break; } links.put(link.src().deviceId(), link); } } else { missingSomePaths = true; } } // Allocate bandwidth on existing paths if a bandwidth constraint is set List<ConnectPoint> ingressCPs = intent.filteredIngressPoints().stream() .map(fcp -> fcp.connectPoint()) .collect(Collectors.toList()); ConnectPoint egressCP = intent.filteredEgressPoint().connectPoint(); List<ConnectPoint> pathCPs = links.values().stream() .flatMap(l -> Stream.of(l.src(), l.dst())) .collect(Collectors.toList()); pathCPs.addAll(ingressCPs); pathCPs.add(egressCP); allocateBandwidth(intent, pathCPs); if (!hasPaths) { throw new IntentException("Cannot find any path between ingress and egress points."); } else if (!allowMissingPaths && missingSomePaths) { throw new IntentException("Missing some paths between ingress and egress points."); } Intent result = LinkCollectionIntent.builder() .appId(intent.appId()) .key(intent.key()) .treatment(intent.treatment()) 
.selector(intent.selector()) .links(Sets.newHashSet(links.values())) .filteredIngressPoints(intent.filteredIngressPoints()) .filteredEgressPoints(ImmutableSet.of(intent.filteredEgressPoint())) .priority(intent.priority()) .constraints(intent.constraints()) .resourceGroup(intent.resourceGroup()) .build(); return Collections.singletonList(result); }
// Three ingress points (DID_1..3) converge through hop S4 to the egress on DID_5; the
// compiled result must be a single LinkCollectionIntent containing the four tree links
// (S1-S4, S2-S4, S3-S4, S4-S5) and inheriting the original intent's key.
@Test public void testMultiIngressCompilation() { Set<FilteredConnectPoint> ingress = Sets.newHashSet(new FilteredConnectPoint(new ConnectPoint(DID_1, PORT_1)), new FilteredConnectPoint(new ConnectPoint(DID_2, PORT_1)), new FilteredConnectPoint(new ConnectPoint(DID_3, PORT_1))); FilteredConnectPoint egress = new FilteredConnectPoint(new ConnectPoint(DID_5, PORT_1)); MultiPointToSinglePointIntent intent = makeIntent(ingress, egress); assertThat(intent, is(notNullValue())); final String[] hops = {S4}; MultiPointToSinglePointIntentCompiler compiler = makeCompiler(hops); assertThat(compiler, is(notNullValue())); List<Intent> result = compiler.compile(intent, null); assertThat(result, is(notNullValue())); assertThat(result, hasSize(1)); Intent resultIntent = result.get(0); assertThat(resultIntent instanceof LinkCollectionIntent, is(true)); if (resultIntent instanceof LinkCollectionIntent) { LinkCollectionIntent linkIntent = (LinkCollectionIntent) resultIntent; assertThat(linkIntent.links(), hasSize(4)); assertThat(linkIntent.links(), linksHasPath(S1, S4)); assertThat(linkIntent.links(), linksHasPath(S2, S4)); assertThat(linkIntent.links(), linksHasPath(S3, S4)); assertThat(linkIntent.links(), linksHasPath(S4, S5)); } assertThat("key is inherited", resultIntent.key(), is(intent.key())); }
/**
 * Scans an archive for XML theme entries.
 *
 * @param zipInputStream archive to scan; quietly closed before returning
 * @return names of the XML theme entries found, or an empty list when the stream is null
 */
public static List<String> scanXmlThemes(ZipInputStream zipInputStream) throws IOException {
    if (zipInputStream == null) {
        return Collections.emptyList();
    }
    List<String> themes = new ArrayList<>();
    try {
        // Walk every entry, skipping directories, and collect XML theme file names.
        for (ZipEntry entry = zipInputStream.getNextEntry(); entry != null; entry = zipInputStream.getNextEntry()) {
            if (entry.isDirectory()) {
                continue;
            }
            String entryName = zipEntryName(entry.getName());
            if (isXmlTheme(entryName)) {
                themes.add(entryName);
            }
        }
    } finally {
        IOUtils.closeQuietly(zipInputStream);
    }
    return themes;
}
@Test
public void scanZipForXmlThemes() throws IOException {
    // The fixture archive contains four XML themes at different depths.
    ZipInputStream zip = new ZipInputStream(new BufferedInputStream(
            ZipXmlThemeResourceProviderTest.class.getResourceAsStream("/xmlthemetest.zip")));
    Assert.assertNotNull(zip);
    List<String> themes = ZipXmlThemeResourceProvider.scanXmlThemes(zip);
    Assert.assertEquals(4, themes.size());
    Assert.assertTrue(themes.contains("one.xml"));
    Assert.assertTrue(themes.contains("two.xml"));
    Assert.assertTrue(themes.contains("res/three.xml"));
    Assert.assertTrue(themes.contains("res/sub/four.xml"));
}
/**
 * Deserializes the given config content, lets the optional callback inspect the editable
 * config, then preprocesses/validates it and returns both views in a holder.
 */
public GoConfigHolder loadConfigHolder(final String content, Callback callback) throws Exception {
    LOGGER.debug("[Config Save] Loading config holder");
    CruiseConfig configForEdit = deserializeConfig(content);
    if (callback != null) {
        callback.call(configForEdit);
    }
    CruiseConfig config = preprocessAndValidate(configForEdit);
    return new GoConfigHolder(config, configForEdit);
}
@Test
void shouldLoadConfigWithConfigRepo() throws Exception {
    // A config with one config-repo entry must expose that repo after loading.
    CruiseConfig cruiseConfig = xmlLoader.loadConfigHolder(ONE_CONFIG_REPO).config;
    assertThat(cruiseConfig.getConfigRepos().size()).isEqualTo(1);
    ConfigRepoConfig configRepo = cruiseConfig.getConfigRepos().get(0);
    assertThat(configRepo.getRepo())
            .isEqualTo(git("https://github.com/tomzo/gocd-indep-config-part.git"));
}
/**
 * Obtains the predicate index for the benchmark: builds it from a document feed when one
 * is given, otherwise deserializes a previously serialized index from the index file.
 * Statistics (or deserialization time) are recorded into {@code output}.
 */
static PredicateIndex getIndex(BenchmarkArguments args, Config config, Map<String, Object> output) throws IOException {
    if (args.feedFile == null) {
        // No feed: deserialize an existing index and report the elapsed time.
        try (DataInputStream in = new DataInputStream(new BufferedInputStream(new FileInputStream(args.indexFile)))) {
            long start = System.currentTimeMillis();
            PredicateIndex index = PredicateIndex.fromInputStream(in);
            output.put("Time deserialize index", System.currentTimeMillis() - start);
            return index;
        }
    }
    // Index every document in the feed, assigning sequential document ids.
    PredicateIndexBuilder builder = new PredicateIndexBuilder(config);
    AtomicInteger idCounter = new AtomicInteger();
    VespaFeedParser.parseDocuments(
            args.feedFile, Integer.MAX_VALUE, p -> builder.indexDocument(idCounter.incrementAndGet(), p));
    builder.getStats().putValues(output);
    return builder.build();
}
@Test
void testFeed() throws IOException {
    // Building the index from the fixture feed must record the expected interval count.
    HitsVerificationBenchmark.BenchmarkArguments args = new HitsVerificationBenchmark.BenchmarkArguments();
    args.feedFile = "src/test/resources/vespa-feed.json";
    Map<String, Object> stats = new HashMap<>();
    HitsVerificationBenchmark.getIndex(args, new Config.Builder().build(), stats);
    assertEquals(206, stats.get("Interval index entries"));
}
/**
 * Extracts a map entry's value as a string.
 *
 * @return the stringified value, or empty when the argument is not a map or has no value
 *         for the key derived from {@code type}
 */
@Override
public Optional<String> getValue(Object arg, String type) {
    if (!(arg instanceof Map)) {
        return Optional.empty();
    }
    Map<?, ?> map = (Map<?, ?>) arg;
    return Optional.ofNullable(map.get(getKey(type))).map(String::valueOf);
}
@Test public void testValue() { TypeStrategy strategy = new MapTypeStrategy(); Map<String, String> map = new HashMap<>(); map.put("foo", "bar"); // normal Assert.assertEquals("bar", strategy.getValue(map, ".get(\"foo\")").orElse(null)); // test null Assert.assertNotEquals("bar", strategy.getValue(map, ".get(\"bar\")").orElse(null)); // test non map Assert.assertNotEquals("bar", strategy.getValue("foo", ".get(\"foo\")").orElse(null)); // the test is not equal Assert.assertNotEquals("foo", strategy.getValue(map, ".get(\"foo\")").orElse(null)); }
/**
 * Parses a User-Agent header value.
 *
 * @param userAgentString raw User-Agent string
 * @return the parsed {@link UserAgent}
 */
public static UserAgent parse(String userAgentString) { return UserAgentParser.parse(userAgentString); }
@Test
public void parseQuarkTest() {
    final String uaString = "Mozilla/5.0 (iPhone; CPU iPhone OS 12_4_1 like Mac OS X; zh-cn) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/16G102 Quark/3.6.2.993 Mobile";
    final UserAgent ua = UserAgentUtil.parse(uaString);
    // Browser and rendering engine.
    assertEquals("Quark", ua.getBrowser().toString());
    assertEquals("3.6.2.993", ua.getVersion());
    assertEquals("Webkit", ua.getEngine().toString());
    assertEquals("601.1.46", ua.getEngineVersion());
    // OS and platform.
    assertEquals("iPhone", ua.getOs().toString());
    assertEquals("12_4_1", ua.getOsVersion());
    assertEquals("iPhone", ua.getPlatform().toString());
    assertTrue(ua.isMobile());
}
/**
 * Builds a Kafka-backed SQL table from the table definition.
 * <p>
 * Topics and bootstrap servers are merged from the optional LOCATION string and the
 * "topics"/"bootstrap_servers" properties. When the schema is nested, a
 * NestedPayloadKafkaTable is built with an optional payload serializer from the "format"
 * property; otherwise CSV remains the default (historical default, see comment below) and
 * any other declared format gets a serializer-based table.
 */
@Override public BeamSqlTable buildBeamSqlTable(Table table) { Schema schema = table.getSchema(); ObjectNode properties = table.getProperties(); Optional<ParsedLocation> parsedLocation = Optional.empty(); if (!Strings.isNullOrEmpty(table.getLocation())) { parsedLocation = Optional.of(parseLocation(checkArgumentNotNull(table.getLocation()))); } List<String> topics = mergeParam(parsedLocation.map(loc -> loc.topic), (ArrayNode) properties.get("topics")); List<String> allBootstrapServers = mergeParam( parsedLocation.map(loc -> loc.brokerLocation), (ArrayNode) properties.get("bootstrap_servers")); String bootstrapServers = String.join(",", allBootstrapServers); Optional<String> payloadFormat = properties.has("format") ? Optional.of(properties.get("format").asText()) : Optional.empty(); if (Schemas.isNestedSchema(schema)) { Optional<PayloadSerializer> serializer = payloadFormat.map( format -> PayloadSerializers.getSerializer( format, checkArgumentNotNull(schema.getField(PAYLOAD_FIELD).getType().getRowSchema()), TableUtils.convertNode2Map(properties))); return new NestedPayloadKafkaTable(schema, bootstrapServers, topics, serializer); } else { /* * CSV is handled separately because multiple rows can be produced from a single message, which * adds complexity to payload extraction. It remains here and as the default because it is the * historical default, but it will not be extended to support attaching extended attributes to * rows. */ if (payloadFormat.orElse("csv").equals("csv")) { return new BeamKafkaCSVTable(schema, bootstrapServers, topics); } PayloadSerializer serializer = PayloadSerializers.getSerializer( payloadFormat.get(), schema, TableUtils.convertNode2Map(properties)); return new PayloadSerializerKafkaTable(schema, bootstrapServers, topics, serializer); } }
@Test
public void testBuildBeamSqlAvroTable() {
    // An avro-format table must still resolve to a Kafka table with the LOCATION-derived
    // broker and topic.
    Table table = mockTable("hello", "avro");
    BeamSqlTable sqlTable = provider.buildBeamSqlTable(table);
    assertNotNull(sqlTable);
    assertTrue(sqlTable instanceof BeamKafkaTable);
    BeamKafkaTable kafkaTable = (BeamKafkaTable) sqlTable;
    assertEquals(LOCATION_BROKER, kafkaTable.getBootstrapServers());
    assertEquals(ImmutableList.of(LOCATION_TOPIC), kafkaTable.getTopics());
}
/**
 * Parses MLSD (RFC 3659) reply lines into a directory listing.
 * <p>
 * Each line carries a set of facts (type, size, modify, create, perm, unix.* ...) plus a
 * name. Entries of type dir/file/OS.unix=slink are materialized as Path children with the
 * corresponding attributes; "." and ".." are skipped; symbolic-link targets are resolved
 * relative to the directory when not absolute. When no line parsed successfully, an
 * FTPInvalidListException wrapping the (possibly empty) listing is thrown.
 */
@Override public AttributedList<Path> read(final Path directory, final List<String> replies) throws FTPInvalidListException { final AttributedList<Path> children = new AttributedList<>(); if(replies.isEmpty()) { return children; } // At least one entry successfully parsed boolean success = false; for(String line : replies) { final Map<String, Map<String, String>> file = this.parseFacts(line); if(null == file) { log.error(String.format("Error parsing line %s", line)); continue; } for(Map.Entry<String, Map<String, String>> f : file.entrySet()) { final String name = f.getKey(); // size -- Size in octets // modify -- Last modification time // create -- Creation time // type -- Entry type // unique -- Unique id of file/directory // perm -- File permissions, whether read, write, execute is allowed for the login id. // lang -- Language of the file name per IANA [11] registry. // media-type -- MIME media-type of file contents per IANA registry. // charset -- Character set per IANA registry (if not UTF-8) final Map<String, String> facts = f.getValue(); if(!facts.containsKey("type")) { log.error(String.format("No type fact in line %s", line)); continue; } final Path parsed; if("dir".equals(facts.get("type").toLowerCase(Locale.ROOT))) { parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.directory)); } else if("file".equals(facts.get("type").toLowerCase(Locale.ROOT))) { parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file)); } else if(facts.get("type").toLowerCase(Locale.ROOT).matches("os\\.unix=slink:.*")) { parsed = new Path(directory, PathNormalizer.name(f.getKey()), EnumSet.of(Path.Type.file, Path.Type.symboliclink)); // Parse symbolic link target in Type=OS.unix=slink:/foobar;Perm=;Unique=keVO1+4G4; foobar final String[] type = facts.get("type").split(":"); if(type.length == 2) { final String target = type[1]; if(target.startsWith(String.valueOf(Path.DELIMITER))) { parsed.setSymlinkTarget(new 
Path(PathNormalizer.normalize(target), EnumSet.of(Path.Type.file))); } else { parsed.setSymlinkTarget(new Path(PathNormalizer.normalize(String.format("%s/%s", directory.getAbsolute(), target)), EnumSet.of(Path.Type.file))); } } else { log.warn(String.format("Missing symbolic link target for type %s in line %s", facts.get("type"), line)); continue; } } else { log.warn(String.format("Ignored type %s in line %s", facts.get("type"), line)); continue; } if(!success) { if(parsed.isDirectory() && directory.getName().equals(name)) { log.warn(String.format("Possibly bogus response line %s", line)); } else { success = true; } } if(name.equals(".") || name.equals("..")) { if(log.isDebugEnabled()) { log.debug(String.format("Skip %s", name)); } continue; } if(facts.containsKey("size")) { parsed.attributes().setSize(Long.parseLong(facts.get("size"))); } if(facts.containsKey("unix.uid")) { parsed.attributes().setOwner(facts.get("unix.uid")); } if(facts.containsKey("unix.owner")) { parsed.attributes().setOwner(facts.get("unix.owner")); } if(facts.containsKey("unix.gid")) { parsed.attributes().setGroup(facts.get("unix.gid")); } if(facts.containsKey("unix.group")) { parsed.attributes().setGroup(facts.get("unix.group")); } if(facts.containsKey("unix.mode")) { parsed.attributes().setPermission(new Permission(facts.get("unix.mode"))); } else if(facts.containsKey("perm")) { if(PreferencesFactory.get().getBoolean("ftp.parser.mlsd.perm.enable")) { Permission.Action user = Permission.Action.none; final String flags = facts.get("perm"); if(StringUtils.contains(flags, 'r') || StringUtils.contains(flags, 'l')) { // RETR command may be applied to that object // Listing commands, LIST, NLST, and MLSD may be applied user = user.or(Permission.Action.read); } if(StringUtils.contains(flags, 'w') || StringUtils.contains(flags, 'm') || StringUtils.contains(flags, 'c')) { user = user.or(Permission.Action.write); } if(StringUtils.contains(flags, 'e')) { // CWD command naming the object should succeed 
user = user.or(Permission.Action.execute); if(parsed.isDirectory()) { user = user.or(Permission.Action.read); } } final Permission permission = new Permission(user, Permission.Action.none, Permission.Action.none); parsed.attributes().setPermission(permission); } } if(facts.containsKey("modify")) { // Time values are always represented in UTC parsed.attributes().setModificationDate(this.parseTimestamp(facts.get("modify"))); } if(facts.containsKey("create")) { // Time values are always represented in UTC parsed.attributes().setCreationDate(this.parseTimestamp(facts.get("create"))); } children.add(parsed); } } if(!success) { throw new FTPInvalidListException(children); } return children; }
// A reply line with no facts must make read() throw FTPInvalidListException.
// NOTE(review): read() is expected to throw, so the statements after it are unreachable;
// they only document the intent for a hypothetical non-throwing result.
@Test(expected = FTPInvalidListException.class) public void test14333() throws Exception { Path path = new Path("/TEST", EnumSet.of(Path.Type.directory)); String[] replies = new String[]{ " /TEST" }; final AttributedList<Path> children = new FTPMlsdListResponseReader() .read(path, Arrays.asList(replies)); assertTrue(children.isEmpty()); }
/**
 * Builds the joined stream for this node. Non-foreign-key joins first verify that both
 * sides have matching partition counts.
 */
@Override
public SchemaKStream<?> buildStream(final PlanBuildContext buildContext) {
    if (!joinKey.isForeignKey()) {
        ensureMatchingPartitionCounts(buildContext.getServiceContext().getTopicClient());
    }
    final JoinerFactory joinerFactory = new JoinerFactory(
        buildContext,
        this,
        buildContext.buildNodeContext(getId().toString()));
    return joinerFactory
        .getJoiner(left.getNodeOutputType(), right.getNodeOutputType())
        .join();
}
@Test public void shouldNotPerformStreamStreamJoinWithoutJoinWindow() { // Given: when(left.getNodeOutputType()).thenReturn(DataSourceType.KSTREAM); when(right.getNodeOutputType()).thenReturn(DataSourceType.KSTREAM); final JoinNode joinNode = new JoinNode(nodeId, INNER, joinKey, true, left, right, empty(), "KAFKA"); // When: final Exception e = assertThrows( KsqlException.class, () -> joinNode.buildStream(planBuildContext) ); // Then: assertThat(e.getMessage(), containsString( "Stream-Stream joins must have a WITHIN clause specified. None was provided.")); }
/**
 * Resolves a single dist variable into a one-row result. The variable is looked up as a
 * configuration key first, then as a temporary configuration key, and otherwise treated
 * as a connection-size variable.
 */
@Override
public Collection<LocalDataQueryResultRow> getRows(final ShowDistVariableStatement sqlStatement, final ContextManager contextManager) {
    ShardingSphereMetaData metaData = contextManager.getMetaDataContexts().getMetaData();
    String variableName = sqlStatement.getName();
    String rowKey = variableName.toLowerCase();
    if (isConfigurationKey(variableName)) {
        return Collections.singleton(new LocalDataQueryResultRow(rowKey, getConfigurationValue(metaData, variableName)));
    }
    if (isTemporaryConfigurationKey(variableName)) {
        return Collections.singleton(new LocalDataQueryResultRow(rowKey, getTemporaryConfigurationValue(metaData, variableName)));
    }
    return Collections.singleton(new LocalDataQueryResultRow(rowKey, getConnectionSize(variableName)));
}
@Test
void assertShowTemporaryPropsVariable() {
    // A temporary configuration key must resolve to its configured value, with the
    // variable name lower-cased in the result row.
    when(contextManager.getMetaDataContexts().getMetaData().getTemporaryProps()).thenReturn(
            new TemporaryConfigurationProperties(PropertiesBuilder.build(new Property("proxy-meta-data-collector-enabled", Boolean.FALSE.toString()))));
    Collection<LocalDataQueryResultRow> actual = new ShowDistVariableExecutor()
            .getRows(new ShowDistVariableStatement("PROXY_META_DATA_COLLECTOR_ENABLED"), contextManager);
    assertThat(actual.size(), is(1));
    LocalDataQueryResultRow row = actual.iterator().next();
    assertThat(row.getCell(1), is("proxy_meta_data_collector_enabled"));
    assertThat(row.getCell(2), is("false"));
}
public String[] getFiles( final VariableSpace space ) {
    // Convenience overload delegating to the two-argument variant with the
    // boolean flag enabled (matching the original behavior).
    final boolean flag = true;
    return getFiles( space, flag );
}
@Test
public void testGetFiles() {
    // Exercises file-name generation for the step-nr / partition-nr /
    // split-every naming options of the mocked meta.
    String[] filePaths;

    // Default meta: a single plain file name, regardless of the last flag.
    filePaths = meta.getFiles( "foo", "txt", false );
    assertNotNull( filePaths );
    assertEquals( 1, filePaths.length );
    assertEquals( "foo.txt", filePaths[ 0 ] );
    filePaths = meta.getFiles( "foo", "txt", true );
    assertNotNull( filePaths );
    assertEquals( 1, filePaths.length );
    assertEquals( "foo.txt", filePaths[ 0 ] );

    // Step number in file name: placeholder form vs. expanded sample list
    // (truncated with a trailing "..." entry).
    when( meta.isStepNrInFilename() ).thenReturn( true );
    filePaths = meta.getFiles( "foo", "txt", false );
    assertNotNull( filePaths );
    assertEquals( 1, filePaths.length );
    assertEquals( "foo_<step>.txt", filePaths[ 0 ] );
    filePaths = meta.getFiles( "foo", "txt", true );
    assertNotNull( filePaths );
    assertEquals( 4, filePaths.length );
    assertEquals( "foo_0.txt", filePaths[ 0 ] );
    assertEquals( "foo_1.txt", filePaths[ 1 ] );
    assertEquals( "foo_2.txt", filePaths[ 2 ] );
    assertEquals( "...", filePaths[ 3 ] );

    // Partition number adds a <partition> placeholder.
    when( meta.isPartNrInFilename() ).thenReturn( true );
    filePaths = meta.getFiles( "foo", "txt", false );
    assertNotNull( filePaths );
    assertEquals( 1, filePaths.length );
    assertEquals( "foo_<step>_<partition>.txt", filePaths[ 0 ] );

    // Splitting adds a <split> placeholder.
    when( meta.getSplitEvery() ).thenReturn( 1 );
    filePaths = meta.getFiles( "foo", "txt", false );
    assertNotNull( filePaths );
    assertEquals( 1, filePaths.length );
    assertEquals( "foo_<step>_<partition>_<split>.txt", filePaths[ 0 ] );
}
static BlockStmt getAttributeVariableDeclaration(final String variableName, final Attribute attribute, final List<Field<?>> fields) {
    // Clones the KiePMMLAttribute template method, renames its "attribute"
    // variable, prepends the statements that create the predicate (and the
    // optional complex partial score), and rewires the cloned builder chain.
    final MethodDeclaration methodDeclaration = ATTRIBUTE_TEMPLATE.getMethodsByName(GETKIEPMMLATTRIBUTE).get(0).clone();
    final BlockStmt attributeBody = methodDeclaration.getBody().orElseThrow(() -> new KiePMMLException(String.format(MISSING_BODY_TEMPLATE, methodDeclaration)));
    final VariableDeclarator variableDeclarator = getVariableDeclarator(attributeBody, ATTRIBUTE)
            .orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_IN_BODY, ATTRIBUTE, attributeBody)));
    variableDeclarator.setName(variableName);
    final BlockStmt toReturn = new BlockStmt();
    // Predicate-creation statements go first so the predicate variable is in
    // scope when the attribute initializer references it below.
    String predicateVariableName = String.format("%s_Predicate", variableName);
    BlockStmt toAdd = getKiePMMLPredicate(predicateVariableName, attribute.getPredicate(), fields);
    toAdd.getStatements().forEach(toReturn::addStatement);
    // The complex partial score is optional; absent, a null literal is passed.
    final Expression complexPartialScoreExpression;
    if (attribute.getComplexPartialScore() != null) {
        String complexPartialScoreVariableName = String.format("%s_ComplexPartialScore", variableName);
        toAdd = getComplexPartialScoreVariableDeclaration(complexPartialScoreVariableName, attribute.getComplexPartialScore());
        toAdd.getStatements().forEach(toReturn::addStatement);
        complexPartialScoreExpression = new NameExpr(complexPartialScoreVariableName);
    } else {
        complexPartialScoreExpression = new NullLiteralExpr();
    }
    final MethodCallExpr initializer = variableDeclarator.getInitializer()
            .orElseThrow(() -> new KiePMMLException(String.format(MISSING_VARIABLE_INITIALIZER_TEMPLATE, ATTRIBUTE, attributeBody)))
            .asMethodCallExpr();
    // Patch the chained builder call with this attribute's name, predicate,
    // partial score and (possibly null) complex partial score.
    final MethodCallExpr builder = getChainedMethodCallExprFrom("builder", initializer);
    builder.setArgument(0, new StringLiteralExpr(variableName));
    builder.setArgument(2, new NameExpr(predicateVariableName));
    getChainedMethodCallExprFrom("withPartialScore", initializer).setArgument(0, getExpressionForObject(attribute.getPartialScore()));
    getChainedMethodCallExprFrom("withComplexPartialScore", initializer).setArgument(0, complexPartialScoreExpression);
    // Finally append the (patched) template body after the setup statements.
    attributeBody.getStatements().forEach(toReturn::addStatement);
    return toReturn;
}
@Test
void getAttributeVariableDeclarationWithComplexPartialScore() {
    // Builds an Attribute with a compound predicate and a complex partial
    // score, generates its variable declaration, compares it against the
    // expected source template, and verifies the generated code compiles.
    final String variableName = "variableName";
    Attribute attribute = new Attribute();
    attribute.setReasonCode(REASON_CODE);
    Array.Type arrayType = Array.Type.STRING;
    List<String> values = getStringObjects(arrayType, 4);
    CompoundPredicate compoundPredicate = getCompoundPredicate(values, arrayType);
    attribute.setPredicate(compoundPredicate);
    attribute.setComplexPartialScore(getComplexPartialScore());
    // Quoted, comma-joined values are substituted into the expected template.
    String valuesString = values.stream()
            .map(valueString -> "\"" + valueString + "\"")
            .collect(Collectors.joining(","));
    // Register a DOUBLE DataField for every simple / simple-set predicate field.
    DataDictionary dataDictionary = new DataDictionary();
    for (Predicate predicate : compoundPredicate.getPredicates()) {
        DataField toAdd = null;
        if (predicate instanceof SimplePredicate) {
            toAdd = new DataField();
            toAdd.setName(((SimplePredicate) predicate).getField());
            toAdd.setDataType(DataType.DOUBLE);
        } else if (predicate instanceof SimpleSetPredicate) {
            toAdd = new DataField();
            toAdd.setName(((SimpleSetPredicate) predicate).getField());
            toAdd.setDataType(DataType.DOUBLE);
        }
        if (toAdd != null) {
            dataDictionary.addDataFields(toAdd);
        }
    }
    BlockStmt retrieved = KiePMMLAttributeFactory.getAttributeVariableDeclaration(variableName, attribute, getFieldsFromDataDictionary(dataDictionary));
    String text = getFileContent(TEST_01_SOURCE);
    Statement expected = JavaParserUtils.parseBlock(String.format(text, variableName, valuesString));
    assertThat(JavaParserUtils.equalsNode(expected, retrieved)).isTrue();
    List<Class<?>> imports = Arrays.asList(KiePMMLAttribute.class,
            KiePMMLComplexPartialScore.class,
            KiePMMLCompoundPredicate.class,
            KiePMMLConstant.class,
            KiePMMLSimplePredicate.class,
            KiePMMLSimpleSetPredicate.class,
            Arrays.class,
            Collections.class);
    commonValidateCompilationWithImports(retrieved, imports);
}
public void handleAssignment(final Map<TaskId, Set<TopicPartition>> activeTasks,
                             final Map<TaskId, Set<TopicPartition>> standbyTasks) {
    // Reconciles a new task assignment with the tasks this thread already
    // owns: existing tasks are updated in place, recycled (active <-> standby)
    // or closed; genuinely new tasks are created at the very end.
    log.info("Handle new assignment with:\n" +
            "\tNew active tasks: {}\n" +
            "\tNew standby tasks: {}\n" +
            "\tExisting active tasks: {}\n" +
            "\tExisting standby tasks: {}",
        activeTasks.keySet(), standbyTasks.keySet(), activeTaskIds(), standbyTaskIds());

    topologyMetadata.addSubscribedTopicsFromAssignment(
        activeTasks.values().stream().flatMap(Collection::stream).collect(Collectors.toSet()),
        logPrefix
    );

    // Working copies: entries are removed as existing tasks absorb them, so
    // whatever remains at the end is what still needs to be created.
    final Map<TaskId, Set<TopicPartition>> activeTasksToCreate = new HashMap<>(activeTasks);
    final Map<TaskId, Set<TopicPartition>> standbyTasksToCreate = new HashMap<>(standbyTasks);
    final Map<Task, Set<TopicPartition>> tasksToRecycle = new HashMap<>();
    final Set<Task> tasksToCloseClean = new TreeSet<>(Comparator.comparing(Task::id));

    // Lock only the already-owned tasks that also appear in the new assignment.
    final Set<TaskId> tasksToLock =
        tasks.allTaskIds().stream()
            .filter(x -> activeTasksToCreate.containsKey(x) || standbyTasksToCreate.containsKey(x))
            .collect(Collectors.toSet());
    maybeLockTasks(tasksToLock);

    // first put aside those unrecognized tasks because of unknown named-topologies
    tasks.clearPendingTasksToCreate();
    tasks.addPendingActiveTasksToCreate(pendingTasksToCreate(activeTasksToCreate));
    tasks.addPendingStandbyTasksToCreate(pendingTasksToCreate(standbyTasksToCreate));

    // first rectify all existing tasks:
    // 1. for tasks that are already owned, just update input partitions / resume and skip re-creating them
    // 2. for tasks that have changed active/standby status, just recycle and skip re-creating them
    // 3. otherwise, close them since they are no longer owned
    final Map<TaskId, RuntimeException> failedTasks = new LinkedHashMap<>();
    if (stateUpdater == null) {
        handleTasksWithoutStateUpdater(activeTasksToCreate, standbyTasksToCreate, tasksToRecycle, tasksToCloseClean);
    } else {
        handleTasksWithStateUpdater(
            activeTasksToCreate,
            standbyTasksToCreate,
            tasksToRecycle,
            tasksToCloseClean,
            failedTasks
        );
        failedTasks.putAll(collectExceptionsAndFailedTasksFromStateUpdater());
    }

    final Map<TaskId, RuntimeException> taskCloseExceptions = closeAndRecycleTasks(tasksToRecycle, tasksToCloseClean);

    maybeUnlockTasks(tasksToLock);

    failedTasks.putAll(taskCloseExceptions);
    // Surface accumulated failures before creating any new tasks.
    maybeThrowTaskExceptions(failedTasks);

    createNewTasks(activeTasksToCreate, standbyTasksToCreate);
}
@Test
public void shouldRecycleStandbyTaskInStateUpdater() {
    // A standby task living in the state updater that gets re-assigned as
    // active must be removed from the updater, recycled into an active task
    // and queued for initialization rather than created from scratch.
    final StandbyTask standbyTaskToRecycle = standbyTask(taskId03, taskId03ChangelogPartitions)
        .inState(State.RUNNING)
        .withInputPartitions(taskId03Partitions).build();
    final StreamTask recycledActiveTask = statefulTask(taskId03, taskId03ChangelogPartitions)
        .inState(State.CREATED)
        .withInputPartitions(taskId03Partitions).build();
    final TasksRegistry tasks = mock(TasksRegistry.class);
    final TaskManager taskManager = setUpTaskManager(ProcessingMode.AT_LEAST_ONCE, tasks, true);
    when(stateUpdater.getTasks()).thenReturn(mkSet(standbyTaskToRecycle));
    when(activeTaskCreator.createActiveTaskFromStandby(standbyTaskToRecycle, taskId03Partitions, consumer))
        .thenReturn(recycledActiveTask);
    // The removal future completes immediately so recycling can proceed.
    final CompletableFuture<StateUpdater.RemovedTaskResult> future = new CompletableFuture<>();
    when(stateUpdater.remove(standbyTaskToRecycle.id())).thenReturn(future);
    future.complete(new StateUpdater.RemovedTaskResult(standbyTaskToRecycle));

    taskManager.handleAssignment(
        mkMap(mkEntry(standbyTaskToRecycle.id(), standbyTaskToRecycle.inputPartitions())),
        Collections.emptyMap()
    );

    // The recycled task is queued for init; no brand-new tasks are created.
    verify(tasks).addPendingTasksToInit(Collections.singleton(recycledActiveTask));
    verify(activeTaskCreator).createTasks(consumer, Collections.emptyMap());
    verify(standbyTaskCreator).createTasks(Collections.emptyMap());
}
@Operation(summary = "batchCopyByCodes", description = "COPY_PROCESS_DEFINITION_NOTES")
@Parameters({
    @Parameter(name = "codes", description = "PROCESS_DEFINITION_CODES", required = true, schema = @Schema(implementation = String.class, example = "3,4")),
    @Parameter(name = "targetProjectCode", description = "TARGET_PROJECT_CODE", required = true, schema = @Schema(implementation = long.class, example = "123"))
})
@PostMapping(value = "/batch-copy")
@ResponseStatus(HttpStatus.OK)
@ApiException(BATCH_COPY_PROCESS_DEFINITION_ERROR)
public Result copyProcessDefinition(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser,
                                    @Parameter(name = "projectCode", description = "PROJECT_CODE", required = true) @PathVariable long projectCode,
                                    @RequestParam(value = "codes", required = true) String codes,
                                    @RequestParam(value = "targetProjectCode", required = true) long targetProjectCode) {
    // Copies the comma-separated process-definition codes from the source
    // project (path variable) into the target project and wraps the service
    // result map into the standard Result envelope.
    return returnDataList(
        processDefinitionService.batchCopyProcessDefinition(loginUser, projectCode, codes, targetProjectCode));
}
@Test
public void testBatchCopyProcessDefinition() {
    // The controller should pass its parameters straight through to the
    // service and wrap the service's SUCCESS map in a successful Result.
    long projectCode = 1L;
    long targetProjectCode = 2L;
    String code = "1";
    Map<String, Object> result = new HashMap<>();
    putMsg(result, Status.SUCCESS);
    Mockito.when(processDefinitionService.batchCopyProcessDefinition(user, projectCode, code, targetProjectCode))
            .thenReturn(result);
    Result response = processDefinitionController.copyProcessDefinition(user, projectCode, code, targetProjectCode);
    Assertions.assertTrue(response != null && response.isSuccess());
}
@Override
public List<byte[]> mGet(byte[]... keys) {
    // MGET is emulated as one GET per key so each key can be routed to its
    // own cluster slot.
    if (isQueueing() || isPipelined()) {
        // Inside a transaction/pipeline the reads are only queued; results
        // are delivered later, hence the null return.
        for (byte[] key : keys) {
            read(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key);
        }
        return null;
    }

    // Otherwise batch all GETs and block for the collected responses.
    CommandBatchService batch = new CommandBatchService(executorService);
    for (byte[] key : keys) {
        batch.readAsync(key, ByteArrayCodec.INSTANCE, RedisCommands.GET, key);
    }
    BatchResult<byte[]> batchResult = (BatchResult<byte[]>) batch.execute();
    return batchResult.getResponses();
}
@Test
public void testMGet() {
    // MSET ten key/value pairs, then verify MGET returns the values aligned
    // with the key order (a HashMap's keySet and values iterate consistently).
    Map<byte[], byte[]> map = new HashMap<>();
    for (int i = 0; i < 10; i++) {
        map.put(("test" + i).getBytes(), ("test" + i*100).getBytes());
    }
    connection.mSet(map);
    List<byte[]> r = connection.mGet(map.keySet().toArray(new byte[0][]));
    assertThat(r).containsExactly(map.values().toArray(new byte[0][]));
}
@Override public long getMaxCacheSize() { // 返回默认值 return 32 * 1024 * 1024; }
@Test
public void getMaxCacheSize() {
    // The default maximum cache size is 32 MB.
    Assert.assertEquals(32 * 1024 * 1024, mSensorsAPI.getMaxCacheSize());
}
public boolean statsHaveChanged() {
    // Until every distributor has reported, the aggregate is incomplete, so
    // no change can be claimed yet.
    if (!aggregatedStats.hasUpdatesFromAllDistributors()) {
        return false;
    }
    for (ContentNodeStats nodeStats : aggregatedStats.getStats()) {
        int index = nodeStats.getNodeIndex();
        boolean current = mayHaveMergesPendingInGlobalSpace(index);
        Boolean previous = prevMayHaveMergesPendingInGlobalSpace(index);
        // A node never seen before, or one whose pending-merges state flipped,
        // both count as a change.
        if (previous == null || previous != current) {
            return true;
        }
    }
    return false;
}
@Test
void stats_have_changed_if_one_node_has_buckets_pending_to_in_sync_transition() {
    // Node 1 previously had buckets pending; the new aggregate reports it in
    // sync, so its pending-merges state flipped and a change must be detected.
    Fixture f = Fixture.fromStats(stats().bucketsPending(0).bucketsPending(1));
    f.newAggregatedStats(stats().bucketsPending(0).inSync(1));
    assertTrue(f.statsHaveChanged());
}
public String getStatus() {
    // One-line, fixed-width summary of this publisher's name, shutdown flag
    // and current/maximum queue depth.
    final String template = "Publisher %-30s: shutdown=%5s, queue=%7d/%-7d";
    return String.format(template, publisherName, shutdown, currentEventSize(), queueMaxSize);
}
@Test
void getStatus() throws NacosException {
    // Verifies the formatted status line across the publisher's lifecycle:
    // events queued, queue drained after subscribing, and after shutdown.
    traceEventPublisher.publish(new TraceTestEvent());
    traceEventPublisher.publish(new TraceTestEvent.TraceTestEvent1());
    traceEventPublisher.publish(new TraceTestEvent.TraceTestEvent2());
    String expectedStatus = "Publisher TraceTestEvent : shutdown=false, queue= 3/8 ";
    assertEquals(traceEventPublisher.getStatus(), expectedStatus);
    traceEventPublisher.addSubscriber(subscriber, TraceTestEvent.TraceTestEvent1.class);
    // Allow the publisher thread time to drain the queue.
    ThreadUtils.sleep(2000L);
    expectedStatus = "Publisher TraceTestEvent : shutdown=false, queue= 0/8 ";
    assertEquals(traceEventPublisher.getStatus(), expectedStatus);
    traceEventPublisher.shutdown();
    expectedStatus = "Publisher TraceTestEvent : shutdown= true, queue= 0/8 ";
    assertEquals(traceEventPublisher.getStatus(), expectedStatus);
}
@Override
public int readInt() throws EOFException {
    // Decoding an int needs four readable bytes.
    if (availableLong() < Integer.BYTES) {
        throw new EOFException();
    }
    final int value = _dataBuffer.getInt(_currentOffset);
    _currentOffset += Integer.BYTES;
    return value;
}
@Test
void testReadInt() throws EOFException {
    // Reading an int must decode the first four bytes of the buffer and
    // advance the stream offset by Integer.BYTES.
    int read = _dataBufferPinotInputStream.readInt();
    assertEquals(read, _byteBuffer.getInt(0));
    assertEquals(_dataBufferPinotInputStream.getCurrentOffset(), Integer.BYTES);
}
public static PrestoIcebergPartitionSpec toPrestoPartitionSpec(PartitionSpec spec, TypeManager typeManager) {
    // Adapter: carries the spec id over verbatim and converts the schema and
    // partition fields into their Presto-side representations.
    final int specId = spec.specId();
    return new PrestoIcebergPartitionSpec(
            specId,
            toPrestoSchema(spec.schema(), typeManager),
            toPartitionFields(spec));
}
@Test(dataProvider = "allTransforms")
public void testToPrestoPartitionSpec(String transform, String name) {
    // Round-trips each supported partition transform through the converter
    // and compares against the directly constructed Presto spec.
    // Create a test TypeManager
    TypeManager typeManager = createTestFunctionAndTypeManager();
    // Create a mock PartitionSpec
    PartitionSpec partitionSpec = partitionSpec(transform, name);
    PrestoIcebergPartitionSpec expectedPrestoPartitionSpec = prestoIcebergPartitionSpec(transform, name, typeManager);
    // Convert Iceberg PartitionSpec to Presto Iceberg Partition Spec
    PrestoIcebergPartitionSpec prestoIcebergPartitionSpec = toPrestoPartitionSpec(partitionSpec, typeManager);
    // Check that the result is not null
    assertNotNull(prestoIcebergPartitionSpec);
    assertEquals(prestoIcebergPartitionSpec, expectedPrestoPartitionSpec);
}
@Override public PermissionTicket createTicket(ResourceSet resourceSet, Set<String> scopes) { // check to ensure that the scopes requested are a subset of those in the resource set if (!scopeService.scopesMatch(resourceSet.getScopes(), scopes)) { throw new InsufficientScopeException("Scopes of resource set are not enough for requested permission."); } Permission perm = new Permission(); perm.setResourceSet(resourceSet); perm.setScopes(scopes); PermissionTicket ticket = new PermissionTicket(); ticket.setPermission(perm); ticket.setTicket(UUID.randomUUID().toString()); ticket.setExpiration(new Date(System.currentTimeMillis() + permissionExpirationSeconds * 1000L)); return repository.save(ticket); }
@Test
public void testCreate_ticket() {
    // Creating a ticket for a resource set with matching scopes must yield a
    // non-null opaque ticket value.
    PermissionTicket perm = permissionService.createTicket(rs1, scopes1);
    // we want there to be a non-null ticket
    assertNotNull(perm.getTicket());
}
@PublicAPI(usage = ACCESS)
public JavaClasses importUrl(URL url) {
    // Single-URL convenience that delegates to the list-based importer.
    return importUrls(java.util.Collections.singletonList(url));
}
@Test
public void creates_relations_between_classes_and_interfaces() {
    // Imports a small class hierarchy and verifies both the directly declared
    // interfaces (getRawInterfaces) and the transitively collected ones
    // (getAllRawInterfaces) for each imported type.
    JavaClasses classes = new ClassFileImporter().importUrl(getClass().getResource("testexamples/classhierarchyimport"));
    JavaClass baseClass = classes.get(BaseClass.class);
    JavaClass otherInterface = classes.get(OtherInterface.class);
    JavaClass subclass = classes.get(Subclass.class);
    JavaClass subinterface = classes.get(Subinterface.class);
    JavaClass otherSubclass = classes.get(OtherSubclass.class);
    JavaClass parentInterface = classes.get(ParentInterface.class);
    JavaClass grandParentInterface = classes.get(GrandParentInterface.class);
    JavaClass someCollection = classes.get(SomeCollection.class);
    JavaClass collectionInterface = classes.get(CollectionInterface.class);

    assertThat(baseClass.getRawInterfaces()).containsOnly(otherInterface);
    assertThat(baseClass.getAllRawInterfaces()).containsOnly(otherInterface, grandParentInterface);
    assertThat(subclass.getRawInterfaces()).containsOnly(subinterface);
    assertThat(subclass.getAllRawInterfaces()).containsOnly(
            subinterface, otherInterface, parentInterface, grandParentInterface);
    assertThat(otherSubclass.getRawInterfaces()).containsOnly(parentInterface);
    assertThat(otherSubclass.getAllRawInterfaces()).containsOnly(parentInterface, grandParentInterface, otherInterface);
    assertThat(someCollection.getRawInterfaces()).containsOnly(collectionInterface, otherInterface, subinterface);
    // For SomeCollection the transitive set also pulls in the JDK interfaces.
    assertThat(someCollection.getAllRawInterfaces()).extractingResultOf("reflect").containsOnly(
            CollectionInterface.class, OtherInterface.class, Subinterface.class, ParentInterface.class,
            GrandParentInterface.class, Collection.class, Iterable.class);
}
public Range<PartitionKey> handleNewSinglePartitionDesc(Map<ColumnId, Column> schema,
                                                        SingleRangePartitionDesc desc,
                                                        long partitionId, boolean isTemp) throws DdlException {
    // Validates the described range against existing partitions, registers
    // it, then records the partition's per-id properties. Returns the range.
    Range<PartitionKey> range;
    try {
        range = checkAndCreateRange(schema, desc, isTemp);
        setRangeInternal(partitionId, isTemp, range);
    } catch (IllegalArgumentException e) {
        // Range.closedOpen may throw this if (lower > upper)
        throw new DdlException("Invalid key range: " + e.getMessage());
    }
    // Only reached on success: persist the partition's data property,
    // replication factor, in-memory flag and data-cache settings.
    idToDataProperty.put(partitionId, desc.getPartitionDataProperty());
    idToReplicationNum.put(partitionId, desc.getReplicationNum());
    idToInMemory.put(partitionId, desc.isInMemory());
    idToStorageCacheInfo.put(partitionId, desc.getDataCacheInfo());
    return range;
}
@Test
public void testBigIntNormal() throws DdlException, AnalysisException {
    // Four single-value BIGINT partitions spanning almost the whole type
    // range must all be accepted by handleNewSinglePartitionDesc.
    Column k1 = new Column("k1", new ScalarType(PrimitiveType.BIGINT), true, null, "", "");
    partitionColumns.add(k1);
    singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p1", new PartitionKeyDesc(Lists
            .newArrayList(new PartitionValue("-9223372036854775806"))), null));
    singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p2", new PartitionKeyDesc(Lists
            .newArrayList(new PartitionValue("-9223372036854775805"))), null));
    singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p3", new PartitionKeyDesc(Lists
            .newArrayList(new PartitionValue("0"))), null));
    singleRangePartitionDescs.add(new SingleRangePartitionDesc(false, "p4", new PartitionKeyDesc(Lists
            .newArrayList(new PartitionValue("9223372036854775806"))), null));
    partitionInfo = new RangePartitionInfo(partitionColumns);
    for (SingleRangePartitionDesc singleRangePartitionDesc : singleRangePartitionDescs) {
        singleRangePartitionDesc.analyze(1, null);
        partitionInfo.handleNewSinglePartitionDesc(MetaUtils.buildIdToColumn(partitionColumns),
                singleRangePartitionDesc, 20000L, false);
    }
}
public static KeyValueIterator<Windowed<GenericKey>, ValueAndTimestamp<GenericRow>> fetchAll(
    final ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>> store,
    final Instant lower,
    final Instant upper
) {
    // Resolve the underlying (cache-bypassing) stores and return the first
    // iterator yielding data for the requested time range.
    final List<ReadOnlyWindowStore<GenericKey, ValueAndTimestamp<GenericRow>>> underlyingStores = getStores(store);
    return findFirstNonEmptyIterator(
        underlyingStores,
        windowStore -> fetchAllUncached(windowStore, lower, upper)
    );
}
@Test
public void shouldCallUnderlyingStoreForTableScans() throws IllegalAccessException {
    // fetchAll must bypass the metered/caching wrappers and call the
    // innermost window store directly with the requested time range.
    when(provider.stores(any(), any())).thenReturn(ImmutableList.of(meteredWindowStore));
    SERDES_FIELD.set(meteredWindowStore, serdes);
    when(meteredWindowStore.wrapped()).thenReturn(wrappedWindowStore);
    when(wrappedWindowStore.wrapped()).thenReturn(windowStore);
    when(windowStore.fetchAll(any(), any())).thenReturn(keyValueIterator);
    when(keyValueIterator.hasNext()).thenReturn(false);
    WindowStoreCacheBypass.fetchAll(store, Instant.ofEpochMilli(100), Instant.ofEpochMilli(200));
    verify(windowStore).fetchAll(Instant.ofEpochMilli(100L), Instant.ofEpochMilli(200L));
}
private boolean processBackgroundEvents() {
    // Drains and processes all queued background events, remembering only the
    // FIRST error to rethrow; subsequent errors are logged and dropped.
    AtomicReference<KafkaException> firstError = new AtomicReference<>();

    LinkedList<BackgroundEvent> events = new LinkedList<>();
    backgroundEventQueue.drainTo(events);

    for (BackgroundEvent event : events) {
        try {
            // Completable events are also registered with the reaper so they
            // can be expired if they never complete.
            if (event instanceof CompletableEvent)
                backgroundEventReaper.add((CompletableEvent<?>) event);

            backgroundEventProcessor.process(event);
        } catch (Throwable t) {
            KafkaException e = ConsumerUtils.maybeWrapAsKafkaException(t);

            // compareAndSet fails when an error was already recorded; those
            // later errors are only logged, not thrown.
            if (!firstError.compareAndSet(null, e))
                log.warn("An error occurred when processing the background event: {}", e.getMessage(), e);
        }
    }

    // Expire any registered completable events whose deadline has passed.
    backgroundEventReaper.reap(time.milliseconds());

    if (firstError.get() != null)
        throw firstError.get();

    // Reports whether any events were drained in this pass.
    return !events.isEmpty();
}
@Test
public void testProcessBackgroundEventsTimesOut() throws Exception {
    // A future that keeps timing out must exhaust the timer and surface a
    // TimeoutException to the caller.
    consumer = newConsumer();
    Time time = new MockTime();
    Timer timer = time.timer(1000);
    CompletableFuture<?> future = mock(CompletableFuture.class);

    // Each get() consumes its full timeout from the mock clock, then fails.
    doAnswer(invocation -> {
        long timeout = invocation.getArgument(0, Long.class);
        timer.sleep(timeout);
        throw new java.util.concurrent.TimeoutException("Intentional timeout");
    }).when(future).get(any(Long.class), any(TimeUnit.class));

    assertThrows(TimeoutException.class, () -> consumer.processBackgroundEvents(future, timer));

    // Because we forced our mocked future to continuously time out, we should have no time remaining.
    assertEquals(0, timer.remainingMs());
}
public FEELFnResult<List<Object>> invoke(@ParameterName( "list" ) List list, @ParameterName( "position" ) BigDecimal position) { if ( list == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "list", "cannot be null")); } if ( position == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", "cannot be null")); } if ( position.intValue() == 0 ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", "cannot be zero (parameter 'position' is 1-based)")); } if ( position.abs().intValue() > list.size() ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "position", "inconsistent with 'list' size")); } // spec requires us to return a new list List<Object> result = new ArrayList<>( list ); if( position.intValue() > 0 ) { result.remove( position.intValue()-1 ); } else { result.remove( list.size()+position.intValue() ); } return FEELFnResult.ofResult( result ); }
@Test
void invokeNull() {
    // Null list and/or null position must each produce an InvalidParametersEvent.
    FunctionTestUtil.assertResultError(removeFunction.invoke((List) null, null), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(removeFunction.invoke(null, BigDecimal.ONE), InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(removeFunction.invoke(Collections.emptyList(), null), InvalidParametersEvent.class);
}
public static boolean reserved(Uuid uuid) {
    // Reserved directory ids have all most-significant bits clear and
    // least-significant bits below 100.
    final long msb = uuid.getMostSignificantBits();
    final long lsb = uuid.getLeastSignificantBits();
    return msb == 0 && lsb < 100;
}
@Test
void testUnassignedIsReserved() {
    // The UNASSIGNED sentinel id must fall inside the reserved range.
    assertTrue(DirectoryId.reserved(DirectoryId.UNASSIGNED));
}
@Override
public void onDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
    // Parses an Intermediate Cuff Pressure packet: a flags byte, the cuff
    // pressure (SFLOAT) plus two ignored SFLOAT fields, then optional
    // timestamp, pulse rate, user id and measurement status. Packets shorter
    // than their flags declare are reported as invalid.
    super.onDataReceived(device, data);

    // Minimum packet: flags (1) + three SFLOAT fields (6).
    if (data.size() < 7) {
        onInvalidDataReceived(device, data);
        return;
    }

    // First byte: flags
    int offset = 0;
    final int flags = data.getIntValue(Data.FORMAT_UINT8, offset++);

    // See UNIT_* for unit options
    final int unit = (flags & 0x01) == UNIT_mmHg ? UNIT_mmHg : UNIT_kPa;
    final boolean timestampPresent = (flags & 0x02) != 0;
    final boolean pulseRatePresent = (flags & 0x04) != 0;
    final boolean userIdPresent = (flags & 0x08) != 0;
    final boolean measurementStatusPresent = (flags & 0x10) != 0;

    // Re-validate the size against the optional fields announced by the flags.
    if (data.size() < 7
            + (timestampPresent ? 7 : 0) + (pulseRatePresent ? 2 : 0)
            + (userIdPresent ? 1 : 0) + (measurementStatusPresent ? 2 : 0)) {
        onInvalidDataReceived(device, data);
        return;
    }

    // Following bytes - systolic, diastolic and mean arterial pressure
    final float cuffPressure = data.getFloatValue(Data.FORMAT_SFLOAT, offset);
    // final float ignored_1 = data.getFloatValue(Data.FORMAT_SFLOAT, offset + 2);
    // final float ignored_2 = data.getFloatValue(Data.FORMAT_SFLOAT, offset + 4);
    offset += 6;

    // Parse timestamp if present
    Calendar calendar = null;
    if (timestampPresent) {
        calendar = DateTimeDataCallback.readDateTime(data, offset);
        offset += 7;
    }

    // Parse pulse rate if present
    Float pulseRate = null;
    if (pulseRatePresent) {
        pulseRate = data.getFloatValue(Data.FORMAT_SFLOAT, offset);
        offset += 2;
    }

    // Read user id if present
    Integer userId = null;
    if (userIdPresent) {
        userId = data.getIntValue(Data.FORMAT_UINT8, offset);
        offset += 1;
    }

    // Read measurement status if present
    BPMStatus status = null;
    if (measurementStatusPresent) {
        final int measurementStatus = data.getIntValue(Data.FORMAT_UINT16_LE, offset);
        // offset += 2;
        status = new BPMStatus(measurementStatus);
    }

    onIntermediateCuffPressureReceived(device, cuffPressure, unit, pulseRate, userId, status, calendar);
}
@Test
public void onInvalidDataReceived_toShort() {
    // A 6-byte packet is below the 7-byte minimum, so the parser must report
    // invalid data instead of delivering a measurement.
    final DataReceivedCallback callback = new IntermediateCuffPressureDataCallback() {
        @Override
        public void onIntermediateCuffPressureReceived(@NonNull final BluetoothDevice device,
                                                       final float cuffPressure, final int unit,
                                                       @Nullable final Float pulseRate,
                                                       @Nullable final Integer userID,
                                                       @Nullable final BPMStatus status,
                                                       @Nullable final Calendar calendar) {
            // Reaching this callback means the short packet was wrongly accepted.
            assertEquals("Invalid data reported as correct", 1, 2);
        }

        @Override
        public void onInvalidDataReceived(@NonNull final BluetoothDevice device, @NonNull final Data data) {
            assertEquals("Invalid ICP", 6, data.size());
        }
    };
    final Data data = new Data(new byte[] { 1, 2, 3, 4, 5, 6 });
    assertArrayEquals(
            new byte[] { 1, 2, 3, 4, 5, 6 },
            data.getValue()
    );
    callback.onDataReceived(null, data);
}
@Override
public long[] getValues() {
    // Defensive copy so callers cannot mutate the snapshot's internal state.
    return values.clone();
}
@Test
public void canAlsoBeCreatedFromACollectionOfLongs() {
    // Order-insensitive check that the snapshot holds exactly the given longs.
    final Snapshot other = new UniformSnapshot(asList(5L, 1L, 2L, 3L, 4L));
    assertThat(other.getValues())
            .containsOnly(1, 2, 3, 4, 5);
}
public void startAsync() {
    // Boots the headless (standalone) KSQL server: load UDFs, prepare the
    // processing log, configure RocksDB, execute the queries file, then start
    // the version checker. Any failure is logged and rethrown.
    try {
        udfLoader.load();
        ProcessingLogServerUtils.maybeCreateProcessingLogTopic(
            serviceContext.getTopicClient(),
            processingLogConfig,
            ksqlConfig);
        if (processingLogConfig.getBoolean(ProcessingLogConfig.STREAM_AUTO_CREATE)) {
            log.warn("processing log auto-create is enabled, but this is not supported "
                + "for headless mode.");
        }
        rocksDBConfigSetterHandler.accept(ksqlConfig);
        processesQueryFile(readQueriesFile(queriesFile));
        showWelcomeMessage();
        // Forward all non-null config originals as strings to the version checker.
        final Properties properties = new Properties();
        ksqlConfig.originals().forEach((key, value) -> {
            if (nonNull(value)) {
                properties.put(key, value.toString());
            }
        });
        versionChecker.start(KsqlModuleType.SERVER, properties);
    } catch (final Exception e) {
        log.error("Failed to start KSQL Server with query file: " + queriesFile, e);
        throw e;
    }
}
@Test
public void shouldSupportSchemaInference() {
    // Given: a CREATE STREAM with no declared elements; schema injectors
    // supply schema-bearing statements for both sandbox and real executions.
    final PreparedStatement<CreateStream> cs = PreparedStatement.of("CS",
        new CreateStream(SOME_NAME, TableElements.of(), false, false, AVRO_PROPS, false));

    givenQueryFileParsesTo(cs);

    when(sandBoxSchemaInjector.inject(argThat(configured(equalTo(cs)))))
        .thenReturn((ConfiguredStatement) CFG_0_WITH_SCHEMA);

    when(schemaInjector.inject(argThat(configured(equalTo(cs)))))
        .thenReturn((ConfiguredStatement) CFG_1_WITH_SCHEMA);

    // When:
    standaloneExecutor.startAsync();

    // Then: both executions used the injected (schema-bearing) statements.
    verify(sandBox).execute(sandBoxServiceContext, CFG_0_WITH_SCHEMA);
    verify(ksqlEngine).execute(serviceContext, CFG_1_WITH_SCHEMA);
}
@Override
public DecimalData next() {
    // With probability nullRate emit null (never when nullRate is zero);
    // otherwise produce a random decimal drawn from [min, max), rounded down
    // to the configured precision.
    if (nullRate != 0f && ThreadLocalRandom.current().nextFloat() <= nullRate) {
        return null;
    }
    final double raw = ThreadLocalRandom.current().nextDouble(min, max);
    final BigDecimal decimal = new BigDecimal(raw, new MathContext(precision, RoundingMode.DOWN));
    return DecimalData.fromBigDecimal(decimal, precision, scale);
}
@Test
void testMinMax() {
    // For every legal precision/scale combination, with nullRate 0 the
    // generated value must be non-null and lie within [min, max].
    for (int precision = 1; precision <= 38; precision++) {
        for (int scale = 0; scale <= precision; scale++) {
            BigDecimal min = BigDecimal.valueOf(-10.0);
            BigDecimal max = BigDecimal.valueOf(10.0);
            DecimalDataRandomGenerator gen = new DecimalDataRandomGenerator(
                    precision, scale, min.doubleValue(), max.doubleValue(), 0f);
            DecimalData result = gen.next();
            assertThat(result)
                    .as("Null value for DECIMAL(" + precision + "," + scale + ")")
                    .isNotNull();
            assertThat(result.toBigDecimal())
                    .as("value must be greater than or equal to min")
                    .isGreaterThanOrEqualTo(min)
                    .as("value must be less than or equal to max")
                    .isLessThanOrEqualTo(max);
        }
    }
}
public static byte[] getIncreasingByteArray(int len) {
    // Convenience overload: an increasing sequence starting at value 0.
    final int startValue = 0;
    return getIncreasingByteArray(startValue, len);
}
@Test
public void getIncreasingByteArray() {
    // Table-driven check of (start, length) -> expected increasing sequence.
    class TestCase {
        byte[] mExpected;
        int mLength;
        int mStart;

        public TestCase(byte[] expected, int length, int start) {
            mExpected = expected;
            mLength = length;
            mStart = start;
        }
    }
    ArrayList<TestCase> testCases = new ArrayList<>();
    // Zero-length arrays regardless of start value.
    testCases.add(new TestCase(new byte[] {}, 0, 0));
    testCases.add(new TestCase(new byte[] {}, 0, 3));
    testCases.add(new TestCase(new byte[] {0}, 1, 0));
    testCases.add(new TestCase(new byte[] {0, 1, 2}, 3, 0));
    // Non-zero start offsets the whole sequence.
    testCases.add(new TestCase(new byte[] {3}, 1, 3));
    testCases.add(new TestCase(new byte[] {3, 4, 5}, 3, 3));
    for (TestCase testCase : testCases) {
        byte[] result = BufferUtils.getIncreasingByteArray(testCase.mStart, testCase.mLength);
        assertEquals(testCase.mExpected.length, result.length);
        for (int k = 0; k < result.length; k++) {
            assertEquals(testCase.mExpected[k], result[k]);
        }
    }
}
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    // Resolves S3 object attributes, with special handling for the root
    // (empty), buckets (region only), in-progress multipart uploads (size of
    // appended parts), delete markers and versioned objects.
    if(file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    if(containerService.isContainer(file)) {
        final PathAttributes attributes = new PathAttributes();
        if(log.isDebugEnabled()) {
            log.debug(String.format("Read location for bucket %s", file));
        }
        attributes.setRegion(new S3LocationFeature(session, session.getClient().getRegionEndpointCache()).getLocation(file).getIdentifier());
        return attributes;
    }
    if(file.getType().contains(Path.Type.upload)) {
        // Pending multipart upload: report the already-appended offset as size.
        final Write.Append append = new S3MultipartUploadService(session, new S3WriteFeature(session, acl), acl).append(file, new TransferStatus());
        if(append.append) {
            return new PathAttributes().withSize(append.offset);
        }
        throw new NotfoundException(file.getAbsolute());
    }
    try {
        PathAttributes attr;
        final Path bucket = containerService.getContainer(file);
        try {
            attr = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getVersionedObjectDetails(
                file.attributes().getVersionId(), bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file)));
        }
        catch(ServiceException e) {
            switch(e.getResponseCode()) {
                case 405:
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Mark file %s as delete marker", file));
                    }
                    // Only DELETE method is allowed for delete markers
                    attr = new PathAttributes();
                    attr.setCustom(Collections.singletonMap(KEY_DELETE_MARKER, Boolean.TRUE.toString()));
                    attr.setDuplicate(true);
                    return attr;
            }
            throw new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
        }
        if(StringUtils.isNotBlank(attr.getVersionId())) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Determine if %s is latest version for %s", attr.getVersionId(), file));
            }
            // Determine if latest version
            try {
                // Fetch the unversioned (current) object's version id for comparison.
                final String latest = new S3AttributesAdapter(session.getHost()).toAttributes(session.getClient().getObjectDetails(
                    bucket.isRoot() ? StringUtils.EMPTY : bucket.getName(), containerService.getKey(file))).getVersionId();
                if(null != latest) {
                    if(log.isDebugEnabled()) {
                        log.debug(String.format("Found later version %s for %s", latest, file));
                    }
                    // Duplicate if not latest version
                    attr.setDuplicate(!latest.equals(attr.getVersionId()));
                }
            }
            catch(ServiceException e) {
                final BackgroundException failure = new S3ExceptionMappingService().map("Failure to read attributes of {0}", e, file);
                if(failure instanceof NotfoundException) {
                    // No current version exists: the found version is a duplicate.
                    attr.setDuplicate(true);
                }
                else {
                    throw failure;
                }
            }
        }
        return attr;
    }
    catch(NotfoundException e) {
        if(file.isDirectory()) {
            if(log.isDebugEnabled()) {
                log.debug(String.format("Search for common prefix %s", file));
            }
            // File may be marked as placeholder but no placeholder file exists. Check for common prefix returned.
            try {
                new S3ObjectListService(session, acl).list(file, new CancellingListProgressListener(), String.valueOf(Path.DELIMITER), 1);
            }
            catch(ListCanceledException l) {
                // Found common prefix
                return PathAttributes.EMPTY;
            }
            catch(NotfoundException n) {
                throw e;
            }
            // Found common prefix
            return PathAttributes.EMPTY;
        }
        throw e;
    }
}
@Test(expected = NotfoundException.class) public void testFindFileEuCentral() throws Exception { final Path test = new Path( new Path("test-eu-central-1-cyberduck", EnumSet.of(Path.Type.directory, Path.Type.volume)), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file)); final S3AttributesFinderFeature f = new S3AttributesFinderFeature(session, new S3AccessControlListFeature(session)); f.find(test); }
public static <T> T defaultIfNull(T value, T defaultValue) { return (value == null) ? defaultValue : value; }
@Test public void testDefaultIfNull() { Assert.assertEquals("", EagleEyeCoreUtils.defaultIfNull("", "bar")); Assert.assertEquals(" ", EagleEyeCoreUtils.defaultIfNull(" ", "bar")); Assert.assertEquals("bar", EagleEyeCoreUtils.defaultIfNull(null, "bar")); Assert.assertEquals("foo", EagleEyeCoreUtils.defaultIfNull("foo", "bar")); }
@Override public Type classify(final Throwable e) { Type type = Type.UNKNOWN; if (e instanceof KsqlFunctionException || (e instanceof StreamsException && ExceptionUtils.getRootCause(e) instanceof KsqlFunctionException)) { type = Type.USER; } if (type == Type.USER) { LOG.info( "Classified error as USER error based on invalid user input. Query ID: {} Exception: {}", queryId, e); } return type; }
@Test public void shouldClassifyKsqlFunctionExceptionAsUserError() { // Given: final Exception e = new KsqlFunctionException("foo"); // When: final Type type = new KsqlFunctionClassifier("").classify(e); // Then: assertThat(type, is(Type.USER)); }
public OutputBuffer(BufferType type, int outputBufferCapacity) { this.type = type; if (outputBufferCapacity > 0) { switch (type) { case DIRECT_BUFFER: this.byteBuffer = ByteBuffer.allocateDirect(outputBufferCapacity); this.byteBuffer.order(ByteOrder.BIG_ENDIAN); break; case HEAP_BUFFER: this.byteBuffer = ByteBuffer.allocate(outputBufferCapacity); this.byteBuffer.order(ByteOrder.BIG_ENDIAN); break; } } }
@Test public void testOutputBuffer() { final int size = 100; final OutputBuffer output1 = new OutputBuffer(BufferType.DIRECT_BUFFER, size); assertThat(output1.getType()).isEqualTo(BufferType.DIRECT_BUFFER); assertThat(output1.length()).isZero(); assertThat(output1.limit()).isEqualTo(size); final OutputBuffer output2 = new OutputBuffer(BufferType.HEAP_BUFFER, size); assertThat(output2.getType()).isEqualTo(BufferType.HEAP_BUFFER); assertThat(output2.length()).isZero(); assertThat(output2.limit()).isEqualTo(size); final OutputBuffer output3 = new OutputBuffer(new byte[size]); assertThat(output3.getType()).isEqualTo(BufferType.HEAP_BUFFER); assertThat(output3.length()).isZero(); assertThat(output3.limit()).isEqualTo(size); }
public static ReadOnlyHttp2Headers trailers(boolean validateHeaders, AsciiString... otherHeaders) { return new ReadOnlyHttp2Headers(validateHeaders, EMPTY_ASCII_STRINGS, otherHeaders); }
@Test public void pseudoHeaderNotAllowedAfterNonPseudoHeaders() { assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() { ReadOnlyHttp2Headers.trailers(true, new AsciiString(":scheme"), new AsciiString("foo"), new AsciiString("othername"), new AsciiString("goo"), new AsciiString(":path"), new AsciiString("val")); } }); }
@Override @PublicAPI(usage = ACCESS) public boolean isAnnotatedWith(Class<? extends Annotation> type) { return isAnnotatedWith(type.getName()); }
@Test public void isAnnotatedWith_predicate() { assertThat(importField(SomeClass.class, "someField") .isAnnotatedWith(alwaysTrue())) .as("predicate matches").isTrue(); assertThat(importField(SomeClass.class, "someField") .isAnnotatedWith(alwaysFalse())) .as("predicate matches").isFalse(); }
void notifyPendingReceivedCallback(final Message<T> message, Exception exception) { if (pendingReceives.isEmpty()) { return; } // fetch receivedCallback from queue final CompletableFuture<Message<T>> receivedFuture = nextPendingReceive(); if (receivedFuture == null) { return; } if (exception != null) { internalPinnedExecutor.execute(() -> receivedFuture.completeExceptionally(exception)); return; } if (message == null) { IllegalStateException e = new IllegalStateException("received message can't be null"); internalPinnedExecutor.execute(() -> receivedFuture.completeExceptionally(e)); return; } if (getCurrentReceiverQueueSize() == 0) { // call interceptor and complete received callback trackMessage(message); interceptAndComplete(message, receivedFuture); return; } // increase permits for available message-queue messageProcessed(message); // call interceptor and complete received callback interceptAndComplete(message, receivedFuture); }
@Test(invocationTimeOut = 1000) public void testNotifyPendingReceivedCallback_InterceptorsWorksWithPrefetchDisabled() { CompletableFuture<Message<byte[]>> receiveFuture = new CompletableFuture<>(); MessageImpl message = mock(MessageImpl.class); ConsumerImpl<byte[]> spy = spy(consumer); consumer.pendingReceives.add(receiveFuture); consumerConf.setReceiverQueueSize(0); doReturn(message).when(spy).beforeConsume(any()); spy.notifyPendingReceivedCallback(message, null); Message<byte[]> receivedMessage = receiveFuture.join(); verify(spy, times(1)).beforeConsume(message); Assert.assertTrue(receiveFuture.isDone()); Assert.assertFalse(receiveFuture.isCompletedExceptionally()); Assert.assertEquals(receivedMessage, message); }
@Override public byte[] fromConnectHeader(String topic, String headerKey, Schema schema, Object value) { return fromConnectData(topic, schema, value); }
@Test public void testSerializingIncorrectHeader() { assertThrows(DataException.class, () -> converter.fromConnectHeader(TOPIC, HEADER_NAME, schema, "not a valid number")); }
public FEELFnResult<Object> invoke(@ParameterName("input") String input, @ParameterName("pattern") String pattern, @ParameterName( "replacement" ) String replacement ) { return invoke(input, pattern, replacement, null); }
@Test void invokeNullWithFlags() { FunctionTestUtil.assertResultError(replaceFunction.invoke(null, null, null, null), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(replaceFunction.invoke("testString", null, null, null), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(replaceFunction.invoke("testString", "test", null, null), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(replaceFunction.invoke(null, "test", null, null), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(replaceFunction.invoke(null, "test", "ttt", null), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(replaceFunction.invoke(null, null, "ttt", null), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(replaceFunction.invoke(null, null, null, "s"), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(replaceFunction.invoke("testString", null, null, "s"), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(replaceFunction.invoke("testString", "test", null, "s"), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(replaceFunction.invoke(null, "test", null, "s"), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(replaceFunction.invoke(null, "test", "ttt", "s"), InvalidParametersEvent.class); FunctionTestUtil.assertResultError(replaceFunction.invoke(null, null, "ttt", "s"), InvalidParametersEvent.class); }
public boolean eval(StructLike data) { return new EvalVisitor().eval(data); }
@Test public void testIsNan() { Evaluator evaluator = new Evaluator(STRUCT, isNaN("y")); assertThat(evaluator.eval(TestHelpers.Row.of(1, Double.NaN, 3))).as("NaN is NaN").isTrue(); assertThat(evaluator.eval(TestHelpers.Row.of(1, 2.0, 3))).as("2 is not NaN").isFalse(); Evaluator structEvaluator = new Evaluator(STRUCT, isNaN("s5.s6.f")); assertThat( structEvaluator.eval( TestHelpers.Row.of( 1, 2, 3, null, TestHelpers.Row.of(TestHelpers.Row.of(Float.NaN))))) .as("NaN is NaN") .isTrue(); assertThat( structEvaluator.eval( TestHelpers.Row.of(1, 2, 3, null, TestHelpers.Row.of(TestHelpers.Row.of(4F))))) .as("4F is not NaN") .isFalse(); }
public static ScheduledTaskHandler of(UUID uuid, String schedulerName, String taskName) { return new ScheduledTaskHandlerImpl(uuid, -1, schedulerName, taskName); }
@Test public void of_equalitySameRef() { String initialURN = "urn:hzScheduledTaskHandler:39ffc539-a356-444c-bec7-6f644462c208-1SchedulerTask"; ScheduledTaskHandler handler = ScheduledTaskHandler.of(initialURN); assertEquals(handler, handler); }
public static long readUnsignedInt(ByteBuffer buffer) { return buffer.getInt() & 0xffffffffL; }
@Test public void testReadUnsignedInt() { ByteBuffer buffer = ByteBuffer.allocate(4); long writeValue = 133444; ByteUtils.writeUnsignedInt(buffer, writeValue); buffer.flip(); long readValue = ByteUtils.readUnsignedInt(buffer); assertEquals(writeValue, readValue); }
@Override protected void remove(String key) { localProperties.remove(key); }
@Test public void remove_should_not_throw_exception_if_key_is_not_present() { underTest.remove(randomAlphanumeric(90)); }
@Override public void transitionToActive(final StreamTask streamTask, final RecordCollector recordCollector, final ThreadCache newCache) { if (stateManager.taskType() != TaskType.ACTIVE) { throw new IllegalStateException("Tried to transition processor context to active but the state manager's " + "type was " + stateManager.taskType()); } this.streamTask = streamTask; this.collector = recordCollector; this.cache = newCache; addAllFlushListenersToNewCache(); }
@Test public void globalTimestampedWindowStoreShouldBeReadOnly() { foreachSetUp(); when(stateManager.taskType()).thenReturn(TaskType.ACTIVE); when(stateManager.getGlobalStore(anyString())).thenReturn(null); final TimestampedWindowStore<String, Long> windowStore = mock(TimestampedWindowStore.class); when(stateManager.getGlobalStore("GlobalTimestampedWindowStore")).thenAnswer(answer -> timestampedWindowStoreMock(windowStore)); context = buildProcessorContextImpl(streamsConfig, stateManager); final StreamTask task = mock(StreamTask.class); context.transitionToActive(task, null, null); mockProcessorNodeWithLocalKeyValueStore(); doTest("GlobalTimestampedWindowStore", (Consumer<TimestampedWindowStore<String, Long>>) store -> { verifyStoreCannotBeInitializedOrClosed(store); checkThrowsUnsupportedOperation(store::flush, "flush()"); checkThrowsUnsupportedOperation(() -> store.put("1", ValueAndTimestamp.make(1L, 1L), 1L), "put() [with timestamp]"); assertEquals(timestampedIters.get(0), store.fetchAll(0L, 0L)); assertEquals(windowStoreIter, store.fetch(KEY, 0L, 1L)); assertEquals(timestampedIters.get(1), store.fetch(KEY, KEY, 0L, 1L)); assertEquals(VALUE_AND_TIMESTAMP, store.fetch(KEY, 1L)); assertEquals(timestampedIters.get(2), store.all()); }); }
@Override public String getBasePath() { return basePath; }
@Test public void testGetBasePath() { assertThat(new DiscoveryPathConstructorImpl("/foo/bar").getBasePath()).isEqualTo("/foo/bar"); assertThat(new DiscoveryPathConstructorImpl("/").getBasePath()).isEqualTo("/"); }
@Override protected void doProcess(Exchange exchange, MetricsEndpoint endpoint, MetricRegistry registry, String metricsName) throws Exception { Message in = exchange.getIn(); Meter meter = registry.meter(metricsName); Long mark = endpoint.getMark(); Long finalMark = getLongHeader(in, HEADER_METER_MARK, mark); if (finalMark == null) { meter.mark(); } else { meter.mark(finalMark); } }
@Test public void testProcessMarkSet() throws Exception { when(endpoint.getMark()).thenReturn(MARK); when(in.getHeader(HEADER_METER_MARK, MARK, Long.class)).thenReturn(MARK); producer.doProcess(exchange, endpoint, registry, METRICS_NAME); inOrder.verify(exchange, times(1)).getIn(); inOrder.verify(registry, times(1)).meter(METRICS_NAME); inOrder.verify(endpoint, times(1)).getMark(); inOrder.verify(in, times(1)).getHeader(HEADER_METER_MARK, MARK, Long.class); inOrder.verify(meter, times(1)).mark(MARK); inOrder.verifyNoMoreInteractions(); }
@Override public int compareTo( MonetDbVersion mDbVersion ) { int result = majorVersion.compareTo( mDbVersion.majorVersion ); if ( result != 0 ) { return result; } result = minorVersion.compareTo( mDbVersion.minorVersion ); if ( result != 0 ) { return result; } result = patchVersion.compareTo( mDbVersion.patchVersion ); if ( result != 0 ) { return result; } return result; }
@Test public void testCompareVersions_DiffInMajor_LongVersion() throws Exception { String dbVersionBigger = "788.5.3.8.9.7.5"; String dbVersion = "785.2.2"; assertEquals( 1, new MonetDbVersion( dbVersionBigger ).compareTo( new MonetDbVersion( dbVersion ) ) ); }
@Override public int chown(String path, long uid, long gid) { return AlluxioFuseUtils.call(LOG, () -> chownInternal(path, uid, gid), FuseConstants.FUSE_CHOWN, "path=%s,uid=%d,gid=%d", path, uid, gid); }
@Test public void chownWithoutValidUidAndGid() throws Exception { long uid = AlluxioFuseUtils.ID_NOT_SET_VALUE; long gid = AlluxioFuseUtils.ID_NOT_SET_VALUE; mFuseFs.chown("/foo/bar", uid, gid); verify(mFileSystem, never()).setAttribute(any()); uid = AlluxioFuseUtils.ID_NOT_SET_VALUE_UNSIGNED; gid = AlluxioFuseUtils.ID_NOT_SET_VALUE_UNSIGNED; mFuseFs.chown("/foo/bar", uid, gid); verify(mFileSystem, never()).setAttribute(any()); }
@Draft public Object[] recvBinaryPicture(Socket socket, final String picture) { if (!BINARY_FORMAT.matcher(picture).matches()) { throw new ZMQException(picture + " is not in expected binary format " + BINARY_FORMAT.pattern(), ZError.EPROTO); } ZFrame frame = ZFrame.recvFrame(socket); if (frame == null) { return null; } // Get the data frame ZNeedle needle = new ZNeedle(frame); Object[] results = new Object[picture.length()]; for (int index = 0; index < picture.length(); index++) { char pattern = picture.charAt(index); switch (pattern) { case '1': { results[index] = needle.getNumber1(); break; } case '2': { results[index] = needle.getNumber2(); break; } case '4': { results[index] = needle.getNumber4(); break; } case '8': { results[index] = needle.getNumber8(); break; } case 's': { results[index] = needle.getString(); break; } case 'S': { results[index] = needle.getLongString(); break; } case 'b': case 'c': { int size = needle.getNumber4(); results[index] = needle.getBlock(size); break; } case 'f': { // Get next frame off socket results[index] = ZFrame.recvFrame(socket); break; } case 'm': { // Get zero or more remaining frames results[index] = ZMsg.recvMsg(socket); break; } default: assert (false) : "invalid picture element '" + pattern + "'"; } } return results; }
@Test(expected = ZMQException.class) public void testReceiveInvalidPictureMsgNotInTheEnd() { String picture = "m1"; pic.recvBinaryPicture(null, picture); }
public void setDefaultBrokerId(long defaultBrokerId) { this.defaultBrokerId = defaultBrokerId; }
@Test public void testSetDefaultBrokerId() { pullAPIWrapper.setDefaultBrokerId(MixAll.MASTER_ID); assertEquals(MixAll.MASTER_ID, pullAPIWrapper.getDefaultBrokerId()); }
public void transitionTo(ClassicGroupState groupState) { assertValidTransition(groupState); previousState = state; state = groupState; currentStateTimestamp = Optional.of(time.milliseconds()); metrics.onClassicGroupStateTransition(previousState, state); }
@Test public void testDeadToPreparingRebalanceIllegalTransition() { group.transitionTo(PREPARING_REBALANCE); group.transitionTo(DEAD); assertThrows(IllegalStateException.class, () -> group.transitionTo(PREPARING_REBALANCE)); }
@Override @CacheEvict(value = RedisKeyConstants.PERMISSION_MENU_ID_LIST, allEntries = true) // allEntries 清空所有缓存,因为 permission 如果变更,涉及到新老两个 permission。直接清理,简单有效 public void updateMenu(MenuSaveVO updateReqVO) { // 校验更新的菜单是否存在 if (menuMapper.selectById(updateReqVO.getId()) == null) { throw exception(MENU_NOT_EXISTS); } // 校验父菜单存在 validateParentMenu(updateReqVO.getParentId(), updateReqVO.getId()); // 校验菜单(自己) validateMenu(updateReqVO.getParentId(), updateReqVO.getName(), updateReqVO.getId()); // 更新到数据库 MenuDO updateObj = BeanUtils.toBean(updateReqVO, MenuDO.class); initMenuProperty(updateObj); menuMapper.updateById(updateObj); }
@Test public void testUpdateMenu_success() { // mock 数据(构造父子菜单) MenuDO sonMenuDO = createParentAndSonMenu(); Long sonId = sonMenuDO.getId(); // 准备参数 MenuSaveVO reqVO = randomPojo(MenuSaveVO.class, o -> { o.setId(sonId); o.setName("testSonName"); // 修改名字 o.setParentId(sonMenuDO.getParentId()); o.setType(MenuTypeEnum.MENU.getType()); }); // 调用 menuService.updateMenu(reqVO); // 校验记录的属性是否正确 MenuDO dbMenu = menuMapper.selectById(sonId); assertPojoEquals(reqVO, dbMenu); }
@Override public void trackTimerPause(String eventName) { }
@Test public void trackTimerPause() { mSensorsAPI.setTrackEventCallBack(new SensorsDataTrackEventCallBack() { @Override public boolean onTrackEvent(String eventName, JSONObject eventProperties) { Assert.fail(); return false; } }); mSensorsAPI.trackTimerStart("TestTimerEvent"); mSensorsAPI.trackTimerPause("TestTimerEvent"); mSensorsAPI.trackTimerEnd("TestTimerEvent"); }
public static <T> T convertQuietly(Type type, Object value) { return convertQuietly(type, value, null); }
@Test public void convertQuietlyTest() { assertThrows(Exception.class, () -> { final String a = "12"; final Object s = Convert.convert(int.class, a, a); assertEquals(12, s); }); }
public static boolean isDataSourceUnitActiveVersionNode(final String path) { return Pattern.compile(getMetaDataNode() + DATABASE_DATA_SOURCES_NODE + DATA_SOURCE_UNITS_NODE + ACTIVE_VERSION_SUFFIX, Pattern.CASE_INSENSITIVE).matcher(path).find(); }
@Test void assertIsDataSourceUnitActiveVersionNode() { assertTrue(DataSourceMetaDataNode.isDataSourceUnitActiveVersionNode("/metadata/logic_db/data_sources/units/foo_ds/active_version")); }
@Override public ResourceCounter releaseSlot(AllocationID allocationId, Exception cause) { final boolean wasSlotFree = slotPool.containsFreeSlot(allocationId); final Optional<AllocatedSlot> removedSlot = slotPool.removeSlot(allocationId); if (removedSlot.isPresent()) { final AllocatedSlot slot = removedSlot.get(); final Collection<AllocatedSlot> slotAsCollection = Collections.singleton(slot); return freeAndReleaseSlots( wasSlotFree ? Collections.emptySet() : slotAsCollection, slotAsCollection, cause); } else { return ResourceCounter.empty(); } }
@TestTemplate void testReleaseSlotReturnsSlot() throws InterruptedException { final NewSlotsService notifyNewSlots = new NewSlotsService(); final DefaultDeclarativeSlotPool slotPool = createDefaultDeclarativeSlotPoolWithNewSlotsListener(notifyNewSlots); final ResourceCounter resourceRequirements = createResourceRequirements(); final FreeSlotConsumer freeSlotConsumer = new FreeSlotConsumer(); final TestingTaskExecutorGateway testingTaskExecutorGateway = new TestingTaskExecutorGatewayBuilder() .setFreeSlotFunction(freeSlotConsumer) .createTestingTaskExecutorGateway(); increaseRequirementsAndOfferSlotsToSlotPool( slotPool, resourceRequirements, new LocalTaskManagerLocation(), testingTaskExecutorGateway); final Collection<? extends PhysicalSlot> physicalSlots = notifyNewSlots.takeNewSlots(); final PhysicalSlot physicalSlot = physicalSlots.iterator().next(); slotPool.releaseSlot(physicalSlot.getAllocationId(), new FlinkException("Test failure")); final AllocationID freedSlot = Iterables.getOnlyElement(freeSlotConsumer.drainFreedSlots()); assertThat(freedSlot).isEqualTo(physicalSlot.getAllocationId()); }
@GetMapping( path = "/api/{namespace}/{extension}", produces = MediaType.APPLICATION_JSON_VALUE ) @CrossOrigin @Operation(summary = "Provides metadata of the latest version of an extension") @ApiResponses({ @ApiResponse( responseCode = "200", description = "The extension metadata are returned in JSON format" ), @ApiResponse( responseCode = "404", description = "The specified extension could not be found", content = @Content() ), @ApiResponse( responseCode = "429", description = "A client has sent too many requests in a given amount of time", content = @Content(), headers = { @Header( name = "X-Rate-Limit-Retry-After-Seconds", description = "Number of seconds to wait after receiving a 429 response", schema = @Schema(type = "integer", format = "int32") ), @Header( name = "X-Rate-Limit-Remaining", description = "Remaining number of requests left", schema = @Schema(type = "integer", format = "int32") ) } ) }) public ResponseEntity<ExtensionJson> getExtension( @PathVariable @Parameter(description = "Extension namespace", example = "redhat") String namespace, @PathVariable @Parameter(description = "Extension name", example = "java") String extension ) { for (var registry : getRegistries()) { try { return ResponseEntity.ok() .cacheControl(CacheControl.noCache().cachePublic()) .body(registry.getExtension(namespace, extension, null)); } catch (NotFoundException exc) { // Try the next registry } } var json = ExtensionJson.error("Extension not found: " + NamingUtil.toExtensionId(namespace, extension)); return new ResponseEntity<>(json, HttpStatus.NOT_FOUND); }
@Test public void testLatestExtensionVersion() throws Exception { var extVersion = mockExtension(); Mockito.when(repositories.findExtensionVersion("foo", "bar", null, VersionAlias.LATEST)).thenReturn(extVersion); Mockito.when(repositories.findLatestVersionForAllUrls(extVersion.getExtension(), null, false, true)).thenReturn(extVersion); mockMvc.perform(get("/api/{namespace}/{extension}/{version}", "foo", "bar", "latest")) .andExpect(status().isOk()) .andExpect(content().json(extensionJson(e -> { e.namespace = "foo"; e.name = "bar"; e.version = "1.0.0"; e.verified = false; e.timestamp = "2000-01-01T10:00Z"; e.displayName = "Foo Bar"; e.versionAlias = List.of("latest"); }))); }
public static String getSHA1Checksum(File file) throws IOException, NoSuchAlgorithmException { return getChecksum(SHA1, file); }
@Test public void testGetSHA1Checksum() throws Exception { File file = new File(this.getClass().getClassLoader().getResource("checkSumTest.file").toURI().getPath()); //String expResult = "B8A9FF28B21BCB1D0B50E24A5243D8B51766851A"; String expResult = "b8a9ff28b21bcb1d0b50e24a5243d8b51766851a"; String result = Checksum.getSHA1Checksum(file); assertEquals(expResult, result); }
public void formatSource(CharSource input, CharSink output) throws FormatterException, IOException { // TODO(cushon): proper support for streaming input/output. Input may // not be feasible (parsing) but output should be easier. output.write(formatSource(input.read())); }
@Test public void blockCommentInteriorTrailingBlank() throws FormatterException { String input = "class T {\n/*\n* asd \n* fgh\n*/ \n\nint x;\n}"; String output = new Formatter().formatSource(input); String expect = "class T {\n /*\n * asd\n * fgh\n */\n\n int x;\n}\n"; assertThat(output).isEqualTo(expect); }
public boolean reserve(final ResourceProfile reservation) { checkResourceProfileNotNullOrUnknown(reservation); if (!availableBudget.allFieldsNoLessThan(reservation)) { return false; } availableBudget = availableBudget.subtract(reservation); LOG.debug("Resource budget reduced to {}.", availableBudget); return true; }
@Test void testReserve() { ResourceBudgetManager budgetManager = new ResourceBudgetManager(createResourceProfile(1.0, 100)); assertThat(budgetManager.reserve(createResourceProfile(0.7, 70))).isEqualTo(true); assertThat(budgetManager.getAvailableBudget()).isEqualTo(createResourceProfile(0.3, 30)); }
@Override public String execute(CommandContext commandContext, String[] args) { if (ArrayUtils.isNotEmpty(args)) { return processedTable.computeIfAbsent(args[0], this::commandHelp); } else { return processedTable.computeIfAbsent(MAIN_HELP, commandName -> mainHelp()); } }
@Test void testGreeting() { Help help = new Help(FrameworkModel.defaultModel()); String output = help.execute(Mockito.mock(CommandContext.class), new String[] {"greeting"}); assertThat(output, containsString("COMMAND NAME")); assertThat(output, containsString("greeting")); assertThat(output, containsString("EXAMPLE")); assertThat(output, containsString("greeting dubbo")); }
@Operation(summary = "createUdfFunc", description = "CREATE_UDF_FUNCTION_NOTES") @Parameters({ @Parameter(name = "type", description = "UDF_TYPE", required = true, schema = @Schema(implementation = UdfType.class)), @Parameter(name = "funcName", description = "FUNC_NAME", required = true, schema = @Schema(implementation = String.class)), @Parameter(name = "className", description = "CLASS_NAME", required = true, schema = @Schema(implementation = String.class)), @Parameter(name = "argTypes", description = "ARG_TYPES", schema = @Schema(implementation = String.class)), @Parameter(name = "database", description = "DATABASE_NAME", schema = @Schema(implementation = String.class)), @Parameter(name = "description", description = "UDF_DESC", schema = @Schema(implementation = String.class)), @Parameter(name = "resourceId", description = "RESOURCE_ID", required = true, schema = @Schema(implementation = int.class, example = "100")) }) @PostMapping(value = "/udf-func") @ResponseStatus(HttpStatus.CREATED) @ApiException(CREATE_UDF_FUNCTION_ERROR) public Result createUdfFunc(@Parameter(hidden = true) @RequestAttribute(value = Constants.SESSION_USER) User loginUser, @RequestParam(value = "type") UdfType type, @RequestParam(value = "funcName") String funcName, @RequestParam(value = "className") String className, @RequestParam(value = "fullName") String fullName, @RequestParam(value = "argTypes", required = false) String argTypes, @RequestParam(value = "database", required = false) String database, @RequestParam(value = "description", required = false) String description) { // todo verify the sourceName return udfFuncService.createUdfFunction(loginUser, funcName, className, fullName, argTypes, database, description, type); }
@Test public void testCreateUdfFunc() throws Exception { Result mockResult = new Result<>(); mockResult.setCode(Status.TENANT_NOT_EXIST.getCode()); Mockito.when(udfFuncService .createUdfFunction(Mockito.any(), Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), Mockito.anyString(), Mockito.any())) .thenReturn(mockResult); MultiValueMap<String, String> paramsMap = new LinkedMultiValueMap<>(); paramsMap.add("type", String.valueOf(UdfType.HIVE)); paramsMap.add("funcName", "test_udf"); paramsMap.add("className", "com.test.word.contWord"); paramsMap.add("argTypes", "argTypes"); paramsMap.add("database", "database"); paramsMap.add("description", "description"); paramsMap.add("resourceId", "1"); paramsMap.add("fullName", "dolphinscheduler/resourcePath"); MvcResult mvcResult = mockMvc.perform(post("/resources/udf-func") .header(SESSION_ID, sessionId) .params(paramsMap)) .andExpect(status().isCreated()) .andExpect(content().contentType(MediaType.APPLICATION_JSON)) .andReturn(); Result result = JSONUtils.parseObject(mvcResult.getResponse().getContentAsString(), Result.class); Assertions.assertEquals(Status.TENANT_NOT_EXIST.getCode(), result.getCode().intValue()); logger.info(mvcResult.getResponse().getContentAsString()); }
@Override public void reset() { setValue.set(StringSetData.empty()); dirty.reset(); }
@Test public void testReset() { StringSetCell stringSetCell = new StringSetCell(MetricName.named("namespace", "name")); stringSetCell.add("hello"); Assert.assertNotEquals(stringSetCell.getDirty(), new DirtyState()); assertThat( stringSetCell.getCumulative().stringSet(), equalTo(StringSetData.create(ImmutableSet.of("hello")).stringSet())); stringSetCell.reset(); assertThat(stringSetCell.getCumulative(), equalTo(StringSetData.empty())); assertThat(stringSetCell.getDirty(), equalTo(new DirtyState())); }
@Override public void transform(Message message, DataType fromType, DataType toType) { final Map<String, Object> headers = message.getHeaders(); CloudEvent cloudEvent = CloudEvents.v1_0; headers.putIfAbsent(CloudEvents.CAMEL_CLOUD_EVENT_ID, message.getExchange().getExchangeId()); headers.putIfAbsent(CloudEvent.CAMEL_CLOUD_EVENT_VERSION, cloudEvent.version()); headers.put(CloudEvents.CAMEL_CLOUD_EVENT_TYPE, "org.apache.camel.event.google.storage.downloadTo"); if (message.getHeaders().containsKey(GoogleCloudStorageConstants.BUCKET_NAME)) { headers.put(CloudEvents.CAMEL_CLOUD_EVENT_SOURCE, "google.storage.bucket." + message.getHeader(GoogleCloudStorageConstants.BUCKET_NAME, String.class)); } headers.put(CloudEvents.CAMEL_CLOUD_EVENT_SUBJECT, message.getHeader(GoogleCloudStorageConstants.OBJECT_NAME, String.class)); headers.put(CloudEvents.CAMEL_CLOUD_EVENT_TIME, cloudEvent.getEventTime(message.getExchange())); }
@Test void shouldMapToCloudEvent() throws Exception { Exchange exchange = new DefaultExchange(camelContext); exchange.getMessage().setHeader(GoogleCloudStorageConstants.OBJECT_NAME, "test1"); exchange.getMessage().setHeader(GoogleCloudStorageConstants.BUCKET_NAME, "myBucket"); exchange.getMessage().setBody(new ByteArrayInputStream("Test1".getBytes(StandardCharsets.UTF_8))); transformer.transform(exchange.getMessage(), DataType.ANY, DataType.ANY); Assertions.assertTrue(exchange.getMessage().hasHeaders()); Assertions.assertTrue(exchange.getMessage().getHeaders().containsKey(GoogleCloudStorageConstants.OBJECT_NAME)); assertEquals("org.apache.camel.event.google.storage.downloadTo", exchange.getMessage().getHeader(CloudEvent.CAMEL_CLOUD_EVENT_TYPE)); assertEquals("test1", exchange.getMessage().getHeader(CloudEvent.CAMEL_CLOUD_EVENT_SUBJECT)); assertEquals("google.storage.bucket.myBucket", exchange.getMessage().getHeader(CloudEvent.CAMEL_CLOUD_EVENT_SOURCE)); }
public static <T> Class<? extends T> convertStringToClassType(String className, Class<? extends T> targetClassType) { try { Class<?> clazz = Class.forName(className); if (!targetClassType.isAssignableFrom(clazz)){ throw new ConfigParseException("Class " + className + " is not a subclass of " + targetClassType.getName()); } return (Class<? extends T>) clazz; } catch (ClassNotFoundException e) { throw new ConfigParseException("Class not found: " + className, e); } }
@Test public void testConvertPredicate() { String predicateString = "io.github.resilience4j.commons.configuration.dummy.DummyPredicateThrowable"; Class<Predicate<Throwable>> clazz = (Class<Predicate<Throwable>>) ClassParseUtil.convertStringToClassType(predicateString, Predicate.class); Assertions.assertThat(clazz).isEqualTo(DummyPredicateThrowable.class); }
@Override public List<Change> computeDiff(List<T> source, List<T> target, DiffAlgorithmListener progress) { Objects.requireNonNull(source, "source list must not be null"); Objects.requireNonNull(target, "target list must not be null"); if (progress != null) { progress.diffStart(); } DiffData data = new DiffData(source, target); int maxIdx = source.size() + target.size(); buildScript(data, 0, source.size(), 0, target.size(), idx -> { if (progress != null) { progress.diffStep(idx, maxIdx); } }); if (progress != null) { progress.diffEnd(); } return data.script; }
@Test public void testDiffMyersExample1Forward() { List<String> original = Arrays.asList("A", "B", "C", "A", "B", "B", "A"); List<String> revised = Arrays.asList("C", "B", "A", "B", "A", "C"); final Patch<String> patch = Patch.generate(original, revised, new MyersDiffWithLinearSpace<String>().computeDiff(original, revised, null)); assertNotNull(patch); System.out.println(patch); assertEquals(5, patch.getDeltas().size()); assertEquals("Patch{deltas=[[InsertDelta, position: 0, lines: [C]], [DeleteDelta, position: 0, lines: [A]], [DeleteDelta, position: 2, lines: [C]], [DeleteDelta, position: 5, lines: [B]], [InsertDelta, position: 7, lines: [C]]]}", patch.toString()); }
public Plan validateReservationSubmissionRequest( ReservationSystem reservationSystem, ReservationSubmissionRequest request, ReservationId reservationId) throws YarnException { String message; if (reservationId == null) { message = "Reservation id cannot be null. Please try again specifying " + " a valid reservation id by creating a new reservation id."; throw RPCUtil.getRemoteException(message); } // Check if it is a managed queue String queue = request.getQueue(); Plan plan = getPlanFromQueue(reservationSystem, queue, AuditConstants.SUBMIT_RESERVATION_REQUEST); validateReservationDefinition(reservationId, request.getReservationDefinition(), plan, AuditConstants.SUBMIT_RESERVATION_REQUEST); return plan; }
// Submission must be rejected for a non-numeric recurrence ("123abc") and for a
// duration that exceeds the recurrence period; in both cases no plan is returned.
@Test
public void testSubmitReservationInvalidRecurrenceExpression() {
    // first check recurrence expression
    ReservationSubmissionRequest request =
        createSimpleReservationSubmissionRequest(1, 1, 1, 5, 3, "123abc");
    plan = null;
    try {
        plan = rrValidator.validateReservationSubmissionRequest(rSystem, request,
            ReservationSystemTestUtil.getNewReservationId());
        Assert.fail();
    } catch (YarnException e) {
        Assert.assertNull(plan);
        String message = e.getMessage();
        Assert.assertTrue(message.startsWith("Invalid period "));
        LOG.info(message);
    }
    // now check duration
    request = createSimpleReservationSubmissionRequest(1, 1, 1, 50, 3, "10");
    plan = null;
    try {
        plan = rrValidator.validateReservationSubmissionRequest(rSystem, request,
            ReservationSystemTestUtil.getNewReservationId());
        Assert.fail();
    } catch (YarnException e) {
        Assert.assertNull(plan);
        String message = e.getMessage();
        Assert.assertTrue(message.startsWith("Duration of the requested reservation:"));
        LOG.info(message);
    }
}
@Override public Response postDelegationTokenExpiration(HttpServletRequest hsr) throws AuthorizationException, IOException, InterruptedException, Exception { if (hsr == null) { RouterAuditLogger.logFailure(getUser().getShortUserName(), POST_DELEGATION_TOKEN_EXPIRATION, UNKNOWN, TARGET_WEB_SERVICE, "Parameter error, the hsr is null."); throw new IllegalArgumentException("Parameter error, the hsr is null."); } try { // get Caller UserGroupInformation Configuration conf = federationFacade.getConf(); UserGroupInformation callerUGI = getKerberosUserGroupInformation(conf, hsr); return renewDelegationToken(hsr, callerUGI); } catch (YarnException e) { LOG.error("Renew delegation token request failed.", e); RouterAuditLogger.logFailure(getUser().getShortUserName(), POST_DELEGATION_TOKEN_EXPIRATION, UNKNOWN, TARGET_WEB_SERVICE, e.getLocalizedMessage()); return Response.status(Status.FORBIDDEN).entity(e.getMessage()).build(); } }
// End-to-end: obtain a delegation token, then renew it through the expiration
// endpoint; the renewal response carries only the new expiration time.
@Test
public void testPostDelegationTokenExpiration() throws Exception {
    DelegationToken token = new DelegationToken();
    token.setRenewer(TEST_RENEWER);

    // Simulate a kerberos-authenticated request from the renewer.
    Principal principal = mock(Principal.class);
    when(principal.getName()).thenReturn(TEST_RENEWER);
    HttpServletRequest request = mock(HttpServletRequest.class);
    when(request.getRemoteUser()).thenReturn(TEST_RENEWER);
    when(request.getUserPrincipal()).thenReturn(principal);
    when(request.getAuthType()).thenReturn("kerberos");

    Response response = interceptor.postDelegationToken(token, request);
    Assert.assertNotNull(response);
    Object entity = response.getEntity();
    Assert.assertNotNull(entity);
    Assert.assertTrue(entity instanceof DelegationToken);
    DelegationToken dtoken = (DelegationToken) entity;

    // The renewal endpoint reads the token from the dedicated RM header.
    final String yarnTokenHeader = "Hadoop-YARN-RM-Delegation-Token";
    when(request.getHeader(yarnTokenHeader)).thenReturn(dtoken.getToken());

    Response renewResponse = interceptor.postDelegationTokenExpiration(request);
    Assert.assertNotNull(renewResponse);
    Object renewEntity = renewResponse.getEntity();
    Assert.assertNotNull(renewEntity);
    Assert.assertTrue(renewEntity instanceof DelegationToken);

    // renewDelegation, we only return renewDate, other values are NULL.
    DelegationToken renewDToken = (DelegationToken) renewEntity;
    Assert.assertNull(renewDToken.getRenewer());
    Assert.assertNull(renewDToken.getOwner());
    Assert.assertNull(renewDToken.getKind());
    Assert.assertTrue(renewDToken.getNextExpirationTime() > dtoken.getNextExpirationTime());
}
/**
 * Distributes {@code getWorkBudget} across the given owners. No-ops when
 * there are no owners or no budget; otherwise each owner whose remaining
 * budget has fallen below half of its newly computed target is topped up by
 * the difference.
 */
@Override
public <T extends GetWorkBudgetSpender> void distributeBudget(
    ImmutableCollection<T> budgetOwners, GetWorkBudget getWorkBudget) {
  if (budgetOwners.isEmpty()) {
    LOG.debug("Cannot distribute budget to no owners.");
    return;
  }

  if (getWorkBudget.equals(GetWorkBudget.noBudget())) {
    LOG.debug("Cannot distribute 0 budget.");
    return;
  }

  // Only adjust spenders that are meaningfully below target to avoid churn.
  computeDesiredBudgets(budgetOwners, getWorkBudget)
      .forEach(
          (owner, target) -> {
            GetWorkBudget remaining = owner.remainingBudget();
            if (isBelowFiftyPercentOfTarget(remaining, target)) {
              owner.adjustBudget(target.subtract(remaining));
            }
          });
}
// Remaining budget (5 items / 5 bytes) is at least 50% of the per-owner target,
// so the distributor must not call adjustBudget at all.
@Test
public void testDistributeBudget_doesNotAdjustStreamBudgetWhenRemainingBudgetHighWithActiveWork() {
    GetWorkBudgetSpender getWorkBudgetSpender =
        spy(createGetWorkBudgetOwnerWithRemainingBudgetOf(
            GetWorkBudget.builder().setItems(5L).setBytes(5L).build()));
    createBudgetDistributor(10L)
        .distributeBudget(
            ImmutableList.of(getWorkBudgetSpender),
            GetWorkBudget.builder().setItems(20L).setBytes(20L).build());
    verify(getWorkBudgetSpender, never()).adjustBudget(anyLong(), anyLong());
}
/**
 * Wraps the given runnable so that it always executes with
 * {@code contextClassLoader} installed as the thread's context class loader.
 *
 * @param runnable the runnable to wrap
 * @param contextClassLoader the class loader to install while running
 * @return the wrapping runnable
 */
public static Runnable withContextClassLoader(
        Runnable runnable, ClassLoader contextClassLoader) {
    return () -> runWithContextClassLoader(() -> runnable.run(), contextClassLoader);
}
// Tasks submitted to the wrapped executor must observe the configured
// context class loader while running.
@Test
void testExecutorWithContextClassLoader() throws Exception {
    final Executor wrappedExecutor =
        ClassLoadingUtils.withContextClassLoader(
            Executors.newDirectExecutorService(), TEST_CLASS_LOADER);
    final CompletableFuture<ClassLoader> contextClassLoader = new CompletableFuture<>();
    // Capture whatever context class loader the task sees at execution time.
    Runnable runnable =
        () -> contextClassLoader.complete(Thread.currentThread().getContextClassLoader());
    wrappedExecutor.execute(runnable);
    assertThat(contextClassLoader.get()).isSameAs(TEST_CLASS_LOADER);
}
/**
 * Greedily fuses transforms reachable from {@code inputPCollection} into a
 * single {@link ExecutableStage}, starting from {@code initialNodes} (which
 * must all share one environment). PCollections whose consumers can all run
 * in the same environment are fused; the rest are materialized as stage
 * outputs.
 *
 * @param pipeline the pipeline being fused
 * @param inputPCollection the gRPC-read input of the stage
 * @param initialNodes the non-empty set of initial consumers
 * @return the fused executable stage
 */
public static ExecutableStage forGrpcPortRead(
    QueryablePipeline pipeline,
    PipelineNode.PCollectionNode inputPCollection,
    Set<PipelineNode.PTransformNode> initialNodes) {
  checkArgument(
      !initialNodes.isEmpty(),
      "%s must contain at least one %s.",
      GreedyStageFuser.class.getSimpleName(),
      PipelineNode.PTransformNode.class.getSimpleName());
  // Choose the environment from an arbitrary node. The initial nodes may not be empty for this
  // subgraph to make any sense, there has to be at least one processor node
  // (otherwise the stage is gRPC Read -> gRPC Write, which doesn't do anything).
  Environment environment = getStageEnvironment(pipeline, initialNodes);

  ImmutableSet.Builder<PipelineNode.PTransformNode> fusedTransforms = ImmutableSet.builder();
  fusedTransforms.addAll(initialNodes);

  Set<SideInputReference> sideInputs = new LinkedHashSet<>();
  Set<UserStateReference> userStates = new LinkedHashSet<>();
  Set<TimerReference> timers = new LinkedHashSet<>();
  Set<PipelineNode.PCollectionNode> fusedCollections = new LinkedHashSet<>();
  Set<PipelineNode.PCollectionNode> materializedPCollections = new LinkedHashSet<>();

  // Seed the worklist with every output of the initial consumers.
  Queue<PipelineNode.PCollectionNode> fusionCandidates = new ArrayDeque<>();
  for (PipelineNode.PTransformNode initialConsumer : initialNodes) {
    fusionCandidates.addAll(pipeline.getOutputPCollections(initialConsumer));
    sideInputs.addAll(pipeline.getSideInputs(initialConsumer));
    userStates.addAll(pipeline.getUserStates(initialConsumer));
    timers.addAll(pipeline.getTimers(initialConsumer));
  }
  while (!fusionCandidates.isEmpty()) {
    PipelineNode.PCollectionNode candidate = fusionCandidates.poll();
    if (fusedCollections.contains(candidate) || materializedPCollections.contains(candidate)) {
      // This should generally mean we get to a Flatten via multiple paths through the graph and
      // we've already determined what to do with the output.
      LOG.debug(
          "Skipping fusion candidate {} because it is {} in this {}",
          candidate,
          fusedCollections.contains(candidate) ? "fused" : "materialized",
          ExecutableStage.class.getSimpleName());
      continue;
    }
    PCollectionFusibility fusibility =
        canFuse(pipeline, candidate, environment, fusedCollections);
    switch (fusibility) {
      case MATERIALIZE:
        // At least one consumer cannot run in this environment: emit the
        // PCollection as a stage output instead.
        materializedPCollections.add(candidate);
        break;
      case FUSE:
        // All of the consumers of the candidate PCollection can be fused into this stage. Do so.
        fusedCollections.add(candidate);
        fusedTransforms.addAll(pipeline.getPerElementConsumers(candidate));
        for (PipelineNode.PTransformNode consumer : pipeline.getPerElementConsumers(candidate)) {
          // The outputs of every transform fused into this stage must be either materialized or
          // themselves fused away, so add them to the set of candidates.
          fusionCandidates.addAll(pipeline.getOutputPCollections(consumer));
          sideInputs.addAll(pipeline.getSideInputs(consumer));
        }
        break;
      default:
        throw new IllegalStateException(
            String.format(
                "Unknown type of %s %s", PCollectionFusibility.class.getSimpleName(), fusibility));
    }
  }
  return ImmutableExecutableStage.ofFullComponents(
      pipeline.getComponents(),
      environment,
      inputPCollection,
      sideInputs,
      userStates,
      timers,
      fusedTransforms.build(),
      materializedPCollections,
      ExecutableStage.DEFAULT_WIRE_CODER_SETTINGS);
}
// Two consumers sharing the same environment id ("common") must fuse into a
// single stage whose outputs are all fused away (nothing materialized).
@Test
public void fusesCompatibleEnvironments() {
    // (impulse.out) -> parDo -> parDo.out -> window -> window.out
    // parDo and window both have the environment "common" and can be fused together
    PTransform parDoTransform =
        PTransform.newBuilder()
            .putInputs("input", "impulse.out")
            .putOutputs("output", "parDo.out")
            .setSpec(
                FunctionSpec.newBuilder()
                    .setUrn(PTransformTranslation.PAR_DO_TRANSFORM_URN)
                    .setPayload(
                        ParDoPayload.newBuilder()
                            .setDoFn(FunctionSpec.newBuilder())
                            .build()
                            .toByteString()))
            .setEnvironmentId("common")
            .build();
    PTransform windowTransform =
        PTransform.newBuilder()
            .putInputs("input", "impulse.out")
            .putOutputs("output", "window.out")
            .setSpec(
                FunctionSpec.newBuilder()
                    .setUrn(PTransformTranslation.ASSIGN_WINDOWS_TRANSFORM_URN)
                    .setPayload(
                        WindowIntoPayload.newBuilder()
                            .setWindowFn(FunctionSpec.newBuilder())
                            .build()
                            .toByteString()))
            .setEnvironmentId("common")
            .build();

    QueryablePipeline p =
        QueryablePipeline.forPrimitivesIn(
            partialComponents
                .toBuilder()
                .putTransforms("parDo", parDoTransform)
                .putPcollections(
                    "parDo.out", PCollection.newBuilder().setUniqueName("parDo.out").build())
                .putTransforms("window", windowTransform)
                .putPcollections(
                    "window.out", PCollection.newBuilder().setUniqueName("window.out").build())
                .putEnvironments("common", Environments.createDockerEnvironment("common"))
                .build());

    ExecutableStage subgraph =
        GreedyStageFuser.forGrpcPortRead(
            p,
            impulseOutputNode,
            ImmutableSet.of(
                PipelineNode.pTransform("parDo", parDoTransform),
                PipelineNode.pTransform("window", windowTransform)));

    // Nothing consumes the outputs of ParDo or Window, so they don't have to be materialized
    assertThat(subgraph.getOutputPCollections(), emptyIterable());
    assertThat(subgraph, hasSubtransforms("parDo", "window"));
}
/**
 * RPC entry point for the NFSv3 SYMLINK procedure: resolves the caller's
 * security handler and remote address from the RPC info, then delegates to
 * the overload that performs the actual work.
 */
@Override
public SYMLINK3Response symlink(XDR xdr, RpcInfo info) {
    return symlink(xdr, getSecurityHandler(info), info.remoteAddress());
}
// SYMLINK must be denied (NFS3ERR_ACCES) for unprivileged callers and succeed
// (NFS3_OK) for privileged ones, using the same serialized request.
@Test(timeout = 60000)
public void testSymlink() throws Exception {
    HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
    long dirId = status.getFileId();
    int namenodeId = Nfs3Utils.getNamenodeId(config);

    XDR xdr_req = new XDR();
    FileHandle handle = new FileHandle(dirId, namenodeId);
    // Create "fubar" in testdir as a symlink pointing at "bar".
    SYMLINK3Request req = new SYMLINK3Request(handle, "fubar", new SetAttr3(), "bar");
    req.serialize(xdr_req);

    // Attempt by an unprivileged user should fail.
    SYMLINK3Response response1 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
        securityHandlerUnpriviledged, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
        response1.getStatus());

    // Attempt by a privileged user should pass.
    SYMLINK3Response response2 = nfsd.symlink(xdr_req.asReadOnlyWrap(),
        securityHandler, new InetSocketAddress("localhost", 1234));
    assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
        response2.getStatus());
}
/**
 * Writes a message to the ring buffer, returning false when there is
 * insufficient capacity.
 *
 * Publication protocol: the record length is first stored negated (ordered
 * store) so concurrent readers treat the record as in-progress; the payload
 * and type are then written; finally the positive length is published with
 * an ordered store, making the record visible to readers.
 *
 * @param msgTypeId positive application-defined message type
 * @param srcBuffer buffer holding the message payload
 * @param offset    payload start within srcBuffer
 * @param length    payload length in bytes
 * @return true if the record was claimed and written, false if the buffer is full
 */
public boolean write(final int msgTypeId, final DirectBuffer srcBuffer, final int offset, final int length) {
    checkTypeId(msgTypeId);     // rejects invalid message type ids
    checkMsgLength(length);     // rejects lengths over the configured maximum

    final AtomicBuffer buffer = this.buffer;
    final int recordLength = length + HEADER_LENGTH;
    final int recordIndex = claimCapacity(buffer, recordLength);

    if (INSUFFICIENT_CAPACITY == recordIndex) {
        return false;
    }

    // Negative length marks the record as claimed but not yet committed.
    buffer.putIntOrdered(lengthOffset(recordIndex), -recordLength);
    // NOTE(review): putIntOrdered already carries release semantics; this
    // fence presumably orders the header store before the plain payload
    // stores below — confirm against the ring buffer's memory model before
    // changing.
    MemoryAccess.releaseFence();

    buffer.putBytes(encodedMsgOffset(recordIndex), srcBuffer, offset, length);
    buffer.putInt(typeOffset(recordIndex), msgTypeId);
    // Publish: readers only consume the record once the length turns positive.
    buffer.putIntOrdered(lengthOffset(recordIndex), recordLength);

    return true;
}
// With head/tail positioned so the free space is just short of a 200-byte
// record, write() must return false and leave the buffer untouched.
@Test
void shouldRejectWriteWhenInsufficientSpace() {
    final int length = 200;
    final long head = 0L;
    // Tail chosen so remaining capacity is smaller than the aligned record.
    final long tail = head + (CAPACITY - align(length - ALIGNMENT, ALIGNMENT));

    when(buffer.getLongVolatile(HEAD_COUNTER_INDEX)).thenReturn(head);
    when(buffer.getLong(TAIL_COUNTER_INDEX)).thenReturn(tail);

    final UnsafeBuffer srcBuffer = new UnsafeBuffer(allocateDirect(1024));
    final int srcIndex = 0;

    assertFalse(ringBuffer.write(MSG_TYPE_ID, srcBuffer, srcIndex, length));

    // No payload or header writes of any kind may have happened.
    verify(buffer, never()).putBytes(anyInt(), eq(srcBuffer), anyInt(), anyInt());
    verify(buffer, never()).putLong(anyInt(), anyInt());
    verify(buffer, never()).putLongOrdered(anyInt(), anyInt());
    verify(buffer, never()).putIntOrdered(anyInt(), anyInt());
}
/**
 * Creates a {@link Getter} that extracts a value by invoking {@code method}
 * on the target, applying the given modifier (e.g. "[any]").
 * NOTE(review): return-type inference for multi-result modifiers is
 * delegated entirely to newGetter — confirm its contract there.
 *
 * @param object   object used for return-type inference
 * @param parent   parent getter in the extraction chain, may be null
 * @param method   accessor method to invoke
 * @param modifier extraction modifier suffix
 */
public static Getter newMethodGetter(Object object, Getter parent, Method method, String modifier)
        throws Exception {
    return newGetter(object, parent, modifier, method.getReturnType(), method::invoke,
            (t, et) -> new MethodGetter(parent, method, modifier, t, et));
}
// With a leading null in the attribute collection, the "[any]" getter must
// still infer Integer as the element type from the first non-null value.
@Test
public void newMethodGetter_whenExtractingFromNonEmpty_Collection_FieldAndParentIsNonEmptyMultiResult_nullValueFirst_thenInferReturnType()
        throws Exception {
    OuterObject object = new OuterObject("name", new InnerObject("inner", null, 0, 1, 2, 3));

    Getter parentGetter =
        GetterFactory.newMethodGetter(object, null, innersCollectionMethod, "[any]");
    Getter innerObjectNameGetter =
        GetterFactory.newMethodGetter(object, parentGetter, innerAttributesCollectionMethod, "[any]");
    Class<?> returnType = innerObjectNameGetter.getReturnType();
    assertEquals(Integer.class, returnType);
}
/**
 * Single duty-cycle of the conductor: process timers, receive client
 * commands (unless an async command is in flight), drain the command queue,
 * track stream positions, run name resolution, and free end-of-life
 * resources. Returns the amount of work done this cycle.
 */
public int doWork() {
    final long nowNs = nanoClock.nanoTime();
    trackTime(nowNs);

    int totalWork = 0;
    totalWork += processTimers(nowNs);

    if (!asyncClientCommandInFlight) {
        totalWork += clientCommandAdapter.receive();
    }

    totalWork += drainCommandQueue();
    // Position tracking consumes the running work count accumulated so far.
    totalWork += trackStreamPositions(totalWork, nowNs);
    totalWork += nameResolver.doWork(cachedEpochClock.time());
    totalWork += freeEndOfLifeResources(ctx.resourceFreeLimit());

    return totalWork;
}
// Removing a counter must notify clients in order (ready -> remove succeeded ->
// unavailable) and free the same counter id that was allocated.
@Test
void shouldInformClientsOfRemovedCounter() {
    final long registrationId = driverProxy.addCounter(
        COUNTER_TYPE_ID,
        counterKeyAndLabel,
        COUNTER_KEY_OFFSET,
        COUNTER_KEY_LENGTH,
        counterKeyAndLabel,
        COUNTER_LABEL_OFFSET,
        COUNTER_LABEL_LENGTH);

    driverConductor.doWork();

    final long removeCorrelationId = driverProxy.removeCounter(registrationId);
    driverConductor.doWork();

    final ArgumentCaptor<Integer> captor = ArgumentCaptor.forClass(Integer.class);
    final InOrder inOrder = inOrder(mockClientProxy);
    inOrder.verify(mockClientProxy).onCounterReady(eq(registrationId), captor.capture());
    inOrder.verify(mockClientProxy).operationSucceeded(removeCorrelationId);
    inOrder.verify(mockClientProxy).onUnavailableCounter(eq(registrationId), captor.capture());

    // The id announced as unavailable is the one handed back to the manager.
    verify(spyCountersManager).free(captor.getValue());
}
/**
 * Downloads the (rendered) URI into a temp file via a streaming HTTP client,
 * verifies the size against Content-Length when present, records a metric,
 * and stores the file (honoring a Content-Disposition filename when given).
 *
 * @param runContext the task run context
 * @return the output holding status code, headers, length and storage URI
 * @throws Exception on I/O failure, size mismatch, or (when configured)
 *         empty responses
 */
public Output run(RunContext runContext) throws Exception {
    Logger logger = runContext.logger();
    URI from = new URI(runContext.render(this.uri));

    // Destination temp file named after the remote resource.
    File tempFile = runContext.workingDir().createTempFile(filenameFromURI(from)).toFile();

    // Accumulates code/headers/length/uri as the stream is consumed.
    Output.OutputBuilder builder = Output.builder();

    try (
        ReactorStreamingHttpClient client = this.streamingClient(runContext, this.method);
        BufferedOutputStream output = new BufferedOutputStream(new FileOutputStream(tempFile));
    ) {
        @SuppressWarnings("unchecked")
        HttpRequest<String> request = this.request(runContext);

        // Stream response chunks straight to disk, summing chunk sizes.
        Long size = client
            .exchangeStream(request)
            .map(throwFunction(response -> {
                // Capture status and headers once, on the first chunk.
                if (builder.code == null) {
                    builder
                        .code(response.code())
                        .headers(response.getHeaders().asMap());
                }

                if (response.getBody().isPresent()) {
                    byte[] bytes = response.getBody().get().toByteArray();
                    output.write(bytes);
                    return (long) bytes.length;
                } else {
                    return 0L;
                }
            }))
            .reduce(Long::sum)
            .block();

        if (size == null) {
            size = 0L;
        }

        // Cross-check the downloaded size against the declared Content-Length.
        if (builder.headers != null && builder.headers.containsKey("Content-Length")) {
            long length = Long.parseLong(builder.headers.get("Content-Length").getFirst());
            if (length != size) {
                throw new IllegalStateException("Invalid size, got " + size + ", expected " + length);
            }
        }

        output.flush();

        runContext.metric(Counter.of("response.length", size, this.tags(request, null)));
        builder.length(size);

        if (size == 0) {
            if (this.failOnEmptyResponse) {
                throw new HttpClientResponseException("No response from server", HttpResponse.status(HttpStatus.SERVICE_UNAVAILABLE));
            } else {
                logger.warn("File '{}' is empty", from);
            }
        }

        // Prefer the server-suggested filename when a Content-Disposition is present.
        String filename = null;
        if (builder.headers != null && builder.headers.containsKey("Content-Disposition")) {
            String contentDisposition = builder.headers.get("Content-Disposition").getFirst();
            filename = filenameFromHeader(runContext, contentDisposition);
        }

        builder.uri(runContext.storage().putFile(tempFile, filename));

        logger.debug("File '{}' downloaded to '{}'", from, builder.uri);

        return builder.build();
    }
}
// The stored file must be named after the Content-Disposition filename
// returned by the test endpoint, not the request path.
@Test
void contentDisposition() throws Exception {
    EmbeddedServer embeddedServer = applicationContext.getBean(EmbeddedServer.class);
    embeddedServer.start();

    Download task = Download.builder()
        .id(DownloadTest.class.getSimpleName())
        .type(DownloadTest.class.getName())
        .uri(embeddedServer.getURI() + "/content-disposition")
        .build();

    RunContext runContext = TestsUtils.mockRunContext(this.runContextFactory, task, ImmutableMap.of());

    Download.Output output = task.run(runContext);

    assertThat(output.getUri().toString(), endsWith("filename.jpg"));
}
public List<MappingField> resolveAndValidateFields( List<MappingField> userFields, Map<String, String> options, NodeEngine nodeEngine ) { final InternalSerializationService serializationService = (InternalSerializationService) nodeEngine .getSerializationService(); final AbstractRelationsStorage relationsStorage = ((CalciteSqlOptimizer) nodeEngine.getSqlService().getOptimizer()) .relationsStorage(); // normalize and validate the names and external names for (MappingField field : userFields) { String name = field.name(); String externalName = field.externalName(); if (externalName == null) { if (name.equals(KEY) || name.equals(VALUE)) { externalName = name; } else { externalName = VALUE_PREFIX + name; } field.setExternalName(name); } if ((name.equals(KEY) && !externalName.equals(KEY)) || (name.equals(VALUE) && !externalName.equals(VALUE))) { throw QueryException.error("Cannot rename field: '" + name + '\''); } if (!EXT_NAME_PATTERN.matcher(externalName).matches()) { throw QueryException.error("Invalid external name: " + externalName); } } Stream<MappingField> keyFields = resolveAndValidateFields(true, userFields, options, serializationService, relationsStorage); Stream<MappingField> valueFields = resolveAndValidateFields(false, userFields, options, serializationService, relationsStorage); Map<String, MappingField> fields = Stream.concat(keyFields, valueFields) .collect(LinkedHashMap::new, (map, field) -> map.putIfAbsent(field.name(), field), Map::putAll); if (fields.isEmpty()) { throw QueryException.error("The resolved field list is empty"); } return new ArrayList<>(fields.values()); }
// Resolution with no options must fail fast, pointing at the missing
// mandatory 'keyFormat' option.
@Test
public void when_formatIsMissingInOptionsWhileResolvingFields_then_throws() {
    assertThatThrownBy(() -> resolvers.resolveAndValidateFields(emptyList(), emptyMap(), nodeEngine))
        .isInstanceOf(QueryException.class)
        .hasMessageContaining("Missing 'keyFormat' option");
}
/**
 * Decodes the configured flow table entries into flow rules.
 *
 * @return the decoded rules, or an empty set when no entries array is present
 */
public Set<FlowRule> flowtable() {
    final JsonNode node = object.path(ENTRIES);
    if (node.isArray()) {
        final Builder<FlowRule> rules = ImmutableSet.builder();
        for (JsonNode entry : (ArrayNode) node) {
            rules.add(decode(entry, FlowRule.class));
        }
        return rules.build();
    }
    return ImmutableSet.of();
}
// Round-trip: a flow table config initialized from the JSON fixture must
// decode to exactly the expected flow rule.
@Test
public void readTest() throws JsonProcessingException, IOException {
    FlowTableConfig sut = new FlowTableConfig();
    sut.init(DID, FlowTableConfig.CONFIG_KEY, cfgnode, mapper, noopDelegate);

    assertThat(sut.flowtable(), is(equalTo(ImmutableSet.of(FLOW_RULE))));
}
/**
 * Schedules an asynchronous UFS read of {@code [offset, offset + len)} from
 * {@code ufsPath} into {@code buf}, returning a future completed with the
 * number of bytes read.
 *
 * @throws OutOfRangeRuntimeException on a negative offset/len or len larger
 *         than the buffer's remaining space
 * @throws ResourceExhaustedRuntimeException when the read queue is full
 */
public CompletableFuture<Integer> read(ByteBuffer buf, long offset, long len, FileId fileId,
    String ufsPath, UfsReadOptions options) {
  Objects.requireNonNull(buf);
  // Validate the requested range against the destination buffer up front.
  if (offset < 0 || len < 0 || len > buf.remaining()) {
    throw new OutOfRangeRuntimeException(String.format(
        "offset is negative, len is negative, or len is greater than buf remaining. "
            + "offset: %s, len: %s, buf remaining: %s", offset, len, buf.remaining()));
  }
  if (mReadQueue.size() >= READ_CAPACITY) {
    throw new ResourceExhaustedRuntimeException("UFS read at capacity", true);
  }
  CompletableFuture<Integer> future = new CompletableFuture<>();
  // Zero-length reads complete immediately without being queued.
  if (len == 0) {
    future.complete(0);
    return future;
  }
  // Throughput meter is cached per mount point and tagged with UFS and user.
  Meter meter = mUfsBytesReadThroughputMetrics.computeIfAbsent(mUfsClient.getUfsMountPointUri(),
      uri -> MetricsSystem.meterWithTags(MetricKey.WORKER_BYTES_READ_UFS_THROUGHPUT.getName(),
          MetricKey.WORKER_BYTES_READ_UFS_THROUGHPUT.isClusterAggregated(), MetricInfo.TAG_UFS,
          MetricsSystem.escape(mUfsClient.getUfsMountPointUri()), MetricInfo.TAG_USER,
          options.getTag()));
  mReadQueue.add(new ReadTask(buf, ufsPath, fileId, offset, len, options, future, meter));
  return future;
}
// Reading the entire first block must fill the buffer with the expected
// byte pattern for the whole block.
@Test
public void readFullBlock() throws Exception {
    mUfsIOManager.read(TEST_BUF, 0, TEST_BLOCK_SIZE, FIRST_BLOCK_ID, mTestFilePath,
        UfsReadOptions.getDefaultInstance()).get();
    assertTrue(checkBuf(0, (int) TEST_BLOCK_SIZE, TEST_BUF));
    TEST_BUF.clear();
}
/**
 * Builds an {@code Application} from the given builder.
 *
 * @param builder the configured application builder
 * @return the built application
 * @throws Exception if the build fails
 */
@Beta
public static Application fromBuilder(Builder builder) throws Exception {
    final Application application = builder.build();
    return application;
}
// A default container with the mock searcher must answer a simple query
// with exactly one hit.
@Test
void search_default() throws Exception {
    try (
        ApplicationFacade app = new ApplicationFacade(Application.fromBuilder(
            new Application.Builder().container("default", new Application.Builder.Container()
                .searcher(MockSearcher.class))))
    ) {
        Result result = app.search(new Query("?query=foo&timeout=20000"));
        assertEquals(1, result.hits().size());
    }
}
/**
 * Asserts that the subject is a float other than {@code NaN}; a null
 * subject fails with the same expectation message.
 */
public final void isNotNaN() {
    if (actual != null) {
        isNotEqualTo(NaN);
    } else {
        failWithActual(simpleFact("expected a float other than NaN"));
    }
}
// isNotNaN() must fail when the actual value is NaN.
@Test
public void isNotNaNIsNaN() {
    expectFailureWhenTestingThat(Float.NaN).isNotNaN();
}
/**
 * Creates a new resource-manager builder scoped to the given test id.
 *
 * @param testId identifier used to namespace the managed resources
 * @return a fresh builder
 */
public static Builder builder(String testId) {
    final Builder managerBuilder = new Builder(testId);
    return managerBuilder;
}
// Building against a static (non-container) host/port must still yield a
// MongoDBResourceManager instance.
@Test
public void testCreateResourceManagerBuilderReturnsMongoDBResourceManager() {
    assertThat(
        MongoDBResourceManager.builder(TEST_ID)
            .useStaticContainer()
            .setHost(HOST)
            .setPort(MONGO_DB_PORT)
            .build())
        .isInstanceOf(MongoDBResourceManager.class);
}
/**
 * SQL NULLIF semantics: returns null when the two expressions are equal
 * (or when the first is null), otherwise returns the first expression.
 */
@Udf
public final <T> T nullIf(
    @UdfParameter(description = "expression 1") final T expr1,
    @UdfParameter(description = "expression 2") final T expr2
) {
  // A null first argument, or equality of the two, both yield null.
  if (expr1 == null || expr1.equals(expr2)) {
    return null;
  }
  return expr1;
}
// NULLIF with two different values must return the first value unchanged.
@Test
public void shouldReturnValue1IfBothValuesAreNonEqual() {
    assertThat(udf.nullIf("a", "b"), is("a"));
}
public static Descriptors.MethodDescriptor findMethodDescriptor(String base64ProtobufDescriptor, String serviceName, String methodName) throws InvalidProtocolBufferException, Descriptors.DescriptorValidationException { // Now we may have serviceName as being the FQDN. We have to find short version to later findServiceByName(). String shortServiceName = serviceName; if (serviceName.contains(".")) { shortServiceName = serviceName.substring(serviceName.lastIndexOf(".") + 1); } // Find descriptor with this service name as symbol. Descriptors.FileDescriptor fd = findFileDescriptorBySymbol(base64ProtobufDescriptor, shortServiceName); Descriptors.ServiceDescriptor sd = fd.findServiceByName(shortServiceName); return sd.findMethodByName(methodName); }
// The descriptor below embeds a dependency (shared/uuid.proto) alongside the
// GoodbyeService file; lookup by simple service name must still resolve the
// fully qualified method.
@Test
void testFindMethodDescriptorWithDependency() {
    // This is the GoodbyeService with descriptor embedding the shared/uuid.proto dependency.
    String base64ProtobufDescriptor = "CjsKEXNoYXJlZC91dWlkLnByb3RvEgZzaGFyZWQiFgoEVVVJRBIOCgJpZBgBIAEoCVICaWRiBnByb3RvMwqDAwoQZ29vZGJ5ZS12MS5wcm90bxIiaW8uZ2l0aHViLm1pY3JvY2tzLmdycGMuZ29vZGJ5ZS52MRoRc2hhcmVkL3V1aWQucHJvdG8iSgoOR29vZGJ5ZVJlcXVlc3QSHAoJZmlyc3RuYW1lGAEgASgJUglmaXJzdG5hbWUSGgoIbGFzdG5hbWUYAiABKAlSCGxhc3RuYW1lIlkKD0dvb2RieWVSZXNwb25zZRIaCghmYXJld2VsbBgBIAEoCVIIZmFyZXdlbGwSKgoJbWVzc2FnZUlkGAIgASgLMgwuc2hhcmVkLlVVSURSCW1lc3NhZ2VJZDKEAQoOR29vZGJ5ZVNlcnZpY2UScgoHZ29vZGJ5ZRIyLmlvLmdpdGh1Yi5taWNyb2Nrcy5ncnBjLmdvb2RieWUudjEuR29vZGJ5ZVJlcXVlc3QaMy5pby5naXRodWIubWljcm9ja3MuZ3JwYy5nb29kYnllLnYxLkdvb2RieWVSZXNwb25zZUICUAFiBnByb3RvMw==";

    Descriptors.MethodDescriptor desc = null;
    try {
        desc = GrpcUtil.findMethodDescriptor(base64ProtobufDescriptor, "GoodbyeService", "goodbye");
    } catch (Exception e) {
        fail("No exception should be thrown while parsing protobuf descriptor and searching service");
    }

    assertNotNull(desc);
    assertEquals("io.github.microcks.grpc.goodbye.v1.GoodbyeService.goodbye", desc.getFullName());
}
/**
 * FEEL matches(): returns whether {@code pattern} is found in {@code input},
 * honoring the given flag characters via an embedded flag expression. The
 * "U" (UNICODE_CHARACTER_CLASS) flag is always appended when any flags are
 * supplied.
 *
 * @throws InvalidParameterException when input or pattern is null
 */
static FEELFnResult<Boolean> matchFunctionWithFlags(String input, String pattern, String flags) {
    log.debug("Input: {} , Pattern: {}, Flags: {}", input, pattern, flags);
    if (input == null) {
        throw new InvalidParameterException("input");
    }
    if (pattern == null) {
        throw new InvalidParameterException("pattern");
    }

    String flagsString = "";
    if (flags != null && !flags.isEmpty()) {
        checkFlags(flags);
        // Keep the parameter untouched; force Unicode character classes.
        final String effectiveFlags = flags.contains("U") ? flags : flags + "U";
        flagsString = String.format("(?%s)", effectiveFlags);
    }
    log.debug("flagsString: {}", flagsString);

    final String stringToBeMatched = flagsString + pattern;
    log.debug("stringToBeMatched: {}", stringToBeMatched);

    final Matcher matcher = Pattern.compile(stringToBeMatched).matcher(input);
    final boolean matchFound = matcher.find();
    log.debug("matchFound: {}", matchFound);
    return FEELFnResult.ofResult(matchFound);
}
// With dot-all (s), multiline (m) and case-insensitive (i) all active,
// "Fo.^bar" must match across the newline in "fo\nbar".
@Test
void invokeWithAllFlags() {
    FunctionTestUtil.assertResult(MatchesFunction.matchFunctionWithFlags("fo\nbar", "Fo.^bar", "smi"), true);
}
List<Condition> run(boolean useKRaft) { List<Condition> warnings = new ArrayList<>(); checkKafkaReplicationConfig(warnings); checkKafkaBrokersStorage(warnings); if (useKRaft) { // Additional checks done for KRaft clusters checkKRaftControllerStorage(warnings); checkKRaftControllerCount(warnings); checkKafkaMetadataVersion(warnings); checkInterBrokerProtocolVersionInKRaft(warnings); checkLogMessageFormatVersionInKRaft(warnings); } else { // Additional checks done for ZooKeeper-based clusters checkKafkaLogMessageFormatVersion(warnings); checkKafkaInterBrokerProtocolVersion(warnings); checkKRaftMetadataStorageConfiguredForZooBasedCLuster(warnings); } return warnings; }
// Setting kraftMetadata=SHARED on ephemeral, persistent-claim and JBOD
// storage must all pass the KRaft spec checks without warnings.
@Test
public void checkKRaftMetadataConfigInKRaftMode() {
    // Kafka with Ephemeral storage
    KafkaNodePool ephemeralPool = new KafkaNodePoolBuilder(POOL_A)
        .editSpec()
            .withNewEphemeralStorage()
                .withKraftMetadata(KRaftMetadataStorage.SHARED)
            .endEphemeralStorage()
        .endSpec()
        .build();

    KafkaSpecChecker checker = generateChecker(KAFKA, List.of(CONTROLLERS, ephemeralPool), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE);
    List<Condition> warnings = checker.run(true);
    assertThat(warnings, hasSize(0));

    // Kafka with Persistent storage
    KafkaNodePool persistentPool = new KafkaNodePoolBuilder(POOL_A)
        .editSpec()
            .withNewPersistentClaimStorage()
                .withSize("100Gi")
                .withKraftMetadata(KRaftMetadataStorage.SHARED)
            .endPersistentClaimStorage()
        .endSpec()
        .build();

    checker = generateChecker(KAFKA, List.of(CONTROLLERS, persistentPool), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE);
    warnings = checker.run(true);
    assertThat(warnings, hasSize(0));

    // Kafka with JBOD storage
    // NOTE(review): both JBOD volumes use id 0 — likely intended to be
    // distinct ids; verify against the builder's duplicate-id handling.
    KafkaNodePool jbodPool = new KafkaNodePoolBuilder(POOL_A)
        .editSpec()
            .withNewJbodStorage()
                .addNewPersistentClaimStorageVolume()
                    .withId(0)
                    .withSize("100Gi")
                .endPersistentClaimStorageVolume()
                .addNewPersistentClaimStorageVolume()
                    .withId(0)
                    .withSize("100Gi")
                    .withKraftMetadata(KRaftMetadataStorage.SHARED)
                .endPersistentClaimStorageVolume()
            .endJbodStorage()
        .endSpec()
        .build();

    checker = generateChecker(KAFKA, List.of(CONTROLLERS, jbodPool), KafkaVersionTestUtils.DEFAULT_KRAFT_VERSION_CHANGE);
    warnings = checker.run(true);
    assertThat(warnings, hasSize(0));
}