focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Checks whether a member is an acceptable log source under the configured {@code sourceType}.
 *
 * @param leaderMemberId id of the current leader, or {@code NULL_VALUE} when unknown.
 * @param memberId       id of the candidate member.
 * @return true if the member may be used as a log source.
 */
boolean isAcceptable(final long leaderMemberId, final long memberId)
{
    switch (sourceType)
    {
        // LEADER: only the known leader itself is acceptable.
        case LEADER:
            return leaderMemberId != NULL_VALUE && memberId == leaderMemberId;

        // FOLLOWER: anyone but the known leader; everyone when the leader is unknown.
        case FOLLOWER:
            return leaderMemberId == NULL_VALUE || memberId != leaderMemberId;

        // ANY: no restriction.
        case ANY:
            return true;
    }

    throw new IllegalStateException("Unknown sourceType=" + sourceType);
}
@Test
void followerLogSourceTypeShouldOnlyAcceptFollowerAndUnknown()
{
    final LogSourceValidator validator = new LogSourceValidator(ClusterBackup.SourceType.FOLLOWER);
    final long leaderId = 123;
    final long followerId = 456;

    // The leader itself must be rejected.
    assertFalse(validator.isAcceptable(leaderId, leaderId));
    // Any non-leader member is acceptable.
    assertTrue(validator.isAcceptable(leaderId, followerId));
    // When the leader is unknown, everyone is acceptable.
    assertTrue(validator.isAcceptable(NULL_VALUE, NULL_VALUE));
    assertTrue(validator.isAcceptable(leaderId, NULL_VALUE));
    assertTrue(validator.isAcceptable(NULL_VALUE, followerId));
}
@Override protected void rollbackBlockStore(int height) throws BlockStoreException { lock.lock(); try { int currentHeight = getBestChainHeight(); checkArgument(height >= 0 && height <= currentHeight, () -> "bad height: " + height); if (height == currentHeight) return; // nothing to do // Look for the block we want to be the new chain head StoredBlock newChainHead = blockStore.getChainHead(); while (newChainHead.getHeight() > height) { newChainHead = newChainHead.getPrev(blockStore); if (newChainHead == null) throw new BlockStoreException("Unreachable height"); } // Modify store directly blockStore.put(newChainHead); this.setChainHead(newChainHead); } finally { lock.unlock(); } }
@Test public void rollbackBlockStore() throws Exception { Context.propagate(new Context(100, Coin.ZERO, false, true)); // This test simulates an issue on Android, that causes the VM to crash while receiving a block, so that the // block store is persisted but the wallet is not. Block b1 = TESTNET.getGenesisBlock().createNextBlock(coinbaseTo); Block b2 = b1.createNextBlock(coinbaseTo); // Add block 1, no frills. assertTrue(testNetChain.add(b1)); assertEquals(b1.cloneAsHeader(), testNetChain.getChainHead().getHeader()); assertEquals(1, testNetChain.getBestChainHeight()); assertEquals(1, testNetWallet.getLastBlockSeenHeight()); // Add block 2 while wallet is disconnected, to simulate crash. testNetChain.removeWallet(testNetWallet); assertTrue(testNetChain.add(b2)); assertEquals(b2.cloneAsHeader(), testNetChain.getChainHead().getHeader()); assertEquals(2, testNetChain.getBestChainHeight()); assertEquals(1, testNetWallet.getLastBlockSeenHeight()); // Add wallet back. This will detect the height mismatch and repair the damage done. testNetChain.addWallet(testNetWallet); assertEquals(b1.cloneAsHeader(), testNetChain.getChainHead().getHeader()); assertEquals(1, testNetChain.getBestChainHeight()); assertEquals(1, testNetWallet.getLastBlockSeenHeight()); // Now add block 2 correctly. assertTrue(testNetChain.add(b2)); assertEquals(b2.cloneAsHeader(), testNetChain.getChainHead().getHeader()); assertEquals(2, testNetChain.getBestChainHeight()); assertEquals(2, testNetWallet.getLastBlockSeenHeight()); }
/**
 * Returns true when the pickle's name matches at least one configured pattern
 * (partial match via {@code Matcher.find}).
 */
@Override
public boolean test(Pickle pickle) {
    final String name = pickle.getName();
    for (Pattern pattern : patterns) {
        if (pattern.matcher(name).find()) {
            return true;
        }
    }
    return false;
}
@Test
void wildcard_name_pattern_matches_part_of_name() {
    // A regex with a wildcard must match when it covers part of the pickle name.
    NamePredicate predicate = new NamePredicate(singletonList(Pattern.compile("a .* name")));
    Pickle pickle = createPickleWithName("a pickle name");

    assertTrue(predicate.test(pickle));
}
/**
 * Returns the absolute value of {@code val}, or null when the input is null.
 */
@Udf
public Integer abs(@UdfParameter final Integer val) {
    if (val == null) {
        return null;
    }
    return Math.abs(val);
}
@Test
public void shouldHandleNegative() {
    // Each numeric overload must negate a negative input.
    assertThat(udf.abs(-1), is(1));
    assertThat(udf.abs(-1L), is(1L));
    assertThat(udf.abs(-1.0), is(1.0));
    assertThat(udf.abs(new BigDecimal(-1)), is(new BigDecimal(-1).abs()));
}
/**
 * Migrates every migratable role's permissions to grants. Migrated permissions
 * are removed from the role; a role left empty is detached from its users and
 * deleted, otherwise the trimmed role is saved back.
 */
public void upgrade() {
    for (final MigratableRole migratableRole : findMigratableRoles()) {
        final Role role = migratableRole.role;
        final Set<String> migratedPermissions = migrateRoleToGrant(migratableRole);

        // Only touch the role when at least one permission was actually migrated.
        if (!role.getPermissions().removeAll(migratedPermissions)) {
            continue;
        }
        LOG.debug("Updating role <{}> new permissions: <{}>", role.getName(), role.getPermissions());

        if (role.getPermissions().isEmpty()) {
            LOG.info("Removing the now empty role <{}>", role.getName());
            userService.dissociateAllUsersFromRole(role);
            roleService.delete(role.getName());
        } else {
            try {
                roleService.save(role);
            } catch (ValidationException e) {
                LOG.error("Failed to update modified role <{}>", role.getName(), e);
            }
        }
    }
}
@Test
public void partlyMigratableRole() throws NotFoundException {
    final User testuser4 = userService.load("testuser3");
    assertThat(testuser4).isNotNull();

    // Before the migration the role still carries all five permissions.
    assertThat(roleService.load("partly-migratable-role")).satisfies(role -> {
        assertThat(role.getPermissions().size()).isEqualTo(5);
    });
    assertThat(dbGrantService.getForGranteesOrGlobal(ImmutableSet.of(grnRegistry.ofUser(testuser4)))).isEmpty();

    migration.upgrade();

    // Only the migratable permissions were removed; the rest stay on the role.
    assertThat(roleService.load("partly-migratable-role")).satisfies(role -> {
        assertThat(role.getPermissions().size()).isEqualTo(3);
    });
    assertThat(dbGrantService.getForGranteesOrGlobal(ImmutableSet.of(grnRegistry.ofUser(testuser4)))).isEmpty();
}
/**
 * Opens a writable channel that creates the file addressed by {@code resourceId}
 * on the Hadoop file system resolved from this instance's configuration.
 */
@Override
protected WritableByteChannel create(HadoopResourceId resourceId, CreateOptions createOptions)
        throws IOException {
    // Resolve the target file system from the path, then create the file there.
    return Channels.newChannel(
            resourceId.toPath().getFileSystem(configuration).create(resourceId.toPath()));
}
@Test
@Ignore("TestPipeline needs a way to take in HadoopFileSystemOptions")
public void testReadPipeline() throws Exception {
    // Seed the file system with three files matching the glob below.
    create("testFileA", "testDataA".getBytes(StandardCharsets.UTF_8));
    create("testFileB", "testDataB".getBytes(StandardCharsets.UTF_8));
    create("testFileC", "testDataC".getBytes(StandardCharsets.UTF_8));

    HadoopFileSystemOptions options =
            TestPipeline.testingPipelineOptions().as(HadoopFileSystemOptions.class);
    options.setHdfsConfiguration(ImmutableList.of(fileSystem.configuration));
    FileSystems.setDefaultPipelineOptions(options);

    PCollection<String> pc = p.apply(TextIO.read().from(testPath("testFile*").toString()));
    PAssert.that(pc).containsInAnyOrder("testDataA", "testDataB", "testDataC");
    p.run();
}
/**
 * Finds the first occurrence of {@code substring} in {@code str},
 * delegating to the three-argument overload with the default start position 1.
 */
@Udf
public int instr(final String str, final String substring) {
    return instr(str, substring, 1);
}
@Test
public void shouldExtractFromStartForPositivePositions() {
    // Default search starts at position 1 (1-based result).
    assertThat(udf.instr("CORPORATE FLOOR", "OR"), is(2));
    // An explicit start position skips earlier matches.
    assertThat(udf.instr("CORPORATE FLOOR", "OR", 3), is(5));
    // The occurrence argument selects the n-th match from the start position.
    assertThat(udf.instr("CORPORATE FLOOR", "OR", 3, 2), is(14));
    // A missing occurrence yields 0.
    assertThat(udf.instr("CORPORATE FLOOR", "OR", 3, 5), is(0));
    assertThat(udf.instr("CORPORATE FLOOR", "ATE"), is(7));
    assertThat(udf.instr("CORPORATE FLOOR", "ATE", 2), is(7));
    assertThat(udf.instr("CORPORATE FLOOR", "ATE", 3, 2), is(0));
}
/**
 * Returns a defensive copy of the Mac glyph-name table so callers cannot
 * modify the shared internal array.
 */
public static String[] getAllNames() {
    final String[] copy = new String[NUMBER_OF_MAC_GLYPHS];
    System.arraycopy(MAC_GLYPH_NAMES, 0, copy, 0, NUMBER_OF_MAC_GLYPHS);
    return copy;
}
@Test
void testAllNames() {
    String[] allNames = WGL4Names.getAllNames();
    assertNotNull(allNames);
    // The returned copy must cover the full glyph table.
    assertEquals(WGL4Names.NUMBER_OF_MAC_GLYPHS, allNames.length);
}
/**
 * Maps a SARIF run to ad-hoc rules and external issues.
 * A run without results maps to an empty result.
 */
RunMapperResult mapRun(Run run) {
    if (run.getResults().isEmpty()) {
        return new RunMapperResult();
    }

    String driverName = getToolDriverName(run);
    // Severities are resolved twice: once for the legacy taxonomy, once for the new CCT.
    Map<String, Result.Level> severitiesByRuleId = detectRulesSeverities(run, driverName);
    Map<String, Result.Level> severitiesByRuleIdForNewCCT = detectRulesSeveritiesForNewTaxonomy(run, driverName);

    return new RunMapperResult()
            .newAdHocRules(toNewAdHocRules(run, driverName, severitiesByRuleId, severitiesByRuleIdForNewCCT))
            .newExternalIssues(toNewExternalIssues(run, driverName, severitiesByRuleId, severitiesByRuleIdForNewCCT));
}
@Test
public void mapRun_failsIfDriverNotSet() {
    // A run without a tool driver cannot be mapped.
    when(run.getTool().getDriver()).thenReturn(null);

    assertThatIllegalArgumentException()
            .isThrownBy(() -> runMapper.mapRun(run))
            .withMessage("The run does not have a tool driver name defined.");
}
/**
 * Builds classloaders for the given plugin definitions on top of previously
 * created ones. Each new plugin classloader is parented on the shared API
 * classloader, gets its jars, and exports resources considering all definitions.
 */
public Map<PluginClassLoaderDef, ClassLoader> create(
        Map<PluginClassLoaderDef, ClassLoader> previouslyCreatedClassloaders,
        Collection<PluginClassLoaderDef> newDefs) {
    ClassLoader baseClassLoader = baseClassLoader();

    // Resource exports must consider both new and already-created definitions.
    Collection<PluginClassLoaderDef> allDefs = new HashSet<>();
    allDefs.addAll(newDefs);
    allDefs.addAll(previouslyCreatedClassloaders.keySet());

    ClassloaderBuilder builder = new ClassloaderBuilder(previouslyCreatedClassloaders.values());
    builder.newClassloader(API_CLASSLOADER_KEY, baseClassLoader);
    builder.setMask(API_CLASSLOADER_KEY, apiMask());

    for (PluginClassLoaderDef def : newDefs) {
        String pluginKey = def.getBasePluginKey();
        builder.newClassloader(pluginKey);
        builder.setParent(pluginKey, API_CLASSLOADER_KEY, Mask.ALL);
        builder.setLoadingOrder(pluginKey, def.isSelfFirstStrategy() ? SELF_FIRST : PARENT_FIRST);
        for (File jar : def.getFiles()) {
            builder.addURL(pluginKey, fileToUrl(jar));
        }
        exportResources(def, builder, allDefs);
    }
    return build(newDefs, builder);
}
@Test public void classloader_exports_resources_to_other_classloaders() { PluginClassLoaderDef baseDef = basePluginDef(); PluginClassLoaderDef dependentDef = dependentPluginDef(); Map<PluginClassLoaderDef, ClassLoader> map = factory.create(emptyMap(), asList(baseDef, dependentDef)); ClassLoader baseClassloader = map.get(baseDef); ClassLoader dependentClassloader = map.get(dependentDef); // base-plugin exports its API package to other plugins assertThat(canLoadClass(dependentClassloader, "org.sonar.plugins.base.api.BaseApi")).isTrue(); assertThat(canLoadClass(dependentClassloader, BASE_PLUGIN_CLASSNAME)).isFalse(); assertThat(canLoadClass(dependentClassloader, DEPENDENT_PLUGIN_CLASSNAME)).isTrue(); // dependent-plugin does not export its classes assertThat(canLoadClass(baseClassloader, DEPENDENT_PLUGIN_CLASSNAME)).isFalse(); assertThat(canLoadClass(baseClassloader, BASE_PLUGIN_CLASSNAME)).isTrue(); }
/**
 * Extracts the bucket name from a virtual-host style hostname (e.g.
 * "bucket.s3.amazonaws.com" against default hostname "s3.amazonaws.com").
 * Returns null when no default hostname is configured, when the hostname
 * equals the default, or when it is not a subdomain of the default.
 */
public static String findBucketInHostname(final Host host) {
    final String defaultHostname = host.getProtocol().getDefaultHostname();
    if (StringUtils.isBlank(defaultHostname)) {
        if (log.isDebugEnabled()) {
            log.debug(String.format("No default hostname set in %s", host.getProtocol()));
        }
        return null;
    }
    final String hostname = host.getHostname();
    if (hostname.equals(defaultHostname)) {
        // Path-style access: no bucket encoded in the hostname.
        return null;
    }
    if (!hostname.endsWith(defaultHostname)) {
        return null;
    }
    if (log.isDebugEnabled()) {
        log.debug(String.format("Find bucket name in %s", hostname));
    }
    return ServiceUtils.findBucketNameInHostname(hostname, defaultHostname);
}
@Test
public void testGetBucket() {
    // Virtual-host style S3 hostname: the bucket name is the leading subdomain.
    assertEquals("bucketname",
            RequestEntityRestStorageService.findBucketInHostname(
                    new Host(new S3Protocol(), "bucketname.s3.amazonaws.com")));
    // A protocol whose default hostname does not match yields no bucket.
    assertNull(RequestEntityRestStorageService.findBucketInHostname(
            new Host(new TestProtocol(), "bucketname.s3.amazonaws.com")));
}
/**
 * Builds the registry path of an online compute node,
 * e.g. "/nodes/compute_nodes/online/proxy".
 *
 * @param instanceType type of the instance.
 * @return slash-joined online node path.
 */
public static String getOnlineNodePath(final InstanceType instanceType) {
    // Lower-case with Locale.ROOT so the path is stable regardless of the default
    // locale (e.g. the Turkish locale maps 'I' to a dotless 'ı', corrupting the path).
    return String.join("/", "", ROOT_NODE, COMPUTE_NODE, ONLINE_NODE,
            instanceType.name().toLowerCase(java.util.Locale.ROOT));
}
@Test
void assertGetOnlineNodePath() {
    // The path ends with the lower-cased instance type name.
    assertThat(ComputeNode.getOnlineNodePath(InstanceType.PROXY), is("/nodes/compute_nodes/online/proxy"));
    assertThat(ComputeNode.getOnlineNodePath(InstanceType.JDBC), is("/nodes/compute_nodes/online/jdbc"));
}
/**
 * Creates a data source for the default logic database with the given mode configuration.
 */
public static DataSource createDataSource(final ModeConfiguration modeConfig) throws SQLException {
    return createDataSource(DefaultDatabase.LOGIC_NAME, modeConfig);
}
@Test
void assertCreateDataSourceWithDefaultModeConfigurationForMultipleDataSources() throws SQLException {
    // A null mode configuration still resolves to the default logic database name.
    assertDataSource(ShardingSphereDataSourceFactory.createDataSource(null), DefaultDatabase.LOGIC_NAME);
}
/**
 * Evaluates the client's SASL/OAUTHBEARER response. A single control-A byte
 * received after a stored error means the client acknowledged the failure, so
 * the stored error is raised; otherwise the initial response is parsed and the
 * contained token processed.
 */
@Override
public byte[] evaluateResponse(byte[] response) throws SaslException, SaslAuthenticationException {
    final boolean clientAcknowledgedError = response.length == 1
            && response[0] == OAuthBearerSaslClient.BYTE_CONTROL_A
            && errorMessage != null;
    if (clientAcknowledgedError) {
        log.debug("Received %x01 response from client after it received our error");
        throw new SaslAuthenticationException(errorMessage);
    }
    errorMessage = null;

    final OAuthBearerClientInitialResponse clientResponse;
    try {
        clientResponse = new OAuthBearerClientInitialResponse(response);
    } catch (SaslException e) {
        log.debug(e.getMessage());
        throw e;
    }
    return process(clientResponse.tokenValue(), clientResponse.authorizationId(), clientResponse.extensions());
}
@Test
public void authorizationIdNotEqualsAuthenticationId() {
    // An authorization id differing from the authenticated user must be rejected.
    assertThrows(SaslAuthenticationException.class,
            () -> saslServer.evaluateResponse(clientInitialResponse(USER + "x")));
}
/**
 * Compares two ranges for equality, first normalizing both to a common numeric
 * type when the ranges mix numeric endpoint types (e.g. Float vs Double).
 */
public static boolean safeRangeEquals(final Range<Comparable<?>> sourceRange, final Range<Comparable<?>> targetRange) {
    final Class<?> commonNumericType = getRangeTargetNumericType(sourceRange, targetRange);
    if (commonNumericType == null) {
        // No numeric conversion required: plain equality is safe.
        return sourceRange.equals(targetRange);
    }
    return createTargetNumericTypeRange(sourceRange, commonNumericType)
            .equals(createTargetNumericTypeRange(targetRange, commonNumericType));
}
@Test
void assertSafeRangeEqualsForFloat() {
    // A float range and a double range with the same bound must compare equal.
    assertTrue(SafeNumberOperationUtils.safeRangeEquals(Range.greaterThan(1.1F), Range.greaterThan(1.1)));
}
/**
 * Converts the plugin's check-connection JSON response body into a Result.
 */
@Override
public Result responseMessageForCheckConnectionToSCM(String responseBody) {
    return jsonResultMessageHandler.toResult(responseBody);
}
@Test
public void shouldBuildSuccessResultFromCheckSCMConnectionResponse() throws Exception {
    // NOTE(review): the fixture uses 'messages=' which is not strict JSON — confirm the
    // handler is intentionally lenient before normalizing this string.
    String responseBody = "{\"status\":\"success\",messages=[\"message-one\",\"message-two\"]}";

    Result result = messageHandler.responseMessageForCheckConnectionToSCM(responseBody);

    assertSuccessResult(result, List.of("message-one", "message-two"));
}
/**
 * Parses a media type from its textual representation,
 * e.g. "application/json; charset=UTF-8".
 */
@ProtoFactory
public static MediaType fromString(String tree) {
    if (tree == null || tree.isEmpty()) {
        throw CONTAINER.missingMediaType();
    }
    final Matcher matcher = TREE_PATTERN.matcher(tree);
    return parseSingleMediaType(tree, matcher, false);
}
@Test
public void testMultipleParameters() {
    // Parameters may be separated by semicolons with arbitrary surrounding whitespace.
    MediaType mediaType = MediaType.fromString("application/json; charset=UTF-8; param1=value1 ;param2=value2");
    assertMediaTypeWithParams(mediaType, "application", "json",
            new String[]{"charset", "param1", "param2"},
            new String[]{"UTF-8", "value1", "value2"});
}
/**
 * Checks whether any proxy-related system property is set for the given
 * protocol prefix (e.g. "http" + "." + property).
 */
@VisibleForTesting
static boolean areProxyPropertiesSet(String protocol) {
    for (String property : PROXY_PROPERTIES) {
        if (System.getProperty(protocol + "." + property) != null) {
            return true;
        }
    }
    return false;
}
@Test
public void testAreProxyPropertiesSet_httpUserSet() {
    // Setting an http proxy property must not affect the https check.
    System.setProperty("http.proxyUser", "user");

    Assert.assertTrue(MavenSettingsProxyProvider.areProxyPropertiesSet("http"));
    Assert.assertFalse(MavenSettingsProxyProvider.areProxyPropertiesSet("https"));
}
/**
 * Hash code consistent with equals: combines username, password, socket address,
 * non-proxy-hosts value, HTTP headers, proxy type and connect timeout.
 */
@Override
public int hashCode() {
    int hash = 1;
    hash = 31 * hash + Objects.hashCode(username);
    hash = 31 * hash + Objects.hashCode(getPasswordValue());
    hash = 31 * hash + Objects.hashCode(getSocketAddress().get());
    hash = 31 * hash + Boolean.hashCode(getNonProxyHostsValue());
    hash = 31 * hash + Objects.hashCode(httpHeaders.get());
    hash = 31 * hash + Objects.hashCode(getType());
    hash = 31 * hash + Long.hashCode(connectTimeoutMillis);
    return hash;
}
@Test
void differentPasswords() {
    // Proxies differing only in password are unequal and hash differently.
    assertThat(createProxy(ADDRESS_1, PASSWORD_1)).isNotEqualTo(createProxy(ADDRESS_1, PASSWORD_2));
    assertThat(createProxy(ADDRESS_1, PASSWORD_1).hashCode())
            .isNotEqualTo(createProxy(ADDRESS_1, PASSWORD_2).hashCode());
}
/**
 * Validates user-declared mapping fields for the JSON format. A non-empty
 * column list is mandatory and top-level paths cannot be mapped directly.
 */
@Override
public Stream<MappingField> resolveAndValidateFields(
        boolean isKey,
        List<MappingField> userFields,
        Map<String, String> options,
        InternalSerializationService serializationService
) {
    if (userFields.isEmpty()) {
        throw QueryException.error("Column list is required for JSON format");
    }
    return extractFields(userFields, isKey).entrySet().stream()
            .map(fieldEntry -> {
                final QueryPath path = fieldEntry.getKey();
                // Top-level paths ('__key' / 'this') cannot carry JSON-serialized values directly.
                if (path.isTopLevel()) {
                    throw QueryException.error("Cannot use '" + path + "' field with JSON serialization");
                }
                return fieldEntry.getValue();
            });
}
@Test
@Parameters({ "true, __key", "false, this" })
public void test_resolveFields(boolean key, String prefix) {
    // A non-top-level path under the given prefix resolves to itself.
    Stream<MappingField> fields = INSTANCE.resolveAndValidateFields(
            key,
            singletonList(field("field", QueryDataType.INT, prefix + ".field")),
            emptyMap(),
            null
    );

    assertThat(fields).containsExactly(field("field", QueryDataType.INT, prefix + ".field"));
}
/**
 * Walks up the parent chain and returns the topmost path component of the
 * given uri, or null when the uri itself is not a path component.
 */
static LocalUri.LocalUriPathComponent getFirstLocalUriPathComponent(LocalUri localUri) {
    LocalUri current = localUri;
    // Climb while the parent is still a path component (iterative form of the recursion).
    while (current.parent() instanceof LocalUri.LocalUriPathComponent) {
        current = current.parent();
    }
    return current instanceof LocalUri.LocalUriPathComponent
            ? (LocalUri.LocalUriPathComponent) current
            : null;
}
@Test
void getFirstLocalUriPathComponent() {
    // The first path component of a nested uri is its leading segment.
    LocalUri parsed = LocalUri.parse("/example/some-id/instances/some-instance-id");

    LocalUri retrieved = ModelLocalUriId.getFirstLocalUriPathComponent(parsed);

    assertThat(retrieved).isEqualTo(LocalUri.parse("/example"));
}
/**
 * Parses a resource configuration value, using Long.MAX_VALUE as the default
 * for resource types not mentioned in the value.
 */
public static ConfigurableResource parseResourceConfigValue(String value)
    throws AllocationConfigurationException {
  return parseResourceConfigValue(value, Long.MAX_VALUE);
}
// Exercises parseResourceConfigValue() against absolute values ("5120 mb 2 vcores"),
// percentage forms resolved against a cluster resource, custom resource types
// (after resetting ResourceUtils), explicit defaults, and assorted whitespace,
// case and separator variants. Kept byte-identical: the assertion chain is very
// long and order-sensitive.
@Test public void testParseResourceConfigValue() throws Exception { Resource expected = Resources.createResource(5 * 1024, 2); Resource clusterResource = Resources.createResource(10 * 1024, 4); assertEquals(expected, parseResourceConfigValue("5120 mb 2 vcores").getResource()); assertEquals(expected, parseResourceConfigValue("2 vcores, 5120 mb").getResource()); assertEquals(expected, parseResourceConfigValue("5120 mb, 2 vcores").getResource()); assertEquals(expected, parseResourceConfigValue("2vcores,5120mb").getResource()); assertEquals(expected, parseResourceConfigValue("5120mb,2vcores").getResource()); assertEquals(expected, parseResourceConfigValue("5120mb mb, 2 vcores").getResource()); assertEquals(expected, parseResourceConfigValue("5120 Mb, 2 vCores").getResource()); assertEquals(expected, parseResourceConfigValue(" 5120 mb, 2 vcores ").getResource()); assertEquals(expected, parseResourceConfigValue(" 5120.3 mb, 2.35 vcores ").getResource()); assertEquals(expected, parseResourceConfigValue(" 5120. mb, 2. vcores ").getResource()); assertEquals(expected, parseResourceConfigValue("50% memory, 50% cpu"). getResource(clusterResource)); assertEquals(expected, parseResourceConfigValue("50% Memory, 50% CpU"). getResource(clusterResource)); assertEquals(Resources.createResource(5 * 1024, 4), parseResourceConfigValue("50% memory, 100% cpu"). getResource(clusterResource)); assertEquals(Resources.createResource(5 * 1024, 4), parseResourceConfigValue(" 100% cpu, 50% memory"). getResource(clusterResource)); assertEquals(Resources.createResource(5 * 1024, 0), parseResourceConfigValue("50% memory, 0% cpu"). getResource(clusterResource)); assertEquals(expected, parseResourceConfigValue("50 % memory, 50 % cpu"). getResource(clusterResource)); assertEquals(expected, parseResourceConfigValue("50%memory,50%cpu"). getResource(clusterResource)); assertEquals(expected, parseResourceConfigValue(" 50 % memory, 50 % cpu "). 
getResource(clusterResource)); assertEquals(expected, parseResourceConfigValue("50.% memory, 50.% cpu"). getResource(clusterResource)); assertEquals(Resources.createResource((int)(1024 * 10 * 0.109), 2), parseResourceConfigValue("10.9% memory, 50.6% cpu"). getResource(clusterResource)); assertEquals(expected, parseResourceConfigValue("50%").getResource(clusterResource)); Configuration conf = new Configuration(); conf.set(YarnConfiguration.RESOURCE_TYPES, "test1"); ResourceUtils.resetResourceTypes(conf); clusterResource = Resources.createResource(10 * 1024, 4); expected = Resources.createResource(5 * 1024, 2); expected.setResourceValue("test1", Long.MAX_VALUE); assertEquals(expected, parseResourceConfigValue("vcores=2, memory-mb=5120").getResource()); assertEquals(expected, parseResourceConfigValue("memory-mb=5120, vcores=2").getResource()); assertEquals(expected, parseResourceConfigValue("vcores=2,memory-mb=5120").getResource()); assertEquals(expected, parseResourceConfigValue(" vcores = 2 , " + "memory-mb = 5120 ").getResource()); expected.setResourceValue("test1", 0L); assertEquals(expected, parseResourceConfigValue("vcores=2, memory-mb=5120", 0L).getResource()); assertEquals(expected, parseResourceConfigValue("memory-mb=5120, vcores=2", 0L).getResource()); assertEquals(expected, parseResourceConfigValue("vcores=2,memory-mb=5120", 0L).getResource()); assertEquals(expected, parseResourceConfigValue(" vcores = 2 , memory-mb = 5120 ", 0L).getResource()); clusterResource.setResourceValue("test1", 8L); expected.setResourceValue("test1", 4L); assertEquals(expected, parseResourceConfigValue("50%").getResource(clusterResource)); assertEquals(expected, parseResourceConfigValue("vcores=2, memory-mb=5120, " + "test1=4").getResource()); assertEquals(expected, parseResourceConfigValue("test1=4, vcores=2, " + "memory-mb=5120").getResource()); assertEquals(expected, parseResourceConfigValue("memory-mb=5120, test1=4, " + "vcores=2").getResource()); assertEquals(expected, 
parseResourceConfigValue("vcores=2,memory-mb=5120," + "test1=4").getResource()); assertEquals(expected, parseResourceConfigValue(" vcores = 2 , memory-mb = 5120 , " + "test1 = 4 ").getResource()); expected = Resources.createResource(4 * 1024, 3); expected.setResourceValue("test1", 8L); assertEquals(expected, parseResourceConfigValue("vcores=75%, " + "memory-mb=40%").getResource(clusterResource)); assertEquals(expected, parseResourceConfigValue("memory-mb=40%, " + "vcores=75%").getResource(clusterResource)); assertEquals(expected, parseResourceConfigValue("vcores=75%," + "memory-mb=40%").getResource(clusterResource)); assertEquals(expected, parseResourceConfigValue(" vcores = 75 % , " + "memory-mb = 40 % ").getResource(clusterResource)); expected.setResourceValue("test1", 4L); assertEquals(expected, parseResourceConfigValue("vcores=75%, memory-mb=40%, " + "test1=50%").getResource(clusterResource)); assertEquals(expected, parseResourceConfigValue("test1=50%, vcores=75%, " + "memory-mb=40%").getResource(clusterResource)); assertEquals(expected, parseResourceConfigValue("memory-mb=40%, test1=50%, " + "vcores=75%").getResource(clusterResource)); assertEquals(expected, parseResourceConfigValue("vcores=75%,memory-mb=40%," + "test1=50%").getResource(clusterResource)); assertEquals(expected, parseResourceConfigValue(" vcores = 75 % , memory-mb = 40 % , " + "test1 = 50 % ").getResource(clusterResource)); }
/**
 * Returns the metadata class handled by this step.
 */
@Override
public Class<RestMeta> getMetaClass() {
    return RestMeta.class;
}
@Test
public void testGetMetaClass() throws Exception {
    // The step must report RestMeta as its metadata class.
    assertEquals(RestMeta.class, consumer.getMetaClass());
}
/**
 * Puts all mappings into the delegated state and records the whole map as an
 * added value in the change log for the current namespace.
 */
@Override
public void putAll(Map<UK, UV> map) throws Exception {
    delegatedState.putAll(map);
    changeLogger.valueAdded(map, getCurrentNamespace());
}
@Test
public void testPutAllRecorded() throws Exception {
    // putAll must be forwarded to the change logger as a single map value.
    Map<String, String> map = singletonMap("x", "y");
    testRecorded(
            emptyMap(),
            state -> state.putAll(map),
            logger -> assertEquals(map, logger.state));
}
/**
 * Builds a config payload from the given XML element: validates the config
 * name attribute first, then parses every child element into the builder.
 */
public ConfigPayloadBuilder build(Element configE) {
    parseConfigName(configE);
    final ConfigPayloadBuilder payloadBuilder = new ConfigPayloadBuilder(configDefinition);
    for (final Element child : XML.getChildren(configE)) {
        parseElement(child, payloadBuilder, null);
    }
    return payloadBuilder;
}
@Test
void testFailNoNameAttribute() {
    // A <config/> element without a name attribute must be rejected.
    Element configRoot = getDocument(new StringReader("<config/>"));
    try {
        new DomConfigPayloadBuilder(null).build(configRoot);
        fail("Expected exception for mismatch between def-name and xml name attribute.");
    } catch (IllegalArgumentException e) {
        assertEquals("The 'config' element must have a 'name' attribute that matches the name of the config definition",
                e.getMessage());
    }
}
/**
 * Looks up a network policy by uid in the backing store.
 *
 * @param uid non-empty policy uid.
 */
@Override
public NetworkPolicy networkPolicy(String uid) {
    checkArgument(!Strings.isNullOrEmpty(uid), ERR_NULL_NETWORK_POLICY_UID);
    return k8sNetworkPolicyStore.networkPolicy(uid);
}
@Test
public void testGetNetworkPolicyByUid() {
    createBasicNetworkPolicies();
    // A stored uid resolves to a policy; an unknown uid resolves to null.
    assertNotNull("Network policy did not match", target.networkPolicy(NETWORK_POLICY_UID));
    assertNull("Network policy did not match", target.networkPolicy(UNKNOWN_UID));
}
/**
 * Creates a field-by-field copy of this configuration.
 * NOTE(review): this builds a fresh instance instead of calling super.clone(),
 * so any field added later must also be copied here manually — confirm intended.
 */
@Override
public Mysql clone() throws CloneNotSupportedException {
    final Mysql copy = new Mysql();
    copy.setUser(getUser());
    copy.setPwd(getPwd());
    copy.setCmd(getCmd());
    return copy;
}
@Test
void testClone() throws CloneNotSupportedException {
    Mysql cloned = mysql.clone();
    // A clone must be equal to the original, including its hash code.
    assertEquals(mysql.hashCode(), cloned.hashCode());
    assertEquals(mysql, cloned);
}
/**
 * Creates a note at the given path using the default interpreter setting's name.
 */
public String createNote(String notePath, AuthenticationInfo subject) throws IOException {
    return createNote(notePath, interpreterSettingManager.getDefaultInterpreterSetting().getName(), subject);
}
@Test void testScheduleDisabled() throws InterruptedException, IOException { zConf.setProperty(ConfVars.ZEPPELIN_NOTEBOOK_CRON_ENABLE.getVarName(), "false"); final int timeout = 10; final String everySecondCron = "* * * * * ?"; final CountDownLatch jobsToExecuteCount = new CountDownLatch(5); final String noteId = notebook.createNote("note1", anonymous); executeNewParagraphByCron(noteId, everySecondCron); afterStatusChangedListener = new StatusChangedListener() { @Override public void onStatusChanged(Job<?> job, Status before, Status after) { if (after == Status.FINISHED) { jobsToExecuteCount.countDown(); } } }; // This job should not run because "ZEPPELIN_NOTEBOOK_CRON_ENABLE" is set to false assertFalse(jobsToExecuteCount.await(timeout, TimeUnit.SECONDS)); terminateScheduledNote(noteId); afterStatusChangedListener = null; }
/**
 * Returns the configured parser.
 */
public Parser getParser() {
    return parser;
}
@Test public void parserWithChildParsers() throws Exception { try { TikaConfig config = getConfig("TIKA-1653-norepeat.xml"); CompositeParser cp = (CompositeParser) config.getParser(); List<Parser> parsers = cp.getAllComponentParsers(); Parser p; // Just 2 top level parsers assertEquals(2, parsers.size()); // Should have a CompositeParser with 2 child ones, and // and a wrapped empty parser p = parsers.get(0); assertTrue(p instanceof CompositeParser, p.toString()); assertEquals(2, ((CompositeParser) p).getAllComponentParsers().size()); p = parsers.get(1); assertTrue(p instanceof ParserDecorator, p.toString()); assertEquals(EmptyParser.class, ((ParserDecorator) p).getWrappedParser().getClass()); assertEquals("hello/world", p.getSupportedTypes(null).iterator().next().toString()); } catch (TikaException e) { fail("Unexpected TikaException: " + e); } }
/**
 * Publishes the metric registry into the servlet context under the
 * InstrumentedFilter registry attribute so the filter can look it up.
 */
@Override
public void contextInitialized(ServletContextEvent sce) {
    sce.getServletContext()
            .setAttribute(InstrumentedFilter.REGISTRY_ATTRIBUTE, getMetricRegistry());
}
@Test
public void injectsTheMetricRegistryIntoTheServletContext() {
    final ServletContext context = mock(ServletContext.class);
    final ServletContextEvent event = mock(ServletContextEvent.class);
    when(event.getServletContext()).thenReturn(context);

    listener.contextInitialized(event);

    // The registry must be stored under the well-known attribute name.
    verify(context).setAttribute("io.dropwizard.metrics.servlet6.InstrumentedFilter.registry", registry);
}
/**
 * Adds a JVM option after trimming it. The option must be non-null, non-empty,
 * start with '-', and must not overwrite a mandatory option.
 *
 * @return this, for chaining.
 */
public T add(String str) {
    requireNonNull(str, JVM_OPTION_NOT_NULL_ERROR_MESSAGE);
    final String option = str.trim();
    if (isInvalidOption(option)) {
        throw new IllegalArgumentException("a JVM option can't be empty and must start with '-'");
    }
    checkMandatoryOptionOverwrite(option);
    options.add(option);
    return castThis();
}
@Test
public void add_throws_NPE_if_argument_is_null() {
    // A null option must fail fast with the dedicated NPE message.
    expectJvmOptionNotNullNPE(() -> underTest.add(null));
}
// Returns the current lifecycle state.
State getState() {
    return state;
}
@Test
public void verify_start_graceful_stop_interrupted_by_hard_stop_cycle() {
    // Full lifecycle: a graceful stop interrupted by a hard stop still reaches STOPPED.
    assertThat(underTest.getState()).isEqualTo(INIT);
    verifyMoveTo(STARTING);
    verifyMoveTo(OPERATIONAL);
    verifyMoveTo(STOPPING);
    verifyMoveTo(HARD_STOPPING);
    verifyMoveTo(FINALIZE_STOPPING);
    verifyMoveTo(STOPPED);
}
/**
 * Returns an unmodifiable view of the server member map.
 */
public Map<String, Member> getServerList() {
    return Collections.unmodifiableMap(serverList);
}
@Test
void testGetServerList() {
    // The fixture registers two members.
    assertEquals(2, serverMemberManager.getServerList().size());
}
/**
 * Expands 'include' helpers in the flow file, validates the resulting flow,
 * and prints the expanded content to stdout.
 *
 * @return 0 on success.
 */
@Override
public Integer call() throws Exception {
    super.call();
    stdErr("Warning, this functionality is deprecated and will be removed at some point.");

    final String expanded = IncludeHelperExpander.expand(Files.readString(file), file.getParent());
    final Flow flow = yamlFlowParser.parse(expanded, Flow.class);
    modelValidator.validate(flow);

    stdOut(expanded);
    return 0;
}
// Runs FlowExpandCommand against a fixture flow and asserts the exact expanded
// YAML printed to stdout. Kept byte-identical: the expected-output string is a
// long escaped concatenation that must match the command output exactly.
@SuppressWarnings("deprecation") @Test void run() { ByteArrayOutputStream out = new ByteArrayOutputStream(); System.setOut(new PrintStream(out)); try (ApplicationContext ctx = ApplicationContext.builder().deduceEnvironment(false).start()) { String[] args = { "src/test/resources/helper/flow.yaml" }; Integer call = PicocliRunner.call(FlowExpandCommand.class, ctx, args); assertThat(call, is(0)); assertThat(out.toString(), is( "id: include\n" + "namespace: io.kestra.cli\n" + "\n" + "# The list of tasks\n" + "tasks:\n" + "- id: t1\n" + "  type: io.kestra.plugin.core.debug.Return\n" + "  format: \"Lorem ipsum dolor sit amet\"\n" + "- id: t2\n" + "  type: io.kestra.plugin.core.debug.Return\n" + "  format: |\n" + "    Lorem ipsum dolor sit amet\n" + "    Lorem ipsum dolor sit amet\n" )); } }
public void parse(InputStream stream, ContentHandler handler, Metadata metadata, ParseContext context) throws IOException, SAXException, TikaException { //set OfficeParserConfig if the user hasn't specified one configure(context); // Have the OOXML file processed OOXMLExtractorFactory.parse(stream, handler, metadata, context); }
// Parses testWORD_custom_props.docx and asserts built-in and custom document
// properties (creator, dates, counts, custom booleans/numbers/strings/dates).
// Kept byte-identical: a long, order-sensitive chain of exact metadata assertions.
@Test public void testWordCustomProperties() throws Exception { Metadata metadata = new Metadata(); try (InputStream input = getResourceAsStream( "/test-documents/testWORD_custom_props.docx")) { ContentHandler handler = new BodyContentHandler(-1); ParseContext context = new ParseContext(); context.set(Locale.class, Locale.US); new OOXMLParser().parse(input, handler, metadata, context); } assertEquals("application/vnd.openxmlformats-officedocument.wordprocessingml.document", metadata.get(Metadata.CONTENT_TYPE)); assertEquals("EJ04325S", metadata.get(TikaCoreProperties.CREATOR)); assertEquals("Etienne Jouvin", metadata.get(TikaCoreProperties.MODIFIER)); assertEquals("2011-07-29T16:52:00Z", metadata.get(TikaCoreProperties.CREATED)); assertEquals("2012-01-03T22:14:00Z", metadata.get(TikaCoreProperties.MODIFIED)); assertEquals("Microsoft Office Word", metadata.get(OfficeOpenXMLExtended.APPLICATION)); assertEquals("1", metadata.get(Office.PAGE_COUNT)); assertEquals("2", metadata.get(Office.WORD_COUNT)); assertEquals("My Title", metadata.get(TikaCoreProperties.TITLE)); assertEquals("My Keyword", metadata.get(Office.KEYWORDS)); assertContains("My Keyword", Arrays.asList(metadata.getValues(TikaCoreProperties.SUBJECT))); assertEquals("Normal.dotm", metadata.get(OfficeOpenXMLExtended.TEMPLATE)); assertEquals("My subject", metadata.get(DublinCore.SUBJECT)); assertEquals("EDF-DIT", metadata.get(TikaCoreProperties.PUBLISHER)); assertEquals("true", metadata.get("custom:myCustomBoolean")); assertEquals("3", metadata.get("custom:myCustomNumber")); assertEquals("MyStringValue", metadata.get("custom:MyCustomString")); assertEquals("2010-12-30T23:00:00Z", metadata.get("custom:MyCustomDate")); assertEquals("2010-12-29T22:00:00Z", metadata.get("custom:myCustomSecondDate")); }
/**
 * Serves a classpath resource for the requested URI. Known resources are
 * returned with a content type and a permissive CORS header; unknown
 * resources yield a 404. Any failure is logged and otherwise swallowed.
 */
@Override
public void handle(HttpExchange httpExchange) {
    try {
        final String requestUri = sanitizeRequestUri(httpExchange.getRequestURI().toString());
        final String toServe = requestUri.substring((contextPath + "/").length());
        final URL resource = this.getClass().getClassLoader().getResource(rootDir + toServe);
        if (resource == null) {
            httpExchange.sendResponseHeaders(404, -1);
            return;
        }
        httpExchange.getResponseHeaders().add(ContentType._HEADER_NAME, ContentType.from(toServe));
        httpExchange.getResponseHeaders().add("Access-Control-Allow-Origin", "*");
        httpExchange.sendResponseHeaders(200, 0);
        copyResourceToResponseBody(resource, httpExchange);
    } catch (Exception shouldNotHappen) {
        LOGGER.error("Error serving static files", shouldNotHappen);
    }
}
// Serving an existing classpath resource must respond 200 with a zero
// (streamed) response length.
@Test void servesRequestedFile() throws IOException { when(httpExchange.getRequestURI()).thenReturn(URI.create("/dashboard/test.html")); staticFileHttpHandler.handle(httpExchange); verify(httpExchange).sendResponseHeaders(200, 0); }
// Factory hook: wraps the configuration in a BigQueryExportSchemaTransform
// (validation happens when the transform is applied, not here).
@Override protected SchemaTransform from(BigQueryExportReadSchemaTransformConfiguration configuration) { return new BigQueryExportSchemaTransform(configuration); }
// Each invalid builder combination (empty config, query together with
// tableSpec, queryLocation without a query, useStandardSql without a query)
// must be rejected with the expected exception when the transform is applied.
@Test public void testInvalidConfiguration() { BigQueryExportReadSchemaTransformProvider provider = new BigQueryExportReadSchemaTransformProvider(); for (Pair< BigQueryExportReadSchemaTransformConfiguration.Builder, ? extends Class<? extends RuntimeException>> caze : Arrays.asList( Pair.of( BigQueryExportReadSchemaTransformConfiguration.builder(), IllegalArgumentException.class), Pair.of( BigQueryExportReadSchemaTransformConfiguration.builder() .setQuery(QUERY) .setTableSpec(TABLE_SPEC), IllegalStateException.class), Pair.of( BigQueryExportReadSchemaTransformConfiguration.builder() .setQueryLocation(LOCATION), IllegalArgumentException.class), Pair.of( BigQueryExportReadSchemaTransformConfiguration.builder() .setUseStandardSql(true), IllegalArgumentException.class))) { BigQueryExportSchemaTransform schemaTransform = (BigQueryExportSchemaTransform) provider.from(caze.getLeft().build()); schemaTransform.setTestBigQueryServices(fakeBigQueryServices); PCollectionRowTuple empty = PCollectionRowTuple.empty(p); assertThrows(caze.getRight(), () -> empty.apply(schemaTransform)); } }
/**
 * Returns the set of HTTP methods for which a route matches the given URI path.
 *
 * @param uri request URI; only its path component is matched (query string ignored)
 * @return all allowed methods when an any-method route matches, otherwise the
 *         subset of registered methods whose router matches the path tokens
 */
public Set<HttpMethod> allowedMethods(String uri) {
    // Parse the path portion of the URI and split it into normalized tokens.
    QueryStringDecoder decoder = new QueryStringDecoder(uri);
    String[] tokens = PathPattern.removeSlashesAtBothEnds(decoder.path()).split("/");
    // A route registered for "any method" matches every method at once.
    if (anyMethodRouter.anyMatched(tokens)) {
        return allAllowedMethods();
    }
    // Otherwise collect each method whose method-specific router matches.
    // Idiom cleanup: diamond operator and removal of redundant locals.
    Set<HttpMethod> ret = new HashSet<>(routers.size());
    for (Map.Entry<HttpMethod, MethodlessRouter<T>> entry : routers.entrySet()) {
        if (entry.getValue().anyMatched(tokens)) {
            ret.add(entry.getKey());
        }
    }
    return ret;
}
// The router registers 9 methods overall; "/articles" accepts exactly GET and POST.
@Test void testAllowedMethods() { assertThat(router.allAllowedMethods()).hasSize(9); Set<HttpMethod> methods = router.allowedMethods("/articles"); assertThat(methods).hasSize(2); assertThat(methods.contains(GET)).isTrue(); assertThat(methods.contains(POST)).isTrue(); }
@SuppressWarnings("unchecked")
public static KiePMMLModelFactory loadKiePMMLModelFactory(ModelLocalUriId modelLocalUriId, PMMLContext pmmlContext) {
    // Look up the executable resource previously generated for this model id
    // and delegate loading to the resource-based overload; fail fast when the
    // resource was never generated.
    return getGeneratedExecutableResource(modelLocalUriId, pmmlContext.getGeneratedResourcesMap())
            .map(resource -> loadKiePMMLModelFactory(resource, pmmlContext))
            .orElseThrow(() -> new KieRuntimeServiceException("Can not find expected GeneratedExecutableResource " +
                    "for " + modelLocalUriId));
}
// Looking up a model factory under a URI that was never registered must raise
// KieRuntimeServiceException.
@Test void loadNotExistingKiePMMLModelFactory() { try { PMMLLoaderUtils.loadKiePMMLModelFactory(new ModelLocalUriId(LocalUri.parse("/notpmml/" + basePath)), getPMMLContext(FILE_NAME, MODEL_NAME)); fail("Expecting KieRuntimeServiceException"); } catch (Exception e) { assertThat(e).isInstanceOf(KieRuntimeServiceException.class); } }
@Override
public PipelineChannel newInstance(final int importerBatchSize, final PipelineChannelAckCallback ackCallback) {
    // Scale the configured queue size down by the importer batch size so the
    // channel holds roughly the same number of records regardless of batching.
    // (Local renamed to avoid shadowing the `queueSize` field.)
    int scaledQueueSize = queueSize / importerBatchSize;
    return new MemoryPipelineChannel(scaledQueueSize, ackCallback);
}
// A configured block-queue-size of 0 must be preserved in the creator and
// yield a SynchronousQueue-backed channel.
@Test void assertInitWithZeroBlockQueueSize() throws Exception { PipelineChannelCreator creator = TypedSPILoader.getService(PipelineChannelCreator.class, "MEMORY", PropertiesBuilder.build(new Property("block-queue-size", "0"))); assertThat(Plugins.getMemberAccessor().get(MemoryPipelineChannelCreator.class.getDeclaredField("queueSize"), creator), is(0)); PipelineChannel channel = creator.newInstance(1000, new InventoryTaskAckCallback(new AtomicReference<>())); assertInstanceOf(SynchronousQueue.class, Plugins.getMemberAccessor().get(MemoryPipelineChannel.class.getDeclaredField("queue"), channel)); }
/**
 * Orders properties by their DISPLAY_ORDER option.
 *
 * <p>Fix: the original subtracted the two ints, which can overflow and invert
 * the sign for extreme order values, violating the Comparable contract.
 * {@link Integer#compare(int, int)} is overflow-safe and still returns 1/0/-1
 * for adjacent orders, so existing callers (and tests asserting the exact
 * value for orders 1 vs 0) are unaffected.
 */
@Override
public int compareTo(PluginSettingsProperty o) {
    return Integer.compare(this.getOption(DISPLAY_ORDER), o.getOption(DISPLAY_ORDER));
}
// Ordering is by DISPLAY_ORDER: a property with order 1 compares greater
// than one with order 0 (result exactly 1 for these adjacent values).
@Test public void shouldCompareTwoPropertiesBasedOnOrder() { PluginSettingsProperty p1 = createProperty("Test-Property", 1); PluginSettingsProperty p2 = createProperty("Test-Property", 0); assertThat(p1.compareTo(p2), is(1)); }
// Returns statistics for an Iceberg table at the snapshot pinned by `version`.
// Without a pinned end snapshot, a 1-row placeholder with unknown column
// statistics is returned. Plan-files work is triggered (once) per
// (db, table, snapshot, predicate) key. When iceberg column statistics are
// disabled in the session, cardinality is derived from the cached scan tasks —
// a missing cache entry for the key is an error; otherwise the statistic
// provider computes full table statistics.
@Override public Statistics getTableStatistics(OptimizerContext session, Table table, Map<ColumnRefOperator, Column> columns, List<PartitionKey> partitionKeys, ScalarOperator predicate, long limit, TableVersionRange version) { IcebergTable icebergTable = (IcebergTable) table; long snapshotId; if (version.end().isPresent()) { snapshotId = version.end().get(); } else { Statistics.Builder statisticsBuilder = Statistics.builder(); statisticsBuilder.setOutputRowCount(1); statisticsBuilder.addColumnStatistics(statisticProvider.buildUnknownColumnStatistics(columns.keySet())); return statisticsBuilder.build(); } PredicateSearchKey key = PredicateSearchKey.of( icebergTable.getRemoteDbName(), icebergTable.getRemoteTableName(), snapshotId, predicate); triggerIcebergPlanFilesIfNeeded(key, icebergTable, predicate, limit); if (!session.getSessionVariable().enableIcebergColumnStatistics()) { List<FileScanTask> icebergScanTasks = splitTasks.get(key); if (icebergScanTasks == null) { throw new StarRocksConnectorException("Missing iceberg split task for table:[{}.{}]. predicate:[{}]", icebergTable.getRemoteDbName(), icebergTable.getRemoteTableName(), predicate); } try (Timer ignored = Tracers.watchScope(EXTERNAL, "ICEBERG.calculateCardinality" + key)) { return statisticProvider.getCardinalityStats(columns, icebergScanTasks); } } else { return statisticProvider.getTableStatistics(icebergTable, columns, session, predicate, version); } }
// Two appends of FILE_A followed by a stats request at the current snapshot
// should report 2 output rows when column statistics are enabled.
@Test public void testGetRepeatedTableStats() { IcebergHiveCatalog icebergHiveCatalog = new IcebergHiveCatalog(CATALOG_NAME, new Configuration(), DEFAULT_CONFIG); IcebergTable icebergTable = new IcebergTable(1, "srTableName", CATALOG_NAME, "resource_name", "db_name", "table_name", "", Lists.newArrayList(), mockedNativeTableA, Maps.newHashMap()); IcebergMetadata metadata = new IcebergMetadata(CATALOG_NAME, HDFS_ENVIRONMENT, icebergHiveCatalog, Executors.newSingleThreadExecutor(), Executors.newSingleThreadExecutor(), null); Map<ColumnRefOperator, Column> colRefToColumnMetaMap = new HashMap<ColumnRefOperator, Column>(); ColumnRefOperator columnRefOperator1 = new ColumnRefOperator(3, Type.INT, "id", true); ColumnRefOperator columnRefOperator2 = new ColumnRefOperator(4, Type.STRING, "data", true); colRefToColumnMetaMap.put(columnRefOperator1, new Column("id", Type.INT)); colRefToColumnMetaMap.put(columnRefOperator2, new Column("data", Type.STRING)); mockedNativeTableA.newFastAppend().appendFile(FILE_A).commit(); mockedNativeTableA.newFastAppend().appendFile(FILE_A).commit(); mockedNativeTableA.refresh(); new ConnectContext().setThreadLocalInfo(); OptimizerContext context = new OptimizerContext(new Memo(), new ColumnRefFactory(), ConnectContext.get()); context.getSessionVariable().setEnableIcebergColumnStatistics(true); TableVersionRange version = TableVersionRange.withEnd(Optional.of( mockedNativeTableA.currentSnapshot().snapshotId())); Statistics statistics = metadata.getTableStatistics(context, icebergTable, colRefToColumnMetaMap, null, null, -1, version); Assert.assertEquals(2.0, statistics.getOutputRowCount(), 0.001); }
// Delegates support judgment to the judge engine; statements that are not
// supported in the current cluster state are rejected with ClusterStateException.
@Override public void check(final SQLStatement sqlStatement) { ShardingSpherePreconditions.checkState(judgeEngine.isSupported(sqlStatement), () -> new ClusterStateException(getType(), sqlStatement)); }
// A DML statement is not allowed while the proxy is in the unavailable state.
@Test void assertExecuteWithUnsupportedSQL() { assertThrows(ClusterStateException.class, () -> new UnavailableProxyState().check(mock(DMLStatement.class))); }
// Appends an item to the ring buffer. When a store is enabled the item is
// persisted under the next tail sequence BEFORE the in-memory ring is mutated,
// so store and ring stay aligned; a sequence mismatch afterwards indicates
// unsynchronized concurrent mutation and is surfaced as IllegalStateException.
// Store failures are wrapped in HazelcastException.
public long add(T item) { final long nextSequence = ringbuffer.peekNextTailSequence(); if (store.isEnabled()) { try { store.store(nextSequence, convertToData(item)); } catch (Exception e) { throw new HazelcastException(e); } } final long storedSequence = addInternal(item); if (storedSequence != nextSequence) { throw new IllegalStateException("Sequence we stored the item with and Ringbuffer sequence differs. Was the " + "Ringbuffer mutated from multiple threads?"); } return storedSequence; }
// After two adds the head stays at sequence 0 and the tail advances to 1.
@Test public void add() { RingbufferConfig config = new RingbufferConfig("foo").setCapacity(10); RingbufferContainer<Data, Data> ringbuffer = getRingbufferContainer(config); ringbuffer.add(toData("foo")); ringbuffer.add(toData("bar")); assertEquals(1, ringbuffer.tailSequence()); assertEquals(0, ringbuffer.headSequence()); }
// Resolves the identity provider for the init-context request; if resolution
// fails the response has already been written, otherwise the provider handles
// the request.
@Override public void doFilter(HttpRequest request, HttpResponse response, FilterChain chain) { IdentityProvider provider = resolveProviderOrHandleResponse(request, response, INIT_CONTEXT); if (provider != null) { handleProvider(request, response, provider); } }
// Provider resolution must honor a non-root servlet context path ("/sonarqube").
@Test public void do_filter_with_context() { when(request.getContextPath()).thenReturn("/sonarqube"); when(request.getRequestURI()).thenReturn("/sonarqube/sessions/init/" + OAUTH2_PROVIDER_KEY); identityProviderRepository.addIdentityProvider(oAuth2IdentityProvider); underTest.doFilter(request, response, chain); assertOAuth2InitCalled(); verifyNoInteractions(authenticationEvent); }
@Override public void onRenamed(Item item, String oldName, String newName) { // bug 5077308 - Display name field should be cleared when you rename a job. if (item instanceof AbstractItem) { AbstractItem abstractItem = (AbstractItem) item; if (oldName.equals(abstractItem.getDisplayName())) { // the user renamed the job, but the old project name which is shown as the // displayname if no displayname was set, has been set into the displayname field. // This means that the displayname was never set, so we want to set it // to null as it was before try { LOGGER.info(String.format("onRenamed():Setting displayname to null for item.name=%s", item.getName())); abstractItem.setDisplayName(null); } catch (IOException ioe) { LOGGER.log(Level.WARNING, String.format("onRenamed():Exception while trying to clear the displayName for Item.name:%s", item.getName()), ioe); } } } }
// When the display name equals the old job name, renaming clears it so the
// effective display name falls back to the new job name.
@Test public void testOnRenamedOldNameEqualsDisplayName() throws Exception { DisplayNameListener listener = new DisplayNameListener(); final String oldName = "old job name"; final String newName = "new job name"; StubJob src = new StubJob(); src.doSetName(newName); src.setDisplayName(oldName); listener.onRenamed(src, oldName, newName); assertEquals(newName, src.getDisplayName()); }
// Replaces the cached upstream list for a selector with the status-filtered
// new list: first a remove-check task fires for every previously cached
// upstream missing from the new list, then an add-check task fires for every
// new upstream not previously cached, and finally the cache entry is swapped.
public void submit(final String selectorId, final List<Upstream> upstreamList) { List<Upstream> validUpstreamList = upstreamList.stream().filter(Upstream::isStatus).collect(Collectors.toList()); List<Upstream> existUpstream = MapUtils.computeIfAbsent(UPSTREAM_MAP, selectorId, k -> Lists.newArrayList()); existUpstream.stream().filter(upstream -> !validUpstreamList.contains(upstream)) .forEach(upstream -> task.triggerRemoveOne(selectorId, upstream)); validUpstreamList.stream().filter(upstream -> !existUpstream.contains(upstream)) .forEach(upstream -> task.triggerAddOne(selectorId, upstream)); UPSTREAM_MAP.put(selectorId, validUpstreamList); }
// Submitting overlapping lists in sequence exercises both the removal branch
// (previously cached upstreams that disappear) and the addition branch
// (upstreams appearing for the first time).
@Test @Order(2) public void submitTest() { final UpstreamCacheManager upstreamCacheManager = UpstreamCacheManager.getInstance(); List<Upstream> upstreamList = new ArrayList<>(2); upstreamCacheManager.submit(SELECTOR_ID, upstreamList); upstreamList.add(Upstream.builder().url("url").status(true).build()); upstreamList.add(Upstream.builder().status(true).build()); upstreamCacheManager.submit(SELECTOR_ID, upstreamList); // hit `existUpstream.stream().filter` upstreamList.clear(); upstreamList.add(Upstream.builder().url("url2").status(true).build()); upstreamList.add(Upstream.builder().url("url").status(true).build()); upstreamCacheManager.submit(SELECTOR_ID, upstreamList); }
@Override
public void write(final MySQLPacketPayload payload, final Object value) {
    // Normalize each supported numeric type to a primitive long, then emit it
    // once as an 8-byte integer. Any other type fails the final Long cast,
    // matching the original behavior.
    final long longValue;
    if (value instanceof BigDecimal) {
        longValue = ((BigDecimal) value).longValue();
    } else if (value instanceof Integer) {
        longValue = ((Integer) value).longValue();
    } else if (value instanceof BigInteger) {
        longValue = ((BigInteger) value).longValue();
    } else {
        longValue = (Long) value;
    }
    payload.writeInt8(longValue);
}
// Writing a Long must be forwarded to the payload as an 8-byte integer.
@Test void assertWriteWithLong() { new MySQLInt8BinaryProtocolValue().write(payload, 1L); verify(payload).writeInt8(1L); }
// Removes the mapping for `key`. The removal cause is classified while the
// node is locked: COLLECTED when the (weak/soft) value was already reclaimed,
// EXPIRED when past expiration, EXPLICIT otherwise. Eviction listeners fire
// inside the lock for evicted causes; the removal listener fires after the
// map update. The old value is returned only for an EXPLICIT removal —
// collected/expired entries behave as if absent (null).
@Override public @Nullable V remove(Object key) { @SuppressWarnings("unchecked") K castKey = (K) key; @SuppressWarnings({"rawtypes", "unchecked"}) Node<K, V>[] node = new Node[1]; @SuppressWarnings("unchecked") V[] oldValue = (V[]) new Object[1]; RemovalCause[] cause = new RemovalCause[1]; Object lookupKey = nodeFactory.newLookupKey(key); data.computeIfPresent(lookupKey, (k, n) -> { synchronized (n) { requireIsAlive(key, n); oldValue[0] = n.getValue(); if (oldValue[0] == null) { cause[0] = RemovalCause.COLLECTED; } else if (hasExpired(n, expirationTicker().read())) { cause[0] = RemovalCause.EXPIRED; } else { cause[0] = RemovalCause.EXPLICIT; } if (cause[0].wasEvicted()) { notifyEviction(castKey, oldValue[0], cause[0]); } discardRefresh(lookupKey); node[0] = n; n.retire(); } return null; }); if (cause[0] != null) { afterWrite(new RemovalTask(node[0])); notifyRemoval(castKey, oldValue[0], cause[0]); } return (cause[0] == RemovalCause.EXPLICIT) ? oldValue[0] : null; }
// remove() must still locate and report the value even after the key's
// equals/hashCode are mutated post-insertion (broken-equality scenario).
@CheckMaxLogLevel(ERROR) @Test(dataProvider = "caches") @CacheSpec(population = Population.EMPTY, keys = ReferenceType.STRONG) public void brokenEquality_remove( BoundedLocalCache<MutableInt, Int> cache, CacheContext context) { testForBrokenEquality(cache, context, key -> { var value = cache.remove(key); assertThat(value).isEqualTo(context.absentValue()); }); }
// Synchronously reads the intersection of this set with the named sets by
// blocking on the async variant.
@Override public Set<V> readIntersection(String... names) { return get(readIntersectionAsync(names)); }
// Intersection of {5,6,7} with two sets that both contain 5 is {5}; the
// source set itself must remain unchanged.
@Test public void testReadIntersection() { RSet<Integer> set = redisson.getSet("set"); set.add(5); set.add(7); set.add(6); RSet<Integer> set1 = redisson.getSet("set1"); set1.add(1); set1.add(2); set1.add(5); RSet<Integer> set2 = redisson.getSet("set2"); set2.add(3); set2.add(4); set2.add(5); assertThat(set.readIntersection("set1", "set2")).containsOnly(5); assertThat(set).containsOnly(6, 5, 7); }
// Convenience overload: extracts the MapFunction's output type without a
// function name and without lenient missing-type handling.
@PublicEvolving public static <IN, OUT> TypeInformation<OUT> getMapReturnTypes( MapFunction<IN, OUT> mapInterface, TypeInformation<IN> inType) { return getMapReturnTypes(mapInterface, inType, null, false); }
// A POJO binding both of its type variables to the same variable must resolve
// to a 2-arity tuple type of (String, String).
@SuppressWarnings({"unchecked", "rawtypes"}) @Test void testSameGenericVariable() { RichMapFunction<?, ?> function = new RichMapFunction<SameTypeVariable<String>, SameTypeVariable<String>>() { private static final long serialVersionUID = 1L; @Override public SameTypeVariable<String> map(SameTypeVariable<String> value) throws Exception { return null; } }; TypeInformation<?> ti = TypeExtractor.getMapReturnTypes( function, (TypeInformation) TypeInformation.of(new TypeHint<Tuple2<String, String>>() {})); assertThat(ti.isTupleType()).isTrue(); assertThat(ti.getArity()).isEqualTo(2); TupleTypeInfo<?> tti = (TupleTypeInfo<?>) ti; assertThat(tti.getTypeClass()).isEqualTo(SameTypeVariable.class); assertThat(tti.getTypeAt(0)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO); assertThat(tti.getTypeAt(1)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO); }
// Selects up to `expectedSize` buffers across all subpartitions in
// consumption-priority order, using a priority queue of per-subpartition
// iterators (each seeded with that subpartition's next buffer index to
// consume). The result is keyed by subpartition id (TreeMap => ascending),
// and each per-subpartition list is reversed at the end so buffers are
// ordered by (subpartitionId, bufferIndex) ascending.
public static TreeMap<Integer, List<BufferIndexAndChannel>> getBuffersByConsumptionPriorityInOrder( List<Integer> nextBufferIndexToConsume, TreeMap<Integer, Deque<BufferIndexAndChannel>> subpartitionToAllBuffers, int expectedSize) { if (expectedSize <= 0) { return new TreeMap<>(); } PriorityQueue<BufferConsumptionPriorityIterator> heap = new PriorityQueue<>(); subpartitionToAllBuffers.forEach( (subpartitionId, buffers) -> { if (!buffers.isEmpty()) { heap.add( new BufferConsumptionPriorityIterator( buffers, nextBufferIndexToConsume.get(subpartitionId))); } }); TreeMap<Integer, List<BufferIndexAndChannel>> subpartitionToHighPriorityBuffers = new TreeMap<>(); for (int i = 0; i < expectedSize; i++) { if (heap.isEmpty()) { break; } BufferConsumptionPriorityIterator bufferConsumptionPriorityIterator = heap.poll(); BufferIndexAndChannel bufferIndexAndChannel = bufferConsumptionPriorityIterator.next(); subpartitionToHighPriorityBuffers .computeIfAbsent(bufferIndexAndChannel.getChannel(), k -> new ArrayList<>()) .add(bufferIndexAndChannel); // if this iterator has next, re-added it. if (bufferConsumptionPriorityIterator.hasNext()) { heap.add(bufferConsumptionPriorityIterator); } } // treeMap will ensure that the key are sorted by subpartitionId // ascending. Within the same subpartition, the larger the bufferIndex, // the higher the consumption priority, reserve the value so that buffers are ordered // by (subpartitionId, bufferIndex) ascending. subpartitionToHighPriorityBuffers.values().forEach(Collections::reverse); return subpartitionToHighPriorityBuffers; }
// A non-positive expected size must short-circuit to an empty result.
@Test void testGetBuffersByConsumptionPriorityInOrderEmptyExpectedSize() { TreeMap<Integer, Deque<BufferIndexAndChannel>> subpartitionToAllBuffers = new TreeMap<>(); subpartitionToAllBuffers.put(0, createBufferIndexAndChannelsDeque(0, 0, 1)); subpartitionToAllBuffers.put(1, createBufferIndexAndChannelsDeque(1, 2, 4)); TreeMap<Integer, List<BufferIndexAndChannel>> buffersByConsumptionPriorityInOrder = HsSpillingStrategyUtils.getBuffersByConsumptionPriorityInOrder( Arrays.asList(0, 1), subpartitionToAllBuffers, 0); assertThat(buffersByConsumptionPriorityInOrder).isEmpty(); }
// Decodes one MySQL command packet according to its command type.
// COM_STMT_EXECUTE peeks the statement id (little-endian int at the current
// reader index, without consuming it) to look up the prepared statement and
// obtain its parameter count. Types without a dedicated packet class map to
// MySQLUnsupportedCommandPacket.
public static MySQLCommandPacket newInstance(final MySQLCommandPacketType commandPacketType, final MySQLPacketPayload payload, final ConnectionSession connectionSession) { switch (commandPacketType) { case COM_QUIT: return new MySQLComQuitPacket(); case COM_INIT_DB: return new MySQLComInitDbPacket(payload); case COM_FIELD_LIST: return new MySQLComFieldListPacket(payload); case COM_QUERY: return new MySQLComQueryPacket(payload); case COM_STMT_PREPARE: return new MySQLComStmtPreparePacket(payload); case COM_STMT_EXECUTE: MySQLServerPreparedStatement serverPreparedStatement = connectionSession.getServerPreparedStatementRegistry().getPreparedStatement(payload.getByteBuf().getIntLE(payload.getByteBuf().readerIndex())); return new MySQLComStmtExecutePacket(payload, serverPreparedStatement.getSqlStatementContext().getSqlStatement().getParameterCount()); case COM_STMT_SEND_LONG_DATA: return new MySQLComStmtSendLongDataPacket(payload); case COM_STMT_RESET: return new MySQLComStmtResetPacket(payload); case COM_STMT_CLOSE: return new MySQLComStmtClosePacket(payload); case COM_SET_OPTION: return new MySQLComSetOptionPacket(payload); case COM_PING: return new MySQLComPingPacket(); case COM_RESET_CONNECTION: return new MySQLComResetConnectionPacket(); default: return new MySQLUnsupportedCommandPacket(commandPacketType); } }
// COM_QUERY payloads must be materialized as MySQLComQueryPacket.
@Test void assertNewInstanceWithComQueryPacket() { when(payload.readStringEOF()).thenReturn("SHOW TABLES"); assertThat(MySQLCommandPacketFactory.newInstance(MySQLCommandPacketType.COM_QUERY, payload, connectionSession), instanceOf(MySQLComQueryPacket.class)); }
// Registers the divide-plugin context decorator as a Spring bean.
@Bean public ShenyuContextDecorator divideShenyuContextDecorator() { return new DivideShenyuContextDecorator(); }
// The auto-configuration must expose a non-null bean named
// "divideShenyuContextDecorator".
@Test public void testDivideShenyuContextDecorator() { applicationContextRunner.run(context -> { ShenyuContextDecorator decorator = context.getBean("divideShenyuContextDecorator", ShenyuContextDecorator.class); assertNotNull(decorator); } ); }
// Delegates the hotel-scoped rating lookup to the repository.
@Override public List<Calificacion> getCalificacionesByHotelId(String hotelId) { return calificacionRepository.findByHotelId(hotelId); }
// The service must return exactly the repository results for "hotel1" and
// query the repository exactly once.
@Test void testGetCalificacionesByHotelId() { when(calificacionRepository.findByHotelId("hotel1")).thenReturn(Arrays.asList(calificacion1, calificacion3)); List<Calificacion> calificaciones = calificacionService.getCalificacionesByHotelId("hotel1"); assertThat(calificaciones).hasSize(2); assertThat(calificaciones).contains(calificacion1, calificacion3); verify(calificacionRepository, times(1)).findByHotelId("hotel1"); }
// Renders the owner qualifier for a routed SQL unit: when the owner name is
// the logical table itself, substitute the routed actual table name (falling
// back to the lower-cased logical name when the route has none), wrapped in
// the table's quote character and followed by a dot. Otherwise fall back to
// the plain toString() rendering.
@Override public String toString(final RouteUnit routeUnit) { if (null != ownerName && !Strings.isNullOrEmpty(ownerName.getValue()) && tableName.getValue().equals(ownerName.getValue())) { Set<String> actualTableNames = routeUnit.getActualTableNames(tableName.getValue()); String actualTableName = actualTableNames.isEmpty() ? tableName.getValue().toLowerCase() : actualTableNames.iterator().next(); return tableName.getQuoteCharacter().wrap(actualTableName) + "."; } return toString(); }
// With no route unit and a null owner, the token must render as an empty string.
@Test void assertOwnerTokenWithNoRouteUnitAndOwnerNameIsEmpty() { OwnerToken ownerToken = new OwnerToken(0, 1, null, new IdentifierValue("t_user_detail")); assertThat(ownerToken.toString(), is("")); assertTokenGrid(ownerToken); }
public static TopicMessageType getMessageType(SendMessageRequestHeader requestHeader) {
    // Classify the message by inspecting the properties carried in the header.
    Map<String, String> properties = MessageDecoder.string2messageProperties(requestHeader.getProperties());
    // A prepared-transaction flag takes precedence over everything else.
    if (Boolean.parseBoolean(properties.get(MessageConst.PROPERTY_TRANSACTION_PREPARED))) {
        return TopicMessageType.TRANSACTION;
    }
    // A sharding key marks ordered (FIFO) messages.
    if (properties.containsKey(MessageConst.PROPERTY_SHARDING_KEY)) {
        return TopicMessageType.FIFO;
    }
    // Any of the delay-related properties marks a delayed message.
    boolean delayed = properties.get("__STARTDELIVERTIME") != null
            || properties.get(MessageConst.PROPERTY_DELAY_TIME_LEVEL) != null
            || properties.get(MessageConst.PROPERTY_TIMER_DELIVER_MS) != null
            || properties.get(MessageConst.PROPERTY_TIMER_DELAY_SEC) != null
            || properties.get(MessageConst.PROPERTY_TIMER_DELAY_MS) != null;
    return delayed ? TopicMessageType.DELAY : TopicMessageType.NORMAL;
}
// Properties containing none of the recognized type markers default to NORMAL.
@Test public void testGetMessageTypeWithUnknownProperty() { SendMessageRequestHeader requestHeader = new SendMessageRequestHeader(); Map<String, String> map = new HashMap<>(); map.put("unknownProperty", "unknownValue"); requestHeader.setProperties(MessageDecoder.messageProperties2String(map)); TopicMessageType result = BrokerMetricsManager.getMessageType(requestHeader); assertThat(TopicMessageType.NORMAL).isEqualTo(result); }
// Serializes a full CruiseConfig to XML on `output`: merged (non-local)
// configurations are refused outright; preprocessing/validation runs unless
// explicitly skipped; the DOM is then built, validated against the XSD and
// the DOM rules, and finally written out.
public void write(CruiseConfig configForEdit, OutputStream output, boolean skipPreprocessingAndValidation) throws Exception { LOGGER.debug("[Serializing Config] Starting to write. Validation skipped? {}", skipPreprocessingAndValidation); MagicalGoConfigXmlLoader loader = new MagicalGoConfigXmlLoader(configCache, registry); if (!configForEdit.getOrigin().isLocal()) { throw new GoConfigInvalidException(configForEdit, "Attempted to save merged configuration with partials"); } if (!skipPreprocessingAndValidation) { loader.preprocessAndValidate(configForEdit); LOGGER.debug("[Serializing Config] Done with cruise config validators."); } Document document = createEmptyCruiseConfigDocument(); write(configForEdit, document.getRootElement(), configCache, registry); LOGGER.debug("[Serializing Config] XSD and DOM validation."); verifyXsdValid(document); MagicalGoConfigXmlLoader.validateDom(document.getRootElement(), registry); LOGGER.info("[Serializing Config] Generating config partial."); XmlUtils.writeXml(document, output); LOGGER.debug("[Serializing Config] Finished writing config partial."); }
// Regression guard (gocd#3551): XSD pattern validation of a long exec command
// with trailing whitespace must fail quickly (1s timeout) with the
// \S(.*\S)? pattern message — the issue only reproduced on long strings.
@Test @Timeout(1) public void shouldValidateLeadingAndTrailingSpacesOnExecCommandInReasonableTime() throws Exception { // See https://github.com/gocd/gocd/issues/3551 // This is only reproducible on longish strings, so don't try shortening the exec task length... String longPath = StringUtils.repeat("f", 100); CruiseConfig config = GoConfigMother.configWithPipelines("pipeline1"); config.initializeServer(); config.findJob("pipeline1", "stage", "job").addTask(new ExecTask(longPath + " ", "arg1", (String) null)); output = new ByteArrayOutputStream(); try { xmlWriter.write(config, output, false); fail("expected to blow up"); } catch (XsdValidationException e) { assertThat(e.getMessage(), containsString("should conform to the pattern - \\S(.*\\S)?")); } }
// Resolves the numeric area id for an IP via the region searcher; the input
// is trimmed before lookup. Checked lookup/parse exceptions are rethrown
// unchecked via @SneakyThrows.
@SneakyThrows public static Integer getAreaId(String ip) { return Integer.parseInt(SEARCHER.search(ip.trim())); }
// An IP inside the 120.202.4.0-255 block must map to area id 420600.
@Test public void testGetAreaId_string() { // 120.202.4.0|120.202.4.255|420600 Integer areaId = IPUtils.getAreaId("120.202.4.50"); assertEquals(420600, areaId); }
// Exposes the endpoint's MySQL-specific embedded Debezium configuration.
@Override public MySqlConnectorEmbeddedDebeziumConfiguration getConfiguration() { return configuration; }
// Endpoint creation must merge URI query options with the parameter map —
// map entries (e.g. offsetStorageFileName) take precedence over URI values —
// and additionalProperties.* entries must pass through to the config.
@Test void testIfMySqlEndpointCreatedWithConfig() throws Exception { final Map<String, Object> params = new HashMap<>(); params.put("offsetStorageFileName", "/offset_test_file"); params.put("databaseHostname", "localhost"); params.put("databaseUser", "dbz"); params.put("databasePassword", "pwd"); params.put("topicPrefix", "test"); params.put("databaseServerId", "1234"); params.put("schemaHistoryInternalFileFilename", "/db_history_file_test"); params.put("additionalProperties.database.connectionTimeZone", "CET"); final String remaining = "test_name"; final String uri = "debezium:mysql?name=test_name&offsetStorageFileName=/test&" + "topicPrefix=localhost&databaseServerId=1234&databaseUser=dbz&databasePassword=pwd&" + "databaseServerName=test&schemaHistoryInternalFileFilename=/test"; try (final DebeziumComponent debeziumComponent = new DebeziumMySqlComponent(new DefaultCamelContext())) { debeziumComponent.start(); final DebeziumEndpoint debeziumEndpoint = debeziumComponent.createEndpoint(uri, remaining, params); assertNotNull(debeziumEndpoint); // test for config final MySqlConnectorEmbeddedDebeziumConfiguration configuration = (MySqlConnectorEmbeddedDebeziumConfiguration) debeziumEndpoint.getConfiguration(); assertEquals("test_name", configuration.getName()); assertEquals("/offset_test_file", configuration.getOffsetStorageFileName()); assertEquals("localhost", configuration.getDatabaseHostname()); assertEquals("dbz", configuration.getDatabaseUser()); assertEquals("pwd", configuration.getDatabasePassword()); assertEquals("test", configuration.getTopicPrefix()); assertEquals(1234L, configuration.getDatabaseServerId()); assertEquals("/db_history_file_test", configuration.getSchemaHistoryInternalFileFilename()); assertEquals("CET", configuration.getAdditionalProperties().get("database.connectionTimeZone")); } }
// Reads a required JSON array of strings into an immutable, order-preserving
// set. A missing property fails with IllegalArgumentException; non-array or
// non-text elements fail inside the iterator with the same exception type.
public static Set<String> getStringSet(String property, JsonNode node) { Preconditions.checkArgument(node.has(property), "Cannot parse missing set: %s", property); return ImmutableSet.<String>builder() .addAll(new JsonStringArrayIterator(property, node)) .build(); }
// Covers the missing-property, null-value and mixed-type failure messages
// plus the happy path preserving element order.
@Test public void getStringSet() throws JsonProcessingException { assertThatThrownBy(() -> JsonUtil.getStringSet("items", JsonUtil.mapper().readTree("{}"))) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot parse missing set: items"); assertThatThrownBy( () -> JsonUtil.getStringSet("items", JsonUtil.mapper().readTree("{\"items\": null}"))) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot parse JSON array from non-array value: items: null"); assertThatThrownBy( () -> JsonUtil.getStringSet( "items", JsonUtil.mapper().readTree("{\"items\": [\"23\", 45]}"))) .isInstanceOf(IllegalArgumentException.class) .hasMessage("Cannot parse string from non-text value in items: 45"); assertThat( JsonUtil.getStringSet( "items", JsonUtil.mapper().readTree("{\"items\": [\"23\", \"45\"]}"))) .containsExactlyElementsOf(Arrays.asList("23", "45")); }
// Returns the shared singleton expression instance.
public static SqlMethodExpr get() { return instance; }
// get() must always hand back the same singleton instance.
@Test public void testGet() { SqlMethodExpr instance = SqlMethodExpr.get(); assertEquals(instance, SqlMethodExpr.get()); }
// Synchronized accessor for the topology-scoped configuration.
public synchronized TopologyConfig topologyConfigs() { return topologyConfigs; }
// A TopologyConfig built with empty topology-level override Properties must
// inherit every value from the global StreamsConfig (cache size, task idle,
// timeouts, buffering, extractor and deserialization handler classes).
@Test public void shouldNotOverrideGlobalStreamsConfigWhenGivenUnnamedTopologyProps() { final Properties streamsProps = StreamsTestUtils.getStreamsConfig(); streamsProps.put(StreamsConfig.STATESTORE_CACHE_MAX_BYTES_CONFIG, 12345L); streamsProps.put(StreamsConfig.MAX_TASK_IDLE_MS_CONFIG, 500L); streamsProps.put(StreamsConfig.TASK_TIMEOUT_MS_CONFIG, 1000L); streamsProps.put(StreamsConfig.BUFFERED_RECORDS_PER_PARTITION_CONFIG, 15); streamsProps.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, MockTimestampExtractor.class); streamsProps.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG, LogAndContinueExceptionHandler.class); final StreamsConfig config = new StreamsConfig(streamsProps); final InternalTopologyBuilder topologyBuilder = new InternalTopologyBuilder( new TopologyConfig( "my-topology", config, new Properties()) ); assertThat(topologyBuilder.topologyConfigs().cacheSize, is(12345L)); assertThat(topologyBuilder.topologyConfigs().getTaskConfig().maxTaskIdleMs, is(500L)); assertThat(topologyBuilder.topologyConfigs().getTaskConfig().taskTimeoutMs, is(1000L)); assertThat(topologyBuilder.topologyConfigs().getTaskConfig().maxBufferedSize, is(15)); assertThat(topologyBuilder.topologyConfigs().getTaskConfig().timestampExtractor.getClass(), is(MockTimestampExtractor.class)); assertThat(topologyBuilder.topologyConfigs().getTaskConfig().deserializationExceptionHandler.getClass(), is(LogAndContinueExceptionHandler.class)); }
// Parses an URL-encoded "key:value,value ..." query string into a multimap of
// DB fields. Empty or "*" queries match everything. Bare terms (no ':') go to
// the default field; a leading '-' on a key negates all its values; values
// are comma-split; keys with no value (e.g. "action:") are skipped; keys not
// in the field mapping are both recorded as disallowed and searched against
// the default field.
public SearchQuery parse(String encodedQueryString) { if (Strings.isNullOrEmpty(encodedQueryString) || "*".equals(encodedQueryString)) { return new SearchQuery(encodedQueryString); } final var queryString = URLDecoder.decode(encodedQueryString, StandardCharsets.UTF_8); final Matcher matcher = querySplitterMatcher(requireNonNull(queryString).trim()); final ImmutableMultimap.Builder<String, FieldValue> builder = ImmutableMultimap.builder(); final ImmutableSet.Builder<String> disallowedKeys = ImmutableSet.builder(); while (matcher.find()) { final String entry = matcher.group(); if (!entry.contains(":")) { builder.put(withPrefixIfNeeded(defaultField), createFieldValue(defaultFieldKey.getFieldType(), entry, false)); continue; } final Iterator<String> entryFields = FIELD_VALUE_SPLITTER.splitToList(entry).iterator(); checkArgument(entryFields.hasNext(), INVALID_ENTRY_MESSAGE, entry); final String key = entryFields.next(); // Skip if there are no valid k/v pairs. (i.e. "action:") if (!entryFields.hasNext()) { continue; } final boolean negate = key.startsWith("-"); final String cleanKey = key.replaceFirst("^-", ""); final String value = entryFields.next(); VALUE_SPLITTER.splitToList(value).forEach(v -> { if (!dbFieldMapping.containsKey(cleanKey)) { disallowedKeys.add(cleanKey); } final SearchQueryField translatedKey = dbFieldMapping.get(cleanKey); if (translatedKey != null) { builder.put(withPrefixIfNeeded(translatedKey.getDbField()), createFieldValue(translatedKey.getFieldType(), v, negate)); } else { builder.put(withPrefixIfNeeded(defaultField), createFieldValue(defaultFieldKey.getFieldType(), v, negate)); } }); checkArgument(!entryFields.hasNext(), INVALID_ENTRY_MESSAGE, entry); } return new SearchQuery(queryString, builder.build(), disallowedKeys.build()); }
// An URL-encoded "name:foo" must decode into a single allowed field mapping
// that translates into a DB query on exactly the "name" field.
@Test void decodeQuery() throws UnsupportedEncodingException { SearchQueryParser parser = new SearchQueryParser("defaultfield", ImmutableSet.of("name", "id")); final String urlEncodedQuery = URLEncoder.encode("name:foo", StandardCharsets.UTF_8.name()); final SearchQuery query = parser.parse(urlEncodedQuery); final Multimap<String, SearchQueryParser.FieldValue> queryMap = query.getQueryMap(); assertThat(queryMap.size()).isEqualTo(1); assertThat(queryMap.get("name")).containsOnly(new SearchQueryParser.FieldValue("foo", false)); assertThat(query.hasDisallowedKeys()).isFalse(); assertThat(query.getDisallowedKeys()).isEmpty(); final DBQuery.Query dbQuery = query.toDBQuery(); final Collection<String> fieldNamesUsed = extractFieldNames(dbQuery.conditions()); assertThat(fieldNamesUsed).containsExactly("name"); }
public static <T, S> T convert(S source, Class<T> clazz) {
    // Null-safe mapping: a null source short-circuits to null instead of
    // being handed to the bean mapper.
    return source == null ? null : BEAN_MAPPER_BUILDER.map(source, clazz);
}
// Converting a bean to Map must expose each bean property under its field name.
@Test public void beanToMapConvertTest() { // 测试BeanToMap final Person person = new Person(); person.setName("Hippo4j"); person.setAge(1); person.setAddress("hippo4j.cn"); person.setSize(999); final Map<?, ?> convert = BeanUtil.convert(person, Map.class); Assert.assertEquals("Hippo4j", convert.get("name")); Assert.assertEquals(1, convert.get("age")); Assert.assertEquals("hippo4j.cn", convert.get("address")); Assert.assertEquals(999, convert.get("size")); }
/**
 * Validates the resource scheduler's memory allocation settings: the minimum
 * allocation must be positive and must not exceed the maximum allocation.
 *
 * @param conf the configuration carrying the min/max allocation properties
 * @throws YarnRuntimeException when the configured values are inconsistent
 */
public static void validateMemoryAllocation(Configuration conf) {
    final int minMem = conf.getInt(
        YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
        YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
    final int maxMem = conf.getInt(
        YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB,
        YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
    // Accept only: 0 < min <= max.
    if (minMem > 0 && minMem <= maxMem) {
        return;
    }
    throw new YarnRuntimeException("Invalid resource scheduler memory"
        + " allocation configuration"
        + ", " + YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB
        + "=" + minMem
        + ", " + YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB
        + "=" + maxMem + ", min and max should be greater than 0"
        + ", max should be no smaller than min.");
}
// A configured minimum allocation of 0 must be rejected with YarnRuntimeException
// (declared via the expected attribute; fail() fires only if no exception is thrown).
@Test (expected = YarnRuntimeException.class)
public void testValidateMemoryAllocationInvalidMinMem() {
    Map<String, String> configs = new HashMap();
    configs.put(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, "0");
    Configuration config = CapacitySchedulerConfigGeneratorForTest
        .createConfiguration(configs);
    CapacitySchedulerConfigValidator.validateMemoryAllocation(config);
    fail(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB + " should be > 0");
}
/**
 * Maps a "raw"/derived aggregation function type to the underlying stored
 * (intermediate) aggregation type it shares its serialized form with.
 * Types without a raw variant are returned unchanged (default branch).
 */
public static AggregationFunctionType getStoredType(AggregationFunctionType aggregationType) {
    switch (aggregationType) {
        case DISTINCTCOUNTRAWHLL:
            return AggregationFunctionType.DISTINCTCOUNTHLL;
        case PERCENTILERAWEST:
            return AggregationFunctionType.PERCENTILEEST;
        case PERCENTILERAWTDIGEST:
            return AggregationFunctionType.PERCENTILETDIGEST;
        case DISTINCTCOUNTRAWTHETASKETCH:
            return AggregationFunctionType.DISTINCTCOUNTTHETASKETCH;
        case DISTINCTCOUNTRAWHLLPLUS:
            return AggregationFunctionType.DISTINCTCOUNTHLLPLUS;
        // All three tuple-sketch variants share the tuple-sketch stored form.
        case DISTINCTCOUNTRAWINTEGERSUMTUPLESKETCH:
        case AVGVALUEINTEGERSUMTUPLESKETCH:
        case SUMVALUESINTEGERSUMTUPLESKETCH:
            return AggregationFunctionType.DISTINCTCOUNTTUPLESKETCH;
        case DISTINCTCOUNTRAWCPCSKETCH:
            return AggregationFunctionType.DISTINCTCOUNTCPCSKETCH;
        case DISTINCTCOUNTRAWULL:
            return AggregationFunctionType.DISTINCTCOUNTULL;
        default:
            return aggregationType;
    }
}
// Checks the raw-to-stored aggregation type mapping for representative cases,
// plus identity behavior for types without a raw variant.
@Test
public void testGetStoredType() {
    assertEquals(getStoredType(AggregationFunctionType.DISTINCTCOUNTRAWHLL), AggregationFunctionType.DISTINCTCOUNTHLL);
    assertEquals(getStoredType(AggregationFunctionType.PERCENTILERAWTDIGEST), AggregationFunctionType.PERCENTILETDIGEST);
    assertEquals(getStoredType(AggregationFunctionType.DISTINCTCOUNTRAWTHETASKETCH),
        AggregationFunctionType.DISTINCTCOUNTTHETASKETCH);
    assertEquals(getStoredType(AggregationFunctionType.DISTINCTCOUNTRAWINTEGERSUMTUPLESKETCH),
        AggregationFunctionType.DISTINCTCOUNTTUPLESKETCH);
    assertEquals(getStoredType(AggregationFunctionType.SUMVALUESINTEGERSUMTUPLESKETCH),
        AggregationFunctionType.DISTINCTCOUNTTUPLESKETCH);
    assertEquals(getStoredType(AggregationFunctionType.AVGVALUEINTEGERSUMTUPLESKETCH),
        AggregationFunctionType.DISTINCTCOUNTTUPLESKETCH);
    // NOTE(review): this uses the already-stored HLLPLUS type, so it exercises the
    // default (identity) branch rather than the DISTINCTCOUNTRAWHLLPLUS mapping — confirm intent.
    assertEquals(getStoredType(AggregationFunctionType.DISTINCTCOUNTHLLPLUS), AggregationFunctionType.DISTINCTCOUNTHLLPLUS);
    // Default case
    assertEquals(getStoredType(AggregationFunctionType.COUNT), AggregationFunctionType.COUNT);
}
/**
 * Prepares the on-disk layout for launching a worker: verifies the topology
 * code is present, creates the worker's pid/tmp/heartbeat directories and the
 * shared artifacts directory, records the worker user, and creates the
 * artifacts and blobstore symlinks.
 *
 * @throws IOException            on filesystem failures
 * @throws IllegalStateException  if required topology files are missing
 */
protected void setup() throws IOException {
    type.assertFull();
    // Refuse to launch if the topology's code was never (or only partially) downloaded.
    if (!ops.doRequiredTopoFilesExist(conf, topologyId)) {
        LOG.info("Missing topology storm code, so can't launch worker with assignment {} for this supervisor {} on port {} with id {}", assignment, supervisorId, port, workerId);
        throw new IllegalStateException("Not all needed files are here!!!!");
    }
    LOG.info("Setting up {}:{}", supervisorId, workerId);
    ops.forceMkdir(new File(ConfigUtils.workerPidsRoot(conf, workerId)));
    ops.forceMkdir(new File(ConfigUtils.workerTmpRoot(conf, workerId)));
    ops.forceMkdir(new File(ConfigUtils.workerHeartbeatsRoot(conf, workerId)));
    // The artifacts dir is shared per topology/port; only create and chown it once.
    File workerArtifacts = new File(ConfigUtils.workerArtifactsRoot(conf, topologyId, port));
    if (!ops.fileExists(workerArtifacts)) {
        ops.forceMkdir(workerArtifacts);
        ops.setupWorkerArtifactsDir(assignment.get_owner(), workerArtifacts);
    }
    String user = getWorkerUser();
    writeLogMetadata(user);
    // Persist the user so the worker can be cleaned up after a supervisor restart.
    saveWorkerUser(user);
    createArtifactsLink();
    createBlobstoreLinks();
}
// End-to-end check of Container.setup() against mocked filesystem operations:
// directory creation, log-metadata YAML content, worker-user persistence, and
// symlink creation are all verified; blob links must NOT be created here.
@Test
public void testSetup() throws Exception {
    final int port = 8080;
    final String topoId = "test_topology";
    final String workerId = "worker_id";
    final String user = "me";
    final String stormLocal = asAbsPath("tmp", "testing");
    final File workerArtifacts = asAbsFile(stormLocal, topoId, String.valueOf(port));
    final File logMetadataFile = new File(workerArtifacts, "worker.yaml");
    final File workerUserFile = asAbsFile(stormLocal, "workers-users", workerId);
    final File workerRoot = asAbsFile(stormLocal, "workers", workerId);
    final File distRoot = asAbsFile(stormLocal, "supervisor", "stormdist", topoId);
    // Topology config declares both topology- and log-level users/groups; setup()
    // must merge them into the written worker.yaml.
    final Map<String, Object> topoConf = new HashMap<>();
    final List<String> topoUsers = Arrays.asList("t-user-a", "t-user-b");
    final List<String> logUsers = Arrays.asList("l-user-a", "l-user-b");
    final List<String> topoGroups = Arrays.asList("t-group-a", "t-group-b");
    final List<String> logGroups = Arrays.asList("l-group-a", "l-group-b");
    topoConf.put(DaemonConfig.LOGS_GROUPS, logGroups);
    topoConf.put(Config.TOPOLOGY_GROUPS, topoGroups);
    topoConf.put(DaemonConfig.LOGS_USERS, logUsers);
    topoConf.put(Config.TOPOLOGY_USERS, topoUsers);
    final Map<String, Object> superConf = new HashMap<>();
    superConf.put(Config.STORM_LOCAL_DIR, stormLocal);
    superConf.put(Config.STORM_WORKERS_ARTIFACTS_DIR, stormLocal);
    final StringWriter yamlDump = new StringWriter();
    // Mock the FS layer so no real directories/files are touched.
    AdvancedFSOps ops = mock(AdvancedFSOps.class);
    when(ops.doRequiredTopoFilesExist(superConf, topoId)).thenReturn(true);
    when(ops.fileExists(workerArtifacts)).thenReturn(true);
    when(ops.fileExists(workerRoot)).thenReturn(true);
    when(ops.getWriter(logMetadataFile)).thenReturn(yamlDump);
    LocalAssignment la = new LocalAssignment();
    la.set_topology_id(topoId);
    la.set_owner(user);
    ResourceIsolationInterface iso = mock(ResourceIsolationInterface.class);
    MockContainer mc = new MockContainer(ContainerType.LAUNCH, superConf, "SUPERVISOR", 6628, 8080,
        la, iso, workerId, topoConf, ops, new StormMetricsRegistry());
    mc.setup();
    //Initial Setup
    verify(ops).forceMkdir(new File(workerRoot, "pids"));
    verify(ops).forceMkdir(new File(workerRoot, "tmp"));
    verify(ops).forceMkdir(new File(workerRoot, "heartbeats"));
    verify(ops).fileExists(workerArtifacts);
    //Log file permissions
    verify(ops).getWriter(logMetadataFile);
    // Parse the YAML setup() wrote and check the merged users/groups.
    String yamlResult = yamlDump.toString();
    Yaml yaml = new Yaml();
    Map<String, Object> result = yaml.load(yamlResult);
    assertEquals(workerId, result.get("worker-id"));
    assertEquals(user, result.get(Config.TOPOLOGY_SUBMITTER_USER));
    HashSet<String> allowedUsers = new HashSet<>(topoUsers);
    allowedUsers.addAll(logUsers);
    assertEquals(allowedUsers, new HashSet<>(ObjectReader.getStrings(result.get(DaemonConfig.LOGS_USERS))));
    HashSet<String> allowedGroups = new HashSet<>(topoGroups);
    allowedGroups.addAll(logGroups);
    assertEquals(allowedGroups, new HashSet<>(ObjectReader.getStrings(result.get(DaemonConfig.LOGS_GROUPS))));
    //Save the current user to help with recovery
    verify(ops).dump(workerUserFile, user);
    //Create links to artifacts dir
    verify(ops).createSymlink(new File(workerRoot, "artifacts"), workerArtifacts);
    //Create links to blobs
    verify(ops, never()).createSymlink(new File(workerRoot, "resources"), new File(distRoot, "resources"));
}
/**
 * Sets the named JavaBeans property on the wrapped object from a string value.
 * Unknown properties and conversion failures are reported as warnings rather
 * than thrown; a {@code null} value is silently ignored.
 */
public void setProperty(String name, String value) {
    if (value == null) {
        return;
    }
    // Normalize the name per JavaBeans rules, e.g. "CamelCase" -> "camelCase".
    name = Introspector.decapitalize(name);
    PropertyDescriptor prop = getPropertyDescriptor(name);
    if (prop == null) {
        addWarn("No such property [" + name + "] in " + objClass.getName() + ".");
    } else {
        try {
            // Delegate to the descriptor-aware overload for the actual conversion/set.
            setProperty(prop, name, value);
        } catch (PropertySetterException ex) {
            addWarn("Failed to set property [" + name + "] to value \"" + value + "\". ", ex);
        }
    }
}
// A camelCase property name must resolve to the matching bean setter, and
// repeated sets must overwrite the previous value.
@Test
public void testSetCamelProperty() {
    setter.setProperty("camelCase", "trot");
    assertEquals("trot", house.getCamelCase());
    setter.setProperty("camelCase", "gh");
    assertEquals("gh", house.getCamelCase());
}
/**
 * Counts the number of consecutive space characters at the start of the field.
 *
 * @param field the string to inspect; must not be {@code null}
 * @return the index of the first non-space character, or the full length if
 *         the string is empty or all spaces
 */
public static int nrSpacesBefore( String field ) {
  // Scan until the first character that is not a plain ' ' space.
  for ( int i = 0; i < field.length(); i++ ) {
    if ( field.charAt( i ) != ' ' ) {
      return i;
    }
  }
  return field.length();
}
// Covers null input (NPE expected), empty/all-space strings, and leading-space
// counting with and without trailing spaces.
@Test
public void testNrSpacesBefore() {
    try {
        Const.nrSpacesBefore( null );
        fail( "Expected NullPointerException" );
    } catch ( NullPointerException ex ) {
        // Ignore
    }
    assertEquals( 0, Const.nrSpacesBefore( "" ) );
    assertEquals( 1, Const.nrSpacesBefore( " " ) );
    assertEquals( 3, Const.nrSpacesBefore( "   " ) );
    assertEquals( 0, Const.nrSpacesBefore( "test" ) );
    assertEquals( 0, Const.nrSpacesBefore( "test  " ) );
    assertEquals( 3, Const.nrSpacesBefore( "   test" ) );
    assertEquals( 4, Const.nrSpacesBefore( "    test " ) );
}
/**
 * Derives a human-readable duration label from a millisecond duration.
 * The value is expressed in seconds, minutes, hours, days and years, and
 * getMessage() selects the most appropriate unit for display.
 */
public static Result label(long durationInMillis) {
    final double seconds = durationInMillis / 1000.0;
    final double minutes = seconds / 60;
    final double hours = minutes / 60;
    final double days = hours / 24;
    // Uses a 365-day year (leap years ignored).
    final double years = days / 365;
    return getMessage(seconds, minutes, hours, days, years);
}
// A timestamp 4 days in the past must be labelled with the "duration.days" key
// and the day count as its value.
@Test
public void age_in_days() {
    long days = 4;
    DurationLabel.Result result = DurationLabel.label(now() - ago(days * DAY));
    assertThat(result.key()).isEqualTo("duration.days");
    assertThat(result.value()).isEqualTo(days);
}
/**
 * Lists the children of a VFS file. For a connection root of a bucket-based
 * connection the buckets themselves are returned; otherwise the children of
 * the resolved file object are listed (optionally filtered).
 *
 * @throws FileException if the file cannot be resolved
 */
@Override
public List<VFSFile> getFiles( VFSFile file, String filters, VariableSpace space ) throws FileException {
    ConnectionFileName fileName = getConnectionFileName( file );
    // Bucket-backed connections expose their buckets as the root's children.
    if ( fileName.isConnectionRoot() ) {
        VFSConnectionDetails details = getExistingDetails( fileName );
        if ( usesBuckets( details ) ) {
            return getBuckets( file, fileName, details );
        }
    }
    FileObject fileObject;
    try {
        fileObject = getFileObject( file, space );
    } catch ( FileException e ) {
        // Resolution failures are surfaced uniformly as "file not found".
        throw new FileNotFoundException( file.getPath(), TYPE );
    }
    return populateChildren( file, fileObject, filters );
}
// Listing a (non-root) sub-folder must return its two children, in order:
// a file followed by a folder (fixture prepared by the scenario object).
@Test
public void testGetFilesOfSubFolder() throws Exception {
    GetFilesOfSubFolderScenario scenario = new GetFilesOfSubFolderScenario();
    List<VFSFile> files = vfsFileProvider.getFiles( scenario.baseFile, null, mock( VariableSpace.class ) );
    assertNotNull( files );
    assertEquals( 2, files.size() );
    assertFile( files.get( 0 ), scenario.child1FileObject );
    assertFolder( files.get( 1 ), scenario.child2FolderObject );
}
/** Returns the current watermark value tracked by this service (plain accessor). */
@Override
public long currentWatermark() {
    return currentWatermark;
}
// In batch execution the timer service's watermark stays at Long.MIN_VALUE while a
// key is active; timers fire (with the watermark reported as Long.MAX_VALUE) when
// the current key changes or the final MAX_WATERMARK arrives.
@Test
void testCurrentWatermark() throws Exception {
    BatchExecutionKeyedStateBackend<Integer> keyedStatedBackend =
        new BatchExecutionKeyedStateBackend<>(
            KEY_SERIALIZER, new KeyGroupRange(0, 1), new ExecutionConfig());
    InternalTimeServiceManager<Integer> timeServiceManager =
        BatchExecutionInternalTimeServiceManager.create(
            UnregisteredMetricGroups.createUnregisteredTaskMetricGroup()
                .getIOMetricGroup(),
            keyedStatedBackend,
            this.getClass().getClassLoader(),
            new DummyKeyContext(),
            new TestProcessingTimeService(),
            Collections.emptyList(),
            StreamTaskCancellationContext.alwaysRunning());
    List<Long> timers = new ArrayList<>();
    // When a timer fires, the service must report the watermark as MAX_VALUE.
    TriggerWithTimerServiceAccess<Integer, VoidNamespace> eventTimeTrigger =
        TriggerWithTimerServiceAccess.eventTimeTrigger(
            (timer, timerService) -> {
                assertThat(timerService.currentWatermark()).isEqualTo(Long.MAX_VALUE);
                timers.add(timer.getTimestamp());
            });
    InternalTimerService<VoidNamespace> timerService =
        timeServiceManager.getInternalTimerService(
            "test", KEY_SERIALIZER, new VoidNamespaceSerializer(), eventTimeTrigger);
    eventTimeTrigger.setTimerService(timerService);
    assertThat(timerService.currentWatermark()).isEqualTo(Long.MIN_VALUE);
    keyedStatedBackend.setCurrentKey(1);
    timerService.registerEventTimeTimer(VoidNamespace.INSTANCE, 123);
    assertThat(timerService.currentWatermark()).isEqualTo(Long.MIN_VALUE);
    // advancing the watermark to a value different than Long.MAX_VALUE should have no effect
    timeServiceManager.advanceWatermark(new Watermark(1000));
    assertThat(timerService.currentWatermark()).isEqualTo(Long.MIN_VALUE);
    // changing the current key fires all timers
    keyedStatedBackend.setCurrentKey(2);
    assertThat(timerService.currentWatermark()).isEqualTo(Long.MIN_VALUE);
    timerService.registerEventTimeTimer(VoidNamespace.INSTANCE, 124);
    // advancing the watermark to Long.MAX_VALUE should fire remaining key
    timeServiceManager.advanceWatermark(Watermark.MAX_WATERMARK);
    assertThat(timers).containsExactly(123L, 124L);
}
/**
 * Recursively extracts literal/parameter values from an expression tree.
 * Binary and IN expressions are unwrapped to their right-hand side, list
 * expressions contribute each item, and simple segments contribute their own
 * value. Returns {@link Optional#empty()} when nothing could be extracted.
 *
 * Note: the instanceof checks are intentionally independent (not else-if) —
 * an expression matching several interfaces contributes through each branch.
 */
public static Optional<Collection<Comparable<?>>> extractValues(final ExpressionSegment expression, final List<Object> params) {
    Collection<Comparable<?>> result = new LinkedList<>();
    if (expression instanceof BinaryOperationExpression) {
        extractValues(((BinaryOperationExpression) expression).getRight(), params).ifPresent(result::addAll);
    }
    if (expression instanceof InExpression) {
        extractValues(((InExpression) expression).getRight(), params).ifPresent(result::addAll);
    }
    if (expression instanceof ListExpression) {
        ((ListExpression) expression).getItems().forEach(each -> extractValueInSimpleExpressionSegment(each, params).ifPresent(result::add));
    }
    if (expression instanceof SimpleExpressionSegment) {
        extractValueInSimpleExpressionSegment(expression, params).ifPresent(result::add);
    }
    return result.isEmpty() ? Optional.empty() : Optional.of(result);
}
// A literal expression segment must yield a present result containing exactly
// its literal value.
@Test
void assertExtractValuesSimpleExpressionSegment() {
    SimpleExpressionSegment simpleExpressionSegment = new LiteralExpressionSegment(1, 2, "expected");
    List<Object> params = new LinkedList<>();
    assertTrue(ShadowExtractor.extractValues(simpleExpressionSegment, params).isPresent());
    assertThat(ShadowExtractor.extractValues(simpleExpressionSegment, params).get().iterator().next(), is("expected"));
}
/**
 * Returns the {@code DoFnSignature} for the given {@code DoFn} class.
 * Parsing runs at most once per class; results are memoized in signatureCache.
 */
public static <FnT extends DoFn<?, ?>> DoFnSignature getSignature(Class<FnT> fn) {
    return signatureCache.computeIfAbsent(fn, DoFnSignatures::parseSignature);
}
// Signature parsing must reject a non-final @StateId field with a message that
// names the field and does not mention timers.
@Test
public void testStateIdNonFinal() throws Exception {
    thrown.expect(IllegalArgumentException.class);
    thrown.expectMessage("State declarations must be final");
    thrown.expectMessage("Non-final field");
    thrown.expectMessage("myfield");
    thrown.expectMessage(not(mentionsTimers()));
    DoFnSignatures.getSignature(
        new DoFn<KV<String, Integer>, Long>() {
            // Deliberately NOT final — this is the defect under test.
            @StateId("my-id")
            private StateSpec<ValueState<Integer>> myfield = StateSpecs.value(VarIntCoder.of());
            @ProcessElement
            public void foo(ProcessContext context) {}
        }.getClass());
}
/** Returns the segmentation configured for this mining model, or {@code null} if none was set. */
public KiePMMLSegmentation getSegmentation() {
    return segmentation;
}
// getSegmentation() is null on the default-built model and returns the exact
// instance supplied through the builder's withSegmentation().
@Test
void getSegmentation() {
    assertThat(KIE_PMML_MINING_MODEL.getSegmentation()).isNull();
    final KiePMMLSegmentation segmentation = getKiePMMLSegmentation("SEGMENTATION_NAME");
    KIE_PMML_MINING_MODEL = BUILDER.withSegmentation(segmentation).build();
    assertThat(KIE_PMML_MINING_MODEL.getSegmentation()).isEqualTo(segmentation);
}
/**
 * Scans every method of the initialized bean with the recurring-job finder
 * callback; the bean itself is returned unmodified.
 */
@Override
public Object postProcessAfterInitialization(Object bean, String beanName) throws BeansException {
    ReflectionUtils.doWithMethods(bean.getClass(), recurringJobFinderMethodCallback);
    return bean;
}
// A recurring annotation whose interval is disabled must result in deletion of
// the previously registered recurring job, not in scheduling.
@Test
void beansWithMethodsAnnotatedWithRecurringAnnotationIntervalDisabled() {
    // GIVEN
    final RecurringJobPostProcessor recurringJobPostProcessor = getRecurringJobPostProcessor();
    // WHEN
    recurringJobPostProcessor.postProcessAfterInitialization(new MyServiceWithRecurringIntervalDisabled(), "not important");
    // THEN
    verify(jobScheduler).deleteRecurringJob("my-recurring-job");
}
/**
 * REST endpoint: requests an upgrade of the named application, first in the
 * Solr-backed app catalog and then via the YARN service client.
 * Returns 202 ACCEPTED on success or 400 BAD_REQUEST on failure.
 */
@Path("upgrade/{id}")
@PUT
@Consumes(MediaType.APPLICATION_JSON)
@Produces(MediaType.APPLICATION_JSON)
public Response upgradeApp(@PathParam("id") String id, Service app) {
    try {
        AppCatalogSolrClient sc = new AppCatalogSolrClient();
        sc.upgradeApp(app);
        YarnServiceClient yc = new YarnServiceClient();
        yc.upgradeApp(app);
    } catch (IOException | SolrServerException e) {
        // NOTE(review): e.toString() is returned to the client verbatim — may leak
        // internal details; consider a sanitized message. Confirm before changing.
        return Response.status(Status.BAD_REQUEST).entity(e.toString()).build();
    }
    // Response body is built by hand; app.getName() is assumed JSON-safe here.
    String output = "{\"status\":\"Application upgrade requested.\",\"id\":\"" + app.getName() + "\"}";
    return Response.status(Status.ACCEPTED).entity(output).build();
}
// NOTE(review): this test mocks the controller under test itself, so it only
// verifies the Mockito stub — it never exercises the real upgradeApp() logic.
@Test
void testUpgradeApp() throws Exception {
    String id = "application1";
    AppDetailsController ac = Mockito.mock(AppDetailsController.class);
    Service yarnfile = new Service();
    yarnfile.setVersion("1.0");
    Component comp = new Component();
    Container c = new Container();
    c.setId("container-1");
    List<Container> containers = new ArrayList<Container>();
    containers.add(c);
    comp.setContainers(containers);
    yarnfile.addComponent(comp);
    Response expected = Response.ok().build();
    when(ac.upgradeApp(id, yarnfile)).thenReturn(Response.ok().build());
    final Response actual = ac.upgradeApp(id, yarnfile);
    assertEquals(expected.getStatus(), actual.getStatus());
}
/**
 * Initializes the generic map loader: validates the MapStore configuration,
 * wires SQL/mapping helpers, derives the mapping name, and asynchronously
 * creates (or reads) the SQL mapping for the map.
 */
@Override
public void init(HazelcastInstance instance, Properties properties, String mapName) {
    validateMapStoreConfig(instance, mapName);
    logger = instance.getLoggingService().getLogger(GenericMapLoader.class);
    this.instance = Util.getHazelcastInstanceImpl(instance);
    this.genericMapStoreProperties = new GenericMapStoreProperties(properties, mapName);
    this.sqlService = instance.getSql();
    this.mappingHelper = new MappingHelper(this.sqlService);
    this.mapName = mapName;
    this.mappingName = MAPPING_PREFIX + mapName;
    HazelcastProperties hzProperties = nodeEngine().getProperties();
    this.initTimeoutMillis = hzProperties.getMillis(MAPSTORE_INIT_TIMEOUT);
    ManagedExecutorService asyncExecutor = getMapStoreExecutor();
    // Init can run on partition thread, creating a mapping uses other maps, so it needs to run elsewhere
    asyncExecutor.submit(this::createOrReadMapping);
}
// init() must reject a MapStoreConfig with offload=false, since mapping
// creation cannot run on the partition thread.
@Test
public void givenMapStoreConfigWithOffloadDisabled_thenFail() {
    MapStoreConfig mapStoreConfig = new MapStoreConfig()
        .setClassName(GenericMapLoader.class.getName())
        .setOffload(false);
    MapConfig mapConfig = new MapConfig(mapName);
    mapConfig.setMapStoreConfig(mapStoreConfig);
    instance().getConfig().addMapConfig(mapConfig);
    mapLoader = new GenericMapLoader<>();
    Properties properties = new Properties();
    properties.setProperty(DATA_CONNECTION_REF_PROPERTY, TEST_DATABASE_REF);
    assertThatThrownBy(() -> mapLoader.init(hz, properties, mapName))
        .isInstanceOf(HazelcastException.class)
        .hasMessage("MapStoreConfig for " + mapName + " must have `offload` property set to true");
}
@Override
public int hashCode() {
    // Hash on (klass, version, type) only; the originating classloader is
    // deliberately excluded so descriptors from different loaders hash alike
    // (see the plugin-desc equality test).
    return Objects.hash(klass, version, type);
}
// Descriptors are equal (and hash-equal) regardless of classloader, as long as
// class, version and type match; differing versions break equality.
@SuppressWarnings("rawtypes")
@Test
public void testPluginDescEquality() {
    PluginDesc<SinkConnector> connectorDescPluginPath = new PluginDesc<>(
        SinkConnector.class,
        snapshotVersion,
        PluginType.SINK,
        pluginLoader
    );
    PluginDesc<SinkConnector> connectorDescClasspath = new PluginDesc<>(
        SinkConnector.class,
        snapshotVersion,
        PluginType.SINK,
        systemLoader
    );
    assertEquals(connectorDescPluginPath, connectorDescClasspath);
    assertEquals(connectorDescPluginPath.hashCode(), connectorDescClasspath.hashCode());
    PluginDesc<Converter> converterDescPluginPath = new PluginDesc<>(
        Converter.class,
        noVersion,
        PluginType.CONVERTER,
        pluginLoader
    );
    PluginDesc<Converter> converterDescClasspath = new PluginDesc<>(
        Converter.class,
        noVersion,
        PluginType.CONVERTER,
        systemLoader
    );
    assertEquals(converterDescPluginPath, converterDescClasspath);
    assertEquals(converterDescPluginPath.hashCode(), converterDescClasspath.hashCode());
    // Same loader, different versions (null vs noVersion) -> not equal.
    PluginDesc<Transformation> transformDescPluginPath = new PluginDesc<>(
        Transformation.class,
        null,
        PluginType.TRANSFORMATION,
        pluginLoader
    );
    PluginDesc<Transformation> transformDescClasspath = new PluginDesc<>(
        Transformation.class,
        noVersion,
        PluginType.TRANSFORMATION,
        pluginLoader
    );
    assertNotEquals(transformDescPluginPath, transformDescClasspath);
}
/**
 * Captures a snapshot of Tomcat thread-pool JMX attributes and, when a
 * GlobalRequestProcessor MBean with the same connector name exists, its
 * request/traffic counters. Counters default to 0 when no matching
 * request processor is registered.
 *
 * @throws JMException if an MBean attribute cannot be read
 */
private TomcatInformations(ObjectName threadPool) throws JMException {
    super();
    name = threadPool.getKeyProperty("name");
    maxThreads = MBeansAccessor.getAttribute(threadPool, "maxThreads");
    currentThreadCount = MBeansAccessor.getAttribute(threadPool, "currentThreadCount");
    currentThreadsBusy = MBeansAccessor.getAttribute(threadPool, "currentThreadsBusy");
    // Find the GlobalRequestProcessor whose connector name matches this pool.
    ObjectName grp = null;
    for (final ObjectName globalRequestProcessor : GLOBAL_REQUEST_PROCESSORS) {
        if (name.equals(globalRequestProcessor.getKeyProperty("name"))) {
            grp = globalRequestProcessor;
            break;
        }
    }
    if (grp != null) {
        bytesReceived = MBeansAccessor.getAttribute(grp, "bytesReceived");
        bytesSent = MBeansAccessor.getAttribute(grp, "bytesSent");
        requestCount = MBeansAccessor.getAttribute(grp, "requestCount");
        errorCount = MBeansAccessor.getAttribute(grp, "errorCount");
        processingTime = MBeansAccessor.getAttribute(grp, "processingTime");
        maxTime = MBeansAccessor.getAttribute(grp, "maxTime");
    } else {
        // No request processor registered for this connector: zero all counters.
        bytesReceived = 0;
        bytesSent = 0;
        requestCount = 0;
        errorCount = 0;
        processingTime = 0;
        maxTime = 0;
    }
}
// Registers ThreadPool and GlobalRequestProcessor MBeans, exercises
// buildTomcatInformationsList() before and after each registration, then runs a
// collector pass; MBeans are always unregistered in the finally block.
@Test
public void testTomcatInformations() throws JMException {
    System.setProperty("catalina.home", "unknown");
    // ce premier appel crée un MBeanServer  (this first call creates an MBeanServer)
    TomcatInformations.initMBeans();
    assertNotNull("buildTomcatInformationsList",
        TomcatInformations.buildTomcatInformationsList());
    final MBeanServer mBeanServer = MBeans.getPlatformMBeanServer();
    final List<ObjectName> mBeans = new ArrayList<>();
    mBeans.add(
        mBeanServer
            .registerMBean(new ThreadPool(),
                new ObjectName("Catalina:type=ThreadPool,name=http-8080"))
            .getObjectName());
    TomcatInformations.initMBeans();
    try {
        // les appels suivants réutilise le MBeanServer créé  (subsequent calls reuse the created MBeanServer)
        assertNotNull("buildTomcatInformationsList",
            TomcatInformations.buildTomcatInformationsList());
        mBeans.add(
            mBeanServer
                .registerMBean(new GlobalRequestProcessor(),
                    new ObjectName(
                        "Catalina:type=GlobalRequestProcessor,name=http-8080"))
                .getObjectName());
        TomcatInformations.initMBeans();
        assertNotNull("buildTomcatInformationsList",
            TomcatInformations.buildTomcatInformationsList());
        for (final TomcatInformations tomcatInformations : TomcatInformations
            .buildTomcatInformationsList()) {
            tomcatInformations.toString();
        }
        final Counter counter = new Counter("http", null);
        final Collector collector = new Collector("test", List.of(counter));
        final ServletContext context = createNiceMock(ServletContext.class);
        expect(context.getServerInfo()).andReturn("Mock").anyTimes();
        expect(context.getMajorVersion()).andReturn(5).anyTimes();
        expect(context.getMinorVersion()).andReturn(0).anyTimes();
        expect(context.getContextPath()).andReturn("/test").anyTimes();
        replay(context);
        Parameters.initialize(context);
        collector.collectLocalContextWithoutErrors();
        verify(context);
    } finally {
        // Always clean up registered MBeans so other tests see a pristine server.
        for (final ObjectName registeredMBean : mBeans) {
            mBeanServer.unregisterMBean(registeredMBean);
        }
        TomcatInformations.initMBeans();
    }
}
/** Records the original file for the given component in the originals cache (delegation only). */
@Override
public void setOriginalFile(Component file, OriginalFile originalFile) {
    storeOriginalFileInCache(originalFiles, file, originalFile);
}
// A null component must be rejected with an NPE carrying the exact message.
@Test
public void setOriginalFile_throws_NPE_when_file_is_null() {
    assertThatThrownBy(() -> underTest.setOriginalFile(null, SOME_ORIGINAL_FILE))
        .isInstanceOf(NullPointerException.class)
        .hasMessage("file can't be null");
}
/**
 * Exchanges a refresh token for a new access token: validates the token and its
 * owning client, revokes all access tokens issued from it, rejects expired
 * refresh tokens, and finally issues a fresh access token.
 */
@Override
public OAuth2AccessTokenDO refreshAccessToken(String refreshToken, String clientId) {
    // Look up the refresh token record
    OAuth2RefreshTokenDO refreshTokenDO = oauth2RefreshTokenMapper.selectByRefreshToken(refreshToken);
    if (refreshTokenDO == null) {
        throw exception0(GlobalErrorCodeConstants.BAD_REQUEST.getCode(), "无效的刷新令牌");
    }
    // Verify the requesting client matches the token's client
    OAuth2ClientDO clientDO = oauth2ClientService.validOAuthClientFromCache(clientId);
    if (ObjectUtil.notEqual(clientId, refreshTokenDO.getClientId())) {
        throw exception0(GlobalErrorCodeConstants.BAD_REQUEST.getCode(), "刷新令牌的客户端编号不正确");
    }
    // Revoke all access tokens derived from this refresh token (DB rows and Redis cache)
    List<OAuth2AccessTokenDO> accessTokenDOs = oauth2AccessTokenMapper.selectListByRefreshToken(refreshToken);
    if (CollUtil.isNotEmpty(accessTokenDOs)) {
        oauth2AccessTokenMapper.deleteBatchIds(convertSet(accessTokenDOs, OAuth2AccessTokenDO::getId));
        oauth2AccessTokenRedisDAO.deleteList(convertSet(accessTokenDOs, OAuth2AccessTokenDO::getAccessToken));
    }
    // If the refresh token itself has expired, delete it and fail
    if (DateUtils.isExpired(refreshTokenDO.getExpiresTime())) {
        oauth2RefreshTokenMapper.deleteById(refreshTokenDO.getId());
        throw exception0(GlobalErrorCodeConstants.UNAUTHORIZED.getCode(), "刷新令牌已过期");
    }
    // Issue a new access token bound to this refresh token and client
    return createOAuth2AccessToken(refreshTokenDO, clientDO);
}
// Refreshing with a token that belongs to a different client must fail with
// the "client id mismatch" error code 400.
@Test
public void testRefreshAccessToken_clientIdError() {
    // Prepare arguments
    String refreshToken = randomString();
    String clientId = randomString();
    // Mock the client-validation call
    OAuth2ClientDO clientDO = randomPojo(OAuth2ClientDO.class).setClientId(clientId);
    when(oauth2ClientService.validOAuthClientFromCache(eq(clientId))).thenReturn(clientDO);
    // Insert a refresh token owned by a DIFFERENT client id
    OAuth2RefreshTokenDO refreshTokenDO = randomPojo(OAuth2RefreshTokenDO.class)
        .setRefreshToken(refreshToken).setClientId("error");
    oauth2RefreshTokenMapper.insert(refreshTokenDO);
    // Call and assert the expected service error
    assertServiceException(() -> oauth2TokenService.refreshAccessToken(refreshToken, clientId),
        new ErrorCode(400, "刷新令牌的客户端编号不正确"));
}
public static List<String> getDefaultCipherSuites() throws NoSuchAlgorithmException, KeyManagementException { // TODO Might want to cache the result. It's unlikely to change at runtime. final SSLContext context = getUninitializedSSLContext(); context.init( null, null, null ); return Arrays.asList( context.createSSLEngine().getEnabledCipherSuites() ); }
// The JVM's default SSL context must expose at least one enabled cipher suite.
@Test
public void testHasDefaultCipherSuites() throws Exception {
    // Setup fixture.
    // (not needed)
    // Execute system under test.
    final Collection<String> result = EncryptionArtifactFactory.getDefaultCipherSuites();
    // Verify results.
    assertFalse( result.isEmpty() );
}
/**
 * Builds the UI component for a toolbar entry and adds it to the ancestor
 * toolbar. If the entry carries an action, an ActionEnabler is attached so the
 * component's enabled state tracks the action's. Entries whose provider yields
 * no component are skipped entirely.
 */
@Override
public void visit(Entry entry) {
    Component component = componentProvider.createComponent(entry);
    if(component != null){
        final EntryAccessor entryAccessor = new EntryAccessor();
        entryAccessor.setComponent(entry, component);
        final AFreeplaneAction action = entryAccessor.getAction(entry);
        if (action != null) {
            // Keep the component's enabled state in sync with the action, and stash
            // the enabler on the entry so it can be looked up / detached later.
            final ActionEnabler actionEnabler = new ActionEnabler(component);
            action.addPropertyChangeListener(actionEnabler);
            entry.setAttribute(actionEnabler.getClass(), actionEnabler);
        }
        final JToolBar container = (JToolBar) new EntryAccessor().getAncestorComponent(entry);
        GridBagConstraints constraints = layoutConstraintsForEntry(entry, component);
        container.add(component, constraints);
    }
}
// A "separator" entry inside a horizontal toolbar must produce a JToolBar
// separator added to that toolbar with VERTICAL orientation.
@Test
public void createsVerticalToolbarSeparator() {
    Entry separatorEntry = new Entry();
    separatorEntry.setBuilders(asList("separator"));
    Entry toolbarEntry = new Entry();
    final FreeplaneToolBar toolbar = new FreeplaneToolBar("toolbar", SwingConstants.HORIZONTAL);
    new EntryAccessor().setComponent(toolbarEntry, toolbar);
    toolbarEntry.addChild(separatorEntry);
    final JToolbarComponentBuilder toolbarActionGroupBuilder = new JToolbarComponentBuilder(resourceAccessorMock);
    toolbarActionGroupBuilder.visit(separatorEntry);
    JToolBar.Separator separator = (JToolBar.Separator)new EntryAccessor().getComponent(separatorEntry);
    assertThat(separator.getParent(), CoreMatchers.equalTo((Container)toolbar));
    assertThat(separator.getOrientation(), CoreMatchers.equalTo(SwingConstants.VERTICAL));
}
/**
 * Decodes every stored byte[] item through the serializer and returns the
 * results as a new Object array, in iteration order of the backing collection.
 */
@Override
public Object[] toArray() {
    final Object[] decoded = new Object[items.size()];
    int i = 0;
    for (byte[] encoded : items) {
        decoded[i++] = serializer.decode(encoded);
    }
    return decoded;
}
// toArray() must return one element per set entry, and every returned element
// must still be contained in the set.
@Test
public void testToArray() throws Exception {
    //Test creation of a new array of the values
    fillSet(10, set);
    Object[] arr = set.toArray();
    assertEquals("The array should be of length 10.", 10, arr.length);
    for (int i = 0; i < 10; i++) {
        assertTrue("All elements of the array should be in the set.", set.contains(arr[i]));
    }
}
/**
 * Resolves the preference key resource to its string form and delegates to the
 * string-keyed overload; the default value stays a string resource id.
 */
public Preference<String> getString(@StringRes int prefKey, @StringRes int defaultValue) {
    return getString(mResources.getString(prefKey), defaultValue);
}
// Happy path: the preference returns its default, its observable emits the
// current value, and an external SharedPreferences change propagates to both.
@Test
public void testStringHappyPath() {
    RxSharedPrefs impl = new RxSharedPrefs(getApplicationContext(), this::testRestoreFunction);
    final Preference<String> preference = impl.getString(R.string.pref_test_key, R.string.pref_test_value);
    Assert.assertEquals("value", preference.get());
    final AtomicReference<String> observedValue = new AtomicReference<>(null);
    mCompositeDisposable.add(preference.asObservable().subscribe(observedValue::set));
    Assert.assertEquals("value", preference.get());
    SharedPrefsHelper.setPrefsValue(R.string.pref_test_key, "iw");
    Assert.assertEquals("iw", preference.get());
    Assert.assertEquals("iw", observedValue.get());
}
/**
 * Rounds {@code value} up to the nearest power of two.
 *
 * @param value the value to round; must be in [0, MAX_POW2]
 * @return the smallest power of two {@code >= value}
 * @throws IllegalArgumentException when the value is negative or exceeds the
 *                                  largest representable power of two
 */
public static int roundToPowerOfTwo(final int value) {
    if (value > MAX_POW2) {
        throw new IllegalArgumentException("There is no larger power of 2 int for value:"+value+" since it exceeds 2^31.");
    }
    if (value < 0) {
        throw new IllegalArgumentException("Given value:"+value+". Expecting value >= 0.");
    }
    // numberOfLeadingZeros(value - 1) gives the bit position just above the
    // highest set bit of value-1; shifting 1 there yields the next power of two.
    return 1 << (32 - Integer.numberOfLeadingZeros(value - 1));
}
// Rounding one past the largest representable power of two must throw
// (declared via the expected attribute; fail() fires only if it does not).
@Test(expected = IllegalArgumentException.class)
public void testMaxRoundException() {
    Pow2.roundToPowerOfTwo(MAX_POSITIVE_POW2 + 1);
    fail();
}
/**
 * Computes this host's deterministic-subsetting metadata (its index within the
 * sorted peer-cluster host list, the cluster size, and the cluster version) by
 * listening to the peer cluster's URI properties. The metadata is recomputed
 * only when the cluster version changes; the result is returned via a blocking
 * wait on the listener callback, and {@code null} is returned on timeout,
 * missing cluster name, or when this host is not in the peer cluster.
 */
@Override
public DeterministicSubsettingMetadata getSubsettingMetadata(LoadBalancerState state) {
    if (_clusterName == null) {
        _log.debug("Peer cluster name not provided.");
        return null;
    }
    FutureCallback<DeterministicSubsettingMetadata> metadataFutureCallback = new FutureCallback<>();
    state.listenToCluster(_clusterName, (type, name) -> {
        LoadBalancerStateItem<UriProperties> uriItem = state.getUriProperties(_clusterName);
        // Guard cached version/metadata against concurrent listener invocations.
        synchronized (_lock) {
            if (uriItem.getVersion() != _peerClusterVersion) {
                _peerClusterVersion = uriItem.getVersion();
                UriProperties uriProperties = uriItem.getProperty();
                if (uriProperties != null) {
                    // Sort the URIs so each client sees the same ordering
                    List<String> sortedHosts = uriProperties.getPartitionDesc().keySet().stream()
                        .map(URI::getHost)
                        .sorted()
                        .distinct()
                        .collect(Collectors.toList());
                    int instanceId = sortedHosts.indexOf(_hostName);
                    if (instanceId >= 0) {
                        _subsettingMetadata = new DeterministicSubsettingMetadata(instanceId, sortedHosts.size(),
                            _peerClusterVersion);
                    } else {
                        // This host is no longer part of the peer cluster.
                        _subsettingMetadata = null;
                    }
                } else {
                    _subsettingMetadata = null;
                }
                _log.debug("Got deterministic subsetting metadata for cluster {}: {}", _clusterName, _subsettingMetadata);
            }
        }
        metadataFutureCallback.onSuccess(_subsettingMetadata);
    });
    try {
        // Block until the listener has produced (or reused) the metadata.
        return metadataFutureCallback.get(_timeout, _unit);
    } catch (InterruptedException | ExecutionException | TimeoutException e) {
        _log.warn("Failed to fetch deterministic subsetting metadata from ZooKeeper for cluster " + _clusterName, e);
        return null;
    }
}
// Metadata tracks the sorted host list: instance id / count / version update as
// hosts are removed, and removing this host itself yields null metadata.
@Test
public void testGetSubsettingMetadata() {
    List<String> schemes = new ArrayList<>();
    Map<Integer, PartitionData> partitionData = new HashMap<>(1);
    partitionData.put(DefaultPartitionAccessor.DEFAULT_PARTITION_ID, new PartitionData(1d));
    Map<URI, Map<Integer, PartitionData>> uriData = new HashMap<>();
    // Ten peer hosts test0..test9; the provider's own host sorts at index 2 initially.
    for (int i = 0; i < 10; i++) {
        uriData.put(URI.create("http://test" + i + ".linkedin.com:8888/test"), partitionData);
    }
    schemes.add("http");
    _state.listenToCluster("cluster-1", new LoadBalancerState.NullStateListenerCallback());
    _state.listenToService("service-1", new LoadBalancerState.NullStateListenerCallback());
    _clusterRegistry.put("cluster-1", new ClusterProperties("cluster-1", schemes));
    _uriRegistry.put("cluster-1", new UriProperties("cluster-1", uriData));
    _serviceRegistry.put("service-1", new ServiceProperties("service-1", "cluster-1", "/test",
        Collections.singletonList("random")));
    DeterministicSubsettingMetadata metadata = _metadataProvider.getSubsettingMetadata(_state);
    assertEquals(metadata.getInstanceId(), 2);
    assertEquals(metadata.getTotalInstanceCount(), 10);
    assertEquals(metadata.getPeerClusterVersion(), 5);
    // Removing a host that sorts before ours shifts our index down and bumps the version.
    uriData.remove(URI.create("http://test0.linkedin.com:8888/test"));
    _uriRegistry.put("cluster-1", new UriProperties("cluster-1", uriData));
    metadata = _metadataProvider.getSubsettingMetadata(_state);
    assertEquals(metadata.getInstanceId(), 1);
    assertEquals(metadata.getTotalInstanceCount(), 9);
    assertEquals(metadata.getPeerClusterVersion(), 7);
    // Removing our own host means we are no longer in the cluster: metadata is null.
    uriData.remove(URI.create("http://test2.linkedin.com:8888/test"));
    _uriRegistry.put("cluster-1", new UriProperties("cluster-1", uriData));
    metadata = _metadataProvider.getSubsettingMetadata(_state);
    assertNull(metadata);
}
/**
 * Static factory for a UForLoop AST node. Initializer and update lists are
 * defensively copied into immutable lists; the body must be a USimpleStatement
 * (the cast enforces this at construction time).
 *
 * @param condition the loop condition, or {@code null} for an infinite loop header
 */
public static UForLoop create(
    Iterable<? extends UStatement> initializer,
    @Nullable UExpression condition,
    Iterable<? extends UExpressionStatement> update,
    UStatement statement) {
    return new AutoValue_UForLoop(
        ImmutableList.copyOf(initializer),
        condition,
        ImmutableList.copyOf(update),
        (USimpleStatement) statement);
}
@Test
public void serialization() {
  // Build a template for: for (int i = from; i < to; i++) {}
  UVariableDecl loopVar = UVariableDecl.create("i", UPrimitiveTypeTree.INT, UFreeIdent.create("from"));
  UBinary loopCondition = UBinary.create(Kind.LESS_THAN, ULocalVarIdent.create("i"), UFreeIdent.create("to"));
  UExpressionStatement increment =
      UExpressionStatement.create(UUnary.create(Kind.POSTFIX_INCREMENT, ULocalVarIdent.create("i")));
  UForLoop loop =
      UForLoop.create(ImmutableList.of(loopVar), loopCondition, ImmutableList.of(increment), UBlock.create());
  // Round-trip through Java serialization and assert structural equality.
  SerializableTester.reserializeAndAssert(loop);
}
/**
 * Returns the elements between the given rank indexes in descending score
 * order. Blocking facade over the async counterpart.
 *
 * @param startIndex start rank (inclusive); negative indexes count from the tail
 * @param endIndex   end rank (inclusive); negative indexes count from the tail
 * @return the matching values, highest score first
 */
@Override
public Collection<V> valueRangeReversed(int startIndex, int endIndex) {
    // Delegate to the async variant and block until the result arrives.
    return get(valueRangeReversedAsync(startIndex, endIndex));
}
@Test
public void testValueRangeReversed() {
    RScoredSortedSet<Integer> set = redisson.getScoredSortedSet("simple");
    // Scores 0..4 map to values 1..5.
    for (int score = 0; score < 5; score++) {
        set.add(score, score + 1);
    }
    // Re-adding (4, 5) must not create a duplicate entry.
    set.add(4, 5);
    // Full reversed range: highest score first.
    Collection<Integer> reversed = set.valueRangeReversed(0, -1);
    assertThat(reversed).containsExactly(5, 4, 3, 2, 1);
}
/**
 * Attempts to set the capacity of this bounded queue. Blocking facade over
 * the async counterpart.
 *
 * @param capacity maximum number of elements the queue may hold
 * @return true if the capacity was applied; presumably false if a capacity
 *         was already set — TODO confirm against the async implementation
 */
@Override
public boolean trySetCapacity(int capacity) {
    // Delegate to the async variant and block until the result arrives.
    return get(trySetCapacityAsync(capacity));
}
@Test
public void testRemoveEmpty() {
    // Queue setup cannot throw; only remove() on the empty queue should fail.
    RBoundedBlockingQueue<Integer> emptyQueue = redisson.getBoundedBlockingQueue("blocking:queue");
    emptyQueue.trySetCapacity(10);
    // remove() must fail fast with NoSuchElementException when nothing is queued.
    Assertions.assertThrows(NoSuchElementException.class, emptyQueue::remove);
}
/**
 * Builds the Drools AST for a PMML tree model: derives the target field type,
 * generates rules from the model's root node, and packages them with the
 * caller-supplied type declarations.
 *
 * @param fields       all fields known to the model
 * @param model        the source tree model
 * @param fieldTypeMap mapping from original field names to generated types
 * @param types        pre-built Drools type declarations to carry into the AST
 * @return the assembled {@code KiePMMLDroolsAST}
 */
public static KiePMMLDroolsAST getKiePMMLDroolsAST(final List<Field<?>> fields,
                                                   final TreeModel model,
                                                   final Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap,
                                                   final List<KiePMMLDroolsType> types) {
    logger.trace("getKiePMMLDroolsAST {} {}", fields, model);
    final DATA_TYPE targetType = getTargetFieldType(fields, model);
    // Models without an <Output> section contribute no output fields.
    List<OutputField> outputFields;
    if (model.getOutput() == null) {
        outputFields = Collections.emptyList();
    } else {
        outputFields = model.getOutput().getOutputFields();
    }
    // Walk the tree from the root, emitting one rule per node path.
    final List<KiePMMLDroolsRule> rules =
            KiePMMLTreeModelNodeASTFactory.factory(fieldTypeMap, outputFields, model.getNoTrueChildStrategy(), targetType)
                    .declareRulesFromRootNode(model.getNode(), "");
    return new KiePMMLDroolsAST(types, rules);
}
@Test
void getKiePMMLDroolsSimpleSetAST() {
    final Map<String, KiePMMLOriginalTypeGeneratedType> fieldTypeMap =
            getFieldTypeMap(simpleSetPmml.getDataDictionary(),
                            simpleSetPmml.getTransformationDictionary(),
                            simpleSetModel.getLocalTransformations());
    final List<KiePMMLDroolsType> emptyTypes = Collections.emptyList();
    final KiePMMLDroolsAST ast = KiePMMLTreeModelASTFactory.getKiePMMLDroolsAST(
            getFieldsFromDataDictionary(simpleSetPmml.getDataDictionary()),
            simpleSetModel,
            fieldTypeMap,
            emptyTypes);
    // The AST echoes back the supplied type list and derives at least one rule.
    assertThat(ast).isNotNull();
    assertThat(ast.getTypes()).isEqualTo(emptyTypes);
    assertThat(ast.getRules()).isNotEmpty();
}
static <T extends CompoundPredicate> T flattenCompound(Predicate predicateLeft, Predicate predicateRight, Class<T> klass) { // The following could have been achieved with {@link com.hazelcast.query.impl.predicates.FlatteningVisitor}, // however since we only care for 2-argument flattening, we can avoid constructing a visitor and its internals // for each token pass at the cost of the following explicit code. Predicate[] predicates; if (klass.isInstance(predicateLeft) || klass.isInstance(predicateRight)) { Predicate[] left = getSubPredicatesIfClass(predicateLeft, klass); Predicate[] right = getSubPredicatesIfClass(predicateRight, klass); predicates = new Predicate[left.length + right.length]; ArrayUtils.concat(left, right, predicates); } else { predicates = new Predicate[]{predicateLeft, predicateRight}; } try { T compoundPredicate = klass.getDeclaredConstructor().newInstance(); compoundPredicate.setPredicates(predicates); return compoundPredicate; } catch (ReflectiveOperationException e) { throw new IllegalArgumentException(String.format("%s must have a public default constructor", klass.getName())); } }
@Test
public void testAnd_whenRightPredicateAnd() {
    Predicate<Object, Object> left = Predicates.alwaysTrue();
    AndPredicate right = new AndPredicate(new SqlPredicate("a == 1"), new SqlPredicate("a == 2"));
    AndPredicate flattened = SqlPredicate.flattenCompound(left, right, AndPredicate.class);
    Predicate[] inner = flattened.getPredicates();
    // The right-hand AND is unwrapped: 1 (left) + 2 (right's children) = 3 predicates.
    assertEquals(3, inner.length);
    assertSame(left, inner[0]);
    assertInstanceOf(SqlPredicate.class, inner[1]);
    assertInstanceOf(SqlPredicate.class, inner[2]);
}
/**
 * Atomically increments the counter associated with {@code key} by one.
 * Blocking facade over the async counter map.
 *
 * @param key the map key whose counter is incremented
 * @return the counter value after the increment
 */
@Override
public long incrementAndGet(K key) {
    // Delegate to the async map and block until the new value is available.
    return complete(asyncCounterMap.incrementAndGet(key));
}
@Test
public void testIncrementAndGet() {
    // Seed the counter, then bump it and verify the returned post-increment value.
    atomicCounterMap.put(KEY1, VALUE1);
    Long incremented = atomicCounterMap.incrementAndGet(KEY1);
    assertThat(incremented, is(VALUE1 + 1));
}
/**
 * Reports whether any check-in in the given revisions matches this user's
 * configured matcher. A user with no matcher configured never matches.
 */
boolean matchModification(MaterialRevisions materialRevisions) {
    // No matcher configured -> nothing can ever match.
    if (StringUtils.isEmpty(this.matcher)) {
        return false;
    }
    Matcher userMatcher = new Matcher(this.matcher);
    return materialRevisions.containsMyCheckin(userMatcher);
}
@Test
void shouldNotMatchWhenUserDidNotSetUpTheMatcher() {
    MaterialRevision revision = new MaterialRevision(MaterialsMother.svnMaterial(), aCheckIn("100", "readme"));
    materialRevisions = new MaterialRevisions(revision);
    // Both a null matcher entry and a blank one count as "no matcher set up".
    User userWithNullMatcher = new User("UserName", new String[]{null}, "user@mail.com", true);
    User userWithBlankMatcher = new User("UserName", new String[]{""}, "user@mail.com", true);
    assertThat(userWithNullMatcher.matchModification(materialRevisions)).isFalse();
    assertThat(userWithBlankMatcher.matchModification(materialRevisions)).isFalse();
}
/**
 * Creates a {@code Read} transform for entities of {@code classType}; further
 * configuration (database, container, coder, ...) is applied via the
 * {@code with*} methods.
 *
 * @param classType the element type produced by the read
 */
public static <T> Read<T> read(Class<T> classType) {
  AutoValue_CosmosIO_Read.Builder<T> builder = new AutoValue_CosmosIO_Read.Builder<>();
  builder.setClassType(classType);
  return builder.build();
}
/**
 * Verifies that the bounded Cosmos DB source splits into one sub-source per
 * requested bundle size: the fixture holds ~3KB of data, so a 1024-byte
 * desired bundle size should yield 3 splits.
 */
@Test
public void testSplit() throws Exception {
    Read<Family> read = CosmosIO.read(Family.class)
            .withContainer(CONTAINER)
            .withDatabase(DATABASE)
            .withCoder(SerializableCoder.of(Family.class));
    BoundedCosmosBDSource<Family> initialSource = new BoundedCosmosBDSource<>(read);
    // Cosmos DB precision is in KB. Inserted test data is ~3KB
    List<? extends BoundedSource<Family>> splits = initialSource.split(1024, pipelineOptions);
    assertEquals("Wrong split", 3, splits.size());
}
/**
 * Handles a selector update for the Spring Cloud plugin: caches the parsed
 * handle, refreshes the upstream list, optionally snapshots discovered
 * service instances, and seeds a default rule handle for non-continued
 * selectors.
 */
@Override
public void handlerSelector(final SelectorData selectorData) {
    // Parse the JSON handle and cache it keyed by selector id.
    SpringCloudSelectorHandle springCloudSelectorHandle = GsonUtils.getInstance().fromJson(selectorData.getHandle(), SpringCloudSelectorHandle.class);
    SELECTOR_CACHED.get().cachedHandle(selectorData.getId(), springCloudSelectorHandle);
    // No upstreams configured: drop any previously cached upstreams and stop.
    if (CollectionUtils.isEmpty(springCloudSelectorHandle.getDivideUpstreams())) {
        UpstreamCacheManager.getInstance().removeByKey(selectorData.getId());
        return;
    }
    // When instance caching is enabled, snapshot the discovery client's current
    // view of the configured service id.
    if (springCloudCacheConfig.getEnabled()) {
        List<ServiceInstance> serviceInstances = discoveryClient.getInstances(springCloudSelectorHandle.getServiceId());
        ServiceInstanceCache.cacheServiceInstance(springCloudSelectorHandle.getServiceId(), serviceInstances);
    }
    // Publish the converted upstream list to the shared upstream cache.
    UpstreamCacheManager.getInstance().submit(selectorData.getId(), convertUpstreamList(springCloudSelectorHandle.getDivideUpstreams()));
    // Non-continued selectors get a default rule handle so rule matching can
    // proceed without an explicitly configured rule.
    if (!selectorData.getContinued()) {
        RULE_CACHED.get().cachedHandle(CacheKeyUtils.INST.getKey(selectorData.getId(), Constants.DEFAULT_RULE), SpringCloudRuleHandle.newDefaultInstance());
    }
}
@Test
public void testHandlerSelector() throws NoSuchFieldException, IllegalAccessException {
    List<DivideUpstream> upstreams = new ArrayList<>();
    upstreams.add(new DivideUpstream());
    SpringCloudSelectorHandle handle = SpringCloudSelectorHandle.builder()
            .serviceId("serviceId")
            .divideUpstreams(upstreams)
            .build();
    selectorData = SelectorData.builder()
            .handle(GsonUtils.getInstance().toJson(handle))
            .id("1")
            .build();
    springCloudPluginDataHandler.handlerSelector(selectorData);
    // Peek at the private upstream cache via reflection to confirm the selector's
    // upstreams were submitted under its id.
    UpstreamCacheManager manager = UpstreamCacheManager.getInstance();
    Field upstreamMapField = manager.getClass().getDeclaredField("UPSTREAM_MAP");
    upstreamMapField.setAccessible(true);
    Map<String, List<Upstream>> upstreamMap = (Map<String, List<Upstream>>) upstreamMapField.get(manager);
    Assertions.assertNotNull(upstreamMap.get("1"));
}
/**
 * Appends each element of {@code otherList} to {@code list} unless {@code list}
 * already contains it (per {@link Object#equals}). Existing order is kept;
 * new elements are appended in {@code otherList} order.
 *
 * @param <T>       element type
 * @param list      destination list, modified in place; also the return value
 * @param otherList candidate elements to append; elements already present are skipped
 * @return {@code list} itself, for call chaining; returned unchanged when either
 *         argument is {@code null}
 */
public static <T> List<T> addAllIfNotContains(List<T> list, List<T> otherList) {
    // Tolerate null inputs instead of throwing NPE: nothing to add / nowhere to add.
    if (null == list || null == otherList) {
        return list;
    }
    for (T t : otherList) {
        // Idiomatic negation instead of the Yoda-style "false == ..." comparison.
        if (!list.contains(t)) {
            list.add(t);
        }
    }
    return list;
}
@Test
public void addAllIfNotContainsTest() {
    final ArrayList<String> target = new ArrayList<>();
    target.add("1");
    target.add("2");
    final ArrayList<String> source = new ArrayList<>();
    source.add("2");
    source.add("3");
    CollUtil.addAllIfNotContains(target, source);
    // "3" is appended; the shared "2" is skipped, so order stays 1, 2, 3.
    assertEquals(3, target.size());
    assertEquals("1", target.get(0));
    assertEquals("2", target.get(1));
    assertEquals("3", target.get(2));
}
/**
 * Serializes an {@code OpenstackVtapNetwork} into its JSON representation
 * (mode, network id, and server IP).
 *
 * @param network the vtap network to encode; must not be null
 * @param context codec context supplying the JSON mapper
 * @return the populated JSON object node
 */
@Override
public ObjectNode encode(OpenstackVtapNetwork network, CodecContext context) {
    checkNotNull(network, JSON_NULL_MESSAGE, "OpenstackVtapNetwork object");
    ObjectNode node = context.mapper().createObjectNode();
    node.put(MODE, network.mode().toString());
    node.put(NETWORK_ID, network.networkId());
    node.put(SERVER_IP, network.serverIp().toString());
    return node;
}
@Test
public void testEncode() {
    OpenstackVtapNetwork network = DefaultOpenstackVtapNetwork.builder()
            .mode(MODE)
            .networkId(VNI)
            .serverIp(IpAddress.valueOf(SERVER_IP_ADDRESS))
            .build();
    // Encoding must faithfully reflect every field of the source network.
    ObjectNode encoded = vtapNetworkCodec.encode(network, context);
    assertThat(encoded, matchesVtapNetwork(network));
}