focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * Adds {@code value} to this usage figure under the write lock, recomputes the
 * percent-usage, and then propagates the same delta to the parent usage (if any).
 * A zero delta is a no-op.
 */
public void increaseUsage(long value) {
    if (value == 0) {
        // Nothing to record; avoid taking the lock at all.
        return;
    }
    usageLock.writeLock().lock();
    try {
        usage += value;
        setPercentUsage(caclPercentUsage());
    } finally {
        usageLock.writeLock().unlock();
    }
    // Propagate after releasing our own lock so the parent can lock independently.
    if (parent != null) {
        parent.increaseUsage(value);
    }
}
// Verifies that a registered UsageListener is invoked asynchronously (on a different
// thread than the caller) when usage changes, and that increasing usage by 1 against a
// limit of 10 reports 10 percent usage.
// NOTE(review): the final Thread.activeCount() assertion assumes exactly one new thread
// was started and none exited during the test — this can be flaky; confirm intent.
@Test public final void testAddUsageListenerStartsThread() throws Exception { int activeThreadCount = Thread.activeCount(); underTest = new MemoryUsage(); underTest.setExecutor(executor); underTest.setLimit(10); underTest.start(); final CountDownLatch called = new CountDownLatch(1); final String[] listnerThreadNameHolder = new String[1]; underTest.addUsageListener(new UsageListener() { public void onUsageChanged(Usage usage, int oldPercentUsage, int newPercentUsage) { called.countDown(); listnerThreadNameHolder[0] = Thread.currentThread().toString(); } }); underTest.increaseUsage(1); assertTrue("listener was called", called.await(30, TimeUnit.SECONDS)); assertTrue("listener called from another thread", !Thread.currentThread().toString().equals(listnerThreadNameHolder[0])); assertEquals("usage is correct", 10, underTest.getPercentUsage()); assertEquals("new thread created with listener", activeThreadCount + 1, Thread.activeCount()); }
// Tests a VFS connection in stages: the details must carry a non-empty name (they may be
// unsaved, but the name is needed to form a temporary PVFS URI); the provider's own test
// must pass; then, unless root paths are unsupported by the details or ignored via the
// options, the resolved root path is validated — a null resolved root is acceptable only
// when a root path is not required, and a non-null one must resolve to an existing folder.
public <T extends VFSConnectionDetails> boolean test( @NonNull ConnectionManager manager, @NonNull T details, @Nullable VFSConnectionTestOptions options ) throws KettleException { if ( options == null ) { options = new VFSConnectionTestOptions(); } // The specified connection details may not exist saved in the meta-store, // but still needs to have a non-empty name in it, to be able to form a temporary PVFS URI. if ( StringUtils.isEmpty( details.getName() ) ) { return false; } VFSConnectionProvider<T> provider = getExistingProvider( manager, details ); if ( !provider.test( details ) ) { return false; } if ( !details.isRootPathSupported() || options.isRootPathIgnored() ) { return true; } String resolvedRootPath; try { resolvedRootPath = getResolvedRootPath( details ); } catch ( KettleException e ) { // Invalid root path. return false; } if ( resolvedRootPath == null ) { return !details.isRootPathRequired(); } // Ensure that root path exists and is a folder. return isFolder( getConnectionRootProviderFileObject( manager, provider, details ) ); }
// A null connection name must fail fast: test(...) returns false before consulting the provider.
@Test public void testTestReturnsFalseWhenConnectionNameIsNull() throws KettleException { when( vfsConnectionDetails.getName() ).thenReturn( null ); assertFalse( vfsConnectionManagerHelper.test( connectionManager, vfsConnectionDetails, getTestOptionsCheckRootPath() ) ); }
/**
 * Adds the element using its own {@link Object#hashCode()} as the precomputed hash,
 * delegating to the two-argument overload.
 *
 * @return whether the set changed as a result of the call
 */
@Override
public boolean add(E element) {
    final int hash = element.hashCode();
    return add(element, hash);
}
// Inserting 10 distinct elements into a set created with capacity 8 must succeed for every
// element (exercises growth past the initial capacity).
@Test public void testAdd() { final OAHashSet<Integer> set = new OAHashSet<>(8); for (int i = 0; i < 10; i++) { final boolean added = set.add(i); assertTrue("Element " + i + " should be added", added); } }
// Builds the conjunction of the given predicates with simplification: an empty input yields
// TRUE; any FALSE operand collapses the whole expression to FALSE; TRUE operands are dropped;
// nested AndPredicates are flattened into this one. The operands are sorted before returning
// (presumably by evaluation cost via their Comparable implementation — confirm).
public static FilePredicate create(Collection<FilePredicate> predicates) { if (predicates.isEmpty()) { return TruePredicate.TRUE; } AndPredicate result = new AndPredicate(); for (FilePredicate filePredicate : predicates) { if (filePredicate == TruePredicate.TRUE) { continue; } else if (filePredicate == FalsePredicate.FALSE) { return FalsePredicate.FALSE; } else if (filePredicate instanceof AndPredicate andPredicate) { result.predicates.addAll(andPredicate.predicates); } else { result.predicates.add(OptimizedFilePredicateAdapter.create(filePredicate)); } } Collections.sort(result.predicates); return result; }
// A FALSE operand must collapse the whole AND expression to the FALSE singleton.
@Test public void simplifyAndExpressionsWhenFalse() { PathPatternPredicate pathPatternPredicate1 = new PathPatternPredicate(PathPattern.create("foo1/**")); PathPatternPredicate pathPatternPredicate2 = new PathPatternPredicate(PathPattern.create("foo2/**")); FilePredicate andPredicate = AndPredicate.create(Arrays.asList(pathPatternPredicate1, FalsePredicate.FALSE, pathPatternPredicate2)); assertThat(andPredicate).isEqualTo(FalsePredicate.FALSE); }
public ShareFetchContext newContext(String groupId, Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchData, List<TopicIdPartition> toForget, ShareRequestMetadata reqMetadata, Boolean isAcknowledgeDataPresent) { ShareFetchContext context; // TopicPartition with maxBytes as 0 should not be added in the cachedPartitions Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> shareFetchDataWithMaxBytes = new HashMap<>(); shareFetchData.forEach((tp, sharePartitionData) -> { if (sharePartitionData.maxBytes > 0) shareFetchDataWithMaxBytes.put(tp, sharePartitionData); }); // If the request's epoch is FINAL_EPOCH or INITIAL_EPOCH, we should remove the existing sessions. Also, start a // new session in case it is INITIAL_EPOCH. Hence, we need to treat them as special cases. if (reqMetadata.isFull()) { ShareSessionKey key = shareSessionKey(groupId, reqMetadata.memberId()); if (reqMetadata.epoch() == ShareRequestMetadata.FINAL_EPOCH) { // If the epoch is FINAL_EPOCH, don't try to create a new session. 
if (!shareFetchDataWithMaxBytes.isEmpty()) { throw Errors.INVALID_REQUEST.exception(); } if (cache.remove(key) == null) { log.error("Share session error for {}: no such share session found", key); throw Errors.SHARE_SESSION_NOT_FOUND.exception(); } else { log.debug("Removed share session with key " + key); } context = new FinalContext(); } else { if (isAcknowledgeDataPresent) { log.error("Acknowledge data present in Initial Fetch Request for group {} member {}", groupId, reqMetadata.memberId()); throw Errors.INVALID_REQUEST.exception(); } if (cache.remove(key) != null) { log.debug("Removed share session with key {}", key); } ImplicitLinkedHashCollection<CachedSharePartition> cachedSharePartitions = new ImplicitLinkedHashCollection<>(shareFetchDataWithMaxBytes.size()); shareFetchDataWithMaxBytes.forEach((topicIdPartition, reqData) -> cachedSharePartitions.mustAdd(new CachedSharePartition(topicIdPartition, reqData, false))); ShareSessionKey responseShareSessionKey = cache.maybeCreateSession(groupId, reqMetadata.memberId(), time.milliseconds(), cachedSharePartitions); if (responseShareSessionKey == null) { log.error("Could not create a share session for group {} member {}", groupId, reqMetadata.memberId()); throw Errors.SHARE_SESSION_NOT_FOUND.exception(); } context = new ShareSessionContext(reqMetadata, shareFetchDataWithMaxBytes); log.debug("Created a new ShareSessionContext with key {} isSubsequent {} returning {}. A new share " + "session will be started.", responseShareSessionKey, false, partitionsToLogString(shareFetchDataWithMaxBytes.keySet())); } } else { // We update the already existing share session. 
synchronized (cache) { ShareSessionKey key = shareSessionKey(groupId, reqMetadata.memberId()); ShareSession shareSession = cache.get(key); if (shareSession == null) { log.error("Share session error for {}: no such share session found", key); throw Errors.SHARE_SESSION_NOT_FOUND.exception(); } if (shareSession.epoch != reqMetadata.epoch()) { log.debug("Share session error for {}: expected epoch {}, but got {} instead", key, shareSession.epoch, reqMetadata.epoch()); throw Errors.INVALID_SHARE_SESSION_EPOCH.exception(); } Map<ShareSession.ModifiedTopicIdPartitionType, List<TopicIdPartition>> modifiedTopicIdPartitions = shareSession.update( shareFetchDataWithMaxBytes, toForget); cache.touch(shareSession, time.milliseconds()); shareSession.epoch = ShareRequestMetadata.nextEpoch(shareSession.epoch); log.debug("Created a new ShareSessionContext for session key {}, epoch {}: " + "added {}, updated {}, removed {}", shareSession.key(), shareSession.epoch, partitionsToLogString(modifiedTopicIdPartitions.get( ShareSession.ModifiedTopicIdPartitionType.ADDED)), partitionsToLogString(modifiedTopicIdPartitions.get(ShareSession.ModifiedTopicIdPartitionType.UPDATED)), partitionsToLogString(modifiedTopicIdPartitions.get(ShareSession.ModifiedTopicIdPartitionType.REMOVED)) ); context = new ShareSessionContext(reqMetadata, shareSession); } } return context; }
// End-to-end exercise of ShareFetchContext.responseSize(): creates an initial session
// (ShareSessionContext), checks the computed response size against the serialized data size
// (+4 per the sizeOf() note), then covers invalid-epoch and unknown-session errors, a
// subsequent session update, a throttled response, and finally a FINAL_EPOCH close
// (FinalContext, cache emptied).
@Test public void testShareFetchContextResponseSize() { Time time = new MockTime(); ShareSessionCache cache = new ShareSessionCache(10, 1000); SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache).withTime(time).build(); Map<Uuid, String> topicNames = new HashMap<>(); Uuid tpId0 = Uuid.randomUuid(); Uuid tpId1 = Uuid.randomUuid(); topicNames.put(tpId0, "foo"); topicNames.put(tpId1, "bar"); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); TopicIdPartition tp2 = new TopicIdPartition(tpId1, new TopicPartition("bar", 0)); TopicIdPartition tp3 = new TopicIdPartition(tpId1, new TopicPartition("bar", 1)); String groupId = "grp"; // Create a new share session with an initial share fetch request Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> reqData2 = new LinkedHashMap<>(); reqData2.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), 100)); reqData2.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), 100)); // For response size expected value calculation ObjectSerializationCache objectSerializationCache = new ObjectSerializationCache(); short version = ApiKeys.SHARE_FETCH.latestVersion(); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); assertEquals(ShareSessionContext.class, context2.getClass()); assertFalse(((ShareSessionContext) context2).isSubsequent()); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData2 = new LinkedHashMap<>(); respData2.put(tp0, new ShareFetchResponseData.PartitionData().setPartitionIndex(0)); respData2.put(tp1, new ShareFetchResponseData.PartitionData().setPartitionIndex(1)); int respSize2 = context2.responseSize(respData2, version); 
ShareFetchResponse resp2 = context2.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData2); assertEquals(Errors.NONE, resp2.error()); assertEquals(respData2, resp2.responseData(topicNames)); // We add 4 here in response to 4 being added in sizeOf() method in ShareFetchResponse class. assertEquals(4 + resp2.data().size(objectSerializationCache, version), respSize2); ShareSessionKey shareSessionKey2 = new ShareSessionKey(groupId, reqMetadata2.memberId()); // Test trying to create a new session with an invalid epoch assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test trying to create a new session with a non-existent session key Uuid memberId4 = Uuid.randomUuid(); assertThrows(ShareSessionNotFoundException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, new ShareRequestMetadata(memberId4, 1), true)); // Continue the first share session we created. 
Map<TopicIdPartition, ShareFetchRequest.SharePartitionData> reqData5 = Collections.singletonMap( tp2, new ShareFetchRequest.SharePartitionData(tp2.topicId(), 100)); ShareFetchContext context5 = sharePartitionManager.newContext(groupId, reqData5, EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); assertEquals(ShareSessionContext.class, context5.getClass()); assertTrue(((ShareSessionContext) context5).isSubsequent()); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData5 = new LinkedHashMap<>(); respData5.put(tp2, new ShareFetchResponseData.PartitionData().setPartitionIndex(0)); int respSize5 = context5.responseSize(respData5, version); ShareFetchResponse resp5 = context5.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData5); assertEquals(Errors.NONE, resp5.error()); // We add 4 here in response to 4 being added in sizeOf() method in ShareFetchResponse class. assertEquals(4 + resp5.data().size(objectSerializationCache, version), respSize5); // Test setting an invalid share session epoch. assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test generating a throttled response for a subsequent share session ShareFetchContext context7 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); int respSize7 = context7.responseSize(respData2, version); ShareFetchResponse resp7 = context7.throttleResponse(100); assertEquals(Errors.NONE, resp7.error()); assertEquals(100, resp7.throttleTimeMs()); // We add 4 here in response to 4 being added in sizeOf() method in ShareFetchResponse class. assertEquals(4 + new ShareFetchResponseData().size(objectSerializationCache, version), respSize7); // Close the subsequent share session. 
ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context8.getClass()); assertEquals(0, cache.size()); LinkedHashMap<TopicIdPartition, ShareFetchResponseData.PartitionData> respData8 = new LinkedHashMap<>(); respData8.put(tp3, new ShareFetchResponseData.PartitionData().setPartitionIndex(1)); int respSize8 = context8.responseSize(respData8, version); ShareFetchResponse resp8 = context8.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData8); assertEquals(Errors.NONE, resp8.error()); // We add 4 here in response to 4 being added in sizeOf() method in ShareFetchResponse class. assertEquals(4 + resp8.data().size(objectSerializationCache, version), respSize8); }
/**
 * Loads a {@link PulsarConfiguration} of the given class from a properties file on disk.
 * The underlying stream is opened and closed here; parsing is delegated to the
 * stream-based overload.
 *
 * @param configFile path to the properties file (must not be null)
 * @param clazz      concrete configuration class to instantiate and populate
 * @throws IOException if the file cannot be opened or read
 */
public static <T extends PulsarConfiguration> T create(String configFile, Class<? extends PulsarConfiguration> clazz) throws IOException, IllegalArgumentException {
    requireNonNull(configFile);
    try (InputStream in = new FileInputStream(configFile)) {
        return create(in, clazz);
    }
}
// Writes a temporary properties file covering a wide sample of ServiceConfiguration keys
// (including empty optional ports and numeric overrides), loads it through the stream-based
// PulsarConfigurationLoader.create, and asserts each value round-trips.
// NOTE(review): the FileInputStream `stream` is never explicitly closed here — fine only if
// create() closes it; consider try-with-resources. Confirm before changing.
@SuppressWarnings("deprecation") @Test public void testPulsarConfigurationLoadingStream() throws Exception { File testConfigFile = new File("tmp." + System.currentTimeMillis() + ".properties"); if (testConfigFile.exists()) { testConfigFile.delete(); } final String metadataStoreUrl = "zk:z1.example.com,z2.example.com,z3.example.com"; PrintWriter printWriter = new PrintWriter(new OutputStreamWriter(new FileOutputStream(testConfigFile))); printWriter.println("metadataStoreUrl=" + metadataStoreUrl); printWriter.println("configurationMetadataStoreUrl=gz1.example.com,gz2.example.com,gz3.example.com/foo"); printWriter.println("brokerDeleteInactiveTopicsEnabled=true"); printWriter.println("statusFilePath=/tmp/status.html"); printWriter.println("managedLedgerDefaultEnsembleSize=1"); printWriter.println("backlogQuotaDefaultLimitGB=18"); printWriter.println("clusterName=usc"); printWriter.println("brokerClientAuthenticationPlugin=test.xyz.client.auth.plugin"); printWriter.println("brokerClientAuthenticationParameters=role:my-role"); printWriter.println("superUserRoles=appid1,appid2"); printWriter.println("brokerServicePort=7777"); printWriter.println("brokerServicePortTls=8777"); printWriter.println("webServicePort="); printWriter.println("webServicePortTls="); printWriter.println("managedLedgerDefaultMarkDeleteRateLimit=5.0"); printWriter.println("managedLedgerDigestType=CRC32C"); printWriter.println("managedLedgerCacheSizeMB="); printWriter.println("bookkeeperDiskWeightBasedPlacementEnabled=true"); printWriter.println("metadataStoreSessionTimeoutMillis=60"); printWriter.println("metadataStoreOperationTimeoutSeconds=600"); printWriter.println("metadataStoreCacheExpirySeconds=500"); printWriter.close(); testConfigFile.deleteOnExit(); InputStream stream = new FileInputStream(testConfigFile); final ServiceConfiguration serviceConfig = PulsarConfigurationLoader.create(stream, ServiceConfiguration.class); assertNotNull(serviceConfig); 
assertEquals(serviceConfig.getMetadataStoreUrl(), metadataStoreUrl); assertTrue(serviceConfig.isBrokerDeleteInactiveTopicsEnabled()); assertEquals(serviceConfig.getBacklogQuotaDefaultLimitGB(), 18); assertEquals(serviceConfig.getClusterName(), "usc"); assertEquals(serviceConfig.getBrokerClientAuthenticationParameters(), "role:my-role"); assertEquals(serviceConfig.getBrokerServicePort().get(), Integer.valueOf((7777))); assertEquals(serviceConfig.getBrokerServicePortTls().get(), Integer.valueOf((8777))); assertFalse(serviceConfig.getWebServicePort().isPresent()); assertFalse(serviceConfig.getWebServicePortTls().isPresent()); assertEquals(serviceConfig.getManagedLedgerDigestType(), DigestType.CRC32C); assertTrue(serviceConfig.getManagedLedgerCacheSizeMB() > 0); assertTrue(serviceConfig.isBookkeeperDiskWeightBasedPlacementEnabled()); assertEquals(serviceConfig.getMetadataStoreSessionTimeoutMillis(), 60); assertEquals(serviceConfig.getMetadataStoreOperationTimeoutSeconds(), 600); assertEquals(serviceConfig.getMetadataStoreCacheExpirySeconds(), 500); }
// Intentional no-op: this implementation has nothing to tear down on separation
// (verified by the accompanying test, which expects zero interactions).
@Override public void onSeparate() {}
// onSeparate() must be a pure no-op: no calls reach the parent listener or the dismiss action.
@Test public void testOnSeparate() { mUnderTest.onSeparate(); Mockito.verifyZeroInteractions(mMockParentListener, mMockKeyboardDismissAction); }
// REST endpoint for posting timeline entities. Rejects anonymous callers (403) since entity
// ownership must be attributable; re-throws BadRequestException as-is so the client sees 400;
// any other failure is logged and surfaced as a 500.
@POST @Consumes({ MediaType.APPLICATION_JSON /* , MediaType.APPLICATION_XML */}) @Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8 /* , MediaType.APPLICATION_XML */}) public TimelinePutResponse postEntities( @Context HttpServletRequest req, @Context HttpServletResponse res, TimelineEntities entities) { init(res); UserGroupInformation callerUGI = getUser(req); if (callerUGI == null) { String msg = "The owner of the posted timeline entities is not set"; LOG.error(msg); throw new ForbiddenException(msg); } try { return timelineDataManager.postEntities(entities, callerUGI); } catch (BadRequestException bre) { throw bre; } catch (Exception e) { LOG.error("Error putting entities", e); throw new WebApplicationException(e, Response.Status.INTERNAL_SERVER_ERROR); } }
// POST without user.name must be rejected (403); with user.name the put succeeds with zero
// errors and the entity is then retrievable via GET by type and id.
@Test void testPostEntities() throws Exception { TimelineEntities entities = new TimelineEntities(); TimelineEntity entity = new TimelineEntity(); entity.setEntityId("test id 1"); entity.setEntityType("test type 1"); entity.setStartTime(System.currentTimeMillis()); entity.setDomainId("domain_id_1"); entities.addEntity(entity); WebResource r = resource(); // No owner, will be rejected ClientResponse response = r.path("ws").path("v1").path("timeline") .accept(MediaType.APPLICATION_JSON) .type(MediaType.APPLICATION_JSON) .post(ClientResponse.class, entities); assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, response.getType().toString()); assertResponseStatusCode(Status.FORBIDDEN, response.getStatusInfo()); response = r.path("ws").path("v1").path("timeline") .queryParam("user.name", "tester") .accept(MediaType.APPLICATION_JSON) .type(MediaType.APPLICATION_JSON) .post(ClientResponse.class, entities); assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, response.getType().toString()); TimelinePutResponse putResposne = response.getEntity(TimelinePutResponse.class); assertNotNull(putResposne); assertEquals(0, putResposne.getErrors().size()); // verify the entity exists in the store response = r.path("ws").path("v1").path("timeline") .path("test type 1").path("test id 1") .accept(MediaType.APPLICATION_JSON) .get(ClientResponse.class); assertEquals(MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, response.getType().toString()); entity = response.getEntity(TimelineEntity.class); assertNotNull(entity); assertEquals("test id 1", entity.getEntityId()); assertEquals("test type 1", entity.getEntityType()); }
// Negation of isEmpty(): true when the collection has at least one element. Per the
// accompanying test, a null collection is treated as empty (returns false).
public static boolean isNotEmpty(@SuppressWarnings("rawtypes") Collection collection) { return !isEmpty(collection); }
// Covers null, empty, and singleton inputs: only the singleton is "not empty".
@Test void collectionIsNotEmpty() { assertThat(CollectionUtil.isNotEmpty(null)).isFalse(); assertThat(CollectionUtil.isNotEmpty(Collections.emptyList())).isFalse(); assertThat(CollectionUtil.isNotEmpty(Collections.singletonList("test"))).isTrue(); }
/**
 * Collects the actual table names of every table mapper whose logic name matches the
 * given one, ignoring case.
 *
 * @param logicTableName logic table name to match
 * @return set of matching actual table names (possibly empty)
 */
public Set<String> getActualTableNames(final String logicTableName) {
    return tableMappers.stream()
            // Keep the argument as the receiver so a null mapper logic name yields
            // "no match" rather than an NPE, exactly as before.
            .filter(mapper -> logicTableName.equalsIgnoreCase(mapper.getLogicName()))
            .map(RouteMapper::getActualName)
            .collect(Collectors.toSet());
}
// The logic table must map to exactly its two configured actual tables.
@Test void assertGetActualTableNames() { Set<String> actual = routeUnit.getActualTableNames(LOGIC_TABLE); assertThat(actual.size(), is(2)); assertTrue(actual.contains(ACTUAL_TABLE_0)); assertTrue(actual.contains(ACTUAL_TABLE_1)); }
// Deprecated bridge: adapts the old ValueTransformerSupplier API to the key-aware variant
// and delegates to doFlatTransformValues with an empty name. Null supplier fails fast.
@Override @Deprecated public <VR> KStream<K, VR> flatTransformValues(final org.apache.kafka.streams.kstream.ValueTransformerSupplier<? super V, Iterable<VR>> valueTransformerSupplier, final String... stateStoreNames) { Objects.requireNonNull(valueTransformerSupplier, "valueTransformerSupplier can't be null"); return doFlatTransformValues( toValueTransformerWithKeySupplier(valueTransformerSupplier), NamedInternal.empty(), stateStoreNames); }
// A null entry inside the stateStoreNames varargs must be rejected with a descriptive NPE.
@Test @SuppressWarnings("deprecation") public void shouldNotAllowNullStoreNameOnFlatTransformValuesWithFlatValueWithKeySupplier() { final NullPointerException exception = assertThrows( NullPointerException.class, () -> testStream.flatTransformValues( flatValueTransformerWithKeySupplier, (String) null)); assertThat(exception.getMessage(), equalTo("stateStoreNames can't contain `null` as store name")); }
// Overridden only to attach @DefaultClass so Joran configuration defaults to
// MDCBasedDiscriminator; behavior is the superclass setter unchanged.
@Override @DefaultClass(MDCBasedDiscriminator.class) public void setDiscriminator(Discriminator<ILoggingEvent> discriminator) { super.setDiscriminator(discriminator); }
// (Disabled) programmatic construction of a SiftingAppender: an MDC-based discriminator on
// key "SKEY" fans out to per-value rolling file appenders built by an AppenderFactory, then
// two log events under different MDC values exercise the sifting. Kept as a usage example.
@Disabled @Test public void programmicSiftingAppender() { SiftingAppender connectorAppender = new SiftingAppender(); connectorAppender.setContext(loggerContext); connectorAppender.setName("SIFTING_APPENDER"); MDCBasedDiscriminator discriminator = new MDCBasedDiscriminator(); discriminator.setKey("SKEY"); discriminator.setDefaultValue("DEF_KEY"); discriminator.start(); connectorAppender.setDiscriminator(discriminator); connectorAppender.setAppenderFactory(new AppenderFactory<ILoggingEvent>() { @Override public Appender<ILoggingEvent> buildAppender(Context context, String discriminatingValue) throws JoranException { RollingFileAppender<ILoggingEvent> appender = new RollingFileAppender<ILoggingEvent>(); appender.setName("ROLLING_APPENDER_" + discriminatingValue); appender.setContext(context); appender.setFile("/var/logs/active_" + discriminatingValue + ".log"); TimeBasedRollingPolicy<ILoggingEvent> policy = new TimeBasedRollingPolicy<ILoggingEvent>(); policy.setContext(context); policy.setMaxHistory(365); policy.setFileNamePattern(CoreTestConstants.OUTPUT_DIR_PREFIX + "/logback1127/" + discriminatingValue + "_%d{yyyy_MM_dd}_%i.log"); policy.setParent(appender); policy.setCleanHistoryOnStart(true); SizeAndTimeBasedFNATP<ILoggingEvent> innerpolicy = new SizeAndTimeBasedFNATP<ILoggingEvent>(); innerpolicy.setContext(context); innerpolicy.setMaxFileSize(FileSize.valueOf("5KB")); innerpolicy.setTimeBasedRollingPolicy(policy); policy.setTimeBasedFileNamingAndTriggeringPolicy(innerpolicy); policy.start(); appender.setRollingPolicy(policy); PatternLayoutEncoder pl = new PatternLayoutEncoder(); pl.setContext(context); pl.setPattern("%d{yyyy/MM/dd'T'HH:mm:ss} %-5level - %msg\n"); pl.start(); appender.setEncoder(pl); appender.start(); return appender; } }); connectorAppender.start(); ch.qos.logback.classic.Logger logger = loggerContext.getLogger("org.test"); logger.addAppender(connectorAppender); logger.setLevel(Level.DEBUG); logger.setAdditive(false); 
logbackMDCAdapter.put("SKEY", "K1"); logger.info("bla1"); logbackMDCAdapter.clear(); logbackMDCAdapter.put("SKEY", "K2"); logger.info("bla2"); logbackMDCAdapter.clear(); //StatusPrinter.print(loggerContext); }
/**
 * Serializes this VXLAN header followed by its payload (if any) into a new byte array.
 * The payload's parent pointer is set before serializing so it can reference this header.
 *
 * @return the serialized bytes: flags, rsvd1, vni, rsvd2, then payload bytes
 */
@Override
public byte[] serialize() {
    byte[] payloadBytes = null;
    if (this.payload != null) {
        this.payload.setParent(this);
        payloadBytes = this.payload.serialize();
    }
    final int totalLength = VXLAN_HEADER_LENGTH + (payloadBytes == null ? 0 : payloadBytes.length);
    final byte[] serialized = new byte[totalLength];
    final ByteBuffer buffer = ByteBuffer.wrap(serialized);
    buffer.put(this.flags);
    buffer.put(this.rsvd1);
    buffer.put(this.vni);
    buffer.put(this.rsvd2);
    if (payloadBytes != null) {
        buffer.put(payloadBytes);
    }
    return serialized;
}
// Serialization of a VXLAN header with known flags/VNI must match the expected byte fixture.
@Test public void testSerialize() { VXLAN vxlan = new VXLAN(); vxlan.setFlag((byte) TEST_FLAGS); vxlan.setVni(TEST_VNI1); vxlan.setParent(UDP_HDR); assertArrayEquals("Serialized packet is not matched", BYTE_PACKET_VXLAN, vxlan.serialize()); }
/**
 * Emits a single document's metadata under the given emit key by wrapping it in an
 * {@link EmitData} with an empty emitter name and delegating to the batch emit overload.
 * A null or empty metadata list is silently ignored.
 *
 * @param emitKey      key identifying the document being emitted
 * @param metadataList metadata objects for the document; may be null or empty
 * @param parseContext parse context (unused here; part of the interface)
 * @throws IOException          if the underlying emit fails on I/O
 * @throws TikaEmitterException if the underlying emitter rejects the data
 */
@Override
public void emit(String emitKey, List<Metadata> metadataList, ParseContext parseContext)
        throws IOException, TikaEmitterException {
    // FIX: idiomatic isEmpty() instead of size() < 1.
    if (metadataList == null || metadataList.isEmpty()) {
        return;
    }
    List<EmitData> emitDataList = new ArrayList<>();
    emitDataList.add(new EmitData(new EmitKey("", emitKey), metadataList));
    emit(emitDataList);
}
// Round-trips rows through the JDBC emitter into an embedded H2 database: emits five
// metadata rows (booleans, strings, ints, longs, and timestamps with/without timezone),
// then reads them back with plain JDBC and checks every column, including a UTC-rendered
// timestamp prefix for the date rows.
@Test public void testBasic(@TempDir Path tmpDir) throws Exception { Files.createDirectories(tmpDir.resolve("db")); Path dbDir = tmpDir.resolve("db/h2"); Path config = tmpDir.resolve("tika-config.xml"); String connectionString = "jdbc:h2:file:" + dbDir.toAbsolutePath(); writeConfig("/configs/tika-config-jdbc-emitter.xml", connectionString, config); EmitterManager emitterManager = EmitterManager.load(config); Emitter emitter = emitterManager.getEmitter(); List<String[]> data = new ArrayList<>(); data.add(new String[]{"k1", "true", "k2", "some string1", "k3", "4", "k4", "100"}); data.add(new String[]{"k1", "false", "k2", "some string2", "k3", "5", "k4", "101"}); data.add(new String[]{"k1", "true", "k2", "some string3", "k3", "6", "k4", "102"}); //test dates with and without timezones data.add(new String[]{"k1", "false", "k2", "some string4", "k3", "7", "k4", "103", "k5", "100002", "k6", "2022-11-04T17:10:15Z"}); data.add(new String[]{"k1", "true", "k2", "some string5", "k3", "8", "k4", "104", "k5", "100002", "k6", "2022-11-04T17:10:15"}); int id = 0; for (String[] d : data) { emitter.emit("id" + id++, Collections.singletonList(m(d)), new ParseContext()); } try (Connection connection = DriverManager.getConnection(connectionString)) { try (Statement st = connection.createStatement()) { try (ResultSet rs = st.executeQuery("select * from test")) { int rows = 0; while (rs.next()) { assertEquals("id" + rows, rs.getString(1)); assertEquals(rows % 2 == 0, rs.getBoolean(2)); assertEquals("some string" + (rows + 1), rs.getString(3)); assertEquals(rows + 4, rs.getInt(4)); assertEquals(100 + rows, rs.getLong(5)); if (rows > 2) { assertEquals(100002, rs.getLong(6)); Timestamp timestamp = rs.getTimestamp(7); String str = timestamp.toInstant().atZone(ZoneId.of("UTC")).toString(); //TODO fix this to work in other timezones assertTrue(str.startsWith("2022-11")); } rows++; } } } } }
/**
 * Returns the index of the single set bit if exactly one partition bit is set,
 * otherwise -1 (covers both "no bits set" and "more than one bit set": when the
 * set is empty, nextSetBit(0) is -1 and the second probe is also -1, so -1 is returned).
 */
public int solePartition() {
    final int first = bitSet.nextSetBit(0);
    final int second = bitSet.nextSetBit(first + 1);
    return second < 0 ? first : -1;
}
// Single-bit sets return that bit's index (including boundaries 0 and 9); multi-bit sets return -1.
@Test public void test_solePartition() { assertEquals(0, createWithPartitionCount(10, 0).solePartition()); assertEquals(1, createWithPartitionCount(10, 1).solePartition()); assertEquals(9, createWithPartitionCount(10, 9).solePartition()); assertEquals(-1, createWithPartitionCount(10, 0, 1).solePartition()); assertEquals(-1, createWithPartitionCount(10, 0, 9).solePartition()); }
// Fluent setter: assigns userStatus and returns this User for call chaining.
public User userStatus(Integer userStatus) { this.userStatus = userStatus; return this; }
// NOTE(review): empty placeholder (likely generator-produced). Should assert that
// userStatus(...) stores the value and returns the same instance for chaining.
@Test public void userStatusTest() { // TODO: test userStatus }
@SuppressWarnings("checkstyle:HiddenField") public AwsCredentialsProvider credentialsProvider( String accessKeyId, String secretAccessKey, String sessionToken) { if (!Strings.isNullOrEmpty(accessKeyId) && !Strings.isNullOrEmpty(secretAccessKey)) { if (Strings.isNullOrEmpty(sessionToken)) { return StaticCredentialsProvider.create( AwsBasicCredentials.create(accessKeyId, secretAccessKey)); } else { return StaticCredentialsProvider.create( AwsSessionCredentials.create(accessKeyId, secretAccessKey, sessionToken)); } } if (!Strings.isNullOrEmpty(this.clientCredentialsProvider)) { return credentialsProvider(this.clientCredentialsProvider); } // Create a new credential provider for each client return DefaultCredentialsProvider.builder().build(); }
// With no keys, token, or provider name configured, the SDK default chain must be used.
@Test public void testDefaultCredentialsConfiguration() { AwsClientProperties awsClientProperties = new AwsClientProperties(); AwsCredentialsProvider credentialsProvider = awsClientProperties.credentialsProvider(null, null, null); assertThat(credentialsProvider) .as("Should use default credentials if nothing is set") .isInstanceOf(DefaultCredentialsProvider.class); }
// Returns the Streams config props extended with a resource-label application id and,
// where applicable, Confluent telemetry settings. Result is unmodifiable.
public Map<String, Object> getKsqlStreamConfigProps(final String applicationId) { final Map<String, Object> map = new HashMap<>(getKsqlStreamConfigProps()); map.put( MetricCollectors.RESOURCE_LABEL_PREFIX + StreamsConfig.APPLICATION_ID_CONFIG, applicationId ); // Streams client metrics aren't used in Confluent deployment possiblyConfigureConfluentTelemetry(map); return Collections.unmodifiableMap(map); }
// A ksql.streams.consumer.-prefixed property must surface under the consumer.-prefixed key
// only — neither unprefixed nor still carrying the ksql.streams. prefix.
@Test public void shouldSetStreamsConfigConsumerKsqlPrefixedProperties() { final KsqlConfig ksqlConfig = new KsqlConfig( Collections.singletonMap( KsqlConfig.KSQL_STREAMS_PREFIX + StreamsConfig.CONSUMER_PREFIX + ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "100")); assertThat(ksqlConfig.getKsqlStreamConfigProps() .get(StreamsConfig.CONSUMER_PREFIX + ConsumerConfig.FETCH_MIN_BYTES_CONFIG), equalTo(100)); assertThat(ksqlConfig.getKsqlStreamConfigProps() .get(ConsumerConfig.FETCH_MIN_BYTES_CONFIG), is(nullValue())); assertThat(ksqlConfig.getKsqlStreamConfigProps() .get(KsqlConfig.KSQL_STREAMS_PREFIX + StreamsConfig.CONSUMER_PREFIX + ConsumerConfig.FETCH_MIN_BYTES_CONFIG), is(nullValue())); }
// Writes a record to the accumulator. A broadcast on a non-broadcast-only partition fans the
// record out to every subpartition; the buffer's position is captured once and restored after
// each receive() so every subpartition reads the record from the same offset.
public void write( ByteBuffer record, TieredStorageSubpartitionId subpartitionId, Buffer.DataType dataType, boolean isBroadcast) throws IOException { if (isBroadcast && !isBroadcastOnly) { int currentPosition = record.position(); for (int i = 0; i < numSubpartitions; ++i) { // As the tiered storage subpartition ID is created only for broadcast records, // which are fewer than normal records, the performance impact of generating new // TieredStorageSubpartitionId objects is expected to be manageable. If the // performance is significantly affected, this logic will be optimized accordingly. bufferAccumulator.receive( record, new TieredStorageSubpartitionId(i), dataType, isBroadcast); record.position(currentPosition); } } else { bufferAccumulator.receive(record, subpartitionId, dataType, isBroadcast); } }
// Writing with no storage tiers configured must fail with "Failed to choose a storage tier".
@TestTemplate void testWriteRecordsToEmptyStorageTiers() { int numSubpartitions = 10; int bufferSize = 1024; Random random = new Random(); TieredStorageProducerClient tieredStorageProducerClient = createTieredStorageProducerClient(numSubpartitions, Collections.emptyList()); assertThatThrownBy( () -> tieredStorageProducerClient.write( generateRandomData(bufferSize, random), new TieredStorageSubpartitionId(0), Buffer.DataType.DATA_BUFFER, isBroadcast)) .isInstanceOf(RuntimeException.class) .hasMessageContaining("Failed to choose a storage tier"); }
/**
 * Generates a random (type 4) UUID rendered without the '-' separators,
 * via the boolean-flagged toString helper.
 */
public static String simpleUUID() {
    final UUID uuid = UUID.randomUUID();
    return toString(uuid, true);
}
// A simple UUID must be non-null and contain no '-' separators.
@Test public void simpleUUIDTest() { String simpleUUID = IdUtil.simpleUUID(); Assert.assertNotNull(simpleUUID); Assert.assertFalse(simpleUUID.contains("-")); }
/**
 * Reads one byte, returned as an unsigned int in [0, 255], advancing the position;
 * returns -1 once the position reaches the limit (end of stream).
 */
@Override
public int read() {
    if (mPosition >= mLimit) {
        return -1;
    }
    return mData[mPosition++] & 0xff;
}
// A negative offset passed to read(byte[], off, len) must raise IndexOutOfBoundsException.
@Test void testWrongOffset() { Assertions.assertThrows(IndexOutOfBoundsException.class, () -> { UnsafeByteArrayInputStream stream = new UnsafeByteArrayInputStream("abc".getBytes()); stream.read(new byte[1], -1, 1); }); }
// Convenience overload: delegates with the current wall-clock time and this member's node id.
IdBatchAndWaitTime newIdBaseLocal(int batchSize) { return newIdBaseLocal(Clock.currentTimeMillis(), getNodeId(), batchSize); }
// A timestamp one tick before the epoch start must yield a negative timestamp component,
// with the node id (1234) still encoded in the low bits.
@Test public void test_negativeId() { long id = gen.newIdBaseLocal(DEFAULT_EPOCH_START - 1, 1234, 10).idBatch.base(); assertEquals((-1 << DEFAULT_BITS_SEQUENCE + DEFAULT_BITS_NODE_ID) + 1234, id); }
// Best-effort legacy decryption: delegates to tryDecrypt and deliberately swallows any
// failure (logging it) by returning null, so callers can treat "undecryptable" as absent.
@Nullable public static String decrypt(String cipherText, String encryptionKey, String salt) { try { return tryDecrypt(cipherText, encryptionKey, salt); } catch (Exception ex) { LOG.error("Could not decrypt (legacy) value.", ex); return null; } }
@Test public void testDecryptStaticPKCS5PaddedCipherText() { // The cipherText was encrypted using an AES/CBC/PKCS5Padding transformation. // If this test fails, we changed the transformation. If the change was intentional, this test must // be updated, and we need to create a migration to re-encrypt all existing secrets in the database. // Otherwise, existing secrets cannot be decrypted anymore! final String cipherText = "f0b3e951a4b4537e1466a9cd9621eabb"; final String salt = "612ac41505dc0120"; final String decrypt = AESTools.decrypt(cipherText, "1234567890123456", salt); Assert.assertEquals("I am secret", decrypt); }
public static Function.FunctionMetaData changeFunctionInstanceStatus(Function.FunctionMetaData functionMetaData, Integer instanceId, boolean start) { Function.FunctionMetaData.Builder builder = functionMetaData.toBuilder() .setVersion(functionMetaData.getVersion() + 1); if (builder.getInstanceStatesMap() == null || builder.getInstanceStatesMap().isEmpty()) { for (int i = 0; i < functionMetaData.getFunctionDetails().getParallelism(); ++i) { builder.putInstanceStates(i, Function.FunctionState.RUNNING); } } Function.FunctionState state = start ? Function.FunctionState.RUNNING : Function.FunctionState.STOPPED; if (instanceId < 0) { for (int i = 0; i < functionMetaData.getFunctionDetails().getParallelism(); ++i) { builder.putInstanceStates(i, state); } } else if (instanceId < builder.getFunctionDetails().getParallelism()) { builder.putInstanceStates(instanceId, state); } return builder.build(); }
@Test public void testChangeState() { long version = 5; Function.FunctionMetaData metaData = Function.FunctionMetaData.newBuilder().setFunctionDetails( Function.FunctionDetails.newBuilder().setName("func-1").setParallelism(2)).setVersion(version).build(); Function.FunctionMetaData newMetaData = FunctionMetaDataUtils.changeFunctionInstanceStatus(metaData, 0, false); Assert.assertTrue(newMetaData.getInstanceStatesMap() != null); Assert.assertEquals(newMetaData.getInstanceStatesMap().size(), 2); Assert.assertEquals(newMetaData.getInstanceStatesMap().get(0), Function.FunctionState.STOPPED); Assert.assertEquals(newMetaData.getInstanceStatesMap().get(1), Function.FunctionState.RUNNING); Assert.assertEquals(newMetaData.getVersion(), version + 1); // Nothing should happen newMetaData = FunctionMetaDataUtils.changeFunctionInstanceStatus(newMetaData, 3, false); Assert.assertTrue(newMetaData.getInstanceStatesMap() != null); Assert.assertEquals(newMetaData.getInstanceStatesMap().size(), 2); Assert.assertEquals(newMetaData.getInstanceStatesMap().get(0), Function.FunctionState.STOPPED); Assert.assertEquals(newMetaData.getInstanceStatesMap().get(1), Function.FunctionState.RUNNING); Assert.assertEquals(newMetaData.getVersion(), version + 2); // Change one more newMetaData = FunctionMetaDataUtils.changeFunctionInstanceStatus(newMetaData, 1, false); Assert.assertTrue(newMetaData.getInstanceStatesMap() != null); Assert.assertEquals(newMetaData.getInstanceStatesMap().size(), 2); Assert.assertEquals(newMetaData.getInstanceStatesMap().get(0), Function.FunctionState.STOPPED); Assert.assertEquals(newMetaData.getInstanceStatesMap().get(1), Function.FunctionState.STOPPED); Assert.assertEquals(newMetaData.getVersion(), version + 3); // Change all more newMetaData = FunctionMetaDataUtils.changeFunctionInstanceStatus(newMetaData, -1, true); Assert.assertTrue(newMetaData.getInstanceStatesMap() != null); Assert.assertEquals(newMetaData.getInstanceStatesMap().size(), 2); 
Assert.assertEquals(newMetaData.getInstanceStatesMap().get(0), Function.FunctionState.RUNNING); Assert.assertEquals(newMetaData.getInstanceStatesMap().get(1), Function.FunctionState.RUNNING); Assert.assertEquals(newMetaData.getVersion(), version + 4); }
@SuppressWarnings("unchecked") @Override public IterableOfProtosSubject<M> valuesForKey(@Nullable Object key) { return check("valuesForKey(%s)", key) .about(protos()) .that(((Multimap<Object, M>) actual).get(key)); }
@Test public void testFluent_valuesForKey() { expectThat(multimapOf(1, message1, 1, message2, 2, message1)) .valuesForKey(1) .ignoringFields(ignoreFieldNumber) .containsExactly(eqIgnoredMessage2, eqIgnoredMessage1); expectThat(multimapOf(1, message1, 1, message2, 2, message1)) .valuesForKey(2) .ignoringRepeatedFieldOrder() .containsExactly(eqRepeatedMessage1); expectFailureWhenTesting() .that(multimapOf(1, message1, 1, message2, 2, message1)) .valuesForKey(1) .ignoringFields(ignoreFieldNumber) .containsExactly(eqRepeatedMessage1, eqRepeatedMessage2); expectThatFailure().isNotNull(); }
public static String getKey(String dataId, String group) { StringBuilder sb = new StringBuilder(); urlEncode(dataId, sb); sb.append('+'); urlEncode(group, sb); return sb.toString(); }
@Test void testGetKeyByTwoParams() { // Act final String actual = GroupKey2.getKey("3", "'"); // Assert result assertEquals("3+'", actual); }
@ProtoFactory public static MediaType fromString(String tree) { if (tree == null || tree.isEmpty()) throw CONTAINER.missingMediaType(); Matcher matcher = TREE_PATTERN.matcher(tree); return parseSingleMediaType(tree, matcher, false); }
@Test(expected = EncodingException.class) public void testParsingWhitespaceInSubtype() { MediaType.fromString("application/ json"); }
@Override public void decrement(@Nonnull UUID txnId) { decrement0(txnId, true); }
@Test public void null_uuid_does_not_decrement_counter() { long valueBefore = nodeWideUsedCapacityCounter.currentValue(); counter.decrement(TxnReservedCapacityCounter.NULL_UUID); long valueAfter = nodeWideUsedCapacityCounter.currentValue(); assertEquals(valueBefore, valueAfter); }
public String convert(ILoggingEvent event) { StringBuilder sb = new StringBuilder(); int pri = facility + LevelToSyslogSeverity.convert(event); sb.append("<"); sb.append(pri); sb.append(">"); sb.append(computeTimeStampString(event.getTimeStamp())); sb.append(' '); sb.append(localHostName); sb.append(' '); return sb.toString(); }
@Test public void datesGreaterThanTen() { LoggingEvent le = createLoggingEvent(); calendar.set(2012, Calendar.OCTOBER, 11, 22, 14, 15); le.setTimeStamp(calendar.getTimeInMillis()); assertEquals("<191>Oct 11 22:14:15 " + HOSTNAME + " ", converter.convert(le)); }
public static TObjectDependencyRes listObjectDependencies(TObjectDependencyReq req) { TAuthInfo auth = req.getAuth_info(); TObjectDependencyRes response = new TObjectDependencyRes(); UserIdentity currentUser; if (auth.isSetCurrent_user_ident()) { currentUser = UserIdentity.fromThrift(auth.getCurrent_user_ident()); } else { currentUser = UserIdentity.createAnalyzedUserIdentWithIp(auth.getUser(), auth.getUser_ip()); } // list dependencies of mv Locker locker = new Locker(); Collection<Database> dbs = GlobalStateMgr.getCurrentState().getLocalMetastore().getFullNameToDb().values(); for (Database db : CollectionUtils.emptyIfNull(dbs)) { String catalog = Optional.ofNullable(db.getCatalogName()) .orElse(InternalCatalog.DEFAULT_INTERNAL_CATALOG_NAME); locker.lockDatabase(db, LockType.READ); try { for (Table table : db.getTables()) { // If it is not a materialized view, we do not need to verify permissions if (!table.isMaterializedView()) { continue; } // Only show tables with privilege try { Authorizer.checkAnyActionOnTableLikeObject(currentUser, null, db.getFullName(), table); } catch (AccessDeniedException e) { continue; } MaterializedView mv = (MaterializedView) table; for (BaseTableInfo refObj : CollectionUtils.emptyIfNull(mv.getBaseTableInfos())) { TObjectDependencyItem item = new TObjectDependencyItem(); item.setObject_id(mv.getId()); item.setObject_name(mv.getName()); item.setDatabase(db.getFullName()); item.setCatalog(catalog); item.setObject_type(mv.getType().toString()); item.setRef_object_id(refObj.getTableId()); item.setRef_database(refObj.getDbName()); item.setRef_catalog(refObj.getCatalogName()); Optional<Table> refTable = MvUtils.getTableWithIdentifier(refObj); item.setRef_object_type(getRefObjectType(refTable, mv.getName())); // If the ref table is dropped/swapped/renamed, the actual info would be inconsistent with // BaseTableInfo, so we use the source-of-truth information if (refTable.isEmpty()) { item.setRef_object_name(refObj.getTableName()); } else { 
item.setRef_object_name(refTable.get().getName()); } response.addToItems(item); } } } finally { locker.unLockDatabase(db, LockType.READ); } } return response; }
@Test public void testUnknownCatalogObjectDependencies() throws Exception { String mvName = "test.iceberg_mv"; starRocksAssert.withMaterializedView("create materialized view " + mvName + " " + "partition by str2date(d,'%Y-%m-%d') " + "distributed by hash(a) " + "REFRESH DEFERRED MANUAL " + "PROPERTIES (\n" + "'replication_num' = '1'" + ") " + "as select a, b, d, bitmap_union(to_bitmap(t1.c))" + " from iceberg0.partitioned_db.part_tbl1 as t1 " + " group by a, b, d;"); String grantSql1 = "GRANT ALL ON MATERIALIZED VIEW test.iceberg_mv TO USER `test_mv`@`%`;"; DDLStmtExecutor.execute(UtFrameUtils.parseStmtWithNewParser(grantSql1, connectContext), connectContext); TObjectDependencyReq dependencyReq = buildRequest(); TObjectDependencyRes objectDependencyRes = SysObjectDependencies.listObjectDependencies(dependencyReq); Assert.assertTrue(objectDependencyRes.getItems().stream().anyMatch(x -> x.getRef_object_type().equals("ICEBERG"))); }
@Override public Map<K, V> getCachedMap() { return localCacheView.getCachedMap(); }
@Test public void testGetStoringCacheMiss() { RLocalCachedMap<String, Integer> map = redisson.getLocalCachedMap(LocalCachedMapOptions.<String, Integer>name("test").storeCacheMiss(true)); Map<String, Integer> cache = map.getCachedMap(); assertThat(map.get("19")).isNull(); Awaitility.await().atMost(Durations.ONE_SECOND) .untilAsserted(() -> assertThat(cache.size()).isEqualTo(1)); }
@InvokeOnHeader(Web3jConstants.ETH_GET_TRANSACTION_BY_BLOCK_NUMBER_AND_INDEX) void ethGetTransactionByBlockNumberAndIndex(Message message) throws IOException { DefaultBlockParameter atBlock = toDefaultBlockParameter(message.getHeader(Web3jConstants.AT_BLOCK, configuration::getAtBlock, String.class)); BigInteger transactionIndex = message.getHeader(Web3jConstants.INDEX, configuration::getIndex, BigInteger.class); Request<?, EthTransaction> request = web3j.ethGetTransactionByBlockNumberAndIndex(atBlock, transactionIndex); setRequestId(message, request); EthTransaction response = request.send(); boolean hasError = checkForError(message, response); if (!hasError) { message.setBody(response.getTransaction().isPresent() ? response.getTransaction().get() : null); } }
@Test public void ethGetTransactionByBlockNumberAndIndexTest() throws Exception { EthTransaction response = Mockito.mock(EthTransaction.class); Mockito.when(mockWeb3j.ethGetTransactionByBlockNumberAndIndex(any(), any())).thenReturn(request); Mockito.when(request.send()).thenReturn(response); Transaction transaction = Mockito.mock(Transaction.class); Optional<Transaction> optional = Optional.ofNullable(transaction); Mockito.when(response.getTransaction()).thenReturn(optional); Exchange exchange = createExchangeWithBodyAndHeader(null, OPERATION, Web3jConstants.ETH_GET_TRANSACTION_BY_BLOCK_NUMBER_AND_INDEX); template.send(exchange); Transaction body = exchange.getIn().getBody(Transaction.class); assertNotNull(body); }
public FEELFnResult<TemporalAmount> invoke(@ParameterName( "from" ) String val) { if ( val == null ) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null")); } try { // try to parse as days/hours/minute/seconds return FEELFnResult.ofResult( Duration.parse( val ) ); } catch( DateTimeParseException e ) { // if it failed, try to parse as years/months try { return FEELFnResult.ofResult(ComparablePeriod.parse(val).normalized()); } catch( DateTimeParseException e2 ) { // failed to parse, so return null according to the spec return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "date-parsing exception", new RuntimeException(new Throwable() { public final List<Throwable> causes = Arrays.asList( new Throwable[]{e, e2} ); } ))); } } }
@Test void invokeParamStringDuration() { FunctionTestUtil.assertResult(durationFunction.invoke("P2D"), Duration.of(2, ChronoUnit.DAYS)); FunctionTestUtil.assertResult(durationFunction.invoke("P2DT3H"), Duration.of(2, ChronoUnit.DAYS).plusHours(3)); FunctionTestUtil.assertResult( durationFunction.invoke("P2DT3H28M"), Duration.of(2, ChronoUnit.DAYS).plusHours(3).plusMinutes(28)); FunctionTestUtil.assertResult( durationFunction.invoke("P2DT3H28M15S"), Duration.of(2, ChronoUnit.DAYS).plusHours(3).plusMinutes(28).plusSeconds(15)); }
@Override public Map<String, String> loadGlobalSettings() { return load(null); }
@Test public void loadGlobalSettings() throws IOException { WsResponse response = mock(WsResponse.class); PipedOutputStream out = new PipedOutputStream(); PipedInputStream in = new PipedInputStream(out); Settings.ValuesWsResponse.newBuilder() .addSettings(Settings.Setting.newBuilder() .setKey("abc").setValue("def") .build()) .addSettings(Settings.Setting.newBuilder() .setKey("123").setValue("456") .build()) .build() .writeTo(out); out.close(); when(response.contentStream()).thenReturn(in); when(wsClient.call(any())).thenReturn(response); Map<String, String> result = underTest.loadGlobalSettings(); ArgumentCaptor<GetRequest> argumentCaptor = ArgumentCaptor.forClass(GetRequest.class); verify(wsClient, times(1)).call(argumentCaptor.capture()); assertThat(argumentCaptor.getValue().getPath()).isEqualTo("api/settings/values.protobuf"); assertThat(result) .isNotNull() .hasSize(2) .containsEntry("abc", "def") .containsEntry("123", "456"); }
@Override public FieldList addFields(int... fieldIDs) { if (fieldIDs == null || fieldIDs.length == 0) { return this; } if (size() == 0) { return new FieldList(fieldIDs); } else { ArrayList<Integer> list = new ArrayList<Integer>(size() + fieldIDs.length); list.addAll(this.collection); for (int i = 0; i < fieldIDs.length; i++) { list.add(fieldIDs[i]); } return new FieldList(Collections.unmodifiableList(list)); } }
@Test void testAddSetToList() { check(new FieldList().addFields(new FieldSet(1)).addFields(2), 1, 2); check(new FieldList().addFields(1).addFields(new FieldSet(2)), 1, 2); check(new FieldList().addFields(new FieldSet(2)), 2); }
private PDStructureTreeRoot getStructureTreeRoot() { PDStructureNode parent = this.getParent(); while (parent instanceof PDStructureElement) { parent = ((PDStructureElement) parent).getParent(); } if (parent instanceof PDStructureTreeRoot) { return (PDStructureTreeRoot) parent; } return null; }
@Test void testPDFBox4197() throws IOException { Set<Revisions<PDAttributeObject>> attributeSet = new HashSet<>(); Set<String> classSet = new HashSet<>(); try (PDDocument doc = Loader.loadPDF(new File(TARGETPDFDIR, "PDFBOX-4197.pdf"))) { PDStructureTreeRoot structureTreeRoot = doc.getDocumentCatalog().getStructureTreeRoot(); checkElement(structureTreeRoot.getK(), attributeSet, structureTreeRoot.getClassMap(), classSet); } // collect attributes and check their count. assertEquals(117, attributeSet.size()); int cnt = attributeSet.stream().map(attributes -> attributes.size()).reduce(0, Integer::sum); assertEquals(111, cnt); // this one was 105 before PDFBOX-4197 was fixed assertEquals(0, classSet.size()); }
public <T> T convert(String property, Class<T> targetClass) { final AbstractPropertyConverter<?> converter = converterRegistry.get(targetClass); if (converter == null) { throw new MissingFormatArgumentException("converter not found, can't convert from String to " + targetClass.getCanonicalName()); } return (T) converter.convert(property); }
@Test void testConvertNotSupportType() { assertThrows(MissingFormatArgumentException.class, () -> { compositeConverter.convert("test", CompositeConverter.class); }); }
@Override public byte[] toBitSet() { int offset = (value >>> 3); byte[] array = new byte[offset + 1]; int lastBitOffset = value > 8 ? value % 8 : value; // Need to use logical right shift to ensure other bits aren't set array[offset] = (byte) (0x80 >>> (7 - lastBitOffset)); return array; }
@Test public void testToBitSet() { testToArray(0); testToArray(12); testToArray(16); testToArray(43); testToArray(5); }
public static boolean canDrop( FilterPredicate pred, List<ColumnChunkMetaData> columns, DictionaryPageReadStore dictionaries) { Objects.requireNonNull(pred, "pred cannnot be null"); Objects.requireNonNull(columns, "columns cannnot be null"); return pred.accept(new DictionaryFilter(columns, dictionaries)); }
@Test public void testEqMissingColumn() throws Exception { BinaryColumn b = binaryColumn("missing_column"); assertTrue( "Should drop block for non-null query", canDrop(eq(b, Binary.fromString("any")), ccmd, dictionaries)); assertFalse("Should not drop block null query", canDrop(eq(b, null), ccmd, dictionaries)); }
public List<ProductPagingSimpleResponse> findAllWithPagingByCategoryId( final Long memberId, final Long productId, final Long categoryId, final int pageSize ) { QProduct product = QProduct.product; QProductImage productImage = QProductImage.productImage; QMember member = QMember.member; QProductLike productLike = QProductLike.productLike; return jpaQueryFactory.select(Projections.constructor(ProductPagingSimpleResponse.class, product.id, new CaseBuilder() .when(productImage.id.isNull()) .then(NOT_FOUND_IMAGE_NUMBER) .otherwise(productImage.id).as("imageId"), new CaseBuilder() .when(productImage.uniqueName.isNull()) .then("null") .otherwise( stringTemplate("CONCAT('https://atwozimage.s3.ap-northeast-2.amazonaws.com/', {0})", productImage.uniqueName) ).as("uniqueName"), product.description.location, product.description.title, product.price.price, product.statisticCount.visitedCount, product.statisticCount.contactCount, product.productStatus, member.id, member.nickname, product.statisticCount.likedCount, isLikedAlreadyByMe(memberId), product.createdAt )) .from(product) .leftJoin(member).on(product.memberId.eq(member.id)) .leftJoin(productLike).on(productLike.productId.eq(product.id).and(productLike.memberId.eq(memberId))) .leftJoin(productImage).on(productImage.product.id.eq(product.id)) .where( ltProductId(productId), categoryId != null ? product.categoryId.eq(categoryId) : product.categoryId.isNull() ) .orderBy(product.id.desc()) .limit(pageSize) .fetch(); }
@Test void no_offset_페이징_두번째_조회() { // given for (long i = 1L; i <= 20L; i++) { productRepository.save(Product.builder() .id(i) .categoryId(1L) .memberId(1L) .description(new Description("title", "content", Location.BUILDING_CENTER)) .statisticCount(StatisticCount.createDefault()) .price(new Price(10000)) .productStatus(ProductStatus.WAITING) .build() ); } // when List<ProductPagingSimpleResponse> result = productQueryRepository.findAllWithPagingByCategoryId(1L, 11L, 1L, 10); // then assertSoftly(softly -> { softly.assertThat(result).hasSize(10); softly.assertThat(result.get(0).id()).isEqualTo(10L); softly.assertThat(result.get(9).id()).isEqualTo(1L); }); }
@Udf(description = "Returns the hyperbolic tangent of an INT value") public Double tanh( @UdfParameter( value = "value", description = "The value in radians to get the hyperbolic tangent of." ) final Integer value ) { return tanh(value == null ? null : value.doubleValue()); }
@Test public void shouldHandleNull() { assertThat(udf.tanh((Integer) null), is(nullValue())); assertThat(udf.tanh((Long) null), is(nullValue())); assertThat(udf.tanh((Double) null), is(nullValue())); }
public final static Comparator<NodeModel> comparator() { return new Comparator<NodeModel>() { private final HashMap<NodeModel, NodeAbsolutePath> paths = new HashMap<>(); @Override public int compare(NodeModel o1, NodeModel o2) { if(o1 == o2) return 0; if(o1 == null) return -1; if(o2 == null) return 1; final NodeAbsolutePath absoluteBeginPath = getPath(o1); final NodeAbsolutePath absoluteEndPath = getPath(o2); return new NodeRelativePath(absoluteBeginPath, absoluteEndPath).compareNodePositions(); } public NodeAbsolutePath getPath(NodeModel node) { NodeAbsolutePath path = paths.get(node); if(path == null) { path = new NodeAbsolutePath(node); paths.put(node, path); } else path.reset(); return path; } }; }
@Test public void compareNodesWithSameParentUsingComparator(){ final NodeModel parent = root(); final NodeModel node1 = new NodeModel("node1", map); parent.insert(node1); final NodeModel node2 = new NodeModel("node2", map); parent.insert(node2); final int compared = NodeRelativePath.comparator().compare(node2, node1); assertTrue(compared > 0); }
@Override public boolean remove(Object value) { return get(removeAsync((V) value)); }
@Test public void testRemove() throws InterruptedException, ExecutionException { RSetCache<Integer> set = redisson.getSetCache("simple"); set.add(1, 1, TimeUnit.SECONDS); set.add(3, 2, TimeUnit.SECONDS); set.add(7, 3, TimeUnit.SECONDS); Assertions.assertTrue(set.remove(1)); Assertions.assertFalse(set.contains(1)); assertThat(set).containsOnly(3, 7); Assertions.assertFalse(set.remove(1)); assertThat(set).containsOnly(3, 7); Assertions.assertTrue(set.remove(3)); Assertions.assertFalse(set.contains(3)); assertThat(set).containsOnly(7); Assertions.assertEquals(1, set.size()); set.destroy(); }
@GetMapping("/readiness") public ResponseEntity<String> readiness(HttpServletRequest request) { ReadinessResult result = ModuleHealthCheckerHolder.getInstance().checkReadiness(); if (result.isSuccess()) { return ResponseEntity.ok().body("OK"); } return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(result.getResultMessage()); }
@Test void testReadinessNamingFailure() { // Naming is not in readiness Mockito.when(configInfoPersistService.configInfoCount(any(String.class))).thenReturn(0); Mockito.when(serverStatusManager.getServerStatus()).thenThrow(new RuntimeException("HealthControllerTest.testReadiness")); ResponseEntity<String> response = healthController.readiness(null); assertEquals(500, response.getStatusCodeValue()); assertEquals("naming not in readiness", response.getBody()); }
int commit(final Collection<Task> tasksToCommit) { int committed = 0; final Set<TaskId> ids = tasksToCommit.stream() .map(Task::id) .collect(Collectors.toSet()); maybeLockTasks(ids); // We have to throw the first uncaught exception after locking the tasks, to not attempt to commit failure records. maybeThrowTaskExceptionsFromProcessingThreads(); final Map<Task, Map<TopicPartition, OffsetAndMetadata>> consumedOffsetsAndMetadataPerTask = new HashMap<>(); try { committed = commitTasksAndMaybeUpdateCommittableOffsets(tasksToCommit, consumedOffsetsAndMetadataPerTask); } catch (final TimeoutException timeoutException) { consumedOffsetsAndMetadataPerTask .keySet() .forEach(t -> t.maybeInitTaskTimeoutOrThrow(time.milliseconds(), timeoutException)); } maybeUnlockTasks(ids); return committed; }
@SuppressWarnings("unchecked") @Test public void shouldNotFailForTimeoutExceptionOnConsumerCommit() { final StateMachineTask task00 = new StateMachineTask(taskId00, taskId00Partitions, true, stateManager); final StateMachineTask task01 = new StateMachineTask(taskId01, taskId01Partitions, true, stateManager); task00.setCommittableOffsetsAndMetadata(taskId00Partitions.stream().collect(Collectors.toMap(p -> p, p -> new OffsetAndMetadata(0)))); task01.setCommittableOffsetsAndMetadata(taskId00Partitions.stream().collect(Collectors.toMap(p -> p, p -> new OffsetAndMetadata(0)))); doThrow(new TimeoutException("KABOOM!")).doNothing().when(consumer).commitSync(any(Map.class)); task00.setCommitNeeded(); assertThat(taskManager.commit(mkSet(task00, task01)), equalTo(0)); assertThat(task00.timeout, equalTo(time.milliseconds())); assertNull(task01.timeout); assertThat(taskManager.commit(mkSet(task00, task01)), equalTo(1)); assertNull(task00.timeout); assertNull(task01.timeout); verify(consumer, times(2)).commitSync(any(Map.class)); }
public static String byteBufferToString(ByteBuffer buf) { StringBuilder sb = new StringBuilder(); for (int k = 0; k < buf.limit() / 4; k++) { if (k != 0) { sb.append(" "); } sb.append(buf.getInt()); } return sb.toString(); }
@Test public void byteBufferToString() { class TestCase { String mExpected; ByteBuffer mInput; public TestCase(String expected, ByteBuffer input) { mExpected = expected; mInput = input; } } List<TestCase> testCases = new ArrayList<>(); testCases.add(new TestCase("", ByteBuffer.wrap(new byte[] {}))); testCases.add(new TestCase("", ByteBuffer.wrap(new byte[] {0}))); testCases.add(new TestCase("", ByteBuffer.wrap(new byte[] {0, 0}))); testCases.add(new TestCase("", ByteBuffer.wrap(new byte[] {0, 0, 0}))); testCases.add(new TestCase("1", ByteBuffer.wrap(new byte[] {0, 0, 0, 1}))); testCases.add(new TestCase("1", ByteBuffer.wrap(new byte[] {0, 0, 0, 1, 0}))); testCases.add(new TestCase("1", ByteBuffer.wrap(new byte[] {0, 0, 0, 1, 0, 0}))); testCases.add(new TestCase("1", ByteBuffer.wrap(new byte[] {0, 0, 0, 1, 0, 0, 0}))); testCases.add(new TestCase("1 2", ByteBuffer.wrap(new byte[] {0, 0, 0, 1, 0, 0, 0, 2}))); for (TestCase testCase : testCases) { assertEquals(testCase.mExpected, FormatUtils.byteBufferToString(testCase.mInput)); } }
public ClientTelemetrySender telemetrySender() { return clientTelemetrySender; }
@Test public void testHandleResponsePushTelemetryErrorResponse() { ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); telemetrySender.updateSubscriptionResult(subscription, time.milliseconds()); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_IN_PROGRESS)); // unknown subscription id PushTelemetryResponse response = new PushTelemetryResponse( new PushTelemetryResponseData().setErrorCode(Errors.UNKNOWN_SUBSCRIPTION_ID.code())); telemetrySender.handleResponse(response); assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); assertEquals(0, telemetrySender.intervalMs()); assertTrue(telemetrySender.enabled()); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_IN_PROGRESS)); // unsupported compression type response = new PushTelemetryResponse( new PushTelemetryResponseData().setErrorCode(Errors.UNSUPPORTED_COMPRESSION_TYPE.code())); telemetrySender.handleResponse(response); assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); assertEquals(0, telemetrySender.intervalMs()); assertTrue(telemetrySender.enabled()); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_IN_PROGRESS)); // telemetry too large response = new PushTelemetryResponse( new PushTelemetryResponseData().setErrorCode(Errors.TELEMETRY_TOO_LARGE.code())); telemetrySender.handleResponse(response); 
assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); assertEquals(20000, telemetrySender.intervalMs()); assertTrue(telemetrySender.enabled()); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_IN_PROGRESS)); // throttling quota exceeded response = new PushTelemetryResponse( new PushTelemetryResponseData().setErrorCode(Errors.THROTTLING_QUOTA_EXCEEDED.code())); telemetrySender.handleResponse(response); assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); assertEquals(20000, telemetrySender.intervalMs()); assertTrue(telemetrySender.enabled()); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_IN_PROGRESS)); // invalid request error response = new PushTelemetryResponse( new PushTelemetryResponseData().setErrorCode(Errors.INVALID_REQUEST.code())); telemetrySender.handleResponse(response); assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); assertEquals(Integer.MAX_VALUE, telemetrySender.intervalMs()); assertFalse(telemetrySender.enabled()); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_IN_PROGRESS)); // unsupported version error telemetrySender.enabled(true); response = new PushTelemetryResponse( new PushTelemetryResponseData().setErrorCode(Errors.UNSUPPORTED_VERSION.code())); telemetrySender.handleResponse(response); assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); assertEquals(Integer.MAX_VALUE, 
telemetrySender.intervalMs()); assertFalse(telemetrySender.enabled()); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_IN_PROGRESS)); // invalid record telemetrySender.enabled(true); response = new PushTelemetryResponse( new PushTelemetryResponseData().setErrorCode(Errors.INVALID_RECORD.code())); telemetrySender.handleResponse(response); assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); assertEquals(Integer.MAX_VALUE, telemetrySender.intervalMs()); assertFalse(telemetrySender.enabled()); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_IN_PROGRESS)); // unknown error telemetrySender.enabled(true); response = new PushTelemetryResponse( new PushTelemetryResponseData().setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code())); telemetrySender.handleResponse(response); assertEquals(ClientTelemetryState.SUBSCRIPTION_NEEDED, telemetrySender.state()); assertEquals(Integer.MAX_VALUE, telemetrySender.intervalMs()); assertFalse(telemetrySender.enabled()); }
private Mono<ServerResponse> initialize(ServerRequest request) { return request.bodyToMono(SystemInitializationRequest.class) .switchIfEmpty( Mono.error(new ServerWebInputException("Request body must not be empty")) ) .doOnNext(requestBody -> { if (!ValidationUtils.validateName(requestBody.getUsername())) { throw new UnsatisfiedAttributeValueException( "The username does not meet the specifications", "problemDetail.user.username.unsatisfied", null); } if (StringUtils.isBlank(requestBody.getPassword())) { throw new UnsatisfiedAttributeValueException( "The password does not meet the specifications", "problemDetail.user.password.unsatisfied", null); } }) .flatMap(requestBody -> initializationStateSupplier.userInitialized() .flatMap(result -> { if (result) { return Mono.error(new ResponseStatusException(HttpStatus.CONFLICT, "System has been initialized")); } return initializeSystem(requestBody); }) ) .then(ServerResponse.created(URI.create("/console")).build()); }
@Test void initializeWithRequestBody() { var initialization = new SystemInitializationRequest(); initialization.setUsername("faker"); initialization.setPassword("openfaker"); initialization.setEmail("faker@halo.run"); initialization.setSiteTitle("Fake Site"); when(initializationStateGetter.userInitialized()).thenReturn(Mono.just(false)); when(superAdminInitializer.initialize(any(InitializationParam.class))) .thenReturn(Mono.empty()); var configMap = new ConfigMap(); when(client.get(ConfigMap.class, SystemSetting.SYSTEM_CONFIG)) .thenReturn(Mono.just(configMap)); when(client.update(configMap)).thenReturn(Mono.just(configMap)); webTestClient.post().uri("/system/initialize") .bodyValue(initialization) .exchange() .expectStatus().isCreated() .expectHeader().location("/console"); verify(initializationStateGetter).userInitialized(); verify(superAdminInitializer).initialize(any()); verify(client).get(ConfigMap.class, SystemSetting.SYSTEM_CONFIG); verify(client).update(configMap); }
/**
 * Applies a raft write operation for the given group and sends the outcome back over
 * the RPC context. Failures while applying are reported to the caller as an
 * unsuccessful Response carrying the throwable's string form.
 */
protected void execute(JRaftServer server, final RpcContext asyncCtx, final Message message,
        final JRaftServer.RaftGroupTuple tuple) {
    // Closure capturing either the successful response or the failure cause.
    FailoverClosure closure = new FailoverClosure() {

        Response data;

        Throwable ex;

        @Override
        public void setResponse(Response data) {
            this.data = data;
        }

        @Override
        public void setThrowable(Throwable throwable) {
            this.ex = throwable;
        }

        @Override
        public void run(Status status) {
            if (Objects.nonNull(ex)) {
                Loggers.RAFT.error("execute has error : ", ex);
                // Propagate the failure to the RPC caller instead of swallowing it.
                asyncCtx.sendResponse(Response.newBuilder().setErrMsg(ex.toString()).setSuccess(false).build());
            } else {
                asyncCtx.sendResponse(data);
            }
        }
    };
    server.applyOperation(tuple.getNode(), message, closure);
}
// A failure inside the raft apply path must surface through the RPC context as an
// unsuccessful Response carrying the error message.
@Test
void testErrorThroughRpc() {
    final AtomicReference<Response> reference = new AtomicReference<>();
    // Minimal RpcContext that only captures whatever response gets sent.
    RpcContext context = new RpcContext() {
        @Override
        public void sendResponse(Object responseObj) {
            reference.set((Response) responseObj);
        }

        @Override
        public Connection getConnection() {
            return null;
        }

        @Override
        public String getRemoteAddress() {
            return null;
        }
    };
    AbstractProcessor processor = new NacosWriteRequestProcessor(server, SerializeFactory.getDefault());
    processor.execute(server, context, WriteRequest.newBuilder().build(), new JRaftServer.RaftGroupTuple());
    Response response = reference.get();
    assertNotNull(response);
    assertEquals("Error message transmission", response.getErrMsg());
    assertFalse(response.getSuccess());
}
/**
 * Creates a fresh grouped accumulator state for SFM sketch aggregation.
 *
 * @return a new, empty GroupedSfmSketchState
 */
@Override
public SfmSketchState createGroupedState() {
    return new GroupedSfmSketchState();
}
// Verifies that grouped-state memory accounting tracks the retained size of the
// stored sketch through set and merge operations of differing physical sizes.
@Test
public void testGroupedMemoryAccounting() {
    SfmSketchState state = factory.createGroupedState();
    long emptySize = state.getEstimatedSize();

    // Create three sketches:
    // - sketch1 has one 1-bit.
    // - sketch2 has one other 1-bit.
    // - sketch3 has many random bits.
    // On initial creation, they will all be of equal size.
    // However, due to the internals of BitSet.valueOf(), serializing and deserializing will drop trailing zeros in the sketch.
    // So we can create sketches of equal logical size but different physical size by round-tripping them.
    // The physical sizes (SfmSketch.getRetainedSizeInBytes()) observed by the author after this round-trip are:
    // - sketch1: 232
    // - sketch2: 120
    // - sketch3: 2144
    SfmSketch sketch1 = SfmSketch.create(1024, 16);
    sketch1.add(1);
    sketch1 = SfmSketch.deserialize(sketch1.serialize());

    SfmSketch sketch2 = SfmSketch.create(1024, 16);
    sketch2.add(0);
    sketch2 = SfmSketch.deserialize(sketch2.serialize());

    SfmSketch sketch3 = SfmSketch.create(1024, 16);
    sketch3.enablePrivacy(0.1, new TestingSeededRandomizationStrategy(1));
    sketch3 = SfmSketch.deserialize(sketch3.serialize());

    // Set initial state to use sketch1, check memory estimate.
    // Memory usage should increase by the size of the new sketch.
    state.setSketch(sketch1);
    long memoryIncrease = state.getEstimatedSize() - emptySize;
    assertEquals(memoryIncrease, state.getSketch().getRetainedSizeInBytes());

    // Merge in sketch2, and ensure memory estimate reflects the size of the merged sketch.
    // Memory usage should stay the same, as the merged sketch should be the same size as the initial sketch.
    state.mergeSketch(sketch2);
    memoryIncrease = state.getEstimatedSize() - emptySize - memoryIncrease;
    assertEquals(memoryIncrease, 0);
    assertEquals(state.getEstimatedSize() - emptySize, state.getSketch().getRetainedSizeInBytes());

    // Merge in sketch3, and ensure memory estimate reflects the size of the merged sketch.
    // Memory usage should increase now to be at least as large as sketch3.
    // (The actual size may be larger than sketch3 due to the way BitSet expands.)
    state.mergeSketch(sketch3);
    memoryIncrease = state.getEstimatedSize() - emptySize - memoryIncrease;
    assertTrue(memoryIncrease >= 0);
    assertEquals(state.getEstimatedSize() - emptySize, state.getSketch().getRetainedSizeInBytes());
}
/**
 * Reads all key/value pairs from the properties file at {@code path}.
 * An empty or null path yields empty data; a path outside the allowed roots is
 * logged and likewise yields empty data.
 *
 * @throws IllegalStateException if the provider has not been configured yet
 * @throws ConfigException if the file cannot be read
 */
public ConfigData get(String path) {
    if (allowedPaths == null) {
        throw new IllegalStateException("The provider has not been configured yet.");
    }
    Map<String, String> data = new HashMap<>();
    if (path == null || path.isEmpty()) {
        return new ConfigData(data);
    }
    // Untrusted input: only paths under the configured allow-list may be read.
    Path filePath = allowedPaths.parseUntrustedPath(path);
    if (filePath == null) {
        log.warn("The path {} is not allowed to be accessed", path);
        return new ConfigData(data);
    }
    try (Reader reader = reader(filePath)) {
        Properties properties = new Properties();
        properties.load(reader);
        Enumeration<Object> keys = properties.keys();
        while (keys.hasMoreElements()) {
            String key = keys.nextElement().toString();
            String value = properties.getProperty(key);
            if (value != null) {
                data.put(key, value);
            }
        }
        return new ConfigData(data);
    } catch (IOException e) {
        log.error("Could not read properties from file {}", path, e);
        throw new ConfigException("Could not read properties from file " + path);
    }
}
// An empty path yields empty config data with no TTL rather than an error.
@Test
public void testEmptyPathWithKey() {
    ConfigData configData = configProvider.get("");
    assertTrue(configData.data().isEmpty());
    assertNull(configData.ttl());
}
/**
 * Converts a Flink filter expression into the equivalent Iceberg expression, when a
 * translation exists; otherwise returns {@code Optional.empty()}.
 */
public static Optional<Expression> convert(
    org.apache.flink.table.expressions.Expression flinkExpression) {
  if (!(flinkExpression instanceof CallExpression)) {
    return Optional.empty();
  }

  CallExpression call = (CallExpression) flinkExpression;
  Operation op = FILTERS.get(call.getFunctionDefinition());
  if (op != null) {
    switch (op) {
      case IS_NULL:
        return onlyChildAs(call, FieldReferenceExpression.class)
            .map(FieldReferenceExpression::getName)
            .map(Expressions::isNull);

      case NOT_NULL:
        return onlyChildAs(call, FieldReferenceExpression.class)
            .map(FieldReferenceExpression::getName)
            .map(Expressions::notNull);

      // For comparisons, the second converter handles the reversed
      // "literal <op> field" operand order.
      case LT:
        return convertFieldAndLiteral(Expressions::lessThan, Expressions::greaterThan, call);

      case LT_EQ:
        return convertFieldAndLiteral(
            Expressions::lessThanOrEqual, Expressions::greaterThanOrEqual, call);

      case GT:
        return convertFieldAndLiteral(Expressions::greaterThan, Expressions::lessThan, call);

      case GT_EQ:
        return convertFieldAndLiteral(
            Expressions::greaterThanOrEqual, Expressions::lessThanOrEqual, call);

      case EQ:
        return convertFieldAndLiteral(
            (ref, lit) -> {
              // NaN never compares equal, so equality against NaN maps to isNaN.
              if (NaNUtil.isNaN(lit)) {
                return Expressions.isNaN(ref);
              } else {
                return Expressions.equal(ref, lit);
              }
            },
            call);

      case NOT_EQ:
        return convertFieldAndLiteral(
            (ref, lit) -> {
              if (NaNUtil.isNaN(lit)) {
                return Expressions.notNaN(ref);
              } else {
                return Expressions.notEqual(ref, lit);
              }
            },
            call);

      case NOT:
        return onlyChildAs(call, CallExpression.class)
            .flatMap(FlinkFilters::convert)
            .map(Expressions::not);

      case AND:
        return convertLogicExpression(Expressions::and, call);

      case OR:
        return convertLogicExpression(Expressions::or, call);

      case STARTS_WITH:
        return convertLike(call);
    }
  }

  return Optional.empty();
}
// AND of two equality predicates converts to an Iceberg And with matching children.
@Test
public void testAnd() {
  Expression expr =
      resolve(
          Expressions.$("field1")
              .isEqual(Expressions.lit(1))
              .and(Expressions.$("field2").isEqual(Expressions.lit(2L))));
  Optional<org.apache.iceberg.expressions.Expression> actual = FlinkFilters.convert(expr);
  assertThat(actual).isPresent();
  And and = (And) actual.get();
  And expected =
      (And)
          org.apache.iceberg.expressions.Expressions.and(
              org.apache.iceberg.expressions.Expressions.equal("field1", 1),
              org.apache.iceberg.expressions.Expressions.equal("field2", 2L));

  assertPredicatesMatch(expected.left(), and.left());
  assertPredicatesMatch(expected.right(), and.right());
}
/**
 * Stream-time is undefined for global processors, so this always throws.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public long currentStreamTimeMs() {
    throw new UnsupportedOperationException("There is no concept of stream-time for a global processor.");
}
// The global context must reject stream-time queries.
@Test
public void shouldThrowOnCurrentStreamTime() {
    assertThrows(UnsupportedOperationException.class, () -> globalContext.currentStreamTimeMs());
}
/**
 * Decides whether a symlink should be recreated locally for a downloaded file.
 * Returns true only when symlink resolution is disabled in preferences, the local
 * filesystem supports symlink creation, and the link target is itself part of the
 * transfer roots; otherwise the target content is downloaded instead.
 */
@Override
public boolean resolve(final Path file) {
    if(PreferencesFactory.get().getBoolean("path.symboliclink.resolve")) {
        // Follow links instead
        return false;
    }
    // Create symbolic link only if supported by the local file system
    if(feature != null) {
        final Path target = file.getSymlinkTarget();
        // Only create symbolic link if target is included in the download
        for(TransferItem root : files) {
            if(this.findTarget(target, root.remote)) {
                if(log.isDebugEnabled()) {
                    log.debug(String.format("Resolved target %s for %s", target, file));
                }
                // Create symbolic link
                return true;
            }
        }
    }
    // Otherwise download target file
    return false;
}
// A symlink whose target directory is itself one of the transfer roots is recreated
// locally rather than followed.
@Test
public void testResolveRoot() {
    final ArrayList<TransferItem> files = new ArrayList<>();
    files.add(new TransferItem(new Path("/a", EnumSet.of(Path.Type.directory))));
    DownloadSymlinkResolver resolver = new DownloadSymlinkResolver(files);
    Path p = new Path("/b", EnumSet.of(Path.Type.file, AbstractPath.Type.symboliclink));
    p.setSymlinkTarget(new Path("/a", EnumSet.of(Path.Type.directory)));
    assertTrue(resolver.resolve(p));
}
/**
 * Ensures every key column of this source appears in the sink projection, either
 * qualified by the source alias or unqualified; otherwise raises the standard
 * "keys not included" error listing all key columns.
 */
@Override
void validateKeyPresent(final SourceName sinkName, final Projection projection) {
    if (getSchema().key().isEmpty()) {
        // No key column.
        return;
    }
    final List<Column> keys = getSchema().key();
    for (final Column keyCol : keys) {
        final ColumnName keyName = keyCol.name();
        // A key may be referenced with or without the source alias.
        if (!projection.containsExpression(new QualifiedColumnReferenceExp(getAlias(), keyName))
            && !projection.containsExpression(new UnqualifiedColumnReferenceExp(keyName))
        ) {
            throwKeysNotIncludedError(
                sinkName,
                "key column",
                keys.stream()
                    .map(Column::name)
                    .map(UnqualifiedColumnReferenceExp::new)
                    .collect(Collectors.toList())
            );
        }
    }
}
// The error message must name every missing key column and include the projection hint.
@Test
public void shouldThrowIfProjectionDoesNotContainKeyColumns() {
    // Given:
    when(projection.containsExpression(any())).thenReturn(false);

    // When:
    final Exception e = assertThrows(
        KsqlException.class,
        () -> node.validateKeyPresent(SOURCE_NAME, projection)
    );

    // Then:
    assertThat(e.getMessage(), containsString("The query used to build `datasource` must include " +
        "the key columns k0 and k1 in its projection (eg, SELECT k0, k1...)."));
}
/**
 * Returns the amount of work this block represents: LARGEST_HASH divided by
 * (target + 1), i.e. the expected number of hashes needed to meet the difficulty.
 *
 * @throws VerificationException if the difficulty target is invalid
 */
public BigInteger getWork() throws VerificationException {
    BigInteger target = getDifficultyTargetAsInteger();
    return LARGEST_HASH.divide(target.add(BigInteger.ONE));
}
// The genesis block's work on testnet must match the chainWork value Bitcoin Core
// reports at startup.
@Test
public void testWork() {
    BigInteger work = TESTNET.getGenesisBlock().getWork();
    double log2Work = Math.log(work.longValue()) / Math.log(2);
    // This number is printed by Bitcoin Core at startup as the calculated value of chainWork on testnet:
    // UpdateTip: new best=000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943 height=0 version=0x00000001 log2_work=32.000022 tx=1 date='2011-02-02 23:16:42' ...
    assertEquals(32.000022, log2Work, 0.0000001);
}
/**
 * Converts a name to its JavaBeans-style decapitalized form: the first character is
 * lowered unless the name starts with two consecutive upper-case characters (e.g.
 * "URL"), in which case the name is returned unchanged. Null and empty inputs are
 * returned as-is.
 *
 * @param name the name to decapitalize, may be null
 * @return the decapitalized name, or the input itself when no change applies
 */
public static String decapitalize(String name) {
    if ( name == null || name.isEmpty() ) {
        return name;
    }
    // Names such as "URL" beginning with two capitals are left untouched,
    // mirroring java.beans.Introspector#decapitalize.
    boolean startsWithTwoCapitals = name.length() > 1
        && Character.isUpperCase( name.charAt( 0 ) )
        && Character.isUpperCase( name.charAt( 1 ) );
    if ( startsWithTwoCapitals ) {
        return name;
    }
    return Character.toLowerCase( name.charAt( 0 ) ) + name.substring( 1 );
}
// Covers null/empty passthrough, the two-leading-capitals exception ("URL",
// "PArtialCapitalized"), and regular first-letter lowering.
@Test
public void testDecapitalize() {
    assertThat( IntrospectorUtils.decapitalize( null ) ).isNull();
    assertThat( IntrospectorUtils.decapitalize( "" ) ).isEqualTo( "" );
    assertThat( IntrospectorUtils.decapitalize( "URL" ) ).isEqualTo( "URL" );
    assertThat( IntrospectorUtils.decapitalize( "FooBar" ) ).isEqualTo( "fooBar" );
    assertThat( IntrospectorUtils.decapitalize( "PArtialCapitalized" ) ).isEqualTo( "PArtialCapitalized" );
    assertThat( IntrospectorUtils.decapitalize( "notCapitalized" ) ).isEqualTo( "notCapitalized" );
    assertThat( IntrospectorUtils.decapitalize( "a" ) ).isEqualTo( "a" );
}
/**
 * Creates a password-protected download share for the given file via the Storegate
 * file-shares API and returns the signed share URL. The passphrase is obtained from
 * the user through {@code callback}.
 *
 * @throws BackgroundException when the API request fails
 */
@Override
public DescriptiveUrl toDownloadUrl(final Path file, final Sharee sharee, final Void options, final PasswordCallback callback) throws BackgroundException {
    try {
        final Host bookmark = session.getHost();
        final CreateFileShareRequest request = new CreateFileShareRequest()
            .fileId(fileid.getFileId(file));
        // Prompt the user for a passphrase protecting the share.
        request.setPassword(callback.prompt(bookmark,
            LocaleFactory.localizedString("Passphrase", "Cryptomator"),
            MessageFormat.format(LocaleFactory.localizedString("Create a passphrase required to access {0}", "Credentials"), file.getName()),
            new LoginOptions().anonymous(true).keychain(false).icon(bookmark.getProtocol().disk())).getPassword());
        return new DescriptiveUrl(URI.create(
            new FileSharesApi(session.getClient()).fileSharesPost_0(request).getUrl()),
            DescriptiveUrl.Type.signed);
    }
    catch(ApiException e) {
        throw new StoregateExceptionMappingService(fileid).map(e);
    }
}
// Integration test: creating a share link for a freshly uploaded file yields a URL;
// cleans up by deleting the created directory.
@Test
public void toDownloadUrl() throws Exception {
    final StoregateIdProvider nodeid = new StoregateIdProvider(session);
    final Path room = new StoregateDirectoryFeature(session, nodeid).mkdir(
        new Path(String.format("/My files/%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    final Path test = new StoregateTouchFeature(session, nodeid).touch(
        new Path(room, String.format("%s", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file)), new TransferStatus());
    assertNotNull(new StoregateShareFeature(session, nodeid).toDownloadUrl(test, Share.Sharee.world, null, new DisabledPasswordCallback()).getUrl());
    new StoregateDeleteFeature(session, nodeid).delete(Collections.singletonList(room), new DisabledPasswordCallback(), new Delete.DisabledCallback());
}
/**
 * Builds the statistics node path for a single table row: the table path with the
 * row's unique key appended as the final segment.
 */
public static String getTableRowPath(final String databaseName, final String schemaName, final String table, final String uniqueKey) {
    return String.join("/", getTablePath(databaseName, schemaName, table), uniqueKey);
}
// The row path joins the table path with the row's unique key.
@Test
void assertGetTableRowPath() {
    assertThat(ShardingSphereDataNode.getTableRowPath("db_name", "db_schema", "tbl_name", "key"),
        is("/statistics/databases/db_name/schemas/db_schema/tables/tbl_name/key"));
}
/**
 * Returns true when every supplied number is non-zero; an empty argument list yields
 * true. Logs the module version on each call.
 */
public boolean ifNonZero(int... nums) {
    LOGGER.info("Source module {}", VERSION);
    // Equivalent to Arrays.stream(nums).allMatch(num -> num != 0), written as a plain loop.
    for (int num : nums) {
        if (num == 0) {
            return false;
        }
    }
    return true;
}
// Any zero among the arguments makes ifNonZero return false.
@Test
void testIfNonZero() {
    assertFalse(source.ifNonZero(-1, 0, 1));
}
/**
 * Converts a metrics {@code Point} into a state-framework dimension context.
 * A null point maps to a context with no dimensions.
 */
static MetricDimensions convert(Point p) {
    if (p == null) {
        return StateMetricContext.newInstance(null);
    }
    List<String> dimensions = p.dimensions();
    List<Value> location = p.location();
    // Pair each dimension name with the string form of its coordinate value.
    Map<String, Object> pointWrapper = new HashMap<>(dimensions.size());
    for (int i = 0; i < dimensions.size(); ++i) {
        pointWrapper.put(dimensions.get(i), valueAsString(location.get(i)));
    }
    return StateMetricContext.newInstance(pointWrapper);
}
// Metrics declared without dimension values must convert into a single
// dimensionless context containing exactly the three declared metrics.
@Test
void testConversion() {
    MetricReceiver mock = new MetricReceiver.MockReceiver();
    mock.declareCounter("foo").add(1);
    mock.declareGauge("quuux").sample(42.25);
    mock.declareCounter("bar", new Point(new HashMap<String, String>())).add(4);
    MetricSnapshot snapshot = new SnapshotConverter(mock.getSnapshot()).convert();
    for (Map.Entry<MetricDimensions, MetricSet> entry : snapshot) {
        // No metric was declared with dimension values, so none may appear here.
        for (Map.Entry<String, String> dv : entry.getKey()) {
            fail();
        }
        int cnt = 0;
        for (Map.Entry<String, MetricValue> mv : entry.getValue()) {
            ++cnt;
            if ("foo".equals(mv.getKey())) {
                assertTrue(mv.getValue() instanceof CountMetric);
                assertEquals(1, ((CountMetric) mv.getValue()).getCount());
            } else if ("bar".equals(mv.getKey())) {
                assertTrue(mv.getValue() instanceof CountMetric);
                assertEquals(4, ((CountMetric) mv.getValue()).getCount());
            } else if ("quuux".equals(mv.getKey())) {
                assertTrue(mv.getValue() instanceof GaugeMetric);
                assertEquals(42.25, ((GaugeMetric) mv.getValue()).getLast(), 0.001);
                assertEquals(1, ((GaugeMetric) mv.getValue()).getCount());
            } else {
                fail();
            }
        }
        assertEquals(3, cnt);
    }
}
/**
 * Decodes an Eddystone-URL compressed byte array back into a URL string. The low
 * nibble of the first byte selects the scheme prefix; subsequent bytes are either
 * expanded via encoded top-level-domain shortcuts or appended as literal characters.
 */
public static String uncompress(byte[] compressedURL) {
    StringBuffer url = new StringBuffer();
    // Low nibble of the first byte encodes the URL scheme prefix.
    switch (compressedURL[0] & 0x0f) {
        case EDDYSTONE_URL_PROTOCOL_HTTP_WWW:
            url.append(URL_PROTOCOL_HTTP_WWW_DOT);
            break;
        case EDDYSTONE_URL_PROTOCOL_HTTPS_WWW:
            url.append(URL_PROTOCOL_HTTPS_WWW_DOT);
            break;
        case EDDYSTONE_URL_PROTOCOL_HTTP:
            url.append(URL_PROTOCOL_HTTP_COLON_SLASH_SLASH);
            break;
        case EDDYSTONE_URL_PROTOCOL_HTTPS:
            url.append(URL_PROTOCOL_HTTPS_COLON_SLASH_SLASH);
            break;
        default:
            break;
    }
    byte lastByte = -1;
    for (int i = 1; i < compressedURL.length; i++) {
        byte b = compressedURL[i];
        // NOTE(review): the loop terminates only on two consecutive zero bytes, and
        // the first zero still falls through to be appended below — confirm this
        // matches the intended terminator/padding handling of the beacon format.
        if (lastByte == 0 && b == 0 ) {
            break;
        }
        lastByte = b;
        // TLD shortcut bytes expand to strings like ".info"; others are literals.
        String tld = topLevelDomainForByte(b);
        if (tld != null) {
            url.append(tld);
        } else {
            url.append((char) b);
        }
    }
    return url.toString();
}
// Byte 0x0b must expand to the ".info" top-level-domain shortcut.
@Test
public void testUncompressWithDotInfoTLD() throws MalformedURLException {
    String testURL = "http://google.info";
    byte[] testBytes = {0x02, 'g', 'o', 'o', 'g', 'l', 'e', 0x0b};
    assertEquals(testURL, UrlBeaconUrlCompressor.uncompress(testBytes));
}
/**
 * Requests cancellation of the given Dataflow job by submitting an update that sets
 * the requested state to CANCELLED. Transient client failures are retried per
 * {@code clientRetryPolicy()}.
 */
@Override
public Job cancelJob(String project, String region, String jobId) {
    LOG.info("Cancelling {} under {}", jobId, project);
    Job job = new Job().setRequestedState(JobState.CANCELLED.toString());
    LOG.info("Sending job to update {}:\n{}", jobId, formatForLogging(job));
    return Failsafe.with(clientRetryPolicy())
        .get(
            () -> client.projects().locations().jobs().update(project, region, jobId, job).execute());
}
// An IOException from the underlying client surfaces as a FailsafeException once
// the retry policy gives up.
@Test
public void testCancelJobThrowsException() throws IOException {
    when(getLocationJobs(client).update(any(), any(), any(), any())).thenThrow(new IOException());
    assertThrows(
        FailsafeException.class,
        () -> new FakePipelineLauncher(client).cancelJob(PROJECT, REGION, JOB_ID));
}
/**
 * Retrieves the configuration entries matching {@code pattern} from the given
 * cluster node, or null when the node returns no result.
 */
@Override
public Properties getConfig(RedisClusterNode node, String pattern) {
    RedisClient entry = getEntry(node);
    RFuture<List<String>> f = executorService.writeAsync(entry, StringCodec.INSTANCE, RedisCommands.CONFIG_GET, pattern);
    List<String> r = syncFuture(f);
    if (r != null) {
        // CONFIG GET returns a flat name/value list; convert it to Properties.
        return Converters.toProperties(r);
    }
    return null;
}
// CONFIG GET * on a master must return a substantial set of configuration entries.
@Test
public void testGetConfig() {
    RedisClusterNode master = getFirstMaster();
    Properties config = connection.getConfig(master, "*");
    assertThat(config.size()).isGreaterThan(20);
}
/**
 * Tests whether {@code path} fully matches the Ant-style {@code pattern}.
 */
public boolean match(String pattern, String path) {
    return doMatch(pattern, path, true, null);
}
// Exhaustive checks of AntPathMatcher semantics: exact matches, '?', '*', '**',
// and {placeholder} patterns. (Original comments translated to English.)
@Test
public void matchesTest4() {
    AntPathMatcher pathMatcher = new AntPathMatcher();
    // exact match
    assertTrue(pathMatcher.match("/test", "/test"));
    assertFalse(pathMatcher.match("test", "/test"));
    // test the '?' wildcard (matches exactly one character)
    assertTrue(pathMatcher.match("t?st", "test"));
    assertTrue(pathMatcher.match("te??", "test"));
    assertFalse(pathMatcher.match("tes?", "tes"));
    assertFalse(pathMatcher.match("tes?", "testt"));
    // test the '*' wildcard (matches within a single path segment)
    assertTrue(pathMatcher.match("*", "test"));
    assertTrue(pathMatcher.match("test*", "test"));
    assertTrue(pathMatcher.match("test/*", "test/Test"));
    assertTrue(pathMatcher.match("*.*", "test."));
    assertTrue(pathMatcher.match("*.*", "test.test.test"));
    assertFalse(pathMatcher.match("test*", "test/")); // note: false, because '*' does not match across path separators
    assertFalse(pathMatcher.match("test*", "test/t")); // same reason as above
    assertFalse(pathMatcher.match("test*aaa", "testblaaab")); // false: the trailing 'b' cannot be matched even though the prefix matches
    // test the '**' wildcard, which spans multiple path segments
    assertTrue(pathMatcher.match("/*/**", "/testing/testing"));
    assertTrue(pathMatcher.match("/**/*", "/testing/testing"));
    assertTrue(pathMatcher.match("/bla/**/bla", "/bla/testing/testing/bla/bla")); // this is also true
    assertFalse(pathMatcher.match("/bla*bla/test", "/blaXXXbl/test"));
    assertFalse(pathMatcher.match("/????", "/bala/bla"));
    assertFalse(pathMatcher.match("/**/*bla", "/bla/bla/bla/bbb"));
    assertTrue(pathMatcher.match("/*bla*/**/bla/**", "/XXXblaXXXX/testing/testing/bla/testing/testing/"));
    assertTrue(pathMatcher.match("/*bla*/**/bla/*", "/XXXblaXXXX/testing/testing/bla/testing"));
    assertTrue(pathMatcher.match("/*bla*/**/bla/**", "/XXXblaXXXX/testing/testing/bla/testing/testing"));
    assertTrue(pathMatcher.match("/*bla*/**/bla/**", "/XXXblaXXXX/testing/testing/bla/testing/testing.jpg"));
    assertTrue(pathMatcher.match("/foo/bar/**", "/foo/bar"));
    // note: a {placeholder} behaves like a Spring MVC path variable, so it matches anything
    assertTrue(pathMatcher.match("/{bla}.*", "/testing.html"));
    assertFalse(pathMatcher.match("/{bla}.htm", "/testing.html")); // false here: the literal suffix '.htm' does not match '.html'
}
/**
 * The executor counts as terminated once shutdown has been requested and no queued
 * tasks remain.
 */
@Override
public boolean isTerminated() {
    return shutdown.get() && taskQ.isEmpty();
}
// A freshly created executor that has not been shut down is not terminated.
@Test
public void isTerminated_whenRunning() {
    assertFalse(newManagedExecutorService().isTerminated());
}
/**
 * Returns the elevation at the given coordinate, either bilinearly interpolated from
 * the four neighbouring samples or taken from the nearest sample, depending on the
 * {@code interpolate} flag. Returns NaN when the stored sample is not a valid
 * elevation.
 *
 * @throws IllegalStateException if the coordinate lies outside this tile's bounds
 */
public double getHeight(double lat, double lon) {
    double deltaLat = Math.abs(lat - minLat);
    double deltaLon = Math.abs(lon - minLon);
    if (deltaLat > latHigherBound || deltaLat < lowerBound)
        throw new IllegalStateException("latitude not in boundary of this file:" + lat + "," + lon + ", this:" + this.toString());
    if (deltaLon > lonHigherBound || deltaLon < lowerBound)
        throw new IllegalStateException("longitude not in boundary of this file:" + lat + "," + lon + ", this:" + this.toString());
    double elevation;
    if (interpolate) {
        // Continuous pixel coordinates; y grows southwards (row 0 is northernmost).
        double x = (width - 1) * deltaLon / horizontalDegree;
        double y = (height - 1) * (1 - deltaLat / verticalDegree);
        int left = (int) x;
        int top = (int) y;
        int right = left + 1;
        int bottom = top + 1;
        double w00 = getHeightSample(left, top);
        double w01 = getHeightSample(left, bottom);
        double w10 = getHeightSample(right, top);
        double w11 = getHeightSample(right, bottom);
        // Bilinear interpolation: first along x for both rows, then along y.
        double topEle = linearInterpolate(w00, w10, x - left);
        double bottomEle = linearInterpolate(w01, w11, x - left);
        elevation = linearInterpolate(topEle, bottomEle, y - top);
    } else {
        // first row in the file is the northernmost one
        // http://gis.stackexchange.com/a/43756/9006
        int x = (int) (width / horizontalDegree * deltaLon);
        // different fallback methods for lat and lon as we have different rounding (lon -> positive, lat -> negative)
        if (x >= width)
            x = width - 1;
        int y = height - 1 - (int) (height / verticalDegree * deltaLat);
        if (y < 0)
            y = 0;
        elevation = getHeightSample(x, y);
    }
    return isValidElevation(elevation) ? elevation : Double.NaN;
}
// Exercises exact-sample lookup and boundary/fallback handling of getHeight on a
// 10x20 tile with two marked samples.
@Test
public void testGetHeight() {
    // data access has same coordinate system as graphical or UI systems have (or the original DEM data has).
    // But HeightTile has lat,lon system ('mathematically')
    int width = 10;
    int height = 20;
    HeightTile instance = new HeightTile(0, 0, width, height, 1e-6, 10, 20);
    DataAccess heights = new RAMDirectory().create("tmp");
    heights.create(2 * width * height);
    instance.setHeights(heights);
    init(heights, width, height, 1);
    // x,y=1,7
    heights.setShort(2 * (17 * width + 1), (short) 70);
    // x,y=2,9
    heights.setShort(2 * (19 * width + 2), (short) 90);
    assertEquals(1, instance.getHeight(5, 5), 1e-3);
    assertEquals(70, instance.getHeight(2.5, 1.5), 1e-3);
    // edge cases for one tile with the boundaries [min,min+degree/width) for lat and lon
    assertEquals(1, instance.getHeight(3, 2), 1e-3);
    assertEquals(70, instance.getHeight(2, 1), 1e-3);
    // edge cases for the whole object
    assertEquals(1, instance.getHeight(+1.0, 2), 1e-3);
    assertEquals(90, instance.getHeight(0.5, 2.5), 1e-3);
    assertEquals(90, instance.getHeight(0.0, 2.5), 1e-3);
    assertEquals(1, instance.getHeight(+0.0, 3), 1e-3);
    assertEquals(1, instance.getHeight(-0.5, 3.5), 1e-3);
    assertEquals(1, instance.getHeight(-0.5, 3.0), 1e-3);
    // fall back to "2,9" if within its boundaries
    assertEquals(90, instance.getHeight(-0.5, 2.5), 1e-3);
    assertEquals(1, instance.getHeight(0, 0), 1e-3);
    assertEquals(1, instance.getHeight(9, 10), 1e-3);
    assertEquals(1, instance.getHeight(10, 9), 1e-3);
    assertEquals(1, instance.getHeight(10, 10), 1e-3);
    // no error
    assertEquals(1, instance.getHeight(10.5, 5), 1e-3);
    assertEquals(1, instance.getHeight(-0.5, 5), 1e-3);
    assertEquals(1, instance.getHeight(1, -0.5), 1e-3);
    assertEquals(1, instance.getHeight(1, 10.5), 1e-3);
}
/**
 * Adds to {@code relevantPoints} every point stored in this node that lies inside
 * the query rectangle {@code r}, then recurses into the four children when this node
 * has been subdivided. Returns the same collection for chaining.
 */
Collection<Point> query(Rect r, Collection<Point> relevantPoints) { //could also be a circle instead of a rectangle
    // Prune whole subtrees whose boundary does not touch the query range.
    if (this.boundary.intersects(r)) {
        this.points
            .values()
            .stream()
            .filter(r::contains)
            .forEach(relevantPoints::add);
        if (this.divided) {
            this.northwest.query(r, relevantPoints);
            this.northeast.query(r, relevantPoints);
            this.southwest.query(r, relevantPoints);
            this.southeast.query(r, relevantPoints);
        }
    }
    return relevantPoints;
}
// Points found inside the query range via the quadtree must equal those found by a
// straightforward linear scan over the same random data.
@Test
void queryTest() {
    var points = new ArrayList<Point>();
    var rand = new Random();
    for (int i = 0; i < 20; i++) {
        var p = new Bubble(rand.nextInt(300), rand.nextInt(300), i, rand.nextInt(2) + 1);
        points.add(p);
    }
    var field = new Rect(150, 150, 300, 300); //size of field
    var queryRange = new Rect(70, 130, 100, 100); //result = all points lying in this rectangle
    //points found in the query range using quadtree and normal method is same
    var points1 = QuadTreeTest.quadTreeTest(points, field, queryRange);
    var points2 = QuadTreeTest.verify(points, queryRange);
    assertEquals(points1, points2);
}
/**
 * Looks up the attributes of a file. The root and bucket/container paths carry no
 * server-side metadata and map to {@code PathAttributes.EMPTY}.
 */
@Override
public PathAttributes find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        return PathAttributes.EMPTY;
    }
    if(containerService.isContainer(file)) {
        return PathAttributes.EMPTY;
    }
    return this.toAttributes(this.details(file));
}
// An object key containing a trailing '~' must still be addressable by the
// attributes finder; cleans up by deleting the container.
@Test
public void testReadTildeInKey() throws Exception {
    final Path container = new SpectraDirectoryFeature(session, new SpectraWriteFeature(session)).mkdir(
        new Path(new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.directory, Path.Type.volume)), new TransferStatus());
    container.attributes().setRegion("us-east-1");
    final Path file = new Path(container, String.format("%s~", new AlphanumericRandomStringService().random()), EnumSet.of(Path.Type.file));
    new SpectraTouchFeature(session).touch(file, new TransferStatus());
    new SpectraAttributesFinderFeature(session).find(file);
    new SpectraDeleteFeature(session).delete(Collections.singletonList(container), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Returns this service's instance selector.
 */
public Selector getSelector() {
    return selector;
}
// Fresh service metadata exposes a non-null selector of the default NoneSelector type.
@Test
void testGetSelector() {
    Selector selector = serviceMetadata.getSelector();
    assertNotNull(selector);
    boolean result = selector instanceof NoneSelector;
    assertTrue(result);
}
/**
 * Creates a getter that reads {@code field} from the target object, applying the
 * optional reducer {@code modifier} (e.g. "[any]") for multi-value fields.
 *
 * @throws Exception if the getter cannot be constructed
 */
public static Getter newFieldGetter(Object object, Getter parent, Field field, String modifier) throws Exception {
    return newGetter(object, parent, modifier, field.getType(), field::get,
        (t, et) -> new FieldGetter(parent, field, modifier, t, et));
}
// Reducing "[any]" over an empty inner collection resolves to the shared
// NULL_MULTIVALUE_GETTER sentinel rather than a fresh getter.
@Test
public void newFieldGetter_whenExtractingFromEmpty_Collection_AndReducerSuffixInNotEmpty_thenReturnNullGetter() throws Exception {
    OuterObject object = OuterObject.emptyInner("name");
    Getter getter = GetterFactory.newFieldGetter(object, null, innersCollectionField, "[any]");
    assertSame(NullMultiValueGetter.NULL_MULTIVALUE_GETTER, getter);
}
/**
 * Extracts the watermark for a split as the minimum lower-bound value of the event
 * time column across all files in the split, converted to milliseconds.
 *
 * @throws IllegalArgumentException if a file lacks lower-bound statistics for the
 *     event time column
 */
@Override
public long extractWatermark(IcebergSourceSplit split) {
  return split.task().files().stream()
      .map(
          scanTask -> {
            // Fixed format string: previously there were three arguments for two %s
            // placeholders, so the error reported the field id as the file and
            // appended the actual file as a stray trailing argument.
            Preconditions.checkArgument(
                scanTask.file().lowerBounds() != null
                    && scanTask.file().lowerBounds().get(eventTimeFieldId) != null,
                "Missing statistics for column name = %s (id = %s) in file = %s",
                eventTimeFieldName,
                eventTimeFieldId,
                scanTask.file());
            return timeUnit.toMillis(
                Conversions.fromByteBuffer(
                    Types.LongType.get(),
                    scanTask.file().lowerBounds().get(eventTimeFieldId)));
          })
      .min(Comparator.comparingLong(l -> l))
      .get();
}
// For a split with a single file the watermark equals that file's minimum value of
// the watermark column.
@TestTemplate
public void testSingle() throws IOException {
  ColumnStatsWatermarkExtractor extractor =
      new ColumnStatsWatermarkExtractor(SCHEMA, columnName, TimeUnit.MILLISECONDS);

  assertThat(extractor.extractWatermark(split(0)))
      .isEqualTo(MIN_VALUES.get(0).get(columnName).longValue());
}
/**
 * Queries the device for its configured controllers over REST and parses the JSON
 * reply into ControllerInfo records. Returns an empty list when this node is not
 * master for the device or when the request or JSON parsing fails.
 *
 * @throws IllegalArgumentException on a malformed controller IP or out-of-range port
 */
@Override
public List<ControllerInfo> getControllers() {
    List<ControllerInfo> controllers = Lists.newArrayList();
    DeviceId deviceId = getDeviceId();
    checkNotNull(deviceId, MSG_DEVICE_ID_NULL);
    MastershipService mastershipService = getHandler().get(MastershipService.class);
    checkNotNull(mastershipService, MSG_MASTERSHIP_NULL);
    // Only the master for this device may query it.
    if (!mastershipService.isLocalMaster(deviceId)) {
        log.warn(
            "I am not master for {}. " +
            "Please use master {} to get controllers for this device",
            deviceId, mastershipService.getMasterFor(deviceId));
        return controllers;
    }
    // Hit the path that provides the server's controllers
    InputStream response = null;
    try {
        response = getController().get(deviceId, URL_CONTROLLERS_GET, JSON);
    } catch (ProcessingException pEx) {
        log.error("Failed to get controllers of device: {}", deviceId);
        return controllers;
    }
    // Load the JSON into objects
    ObjectMapper mapper = new ObjectMapper();
    Map<String, Object> jsonMap = null;
    JsonNode jsonNode = null;
    ObjectNode objNode = null;
    try {
        jsonMap = mapper.readValue(response, Map.class);
        jsonNode = mapper.convertValue(jsonMap, JsonNode.class);
        objNode = (ObjectNode) jsonNode;
    } catch (IOException ioEx) {
        log.error("Failed to get controllers of device: {}", deviceId);
        return controllers;
    }
    if (jsonMap == null) {
        log.error("Failed to get controllers of device: {}", deviceId);
        return controllers;
    }
    // Fetch controllers' array
    JsonNode ctrlNode = objNode.path(PARAM_CTRL);
    for (JsonNode cn : ctrlNode) {
        ObjectNode ctrlObjNode = (ObjectNode) cn;
        // Get the attributes of a controller
        String ctrlIpStr = get(cn, PARAM_CTRL_IP);
        int ctrlPort = ctrlObjNode.path(PARAM_CTRL_PORT).asInt();
        String ctrlType = get(cn, PARAM_CTRL_TYPE);
        // Implies no controller
        if (ctrlIpStr.isEmpty()) {
            continue;
        }
        // Check data format and range
        IpAddress ctrlIp = null;
        try {
            ctrlIp = IpAddress.valueOf(ctrlIpStr);
        } catch (IllegalArgumentException e) {
            throw new IllegalArgumentException(e);
        }
        if ((ctrlPort < 0) || (ctrlPort > TpPort.MAX_PORT)) {
            final String msg = "Invalid controller port: " + ctrlPort;
            throw new IllegalArgumentException(msg);
        }
        controllers.add(
            new ControllerInfo(ctrlIp, ctrlPort, ctrlType)
        );
    }
    return controllers;
}
// Placeholder: only checks that a driver handler can be created for the REST
// device; controller retrieval itself is not yet exercised (see TODO below).
@Test
public void testGetControllers() {
    // Get device handler
    DriverHandler driverHandler = null;
    try {
        driverHandler = driverService.createHandler(restDeviceId1);
    } catch (Exception e) {
        throw e;
    }
    assertThat(driverHandler, notNullValue());
    // Ask for the controllers of this device
    List<ControllerInfo> receivedControllers = null;
    // TODO: Fix this test
}
/**
 * Delegates to the two-argument overload; the supplied properties are not used by
 * this payload implementation.
 */
@Override
public Option<IndexedRecord> combineAndGetUpdateValue(IndexedRecord currentValue, Schema schema, Properties properties)
        throws IOException {
    return combineAndGetUpdateValue(currentValue, schema);
}
// For an AWS DMS update ("U") record, the payload must keep the new record's values.
@Test
public void testUpdate() {
    Schema avroSchema = new Schema.Parser().parse(AVRO_SCHEMA_STRING);
    GenericRecord newRecord = new GenericData.Record(avroSchema);
    Properties properties = new Properties();
    newRecord.put("field1", 1);
    newRecord.put("Op", "U");
    GenericRecord oldRecord = new GenericData.Record(avroSchema);
    oldRecord.put("field1", 0);
    oldRecord.put("Op", "I");
    AWSDmsAvroPayload payload = new AWSDmsAvroPayload(Option.of(newRecord));
    try {
        Option<IndexedRecord> outputPayload = payload.combineAndGetUpdateValue(oldRecord, avroSchema, properties);
        assertTrue((int) outputPayload.get().get(0) == 1);
        assertTrue(outputPayload.get().get(1).toString().equals("U"));
    } catch (Exception e) {
        fail("Unexpected exception");
    }
}
/**
 * Exposes the wrapped ParSeq task as a CompletableFuture through its
 * CompletionStage view.
 */
@Override
public CompletableFuture<T> toCompletableFuture() {
    return _task.toCompletionStage().toCompletableFuture();
}
// A stage built from a throwable completes its future exceptionally with that
// throwable as the cause.
@Test
public void testCreateStageFromThrowable() throws Exception {
    ParSeqBasedCompletionStage<String> stageFromValue =
        _parSeqBasedCompletionStageFactory.buildStageFromThrowable(EXCEPTION);
    try {
        stageFromValue.toCompletableFuture().get();
        fail("Should fail");
    } catch (Exception e) {
        Assert.assertEquals(EXCEPTION, e.getCause());
    }
}
/**
 * Processes a SAML response and aggregates the authentication outcome: processing
 * and mapping errors, available and mapped attributes, signature/encryption flags,
 * and warnings. The status is "success" only when no errors were collected.
 */
public static SamlAuthenticationStatus getSamlAuthenticationStatus(String samlResponse, Auth auth, SamlSettings samlSettings) {
    SamlAuthenticationStatus samlAuthenticationStatus = new SamlAuthenticationStatus();
    try {
        auth.processResponse();
    } catch (Exception e) {
        samlAuthenticationStatus.getErrors().add(e.getMessage());
    }
    samlAuthenticationStatus.getErrors().addAll(auth.getErrors().stream().filter(Objects::nonNull).toList());
    if (auth.getLastErrorReason() != null) {
        samlAuthenticationStatus.getErrors().add(auth.getLastErrorReason());
    }
    // Mapping problems are only meaningful when the response itself processed cleanly.
    if (samlAuthenticationStatus.getErrors().isEmpty()) {
        samlAuthenticationStatus.getErrors().addAll(generateMappingErrors(auth, samlSettings));
    }
    samlAuthenticationStatus.setAvailableAttributes(auth.getAttributes());
    samlAuthenticationStatus.setMappedAttributes(getAttributesMapping(auth, samlSettings));
    samlAuthenticationStatus.setSignatureEnabled(isSignatureEnabled(auth, samlSettings));
    samlAuthenticationStatus.setEncryptionEnabled(isEncryptionEnabled(auth, samlResponse));
    // Warnings are suppressed when there are errors; errors dominate the status.
    samlAuthenticationStatus.setWarnings(samlAuthenticationStatus.getErrors().isEmpty() ? generateWarnings(auth, samlSettings) : new ArrayList<>());
    samlAuthenticationStatus.setStatus(samlAuthenticationStatus.getErrors().isEmpty() ? "success" : "error");
    return samlAuthenticationStatus;
}
@Test
public void mapped_attributes_are_complete_when_mapping_fields_are_correct() {
    // Configure valid SAML settings and stub every expected response attribute on the mock.
    setSettings();
    settings.setProperty("sonar.auth.saml.sp.privateKey.secured", (String) null);
    getResponseAttributes().forEach((key, value) -> when(auth.getAttribute(key)).thenReturn(value));
    samlAuthenticationStatus = getSamlAuthenticationStatus(BASE64_SAML_RESPONSE, auth, new SamlSettings(settings.asConfig()));
    // With a correct mapping there must be no errors or warnings, and all four
    // attributes (login, name, email, groups) must be both available and mapped.
    assertEquals("success", samlAuthenticationStatus.getStatus());
    assertTrue(samlAuthenticationStatus.getErrors().isEmpty());
    assertTrue(samlAuthenticationStatus.getWarnings().isEmpty());
    assertEquals(4, samlAuthenticationStatus.getAvailableAttributes().size());
    assertEquals(4, samlAuthenticationStatus.getMappedAttributes().size());
    assertTrue(samlAuthenticationStatus.getAvailableAttributes().keySet().containsAll(Set.of("login", "name", "email", "groups")));
    assertTrue(samlAuthenticationStatus.getMappedAttributes().keySet().containsAll(Set.of("User login value", "User name value", "User email value", "Groups value")));
}
/**
 * Package-private accessor for the resolved client protocol.
 * NOTE(review): may be null if no provider was successfully loaded — confirm against callers.
 */
ClientProtocol getClient() {
    return client;
}
@Test
@SuppressWarnings("unchecked")
public void testProtocolProviderCreation() throws Exception {
    // Simulate a provider iterator whose second element fails to instantiate
    // (ServiceConfigurationError) while the first and third succeed.
    Iterator iterator = mock(Iterator.class);
    when(iterator.hasNext()).thenReturn(true, true, true, true);
    when(iterator.next()).thenReturn(getClientProtocolProvider())
        .thenThrow(new ServiceConfigurationError("Test error"))
        .thenReturn(getClientProtocolProvider());
    Iterable frameworkLoader = mock(Iterable.class);
    when(frameworkLoader.iterator()).thenReturn(iterator);
    // Inject the mocked service loader before constructing the cluster.
    Cluster.frameworkLoader = frameworkLoader;
    Cluster testCluster = new Cluster(new Configuration());
    // Check that we get the acceptable client, even after
    // failure in instantiation.
    assertNotNull("ClientProtocol is expected", testCluster.getClient());
    // Check if we do not try to load the providers after a failure:
    // next() must have been called exactly twice (first success was enough;
    // the failing element was consumed, nothing beyond it).
    verify(iterator, times(2)).next();
}
/**
 * Returns the value at view row {@code i}, column {@code j}, translating the
 * row through this view's {@code index} array into the backing data frame.
 */
@Override
public Object get(int i, int j) {
    return df.get(index[i], j);
}
@Test
public void testGet() {
    // Removed leftover System.out.println debug statements — tests should not
    // write diagnostic noise to stdout; the assertions below fully pin behavior.
    // Rows 0 and 3 resolve to the same underlying record through the index view.
    assertEquals(48, df.getInt(0, 0));
    assertEquals("Jane", df.getString(0, 3));
    assertEquals(230000., df.get(0, 4));
    assertEquals(23, df.getInt(1, 0));
    assertEquals("Bob", df.getString(1, 3));
    assertNull(df.get(1, 4));
    assertEquals(13, df.getInt(2, 0));
    assertEquals("Amy", df.getString(2, 3));
    assertNull(df.get(2, 4));
    assertEquals(48, df.getInt(3, 0));
    assertEquals("Jane", df.getString(3, 3));
    assertEquals(230000., df.get(3, 4));
}
/**
 * Parses one line of an ACL definition file.
 *
 * <p>Recognized keywords: {@code topic} (creates an authorization in the current
 * section), {@code user} (switches to a user-specific section), {@code pattern}
 * (switches to a pattern-specific section).
 *
 * @param line the raw line to parse
 * @return the parsed authorization, or {@code null} for section-switch lines
 * @throws ParseException if the keyword is unknown or the line is malformed
 */
protected Authorization parseAuthLine(String line) throws ParseException {
    String[] tokens = line.split("\\s+");
    String keyword = tokens[0].toLowerCase();
    switch (keyword) {
        case "topic":
            return createAuthorization(line, tokens);
        case "user":
            // Guard against a bare "user" line with no username; previously this
            // raised an ArrayIndexOutOfBoundsException instead of a parse error.
            if (tokens.length < 2) {
                throw new ParseException(String.format("invalid line definition found %s", line), 1);
            }
            m_parsingUsersSpecificSection = true;
            m_currentUser = tokens[1];
            m_parsingPatternSpecificSection = false;
            return null;
        case "pattern":
            m_parsingUsersSpecificSection = false;
            m_currentUser = "";
            m_parsingPatternSpecificSection = true;
            return createAuthorization(line, tokens);
        default:
            throw new ParseException(String.format("invalid line definition found %s", line), 1);
    }
}
@Test
public void testParseAuthLineValid_invalid() {
    // A "topic" line with a superfluous extra token is malformed and must be rejected.
    assertThrows(ParseException.class, () -> authorizator.parseAuthLine("topic faker /weather/italy/anemometer"));
}
/**
 * Callback invoked when a ClassLoader is attached to the module scope;
 * delegates to {@code refreshClassLoader} so serialize-security configuration
 * is refreshed from the newly available loader.
 */
@Override
public void onAddClassLoader(ModuleModel scopeModel, ClassLoader classLoader) {
    refreshClassLoader(classLoader);
}
@Test
void testStatus2() {
    // Build a framework/application/module model hierarchy with the serialize
    // check status explicitly configured to WARN.
    FrameworkModel frameworkModel = new FrameworkModel();
    ApplicationModel applicationModel = frameworkModel.newApplication();
    ModuleModel moduleModel = applicationModel.newModule();
    ApplicationConfig applicationConfig = new ApplicationConfig("Test");
    applicationConfig.setSerializeCheckStatus(SerializeCheckStatus.WARN.name());
    applicationModel.getApplicationConfigManager().setApplication(applicationConfig);
    SerializeSecurityManager ssm = frameworkModel.getBeanFactory().getBean(SerializeSecurityManager.class);
    SerializeSecurityConfigurator serializeSecurityConfigurator = new SerializeSecurityConfigurator(moduleModel);
    // Triggering the class-loader callback should propagate the configured status
    // into the security manager.
    serializeSecurityConfigurator.onAddClassLoader(
        moduleModel, Thread.currentThread().getContextClassLoader());
    Assertions.assertEquals(SerializeCheckStatus.WARN, ssm.getCheckStatus());
    // Clean up the framework model so state does not leak into other tests.
    frameworkModel.destroy();
}
/**
 * Registers a listener for replica info events.
 *
 * @param listener the listener to register; must not be null
 */
@Override
public void addListener(ReplicaInfoEventListener listener) {
    final ReplicaInfoEventListener checkedListener = checkNotNull(listener);
    listenerRegistry.addListener(checkedListener);
}
@Test
public void testReplicaInfoEvent() throws InterruptedException {
    // Listener counts down the latch when it sees NID1 as master for DID1.
    final CountDownLatch latch = new CountDownLatch(1);
    service.addListener(new MasterNodeCheck(latch, DID1, NID1));
    // fake MastershipEvent: post a MASTER_CHANGED event naming NID1 as master.
    eventDispatcher.post(new MastershipEvent(Type.MASTER_CHANGED, DID1,
        new MastershipInfo(1, Optional.of(NID1), ImmutableMap.of(NID1, MastershipRole.MASTER))));
    // The listener must be notified asynchronously within one second.
    assertTrue(latch.await(1, TimeUnit.SECONDS));
}
/**
 * Handles a seckill mock request by delegating deletion of the order record
 * identified by the request's seckill id to the order service.
 */
@Override
public void handle(SeckillWebMockRequestDTO request) {
    orderService.deleteRecord(request.getSeckillId());
}
@Test
public void shouldDeleteSuccessKilledRecord() {
    // Given a mock seckill request targeting record 123
    SeckillWebMockRequestDTO mockRequest = new SeckillWebMockRequestDTO();
    mockRequest.setSeckillId(123L);

    // When the pre-request handler processes it
    mongoPreRequestHandler.handle(mockRequest);

    // Then exactly one delete must be issued against the order service
    verify(orderService, times(1)).deleteRecord(anyLong());
}
/**
 * Convenience overload: matches the log info against the group id's
 * canonical string form (delegates to the String-based overload).
 */
public boolean matchesGroupId(TimelineEntityGroupId groupId) {
    return matchesGroupId(groupId.toString());
}
@Test
void testMatchesGroupId() throws Exception {
    String testGroupId = "app1_group1";
    // Match: exact id and id embedded after a prefix both match.
    EntityLogInfo testLogInfo = new EntityLogInfo(TEST_ATTEMPT_DIR_NAME,
        "app1_group1",
        UserGroupInformation.getLoginUser().getUserName());
    assertTrue(testLogInfo.matchesGroupId(testGroupId));
    testLogInfo = new EntityLogInfo(TEST_ATTEMPT_DIR_NAME,
        "test_app1_group1",
        UserGroupInformation.getLoginUser().getUserName());
    assertTrue(testLogInfo.matchesGroupId(testGroupId));
    // Unmatch: different app, different group, or a longer group token must not match.
    testLogInfo = new EntityLogInfo(TEST_ATTEMPT_DIR_NAME,
        "app2_group1",
        UserGroupInformation.getLoginUser().getUserName());
    assertFalse(testLogInfo.matchesGroupId(testGroupId));
    testLogInfo = new EntityLogInfo(TEST_ATTEMPT_DIR_NAME,
        "app1_group2",
        UserGroupInformation.getLoginUser().getUserName());
    assertFalse(testLogInfo.matchesGroupId(testGroupId));
    testLogInfo = new EntityLogInfo(TEST_ATTEMPT_DIR_NAME,
        "app1_group12",
        UserGroupInformation.getLoginUser().getUserName());
    assertFalse(testLogInfo.matchesGroupId(testGroupId));
    // Check delimiters: "_" and "." after the id terminate the token, so these match.
    testLogInfo = new EntityLogInfo(TEST_ATTEMPT_DIR_NAME,
        "app1_group1_2",
        UserGroupInformation.getLoginUser().getUserName());
    assertTrue(testLogInfo.matchesGroupId(testGroupId));
    testLogInfo = new EntityLogInfo(TEST_ATTEMPT_DIR_NAME,
        "app1_group1.dat",
        UserGroupInformation.getLoginUser().getUserName());
    assertTrue(testLogInfo.matchesGroupId(testGroupId));
    // Check file names shorter than group id cannot match.
    testLogInfo = new EntityLogInfo(TEST_ATTEMPT_DIR_NAME,
        "app2",
        UserGroupInformation.getLoginUser().getUserName());
    assertFalse(testLogInfo.matchesGroupId(testGroupId));
}
/**
 * Converts an Avro record schema into a Parquet {@link MessageType}.
 *
 * @param avroSchema the Avro schema; must be of type RECORD
 * @return the equivalent Parquet message type
 * @throws IllegalArgumentException if the schema is not a record
 */
public MessageType convert(Schema avroSchema) {
    // Enum constants are singletons: compare with != rather than equals()
    // (also avoids any question of null handling on the constant).
    if (avroSchema.getType() != Schema.Type.RECORD) {
        throw new IllegalArgumentException("Avro schema must be a record.");
    }
    return new MessageType(avroSchema.getFullName(), convertFields(avroSchema.getFields(), ""));
}
@Test
public void testTimeMicrosType() throws Exception {
    // An Avro long annotated with time-micros must round-trip to Parquet's
    // int64 TIME(MICROS, true).
    Schema date = LogicalTypes.timeMicros().addToSchema(Schema.create(LONG));
    Schema expected = Schema.createRecord(
        "myrecord", null, null, false, Arrays.asList(new Schema.Field("time", date, null, null)));
    testRoundTripConversion(
        expected,
        "message myrecord {\n" + "  required int64 time (TIME(MICROS,true));\n" + "}\n");
    // Every other physical type must be rejected when carrying TIME_MICROS.
    for (PrimitiveTypeName primitive :
        new PrimitiveTypeName[] {INT32, INT96, FLOAT, DOUBLE, BOOLEAN, BINARY, FIXED_LEN_BYTE_ARRAY}) {
        final PrimitiveType type;
        if (primitive == FIXED_LEN_BYTE_ARRAY) {
            // FIXED_LEN_BYTE_ARRAY requires an explicit length argument.
            type = new PrimitiveType(REQUIRED, primitive, 12, "test", TIME_MICROS);
        } else {
            type = new PrimitiveType(REQUIRED, primitive, "test", TIME_MICROS);
        }
        assertThrows(
            "Should not allow TIME_MICROS with " + primitive,
            IllegalArgumentException.class,
            () -> new AvroSchemaConverter().convert(message(type)));
    }
}
/**
 * Overload that resolves the exception strategy from the log table alone,
 * with no triggering exception (delegates with a null second argument).
 */
public static LogExceptionBehaviourInterface getExceptionStrategy( LogTableCoreInterface table ) {
    return getExceptionStrategy( table, null );
}
@Test
public void testExceptionStrategyWithMaxAllowedPacketException() {
    // A MariaDB max_allowed_packet failure should resolve to the suppressable
    // short-message strategy rather than a full stack-trace strategy.
    DatabaseMeta databaseMeta = mock( DatabaseMeta.class );
    DatabaseInterface databaseInterface = new MariaDBDatabaseMeta();
    MaxAllowedPacketException e = new MaxAllowedPacketException();
    when( logTable.getDatabaseMeta() ).thenReturn( databaseMeta );
    when( databaseMeta.getDatabaseInterface() ).thenReturn( databaseInterface );
    LogExceptionBehaviourInterface exceptionStrategy =
        DatabaseLogExceptionFactory.getExceptionStrategy( logTable, new KettleDatabaseException( e ) );
    // Strategy identity is checked via class name, matching the factory's contract.
    String strategyName = exceptionStrategy.getClass().getName();
    assertEquals( SUPPRESSABLE_WITH_SHORT_MESSAGE, strategyName );
}
/**
 * Computes file statistics (size, modification time, average record width) for the
 * configured input paths. The average record width is estimated by sampling up to
 * {@code numSamples} lines spread evenly across the input files.
 *
 * <p>Temporarily overrides the open timeout, buffer size, and line-length limit for
 * sampling; the originals are restored in the {@code finally} block regardless of outcome.
 *
 * @param cachedStats previously cached statistics, reused when still valid
 * @return the statistics, or {@code null} if they could not be determined
 */
@Override
public FileBaseStatistics getStatistics(BaseStatistics cachedStats) throws IOException {
    final FileBaseStatistics cachedFileStats =
            cachedStats instanceof FileBaseStatistics
                    ? (FileBaseStatistics) cachedStats
                    : null;

    // store properties so they can be restored after sampling
    final long oldTimeout = this.openTimeout;
    final int oldBufferSize = this.bufferSize;
    final int oldLineLengthLimit = this.lineLengthLimit;
    try {
        final ArrayList<FileStatus> allFiles = new ArrayList<>(1);

        // let the file input format deal with the up-to-date check and the basic size
        final FileBaseStatistics stats = getFileStats(cachedFileStats, getFilePaths(), allFiles);
        if (stats == null) {
            return null;
        }

        // check whether the width per record is already known or the total size is unknown as
        // well
        // in both cases, we return the stats as they are
        if (stats.getAverageRecordWidth() != FileBaseStatistics.AVG_RECORD_BYTES_UNKNOWN
                || stats.getTotalInputSize() == FileBaseStatistics.SIZE_UNKNOWN) {
            return stats;
        }

        // disabling sampling for unsplittable files since the logic below assumes splitability.
        // TODO: Add sampling for unsplittable files. Right now, only compressed text files are
        // affected by this limitation.
        if (unsplittable) {
            return stats;
        }

        // compute how many samples to take, depending on the defined upper and lower bound
        final int numSamples;
        if (this.numLineSamples != NUM_SAMPLES_UNDEFINED) {
            numSamples = this.numLineSamples;
        } else {
            // make the samples small for very small files (one sample per KiB, clamped)
            final int calcSamples = (int) (stats.getTotalInputSize() / 1024);
            numSamples =
                    Math.min(
                            DEFAULT_MAX_NUM_SAMPLES,
                            Math.max(DEFAULT_MIN_NUM_SAMPLES, calcSamples));
        }

        // check if sampling is disabled.
        if (numSamples == 0) {
            return stats;
        }
        if (numSamples < 0) {
            throw new RuntimeException("Error: Invalid number of samples: " + numSamples);
        }

        // make sure that the sampling times out after a while if the file system does not
        // answer in time
        this.openTimeout = 10000;
        // set a small read buffer size
        this.bufferSize = 4 * 1024;
        // prevent overly large records, for example if we have an incorrectly configured
        // delimiter
        this.lineLengthLimit = MAX_SAMPLE_LEN;

        long offset = 0;
        long totalNumBytes = 0;
        long stepSize = stats.getTotalInputSize() / numSamples;

        int fileNum = 0;
        int samplesTaken = 0;

        // take the samples: one line per sample point, stepping 'stepSize' bytes
        // through the concatenation of all files
        while (samplesTaken < numSamples && fileNum < allFiles.size()) {
            // make a split for the sample and use it to read a record
            FileStatus file = allFiles.get(fileNum);
            FileInputSplit split =
                    new FileInputSplit(0, file.getPath(), offset, file.getLen() - offset, null);

            // we open the split, read one line, and take its length
            try {
                open(split);
                if (readLine()) {
                    // delimiter bytes count toward the record width
                    totalNumBytes += this.currLen + this.delimiter.length;
                    samplesTaken++;
                }
            } finally {
                // close the file stream, do not release the buffers
                super.close();
            }

            offset += stepSize;

            // skip to the next file, if necessary (offset is relative to the current file)
            while (fileNum < allFiles.size()
                    && offset >= (file = allFiles.get(fileNum)).getLen()) {
                offset -= file.getLen();
                fileNum++;
            }
        }

        // we have the width, store it
        return new FileBaseStatistics(
                stats.getLastModificationTime(),
                stats.getTotalInputSize(),
                totalNumBytes / (float) samplesTaken);
    } catch (IOException ioex) {
        if (LOG.isWarnEnabled()) {
            LOG.warn(
                    "Could not determine statistics for files '"
                            + Arrays.toString(getFilePaths())
                            + "' "
                            + "due to an io error: "
                            + ioex.getMessage());
        }
    } catch (Throwable t) {
        if (LOG.isErrorEnabled()) {
            LOG.error(
                    "Unexpected problem while getting the file statistics for files '"
                            + Arrays.toString(getFilePaths())
                            + "': "
                            + t.getMessage(),
                    t);
        }
    } finally {
        // restore properties (even on return)
        this.openTimeout = oldTimeout;
        this.bufferSize = oldBufferSize;
        this.lineLengthLimit = oldLineLengthLimit;
    }

    // no statistics possible
    return null;
}
@Test
void testGetStatisticsFileDoesNotExist() throws IOException {
    // Point the format at paths that are guaranteed not to exist.
    DelimitedInputFormat<String> inputFormat = new MyTextInputFormat();
    inputFormat.setFilePaths(
            "file:///path/does/not/really/exist", "file:///another/path/that/does/not/exist");

    // Statistics cannot be computed for missing files, so null is expected.
    FileBaseStatistics statistics = inputFormat.getStatistics(null);
    assertThat(statistics).as("The file statistics should be null.").isNull();
}
/**
 * Desensitizes {@code source} using the SPI implementation registered under
 * the given algorithm name.
 *
 * @param source    the raw value to desensitize
 * @param algorithm the extension name of the desensitization algorithm
 * @return the desensitized value
 */
public static String selectDesensitize(final String source, final String algorithm) {
    return ExtensionLoader.getExtensionLoader(ShenyuDataDesensitize.class)
            .getJoin(algorithm)
            .desensitize(source);
}
@Test
public void selectDesensitizeTest() {
    // EMPTY_STRING: an empty input stays empty under MD5 desensitization.
    String emptyStr = DataDesensitizeFactory.selectDesensitize("", DataDesensitizeEnum.MD5_ENCRYPT.getDataDesensitizeAlg());
    Assertions.assertEquals("", emptyStr);
    // test for md5: output must equal the hex MD5 digest of the input.
    String sourceData = "123456789";
    String desensitizedData = DataDesensitizeFactory.selectDesensitize(sourceData, DataDesensitizeEnum.MD5_ENCRYPT.getDataDesensitizeAlg());
    Assertions.assertEquals(DigestUtils.md5Hex(sourceData), desensitizedData);
    // test for replacement: half the characters (rounded down) are masked with '*'.
    String replaceText = DataDesensitizeFactory.selectDesensitize(sourceData, DataDesensitizeEnum.CHARACTER_REPLACE.getDataDesensitizeAlg());
    String maskData = "123456789";
    int maskNum = 0;
    for (char c : replaceText.toCharArray()) {
        if (c == '*') {
            maskNum++;
        }
    }
    Assertions.assertEquals(maskData.length() / 2, maskNum);
}
/**
 * Logs the current ingredient counts of the immutable stew.
 * Reads all four counts from the immutable data holder; never mutates state.
 */
public void mix() {
    LOGGER
        .info("Mixing the immutable stew we find: {} potatoes, {} carrots, {} meat and {} peppers",
            data.numPotatoes(), data.numCarrots(), data.numMeat(), data.numPeppers());
}
@Test
void testMix() {
    var stew = new Stew(1, 2, 3, 4);
    // NOTE(review): this expected text matches the (mutable) Stew's log wording;
    // it differs from ImmutableStew's "Mixing the immutable stew ..." message —
    // confirm the intended target class if this assertion ever fails on wording.
    var expectedMessage = "Mixing the stew we find: 1 potatoes, 2 carrots, 3 meat and 4 peppers";
    // Mixing is idempotent: the same message is logged on every call.
    for (var i = 0; i < 20; i++) {
        stew.mix();
        assertEquals(expectedMessage, appender.getLastMessage());
    }
    assertEquals(20, appender.getLogSize());
}
/**
 * Exchanges a refresh token for a new access token.
 *
 * <p>Validates the refresh token and its owning client, revokes every access
 * token derived from it, rejects expired refresh tokens (deleting them), and
 * finally issues a fresh access token bound to the same refresh token.
 *
 * @param refreshToken the refresh token presented by the client
 * @param clientId     the OAuth2 client id making the request
 * @return the newly created access token
 */
@Override
public OAuth2AccessTokenDO refreshAccessToken(String refreshToken, String clientId) {
    // Look up the refresh-token record (original comment mislabeled this as
    // an access-token lookup; the query targets the refresh-token table).
    OAuth2RefreshTokenDO refreshTokenDO = oauth2RefreshTokenMapper.selectByRefreshToken(refreshToken);
    if (refreshTokenDO == null) {
        throw exception0(GlobalErrorCodeConstants.BAD_REQUEST.getCode(), "无效的刷新令牌");
    }
    // Verify the caller's client matches the client the refresh token was issued to.
    OAuth2ClientDO clientDO = oauth2ClientService.validOAuthClientFromCache(clientId);
    if (ObjectUtil.notEqual(clientId, refreshTokenDO.getClientId())) {
        throw exception0(GlobalErrorCodeConstants.BAD_REQUEST.getCode(), "刷新令牌的客户端编号不正确");
    }
    // Revoke all access tokens derived from this refresh token (DB rows + Redis cache).
    List<OAuth2AccessTokenDO> accessTokenDOs = oauth2AccessTokenMapper.selectListByRefreshToken(refreshToken);
    if (CollUtil.isNotEmpty(accessTokenDOs)) {
        oauth2AccessTokenMapper.deleteBatchIds(convertSet(accessTokenDOs, OAuth2AccessTokenDO::getId));
        oauth2AccessTokenRedisDAO.deleteList(convertSet(accessTokenDOs, OAuth2AccessTokenDO::getAccessToken));
    }
    // If the refresh token itself has expired, remove it and reject the request.
    if (DateUtils.isExpired(refreshTokenDO.getExpiresTime())) {
        oauth2RefreshTokenMapper.deleteById(refreshTokenDO.getId());
        throw exception0(GlobalErrorCodeConstants.UNAUTHORIZED.getCode(), "刷新令牌已过期");
    }
    // Issue a new access token bound to the validated refresh token and client.
    return createOAuth2AccessToken(refreshTokenDO, clientDO);
}
@Test
public void testRefreshAccessToken_clientIdError() {
    // Prepare parameters.
    String refreshToken = randomString();
    String clientId = randomString();
    // Mock the client-service lookup to return a valid client for clientId.
    OAuth2ClientDO clientDO = randomPojo(OAuth2ClientDO.class).setClientId(clientId);
    when(oauth2ClientService.validOAuthClientFromCache(eq(clientId))).thenReturn(clientDO);
    // Insert a refresh token owned by a DIFFERENT client ("error") to trigger the mismatch.
    OAuth2RefreshTokenDO refreshTokenDO = randomPojo(OAuth2RefreshTokenDO.class)
        .setRefreshToken(refreshToken).setClientId("error");
    oauth2RefreshTokenMapper.insert(refreshTokenDO);
    // Invoke and assert: the client-id mismatch must be rejected with the expected error.
    assertServiceException(() -> oauth2TokenService.refreshAccessToken(refreshToken, clientId),
        new ErrorCode(400, "刷新令牌的客户端编号不正确"));
}
/**
 * Delegates directly to the superclass implementation.
 * NOTE(review): no behavior change is visible here — presumably overridden to
 * surface the method on this subject type (e.g. for docs/visibility); confirm.
 */
@Override
public void isNotEqualTo(@Nullable Object expected) {
    super.isNotEqualTo(expected);
}
@Test
public void isNotEqualTo_WithoutToleranceParameter_Success_NotAnArray() {
    // A double array is never equal to a plain Object, so the assertion must pass.
    assertThat(array(2.2d, 3.3d, 4.4d)).isNotEqualTo(new Object());
}
/**
 * Runs the full SAML authentication-request intake pipeline: decode the raw
 * message, build the domain request, validate binding/request/contents, and
 * cross-check against other domain services.
 *
 * @param httpRequest the incoming HTTP request carrying the SAML message
 * @return the fully validated authentication request
 */
public AuthenticationRequest startAuthenticationProcess(HttpServletRequest httpRequest) throws ComponentInitializationException, MessageDecodingException, SamlValidationException, SharedServiceClientException, DienstencatalogusException, SamlSessionException {
    // Decode the raw SAML message carried by the HTTP request.
    BaseHttpServletRequestXMLMessageDecoder messageDecoder = decodeXMLRequest(httpRequest);
    // Build the domain-level request object from the decoded message.
    AuthenticationRequest request = createAuthenticationRequest(httpRequest, messageDecoder);
    // The binding context is required for the validations that follow.
    SAMLBindingContext bindingContext = createAndValidateBindingContext(messageDecoder);
    validateAuthenticationRequest(request);
    parseAuthentication(request);
    validateWithOtherDomainServices(request, bindingContext);
    return request;
}
@Test
public void parseAuthenticationSuccessfulIDPTest() throws SamlSessionException, SharedServiceClientException, DienstencatalogusException, ComponentInitializationException, SamlValidationException, MessageDecodingException {
    // Load a canned IdP AuthnRequest fixture and encode it as it would arrive over HTTP.
    String samlRequest = readXMLFile(authnRequestIdpFile);
    String decodeSAMLRequest = encodeAuthnRequest(samlRequest);
    httpServletRequestMock.setParameter("SAMLRequest", decodeSAMLRequest);
    AuthenticationRequest result = authenticationService.startAuthenticationProcess(httpServletRequestMock);
    assertNotNull(result);
    // Destination and service UUID must match the values baked into the fixture.
    String idpRedirectUrl = "SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS";
    assertEquals(idpRedirectUrl, result.getAuthnRequest().getDestination());
    assertEquals("SSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSS", result.getServiceUuid());
}
/**
 * Builds a Glue {@code DatabaseInput} from an Iceberg namespace and its metadata.
 * The two reserved keys (description, location) are routed to their dedicated
 * builder fields; every other entry is carried through as a plain parameter.
 */
static DatabaseInput toDatabaseInput(
    Namespace namespace, Map<String, String> metadata, boolean skipNameValidation) {
  DatabaseInput.Builder builder =
      DatabaseInput.builder().name(toDatabaseName(namespace, skipNameValidation));
  Map<String, String> parameters = Maps.newHashMap();
  for (Map.Entry<String, String> entry : metadata.entrySet()) {
    String key = entry.getKey();
    String value = entry.getValue();
    if (GLUE_DESCRIPTION_KEY.equals(key)) {
      builder.description(value);
    } else if (GLUE_DB_LOCATION_KEY.equals(key)) {
      builder.locationUri(value);
    } else {
      parameters.put(key, value);
    }
  }
  return builder.parameters(parameters).build();
}
@Test
public void testToDatabaseInput() {
    // Mix the two reserved keys with one ordinary key to verify routing.
    Map<String, String> properties =
        ImmutableMap.of(
            IcebergToGlueConverter.GLUE_DESCRIPTION_KEY,
            "description",
            IcebergToGlueConverter.GLUE_DB_LOCATION_KEY,
            "s3://location",
            "key",
            "val");
    DatabaseInput databaseInput =
        IcebergToGlueConverter.toDatabaseInput(Namespace.of("ns"), properties, false);
    // Reserved keys land on dedicated fields; only "key" remains a parameter.
    assertThat(databaseInput.locationUri()).as("Location should be set").isEqualTo("s3://location");
    assertThat(databaseInput.description())
        .as("Description should be set")
        .isEqualTo("description");
    assertThat(databaseInput.parameters())
        .as("Parameters should be set")
        .isEqualTo(ImmutableMap.of("key", "val"));
    assertThat(databaseInput.name()).as("Database name should be set").isEqualTo("ns");
}
/**
 * Runs the selected quickstart. Fails fast with the list of valid types when
 * no type was provided; every other setting is optional and applied only when set.
 *
 * @return {@code true} on successful execution
 * @throws UnsupportedOperationException if no quickstart type was provided
 */
@Override
public boolean execute() throws Exception {
    // Plugins must be initialized before any quickstart can run.
    PluginManager.get().init();
    if (_type == null) {
        Set<Class<? extends QuickStartBase>> quickStarts = allQuickStarts();
        throw new UnsupportedOperationException("No QuickStart type provided. " + "Valid types are: " + errroMessageFor(quickStarts));
    }
    QuickStartBase quickstart = selectQuickStart(_type);
    // Optional overrides: applied only when the corresponding CLI option was given.
    if (_tmpDir != null) {
        quickstart.setDataDir(_tmpDir);
    }
    if (_bootstrapTableDirs != null) {
        quickstart.setBootstrapDataDirs(_bootstrapTableDirs);
    }
    if (_zkExternalAddress != null) {
        quickstart.setZkExternalAddress(_zkExternalAddress);
    }
    if (_configFilePath != null) {
        quickstart.setConfigFilePath(_configFilePath);
    }
    quickstart.execute();
    return true;
}
// Executing the command without a quickstart type must fail with an
// UnsupportedOperationException whose message lists the valid types.
@Test(expectedExceptions = UnsupportedOperationException.class, expectedExceptionsMessageRegExp = "^No QuickStart type provided. Valid types are: \\[.*\\]$")
public void testNoArg() throws Exception {
    QuickStartCommand quickStartCommand = new QuickStartCommand();
    quickStartCommand.execute();
}
/** Renders all four tracked metrics in a single diagnostic line. */
@Override
public String toString() {
    final String template = "PrioritizedFilterStatistics [id: %s, count: %d, first: %d, last: %d]";
    return String.format(template, getFilterId(), getCount(), getFirst(), getLast());
}
@Test
void testToStringInitial() {
    // A freshly constructed statistics object reports zero for all counters.
    PrioritizedFilterStatistics stats = new PrioritizedFilterStatistics("test");
    String strVal = stats.toString();
    Assertions.assertEquals("PrioritizedFilterStatistics [id: test, count: 0, first: 0, last: 0]", strVal);
}
/**
 * Lists the directory using the default path delimiter (delegates to the
 * three-argument overload).
 */
@Override
public AttributedList<Path> list(final Path directory, final ListProgressListener listener) throws BackgroundException {
    return this.list(directory, listener, String.valueOf(Path.DELIMITER));
}
@Test
public void testListVirtualHostStyle() throws Exception {
    // List the bucket root over a virtual-host-style endpoint.
    final AttributedList<Path> list = new S3ObjectListService(virtualhost, new S3AccessControlListFeature(virtualhost)).list(
        new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume)), new DisabledListProgressListener());
    for(Path p : list) {
        // Every entry must be parented directly at the root.
        assertEquals(new Path("/", EnumSet.of(Path.Type.directory, Path.Type.volume)), p.getParent());
        if(p.isFile()) {
            // Files carry timestamp, size, ETag and storage class, but no version id
            // (versioning not expected on this endpoint).
            assertNotEquals(-1L, p.attributes().getModificationDate());
            assertNotEquals(-1L, p.attributes().getSize());
            assertNotNull(p.attributes().getETag());
            assertNotNull(p.attributes().getStorageClass());
            assertNull(p.attributes().getVersionId());
        }
    }
}
/**
 * Associates {@code value} with {@code key}, returning the previous value,
 * or {@code initialValue} if the key was absent.
 *
 * <p>The table is open-addressed with linear probing: an empty slot is marked
 * by {@code initialValue} in the values array, which is why that sentinel
 * value itself cannot be stored.
 *
 * @throws IllegalArgumentException if {@code value} equals the map's initial value
 */
public int put(final K key, final int value) {
    final int initialValue = this.initialValue;
    if (initialValue == value) {
        throw new IllegalArgumentException("cannot accept initialValue");
    }
    final K[] keys = this.keys;
    final int[] values = this.values;
    // mask works because capacity is kept a power of two.
    @DoNotSub final int mask = values.length - 1;
    @DoNotSub int index = Hashing.hash(key, mask);
    int oldValue = initialValue;
    // Linear probe until an empty slot (sentinel) or the matching key is found.
    while (values[index] != initialValue) {
        if (Objects.equals(keys[index], key)) {
            oldValue = values[index];
            break;
        }
        index = ++index & mask;
    }
    // New key: bump size and record the key in the probed slot.
    if (oldValue == initialValue) {
        ++size;
        keys[index] = key;
    }
    values[index] = value;
    // Grows/rehashes when needed — presumably on exceeding the load factor; see implementation.
    increaseCapacity();
    return oldValue;
}
@Test
void shouldNotAllowInitialValueAsValue() {
    // The initial value is the empty-slot sentinel, so storing it must be rejected.
    assertThrows(IllegalArgumentException.class, () -> map.put(1, INITIAL_VALUE));
}
/**
 * Convenience overload that delegates to the six-argument variant with a
 * {@code null} final argument.
 */
static TransferResult uploadImageFromStorage(URL fsName, Configuration conf,
    NNStorage storage, NameNodeFile nnf, long txid) throws IOException {
    return uploadImageFromStorage(fsName, conf, storage, nnf, txid, null);
}
@Test(timeout = 10000)
public void testImageUploadTimeout() throws Exception {
    Configuration conf = new HdfsConfiguration();
    NNStorage mockStorage = Mockito.mock(NNStorage.class);
    // Stand up a real HTTP server with a servlet that (per TestImageTransferServlet)
    // will not respond within the configured read timeout.
    HttpServer2 testServer = HttpServerFunctionalTest.createServer("hdfs");
    try {
        testServer.addServlet("ImageTransfer", ImageServlet.PATH_SPEC, TestImageTransferServlet.class);
        testServer.start();
        URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
        // set the timeout here, otherwise it will take default.
        TransferFsImage.timeout = 2000;
        // Create a small on-disk image file for the mock storage to hand out.
        File tmpDir = new File(new FileSystemTestHelper().getTestRootDir());
        tmpDir.mkdirs();
        File mockImageFile = File.createTempFile("image", "", tmpDir);
        FileOutputStream imageFile = new FileOutputStream(mockImageFile);
        imageFile.write("data".getBytes());
        imageFile.close();
        Mockito.when(
            mockStorage.findImageFile(Mockito.any(NameNodeFile.class),
            Mockito.anyLong())).thenReturn(mockImageFile);
        Mockito.when(mockStorage.toColonSeparatedString()).thenReturn(
            "storage:info:string");
        // The upload must fail with a socket read timeout, not hang or succeed.
        try {
            TransferFsImage.uploadImageFromStorage(serverURL, conf, mockStorage,
                NameNodeFile.IMAGE, 1L);
            fail("TransferImage Should fail with timeout");
        } catch (SocketTimeoutException e) {
            assertEquals("Upload should timeout", "Read timed out", e.getMessage());
        }
    } finally {
        testServer.stop();
    }
}
/**
 * Intentionally empty default implementation: subclasses may override this to
 * observe the raw configuration text; the base class takes no action.
 */
@Override
public void receiveConfigInfo(final String configInfo) {
}
@Test
void receiveConfigInfo() {
    // A subclass that calls super.receiveConfigInfo must still receive the raw
    // config text unchanged (the base no-op must not interfere).
    final Deque<String> data = new ArrayDeque<String>();
    AbstractConfigChangeListener a = new AbstractConfigChangeListener() {
        @Override
        public void receiveConfigChange(ConfigChangeEvent event) {
        }

        @Override
        public void receiveConfigInfo(String configInfo) {
            super.receiveConfigInfo(configInfo);
            data.offer(configInfo);
        }
    };
    a.receiveConfigInfo("foo");
    final String actual = data.poll();
    assertEquals("foo", actual);
}
/**
 * Wraps the valid region {@code [0, mCount)} of the internal buffer as a
 * {@link ByteBuffer}. No copy is made, so subsequent writes to this stream
 * may be visible through the returned buffer.
 */
public ByteBuffer toByteBuffer() {
    return ByteBuffer.wrap(mBuffer, 0, mCount);
}
@Test
void testToByteBuffer() {
    // A single written byte must be readable back through the wrapped buffer.
    UnsafeByteArrayOutputStream outputStream = new UnsafeByteArrayOutputStream(1);
    outputStream.write((int) 'a');
    ByteBuffer byteBuffer = outputStream.toByteBuffer();
    assertThat(byteBuffer.get(), is("a".getBytes()[0]));
}
/**
 * Rewrites the given plan using this optimizer's {@code Rewriter}
 * (which performs the JDBC compute pushdown), returning the rewritten plan.
 */
@Override
public PlanNode optimize(
    PlanNode maxSubplan,
    ConnectorSession session,
    VariableAllocator variableAllocator,
    PlanNodeIdAllocator idAllocator) {
    return rewriteWith(new Rewriter(session, idAllocator), maxSubplan);
}
@Test
public void testJdbcComputePushdownWithConstants() {
    // Build a filter "(c1 + c2) = 3" over a JDBC table scan of two BIGINT columns.
    String table = "test_table";
    String schema = "test_schema";
    String expression = "(c1 + c2) = 3";
    TypeProvider typeProvider = TypeProvider.copyOf(ImmutableMap.of("c1", BIGINT, "c2", BIGINT));
    RowExpression rowExpression = sqlToRowExpressionTranslator.translateAndOptimize(expression(expression), typeProvider);
    Set<ColumnHandle> columns = Stream.of("c1", "c2").map(TestJdbcComputePushdown::integerJdbcColumnHandle).collect(Collectors.toSet());
    PlanNode original = filter(jdbcTableScan(schema, table, BIGINT, "c1", "c2"), rowExpression);
    JdbcTableHandle jdbcTableHandle = new JdbcTableHandle(CONNECTOR_ID, new SchemaTableName(schema, table), CATALOG_NAME, schema, table);
    ConnectorSession session = new TestingConnectorSession(ImmutableList.of());
    // Expected layout: the constant 3 is pushed down as a bound parameter ("?")
    // in the generated JDBC expression.
    JdbcTableLayoutHandle jdbcTableLayoutHandle = new JdbcTableLayoutHandle(
        session.getSqlFunctionProperties(),
        jdbcTableHandle,
        TupleDomain.none(),
        Optional.of(new JdbcExpression("(('c1' + 'c2') = ?)", ImmutableList.of(new ConstantExpression(Long.valueOf(3), INTEGER)))));
    PlanNode actual = this.jdbcComputePushdown.optimize(original, session, null, ID_ALLOCATOR);
    // The optimized plan keeps the filter shape but the scan must carry the pushed-down expression.
    assertPlanMatch(actual, PlanMatchPattern.filter(
        expression,
        JdbcTableScanMatcher.jdbcTableScanPattern(jdbcTableLayoutHandle, columns)));
}