focal_method
stringlengths
13
60.9k
test_case
stringlengths
25
109k
/**
 * CDI producer for the JobRunr dashboard web server.
 * Yields {@code null} when the dashboard is disabled in the build-time
 * configuration, so no dashboard bean is exposed in that case.
 */
@Produces
@DefaultBean
@Singleton
JobRunrDashboardWebServer dashboardWebServer(StorageProvider storageProvider, JsonMapper jobRunrJsonMapper, JobRunrDashboardWebServerConfiguration dashboardWebServerConfiguration) {
    return jobRunrBuildTimeConfiguration.dashboard().enabled()
            ? new JobRunrDashboardWebServer(storageProvider, jobRunrJsonMapper, dashboardWebServerConfiguration)
            : null;
}
// Verifies that the producer returns null (no dashboard bean) when the dashboard is disabled.
@Test void dashboardWebServerIsNotSetupWhenNotConfigured() { when(dashboardBuildTimeConfiguration.enabled()).thenReturn(false); assertThat(jobRunrProducer.dashboardWebServer(storageProvider, jsonMapper, usingStandardDashboardConfiguration())).isNull(); }
// Forwards a stdout line to the wrapped consumer, cropping over-long lines first.
@Override public void stdOutput(String line) { consumer.stdOutput(cropLongLine(line)); }
// Verifies that a line longer than the configured bound is cropped and marked with the GoCD suffix.
@Test public void shouldCropLongLines() { InMemoryStreamConsumer actualConsumer = ProcessOutputStreamConsumer.inMemoryConsumer(); BoundedOutputStreamConsumer streamConsumer = new BoundedOutputStreamConsumer(actualConsumer, 30); streamConsumer.stdOutput("This is a fairly ridiculously long line."); assertThat(actualConsumer.getAllOutput(), is("This is ...[ cropped by GoCD ]\n")); }
/**
 * Drains up to {@code maxRecords} acquired records from the completed fetch into a
 * {@link ShareInFlightBatch}, recording gaps for acquired offsets with no matching record.
 * Exceptions cached by a previous call are surfaced first; exceptions raised mid-batch are
 * cached so already-collected records can still be returned to the caller.
 */
<K, V> ShareInFlightBatch<K, V> fetchRecords(final Deserializers<K, V> deserializers, final int maxRecords, final boolean checkCrcs) {
    // Creating an empty ShareInFlightBatch
    ShareInFlightBatch<K, V> inFlightBatch = new ShareInFlightBatch<>(partition);
    if (cachedBatchException != null) {
        // In the event that a CRC check fails, reject the entire record batch because it is corrupt.
        rejectRecordBatch(inFlightBatch, currentBatch);
        inFlightBatch.setException(cachedBatchException);
        cachedBatchException = null;
        return inFlightBatch;
    }
    if (cachedRecordException != null) {
        // A single record failed to deserialize on the previous call: release it and report the error.
        inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE);
        inFlightBatch.setException(cachedRecordException);
        cachedRecordException = null;
        return inFlightBatch;
    }
    if (isConsumed)
        return inFlightBatch;
    initializeNextAcquired();
    try {
        int recordsInBatch = 0;
        while (recordsInBatch < maxRecords) {
            lastRecord = nextFetchedRecord(checkCrcs);
            if (lastRecord == null) {
                // Any remaining acquired records are gaps
                while (nextAcquired != null) {
                    inFlightBatch.addGap(nextAcquired.offset);
                    nextAcquired = nextAcquiredRecord();
                }
                break;
            }
            while (nextAcquired != null) {
                if (lastRecord.offset() == nextAcquired.offset) {
                    // It's acquired, so we parse it and add it to the batch
                    Optional<Integer> leaderEpoch = maybeLeaderEpoch(currentBatch.partitionLeaderEpoch());
                    TimestampType timestampType = currentBatch.timestampType();
                    ConsumerRecord<K, V> record = parseRecord(deserializers, partition, leaderEpoch, timestampType, lastRecord, nextAcquired.deliveryCount);
                    inFlightBatch.addRecord(record);
                    recordsRead++;
                    bytesRead += lastRecord.sizeInBytes();
                    recordsInBatch++;
                    nextAcquired = nextAcquiredRecord();
                    break;
                } else if (lastRecord.offset() < nextAcquired.offset) {
                    // It's not acquired, so we skip it
                    break;
                } else {
                    // It's acquired, but there's no non-control record at this offset, so it's a gap
                    inFlightBatch.addGap(nextAcquired.offset);
                }
                nextAcquired = nextAcquiredRecord();
            }
        }
    } catch (SerializationException se) {
        nextAcquired = nextAcquiredRecord();
        if (inFlightBatch.isEmpty()) {
            inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE);
            inFlightBatch.setException(se);
        } else {
            // Defer the error so the records gathered so far are still delivered.
            cachedRecordException = se;
            inFlightBatch.setHasCachedException(true);
        }
    } catch (CorruptRecordException e) {
        if (inFlightBatch.isEmpty()) {
            // In the event that a CRC check fails, reject the entire record batch because it is corrupt.
            rejectRecordBatch(inFlightBatch, currentBatch);
            inFlightBatch.setException(e);
        } else {
            cachedBatchException = e;
            inFlightBatch.setHasCachedException(true);
        }
    }
    return inFlightBatch;
}
// Verifies that an empty fetch produces a batch with no records and no acknowledgements.
@Test public void testNoRecordsInFetch() { ShareFetchResponseData.PartitionData partitionData = new ShareFetchResponseData.PartitionData() .setPartitionIndex(0); ShareCompletedFetch completedFetch = newShareCompletedFetch(partitionData); try (final Deserializers<String, String> deserializers = newStringDeserializers()) { ShareInFlightBatch<String, String> batch = completedFetch.fetchRecords(deserializers, 10, true); List<ConsumerRecord<String, String>> records = batch.getInFlightRecords(); assertEquals(0, records.size()); Acknowledgements acknowledgements = batch.getAcknowledgements(); assertEquals(0, acknowledgements.size()); } }
// Handles a PostgreSQL Parse message: parses (and possibly escapes/rewrites) the SQL,
// converts PostgreSQL-style $n parameter markers to JDBC-style '?', pads declared column
// types to the statement's parameter count, binds the statement context, registers the
// server-side prepared statement, and replies with ParseComplete.
@Override public Collection<DatabasePacket> execute() { SQLParserEngine sqlParserEngine = createShardingSphereSQLParserEngine(connectionSession.getUsedDatabaseName()); String sql = packet.getSQL(); SQLStatement sqlStatement = sqlParserEngine.parse(sql, true); String escapedSql = escape(sqlStatement, sql); if (!escapedSql.equalsIgnoreCase(sql)) { sqlStatement = sqlParserEngine.parse(escapedSql, true); sql = escapedSql; } List<Integer> actualParameterMarkerIndexes = new ArrayList<>(); if (sqlStatement.getParameterCount() > 0) { List<ParameterMarkerSegment> parameterMarkerSegments = new ArrayList<>(((AbstractSQLStatement) sqlStatement).getParameterMarkerSegments()); for (ParameterMarkerSegment each : parameterMarkerSegments) { actualParameterMarkerIndexes.add(each.getParameterIndex()); } sql = convertSQLToJDBCStyle(parameterMarkerSegments, sql); sqlStatement = sqlParserEngine.parse(sql, true); } List<PostgreSQLColumnType> paddedColumnTypes = paddingColumnTypes(sqlStatement.getParameterCount(), packet.readParameterTypes()); SQLStatementContext sqlStatementContext = sqlStatement instanceof DistSQLStatement ? new DistSQLStatementContext((DistSQLStatement) sqlStatement) : new SQLBindEngine(ProxyContext.getInstance().getContextManager().getMetaDataContexts().getMetaData(), connectionSession.getCurrentDatabaseName(), packet.getHintValueContext()) .bind(sqlStatement, Collections.emptyList()); PostgreSQLServerPreparedStatement serverPreparedStatement = new PostgreSQLServerPreparedStatement(sql, sqlStatementContext, packet.getHintValueContext(), paddedColumnTypes, actualParameterMarkerIndexes); connectionSession.getServerPreparedStatementRegistry().addPreparedStatement(packet.getStatementId(), serverPreparedStatement); return Collections.singleton(PostgreSQLParseCompletePacket.getInstance()); }
// Verifies that $n markers are converted to '?' (but not inside comments or string literals),
// that the prepared statement is registered under its id, and that column types are padded.
@Test void assertExecuteWithParameterizedSQL() throws ReflectiveOperationException { final String rawSQL = "/*$0*/insert into sbtest1 /* $1 */ -- $2 \n (id, k, c, pad) \r values \r\n($1, $2, 'apsbd$31a', '$99')/*$0*/ \n--$0"; final String expectedSQL = "/*$0*/insert into sbtest1 /* $1 */ -- $2 \n (id, k, c, pad) \r values \r\n(?, ?, 'apsbd$31a', '$99')/*$0*/ \n--$0"; final String statementId = "S_2"; when(parsePacket.getSQL()).thenReturn(rawSQL); when(parsePacket.getStatementId()).thenReturn(statementId); when(parsePacket.readParameterTypes()).thenReturn(Collections.singletonList(PostgreSQLColumnType.INT4)); when(parsePacket.getHintValueContext()).thenReturn(new HintValueContext()); when(connectionSession.getCurrentDatabaseName()).thenReturn("foo_db"); Plugins.getMemberAccessor().set(PostgreSQLComParseExecutor.class.getDeclaredField("connectionSession"), executor, connectionSession); ContextManager contextManager = mockContextManager(); when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager); Collection<DatabasePacket> actualPackets = executor.execute(); assertThat(actualPackets.size(), is(1)); assertThat(actualPackets.iterator().next(), is(PostgreSQLParseCompletePacket.getInstance())); PostgreSQLServerPreparedStatement actualPreparedStatement = connectionSession.getServerPreparedStatementRegistry().getPreparedStatement(statementId); assertThat(actualPreparedStatement.getSqlStatementContext(), instanceOf(InsertStatementContext.class)); assertThat(actualPreparedStatement.getSqlStatementContext().getSqlStatement(), instanceOf(PostgreSQLInsertStatement.class)); assertThat(actualPreparedStatement.getSql(), is(expectedSQL)); assertThat(actualPreparedStatement.getParameterTypes(), is(Arrays.asList(PostgreSQLColumnType.INT4, PostgreSQLColumnType.UNSPECIFIED))); }
// Convenience overload: close with an effectively unbounded timeout and the boolean flag false.
// NOTE(review): the flag presumably means "leaveGroup" — confirm against the two-arg overload.
public void close() { close(Long.MAX_VALUE, false); }
// Verifies that close() with a zero timeout returns false immediately instead of blocking.
@Test public void shouldNotBlockInCloseWithCloseOptionLeaveGroupFalseForZeroDuration() throws Exception { prepareStreams(); prepareStreamThread(streamThreadOne, 1); prepareStreamThread(streamThreadTwo, 2); prepareTerminableThread(streamThreadOne); final KafkaStreams.CloseOptions closeOptions = new KafkaStreams.CloseOptions(); closeOptions.timeout(Duration.ZERO); try (final KafkaStreams streams = new KafkaStreams(getBuilderWithSource().build(), props, supplier)) { assertFalse(streams.close(closeOptions)); } }
// Applies a Resilience4j rate limiter (looked up by conf id) to the Mono;
// when a fallback is supplied, errors are resumed through it.
@Override public <T> Mono<T> run(final Mono<T> toRun, final Function<Throwable, Mono<T>> fallback, final Resilience4JConf conf) { RateLimiter rateLimiter = Resilience4JRegistryFactory.rateLimiter(conf.getId(), conf.getRateLimiterConfig()); Mono<T> to = toRun.transformDeferred(RateLimiterOperator.of(rateLimiter)); return Optional.ofNullable(fallback) .map(to::onErrorResume) .orElse(to); }
// Verifies that an upstream error propagates through the rate-limited Mono via the fallback.
@Test public void errorTest() { Resilience4JConf conf = mock(Resilience4JConf.class); when(conf.getId()).thenReturn("SHENYU"); when(conf.getRateLimiterConfig()).thenReturn(RateLimiterConfig.ofDefaults()); StepVerifier.create(ratelimiterExecutor.run(Mono.error(new RuntimeException()), Mono::error, conf)) .expectSubscription() .expectError(RuntimeException.class) .verify(); }
@Override @Transactional(rollbackFor = Exception.class) // 添加事务,异常则回滚所有导入 public UserImportRespVO importUserList(List<UserImportExcelVO> importUsers, boolean isUpdateSupport) { // 1.1 参数校验 if (CollUtil.isEmpty(importUsers)) { throw exception(USER_IMPORT_LIST_IS_EMPTY); } // 1.2 初始化密码不能为空 String initPassword = configApi.getConfigValueByKey(USER_INIT_PASSWORD_KEY); if (StrUtil.isEmpty(initPassword)) { throw exception(USER_IMPORT_INIT_PASSWORD); } // 2. 遍历,逐个创建 or 更新 UserImportRespVO respVO = UserImportRespVO.builder().createUsernames(new ArrayList<>()) .updateUsernames(new ArrayList<>()).failureUsernames(new LinkedHashMap<>()).build(); importUsers.forEach(importUser -> { // 2.1.1 校验字段是否符合要求 try { ValidationUtils.validate(BeanUtils.toBean(importUser, UserSaveReqVO.class).setPassword(initPassword)); } catch (ConstraintViolationException ex){ respVO.getFailureUsernames().put(importUser.getUsername(), ex.getMessage()); return; } // 2.1.2 校验,判断是否有不符合的原因 try { validateUserForCreateOrUpdate(null, null, importUser.getMobile(), importUser.getEmail(), importUser.getDeptId(), null); } catch (ServiceException ex) { respVO.getFailureUsernames().put(importUser.getUsername(), ex.getMessage()); return; } // 2.2.1 判断如果不存在,在进行插入 AdminUserDO existUser = userMapper.selectByUsername(importUser.getUsername()); if (existUser == null) { userMapper.insert(BeanUtils.toBean(importUser, AdminUserDO.class) .setPassword(encodePassword(initPassword)).setPostIds(new HashSet<>())); // 设置默认密码及空岗位编号数组 respVO.getCreateUsernames().add(importUser.getUsername()); return; } // 2.2.2 如果存在,判断是否允许更新 if (!isUpdateSupport) { respVO.getFailureUsernames().put(importUser.getUsername(), USER_USERNAME_EXISTS.getMsg()); return; } AdminUserDO updateUser = BeanUtils.toBean(importUser, AdminUserDO.class); updateUser.setId(existUser.getId()); userMapper.updateById(updateUser); respVO.getUpdateUsernames().add(importUser.getUsername()); }); return respVO; }
// Verifies that importing an existing username with isUpdateSupport=true updates the user in place.
@Test
public void testImportUserList_04() {
    // mock data
    AdminUserDO dbUser = randomAdminUserDO();
    userMapper.insert(dbUser);
    // prepare parameters
    UserImportExcelVO importUser = randomPojo(UserImportExcelVO.class, o -> {
        o.setStatus(randomEle(CommonStatusEnum.values()).getStatus()); // keep status within the enum range
        o.setSex(randomEle(SexEnum.values()).getSex()); // keep sex within the enum range
        o.setUsername(dbUser.getUsername());
        o.setEmail(randomEmail());
        o.setMobile(randomMobile());
    });
    // mock deptService behavior
    DeptDO dept = randomPojo(DeptDO.class, o -> {
        o.setId(importUser.getDeptId());
        o.setStatus(CommonStatusEnum.ENABLE.getStatus());
    });
    when(deptService.getDept(eq(dept.getId()))).thenReturn(dept);
    // invoke
    UserImportRespVO respVO = userService.importUserList(newArrayList(importUser), true);
    // assert
    assertEquals(0, respVO.getCreateUsernames().size());
    assertEquals(1, respVO.getUpdateUsernames().size());
    AdminUserDO user = userMapper.selectByUsername(respVO.getUpdateUsernames().get(0));
    assertPojoEquals(importUser, user);
    assertEquals(0, respVO.getFailureUsernames().size());
}
/**
 * Resolves the serializable value backing the given file.
 * Content containing variables is re-interpreted on every call; static content is
 * read through the supplied cache so the file is only loaded once.
 */
@Override
public Serializable getValueFromFile(String filename, String encoding, boolean hasVariable, Cache<Object,Object> cache) {
    if (hasVariable) {
        // Variable content may change between calls, so bypass the cache.
        return getInterpretedContent(filename, encoding, hasVariable, cache);
    }
    // Static content: compute once and reuse via the cache.
    return (Serializable) cache.get(filename, p -> getContent(filename));
}
// Verifies that an XML prolog declaring Windows-1252 is honored and the decoded content is cached.
@Test public void getValueFromFile_withDefaultEncodingAndProlog() throws IOException { String content = "<?xml version=\"1.0\" encoding=\"Windows-1252\"?>\n" + CP1252_SAFE_XML; String filename = writeFile("object_prolog_cp1252.xml", content, Charset.forName("Cp1252")); Serializable object = getRenderer().getValueFromFile(filename, PublisherSampler.DEFAULT_ENCODING, true, cache); assertObject(object, "eéè€"); Person p = (Person) object; assertEquals("eéè€", p.getName(), "object.name"); Object firstCachedValue = getFirstCachedValue(); assertEquals(content, convertLineEndingsToSystem(firstCachedValue), "cache"); }
/**
 * Borrows a session from the pool, retrying on borrow timeouts until the callback is
 * canceled. Network failures shrink the maximum idle pool size and clear idle
 * connections before rethrowing; all other failures are mapped to BackgroundException.
 */
@Override
public Session<?> borrow(final BackgroundActionState callback) throws BackgroundException {
    final int numActive = pool.getNumActive();
    if(numActive > POOL_WARNING_THRESHOLD) {
        log.warn(String.format("Possibly large number of open connections (%d) in pool %s", numActive, this));
    }
    try {
        while(!callback.isCanceled()) {
            try {
                if(log.isInfoEnabled()) {
                    log.info(String.format("Borrow session from pool %s", this));
                }
                final Session<?> session = pool.borrowObject();
                if(log.isInfoEnabled()) {
                    log.info(String.format("Borrowed session %s from pool %s", session, this));
                }
                if(DISCONNECTED == features) {
                    // First successful borrow: switch feature lookup to the live session.
                    features = new StatelessSessionPool(connect, session, transcript, registry);
                }
                return session.withListener(transcript);
            }
            catch(IllegalStateException e) {
                throw new ConnectionCanceledException(e);
            }
            catch(NoSuchElementException e) {
                if(pool.isClosed()) {
                    throw new ConnectionCanceledException(e);
                }
                final Throwable cause = e.getCause();
                if(null == cause) {
                    log.warn(String.format("Timeout borrowing session from pool %s. Wait for another %dms", this, BORROW_MAX_WAIT_INTERVAL));
                    // Timeout
                    continue;
                }
                if(cause instanceof BackgroundException) {
                    final BackgroundException failure = (BackgroundException) cause;
                    log.warn(String.format("Failure %s obtaining connection for %s", failure, this));
                    if(diagnostics.determine(failure) == FailureDiagnostics.Type.network) {
                        final int max = Math.max(1, pool.getMaxIdle() - 1);
                        log.warn(String.format("Lower maximum idle pool size to %d connections.", max));
                        pool.setMaxIdle(max);
                        // Clear pool from idle connections
                        pool.clear();
                    }
                    throw failure;
                }
                log.error(String.format("Borrowing session from pool %s failed with %s", this, e));
                throw new DefaultExceptionMappingService().map(cause);
            }
        }
        // Callback canceled before a session could be borrowed.
        throw new ConnectionCanceledException();
    }
    catch(BackgroundException e) {
        throw e;
    }
    catch(Exception e) {
        if(e.getCause() instanceof BackgroundException) {
            throw ((BackgroundException) e.getCause());
        }
        throw new BackgroundException(e.getMessage(), e);
    }
}
// Verifies that a refused connection surfaced by the login check propagates out of borrow().
@Test(expected = ConnectionRefusedException.class) public void testConnectRefuse() throws Exception { final DefaultSessionPool pool = new DefaultSessionPool(new TestLoginConnectionService() { @Override public boolean check(final Session<?> session, final CancelCallback callback) throws BackgroundException { throw new ConnectionRefusedException("t", new RuntimeException()); } }, new DisabledX509TrustManager(), new DefaultX509KeyManager(), new DefaultVaultRegistry(new DisabledPasswordCallback()), new DisabledTranscriptListener(), new Host(new TestProtocol(), "t")); pool.borrow(BackgroundActionState.running); }
/**
 * Renders the product details page.
 *
 * @return the view name of the product template
 */
@GetMapping
public String getProduct() {
    final String viewName = "catalogue/products/product";
    return viewName;
}
// Verifies the controller returns the product view name without touching the REST client.
@Test
void getProduct_ReturnsProductPage() {
    // given
    // when
    var result = this.controller.getProduct();
    // then
    assertEquals("catalogue/products/product", result);
    verifyNoInteractions(this.productsRestClient);
}
// Validates that every referred document type exists and is globally distributed.
public void validate(Map<String, NewDocumentType> documentDefinitions, Set<NewDocumentType> globallyDistributedDocuments) { verifyReferredDocumentsArePresent(documentDefinitions); verifyReferredDocumentsAreGlobal(documentDefinitions, globallyDistributedDocuments); }
// Verifies that validation passes when no document is globally distributed.
@Test void validation_succeeds_on_no_global_documents() { Fixture fixture = new Fixture() .addNonGlobalDocument(createDocumentType("foo")); validate(fixture); }
// Converts a dotted-quad IPv4 string to its unsigned 32-bit value stored in a long.
// Throws IllegalArgumentException when the input does not match the IPv4 pattern.
public static long ipv4ToLong(String strIP) {
    final Matcher matcher = PatternPool.IPV4.matcher(strIP);
    if (matcher.matches()) {
        // matchAddress combines the four captured octets into a single long value.
        return matchAddress(matcher);
    }
    throw new IllegalArgumentException("Invalid IPv4 address!");
}
// Verifies ipv4ToLong over loopback, a public address, and the min/max boundary addresses.
@Test public void ipv4ToLongTest(){ long l = Ipv4Util.ipv4ToLong("127.0.0.1"); assertEquals(2130706433L, l); l = Ipv4Util.ipv4ToLong("114.114.114.114"); assertEquals(1920103026L, l); l = Ipv4Util.ipv4ToLong("0.0.0.0"); assertEquals(0L, l); l = Ipv4Util.ipv4ToLong("255.255.255.255"); assertEquals(4294967295L, l); }
// Records a segment whose copy has started as unreferenced for this leader epoch.
void handleSegmentWithCopySegmentStartedState(RemoteLogSegmentId remoteLogSegmentId) {
    // Add this to unreferenced set of segments for the respective leader epoch.
    unreferencedSegmentIds.add(remoteLogSegmentId);
}
// Verifies that a copy-started segment is tracked in the unreferenced-segment set.
@Test void handleSegmentWithCopySegmentStartedState() { RemoteLogSegmentId segmentId = new RemoteLogSegmentId(tpId, Uuid.randomUuid()); epochState.handleSegmentWithCopySegmentStartedState(segmentId); assertEquals(1, epochState.unreferencedSegmentIds().size()); assertTrue(epochState.unreferencedSegmentIds().contains(segmentId)); }
/**
 * Returns a live Redis-backed view of the value set stored under the given key.
 */
@Override
public RSet<V> get(K key) {
    // The values set name is derived from the key's hash.
    return new RedissonSetMultimapValues<>(codec, commandExecutor, getValuesName(keyHash(key)), getTimeoutSetName(), key);
}
// Verifies containsAll on the multimap's value view, including duplicate query elements.
@Test public void testContainsAll() { RMultimapCache<String, String> multimap = getMultimapCache("test"); multimap.put("1", "1"); multimap.put("1", "2"); multimap.put("1", "3"); multimap.put("1", "3"); assertThat(multimap.get("1").containsAll(List.of("1", "1", "1"))).isTrue(); assertThat(multimap.get("1").containsAll(List.of("1", "2", "4"))).isFalse(); assertThat(multimap.get("1").containsAll(List.of("1", "2", "1"))).isTrue(); assertThat(multimap.get("1").containsAll(List.of("1", "1"))).isTrue(); }
// Builds DWRF table encryption properties from Hive table properties. Returns empty when no
// encryption keys are present; requires both algorithm and provider keys when encryption is set.
public static Optional<DwrfTableEncryptionProperties> fromHiveTableProperties(Map<String, String> properties) { String encryptTable = properties.get(ENCRYPT_TABLE_KEY); String encryptColumns = properties.get(ENCRYPT_COLUMNS_KEY); if (encryptTable != null || encryptColumns != null) { if (!properties.containsKey(DWRF_ENCRYPTION_ALGORITHM_KEY) || !properties.containsKey(DWRF_ENCRYPTION_PROVIDER_KEY)) { throw new PrestoException(HIVE_INVALID_ENCRYPTION_METADATA, format("Both %s and %s need to be set for DWRF encryption", DWRF_ENCRYPTION_ALGORITHM_KEY, DWRF_ENCRYPTION_PROVIDER_KEY)); } if (encryptTable != null) { return Optional.of(forTable(encryptTable, properties.get(DWRF_ENCRYPTION_ALGORITHM_KEY), properties.get(DWRF_ENCRYPTION_PROVIDER_KEY))); } return Optional.of(forPerColumn( fromHiveProperty(encryptColumns), properties.get(DWRF_ENCRYPTION_ALGORITHM_KEY), properties.get(DWRF_ENCRYPTION_PROVIDER_KEY))); } return Optional.empty(); }
// Verifies that an empty property map yields no encryption properties.
@Test public void testFromHiveTablePropertiesNonePresent() { Optional<DwrfTableEncryptionProperties> encryptionProperties = fromHiveTableProperties(ImmutableMap.of()); assertFalse(encryptionProperties.isPresent()); }
// Builds a Docker V2.2 manifest list over all images: each per-image manifest is translated,
// digested, and added with its platform (architecture/OS). Only V22 manifests are supported;
// OCI image indexes are rejected, and at least one image must be present.
public <T extends BuildableManifestTemplate> ManifestTemplate getManifestListTemplate( Class<T> manifestTemplateClass) throws IOException { Preconditions.checkArgument( manifestTemplateClass == V22ManifestTemplate.class, "Build an OCI image index is not yet supported"); Preconditions.checkState(!images.isEmpty(), "no images given"); V22ManifestListTemplate manifestList = new V22ManifestListTemplate(); for (Image image : images) { ImageToJsonTranslator imageTranslator = new ImageToJsonTranslator(image); BlobDescriptor configDescriptor = Digests.computeDigest(imageTranslator.getContainerConfiguration()); BuildableManifestTemplate manifestTemplate = imageTranslator.getManifestTemplate(manifestTemplateClass, configDescriptor); BlobDescriptor manifestDescriptor = Digests.computeDigest(manifestTemplate); ManifestDescriptorTemplate manifest = new ManifestDescriptorTemplate(); manifest.setMediaType(manifestTemplate.getManifestMediaType()); manifest.setSize(manifestDescriptor.getSize()); manifest.setDigest(manifestDescriptor.getDigest().toString()); manifest.setPlatform(image.getArchitecture(), image.getOs()); manifestList.addManifest(manifest); } return manifestList; }
// Verifies that requesting an OCI manifest list fails with the expected precondition message.
@Test public void testGetManifestListTemplate_unsupportedImageFormat() throws IOException { try { new ManifestListGenerator(Arrays.asList(image1, image2)) .getManifestListTemplate(OciManifestTemplate.class); Assert.fail(); } catch (IllegalArgumentException ex) { Assert.assertEquals("Build an OCI image index is not yet supported", ex.getMessage()); } }
// Returns the configured server base URL (or a computed default), without any trailing slash.
public String getBaseUrl() {
    String url = config.get(SERVER_BASE_URL).orElse("");
    if (isEmpty(url)) {
        // No explicit configuration: derive the URL from host/port/context settings.
        url = computeBaseUrl();
    }
    // Remove trailing slashes
    return StringUtils.removeEnd(url, "/");
}
// Verifies that port 80 is omitted from the computed http base URL.
@Test public void base_url_is_http_specified_host_no_port_when_host_is_set_and_port_is_80() { settings.setProperty(HOST_PROPERTY, "foo"); settings.setProperty(PORT_PORPERTY, 80); assertThat(underTest().getBaseUrl()).isEqualTo("http://foo"); }
// Adds an int measure for the given metric to the internal component, after validating the metric.
@Override public void addMeasure(String metricKey, int value) { Metric metric = metricRepository.getByKey(metricKey); validateAddMeasure(metric); measureRepository.add(internalComponent, metric, newMeasureBuilder().create(value)); }
// Verifies that adding a long measure stores a long-typed measure with the given value.
@Test public void add_long_measure_create_measure_of_type_long_with_right_value() { MeasureComputerContextImpl underTest = newContext(PROJECT_REF, NCLOC_KEY, LONG_METRIC_KEY); underTest.addMeasure(LONG_METRIC_KEY, 10L); Optional<Measure> measure = measureRepository.getAddedRawMeasure(PROJECT_REF, LONG_METRIC_KEY); assertThat(measure).isPresent(); assertThat(measure.get().getLongValue()).isEqualTo(10L); }
// Builds a BigQuery-backed Beam SQL table, deriving conversion options from the table properties.
@Override public BeamSqlTable buildBeamSqlTable(Table table) { return new BigQueryTable(table, getConversionOptions(table.getProperties())); }
// Verifies that the writeDisposition table property is parsed into WRITE_TRUNCATE.
@Test public void testSelectWriteDispositionMethodTruncate() { Table table = fakeTableWithProperties( "hello", "{ " + WRITE_DISPOSITION_PROPERTY + ": " + "\"" + WriteDisposition.WRITE_TRUNCATE.toString() + "\" }"); BigQueryTable sqlTable = (BigQueryTable) provider.buildBeamSqlTable(table); assertEquals(WriteDisposition.WRITE_TRUNCATE, sqlTable.writeDisposition); }
// Parses XML config content into a CruiseConfig for editing, stamping it with the content's MD5
// and marking its origin as the config file.
// NOTE(review): content.getBytes() uses the platform default charset — confirm this matches the
// encoding assumed by md5Hex and the XML parser.
public CruiseConfig deserializeConfig(String content) throws Exception { String md5 = md5Hex(content); Element element = parseInputStream(new ByteArrayInputStream(content.getBytes())); LOGGER.debug("[Config Save] Updating config cache with new XML"); CruiseConfig configForEdit = classParser(element, BasicCruiseConfig.class, configCache, new GoCipher(), registry, new ConfigReferenceElements()).parse(); setMd5(configForEdit, md5); configForEdit.setOrigins(new FileConfigOrigin()); return configForEdit; }
// Verifies that a config with a NAnt builder deserializes with the expected build target.
@Test void shouldLoadNAntBuilder() throws Exception { CruiseConfig cruiseConfig = xmlLoader.deserializeConfig( CONFIG_WITH_NANT_AND_EXEC_BUILDER); JobConfig plan = cruiseConfig.jobConfigByName("pipeline1", "mingle", "cardlist", true); BuildTask builder = (BuildTask) plan.tasks().findFirstByType(NantTask.class); assertThat(builder.getTarget()).isEqualTo("all"); }
// Static factory for a fresh Builder instance.
public static Builder builder() { return new Builder(); }
// Verifies that two identically-built SelectorData instances are equal and collapse in a HashSet.
@Test public void testEqualsAndHashCode() { SelectorData selectorData1 = SelectorData.builder().id("id").pluginId("pluginId").pluginName("pluginName") .name("name").matchMode(0).type(0).sort(0).enabled(true) .logged(true).continued(true).handle("handle").conditionList(new ArrayList<>(1)).build(); SelectorData selectorData2 = SelectorData.builder().id("id").pluginId("pluginId").pluginName("pluginName") .name("name").matchMode(0).type(0).sort(0).enabled(true) .logged(true).continued(true).handle("handle").conditionList(new ArrayList<>(1)).build(); Set<SelectorData> set = new HashSet<>(); set.add(selectorData1); set.add(selectorData2); assertThat(set, hasSize(1)); }
// A token is generated only for INSERT ... SET statements whose first table is encrypted.
@Override public boolean isGenerateSQLToken(final SQLStatementContext sqlStatementContext) { return sqlStatementContext instanceof InsertStatementContext && (((InsertStatementContext) sqlStatementContext).getSqlStatement()).getSetAssignment().isPresent() && encryptRule.findEncryptTable(((TableAvailable) sqlStatementContext).getTablesContext().getSimpleTables().iterator().next().getTableName().getIdentifier().getValue()).isPresent(); }
// Verifies the generator fires for an insert statement carrying a SET assignment.
@Test void assertIsGenerateSQLToken() { when(insertStatementContext.getSqlStatement().getSetAssignment()).thenReturn(Optional.of(setAssignmentSegment)); assertTrue(tokenGenerator.isGenerateSQLToken(insertStatementContext)); }
@Override public Object[] toArray() { return toArray(new Object[size]); }
// Verifies the provided array is reused and the slot after the last element is set to null.
@Test public void testToGenericArraySetsNullAfterLastContainedElement() { final OAHashSet<Integer> set = new OAHashSet<>(8); populateSet(set, 10); final Integer[] setElementsProvided = new Integer[11]; final Integer[] setElementsReturned = set.toArray(setElementsProvided); assertSame(setElementsProvided, setElementsReturned); assertNull(setElementsProvided[10]); }
// Returns the first configured server address.
public URI getServerAddress() { return serverAddresses.get(0); }
// Verifies that an https address without an explicit port defaults to port 443.
@Test
public void shouldParseHttpsAddressWithoutPort() throws Exception {
    // Given:
    final String serverAddress = "https://singleServer";
    final URI serverURI = new URI(serverAddress.concat(":443"));
    // When:
    try (KsqlRestClient ksqlRestClient = clientWithServerAddresses(serverAddress)) {
        // Then:
        assertThat(ksqlRestClient.getServerAddress(), is(serverURI));
    }
}
// Returns the API group/version derived from this plugin's metadata.
@Override public GroupVersion groupVersion() { return PublicApiUtils.groupVersion(new Plugin()); }
// Verifies the endpoint reports the expected plugin API group/version string.
@Test void groupVersion() { GroupVersion groupVersion = endpoint.groupVersion(); assertThat(groupVersion.toString()).isEqualTo("api.plugin.halo.run/v1alpha1"); }
// Checks the given app permission against the installed SecurityManager, if any.
// With an AccessControlContext available, the (context, permission) result is memoized
// in PermissionCheckCache keyed by the XOR of their hash codes.
public static void checkPermission(AppPermission.Type permission) { SecurityManager sm = System.getSecurityManager(); if (sm == null) { return; } AccessControlContext context = AccessController.getContext(); if (context == null) { sm.checkPermission(new AppPermission((permission))); } else { int contextHash = context.hashCode() ^ permission.hashCode(); PermissionCheckCache.getInstance().checkCache(contextHash, new AppPermission(permission)); } }
// Verifies that a SecurityManager denies the APP_EVENT permission with AccessControlException.
@Test(expected = AccessControlException.class) public void testCheckPermission() throws Exception { SecurityManager sm = new SecurityManager(); sm.checkPermission(new AppPermission((AppPermission.Type.APP_EVENT))); }
public static boolean isOverlap(ChronoLocalDateTime<?> realStartTime, ChronoLocalDateTime<?> realEndTime, ChronoLocalDateTime<?> startTime, ChronoLocalDateTime<?> endTime) { // x>b||a>y 无交集 // 则有交集的逻辑为 !(x>b||a>y) // 根据德摩根公式,可化简为 x<=b && a<=y 即 realStartTime<=endTime && startTime<=realEndTime return realStartTime.compareTo(endTime) <=0 && startTime.compareTo(realEndTime) <= 0; }
// Exercises isOverlap with disjoint, touching, containing, and zero-length intervals.
@Test
public void isOverlapTest(){
    final LocalDateTime oneStartTime = LocalDateTime.of(2022, 1, 1, 10, 10, 10);
    final LocalDateTime oneEndTime = LocalDateTime.of(2022, 1, 1, 11, 10, 10);
    final LocalDateTime oneStartTime2 = LocalDateTime.of(2022, 1, 1, 11, 20, 10);
    final LocalDateTime oneEndTime2 = LocalDateTime.of(2022, 1, 1, 11, 30, 10);
    final LocalDateTime oneStartTime3 = LocalDateTime.of(2022, 1, 1, 11, 40, 10);
    final LocalDateTime oneEndTime3 = LocalDateTime.of(2022, 1, 1, 11, 50, 10);
    // real leave-request data
    final LocalDateTime realStartTime = LocalDateTime.of(2022, 1, 1, 11, 49, 10);
    final LocalDateTime realEndTime = LocalDateTime.of(2022, 1, 1, 12, 0, 10);
    final LocalDateTime realStartTime1 = DateUtil.parseLocalDateTime("2022-03-01 08:00:00");
    final LocalDateTime realEndTime1 = DateUtil.parseLocalDateTime("2022-03-01 10:00:00");
    final LocalDateTime startTime = DateUtil.parseLocalDateTime("2022-03-23 05:00:00");
    final LocalDateTime endTime = DateUtil.parseLocalDateTime("2022-03-23 13:00:00");
    assertFalse(LocalDateTimeUtil.isOverlap(oneStartTime,oneEndTime,realStartTime,realEndTime));
    assertFalse(LocalDateTimeUtil.isOverlap(oneStartTime2,oneEndTime2,realStartTime,realEndTime));
    assertTrue(LocalDateTimeUtil.isOverlap(oneStartTime3,oneEndTime3,realStartTime,realEndTime));
    assertFalse(LocalDateTimeUtil.isOverlap(realStartTime1,realEndTime1,startTime,endTime));
    assertFalse(LocalDateTimeUtil.isOverlap(startTime,endTime,realStartTime1,realEndTime1));
    assertTrue(LocalDateTimeUtil.isOverlap(startTime,startTime,startTime,startTime));
    assertTrue(LocalDateTimeUtil.isOverlap(startTime,startTime,startTime,endTime));
    assertFalse(LocalDateTimeUtil.isOverlap(startTime,startTime,endTime,endTime));
    assertTrue(LocalDateTimeUtil.isOverlap(startTime,endTime,endTime,endTime));
}
// Returns whether the job signature resolves to a real class and method with matching
// parameter types. JDK (java./javax.) signatures are assumed to exist without lookup.
public static boolean jobExists(String jobSignature) { if (jobSignature.startsWith("java.") || jobSignature.startsWith("javax.")) return true; // we assume that JDK classes don't change often try { String clazzAndMethod = getFQClassNameAndMethod(jobSignature); String clazzName = getFQClassName(clazzAndMethod); String method = getMethodName(clazzAndMethod); Class<Object> clazz = toClass(clazzName); Class<?>[] jobParameterTypes = getParameterTypes(jobSignature); return findMethod(clazz, method, jobParameterTypes).isPresent(); } catch (IllegalArgumentException e) { return false; } }
// Verifies signature resolution for existing and missing classes, methods, and parameter lists.
@Test
void jobExists() {
    assertThat(JobUtils.jobExists("org.jobrunr.stubs.TestService.doWork(java.util.UUID)")).isTrue();
    assertThat(JobUtils.jobExists("org.jobrunr.stubs.TestService.getProcessedJobs()")).isTrue();
    assertThat(JobUtils.jobExists("org.jobrunr.stubs.TestService.doWorkThatTakesLong(java.lang.Integer)")).isTrue();
    assertThat(JobUtils.jobExists("org.jobrunr.stubs.TestService.doWork(java.lang.Integer,java.lang.Integer)")).isTrue();
    assertThat(JobUtils.jobExists("java.lang.System.out.println(java.lang.String)")).isTrue();
    assertThat(JobUtils.jobExists("javax.sql.DataSource.getConnection()")).isTrue();
    assertThat(JobUtils.jobExists("org.jobrunr.stubs.TestServiceThatDoesNotExist.doWork(java.lang.Integer,java.lang.Integer)")).isFalse();
    assertThat(JobUtils.jobExists("org.jobrunr.stubs.TestService.methodThatDoesNotExist(java.lang.Integer,java.lang.Integer)")).isFalse();
    assertThat(JobUtils.jobExists("org.jobrunr.stubs.TestService.doWork(java.util.UUID,org.jobrunr.stubs.JobParameterThatDoesNotExist)")).isFalse();
    assertThat(JobUtils.jobExists("org.jobrunr.stubs.TestService.doWork(java.lang.Integer,java.lang.Integer,java.lang.Integer,java.lang.Integer)")).isFalse(); // too many parameters
}
// Stores the classloader id, keeping only its low 12 bits (0x000-0xfff).
public static void setClassloaderId(int classloaderId) { IncrementingUuidGenerator.classloaderId = classloaderId & 0xfff; }
// Verifies that classloader ids wider than 12 bits are truncated to their low 12 bits.
@Test
void setClassloaderId_keeps_only_12_bits() throws NoSuchFieldException, IllegalAccessException {
    // When the classloaderId is defined with a value higher than 0xfff (12 bits)
    IncrementingUuidGenerator.setClassloaderId(0xfffffABC);
    // Then the classloaderId is truncated to 12 bits
    assertEquals(0x0ABC, getStaticFieldValue(new IncrementingUuidGenerator(), CLASSLOADER_ID_FIELD_NAME));
}
// Initializes empty per-topic offline stats for the given topic/broker, stamped with the creation time.
public PersistentOfflineTopicStats(String topicName, String brokerName) { this.brokerName = brokerName; this.topicName = topicName; this.dataLedgerDetails = new ArrayList<>(); this.cursorDetails = new HashMap<>(); this.statGeneratedAt = new Date(System.currentTimeMillis()); }
// Verifies cursor/ledger detail accumulation and that reset() clears all stats and re-stamps the time.
@Test public void testPersistentOfflineTopicStats() { PersistentOfflineTopicStats pot = new PersistentOfflineTopicStats("topic1", "prod1-broker1.messaging.use.example.com"); String cursor = "cursor0"; long time = System.currentTimeMillis(); pot.addCursorDetails(cursor, 0, 1); pot.addLedgerDetails(0, time, 100, 1); Assert.assertEquals(pot.cursorDetails.get(cursor).cursorBacklog, 0); Assert.assertEquals(pot.cursorDetails.get(cursor).cursorLedgerId, 1); Assert.assertEquals(pot.dataLedgerDetails.get(0).entries, 0); Assert.assertEquals(pot.dataLedgerDetails.get(0).timestamp, time); Assert.assertEquals(pot.dataLedgerDetails.get(0).size, 100); Assert.assertEquals(pot.dataLedgerDetails.get(0).ledgerId, 1); long resetAt = System.currentTimeMillis(); pot.reset(); Assert.assertEquals(pot.storageSize, 0); Assert.assertEquals(pot.totalMessages, 0); Assert.assertEquals(pot.messageBacklog, 0); Assert.assertEquals(pot.dataLedgerDetails.size(), 0); Assert.assertEquals(pot.cursorDetails.size(), 0); Assert.assertTrue(pot.statGeneratedAt.getTime() - resetAt < 100); }
/**
 * Creates a scan command that reports progress to the given console logger.
 *
 * @param console logger used for user-facing output
 */
public ScanCommand(Logger console) {
    super(console);
}
// Running the scan command against a generated Parquet file must succeed
// (exit code 0).
@Test
public void testScanCommand() throws IOException {
    File file = parquetFile();
    ScanCommand command = new ScanCommand(createLogger());
    command.sourceFiles = Arrays.asList(file.getAbsolutePath());
    command.setConf(new Configuration());
    Assert.assertEquals(0, command.run());
}
/**
 * Replaces the underlying data with {@code len} bytes of {@code buffer}
 * starting at {@code off}, wrapping the array in a memory segment.
 */
public void setBuffer(byte[] buffer, int off, int len) {
    setSegment(MemorySegmentFactory.wrap(buffer), off, len);
}
// setBuffer must expose exactly the [off, off+len) window of the supplied
// array; reads before setBuffer and past the window return EOF (-1).
@Test
void testSetBuffer() {
    ByteArrayInputStreamWithPos in = new ByteArrayInputStreamWithPos();
    // Freshly constructed stream is empty.
    assertThat(in.read()).isEqualTo(-1);
    byte[] testData = new byte[] {0x42, 0x43, 0x44, 0x45};
    int off = 1;
    int len = 2;
    in.setBuffer(testData, off, len);
    for (int i = 0; i < len; ++i) {
        assertThat(in.read()).isEqualTo(testData[i + off]);
    }
    // Window exhausted -> EOF again.
    assertThat(in.read()).isEqualTo(-1);
}
/**
 * Returns the manifest's layer descriptors as an unmodifiable view.
 */
@Override
public List<ContentDescriptorTemplate> getLayers() {
    // Wrap rather than copy; callers must not mutate the returned list.
    return Collections.unmodifiableList(layers);
}
// Deserializing a manifest with optional layer properties: absent "urls" and
// "annotations" map to null, present ones map to their JSON values.
@Test
public void testFromJson_optionalProperties() throws IOException, URISyntaxException {
    Path jsonFile = Paths.get(Resources.getResource("core/json/v22manifest_optional_properties.json").toURI());
    V22ManifestTemplate manifestJson = JsonTemplateMapper.readJsonFromFile(jsonFile, V22ManifestTemplate.class);
    List<ContentDescriptorTemplate> layers = manifestJson.getLayers();
    Assert.assertEquals(4, layers.size());
    // Layer 0: neither optional property set.
    Assert.assertNull(layers.get(0).getUrls());
    Assert.assertNull(layers.get(0).getAnnotations());
    // Layer 1: urls only.
    Assert.assertEquals(Arrays.asList("url-foo", "url-bar"), layers.get(1).getUrls());
    Assert.assertNull(layers.get(1).getAnnotations());
    // Layer 2: annotations only.
    Assert.assertNull(layers.get(2).getUrls());
    Assert.assertEquals(ImmutableMap.of("key-foo", "value-foo"), layers.get(2).getAnnotations());
    // Layer 3: both present.
    Assert.assertEquals(Arrays.asList("cool-url"), layers.get(3).getUrls());
    Assert.assertEquals(
        ImmutableMap.of("key1", "value1", "key2", "value2"), layers.get(3).getAnnotations());
}
/**
 * Lists the Iceberg tables under the given namespace. Snowflake only supports
 * listing at SCHEMA granularity, so any other namespace level is rejected.
 *
 * @param namespace a database.schema namespace
 * @return the Iceberg identifiers of the tables in that schema
 */
@Override
public List<TableIdentifier> listTables(Namespace namespace) {
    SnowflakeIdentifier schemaScope = NamespaceHelpers.toSnowflakeIdentifier(namespace);
    Preconditions.checkArgument(
        schemaScope.type() == SnowflakeIdentifier.Type.SCHEMA,
        "listTables must be at SCHEMA level; got %s from namespace %s",
        schemaScope,
        namespace);
    // Translate each Snowflake-native identifier back into the Iceberg form.
    List<TableIdentifier> results = new ArrayList<>();
    for (SnowflakeIdentifier table : snowflakeClient.listIcebergTables(schemaScope)) {
        results.add(NamespaceHelpers.toIcebergTableIdentifier(table));
    }
    return results;
}
// Listing tables in a database that does not exist must surface a
// RuntimeException naming the missing database.
@Test
public void testListTablesWithinNonexistentDB() {
    String dbName = "NONEXISTENT_DB";
    String schemaName = "NONEXISTENT_SCHEMA";
    assertThatExceptionOfType(RuntimeException.class)
        .isThrownBy(() -> catalog.listTables(Namespace.of(dbName, schemaName)))
        .withMessageContaining("does not exist")
        .withMessageContaining(dbName);
}
/**
 * Loads the dubbo properties file, resolving its location in this order:
 * the {@code dubbo.properties.file} system property, then the environment
 * variable of the same name, then the built-in default path.
 *
 * @param classLoaders class loaders used to locate the resource
 * @return the loaded properties
 */
public static Properties getProperties(Set<ClassLoader> classLoaders) {
    String path = System.getProperty(CommonConstants.DUBBO_PROPERTIES_KEY);
    // Fall back to the environment variable when the system property is unset.
    if (StringUtils.isEmpty(path)) {
        path = System.getenv(CommonConstants.DUBBO_PROPERTIES_KEY);
    }
    // Finally fall back to the default bundled location.
    if (StringUtils.isEmpty(path)) {
        path = CommonConstants.DEFAULT_DUBBO_PROPERTIES;
    }
    return ConfigUtils.loadProperties(classLoaders, path, false, true);
}
// getProperties should load the file named by the dubbo properties system
// property; the property is cleared in finally to avoid leaking into other tests.
@Test
void testGetProperties1() throws Exception {
    try {
        System.setProperty(CommonConstants.DUBBO_PROPERTIES_KEY, "properties.load");
        Properties p = ConfigUtils.getProperties(Collections.emptySet());
        assertThat((String) p.get("a"), equalTo("12"));
        assertThat((String) p.get("b"), equalTo("34"));
        assertThat((String) p.get("c"), equalTo("56"));
    } finally {
        System.clearProperty(CommonConstants.DUBBO_PROPERTIES_KEY);
    }
}
/**
 * Defines the TimeLimiterRegistry bean: builds the registry from configuration
 * properties and customizers, registers event consumers, and eagerly
 * initializes all configured time limiter instances.
 */
@Bean
public TimeLimiterRegistry timeLimiterRegistry(
        TimeLimiterConfigurationProperties timeLimiterConfigurationProperties,
        EventConsumerRegistry<TimeLimiterEvent> timeLimiterEventConsumerRegistry,
        RegistryEventConsumer<TimeLimiter> timeLimiterRegistryEventConsumer,
        @Qualifier("compositeTimeLimiterCustomizer") CompositeCustomizer<TimeLimiterConfigCustomizer> compositeTimeLimiterCustomizer) {
    TimeLimiterRegistry timeLimiterRegistry =
        createTimeLimiterRegistry(timeLimiterConfigurationProperties, timeLimiterRegistryEventConsumer, compositeTimeLimiterCustomizer);
    registerEventConsumer(timeLimiterRegistry, timeLimiterEventConsumerRegistry, timeLimiterConfigurationProperties);
    // Pre-create instances declared in configuration so they exist before first use.
    initTimeLimiterRegistry(timeLimiterRegistry, timeLimiterConfigurationProperties, compositeTimeLimiterCustomizer);
    return timeLimiterRegistry;
}
// Instances inherit from "default" or a named shared config and may override
// individual settings; unknown instances fall back to the registry default.
@Test
public void testCreateTimeLimiterRegistryWithSharedConfigs() {
    // Given
    io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties defaultProperties =
        new io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties();
    defaultProperties.setTimeoutDuration(Duration.ofSeconds(3));
    defaultProperties.setCancelRunningFuture(true);
    io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties sharedProperties =
        new io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties();
    sharedProperties.setTimeoutDuration(Duration.ofSeconds(2));
    sharedProperties.setCancelRunningFuture(false);
    io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties backendWithDefaultConfig =
        new io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties();
    backendWithDefaultConfig.setBaseConfig("default");
    backendWithDefaultConfig.setTimeoutDuration(Duration.ofSeconds(5));
    io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties backendWithSharedConfig =
        new io.github.resilience4j.common.timelimiter.configuration.CommonTimeLimiterConfigurationProperties.InstanceProperties();
    backendWithSharedConfig.setBaseConfig("sharedConfig");
    backendWithSharedConfig.setCancelRunningFuture(true);
    TimeLimiterConfigurationProperties timeLimiterConfigurationProperties = new TimeLimiterConfigurationProperties();
    timeLimiterConfigurationProperties.getConfigs().put("default", defaultProperties);
    timeLimiterConfigurationProperties.getConfigs().put("sharedConfig", sharedProperties);
    timeLimiterConfigurationProperties.getInstances().put("backendWithDefaultConfig", backendWithDefaultConfig);
    timeLimiterConfigurationProperties.getInstances().put("backendWithSharedConfig", backendWithSharedConfig);
    TimeLimiterConfiguration timeLimiterConfiguration = new TimeLimiterConfiguration();
    DefaultEventConsumerRegistry<TimeLimiterEvent> eventConsumerRegistry = new DefaultEventConsumerRegistry<>();
    // When
    TimeLimiterRegistry timeLimiterRegistry =
        timeLimiterConfiguration.timeLimiterRegistry(timeLimiterConfigurationProperties, eventConsumerRegistry,
            new CompositeRegistryEventConsumer<>(emptyList()), compositeTimeLimiterCustomizerTestInstance());
    // Then
    assertThat(timeLimiterRegistry.getAllTimeLimiters().size()).isEqualTo(2);
    // Should get default config and overwrite timeout duration
    TimeLimiter timeLimiter1 = timeLimiterRegistry.timeLimiter("backendWithDefaultConfig");
    assertThat(timeLimiter1).isNotNull();
    assertThat(timeLimiter1.getTimeLimiterConfig().getTimeoutDuration()).isEqualTo(Duration.ofSeconds(5));
    assertThat(timeLimiter1.getTimeLimiterConfig().shouldCancelRunningFuture()).isTrue();
    // Should get shared config and overwrite cancelRunningFuture
    TimeLimiter timeLimiter2 = timeLimiterRegistry.timeLimiter("backendWithSharedConfig");
    assertThat(timeLimiter2).isNotNull();
    assertThat(timeLimiter2.getTimeLimiterConfig().getTimeoutDuration()).isEqualTo(Duration.ofSeconds(2));
    assertThat(timeLimiter2.getTimeLimiterConfig().shouldCancelRunningFuture()).isTrue();
    // Unknown backend should get default config of Registry
    TimeLimiter timeLimiter3 = timeLimiterRegistry.timeLimiter("unknownBackend");
    assertThat(timeLimiter3).isNotNull();
    assertThat(timeLimiter3.getTimeLimiterConfig().getTimeoutDuration()).isEqualTo(Duration.ofSeconds(3));
    assertThat(eventConsumerRegistry.getAllEventConsumer()).hasSize(3);
}
/**
 * Rounds {@code n} using the default scale of zero decimal places by
 * delegating to the two-argument overload.
 */
public FEELFnResult<BigDecimal> invoke(@ParameterName( "n" ) BigDecimal n) {
    return invoke(n, BigDecimal.ZERO);
}
// Scales outside the supported range (both very large and very negative)
// must produce an InvalidParametersEvent error rather than a result.
@Test
void invokeOutRangeScale() {
    FunctionTestUtil.assertResultError(roundDownFunction.invoke(BigDecimal.valueOf(1.5), BigDecimal.valueOf(6177)),
        InvalidParametersEvent.class);
    FunctionTestUtil.assertResultError(roundDownFunction.invoke(BigDecimal.valueOf(1.5), BigDecimal.valueOf(-6122)),
        InvalidParametersEvent.class);
}
/**
 * Builds resource models for all Rest.li-annotated classes, returning only
 * the root resources keyed by their paths.
 *
 * @param restliAnnotatedClasses resource classes to process
 * @return root resource models keyed by resource path
 */
public static Map<String, ResourceModel> buildResourceModels(final Set<Class<?>> restliAnnotatedClasses) {
    // Roots collected here; the helper map tracks every processed class so
    // parent resources are only built once.
    Map<String, ResourceModel> rootModels = new HashMap<>();
    Map<Class<?>, ResourceModel> processedModels = new HashMap<>();
    restliAnnotatedClasses.forEach(
        annotatedClass -> processResourceInOrder(annotatedClass, processedModels, rootModels));
    return rootModels;
}
// Each malformed resource class (from the data provider) must cause
// buildResourceModels to fail with a ResourceConfigException whose message
// contains the expected error text.
@Test(dataProvider = "badResources")
public void testBadResource(Class<?> resourceClass, String errorMsg) {
    Set<Class<?>> set = new HashSet<>();
    set.add(ParentResource.class);
    set.add(resourceClass);
    try {
        RestLiApiBuilder.buildResourceModels(set);
        Assert.fail("Building api with BadResource should throw " + ResourceConfigException.class);
    } catch (ResourceConfigException e) {
        Assert.assertTrue(e.getMessage().contains(errorMsg));
    }
}
/**
 * Configures the list serializer's inner element serializer from the configs
 * map (either a class name or a Class), then picks the serialization strategy:
 * constant-size for fixed-length serializers, variable-size otherwise.
 *
 * @throws ConfigException if already configured via constructor, if no inner
 *                         serde is configured, or if the class cannot be found
 */
@SuppressWarnings("unchecked")
@Override
public void configure(Map<String, ?> configs, boolean isKey) {
    // Constructor-based configuration and config-based configuration are exclusive.
    if (inner != null) {
        log.error("Could not configure ListSerializer as the parameter has already been set -- inner: {}", inner);
        throw new ConfigException("List serializer was already initialized using a non-default constructor");
    }
    final String innerSerdePropertyName =
        isKey ? CommonClientConfigs.DEFAULT_LIST_KEY_SERDE_INNER_CLASS : CommonClientConfigs.DEFAULT_LIST_VALUE_SERDE_INNER_CLASS;
    final Object innerSerdeClassOrName = configs.get(innerSerdePropertyName);
    if (innerSerdeClassOrName == null) {
        throw new ConfigException("Not able to determine the serializer class because it was neither passed via the constructor nor set in the config.");
    }
    try {
        // The config value may be either a class name string or a Class object.
        if (innerSerdeClassOrName instanceof String) {
            inner = Utils.newInstance((String) innerSerdeClassOrName, Serde.class).serializer();
        } else if (innerSerdeClassOrName instanceof Class) {
            inner = (Serializer<Inner>) ((Serde) Utils.newInstance((Class) innerSerdeClassOrName)).serializer();
        } else {
            throw new KafkaException("Could not create a serializer class instance using \"" + innerSerdePropertyName + "\" property.");
        }
        inner.configure(configs, isKey);
        // Fixed-length serializers allow a more compact wire format.
        serStrategy = FIXED_LENGTH_SERIALIZERS.contains(inner.getClass()) ? SerializationStrategy.CONSTANT_SIZE : SerializationStrategy.VARIABLE_SIZE;
    } catch (final ClassNotFoundException e) {
        throw new ConfigException(innerSerdePropertyName, innerSerdeClassOrName, "Serializer class " + innerSerdeClassOrName + " could not be found.");
    }
}
// Configuring with a non-existent inner serializer class name must fail with
// a ConfigException-style message identifying the missing class.
@Test
public void testListKeySerializerNoArgConstructorsShouldThrowKafkaExceptionDueClassNotFound() {
    props.put(CommonClientConfigs.DEFAULT_LIST_KEY_SERDE_INNER_CLASS, nonExistingClass);
    final KafkaException exception = assertThrows(
        KafkaException.class,
        () -> listSerializer.configure(props, true)
    );
    assertEquals("Invalid value non.existing.class for configuration " + CommonClientConfigs.DEFAULT_LIST_KEY_SERDE_INNER_CLASS
        + ": Serializer class " + nonExistingClass + " could not be found.", exception.getMessage());
}
/**
 * Decides whether the hint value marks a shadow operation. The hint is only
 * consulted when the operation is a hint match or the logic table is one of
 * the configured shadow tables.
 */
@Override
public boolean isShadow(final Collection<String> shadowTableNames, final PreciseHintShadowValue<String> noteShadowValue) {
    boolean hintMatchOperation = ShadowOperationType.HINT_MATCH == noteShadowValue.getShadowOperationType();
    boolean shadowTable = shadowTableNames.contains(noteShadowValue.getLogicTableName());
    if (!hintMatchOperation && !shadowTable) {
        return false;
    }
    return SQLHintUtils.extractHint(noteShadowValue.getValue()).isShadow();
}
// Only a syntactically complete SHARDINGSPHERE_HINT comment with SHADOW=true,
// on a configured shadow table, is recognized as a shadow operation.
@Test
void assertIsShadow() {
    // Table not in the shadow-table list -> false even with a valid-looking hint.
    assertFalse(shadowAlgorithm.isShadow(Arrays.asList("t_user", "t_order"), new PreciseHintShadowValue<>("t_auto", ShadowOperationType.INSERT, "/*shadow:true*/")));
    // Malformed or incomplete hint comments -> false.
    assertFalse(shadowAlgorithm.isShadow(Arrays.asList("t_user", "t_order"), createNoteShadowValue("/**/")));
    assertFalse(shadowAlgorithm.isShadow(Arrays.asList("t_user", "t_order"), createNoteShadowValue("/*")));
    assertFalse(shadowAlgorithm.isShadow(Arrays.asList("t_user", "t_order"), createNoteShadowValue("aaa = bbb")));
    assertFalse(shadowAlgorithm.isShadow(Arrays.asList("t_user", "t_order"), createNoteShadowValue(" SHARDINGSPHERE_HINT: SHADOW=true */")));
    assertFalse(shadowAlgorithm.isShadow(Arrays.asList("t_user", "t_order"), createNoteShadowValue(" SHARDINGSPHERE_HINT: SHADOW=true ")));
    assertFalse(shadowAlgorithm.isShadow(Arrays.asList("t_user", "t_order"), createNoteShadowValue(" SHARDINGSPHERE_HINT: SHADOW=true, aaa=bbb ")));
    assertFalse(shadowAlgorithm.isShadow(Arrays.asList("t_user", "t_order"), createNoteShadowValue("/* SHARDINGSPHERE_HINT: SHADOW = true ")));
    // Complete, well-formed hint -> true.
    assertTrue(shadowAlgorithm.isShadow(Arrays.asList("t_user", "t_order"), createNoteShadowValue("/* SHARDINGSPHERE_HINT: SHADOW=true */")));
}
/**
 * Wraps a selectable channel with timeout-aware I/O. The channel is switched
 * to non-blocking mode so operations can be multiplexed against a deadline.
 *
 * @param channel channel to wrap; validity is checked first
 * @param timeout I/O timeout in milliseconds
 * @throws IOException if the channel is invalid or cannot be reconfigured
 */
SocketIOWithTimeout(SelectableChannel channel, long timeout) throws IOException {
    checkChannelValidity(channel);
    this.channel = channel;
    this.timeout = timeout;
    // Set non-blocking
    channel.configureBlocking(false);
}
// End-to-end exercise of SocketInputStream/SocketOutputStream over a pipe:
// basic read/write, timeout behavior, timeout adjustment, interruption
// handling, and close semantics of both ends.
@Test
public void testSocketIOWithTimeout() throws Exception {
    // first open pipe:
    Pipe pipe = Pipe.open();
    Pipe.SourceChannel source = pipe.source();
    Pipe.SinkChannel sink = pipe.sink();
    try {
        final InputStream in = new SocketInputStream(source, TIMEOUT);
        OutputStream out = new SocketOutputStream(sink, TIMEOUT);
        byte[] writeBytes = TEST_STRING.getBytes();
        byte[] readBytes = new byte[writeBytes.length];
        byte byteWithHighBit = (byte)0x80;
        out.write(writeBytes);
        out.write(byteWithHighBit);
        doIO(null, out, TIMEOUT);
        in.read(readBytes);
        assertTrue(Arrays.equals(writeBytes, readBytes));
        // High-bit byte must round-trip unsigned.
        assertEquals(byteWithHighBit & 0xff, in.read());
        doIO(in, null, TIMEOUT);
        // Change timeout on the read side.
        ((SocketInputStream)in).setTimeout(TIMEOUT * 2);
        doIO(in, null, TIMEOUT * 2);
        /*
         * Verify that it handles interrupted threads properly.
         * Use a large timeout and expect the thread to return quickly
         * upon interruption.
         */
        ((SocketInputStream)in).setTimeout(0);
        TestingThread thread = new TestingThread(ctx) {
            @Override
            public void doWork() throws Exception {
                try {
                    in.read();
                    fail("Did not fail with interrupt");
                } catch (InterruptedIOException ste) {
                    LOG.info("Got expection while reading as expected : " + ste.getMessage());
                }
            }
        };
        ctx.addThread(thread);
        ctx.startThreads();
        // If the thread is interrupted before it calls read()
        // then it throws ClosedByInterruptException due to
        // some Java quirk. Waiting for it to call read()
        // gets it into select(), so we get the expected
        // InterruptedIOException.
        Thread.sleep(1000);
        thread.interrupt();
        ctx.stop();
        // make sure the channels are still open
        assertTrue(source.isOpen());
        assertTrue(sink.isOpen());
        // Nevertheless, the output stream is closed, because
        // a partial write may have succeeded (see comment in
        // SocketOutputStream#write(byte[]), int, int)
        // This portion of the test cannot pass on Windows due to differences in
        // behavior of partial writes. Windows appears to buffer large amounts of
        // written data and send it all atomically, thus making it impossible to
        // simulate a partial write scenario. Attempts were made to switch the
        // test from using a pipe to a network socket and also to use larger and
        // larger buffers in doIO. Nothing helped the situation though.
        if (!Shell.WINDOWS) {
            try {
                out.write(1);
                fail("Did not throw");
            } catch (IOException ioe) {
                GenericTestUtils.assertExceptionContains(
                    "stream is closed", ioe);
            }
        }
        out.close();
        assertFalse(sink.isOpen());
        // close sink and expect -1 from source.read()
        assertEquals(-1, in.read());
        // make sure close() closes the underlying channel.
        in.close();
        assertFalse(source.isOpen());
    } finally {
        if (source != null) {
            source.close();
        }
        if (sink != null) {
            sink.close();
        }
    }
}
/**
 * Converts a {@link java.nio.file.Path} into a {@link URL} that preserves the
 * path string verbatim (no authority, no port).
 *
 * @param path the path to convert
 * @return a URL whose scheme comes from the path's URI and whose file part is
 *         the unmodified path string
 * @throws MalformedURLException if the URL cannot be constructed
 */
public static URL toURL(java.nio.file.Path path) throws MalformedURLException {
    // Derive only the scheme from the URI form; keep the raw path text untouched
    // so relative paths stay relative.
    final String uriScheme = path.toUri().getScheme();
    return new URL(uriScheme, null, -1, path.toString());
}
// A relative path converted to a URL must survive the round trip: the URL's
// path component parses back to the identical relative path.
@Test
void testRelativePathToURL() throws MalformedURLException {
    final java.nio.file.Path relativePath = Paths.get("foobar");
    assertThat(relativePath).isRelative();
    final URL relativeURL = FileUtils.toURL(relativePath);
    final java.nio.file.Path transformedPath = Paths.get(relativeURL.getPath());
    assertThat(transformedPath).isEqualTo(relativePath);
}
public static String formatThousand(int amount, boolean isUseTraditional) { Assert.checkBetween(amount, -999, 999, "Number support only: (-999 ~ 999)!"); final String chinese = thousandToChinese(amount, isUseTraditional); if (amount < 20 && amount >= 10) { // "十一"而非"一十一" return chinese.substring(1); } return chinese; }
// Numbers 10-19 must format without the redundant leading "一".
@Test
public void formatThousandTest() {
    String f = NumberChineseFormatter.formatThousand(10, false);
    assertEquals("十", f);
    f = NumberChineseFormatter.formatThousand(11, false);
    assertEquals("十一", f);
    f = NumberChineseFormatter.formatThousand(19, false);
    assertEquals("十九", f);
}
/**
 * Returns true if the file exists in Dropbox. The root always exists; other
 * paths are probed via the attributes feature, with a missing path reported
 * as false rather than propagated as an error.
 */
@Override
public boolean find(final Path file, final ListProgressListener listener) throws BackgroundException {
    if(file.isRoot()) {
        return true;
    }
    try {
        return new DropboxAttributesFinderFeature(session).find(file, listener) != PathAttributes.EMPTY;
    }
    catch(NotfoundException e) {
        // Absence is an expected outcome, not a failure.
        return false;
    }
}
// A created file is found as a file but not as a directory of the same name;
// the file is deleted afterwards to clean up.
@Test
public void testFindFile() throws Exception {
    final Path file = new Path(new DefaultHomeFinderService(session).find(), new AlphanumericRandomStringService().random(), EnumSet.of(Path.Type.file));
    new DropboxTouchFeature(session).touch(file, new TransferStatus());
    assertTrue(new DropboxFindFeature(session).find(file));
    // Same absolute path but typed as directory must not match.
    assertFalse(new DropboxFindFeature(session).find(new Path(file.getAbsolute(), EnumSet.of(Path.Type.directory))));
    new DropboxDeleteFeature(session).delete(Collections.singletonList(file), new DisabledLoginCallback(), new Delete.DisabledCallback());
}
/**
 * Adds a GitHub-to-SonarQube permission mapping. The response body is not
 * needed and is closed immediately to release the connection.
 */
public void addPermissionMapping(AddGithubPermissionMappingRequest addGithubPermissionMappingRequest) {
    callEndpoint(addGithubPermissionMappingRequest).close();
}
// A successful call must issue a POST to the permission-mappings endpoint;
// the sent request is captured and its method/path verified.
@Test
public void addPermissionMapping_whenRequestIsSuccessful_returns() {
    AddGithubPermissionMappingRequest addGithubPermissionMappingRequest = new AddGithubPermissionMappingRequest("admin",
        new SonarqubePermissions(true, true, true, true, true, true));
    WsResponse response = mock(WsResponse.class);
    when(response.failIfNotSuccessful()).thenReturn(response);
    when(wsConnector.call(any(PostRequest.class))).thenReturn(response);
    githubPermissionsService.addPermissionMapping(addGithubPermissionMappingRequest);
    ArgumentCaptor<WsRequest> wsRequestArgumentCaptor = ArgumentCaptor.forClass(WsRequest.class);
    verify(wsConnector).call(wsRequestArgumentCaptor.capture());
    WsRequest request = wsRequestArgumentCaptor.getValue();
    assertThat(request.getMethod()).isEqualTo(WsRequest.Method.POST);
    assertThat(request.getPath()).isEqualTo("api/v2/dop-translation/github-permission-mappings");
}
/**
 * Parses a natural-language date expression relative to the current time.
 *
 * @throws DateNotParsableException if the expression cannot be interpreted
 */
public Result parse(final String string) throws DateNotParsableException {
    return this.parse(string, new Date());
}
// Common natural-language expressions must parse into results with both a
// from and a to timestamp; additional expressions must not throw.
@Test
public void testParse() throws Exception {
    NaturalDateParser.Result today = naturalDateParser.parse("today");
    assertNotNull(today.getFrom());
    assertNotNull(today.getTo());
    naturalDateParser.parse("today");
    naturalDateParser.parse("last week to today");
}
/**
 * This provider is unconditionally available.
 */
@Override
public boolean isAvailable() {
    return true;
}
// The round-robin load balancer provider must report itself as available.
@Test
public void testIsAvailable() {
    assertTrue(roundRobinLoadBalancerProvider.isAvailable());
}
/**
 * Ensures the named internal topic exists: validates the config of an existing
 * topic, or creates it with the configured replication factor and min-ISR
 * settings. Warns when replication is below 2, which is unsafe in production.
 */
public static void ensureTopic(
    final String name,
    final KsqlConfig ksqlConfig,
    final KafkaTopicClient topicClient
) {
    if (topicClient.isTopicExists(name)) {
        // Topic already present: just make sure its config matches expectations.
        validateTopicConfig(name, ksqlConfig, topicClient);
        return;
    }
    final short replicationFactor = ksqlConfig
        .getShort(KsqlConfig.KSQL_INTERNAL_TOPIC_REPLICAS_PROPERTY);
    if (replicationFactor < 2) {
        log.warn("Creating topic {} with replication factor of {} which is less than 2. "
            + "This is not advisable in a production environment. ", name, replicationFactor);
    }
    final short minInSyncReplicas = ksqlConfig
        .getShort(KsqlConfig.KSQL_INTERNAL_TOPIC_MIN_INSYNC_REPLICAS_PROPERTY);
    topicClient.createTopic(
        name,
        INTERNAL_TOPIC_PARTITION_COUNT,
        replicationFactor,
        ImmutableMap.<String, Object>builder()
            .putAll(INTERNAL_TOPIC_CONFIG)
            .put(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, minInSyncReplicas)
            .build()
    );
}
// An existing internal topic must have its config patched to infinite
// retention, delete cleanup policy, and no unclean leader election.
@Test
@SuppressFBWarnings("RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT")
public void shouldEnsureInternalTopicHasInfiniteRetentionAndDeleteCleanUpPolicy() {
    // Given:
    whenTopicExistsWith(1, NREPLICAS);
    // When:
    KsqlInternalTopicUtils.ensureTopic(TOPIC_NAME, ksqlConfig, topicClient);
    // Then:
    verify(topicClient).addTopicConfig(TOPIC_NAME, ImmutableMap.of(
        TopicConfig.RETENTION_MS_CONFIG, -1L,
        TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE,
        TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, false
    ));
}
/**
 * Provisions preprovisioned capacity and marks excess hosts for removal.
 * On allocation failure the method returns 0 without removing anything, so a
 * transient failure cannot cause host deprovisioning.
 *
 * @return the maintenance success factor from markForRemoval, or 0 on failure
 */
@Override
protected double maintain() {
    List<Node> provisionedSnapshot;
    try {
        NodeList nodes;
        // Host and child nodes are written in separate transactions, but both are written while holding the
        // unallocated lock. Hold the unallocated lock while reading nodes to ensure we get all the children
        // of newly provisioned hosts.
        try (Mutex ignored = nodeRepository().nodes().lockUnallocated()) {
            nodes = nodeRepository().nodes().list();
        }
        provisionedSnapshot = provision(nodes);
    } catch (NodeAllocationException | IllegalStateException e) {
        // Expected failure modes: log the message only, without a stack trace.
        log.log(Level.WARNING, "Failed to allocate preprovisioned capacity and/or find excess hosts: " + e.getMessage());
        return 0; // avoid removing excess hosts
    } catch (RuntimeException e) {
        // Unexpected failures: keep the full stack trace for diagnosis.
        log.log(Level.WARNING, "Failed to allocate preprovisioned capacity and/or find excess hosts", e);
        return 0; // avoid removing excess hosts
    }
    return markForRemoval(provisionedSnapshot);
}
// Pre-make-exclusive behavior: preprovisioned hosts are created on demand,
// left untouched by subsequent runs, and replaced (with the old one
// deprovisioned) once a host becomes exclusively allocated elsewhere.
@Test
public void works_as_before_without_make_exclusive() {
    // TODO(hakon): Remove test once make-exclusive has rolled out
    tester = new DynamicProvisioningTester(Cloud.builder().name(CloudName.AWS).dynamicProvisioning(true).allowHostSharing(false).build(),
        new MockNameResolver());
    NodeResources resources1 = new NodeResources(24, 64, 100, 10);
    setPreprovisionCapacityFlag(tester,
        new ClusterCapacity(1, resources1.vcpu(), resources1.memoryGiB(), resources1.diskGb(), resources1.bandwidthGbps(),
            resources1.diskSpeed().name(), resources1.storageType().name(), resources1.architecture().name(), null));
    tester.flagSource.withJacksonFlag(PermanentFlags.SHARED_HOST.id(),
        new SharedHost(List.of(new HostResources(48d, 128d, 200d, 20d, "fast", "remote", null, 4, "x86_64"))), SharedHost.class);
    tester.maintain();
    // Hosts are provisioned
    assertEquals(1, tester.provisionedHostsMatching(resources1));
    assertEquals(0, tester.hostProvisioner.deprovisionedHosts());
    assertEquals(Optional.empty(), tester.nodeRepository.nodes().node("host100").flatMap(Node::exclusiveToApplicationId));
    // Next maintenance run does nothing
    tester.assertNodesUnchanged();
    // One host is allocated exclusively to some other application
    tester.nodeRepository.nodes().write(tester.nodeRepository.nodes().node("host100").get()
        .withExclusiveToApplicationId(ApplicationId.from("t", "a", "i")), () -> { });
    tester.maintain();
    // New hosts are provisioned, and the empty exclusive host is deallocated
    assertEquals(1, tester.provisionedHostsMatching(resources1));
    assertEquals(1, tester.hostProvisioner.deprovisionedHosts());
    // Next maintenance run does nothing
    tester.assertNodesUnchanged();
}
/**
 * Returns the default home folder, i.e. the files context.
 */
@Override
public Path find() throws BackgroundException {
    return this.find(Context.files);
}
// With no username configured, each context must map to its fixed well-known
// Nextcloud endpoint path.
@Test
public void testFindNoUsername() throws Exception {
    final NextcloudHomeFeature feature = new NextcloudHomeFeature(new Host(new NextcloudProtocol()));
    {
        assertEquals(new Path("/ocs/v1.php", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.ocs));
        // The no-argument overload defaults to the files context.
        assertEquals(new Path("/remote.php/webdav", EnumSet.of(Path.Type.directory)), feature.find());
        assertEquals(new Path("/remote.php/webdav", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.files));
        assertEquals(new Path("/remote.php/dav/meta", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.meta));
        assertEquals(new Path("/", EnumSet.of(Path.Type.directory)), feature.find(NextcloudHomeFeature.Context.versions));
    }
}
/**
 * Records a resource entry of type OUT with a token count of 1 and no
 * call arguments.
 *
 * @throws BlockException if the entry is blocked by a rule
 */
public static Entry entry(String name) throws BlockException {
    return Env.sph.entry(name, EntryType.OUT, 1, OBJECTS0);
}
// Entering by resource name must create an OUT-typed entry under the default
// context; the entry is exited with the same count it was acquired with.
@Test
public void testStringEntryCount() throws BlockException {
    Entry e = SphU.entry("resourceName", 2);
    assertNotNull(e);
    assertEquals("resourceName", e.resourceWrapper.getName());
    assertEquals(e.resourceWrapper.getEntryType(), EntryType.OUT);
    assertEquals(ContextUtil.getContext().getName(), Constants.CONTEXT_DEFAULT_NAME);
    e.exit(2);
}
/**
 * Checks that the subject is greater than {@code other}, widening the int
 * to double and delegating to the double overload.
 */
public final void isGreaterThan(int other) {
    isGreaterThan((double) other);
}
// 2.0 is not greater than 2 (expected failure) but is greater than 1.
@Test
public void isGreaterThan_int() {
    expectFailureWhenTestingThat(2.0).isGreaterThan(2);
    assertThat(2.0).isGreaterThan(1);
}
/**
 * Imports a batch of videos into Backblaze. Each video is imported
 * idempotently (IO errors are swallowed per item) and the total imported
 * byte count is accumulated into the returned result.
 */
@Override
public ImportResult importItem(
    UUID jobId,
    IdempotentImportExecutor idempotentExecutor,
    TokenSecretAuthData authData,
    VideosContainerResource data)
    throws Exception {
    if (data == null) {
        // Nothing to do
        return ImportResult.OK;
    }
    B ackblazeDataTransferClient b2Client;
    b2Client = b2ClientFactory.getOrCreateB2Client(jobId, authData);
    // LongAdder: thread-safe accumulation of per-file byte counts.
    final LongAdder totalImportedFilesSizes = new LongAdder();
    if (data.getVideos() != null && data.getVideos().size() > 0) {
        for (VideoModel video : data.getVideos()) {
            idempotentExecutor.importAndSwallowIOExceptions(
                video,
                v -> {
                    ItemImportResult<String> fileImportResult = importSingleVideo(jobId, b2Client, v);
                    if (fileImportResult.hasBytes()) {
                        totalImportedFilesSizes.add(fileImportResult.getBytes());
                    }
                    return fileImportResult;
                });
        }
    }
    return ImportResult.OK.copyWithBytes(totalImportedFilesSizes.longValue());
}
// A single video flows through the importer: the item function passed to the
// idempotent executor is captured and invoked to verify the upload response.
@Test
public void testImportVideo() throws Exception {
    String dataId = "dataId";
    String title = "title";
    String videoUrl = "videoUrl";
    String description = "description";
    String encodingFormat = "video/mp4";
    String albumName = "albumName";
    String albumId = "albumId";
    String response = "response";
    UUID jobId = UUID.randomUUID();
    VideoModel videoObject = new VideoModel(title, videoUrl, description, encodingFormat, dataId, albumId, false, null);
    ArrayList<VideoModel> videos = new ArrayList<>();
    videos.add(videoObject);
    VideosContainerResource data = mock(VideosContainerResource.class);
    when(data.getVideos()).thenReturn(videos);
    when(executor.getCachedValue(albumId)).thenReturn(albumName);
    when(streamProvider.getInputStreamForItem(jobId, videoObject))
        .thenReturn(new InputStreamWrapper(IOUtils.toInputStream("video content", "UTF-8")));
    when(dataStore.getTempFileFromInputStream(any(), any(), any())).thenReturn(folder.toFile());
    when(client.uploadFile(eq("Video Transfer/dataId.mp4"), any())).thenReturn(response);
    when(clientFactory.getOrCreateB2Client(jobId, authData)).thenReturn(client);
    BackblazeVideosImporter sut = new BackblazeVideosImporter(monitor, dataStore, streamProvider, clientFactory);
    sut.importItem(jobId, executor, authData, data);
    // Capture the per-item import function and run it to observe the upload result.
    ArgumentCaptor<ImportFunction<VideoModel, String>> importCapture = ArgumentCaptor.forClass(ImportFunction.class);
    verify(executor, times(1))
        .importAndSwallowIOExceptions(eq(videoObject), importCapture.capture());
    String actual = importCapture.getValue().apply(videoObject).getData();
    assertEquals(response, actual);
}
/**
 * Creates the commands accessor for the main process (the owner side of the
 * shared memory, hence the {@code true} flag).
 */
public static DefaultProcessCommands main(File directory, int processNumber) {
    return new DefaultProcessCommands(directory, processNumber, true);
}
// A process number above MAX_PROCESSES must be rejected with an
// IllegalArgumentException describing the invalid value.
@Test
public void main_fails_if_processNumber_is_higher_than_MAX_PROCESSES() throws Exception {
    int processNumber = MAX_PROCESSES + 1;
    expectProcessNumberNoValidIAE(() -> {
        try (DefaultProcessCommands main = DefaultProcessCommands.main(temp.newFolder(), processNumber)) {
        }
    }, processNumber);
}
/**
 * Restores a reader from a checkpointed position. This format does not
 * support splitting, so the split must cover the whole file and the restored
 * offset must be NO_OFFSET; reading simply restarts from the beginning.
 */
@Override
public Reader<E> restoreReader(
    Configuration config,
    FSDataInputStream stream,
    long restoredOffset,
    long fileLen,
    long splitEnd)
    throws IOException {
    // current version does not support splitting.
    checkNotSplit(fileLen, splitEnd);
    checkArgument(
        restoredOffset == CheckpointedPosition.NO_OFFSET,
        "The restoredOffset should always be NO_OFFSET");
    return createReader(config, stream, fileLen, splitEnd);
}
// Restoring with a concrete offset (10) instead of NO_OFFSET must fail the
// precondition with an IllegalArgumentException.
@Test
void testRestoreGenericReaderWithWrongOffset() {
    assertThatThrownBy(
        () -> restoreReader(
            AvroParquetReaders.forGenericRecord(schema),
            new Configuration(),
            userPath,
            10,
            0,
            userPath.getFileSystem().getFileStatus(userPath).getLen()))
        .isInstanceOf(IllegalArgumentException.class);
}
/**
 * Shuts down the reaper thread and then closes the underlying timer.
 */
@Override
public void close() throws Exception {
    reaper.initiateShutdown();
    // Improve shutdown time by waking up the reaper thread
    // blocked on poll by sending a no-op.
    timer.add(new TimerTask(0) {
        @Override
        public void run() {}
    });
    reaper.awaitShutdown();
    timer.close();
}
// Closing the reaper must close the wrapped timer exactly once and leave the
// reaper thread shut down.
@Test
public void testReaperClose() throws Exception {
    Timer timer = Mockito.mock(Timer.class);
    SystemTimerReaper timerReaper = new SystemTimerReaper("reaper", timer);
    timerReaper.close();
    Mockito.verify(timer, Mockito.times(1)).close();
    TestUtils.waitForCondition(timerReaper::isShutdown, "reaper not shutdown");
}
/**
 * Round-robin assignment: members are sorted (static members compare by group
 * instance id) and partitions are dealt out in order around the member ring,
 * skipping any member not subscribed to the partition's topic.
 */
@Override
public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic,
                                                Map<String, Subscription> subscriptions) {
    Map<String, List<TopicPartition>> assignment = new HashMap<>();
    List<MemberInfo> memberInfoList = new ArrayList<>();
    for (Map.Entry<String, Subscription> memberSubscription : subscriptions.entrySet()) {
        assignment.put(memberSubscription.getKey(), new ArrayList<>());
        memberInfoList.add(new MemberInfo(memberSubscription.getKey(),
            memberSubscription.getValue().groupInstanceId()));
    }
    // Circular iterator over the sorted members gives the round-robin order.
    CircularIterator<MemberInfo> assigner = new CircularIterator<>(Utils.sorted(memberInfoList));
    for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) {
        final String topic = partition.topic();
        // Advance past members that are not subscribed to this partition's topic.
        while (!subscriptions.get(assigner.peek().memberId).topics().contains(topic))
            assigner.next();
        assignment.get(assigner.next().memberId).add(partition);
    }
    return assignment;
}
// Static members are ordered by group instance id, not member id, so
// "consumer-b" (instance1) is ranked before "consumer-a" (instance2).
@Test
public void testTwoStaticConsumersTwoTopicsSixPartitions() {
    // although consumer 2 has a higher rank than 1, the comparison happens on
    // instance id level.
    String topic1 = "topic1";
    String topic2 = "topic2";
    String consumer1 = "consumer-b";
    String instance1 = "instance1";
    String consumer2 = "consumer-a";
    String instance2 = "instance2";
    Map<String, Integer> partitionsPerTopic = setupPartitionsPerTopicWithTwoTopics(3, 3);
    Map<String, Subscription> consumers = new HashMap<>();
    Subscription consumer1Subscription = new Subscription(topics(topic1, topic2), null);
    consumer1Subscription.setGroupInstanceId(Optional.of(instance1));
    consumers.put(consumer1, consumer1Subscription);
    Subscription consumer2Subscription = new Subscription(topics(topic1, topic2), null);
    consumer2Subscription.setGroupInstanceId(Optional.of(instance2));
    consumers.put(consumer2, consumer2Subscription);
    Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers);
    assertEquals(partitions(tp(topic1, 0), tp(topic1, 2), tp(topic2, 1)), assignment.get(consumer1));
    assertEquals(partitions(tp(topic1, 1), tp(topic2, 0), tp(topic2, 2)), assignment.get(consumer2));
}
/**
 * Returns the extension loader for the given SPI interface.
 *
 * @deprecated delegates to the default module of the default application
 *             model; obtain the loader from an explicit module scope instead.
 */
@Deprecated
public static <T> ExtensionLoader<T> getExtensionLoader(Class<T> type) {
    return ApplicationModel.defaultModel().getDefaultModule().getExtensionLoader(type);
}
// Requesting a loader for a non-interface type must fail with an
// IllegalArgumentException naming the offending class.
@Test
void test_getExtensionLoader_NotInterface() {
    try {
        getExtensionLoader(ExtensionLoaderTest.class);
        fail();
    } catch (IllegalArgumentException expected) {
        assertThat(
            expected.getMessage(),
            containsString(
                "Extension type (class org.apache.dubbo.common.extension.ExtensionLoaderTest) is not an interface"));
    }
}
/**
 * Sends a GOAWAY frame. If the connection reports the GOAWAY as redundant
 * (already sent with an equal-or-lower last stream id), the debug data is
 * released and the promise completed without writing. Otherwise the frame is
 * written and the write result processed once complete.
 */
@Override
public ChannelFuture goAway(final ChannelHandlerContext ctx, final int lastStreamId, final long errorCode,
                            final ByteBuf debugData, ChannelPromise promise) {
    promise = promise.unvoid();
    final Http2Connection connection = connection();
    try {
        if (!connection.goAwaySent(lastStreamId, errorCode, debugData)) {
            // No frame to send: release our reference and succeed the promise.
            debugData.release();
            promise.trySuccess();
            return promise;
        }
    } catch (Throwable cause) {
        debugData.release();
        promise.tryFailure(cause);
        return promise;
    }
    // Need to retain before we write the buffer because if we do it after the refCnt could already be 0 and
    // result in an IllegalRefCountException.
    debugData.retain();
    ChannelFuture future = frameWriter().writeGoAway(ctx, lastStreamId, errorCode, debugData, promise);
    if (future.isDone()) {
        processGoAwayWriteResult(ctx, lastStreamId, errorCode, debugData, future);
    } else {
        future.addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                processGoAwayWriteResult(ctx, lastStreamId, errorCode, debugData, future);
            }
        });
    }
    return future;
}
// Test: two GOAWAY frames with decreasing lastStreamId are both written, and the shared debug buffer
// ends fully released (refCnt 0) after the second send.
@Test public void canSendGoAwayFramesWithDecreasingLastStreamIds() throws Exception { handler = newHandler(); ByteBuf data = dummyData(); long errorCode = Http2Error.INTERNAL_ERROR.code(); handler.goAway(ctx, STREAM_ID + 2, errorCode, data.retain(), promise); verify(frameWriter).writeGoAway(eq(ctx), eq(STREAM_ID + 2), eq(errorCode), eq(data), eq(promise)); verify(connection).goAwaySent(eq(STREAM_ID + 2), eq(errorCode), eq(data)); promise = new DefaultChannelPromise(channel); handler.goAway(ctx, STREAM_ID, errorCode, data, promise); verify(frameWriter).writeGoAway(eq(ctx), eq(STREAM_ID), eq(errorCode), eq(data), eq(promise)); verify(connection).goAwaySent(eq(STREAM_ID), eq(errorCode), eq(data)); assertEquals(0, data.refCnt()); }
// Convenience overload: parses a Hive partition key, substituting the default dynamic-partition
// marker when the key's value is absent.
public static NullableValue parsePartitionValue(HivePartitionKey key, Type type, DateTimeZone timeZone) { return parsePartitionValue(key.getName(), key.getValue().orElse(HIVE_DEFAULT_DYNAMIC_PARTITION), type, timeZone); }
// Test: partition strings are coerced to the engine's native representation per target type
// (DATE -> epoch-day long, INTEGER -> int, BOOLEAN -> boolean, VARCHAR -> Slice).
@Test public void testParsePartitionValue() { Object prestoValue = parsePartitionValue("p=1970-01-02", "1970-01-02", DATE, ZoneId.of(TimeZone.getDefault().getID())).getValue(); assertEquals(Long.parseLong(String.valueOf(prestoValue)), 1L); prestoValue = parsePartitionValue("p=1234", "1234", INTEGER, ZoneId.of(TimeZone.getDefault().getID())).getValue(); assertEquals(Integer.parseInt(String.valueOf(prestoValue)), 1234); prestoValue = parsePartitionValue("p=true", "true", BOOLEAN, ZoneId.of(TimeZone.getDefault().getID())).getValue(); assertTrue(Boolean.parseBoolean(String.valueOf(prestoValue))); prestoValue = parsePartitionValue("p=USA", "USA", VARCHAR, ZoneId.of(TimeZone.getDefault().getID())).getValue(); assertEquals(prestoValue, Slices.utf8Slice("USA")); }
// Looks up a user by exact username via the mapper; returns null semantics follow the mapper
// (presumably null when absent — confirm against selectByUsername).
@Override public AdminUserDO getUserByUsername(String username) { return userMapper.selectByUsername(username); }
// Test: inserting a random user then querying by its username returns an equal record.
@Test public void testGetUserByUsername() { // mock data dbUser = randomAdminUserDO(); insert into DB, query by username, assert field-equality. AdminUserDO dbUser = randomAdminUserDO(); userMapper.insert(dbUser); // prepare argument String username = dbUser.getUsername(); // invoke AdminUserDO user = userService.getUserByUsername(username); // assert assertPojoEquals(dbUser, user); }
// Pure delegation to the superclass assertion; NOTE(review): override appears to exist only to
// re-declare the method (e.g. for docs/visibility) — confirm, as it adds no behavior.
@Override public void isEqualTo(@Nullable Object expected) { super.isEqualTo(expected); }
// Test: the tolerance boundary constants equal the adjacent representable floats produced by
// Math.nextAfter (GWT-incompatible because nextAfter is unavailable there).
@Test @GwtIncompatible("Math.nextAfter") public void testFloatConstants_matchNextAfter() { assertThat(nextAfter(2.2f, POSITIVE_INFINITY)).isEqualTo(JUST_OVER_2POINT2); assertThat(nextAfter(3.3f, POSITIVE_INFINITY)).isEqualTo(JUST_OVER_3POINT3); assertThat(nextAfter(3.3f + DEFAULT_TOLERANCE, NEGATIVE_INFINITY)).isEqualTo(TOLERABLE_3POINT3); assertThat(nextAfter(3.3f + DEFAULT_TOLERANCE, POSITIVE_INFINITY)) .isEqualTo(INTOLERABLE_3POINT3); assertThat(nextAfter(Long.MIN_VALUE, NEGATIVE_INFINITY)).isEqualTo(UNDER_LONG_MIN); assertThat(nextAfter(2.2f + DEFAULT_TOLERANCE, NEGATIVE_INFINITY)).isEqualTo(TOLERABLE_2POINT2); assertThat(nextAfter(2.2f + DEFAULT_TOLERANCE, POSITIVE_INFINITY)) .isEqualTo(INTOLERABLE_2POINT2); }
// Sets the MISSING_VALUE_PENALTY variable declarator inside the generated evaluate-node block
// to a literal expression built from the given Double.
static void populateEvaluateNodeWithMissingValuePenalty(final BlockStmt toPopulate, final Double missingValuePenalty) { CommonCodegenUtils.setVariableDeclaratorValue(toPopulate, MISSING_VALUE_PENALTY, getExpressionForObject(missingValuePenalty)); }
// Test: after population, the previously uninitialized MISSING_VALUE_PENALTY declarator holds a
// DoubleLiteralExpr whose value equals the supplied random penalty exactly (offset 0.0).
@Test void populateEvaluateNodeWithMissingValuePenalty() { final BlockStmt toPopulate = new BlockStmt(); final VariableDeclarator variableDeclarator = new VariableDeclarator(); variableDeclarator.setType("double"); variableDeclarator.setName(MISSING_VALUE_PENALTY); toPopulate.addStatement(new VariableDeclarationExpr(variableDeclarator)); assertThat(variableDeclarator.getInitializer()).isNotPresent(); final double missingValuePenalty = new Random().nextDouble(); KiePMMLNodeFactory.populateEvaluateNodeWithMissingValuePenalty(toPopulate, missingValuePenalty); assertThat(variableDeclarator.getInitializer()).isPresent(); Expression expression = variableDeclarator.getInitializer().get(); assertThat(expression).isInstanceOf(DoubleLiteralExpr.class); DoubleLiteralExpr doubleLiteralExpr = (DoubleLiteralExpr) expression; assertThat(doubleLiteralExpr.asDouble()).isCloseTo(missingValuePenalty, Offset.offset(0.0)); }
// Validates a shadow rule configuration in dependency order: algorithms first, then the default
// algorithm name, data sources, and finally the table->dataSource and table->algorithm references.
@Override public void check(final String databaseName, final ShadowRuleConfiguration ruleConfig, final Map<String, DataSource> dataSourceMap, final Collection<ShardingSphereRule> builtRules) { checkShadowAlgorithms(ruleConfig.getShadowAlgorithms()); checkDefaultShadowAlgorithmConfiguration(ruleConfig.getDefaultShadowAlgorithmName(), ruleConfig.getShadowAlgorithms()); checkDataSources(ruleConfig.getDataSources(), dataSourceMap, databaseName); checkShadowTableDataSourcesReferences(ruleConfig.getTables(), ruleConfig.getDataSources()); checkShadowTableAlgorithmsReferences(ruleConfig.getTables(), ruleConfig.getShadowAlgorithms(), databaseName); }
// Smoke test: a well-formed configuration passes check() without throwing.
@Test void assertCheck() { new ShadowRuleConfigurationChecker().check("", createShadowRuleConfiguration(), createDataSourceMap(), Collections.emptyList()); }
// Runs the given action with the current key temporarily switched to `key`; the previous key is
// always restored in a finally block, even if the runnable throws.
public void executeInKeyContext(Runnable runnable, Object key) { final Object oldKey = currentKeySupplier.get(); setCurrentKey(key); try { runnable.run(); } finally { resetCurrentKey(oldKey); } }
// Test: inside the runnable the key is the new key; after executeInKeyContext returns the old key
// has been restored (observed via the setter-capturing AtomicInteger).
@Test void testExecuteInKeyContext() { final int oldKey = 1; final int newKey = 2; // -1 as unset value AtomicInteger setKey = new AtomicInteger(-1); DefaultStateManager stateManager = new DefaultStateManager( () -> oldKey, k -> setKey.set((Integer) k), new MockStreamingRuntimeContext(false, 1, 0), new MockOperatorStateStore()); stateManager.executeInKeyContext(() -> assertThat(setKey).hasValue(newKey), newKey); assertThat(setKey).hasValue(oldKey); }
// SQL function ST_SymDifference: deserializes both geometries, requires matching spatial
// references, and returns the serialized symmetric difference.
@Description("Returns the Geometry value that represents the point set symmetric difference of two Geometries") @ScalarFunction("ST_SymDifference") @SqlType(GEOMETRY_TYPE_NAME) public static Slice stSymmetricDifference(@SqlType(GEOMETRY_TYPE_NAME) Slice left, @SqlType(GEOMETRY_TYPE_NAME) Slice right) { OGCGeometry leftGeometry = EsriGeometrySerde.deserialize(left); OGCGeometry rightGeometry = EsriGeometrySerde.deserialize(right); verifySameSpatialReference(leftGeometry, rightGeometry); return EsriGeometrySerde.serialize(leftGeometry.symDifference(rightGeometry)); }
// Test: ST_SymDifference WKT round-trips for points, multipoints, linestrings, multilinestrings,
// polygons and multipolygons, asserting the exact textual geometry output.
@Test public void testSTSymmetricDifference() { assertFunction("ST_AsText(ST_SymDifference(ST_GeometryFromText('POINT (50 100)'), ST_GeometryFromText('POINT (50 150)')))", VARCHAR, "MULTIPOINT ((50 100), (50 150))"); assertFunction("ST_AsText(ST_SymDifference(ST_GeometryFromText('MULTIPOINT (50 100, 60 200)'), ST_GeometryFromText('MULTIPOINT (60 200, 70 150)')))", VARCHAR, "MULTIPOINT ((50 100), (70 150))"); assertFunction("ST_AsText(ST_SymDifference(ST_GeometryFromText('LINESTRING (50 100, 50 200)'), ST_GeometryFromText('LINESTRING (50 50, 50 150)')))", VARCHAR, "MULTILINESTRING ((50 50, 50 100), (50 150, 50 200))"); assertFunction("ST_AsText(ST_SymDifference(ST_GeometryFromText('MULTILINESTRING ((1 1, 5 1), (2 4, 4 4))'), ST_GeometryFromText('MULTILINESTRING ((3 4, 6 4), (5 0, 5 4))')))", VARCHAR, "MULTILINESTRING ((5 0, 5 1), (1 1, 5 1), (5 1, 5 4), (2 4, 3 4), (4 4, 5 4), (5 4, 6 4))"); assertFunction("ST_AsText(ST_SymDifference(ST_GeometryFromText('POLYGON ((1 1, 1 4, 4 4, 4 1, 1 1))'), ST_GeometryFromText('POLYGON ((2 2, 2 5, 5 5, 5 2, 2 2))')))", VARCHAR, "MULTIPOLYGON (((1 1, 1 4, 2 4, 2 2, 4 2, 4 1, 1 1)), ((4 2, 4 4, 2 4, 2 5, 5 5, 5 2, 4 2)))"); assertFunction("ST_AsText(ST_SymDifference(ST_GeometryFromText('MULTIPOLYGON (((0 0, 0 2, 2 2, 2 0, 0 0)), ((2 2, 2 4, 4 4, 4 2, 2 2)))'), ST_GeometryFromText('POLYGON ((0 0, 0 3, 3 3, 3 0, 0 0))')))", VARCHAR, "MULTIPOLYGON (((2 0, 2 2, 3 2, 3 0, 2 0)), ((0 2, 0 3, 2 3, 2 2, 0 2)), ((3 2, 3 3, 2 3, 2 4, 4 4, 4 2, 3 2)))"); }
// Convenience overload: trims a RecordTemplate in place against its own schema using the given
// projection mask; delegates to the DataMap-based variant.
public static void trimRecordTemplate(RecordTemplate recordTemplate, MaskTree override, final boolean failOnMismatch) { trimRecordTemplate(recordTemplate.data(), recordTemplate.schema(), override, failOnMismatch); }
// Test: trimRecordTemplate strips unknown fields ("evil"/"evil2") injected deep inside a nested
// array-of-array of records, restoring equality with the pre-pollution copy.
@Test public void testNestedArrayRefRecord() throws CloneNotSupportedException { TyperefTest test = new TyperefTest(); NestedArrayRefRecord nestedArrayRefRecord = new NestedArrayRefRecord(); RecordBarArray recordBarArray = new RecordBarArray(); RecordBar recordBar = new RecordBar(); recordBar.setLocation("mountain view"); recordBarArray.add(recordBar); RecordBar recordBar2 = new RecordBar(); recordBar2.setLocation("palo alto"); recordBarArray.add(recordBar2); RecordBarArrayArray recordBarArrayArray = new RecordBarArrayArray(); recordBarArrayArray.add(recordBarArray); nestedArrayRefRecord.setNestedRecordRefArray(recordBarArrayArray); test.setNestedArrayRefRecord(nestedArrayRefRecord); // Generate expected copy. TyperefTest expected = test.copy(); // Introduce bad elements. test.getNestedArrayRefRecord().getNestedRecordRefArray().get(0).get(0).data().put("evil", "bar"); test.getNestedArrayRefRecord().getNestedRecordRefArray().get(0).get(0).data().put("evil2", "bar"); test.getNestedArrayRefRecord().getNestedRecordRefArray().get(0).get(1).data().put("evil", "foo"); test.getNestedArrayRefRecord().getNestedRecordRefArray().get(0).get(1).data().put("evil2", "foo"); Assert.assertEquals(test.getNestedArrayRefRecord().getNestedRecordRefArray().get(0).get(0).data().size(), 3); Assert.assertEquals(test.getNestedArrayRefRecord().getNestedRecordRefArray().get(0).get(1).data().size(), 3); RestUtils.trimRecordTemplate(test, false); Assert.assertEquals(test, expected); }
/**
 * Formats a byte count as a human-readable size string.
 *
 * <p>Sizes below 1 KiB are rendered as an exact byte count ("1023 bytes", "1 byte");
 * larger sizes are rendered with one decimal place and a binary-prefix unit
 * ("1.0 KB", "2.5 TB", ...). Units step in powers of 1024.
 *
 * @param size the byte count to format
 * @return the human-readable representation
 */
public static String byteCountToDisplaySize(long size) {
    if (size < 1024L) {
        // Bug fix: only exactly 1 is singular — "0 bytes", not "0 byte".
        return size + (size == 1 ? " byte" : " bytes");
    }
    // exponent of 1024 selecting the unit (1 -> K, 2 -> M, 3 -> G, ...)
    long exp = (long) (Math.log(size) / Math.log(1024));
    double value = size / Math.pow(1024, exp);
    char unit = "KMGTPEZY".charAt((int) exp - 1);
    // Locale.ROOT keeps the decimal separator a '.' regardless of the platform
    // default locale, matching the callers' expected output (e.g. "2.5 TB").
    return String.format(Locale.ROOT, "%.1f %s%s", value, unit, "B");
}
// Test: 2.5 TiB (2 TiB + 512 GiB) formats as "2.5 TB".
@Test public void shouldConvertBytesToTB() { long twoGiga = 2L * 1024 * 1024 * 1024 * 1024 + 512L * 1024 * 1024 * 1024; assertThat(FileSizeUtils.byteCountToDisplaySize(twoGiga), is("2.5 TB")); }
// Reads one byte from the underlying UFS stream, returning -1 at logical EOF (position == length)
// or when the UFS stream itself is exhausted; on success advances the position and bumps the
// bytes-read-from-UFS metric by one.
@Override public int read() throws IOException { if (mPosition == mLength) { // at end of file return -1; } updateStreamIfNeeded(); int res = mUfsInStream.get().read(); if (res == -1) { return -1; } mPosition++; Metrics.BYTES_READ_FROM_UFS.inc(1); return res; }
// Test: read(byte[]) with a null buffer throws NullPointerException.
@Test public void readNullArray() throws IOException, AlluxioException { AlluxioURI ufsPath = getUfsPath(); createFile(ufsPath, CHUNK_SIZE); try (FileInStream inStream = getStream(ufsPath)) { assertThrows(NullPointerException.class, () -> inStream.read((byte[]) null)); } }
// Writes the full HTML page: header, the core report body, then the footer.
@Override void toHtml() throws IOException { writeHtmlHeader(); htmlCoreReport.toHtml(); writeHtmlFooter(); }
// Test: HTML report renders non-empty output for a non-"TOUT" period (SEMAINE) and for a custom
// date range, after collecting counters containing ordinary, slow and erroring requests.
@Test public void testPeriodeNonTout() throws IOException { // counter avec période non TOUT et des requêtes collector.collectWithoutErrors(javaInformationsList); final String requestName = "test 1"; counter.bindContext(requestName, "complete test 1", null, -1, -1); sqlCounter.addRequest("sql1", 10, 10, 10, false, -1); counter.addRequest(requestName, 0, 0, 0, false, 1000); counter.addRequest("test2", 1000, 500, 500, false, 1000); counter.addRequest("test3", 10000, 500, 500, true, 10000); collector.collectWithoutErrors(javaInformationsList); final HtmlReport htmlReport = new HtmlReport(collector, null, javaInformationsList, Period.SEMAINE, writer); htmlReport.toHtml("message 6", null); assertNotEmptyAndClear(writer); // période personnalisée final HtmlReport htmlReportRange = new HtmlReport(collector, null, javaInformationsList, Range.createCustomRange(new Date(), new Date()), writer); htmlReportRange.toHtml("message 6", null); assertNotEmptyAndClear(writer); }
// Task entry point: renders namespace/key/value templates, then stores the value (with TTL
// metadata) into the namespace KV store, honoring the overwrite flag. Returns no output.
@Override public VoidOutput run(RunContext runContext) throws Exception { String renderedNamespace = runContext.render(this.namespace); String renderedKey = runContext.render(this.key); Object renderedValue = runContext.renderTyped(this.value); KVStore kvStore = runContext.namespaceKv(renderedNamespace); kvStore.put(renderedKey, new KVValueAndMetadata(new KVMetadata(ttl), renderedValue), this.overwrite); return null; }
// Test: running Set against a namespace that does not exist ("???") throws KVStoreException.
@Test void shouldFailGivenNonExistingNamespace() { // Given RunContext runContext = this.runContextFactory.of(Map.of( "flow", Map.of("namespace", "io.kestra.test"), "inputs", Map.of( "key", TEST_KEY, "value", "test-value" ) )); Set set = Set.builder() .id(Set.class.getSimpleName()) .type(Set.class.getName()) .key("{{ inputs.key }}") .value("{{ inputs.value }}") .namespace("???") .build(); // When - Then Assertions.assertThrows(KVStoreException.class, () -> set.run(runContext)); }
// Convenience overload: builds a key for the component using the class's default LOG logger.
Object of(Object component) { return of(component, LOG); }
// Test: a "Bad component key" warning is logged for a component without a toString override —
// but only from the second call onward (first call is exempt to avoid singleton false positives).
@Test public void should_log_warning_if_toString_is_not_overridden() { Logger log = mock(Logger.class); keys.of(new Object(), log); verifyNoInteractions(log); // only on non-first runs, to avoid false-positives on singletons keys.of(new Object(), log); verify(log).warn(startsWith("Bad component key")); }
// Erases a (possibly parameterized) Type to its raw Class via GenericTypeReflector,
// e.g. List<Person> -> List.class.
public static Class<?> toRawType(Type type) { return GenericTypeReflector.erase(type); }
// Test: parameterized List<Person> erases to List.class; a raw Class passes through unchanged.
@Test public void toRawType() { var type = Types.listOf(Person.class); assertThat(Reflection.toRawType(type)).isEqualTo(List.class); assertThat(Reflection.toRawType(List.class)).isEqualTo(List.class); }
// HTTP POST endpoint: parses the YAML request body into a Flow and delegates creation
// (keeping the original source text) to doCreate; runs on the IO executor.
@ExecuteOn(TaskExecutors.IO) @Post(consumes = MediaType.APPLICATION_YAML) @Operation(tags = {"Flows"}, summary = "Create a flow from yaml source") public HttpResponse<FlowWithSource> create( @Parameter(description = "The flow") @Body String flow ) throws ConstraintViolationException { Flow flowParsed = yamlFlowParser.parse(flow, Flow.class); return HttpResponse.ok(doCreate(flowParsed, flow)); }
// Test: create a flow via POST, update it via PUT (input id changes a -> b), and verify that a
// PUT against a non-existent flow id responds 404.
@Test void updateFlowFromString() throws IOException { String flow = generateFlowAsString("updatedFlow","io.kestra.unittest","a"); Flow assertFlow = parseFlow(flow); FlowWithSource result = client.toBlocking().retrieve(POST("/api/v1/flows", flow).contentType(MediaType.APPLICATION_YAML), FlowWithSource.class); assertThat(result.getId(), is(assertFlow.getId())); assertThat(result.getInputs().getFirst().getId(), is("a")); flow = generateFlowAsString("updatedFlow","io.kestra.unittest","b"); FlowWithSource get = client.toBlocking().retrieve( PUT("/api/v1/flows/io.kestra.unittest/updatedFlow", flow).contentType(MediaType.APPLICATION_YAML), FlowWithSource.class ); assertThat(get.getId(), is(assertFlow.getId())); assertThat(get.getInputs().getFirst().getId(), is("b")); String finalFlow = flow; HttpClientResponseException e = assertThrows(HttpClientResponseException.class, () -> { HttpResponse<Void> response = client.toBlocking().exchange( PUT("/api/v1/flows/io.kestra.unittest/" + IdUtils.create(), finalFlow).contentType(MediaType.APPLICATION_YAML) ); }); assertThat(e.getStatus(), is(NOT_FOUND)); }
// Inserts or replaces a mapping in this access-ordered linked hash map. On a hit the entry is
// moved to the top (most-recent) and the old value returned; on a miss a new entry is chained into
// the bucket and linked at the top. May evict the eldest entry (removeEldestEntry on `back`) or
// rehash when size exceeds capacity. Returns null when no previous mapping existed.
public V put(final long key, final V value) { final Entry<V>[] table = this.table; final int index = HashUtil.indexFor(key, table.length, mask); for (Entry<V> e = table[index]; e != null; e = e.hashNext) { if (e.key == key) { moveToTop(e); return e.setValue(value); } } final Entry<V> e = new Entry<>(key, value); e.hashNext = table[index]; table[index] = e; final Entry<V> top = this.top; e.next = top; if (top != null) { top.previous = e; } else { back = e; } this.top = e; _size += 1; if (removeEldestEntry(back)) { remove(back.key); } else if (_size > capacity) { rehash(HashUtil.nextCapacity(capacity)); } return null; }
// Test: keySet iterates in most-recently-inserted-first order (9999 down to 0).
@Test public void keySet() { final LongLinkedHashMap<String> tested = new LongLinkedHashMap<>(); for (long i = 0; i < 10000; ++i) { tested.put(i, Long.toString(i)); } long i = 10000; for (Long key : tested.keySet()) { Assert.assertEquals(--i, key.longValue()); } }
// Resolves the proxy's bind address from configuration, honoring the PROXY_BIND_HOST override
// and falling back to the default proxy address/port.
public static InetSocketAddress getBindAddress(Configuration conf) { return conf.getSocketAddr( YarnConfiguration.PROXY_BIND_HOST, YarnConfiguration.PROXY_ADDRESS, YarnConfiguration.DEFAULT_PROXY_ADDRESS, YarnConfiguration.DEFAULT_PROXY_PORT); }
// Test: with PROXY_BIND_HOST set to 0.0.0.0, the started WebAppProxy service reports
// "0.0.0.0:<port>" as its bind address and transitions INITED -> STARTED.
@Test void testStartWithBindHost() { String bindHost = "0.0.0.0"; conf.set(YarnConfiguration.PROXY_BIND_HOST, bindHost); webAppProxy.init(conf); assertEquals(STATE.INITED, webAppProxy.getServiceState()); webAppProxy.start(); for (Service service : webAppProxy.getServices()) { if (service instanceof WebAppProxy) { assertEquals(bindHost + ":" + port, ((WebAppProxy) service).getBindAddress()); } } assertEquals(STATE.STARTED, webAppProxy.getServiceState()); }
// Returns a copy of this rule with the description replaced; the rule itself is immutable.
@Override @PublicAPI(usage = ACCESS) public SliceRule as(String newDescription) { return copyWithTransformation(new As(newDescription)); }
// Test: when detected cycles exceed the configured limit, the failure report contains the hint
// naming the limit and the `cycles.maxNumberToDetect` property.
@Test public void reports_hint_that_cycles_have_been_omitted_if_number_of_cycles_exceed_configured_limit() { int expectedNumberOfCycles = getNumberOfCyclesInCompleteGraph(7); int expectedLimit = expectedNumberOfCycles / 2; String failureReport = evaluateCompleteGraphCycleFreeWithCycleLimit(expectedLimit); assertThat(failureReport).as("failure report").contains("( >= " + expectedLimit + " times - " + "the maximum number of cycles to detect has been reached; " + "this limit can be adapted using the `archunit.properties` value `cycles.maxNumberToDetect=xxx`)"); }
// Infers a Connect Schema from a runtime value: primitives map to their fixed schemas,
// byte[]/ByteBuffer to BYTES, List/Map recurse into element/key-value inference, Struct returns
// its own schema. Returns null when the type is unrecognized (callers must handle null).
public static Schema inferSchema(Object value) { if (value instanceof String) { return Schema.STRING_SCHEMA; } else if (value instanceof Boolean) { return Schema.BOOLEAN_SCHEMA; } else if (value instanceof Byte) { return Schema.INT8_SCHEMA; } else if (value instanceof Short) { return Schema.INT16_SCHEMA; } else if (value instanceof Integer) { return Schema.INT32_SCHEMA; } else if (value instanceof Long) { return Schema.INT64_SCHEMA; } else if (value instanceof Float) { return Schema.FLOAT32_SCHEMA; } else if (value instanceof Double) { return Schema.FLOAT64_SCHEMA; } else if (value instanceof byte[] || value instanceof ByteBuffer) { return Schema.BYTES_SCHEMA; } else if (value instanceof List) { return inferListSchema((List<?>) value); } else if (value instanceof Map) { return inferMapSchema((Map<?, ?>) value); } else if (value instanceof Struct) { return ((Struct) value).schema(); } return null; }
// Test: both byte[] and ByteBuffer infer Schema.BYTES_SCHEMA.
@Test public void shouldInferByteSchema() { byte[] bytes = new byte[1]; Schema byteSchema = Values.inferSchema(bytes); assertEquals(Schema.BYTES_SCHEMA, byteSchema); Schema byteBufferSchema = Values.inferSchema(ByteBuffer.wrap(bytes)); assertEquals(Schema.BYTES_SCHEMA, byteBufferSchema); }
// Clears every section of the map in turn; concurrency/atomicity semantics are those of the
// individual sections (not atomic across sections).
public void clear() { for (int i = 0; i < sections.length; i++) { sections[i].clear(); } }
// Test: with autoShrink enabled, clear() shrinks capacity back to the initial size (8 -> 4)
// after growth caused by three insertions.
@Test public void testClear() { ConcurrentOpenHashMap<String, String> map = ConcurrentOpenHashMap.<String, String>newBuilder() .expectedItems(2) .concurrencyLevel(1) .autoShrink(true) .mapIdleFactor(0.25f) .build(); assertTrue(map.capacity() == 4); assertNull(map.put("k1", "v1")); assertNull(map.put("k2", "v2")); assertNull(map.put("k3", "v3")); assertTrue(map.capacity() == 8); map.clear(); assertTrue(map.capacity() == 4); }
// Lazily resolves the server port from whichever web application context is present
// (servlet or reactive), caching it in `port`; throws when neither context exists.
// NOTE(review): lazy caching is not synchronized — assumed to be called from a single thread; confirm.
@Override public int getPort() { if (port <= 0) { if (servletWebServerApplicationContext != null) { port = servletWebServerApplicationContext.getWebServer().getPort(); } else if (reactiveWebServerApplicationContext != null) { port = reactiveWebServerApplicationContext.getWebServer().getPort(); } else { throw new RuntimeException("Unsupported web type."); } } return port; }
// Test: port resolution for servlet, reactive, unsupported (throws with fixed message) and
// pre-set local-port registrations.
@Test public void testGetPort() { assertThat(polarisRegistration1.getPort()).isEqualTo(PORT); assertThat(polarisRegistration2.getPort()).isEqualTo(PORT + 1); try { polarisRegistration3.getPort(); } catch (RuntimeException e) { assertThat(e.getMessage()).isEqualTo("Unsupported web type."); } assertThat(polarisRegistration4.getPort()).isEqualTo(testLocalPort); }
// Converts a scalar predicate tree into Kudu predicates via the visitor (this); null input yields
// null rather than an empty list — callers must null-check.
public List<KuduPredicate> convert(ScalarOperator operator) { if (operator == null) { return null; } return operator.accept(this, null); }
// Test: an equality predicate on a date constant converts to the Kudu predicate `f3` = 2024-01-01.
@Test public void testEqDate() { ConstantOperator value = ConstantOperator.createDate(LocalDateTime.of(2024, 1, 1, 0, 0)); ScalarOperator op = new BinaryPredicateOperator(BinaryType.EQ, F3, value); List<KuduPredicate> result = CONVERTER.convert(op); Assert.assertEquals(result.get(0).toString(), "`f3` = 2024-01-01"); }
// Trims surrounding whitespace from login and name, mutating the given user in place and
// returning it for chaining. Throws NullPointerException when user (or either field) is null.
public static IUser normalizeUserInfo( IUser user ) { user.setLogin( user.getLogin().trim() ); user.setName( user.getName().trim() ); return user; }
// Test: a null user triggers NullPointerException.
@Test( expected = NullPointerException.class ) public void normalizeUserInfo_Null() { RepositoryCommonValidations.normalizeUserInfo( null ); }
// Dispatches the exchange to the matching Glance image operation (reserve/create/update/upload/
// get/getAll/delete); unknown operations raise IllegalArgumentException naming the operation.
@Override public void process(Exchange exchange) throws Exception { String operation = getOperation(exchange); switch (operation) { case GlanceConstants.RESERVE: doReserve(exchange); break; case OpenstackConstants.CREATE: doCreate(exchange); break; case OpenstackConstants.UPDATE: doUpdate(exchange); break; case GlanceConstants.UPLOAD: doUpload(exchange); break; case OpenstackConstants.GET: doGet(exchange); break; case OpenstackConstants.GET_ALL: doGetAll(exchange); break; case OpenstackConstants.DELETE: doDelete(exchange); break; default: throw new IllegalArgumentException("Unsupported operation " + operation); } }
// Test: the RESERVE operation forwards the body image to imageService.reserve and the resulting
// message body carries an id and matches the input image.
@Test public void reserveTest() throws Exception { when(endpoint.getOperation()).thenReturn(GlanceConstants.RESERVE); msg.setBody(dummyImage); producer.process(exchange); verify(imageService).reserve(captor.capture()); assertEquals(dummyImage, captor.getValue()); Image result = msg.getBody(Image.class); assertNotNull(result.getId()); assertEqualsImages(dummyImage, result); }
// Registers a gRPC producer channel keyed by client id, using the topic name as the producer
// group; for TRANSACTION-typed topics it additionally adds a transaction subscription.
protected GrpcClientChannel registerProducer(ProxyContext ctx, String topicName) { String clientId = ctx.getClientID(); LanguageCode languageCode = LanguageCode.valueOf(ctx.getLanguage()); GrpcClientChannel channel = this.grpcChannelManager.createChannel(ctx, clientId); // use topic name as producer group ClientChannelInfo clientChannelInfo = new ClientChannelInfo(channel, clientId, languageCode, parseClientVersion(ctx.getClientVersion())); this.messagingProcessor.registerProducer(ctx, topicName, clientChannelInfo); TopicMessageType topicMessageType = this.messagingProcessor.getMetadataService().getTopicMessageType(ctx, topicName); if (TopicMessageType.TRANSACTION.equals(topicMessageType)) { this.messagingProcessor.addTransactionSubscription(ctx, topicName, topicName); } return channel; }
// Test: a producer heartbeat on a TRANSACTION topic registers the producer under the topic name
// and adds a transaction subscription with the same topic as both group and topic.
@Test public void testProducerHeartbeat() throws Throwable { ProxyContext context = createContext(); this.sendProducerTelemetry(context); ArgumentCaptor<String> registerProducerGroupArgumentCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor<ClientChannelInfo> channelInfoArgumentCaptor = ArgumentCaptor.forClass(ClientChannelInfo.class); doNothing().when(this.messagingProcessor).registerProducer(any(), registerProducerGroupArgumentCaptor.capture(), channelInfoArgumentCaptor.capture()); ArgumentCaptor<String> txProducerGroupArgumentCaptor = ArgumentCaptor.forClass(String.class); ArgumentCaptor<String> txProducerTopicArgumentCaptor = ArgumentCaptor.forClass(String.class); doNothing().when(this.messagingProcessor).addTransactionSubscription(any(), txProducerGroupArgumentCaptor.capture(), txProducerTopicArgumentCaptor.capture() ); when(this.metadataService.getTopicMessageType(any(), anyString())).thenReturn(TopicMessageType.TRANSACTION); HeartbeatResponse response = this.sendProducerHeartbeat(context); assertEquals(Code.OK, response.getStatus().getCode()); assertEquals(Lists.newArrayList(TOPIC), registerProducerGroupArgumentCaptor.getAllValues()); ClientChannelInfo clientChannelInfo = channelInfoArgumentCaptor.getValue(); assertClientChannelInfo(clientChannelInfo, TOPIC); assertEquals(Lists.newArrayList(TOPIC), txProducerGroupArgumentCaptor.getAllValues()); assertEquals(Lists.newArrayList(TOPIC), txProducerTopicArgumentCaptor.getAllValues()); }
// REST endpoint: serves one application's registry payload from the response cache. Returns 403
// when registry access is disallowed; key type is JSON only when the Accept header contains
// "json", otherwise XML; 404 when the cache has no payload for the key. The thread-local request
// version is set for key construction and removed before returning.
@GET public Response getApplication(@PathParam("version") String version, @HeaderParam("Accept") final String acceptHeader, @HeaderParam(EurekaAccept.HTTP_X_EUREKA_ACCEPT) String eurekaAccept) { if (!registry.shouldAllowAccess(false)) { return Response.status(Status.FORBIDDEN).build(); } EurekaMonitors.GET_APPLICATION.increment(); CurrentRequestVersion.set(Version.toEnum(version)); KeyType keyType = Key.KeyType.JSON; if (acceptHeader == null || !acceptHeader.contains("json")) { keyType = Key.KeyType.XML; } Key cacheKey = new Key( Key.EntityType.Application, appName, keyType, CurrentRequestVersion.get(), EurekaAccept.fromString(eurekaAccept) ); String payLoad = responseCache.get(cacheKey); CurrentRequestVersion.remove(); if (payLoad != null) { logger.debug("Found: {}", appName); return Response.ok(payLoad).build(); } else { logger.debug("Not Found: {}", appName); return Response.status(Status.NOT_FOUND).build(); } }
// Test: requesting the application with Eureka "compact" accept returns a mini representation —
// not fully equal to the original, but each instance matches under the mini-equality comparator.
@Test public void testMiniAppGet() throws Exception { Response response = applicationResource.getApplication( Version.V2.name(), MediaType.APPLICATION_JSON, EurekaAccept.compact.name() ); String json = String.valueOf(response.getEntity()); DecoderWrapper decoder = CodecWrappers.getDecoder(CodecWrappers.LegacyJacksonJson.class); Application decodedApp = decoder.decode(json, Application.class); // assert false as one is mini, so should NOT equal assertThat(EurekaEntityComparators.equal(testApplication, decodedApp), is(false)); for (InstanceInfo instanceInfo : testApplication.getInstances()) { InstanceInfo decodedInfo = decodedApp.getByInstanceId(instanceInfo.getId()); assertThat(EurekaEntityComparators.equalMini(instanceInfo, decodedInfo), is(true)); } }
// Builds a CatalogLoader from properties: a custom catalog-impl (mutually exclusive with
// catalog-type), otherwise by catalog-type — hive (default; merges hive/hadoop conf dirs),
// hadoop, or rest; anything else throws UnsupportedOperationException.
static CatalogLoader createCatalogLoader( String name, Map<String, String> properties, Configuration hadoopConf) { String catalogImpl = properties.get(CatalogProperties.CATALOG_IMPL); if (catalogImpl != null) { String catalogType = properties.get(ICEBERG_CATALOG_TYPE); Preconditions.checkArgument( catalogType == null, "Cannot create catalog %s, both catalog-type and catalog-impl are set: catalog-type=%s, catalog-impl=%s", name, catalogType, catalogImpl); return CatalogLoader.custom(name, properties, hadoopConf, catalogImpl); } String catalogType = properties.getOrDefault(ICEBERG_CATALOG_TYPE, ICEBERG_CATALOG_TYPE_HIVE); switch (catalogType.toLowerCase(Locale.ENGLISH)) { case ICEBERG_CATALOG_TYPE_HIVE: // The values of properties 'uri', 'warehouse', 'hive-conf-dir' are allowed to be null, in // that case it will // fallback to parse those values from hadoop configuration which is loaded from classpath. String hiveConfDir = properties.get(HIVE_CONF_DIR); String hadoopConfDir = properties.get(HADOOP_CONF_DIR); Configuration newHadoopConf = mergeHiveConf(hadoopConf, hiveConfDir, hadoopConfDir); return CatalogLoader.hive(name, newHadoopConf, properties); case ICEBERG_CATALOG_TYPE_HADOOP: return CatalogLoader.hadoop(name, hadoopConf, properties); case ICEBERG_CATALOG_TYPE_REST: return CatalogLoader.rest(name, hadoopConf, properties); default: throw new UnsupportedOperationException( "Unknown catalog-type: " + catalogType + " (Must be 'hive', 'hadoop' or 'rest')"); } }
// Test: setting both catalog-impl and catalog-type is rejected with IllegalArgumentException.
@Test public void testCreateCatalogCustomWithHiveCatalogTypeSet() { String catalogName = "customCatalog"; props.put(CatalogProperties.CATALOG_IMPL, CustomHadoopCatalog.class.getName()); props.put( FlinkCatalogFactory.ICEBERG_CATALOG_TYPE, FlinkCatalogFactory.ICEBERG_CATALOG_TYPE_HIVE); assertThatThrownBy( () -> FlinkCatalogFactory.createCatalogLoader(catalogName, props, new Configuration())) .isInstanceOf(IllegalArgumentException.class) .hasMessageStartingWith( "Cannot create catalog customCatalog, both catalog-type and catalog-impl are set"); }
/**
 * Formats a byte count as a human-readable size string.
 *
 * <p>Sizes below 1 KiB are rendered as an exact byte count ("1023 bytes", "1 byte");
 * larger sizes are rendered with one decimal place and a binary-prefix unit
 * ("1.0 KB", "2.5 TB", ...). Units step in powers of 1024.
 *
 * @param size the byte count to format
 * @return the human-readable representation
 */
public static String byteCountToDisplaySize(long size) {
    if (size < 1024L) {
        // Bug fix: only exactly 1 is singular — "0 bytes", not "0 byte".
        return size + (size == 1 ? " byte" : " bytes");
    }
    // exponent of 1024 selecting the unit (1 -> K, 2 -> M, 3 -> G, ...)
    long exp = (long) (Math.log(size) / Math.log(1024));
    double value = size / Math.pow(1024, exp);
    char unit = "KMGTPEZY".charAt((int) exp - 1);
    // Locale.ROOT keeps the decimal separator a '.' regardless of the platform
    // default locale, matching the callers' expected output (e.g. "2.5 TB").
    return String.format(Locale.ROOT, "%.1f %s%s", value, unit, "B");
}
// Test: values below 1 KiB format as a plain byte count.
@Test public void shouldConvertBytes() { assertThat(FileSizeUtils.byteCountToDisplaySize(1023), is("1023 bytes")); }
// Initializes the batch-source executor: computes the intermediate topic name from the source's
// tenant/namespace/name, spins up a single-threaded discovery executor, parses the batch-source
// configs, initializes the wrapped source, and starts execution.
@Override public void open(Map<String, Object> config, SourceContext sourceContext) throws Exception { this.config = config; this.sourceContext = sourceContext; this.intermediateTopicName = SourceConfigUtils.computeBatchSourceIntermediateTopicName(sourceContext.getTenant(), sourceContext.getNamespace(), sourceContext.getSourceName()).toString(); this.discoveryThread = Executors.newSingleThreadExecutor( new DefaultThreadFactory( String.format("%s-batch-source-discovery", FunctionCommon.getFullyQualifiedName( sourceContext.getTenant(), sourceContext.getNamespace(), sourceContext.getSourceName())))); this.getBatchSourceConfigs(config); this.initializeBatchSource(); this.start(); }
// Smoke test: open() with a valid push configuration completes without throwing.
@Test public void testPushOpenWithRightSource() throws Exception { batchSourceExecutor.open(pushConfig, context); }
// Compares a previous restspec against a current snapshot at the given compatibility level;
// the trailing `true` flags the restspec-vs-snapshot comparison mode.
public CompatibilityInfoMap checkRestSpecVsSnapshot(String prevRestSpecPath, String currSnapshotPath, CompatibilityLevel compatLevel) { return checkCompatibility(prevRestSpecPath, currSnapshotPath, compatLevel, true); }
// Test: comparing an incompatible restspec against the snapshot reports exactly the expected set
// of incompatible errors and compatible diffs (each expected entry removed as matched; both
// collections must end empty), while the data model itself stays equivalent.
@Test public void testIncompatibleRestSpecVsSnapshot() { final Collection<CompatibilityInfo> restSpecErrors = new HashSet<>(); final Collection<CompatibilityInfo> restSpecDiffs = new HashSet<>(); restSpecErrors.add(new CompatibilityInfo(Arrays.<Object>asList("", "collection", "identifier", "type"), CompatibilityInfo.Type.TYPE_ERROR, "schema type changed from int to long")); restSpecErrors.add(new CompatibilityInfo(Arrays.<Object>asList("", "collection", "alternativeKeys"), CompatibilityInfo.Type.ARRAY_MISSING_ELEMENT, "altTwo")); restSpecErrors.add(new CompatibilityInfo(Arrays.<Object>asList("", "collection", "alternativeKeys", "alt", "type"), CompatibilityInfo.Type.VALUE_NOT_EQUAL, "long", "string")); restSpecErrors.add(new CompatibilityInfo(Arrays.<Object>asList("", "collection", "alternativeKeys", "alt", "keyCoercer"), CompatibilityInfo.Type.VALUE_NOT_EQUAL, "com.linkedin.restli.tools.twitter.IntLongCoercer", "com.linkedin.restli.tools.twitter.StringLongCoercer")); restSpecDiffs.add(new CompatibilityInfo(Arrays.<Object>asList("", "collection", "supports"), CompatibilityInfo.Type.SUPERSET, new HashSet<>(Arrays.asList("create")))); restSpecDiffs.add(new CompatibilityInfo(Arrays.<Object>asList("", "collection", "methods"), CompatibilityInfo.Type.SUPERSET, new HashSet<>(Arrays.asList("create")))); restSpecDiffs.add(new CompatibilityInfo(Arrays.<Object>asList("", "collection", "alternativeKeys"), CompatibilityInfo.Type.SUPERSET, new HashSet<>(Arrays.asList("newAlt")))); final RestLiSnapshotCompatibilityChecker checker = new RestLiSnapshotCompatibilityChecker(); final CompatibilityInfoMap incompatibleInfoMap = checker.checkRestSpecVsSnapshot(RESOURCES_DIR + FS + "idls" + FS + "twitter-statuses-incompatible.restspec.json", RESOURCES_DIR + FS + "snapshots" + FS + "twitter-statuses.snapshot.json", CompatibilityLevel.EQUIVALENT); Assert.assertTrue(incompatibleInfoMap.isModelEquivalent()); final Collection<CompatibilityInfo> restSpecIncompatibles = 
incompatibleInfoMap.getRestSpecIncompatibles(); final Collection<CompatibilityInfo> restSpecCompatibles = incompatibleInfoMap.getRestSpecCompatibles(); for (CompatibilityInfo te : restSpecErrors) { Assert.assertTrue(restSpecIncompatibles.contains(te), "Reported restspec incompatibles should contain: " + te.toString()); restSpecIncompatibles.remove(te); } for (CompatibilityInfo di : restSpecDiffs) { Assert.assertTrue(restSpecCompatibles.contains(di), "Reported restspec compatibles should contain: " + di.toString()); restSpecCompatibles.remove(di); } Assert.assertTrue(restSpecIncompatibles.isEmpty()); Assert.assertTrue(restSpecCompatibles.isEmpty()); }
// Private constructor: both endpoints are mandatory (NullPointerException on null).
private LinkKey(ConnectPoint src, ConnectPoint dst) { this.src = checkNotNull(src); this.dst = checkNotNull(dst); }
// Test: a null source connect point is rejected with NullPointerException.
@Test(expected = NullPointerException.class) public void testNullSrc() { LinkKey key = LinkKey.linkKey(null, DST1); }
// Builds TaskExecutors whose workers process one task at a time (non-batching); registers a
// per-name metrics monitor. NOTE(review): registeredMonitors.put overwrites any previous monitor
// registered under the same name — confirm names are unique per process.
static <ID, T> TaskExecutors<ID, T> singleItemExecutors(final String name, int workerCount, final TaskProcessor<T> processor, final AcceptorExecutor<ID, T> acceptorExecutor) { final AtomicBoolean isShutdown = new AtomicBoolean(); final TaskExecutorMetrics metrics = new TaskExecutorMetrics(name); registeredMonitors.put(name, metrics); return new TaskExecutors<>(idx -> new SingleTaskWorkerRunnable<>("TaskNonBatchingWorker-" + name + '-' + idx, isShutdown, metrics, processor, acceptorExecutor), workerCount, isShutdown); }
// Test: a task failing with a permanent error is NOT re-scheduled (no reprocess call).
@Test public void testSingleItemProcessingWithPermanentError() throws Exception { taskExecutors = TaskExecutors.singleItemExecutors("TEST", 1, processor, acceptorExecutor); TaskHolder<Integer, ProcessingResult> taskHolder = permanentErrorTaskHolder(1); taskQueue.add(taskHolder); // Verify that transient task is re-scheduled processor.expectPermanentErrors(1); verify(acceptorExecutor, never()).reprocess(taskHolder, ProcessingResult.TransientError); }
/**
 * Builds a composite {@code EvictionChecker} that combines the results of the given checkers
 * with the supplied boolean composition operator.
 *
 * @param compositionOperator how individual checker results are combined ({@code AND} / {@code OR})
 * @param evictionCheckers    checkers to compose; must be non-null and non-empty
 * @return the composed eviction checker
 * @throws IllegalArgumentException if either argument is null, the checker array is empty,
 *                                  or the operator is not recognized
 */
public static CompositeEvictionChecker newCompositeEvictionChecker(CompositionOperator compositionOperator,
                                                                   EvictionChecker... evictionCheckers) {
    // Fix: the failure message previously named the argument "composition", which does not
    // match the actual parameter name reported to callers.
    Preconditions.isNotNull(compositionOperator, "compositionOperator");
    Preconditions.isNotNull(evictionCheckers, "evictionCheckers");
    if (evictionCheckers.length == 0) {
        throw new IllegalArgumentException("EvictionCheckers cannot be empty!");
    }
    switch (compositionOperator) {
        case AND:
            return new CompositeEvictionCheckerWithAndComposition(evictionCheckers);
        case OR:
            return new CompositeEvictionCheckerWithOrComposition(evictionCheckers);
        default:
            throw new IllegalArgumentException("Invalid composition operator: " + compositionOperator);
    }
}
// NOTE(review): despite the name, calling the factory with no varargs passes an EMPTY
// (non-null) array, so this actually exercises the "cannot be empty" check rather than
// the null check. Both paths throw IllegalArgumentException, so the expectation holds —
// but consider renaming or passing an explicit null cast to test the null path.
@Test(expected = IllegalArgumentException.class)
public void evictionCheckersCannotBeNull() {
    CompositeEvictionChecker.newCompositeEvictionChecker(
            CompositeEvictionChecker.CompositionOperator.AND);
}
/**
 * Returns the configured buffer size, exposed as a JSON property.
 *
 * @return the buffer size
 */
@JsonProperty
public DataSize getBufferSize() {
    return bufferSize;
}
/** The gzip configuration should report its 32 KiB buffer size. */
@Test
void hasABufferSize() {
    final DataSize expectedBufferSize = DataSize.kibibytes(32);
    assertThat(gzip.getBufferSize()).isEqualTo(expectedBufferSize);
}
public int completeName(String buffer, int cursor, List<InterpreterCompletion> candidates, Map<String, String> aliases) { CursorArgument cursorArgument = parseCursorArgument(buffer, cursor); // find schema and table name if they are String schema; String table; String column; if (cursorArgument.getSchema() == null) { // process all List<CharSequence> keywordsCandidates = new ArrayList<>(); List<CharSequence> schemaCandidates = new ArrayList<>(); int keywordsRes = completeKeyword(buffer, cursor, keywordsCandidates); int schemaRes = completeSchema(buffer, cursor, schemaCandidates); addCompletions(candidates, keywordsCandidates, CompletionType.keyword.name()); addCompletions(candidates, schemaCandidates, CompletionType.schema.name()); return NumberUtils.max(keywordsRes, schemaRes); } else { schema = cursorArgument.getSchema(); if (aliases.containsKey(schema)) { // process alias case String alias = aliases.get(schema); int pointPos = alias.indexOf('.'); schema = alias.substring(0, pointPos); table = alias.substring(pointPos + 1); column = cursorArgument.getColumn(); List<CharSequence> columnCandidates = new ArrayList<>(); int columnRes = completeColumn(schema, table, column, cursorArgument.getCursorPosition(), columnCandidates); addCompletions(candidates, columnCandidates, CompletionType.column.name()); // process schema.table case } else if (cursorArgument.getTable() != null && cursorArgument.getColumn() == null) { List<CharSequence> tableCandidates = new ArrayList<>(); table = cursorArgument.getTable(); int tableRes = completeTable(schema, table, cursorArgument.getCursorPosition(), tableCandidates); addCompletions(candidates, tableCandidates, CompletionType.table.name()); return tableRes; } else { List<CharSequence> columnCandidates = new ArrayList<>(); table = cursorArgument.getTable(); column = cursorArgument.getColumn(); int columnRes = completeColumn(schema, table, column, cursorArgument.getCursorPosition(), columnCandidates); addCompletions(candidates, 
columnCandidates, CompletionType.column.name()); } } return -1; }
/**
 * With an empty buffer the completer should offer every schema plus every keyword,
 * since no table/column context is available yet.
 */
@Test
void testCompleteName_Empty() {
    String buffer = "";
    int cursor = 0;
    List<InterpreterCompletion> candidates = new ArrayList<>();
    Map<String, String> aliases = new HashMap<>();
    sqlCompleter.completeName(buffer, cursor, candidates, aliases);
    // 2 schemas + 7 keywords from the test fixture.
    assertEquals(9, candidates.size());
    assertTrue(candidates.contains(new InterpreterCompletion("prod_dds", "prod_dds", CompletionType.schema.name())));
    assertTrue(candidates.contains(new InterpreterCompletion("prod_emart", "prod_emart", CompletionType.schema.name())));
    assertTrue(candidates.contains(new InterpreterCompletion("SUM", "SUM", CompletionType.keyword.name())));
    assertTrue(candidates.contains(new InterpreterCompletion("SUBSTRING", "SUBSTRING", CompletionType.keyword.name())));
    assertTrue(candidates.contains(new InterpreterCompletion("SUBCLASS_ORIGIN", "SUBCLASS_ORIGIN", CompletionType.keyword.name())));
    assertTrue(candidates.contains(new InterpreterCompletion("SELECT", "SELECT", CompletionType.keyword.name())));
    assertTrue(candidates.contains(new InterpreterCompletion("ORDER", "ORDER", CompletionType.keyword.name())));
    assertTrue(candidates.contains(new InterpreterCompletion("LIMIT", "LIMIT", CompletionType.keyword.name())));
    assertTrue(candidates.contains(new InterpreterCompletion("FROM", "FROM", CompletionType.keyword.name())));
}
/**
 * Resolves compatibility against a previous serializer snapshot. Only another
 * {@code AvroSerializerSnapshot} can be compatible; in that case the decision is
 * delegated to the schema-level resolution of (old schema, current schema).
 *
 * @param oldSerializerSnapshot the snapshot taken by the previous serializer
 * @return the compatibility result, or {@code incompatible()} for foreign snapshot types
 */
@Override
public TypeSerializerSchemaCompatibility<T> resolveSchemaCompatibility(
        TypeSerializerSnapshot<T> oldSerializerSnapshot) {
    if (oldSerializerSnapshot instanceof AvroSerializerSnapshot) {
        final AvroSerializerSnapshot<?> previousSnapshot =
                (AvroSerializerSnapshot<?>) oldSerializerSnapshot;
        return resolveSchemaCompatibility(previousSnapshot.schema, schema);
    }
    // A snapshot of a different serializer kind can never be compatible.
    return TypeSerializerSchemaCompatibility.incompatible();
}
/** Resolving a schema against itself must yield "compatible as is". */
@Test
void sameSchemaShouldBeCompatibleAsIs() {
    assertThat(AvroSerializerSnapshot.resolveSchemaCompatibility(FIRST_NAME, FIRST_NAME))
            .is(isCompatibleAsIs());
}
static int decodeULE128(ByteBuf in, int result) throws Http2Exception { final int readerIndex = in.readerIndex(); final long v = decodeULE128(in, (long) result); if (v > Integer.MAX_VALUE) { // the maximum value that can be represented by a signed 32 bit number is: // [0x1,0x7f] + 0x7f + (0x7f << 7) + (0x7f << 14) + (0x7f << 21) + (0x6 << 28) // OR // 0x0 + 0x7f + (0x7f << 7) + (0x7f << 14) + (0x7f << 21) + (0x7 << 28) // we should reset the readerIndex if we overflowed the int type. in.readerIndex(readerIndex); throw DECODE_ULE_128_TO_INT_DECOMPRESSION_EXCEPTION; } return (int) v; }
/** The 9-byte ULE128 encoding of Long.MAX_VALUE must decode exactly. */
@Test
public void testDecodeULE128LongMax() throws Http2Exception {
    final byte[] encodedLongMax = {
            (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF,
            (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0x7F
    };
    final ByteBuf buffer = Unpooled.wrappedBuffer(encodedLongMax);
    try {
        assertEquals(Long.MAX_VALUE, decodeULE128(buffer, 0L));
    } finally {
        // Always release the wrapped buffer, even if the assertion fails.
        buffer.release();
    }
}
/**
 * Enumerates one-or-more splits for each of the given paths by recursively visiting them.
 * The {@code minDesiredSplits} hint is not used by this enumerator.
 *
 * @param paths            the root paths to enumerate
 * @param minDesiredSplits minimum desired split count (ignored here)
 * @return the collected splits for all paths
 * @throws IOException if the file system cannot be accessed
 */
@Override
public Collection<FileSourceSplit> enumerateSplits(Path[] paths, int minDesiredSplits)
        throws IOException {
    final ArrayList<FileSourceSplit> collected = new ArrayList<>();
    for (final Path rootPath : paths) {
        final FileSystem fileSystem = rootPath.getFileSystem();
        // Resolve the status once, then let the recursive helper expand directories.
        addSplitsForPath(fileSystem.getFileStatus(rootPath), fileSystem, collected);
    }
    return collected;
}
/** Files under hidden directories ('.'- or '_'-prefixed) must be excluded from enumeration. */
@Test
void testHiddenDirectories() throws Exception {
    final Path[] testPaths =
            new Path[] {
                new Path("testfs:///dir/visiblefile"),
                new Path("testfs:///dir/.hiddendir/file"),
                new Path("testfs:///_notvisible/afile")
            };
    testFs = TestingFileSystem.createWithFiles("testfs", testPaths);
    testFs.register();
    final NonSplittingRecursiveEnumerator enumerator = createEnumerator();
    final Collection<FileSourceSplit> splits =
            enumerator.enumerateSplits(new Path[] {new Path("testfs:///")}, 1);
    // Only the file outside the hidden directories should be enumerated.
    assertThat(toPaths(splits))
            .isEqualTo(Collections.singletonList(new Path("testfs:///dir/visiblefile")));
}
/**
 * Parses paragraph text of the form "%interpreter(key=value,...) script" into its
 * interpreter name, local properties, and script body. Text without a leading
 * interpreter directive is treated entirely as script text.
 *
 * @param text the raw paragraph text
 * @return the parsed interpreter name, script (leading whitespace stripped), and properties
 */
public static ParseResult parse(String text) {
    final Map<String, String> localProperties = new HashMap<>();
    final Matcher matcher = REPL_PATTERN.matcher(text);
    if (!matcher.find()) {
        // No "%interpreter" directive: the whole text is the script.
        return new ParseResult("", removeLeadingWhiteSpaces(text), localProperties);
    }
    final String intpText = matcher.group(2);
    // Skip past the leading whitespace, the '%', and the interpreter name.
    int scriptStart = matcher.group(1).length() + intpText.length() + 1;
    if (scriptStart < text.length() && text.charAt(scriptStart) == '(') {
        // A '(' immediately after the name introduces local properties; the parser
        // returns the position just past the closing ')'.
        scriptStart = parseLocalProperties(text, scriptStart, localProperties);
    }
    return new ParseResult(intpText,
        removeLeadingWhiteSpaces(text.substring(scriptStart)), localProperties);
}
/** A "%intp" directive without a '(' must yield no local properties and keep the script intact. */
@Test
void testParagraphTextNoLocalProperties() {
    final ParagraphTextParser.ParseResult result =
            ParagraphTextParser.parse("%spark.pyspark\nsc.version");
    assertEquals("spark.pyspark", result.getIntpText());
    assertEquals(0, result.getLocalProperties().size());
    assertEquals("\nsc.version", result.getScriptText());
}
@Override public boolean lessThanOrEqualTo(final OffsetVector other) { final List<Long> offsetsOther = other.getDenseRepresentation(); // Special case that says that a vectors is "less than or equal" to an uninitialized vector if (offsetsOther.isEmpty()) { return true; } Preconditions.checkState(offsetsOther.size() == offsets.get().size()); int partition = 0; for (Long offset : offsets.get()) { final long offsetOther = offsetsOther.get(partition); if (offset >= 0 && offsetOther >= 0) { if (offset > offsetOther) { return false; } } partition++; } return true; }
/** Componentwise <= semantics, including the uninitialized (empty) vector special case. */
@Test
public void shouldBeLessThanOrEqual() {
    // Given:
    PushOffsetVector pushOffsetVector1 = new PushOffsetVector(ImmutableList.of(2L, 3L, 4L));

    // Then:
    // Equal or componentwise-greater vectors satisfy <=.
    assertThat(pushOffsetVector1.lessThanOrEqualTo(
        new PushOffsetVector(ImmutableList.of(2L, 3L, 4L))), is(true));
    assertThat(pushOffsetVector1.lessThanOrEqualTo(
        new PushOffsetVector(ImmutableList.of(3L, 3L, 4L))), is(true));
    assertThat(pushOffsetVector1.lessThanOrEqualTo(
        new PushOffsetVector(ImmutableList.of(3L, 4L, 4L))), is(true));
    assertThat(pushOffsetVector1.lessThanOrEqualTo(
        new PushOffsetVector(ImmutableList.of(3L, 4L, 5L))), is(true));
    // Any strictly smaller component breaks the relation.
    assertThat(pushOffsetVector1.lessThanOrEqualTo(
        new PushOffsetVector(ImmutableList.of(1L, 3L, 4L))), is(false));
    assertThat(pushOffsetVector1.lessThanOrEqualTo(
        new PushOffsetVector(ImmutableList.of(1L, 2L, 4L))), is(false));
    assertThat(pushOffsetVector1.lessThanOrEqualTo(
        new PushOffsetVector(ImmutableList.of(1L, 2L, 3L))), is(false));
    // Special case: the empty vector acts as an upper bound for everything.
    assertThat(pushOffsetVector1.lessThanOrEqualTo(
        new PushOffsetVector(ImmutableList.of())), is(true));
}
/**
 * Refreshes the service-level ACLs by fanning the request out concurrently to the active
 * sub-clusters (or to the single sub-cluster named in the request).
 *
 * @param request the refresh request; may carry a target sub-cluster id
 * @return a success response once at least one sub-cluster responded
 * @throws YarnException if the request is null or no sub-cluster produced a response
 * @throws IOException   on transport-level failures
 */
@Override
public RefreshServiceAclsResponse refreshServiceAcls(RefreshServiceAclsRequest request)
    throws YarnException, IOException {
  // Parameter verification: a null request is counted as a failure and rejected.
  if (request == null) {
    routerMetrics.incrRefreshServiceAclsFailedRetrieved();
    RouterServerUtil.logAndThrowException("Missing RefreshServiceAcls request.", null);
  }

  // Fan the call out to the active sub-clusters.
  try {
    long startTime = clock.getTime();
    RMAdminProtocolMethod remoteMethod = new RMAdminProtocolMethod(
        new Class[]{RefreshServiceAclsRequest.class}, new Object[]{request});
    String subClusterId = request.getSubClusterId();
    Collection<RefreshServiceAclsResponse> refreshServiceAclsResps =
        remoteMethod.invokeConcurrent(this, RefreshServiceAclsResponse.class, subClusterId);
    if (CollectionUtils.isNotEmpty(refreshServiceAclsResps)) {
      // At least one sub-cluster succeeded: record the latency and report success.
      long stopTime = clock.getTime();
      routerMetrics.succeededRefreshServiceAclsRetrieved(stopTime - startTime);
      return RefreshServiceAclsResponse.newInstance();
    }
  } catch (YarnException e) {
    routerMetrics.incrRefreshServiceAclsFailedRetrieved();
    RouterServerUtil.logAndThrowException(e,
        "Unable to refreshAdminAcls due to exception. " + e.getMessage());
  }

  // Reached only when the concurrent invocation returned no responses at all.
  routerMetrics.incrRefreshServiceAclsFailedRetrieved();
  throw new YarnException("Unable to refreshServiceAcls.");
}
/** Null requests must be rejected; a well-formed request yields a non-null success response. */
@Test
public void testRefreshServiceAcls() throws Exception {
  // Null request: expect the guard-clause YarnException with its exact message.
  LambdaTestUtils.intercept(YarnException.class,
      "Missing RefreshServiceAcls request.",
      () -> interceptor.refreshServiceAcls(null));

  // Normal request: the fan-out should succeed and return a response object.
  RefreshServiceAclsRequest request = RefreshServiceAclsRequest.newInstance();
  RefreshServiceAclsResponse response = interceptor.refreshServiceAcls(request);
  assertNotNull(response);
}